GitHub Repository: torvalds/linux
Path: blob/master/arch/powerpc/include/asm/book3s/64/pgalloc.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_BOOK3S_64_PGALLOC_H
#define _ASM_POWERPC_BOOK3S_64_PGALLOC_H
/*
 */

#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/kmemleak.h>
#include <linux/percpu.h>
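
/*
 * One entry per page backing the vmemmap: a singly linked list
 * (vmemmap_list) records the physical address of each backing page and
 * the virtual address it maps, so the backing page can be looked up
 * again later (e.g. when unplugging memory).
 */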
struct vmemmap_backing {
	struct vmemmap_backing *list;
	unsigned long phys;
	unsigned long virt_addr;
};
extern struct vmemmap_backing *vmemmap_list;
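
/*
 * Implemented in the book3s64 mm code: PMD tables are carved out of
 * shared page "fragments", so several PMD tables can share one backing
 * page instead of each consuming a full page.
 */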
extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
extern void pmd_fragment_free(unsigned long *);
extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
extern void __tlb_remove_table(void *_table);
void pte_frag_destroy(void *pte_frag);
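
/*
 * The radix PGD is 64KB: a single page under CONFIG_PPC_64K_PAGES, an
 * order-4 (16 x 4KB pages) allocation under 4K pages.
 */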
static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
{
#ifdef CONFIG_PPC_64K_PAGES
	return (pgd_t *)__get_free_page(pgtable_gfp_flags(mm, PGALLOC_GFP));
#else
	struct page *page;
	page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_RETRY_MAYFAIL),
			   4);
	if (!page)
		return NULL;
	return (pgd_t *) page_address(page);
#endif
}

static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_PPC_64K_PAGES
	free_page((unsigned long)pgd);
#else
	free_pages((unsigned long)pgd, 4);
#endif
}
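
/*
 * pgd_alloc() dispatches on the active MMU: radix PGDs come straight
 * from the page allocator, hash PGDs from the PGT_CACHE slab cache.
 */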
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	if (radix_enabled())
		return radix__pgd_alloc(mm);

	pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
			       pgtable_gfp_flags(mm, GFP_KERNEL));
	if (unlikely(!pgd))
		return pgd;

	/*
	 * Don't scan the PGD for pointers, it contains references to PUDs but
	 * those references are not full pointers and so can't be recognised by
	 * kmemleak.
	 */
	kmemleak_no_scan(pgd);

	/*
	 * With hugetlb, we don't clear the second half of the page table.
	 * If we share the same slab cache with the pmd or pud level table,
	 * we need to make sure we zero out the full table on alloc.
	 * With 4K we don't store the slot in the second half. Hence we don't
	 * need to do this for 4K.
	 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES) && \
	(H_PGD_INDEX_SIZE == H_PUD_CACHE_INDEX)
	memset(pgd, 0, PGD_TABLE_SIZE);
#endif
	return pgd;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	if (radix_enabled())
		return radix__pgd_free(mm, pgd);
	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}
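
/*
 * The *_populate() helpers link a lower-level table into its parent
 * entry: __pgtable_ptr_val() encodes the table pointer, and the
 * per-level *_VAL_BITS tag it as a valid page-table pointer.
 */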
static inline void p4d_populate(struct mm_struct *mm, p4d_t *pgd, pud_t *pud)
{
	*pgd = __p4d(__pgtable_ptr_val(pud) | PGD_VAL_BITS);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud;

	pud = kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
			       pgtable_gfp_flags(mm, GFP_KERNEL));
	/*
	 * Tell kmemleak to ignore the PUD, that means don't scan it for
	 * pointers and don't consider it a leak. PUDs are typically only
	 * referred to by their PGD, but kmemleak is not able to recognise those
	 * as pointers, leading to false leak reports.
	 */
	kmemleak_ignore(pud);

	return pud;
}

static inline void __pud_free(pud_t *pud)
{
	struct page *page = virt_to_page(pud);

	/*
	 * Early pud pages allocated via memblock allocator
	 * can't be directly freed to slab. KFENCE pages have
	 * both reserved and slab flags set so need to be freed
	 * with kmem_cache_free().
	 */
	if (PageReserved(page) && !PageSlab(page))
		free_reserved_page(page);
	else
		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	return __pud_free(pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	*pud = __pud(__pgtable_ptr_val(pmd) | PUD_VAL_BITS);
}
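
/*
 * The __*_free_tlb() helpers hand the table to the mmu_gather batch,
 * deferring the actual free until the TLB flush has completed so that
 * concurrent walkers never see a freed table.
 */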
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long address)
{
	pgtable_free_tlb(tlb, pud, PUD_INDEX);
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return pmd_fragment_alloc(mm, addr);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	pmd_fragment_free((unsigned long *)pmd);
}

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long address)
{
	return pgtable_free_tlb(tlb, pmd, PMD_INDEX);
}

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	*pmd = __pmd(__pgtable_ptr_val(pte) | PMD_VAL_BITS);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t pte_page)
{
	*pmd = __pmd(__pgtable_ptr_val(pte_page) | PMD_VAL_BITS);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
				  unsigned long address)
{
	pgtable_free_tlb(tlb, table, PTE_INDEX);
}
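
/*
 * Per-page-size counts of the kernel's linear ("direct") mapping, only
 * maintained when procfs is enabled so they can be reported (e.g. the
 * DirectMap statistics exposed via /proc).
 */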
extern atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
static inline void update_page_count(int psize, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[psize]);
}

#endif /* _ASM_POWERPC_BOOK3S_64_PGALLOC_H */