/*
 * NOTE(review): web-viewer navigation chrome removed from scraped copy.
 * Provenance: torvalds/linux, arch/sparc/mm/hugetlbpage.c (blob/master).
 */
1
// SPDX-License-Identifier: GPL-2.0
2
/*
3
* SPARC64 Huge TLB page support.
4
*
5
* Copyright (C) 2002, 2003, 2006 David S. Miller ([email protected])
6
*/
7
8
#include <linux/fs.h>
9
#include <linux/mm.h>
10
#include <linux/sched/mm.h>
11
#include <linux/hugetlb.h>
12
#include <linux/pagemap.h>
13
#include <linux/sysctl.h>
14
15
#include <asm/mman.h>
16
#include <asm/pgalloc.h>
17
#include <asm/tlb.h>
18
#include <asm/tlbflush.h>
19
#include <asm/cacheflush.h>
20
#include <asm/mmu_context.h>
21
22
23
/* sun4u needs no extra TTE size encoding here; hand the entry back untouched. */
static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	return entry;
}
27
28
/*
 * Encode the hugepage size into a sun4v TTE: replace any existing
 * _PAGE_SZALL_4V bits with the encoding for @shift and, where the mapping
 * lives at the PMD/PUD level, set the matching huge-level bit.
 */
static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	/* 4MB (HPAGE_SHIFT) is the default encoding. */
	unsigned long sz_bits = _PAGE_SZ4MB_4V;
	unsigned long level_bit = 0UL;

	switch (shift) {
	case HPAGE_16GB_SHIFT:
		sz_bits = _PAGE_SZ16GB_4V;
		level_bit = _PAGE_PUD_HUGE;
		break;
	case HPAGE_2GB_SHIFT:
		sz_bits = _PAGE_SZ2GB_4V;
		level_bit = _PAGE_PMD_HUGE;
		break;
	case HPAGE_256MB_SHIFT:
		sz_bits = _PAGE_SZ256MB_4V;
		level_bit = _PAGE_PMD_HUGE;
		break;
	case HPAGE_SHIFT:
		level_bit = _PAGE_PMD_HUGE;
		break;
	case HPAGE_64K_SHIFT:
		sz_bits = _PAGE_SZ64K_4V;
		break;
	default:
		WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift);
	}

	pte_val(entry) &= ~_PAGE_SZALL_4V;
	pte_val(entry) |= sz_bits | level_bit;
	return entry;
}
60
61
/* Dispatch TTE size encoding to the sun4u or sun4v (hypervisor) variant. */
static pte_t hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	if (tlb_type != hypervisor)
		return sun4u_hugepage_shift_to_tte(entry, shift);

	return sun4v_hugepage_shift_to_tte(entry, shift);
}
68
69
/*
 * Build the arch-specific huge PTE for a mapping of order @shift:
 * mark the entry huge, fold the size into the TTE, and (on sparc64)
 * mirror the VMA's ADI state into TTE.mcd.
 */
pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
	pte_t tte = hugepage_shift_to_tte(pte_mkhuge(entry), shift);

#ifdef CONFIG_SPARC64
	/* If this vma has ADI enabled on it, turn on TTE.mcd */
	if (flags & VM_SPARC_ADI)
		tte = pte_mkmcd(tte);
	else
		tte = pte_mknotmcd(tte);
#endif
	return tte;
}
87
88
/*
 * Map a sun4v TTE's size-field bits back to a page-order shift.
 * PAGE_SHIFT is the "not a recognized hugepage encoding" answer.
 */
static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
{
	switch (pte_val(entry) & _PAGE_SZALL_4V) {
	case _PAGE_SZ16GB_4V:
		return HPAGE_16GB_SHIFT;
	case _PAGE_SZ2GB_4V:
		return HPAGE_2GB_SHIFT;
	case _PAGE_SZ256MB_4V:
		return HPAGE_256MB_SHIFT;
	case _PAGE_SZ4MB_4V:
		return REAL_HPAGE_SHIFT;
	case _PAGE_SZ64K_4V:
		return HPAGE_64K_SHIFT;
	default:
		return PAGE_SHIFT;
	}
}
115
116
/*
 * Map a sun4u TTE's size-field bits back to a page-order shift.
 * PAGE_SHIFT is the "not a recognized hugepage encoding" answer.
 */
static unsigned int sun4u_huge_tte_to_shift(pte_t entry)
{
	switch (pte_val(entry) & _PAGE_SZALL_4U) {
	case _PAGE_SZ256MB_4U:
		return HPAGE_256MB_SHIFT;
	case _PAGE_SZ4MB_4U:
		return REAL_HPAGE_SHIFT;
	case _PAGE_SZ64K_4U:
		return HPAGE_64K_SHIFT;
	default:
		return PAGE_SHIFT;
	}
}
137
138
/* Decode a TTE's size field via the cpu-appropriate (sun4u/sun4v) table. */
static unsigned long tte_to_shift(pte_t entry)
{
	return (tlb_type == hypervisor) ?
		sun4v_huge_tte_to_shift(entry) :
		sun4u_huge_tte_to_shift(entry);
}
145
146
/*
 * Like tte_to_shift(), but the caller asserts the TTE really is a
 * hugepage entry, so a PAGE_SHIFT decode means the size bits were
 * bogus and deserves a one-time warning.
 *
 * Fix: the WARN_ONCE text previously read "tto_to_shift", a typo for
 * tte_to_shift, which made the diagnostic hard to grep for.
 */
static unsigned int huge_tte_to_shift(pte_t entry)
{
	unsigned long shift = tte_to_shift(entry);

	if (shift == PAGE_SHIFT)
		WARN_ONCE(1, "tte_to_shift: invalid hugepage tte=0x%lx\n",
			  pte_val(entry));

	return shift;
}
156
157
/*
 * Byte size of the hugepage a TTE maps.  A REAL_HPAGE_SIZE decode is
 * reported as HPAGE_SIZE, since an HPAGE_SIZE page is built from two
 * REAL_HPAGE_SIZE halves.
 */
static unsigned long huge_tte_to_size(pte_t pte)
{
	unsigned long size = 1UL << huge_tte_to_shift(pte);

	return (size == REAL_HPAGE_SIZE) ? HPAGE_SIZE : size;
}
165
166
unsigned long pud_leaf_size(pud_t pud) { return 1UL << tte_to_shift(*(pte_t *)&pud); }
167
unsigned long pmd_leaf_size(pmd_t pmd) { return 1UL << tte_to_shift(*(pte_t *)&pmd); }
168
unsigned long pte_leaf_size(pte_t pte) { return 1UL << tte_to_shift(pte); }
169
170
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
171
unsigned long addr, unsigned long sz)
172
{
173
pgd_t *pgd;
174
p4d_t *p4d;
175
pud_t *pud;
176
pmd_t *pmd;
177
178
pgd = pgd_offset(mm, addr);
179
p4d = p4d_offset(pgd, addr);
180
pud = pud_alloc(mm, p4d, addr);
181
if (!pud)
182
return NULL;
183
if (sz >= PUD_SIZE)
184
return (pte_t *)pud;
185
pmd = pmd_alloc(mm, pud, addr);
186
if (!pmd)
187
return NULL;
188
if (sz >= PMD_SIZE)
189
return (pte_t *)pmd;
190
return pte_alloc_huge(mm, pmd, addr);
191
}
192
193
pte_t *huge_pte_offset(struct mm_struct *mm,
194
unsigned long addr, unsigned long sz)
195
{
196
pgd_t *pgd;
197
p4d_t *p4d;
198
pud_t *pud;
199
pmd_t *pmd;
200
201
pgd = pgd_offset(mm, addr);
202
if (pgd_none(*pgd))
203
return NULL;
204
p4d = p4d_offset(pgd, addr);
205
if (p4d_none(*p4d))
206
return NULL;
207
pud = pud_offset(p4d, addr);
208
if (pud_none(*pud))
209
return NULL;
210
if (is_hugetlb_pud(*pud))
211
return (pte_t *)pud;
212
pmd = pmd_offset(pud, addr);
213
if (pmd_none(*pmd))
214
return NULL;
215
if (is_hugetlb_pmd(*pmd))
216
return (pte_t *)pmd;
217
return pte_offset_huge(pmd, addr);
218
}
219
220
/*
 * Install a hugepage TTE at @ptep, replicating it across however many
 * entries the backing level needs, and queue TLB flush work for the
 * old mapping.
 *
 * Fix: dropped the dead store "shift = PAGE_SHIFT;" that preceded the
 * if/else chain — every branch assigns shift, so the initializer was
 * never read.
 */
void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		       pte_t *ptep, pte_t entry)
{
	unsigned int nptes, orig_shift, shift;
	unsigned long i, size;
	pte_t orig;

	size = huge_tte_to_size(entry);

	/*
	 * Pick the level backing this size: one PUD entry, one PMD entry,
	 * or a run of PAGE_SIZE ptes.
	 */
	if (size >= PUD_SIZE)
		shift = PUD_SHIFT;
	else if (size >= PMD_SIZE)
		shift = PMD_SHIFT;
	else
		shift = PAGE_SHIFT;

	nptes = size >> shift;

	/* Transition !present -> present grows the hugepage accounting. */
	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.hugetlb_pte_count += nptes;

	addr &= ~(size - 1);
	orig = *ptep;
	orig_shift = pte_none(orig) ? PAGE_SHIFT : huge_tte_to_shift(orig);

	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(pte_val(entry) + (i << shift));

	maybe_tlb_batch_add(mm, addr, ptep, orig, 0, orig_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0,
				    orig_shift);
}
255
256
/*
 * Generic-API wrapper around __set_huge_pte_at().  @sz is accepted for
 * interface compatibility but unused here: the size is re-derived from
 * the TTE's own size bits.
 */
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry, unsigned long sz)
{
	__set_huge_pte_at(mm, addr, ptep, entry);
}
261
262
/*
 * Clear the hugepage mapping at @ptep across all of its backing entries,
 * queue TLB flush work for it, and return the old TTE.
 *
 * Fix: dropped the dead store "shift = PAGE_SHIFT;" that preceded the
 * if/else chain — every branch assigns shift, so the initializer was
 * never read (same cleanup as __set_huge_pte_at()).
 */
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, unsigned long sz)
{
	unsigned int i, nptes, orig_shift, shift;
	unsigned long size;
	pte_t entry;

	entry = *ptep;
	size = huge_tte_to_size(entry);

	/* Same level selection as __set_huge_pte_at(). */
	if (size >= PUD_SIZE)
		shift = PUD_SHIFT;
	else if (size >= PMD_SIZE)
		shift = PMD_SHIFT;
	else
		shift = PAGE_SHIFT;

	nptes = size >> shift;
	orig_shift = pte_none(entry) ? PAGE_SHIFT : huge_tte_to_shift(entry);

	/* Tearing down a present mapping shrinks the hugepage accounting. */
	if (pte_present(entry))
		mm->context.hugetlb_pte_count -= nptes;

	addr &= ~(size - 1);
	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(0UL);

	maybe_tlb_batch_add(mm, addr, ptep, entry, 0, orig_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0,
				    orig_shift);

	return entry;
}
298
299