GitHub Repository: torvalds/linux
Path: blob/master/mm/debug_vm_pgtable.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <[email protected]>
 */
#define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
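
/*
 * This self-test is normally built only when CONFIG_DEBUG_VM_PGTABLE is
 * enabled, and it runs once during boot via the late_initcall() registered
 * at the bottom of this file.
 */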
/*
 * Please refer to Documentation/mm/arch_pgtable_helpers.rst for the
 * semantics expectations being validated here. All future changes in
 * here or in the documentation need to be in sync.
 */
#define RANDOM_NZVALUE	GENMASK(7, 0)
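
/*
 * GENMASK(7, 0) is 0xff, so memset()ing an entry with RANDOM_NZVALUE fills
 * every byte with a non-zero pattern. This guarantees that the dummy
 * entries used by p4d_basic_tests() and pgd_basic_tests() below are never
 * accidentally "none".
 */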
struct pgtable_debug_args {
	struct mm_struct	*mm;
	struct vm_area_struct	*vma;

	pgd_t			*pgdp;
	p4d_t			*p4dp;
	pud_t			*pudp;
	pmd_t			*pmdp;
	pte_t			*ptep;

	p4d_t			*start_p4dp;
	pud_t			*start_pudp;
	pmd_t			*start_pmdp;
	pgtable_t		start_ptep;

	unsigned long		vaddr;
	pgprot_t		page_prot;
	pgprot_t		page_prot_none;

	bool			is_contiguous_page;
	unsigned long		pud_pfn;
	unsigned long		pmd_pfn;
	unsigned long		pte_pfn;

	unsigned long		fixed_alignment;
	unsigned long		fixed_pgd_pfn;
	unsigned long		fixed_p4d_pfn;
	unsigned long		fixed_pud_pfn;
	unsigned long		fixed_pmd_pfn;
	unsigned long		fixed_pte_pfn;

	swp_entry_t		swp_entry;
};

static void __init pte_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	pte_t pte = pfn_pte(args->fixed_pte_pfn, prot);
	unsigned long val = idx, *ptr = &val;

	pr_debug("Validating PTE basic (%pGv)\n", ptr);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pte() to make sure that vm_get_page_prot(idx)
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * that the dirty bit is set.
	 */
	WARN_ON(pte_dirty(pte_wrprotect(pte)));

	WARN_ON(!pte_same(pte, pte));
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte), args->vma)));
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte, args->vma))));
	WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte))));
	WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte))));
}

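/*
 * Unlike the basic tests above, the advanced tests dereference the live
 * ptep and therefore must run with the PTE page table lock held; see the
 * pte_offset_map_lock() pairing in debug_vm_pgtable() below.
 */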
static void __init pte_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte;

	/*
	 * Architectures optimize set_pte_at() by avoiding the TLB flush.
	 * This requires that set_pte_at() not be used to update an
	 * existing pte entry, so clear the pte before calling set_pte_at().
	 *
	 * flush_dcache_page() is called after set_pte_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when it's released, and the page allocation check will fail when
	 * the page is allocated again. For architectures other than ARM64,
	 * the unexpected overhead of cache flushing is acceptable.
	 */
	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	pr_debug("Validating PTE advanced\n");
	if (WARN_ON(!args->ptep))
		return;

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	ptep_set_wrprotect(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(pte_write(pte));
	ptep_get_and_clear(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	pte = pte_wrprotect(pte);
	pte = pte_mkclean(pte);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	pte = pte_mkwrite(pte, args->vma);
	pte = pte_mkdirty(pte);
	ptep_set_access_flags(args->vma, args->vaddr, args->ptep, pte, 1);
	pte = ptep_get(args->ptep);
	WARN_ON(!(pte_write(pte) && pte_dirty(pte)));
	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	pte = pte_mkyoung(pte);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	ptep_test_and_clear_young(args->vma, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(pte_young(pte));

	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	unsigned long val = idx, *ptr = &val;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD basic (%pGv)\n", ptr);
	pmd = pfn_pmd(args->fixed_pmd_pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pmd() to make sure that vm_get_page_prot(idx)
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * that the dirty bit is set.
	 */
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd)));

	WARN_ON(!pmd_same(pmd, pmd));
	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd), args->vma)));
	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd, args->vma))));
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd))));
	WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd))));
	/*
	 * A huge page does not point to a next level page table
	 * entry. Hence this must qualify as pmd_bad().
	 */
	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}

static void __init pmd_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pmd_t pmd;
	unsigned long vaddr = args->vaddr;

	if (!has_transparent_hugepage())
		return;

	page = (args->pmd_pfn != ULONG_MAX) ? pfn_to_page(args->pmd_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pmd_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when it's released, and the page allocation check will fail when
	 * the page is allocated again. For architectures other than ARM64,
	 * the unexpected overhead of cache flushing is acceptable.
	 */
	pr_debug("Validating PMD advanced\n");
	/* Align the address wrt HPAGE_PMD_SIZE */
	vaddr &= HPAGE_PMD_MASK;

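	/*
	 * Deposit a preallocated page table so that a later THP split has
	 * a pte table to reuse; it is taken back via the matching
	 * pgtable_trans_huge_withdraw() at the end of this function.
	 */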
	pgtable_trans_huge_deposit(args->mm, args->pmdp, args->start_ptep);

	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmdp_set_wrprotect(args->mm, vaddr, args->pmdp);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(pmd_write(pmd));
	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
	pmd = pmd_wrprotect(pmd);
	pmd = pmd_mkclean(pmd);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmd = pmd_mkwrite(pmd, args->vma);
	pmd = pmd_mkdirty(pmd);
	pmdp_set_access_flags(args->vma, vaddr, args->pmdp, pmd, 1);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
	pmdp_huge_get_and_clear_full(args->vma, vaddr, args->pmdp, 1);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pmd_mkhuge(pfn_pmd(args->pmd_pfn, args->page_prot));
	pmd = pmd_mkyoung(pmd);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmdp_test_and_clear_young(args->vma, vaddr, args->pmdp);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(pmd_young(pmd));

	/* Clear the pte entries */
	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
	pgtable_trans_huge_withdraw(args->mm, args->pmdp);
}

static void __init pmd_leaf_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD leaf\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);

	/*
	 * PMD based THP is a leaf entry.
	 */
	pmd = pmd_mkhuge(pmd);
	WARN_ON(!pmd_leaf(pmd));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	unsigned long val = idx, *ptr = &val;
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	pr_debug("Validating PUD basic (%pGv)\n", ptr);
	pud = pfn_pud(args->fixed_pud_pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pud() to make sure that vm_get_page_prot(idx)
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * that the dirty bit is set.
	 */
	WARN_ON(pud_dirty(pud_wrprotect(pud)));

	WARN_ON(!pud_same(pud, pud));
	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
	WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud))));
	WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud))));
	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));
	WARN_ON(pud_dirty(pud_wrprotect(pud_mkclean(pud))));
	WARN_ON(!pud_dirty(pud_wrprotect(pud_mkdirty(pud))));

	if (mm_pmd_folded(args->mm))
		return;

	/*
	 * A huge page does not point to a next level page table
	 * entry. Hence this must qualify as pud_bad().
	 */
	WARN_ON(!pud_bad(pud_mkhuge(pud)));
}

static void __init pud_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	unsigned long vaddr = args->vaddr;
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	page = (args->pud_pfn != ULONG_MAX) ? pfn_to_page(args->pud_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pud_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when it's released, and the page allocation check will fail when
	 * the page is allocated again. For architectures other than ARM64,
	 * the unexpected overhead of cache flushing is acceptable.
	 */
	pr_debug("Validating PUD advanced\n");
	/* Align the address wrt HPAGE_PUD_SIZE */
	vaddr &= HPAGE_PUD_MASK;

	pud = pfn_pud(args->pud_pfn, args->page_prot);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pudp_set_wrprotect(args->mm, vaddr, args->pudp);
	pud = pudp_get(args->pudp);
	WARN_ON(pud_write(pud));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
	pud = pudp_get(args->pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */
	pud = pfn_pud(args->pud_pfn, args->page_prot);
	pud = pud_wrprotect(pud);
	pud = pud_mkclean(pud);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pud = pud_mkwrite(pud);
	pud = pud_mkdirty(pud);
	pudp_set_access_flags(args->vma, vaddr, args->pudp, pud, 1);
	pud = pudp_get(args->pudp);
	WARN_ON(!(pud_write(pud) && pud_dirty(pud)));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear_full(args->vma, vaddr, args->pudp, 1);
	pud = pudp_get(args->pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */

	pud = pfn_pud(args->pud_pfn, args->page_prot);
	pud = pud_mkyoung(pud);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pudp_test_and_clear_young(args->vma, vaddr, args->pudp);
	pud = pudp_get(args->pudp);
	WARN_ON(pud_young(pud));

	pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
}

static void __init pud_leaf_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	pr_debug("Validating PUD leaf\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	/*
	 * PUD based THP is a leaf entry.
	 */
	pud = pud_mkhuge(pud);
	WARN_ON(!pud_leaf(pud));
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pmd_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pud_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pmd_leaf_tests(struct pgtable_debug_args *args) { }
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pmd_huge_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!arch_vmap_pmd_supported(args->page_prot) ||
	    args->fixed_alignment < PMD_SIZE)
		return;

	pr_debug("Validating PMD huge\n");
	/*
	 * The x86 implementation of pmd_set_huge() verifies that the given
	 * PMD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*args->pmdp, __pmd(0));
	WARN_ON(!pmd_set_huge(args->pmdp, __pfn_to_phys(args->fixed_pmd_pfn), args->page_prot));
	WARN_ON(!pmd_clear_huge(args->pmdp));
	pmd = pmdp_get(args->pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pud_huge_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!arch_vmap_pud_supported(args->page_prot) ||
	    args->fixed_alignment < PUD_SIZE)
		return;

	pr_debug("Validating PUD huge\n");
	/*
	 * The x86 implementation of pud_set_huge() verifies that the given
	 * PUD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*args->pudp, __pud(0));
	WARN_ON(!pud_set_huge(args->pudp, __pfn_to_phys(args->fixed_pud_pfn), args->page_prot));
	WARN_ON(!pud_clear_huge(args->pudp));
	pud = pudp_get(args->pudp);
	WARN_ON(!pud_none(pud));
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pmd_huge_tests(struct pgtable_debug_args *args) { }
static void __init pud_huge_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

static void __init p4d_basic_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d;

	pr_debug("Validating P4D basic\n");
	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
	WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd;

	pr_debug("Validating PGD basic\n");
	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
	WARN_ON(!pgd_same(pgd, pgd));
}

#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct pgtable_debug_args *args)
{
	pud_t pud = pudp_get(args->pudp);

	if (mm_pmd_folded(args->mm))
		return;

	pr_debug("Validating PUD clear\n");
	WARN_ON(pud_none(pud));
	pud_clear(args->pudp);
	pud = pudp_get(args->pudp);
	WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (mm_pmd_folded(args->mm))
		return;

	pr_debug("Validating PUD populate\n");
	/*
	 * This entry points to a next level page table page.
	 * Hence this must not qualify as pud_bad().
	 */
	pud_populate(args->mm, args->pudp, args->start_pmdp);
	pud = pudp_get(args->pudp);
	WARN_ON(pud_bad(pud));
}
#else /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct pgtable_debug_args *args) { }
static void __init pud_populate_tests(struct pgtable_debug_args *args) { }
#endif /* PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d = p4dp_get(args->p4dp);

	if (mm_pud_folded(args->mm))
		return;

	pr_debug("Validating P4D clear\n");
	WARN_ON(p4d_none(p4d));
	p4d_clear(args->p4dp);
	p4d = p4dp_get(args->p4dp);
	WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d;

	if (mm_pud_folded(args->mm))
		return;

	pr_debug("Validating P4D populate\n");
	/*
	 * This entry points to a next level page table page.
	 * Hence this must not qualify as p4d_bad().
	 */
	pud_clear(args->pudp);
	p4d_clear(args->p4dp);
	p4d_populate(args->mm, args->p4dp, args->start_pudp);
	p4d = p4dp_get(args->p4dp);
	WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd = pgdp_get(args->pgdp);

	if (mm_p4d_folded(args->mm))
		return;

	pr_debug("Validating PGD clear\n");
	WARN_ON(pgd_none(pgd));
	pgd_clear(args->pgdp);
	pgd = pgdp_get(args->pgdp);
	WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd;

	if (mm_p4d_folded(args->mm))
		return;

	pr_debug("Validating PGD populate\n");
	/*
	 * This entry points to a next level page table page.
	 * Hence this must not qualify as pgd_bad().
	 */
	p4d_clear(args->p4dp);
	pgd_clear(args->pgdp);
	pgd_populate(args->mm, args->pgdp, args->start_p4dp);
	pgd = pgdp_get(args->pgdp);
	WARN_ON(pgd_bad(pgd));
}
#else /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct pgtable_debug_args *args) { }
static void __init pgd_clear_tests(struct pgtable_debug_args *args) { }
static void __init p4d_populate_tests(struct pgtable_debug_args *args) { }
static void __init pgd_populate_tests(struct pgtable_debug_args *args) { }
#endif /* PAGETABLE_P4D_FOLDED */

static void __init pte_clear_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte = pfn_pte(args->pte_pfn, args->page_prot);

	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pte_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when it's released, and the page allocation check will fail when
	 * the page is allocated again. For architectures other than ARM64,
	 * the unexpected overhead of cache flushing is acceptable.
	 */
	pr_debug("Validating PTE clear\n");
	if (WARN_ON(!args->ptep))
		return;

	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	WARN_ON(pte_none(pte));
	flush_dcache_page(page);
	barrier();
	ptep_clear(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));
}

static void __init pmd_clear_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd = pmdp_get(args->pmdp);

	pr_debug("Validating PMD clear\n");
	WARN_ON(pmd_none(pmd));
	pmd_clear(args->pmdp);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	pr_debug("Validating PMD populate\n");
	/*
	 * This entry points to a next level page table page.
	 * Hence this must not qualify as pmd_bad().
	 */
	pmd_populate(args->mm, args->pmdp, args->start_ptep);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(pmd_bad(pmd));
}

static void __init pte_special_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
		return;

	pr_debug("Validating PTE special\n");
	WARN_ON(!pte_special(pte_mkspecial(pte)));
}

static void __init pte_protnone_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot_none);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE protnone\n");
	WARN_ON(!pte_protnone(pte));
	WARN_ON(!pte_present(pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_protnone_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD protnone\n");
	pmd = pmd_mkhuge(pfn_pmd(args->fixed_pmd_pfn, args->page_prot_none));
	WARN_ON(!pmd_protnone(pmd));
	WARN_ON(!pmd_present(pmd));
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_protnone_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __init pte_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE soft dirty\n");
	WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
	WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
}

static void __init pte_swap_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pte_t pte;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE swap soft dirty\n");
	pte = swp_entry_to_pte(args->swp_entry);
	WARN_ON(!is_swap_pte(pte));

	WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
	WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD soft dirty\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
	WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
}

static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
	    !IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap soft dirty\n");
	pmd = swp_entry_to_pmd(args->swp_entry);
	WARN_ON(!is_swap_pmd(pmd));

	WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
	WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args) { }
static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __init pte_swap_exclusive_tests(struct pgtable_debug_args *args)
{
	swp_entry_t entry, entry2;
	pte_t pte;

	pr_debug("Validating PTE swap exclusive\n");
	entry = args->swp_entry;

	pte = swp_entry_to_pte(entry);
	WARN_ON(pte_swp_exclusive(pte));
	WARN_ON(!is_swap_pte(pte));
	entry2 = pte_to_swp_entry(pte);
	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));

	pte = pte_swp_mkexclusive(pte);
	WARN_ON(!pte_swp_exclusive(pte));
	WARN_ON(!is_swap_pte(pte));
	WARN_ON(pte_swp_soft_dirty(pte));
	entry2 = pte_to_swp_entry(pte);
	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));

	pte = pte_swp_clear_exclusive(pte);
	WARN_ON(pte_swp_exclusive(pte));
	WARN_ON(!is_swap_pte(pte));
	entry2 = pte_to_swp_entry(pte);
	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));
}

static void __init pte_swap_tests(struct pgtable_debug_args *args)
{
	swp_entry_t arch_entry;
	pte_t pte1, pte2;

	pr_debug("Validating PTE swap\n");
	pte1 = swp_entry_to_pte(args->swp_entry);
	WARN_ON(!is_swap_pte(pte1));

	arch_entry = __pte_to_swp_entry(pte1);
	pte2 = __swp_entry_to_pte(arch_entry);
	WARN_ON(memcmp(&pte1, &pte2, sizeof(pte1)));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static void __init pmd_swap_tests(struct pgtable_debug_args *args)
{
	swp_entry_t arch_entry;
	pmd_t pmd1, pmd2;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap\n");
	pmd1 = swp_entry_to_pmd(args->swp_entry);
	WARN_ON(!is_swap_pmd(pmd1));

	arch_entry = __pmd_to_swp_entry(pmd1);
	pmd2 = __swp_entry_to_pmd(arch_entry);
	WARN_ON(memcmp(&pmd1, &pmd2, sizeof(pmd1)));
}
#else /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
static void __init pmd_swap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

static void __init swap_migration_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	swp_entry_t swp;

	if (!IS_ENABLED(CONFIG_MIGRATION))
		return;

	/*
	 * swap_migration_tests() requires a dedicated page as it needs to
	 * be locked before creating a migration entry from it. Locking the
	 * page that actually maps kernel text ('start_kernel') can be really
	 * problematic. Let's use the allocated page explicitly for this
	 * purpose.
	 */
	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	pr_debug("Validating swap migration\n");

	/*
	 * make_[readable|writable]_migration_entry() expects the given page
	 * to be locked, otherwise it stumbles upon a BUG_ON().
	 */
	__SetPageLocked(page);
	swp = make_writable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(!is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(swp_offset(swp));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));
	__ClearPageLocked(page);
}

#ifdef CONFIG_HUGETLB_PAGE
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args)
{
	pte_t pte;

	pr_debug("Validating HugeTLB basic\n");
	pte = pfn_pte(args->fixed_pmd_pfn, args->page_prot);
	pte = arch_make_huge_pte(pte, PMD_SHIFT, VM_ACCESS_FLAGS);

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
	WARN_ON(!pte_huge(pte));
#endif
	WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
	WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
	WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));
}
#else /* !CONFIG_HUGETLB_PAGE */
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_thp_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD based THP\n");
	/*
	 * pmd_trans_huge() and pmd_present() must return true after
	 * MMU invalidation with pmd_mkinvalid(). This behavior is an
	 * optimization for transparent huge pages. pmd_trans_huge() must
	 * be true if pmd_page() returns a valid THP to avoid taking the
	 * pmd_lock when others walk over non-transhuge pmds (i.e. when
	 * there are no THPs allocated). Especially when splitting a THP
	 * and removing the present bit from the pmd, pmd_trans_huge()
	 * still needs to return true. pmd_present() should be true
	 * whenever pmd_trans_huge() returns true.
	 */
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd)));

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
	WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_leaf(pmd_mkinvalid(pmd_mkhuge(pmd))));
#endif /* __HAVE_ARCH_PMDP_INVALIDATE */
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_thp_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	pr_debug("Validating PUD based THP\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	WARN_ON(!pud_trans_huge(pud_mkhuge(pud)));

	/*
	 * pud_mkinvalid() has been dropped for now. Re-enable these
	 * tests when it is brought back with a modified pud_present().
	 *
	 * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud))));
	 * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud))));
	 */
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_thp_tests(struct pgtable_debug_args *args) { }
static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

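/*
 * Pick a random, page aligned address in the [FIRST_USER_ADDRESS, TASK_SIZE)
 * user range. Nothing is mapped there; the tests only need a plausible user
 * virtual address to index page tables with.
 */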
static unsigned long __init get_random_vaddr(void)
{
	unsigned long random_vaddr, random_pages, total_user_pages;

	total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;

	random_pages = get_random_long() % total_user_pages;
	random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;

	return random_vaddr;
}

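/*
 * Tear down everything init_args() and the tests created: the backing
 * (huge) pages first, then the page table pages in bottom-up order, and
 * finally the vma and mm themselves.
 */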
static void __init destroy_args(struct pgtable_debug_args *args)
{
	struct page *page = NULL;

	/* Free (huge) page */
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_pud_hugepage() &&
	    args->pud_pfn != ULONG_MAX) {
		if (args->is_contiguous_page) {
			free_contig_range(args->pud_pfn,
					  (1 << (HPAGE_PUD_SHIFT - PAGE_SHIFT)));
		} else {
			page = pfn_to_page(args->pud_pfn);
			__free_pages(page, HPAGE_PUD_SHIFT - PAGE_SHIFT);
		}

		args->pud_pfn = ULONG_MAX;
		args->pmd_pfn = ULONG_MAX;
		args->pte_pfn = ULONG_MAX;
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_hugepage() &&
	    args->pmd_pfn != ULONG_MAX) {
		if (args->is_contiguous_page) {
			free_contig_range(args->pmd_pfn, (1 << HPAGE_PMD_ORDER));
		} else {
			page = pfn_to_page(args->pmd_pfn);
			__free_pages(page, HPAGE_PMD_ORDER);
		}

		args->pmd_pfn = ULONG_MAX;
		args->pte_pfn = ULONG_MAX;
	}

	if (args->pte_pfn != ULONG_MAX) {
		page = pfn_to_page(args->pte_pfn);
		__free_page(page);

		args->pte_pfn = ULONG_MAX;
	}

	/* Free page table entries */
	if (args->start_ptep) {
		pmd_clear(args->pmdp);
		pte_free(args->mm, args->start_ptep);
		mm_dec_nr_ptes(args->mm);
	}

	if (args->start_pmdp) {
		pud_clear(args->pudp);
		pmd_free(args->mm, args->start_pmdp);
		mm_dec_nr_pmds(args->mm);
	}

	if (args->start_pudp) {
		p4d_clear(args->p4dp);
		pud_free(args->mm, args->start_pudp);
		mm_dec_nr_puds(args->mm);
	}

	if (args->start_p4dp) {
		pgd_clear(args->pgdp);
		p4d_free(args->mm, args->start_p4dp);
	}

	/* Free vma and mm struct */
	if (args->vma)
		vm_area_free(args->vma);

	if (args->mm)
		mmput(args->mm);
}

static struct page * __init
debug_vm_pgtable_alloc_huge_page(struct pgtable_debug_args *args, int order)
{
	struct page *page = NULL;

#ifdef CONFIG_CONTIG_ALLOC
	if (order > MAX_PAGE_ORDER) {
		page = alloc_contig_pages((1 << order), GFP_KERNEL,
					  first_online_node, NULL);
		if (page) {
			args->is_contiguous_page = true;
			return page;
		}
	}
#endif

	if (order <= MAX_PAGE_ORDER)
		page = alloc_pages(GFP_KERNEL, order);

	return page;
}

/*
 * Check if a physical memory range described by <pstart, pend> contains
 * an area that is of size psize, and aligned to psize.
 *
 * Don't use address 0, an all-zeroes physical address might mask bugs, and
 * it's not used on x86.
 */
static void __init phys_align_check(phys_addr_t pstart,
				    phys_addr_t pend, unsigned long psize,
				    phys_addr_t *physp, unsigned long *alignp)
{
	phys_addr_t aligned_start, aligned_end;

	if (pstart == 0)
		pstart = PAGE_SIZE;

	aligned_start = ALIGN(pstart, psize);
	aligned_end = aligned_start + psize;

	if (aligned_end > aligned_start && aligned_end <= pend) {
		*alignp = psize;
		*physp = aligned_start;
	}
}

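/*
 * Worked example: with psize = PMD_SIZE (0x200000 on most 64-bit
 * configurations), pstart = 0x1234000 rounds up to aligned_start =
 * 0x1400000 and aligned_end = 0x1600000; the range only qualifies if
 * pend >= 0x1600000.
 */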
static void __init init_fixed_pfns(struct pgtable_debug_args *args)
{
	u64 idx;
	phys_addr_t phys, pstart, pend;

	/*
	 * Initialize the fixed pfns. To do this, try to find a
	 * valid physical range, preferably aligned to PUD_SIZE,
	 * but settling for aligned to PMD_SIZE as a fallback. If
	 * neither of those is found, use the physical address of
	 * the start_kernel symbol.
	 *
	 * The memory doesn't need to be allocated, it just needs to exist
	 * as usable memory. It won't be touched.
	 *
	 * The alignment is recorded, and can be checked to see if we
	 * can run the tests that require an actual valid physical
	 * address range on some architectures ({pmd,pud}_huge_test
	 * on x86).
	 */

	phys = __pa_symbol(&start_kernel);
	args->fixed_alignment = PAGE_SIZE;

	for_each_mem_range(idx, &pstart, &pend) {
		/* First check for a PUD-aligned area */
		phys_align_check(pstart, pend, PUD_SIZE, &phys,
				 &args->fixed_alignment);

		/* If a PUD-aligned area is found, we're done */
		if (args->fixed_alignment == PUD_SIZE)
			break;

		/*
		 * If no PMD-aligned area has been found yet, check for one,
		 * but continue the loop to look for a PUD-aligned area.
		 */
		if (args->fixed_alignment < PMD_SIZE)
			phys_align_check(pstart, pend, PMD_SIZE, &phys,
					 &args->fixed_alignment);
	}

	args->fixed_pgd_pfn = __phys_to_pfn(phys & PGDIR_MASK);
	args->fixed_p4d_pfn = __phys_to_pfn(phys & P4D_MASK);
	args->fixed_pud_pfn = __phys_to_pfn(phys & PUD_MASK);
	args->fixed_pmd_pfn = __phys_to_pfn(phys & PMD_MASK);
	args->fixed_pte_pfn = __phys_to_pfn(phys & PAGE_MASK);
	WARN_ON(!pfn_valid(args->fixed_pte_pfn));
}

static int __init init_args(struct pgtable_debug_args *args)
{
	unsigned long max_swap_offset;
	struct page *page = NULL;
	int ret = 0;

	/*
	 * Initialize the debugging data.
	 *
	 * vm_get_page_prot(VM_NONE) or vm_get_page_prot(VM_SHARED|VM_NONE)
	 * will help create page table entries with PROT_NONE permission as
	 * required for pxx_protnone_tests().
	 */
	memset(args, 0, sizeof(*args));
	args->vaddr = get_random_vaddr();
	args->page_prot = vm_get_page_prot(VM_ACCESS_FLAGS);
	args->page_prot_none = vm_get_page_prot(VM_NONE);
	args->is_contiguous_page = false;
	args->pud_pfn = ULONG_MAX;
	args->pmd_pfn = ULONG_MAX;
	args->pte_pfn = ULONG_MAX;
	args->fixed_pgd_pfn = ULONG_MAX;
	args->fixed_p4d_pfn = ULONG_MAX;
	args->fixed_pud_pfn = ULONG_MAX;
	args->fixed_pmd_pfn = ULONG_MAX;
	args->fixed_pte_pfn = ULONG_MAX;

	/* Allocate mm and vma */
	args->mm = mm_alloc();
	if (!args->mm) {
		pr_err("Failed to allocate mm struct\n");
		ret = -ENOMEM;
		goto error;
	}

	args->vma = vm_area_alloc(args->mm);
	if (!args->vma) {
		pr_err("Failed to allocate vma\n");
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * Allocate page table entries. They will be modified in the tests.
	 * Let's save the page table entries so that they can be released
	 * when the tests are completed.
	 */
	args->pgdp = pgd_offset(args->mm, args->vaddr);
	args->p4dp = p4d_alloc(args->mm, args->pgdp, args->vaddr);
	if (!args->p4dp) {
		pr_err("Failed to allocate p4d entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_p4dp = p4d_offset(args->pgdp, 0UL);
	WARN_ON(!args->start_p4dp);

	args->pudp = pud_alloc(args->mm, args->p4dp, args->vaddr);
	if (!args->pudp) {
		pr_err("Failed to allocate pud entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_pudp = pud_offset(args->p4dp, 0UL);
	WARN_ON(!args->start_pudp);

	args->pmdp = pmd_alloc(args->mm, args->pudp, args->vaddr);
	if (!args->pmdp) {
		pr_err("Failed to allocate pmd entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_pmdp = pmd_offset(args->pudp, 0UL);
	WARN_ON(!args->start_pmdp);

	if (pte_alloc(args->mm, args->pmdp)) {
		pr_err("Failed to allocate pte entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_ptep = pmd_pgtable(pmdp_get(args->pmdp));
	WARN_ON(!args->start_ptep);

	init_fixed_pfns(args);

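	/*
	 * Round-tripping an entry with offset ~0UL through
	 * swp_entry_to_pte()/pte_to_swp_entry() truncates the offset to
	 * however many bits the architecture can actually store, which
	 * yields the maximum representable swap offset.
	 */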
	/* See generic_max_swapfile_size(): probe the maximum offset */
	max_swap_offset = swp_offset(pte_to_swp_entry(swp_entry_to_pte(swp_entry(0, ~0UL))));
	/* Create a swp entry with all possible bits set */
	args->swp_entry = swp_entry((1 << MAX_SWAPFILES_SHIFT) - 1, max_swap_offset);

	/*
	 * Allocate (huge) pages because some of the tests need to access
	 * the data in the pages. The corresponding tests will be skipped
	 * if we fail to allocate (huge) pages.
	 */
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_pud_hugepage()) {
		page = debug_vm_pgtable_alloc_huge_page(args,
				HPAGE_PUD_SHIFT - PAGE_SHIFT);
		if (page) {
			args->pud_pfn = page_to_pfn(page);
			args->pmd_pfn = args->pud_pfn;
			args->pte_pfn = args->pud_pfn;
			return 0;
		}
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_hugepage()) {
		page = debug_vm_pgtable_alloc_huge_page(args, HPAGE_PMD_ORDER);
		if (page) {
			args->pmd_pfn = page_to_pfn(page);
			args->pte_pfn = args->pmd_pfn;
			return 0;
		}
	}

	page = alloc_page(GFP_KERNEL);
	if (page)
		args->pte_pfn = page_to_pfn(page);

	return 0;

error:
	destroy_args(args);
	return ret;
}

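/*
 * Entry point: build the scaffolding via init_args(), run every test group
 * (taking the page table lock appropriate to each level for the modifying
 * tests), then release everything via destroy_args(). Any violated
 * expectation fires a WARN_ON() in the kernel log.
 */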
static int __init debug_vm_pgtable(void)
{
	struct pgtable_debug_args args;
	spinlock_t *ptl = NULL;
	int idx, ret;

	pr_info("Validating architecture page table helpers\n");
	ret = init_args(&args);
	if (ret)
		return ret;

	/*
	 * Iterate over each possible vm_flags combination to make sure
	 * that all the basic page table transformation validations hold
	 * true irrespective of the starting protection value for a
	 * given page table entry.
	 *
	 * Protection based vm_flags combinations are always linear
	 * and increasing, i.e. starting from VM_NONE and going up to
	 * (VM_SHARED | READ | WRITE | EXEC).
	 */
#define VM_FLAGS_START	(VM_NONE)
#define VM_FLAGS_END	(VM_SHARED | VM_EXEC | VM_WRITE | VM_READ)

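	/*
	 * With the usual flag values (VM_READ 0x1, VM_WRITE 0x2, VM_EXEC
	 * 0x4, VM_SHARED 0x8) this walks all 16 combinations, idx 0x0
	 * through 0xf, each mapped to a protection via vm_get_page_prot().
	 */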
	for (idx = VM_FLAGS_START; idx <= VM_FLAGS_END; idx++) {
		pte_basic_tests(&args, idx);
		pmd_basic_tests(&args, idx);
		pud_basic_tests(&args, idx);
	}

	/*
	 * Both P4D and PGD level tests are very basic and do not
	 * involve creating page table entries from the protection
	 * value and the given pfn. Hence just keep them out of
	 * the above iteration for now to save some test execution
	 * time.
	 */
	p4d_basic_tests(&args);
	pgd_basic_tests(&args);

	pmd_leaf_tests(&args);
	pud_leaf_tests(&args);

	pte_special_tests(&args);
	pte_protnone_tests(&args);
	pmd_protnone_tests(&args);

	pte_soft_dirty_tests(&args);
	pmd_soft_dirty_tests(&args);
	pte_swap_soft_dirty_tests(&args);
	pmd_swap_soft_dirty_tests(&args);

	pte_swap_exclusive_tests(&args);

	pte_swap_tests(&args);
	pmd_swap_tests(&args);

	swap_migration_tests(&args);

	pmd_thp_tests(&args);
	pud_thp_tests(&args);

	hugetlb_basic_tests(&args);

	/*
	 * Page table modifying tests. They need to hold
	 * the proper page table lock.
	 */

	args.ptep = pte_offset_map_lock(args.mm, args.pmdp, args.vaddr, &ptl);
	pte_clear_tests(&args);
	pte_advanced_tests(&args);
	if (args.ptep)
		pte_unmap_unlock(args.ptep, ptl);

	ptl = pmd_lock(args.mm, args.pmdp);
	pmd_clear_tests(&args);
	pmd_advanced_tests(&args);
	pmd_huge_tests(&args);
	pmd_populate_tests(&args);
	spin_unlock(ptl);

	ptl = pud_lock(args.mm, args.pudp);
	pud_clear_tests(&args);
	pud_advanced_tests(&args);
	pud_huge_tests(&args);
	pud_populate_tests(&args);
	spin_unlock(ptl);

	spin_lock(&(args.mm->page_table_lock));
	p4d_clear_tests(&args);
	pgd_clear_tests(&args);
	p4d_populate_tests(&args);
	pgd_populate_tests(&args);
	spin_unlock(&(args.mm->page_table_lock));

	destroy_args(&args);
	return 0;
}
late_initcall(debug_vm_pgtable);