GitHub Repository: torvalds/linux
Path: blob/master/arch/powerpc/mm/book3s64/pgtable.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/pkeys.h>
#include <linux/debugfs.h>
#include <linux/proc_fs.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/trace.h>
#include <asm/powernv.h>
#include <asm/firmware.h>
#include <asm/ultravisor.h>
#include <asm/kexec.h>

#include <mm/mmu_decl.h>
#include <trace/events/thp.h>

#include "internal.h"

struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
EXPORT_SYMBOL_GPL(mmu_psize_defs);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int mmu_vmemmap_psize = MMU_PAGE_4K;
#endif

unsigned long __pmd_frag_nr;
EXPORT_SYMBOL(__pmd_frag_nr);
unsigned long __pmd_frag_size_shift;
EXPORT_SYMBOL(__pmd_frag_size_shift);

#ifdef CONFIG_KFENCE
extern bool kfence_early_init;
static int __init parse_kfence_early_init(char *arg)
{
	int val;

	if (get_option(&arg, &val))
		kfence_early_init = !!val;
	return 0;
}
early_param("kfence.sample_interval", parse_kfence_early_init);
#endif
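
/*
 * Note: the early_param above piggybacks on KFENCE's own
 * "kfence.sample_interval" option so that early MMU setup can tell, before
 * kfence_init() itself parses the same option, whether KFENCE will be used
 * at boot; e.g. booting with kfence.sample_interval=0 leaves
 * kfence_early_init cleared.
 */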

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It's also called in the
 * page fault path when we don't hit any of the major fault cases, i.e., a
 * minor update of _PAGE_ACCESSED, _PAGE_DIRTY, etc. The generic code will
 * have handled those two for us; we additionally deal with missing execute
 * permission here on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp));
	assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		/*
		 * We can use MMU_PAGE_2M here, because only the radix
		 * path looks at the psize.
		 */
		__ptep_set_access_flags(vma, pmdp_ptep(pmdp),
					pmd_pte(entry), address, MMU_PAGE_2M);
	}
	return changed;
}

int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pud_t *pudp, pud_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	assert_spin_locked(pud_lockptr(vma->vm_mm, pudp));
#endif
	changed = !pud_same(*(pudp), entry);
	if (changed) {
		/*
		 * We can use MMU_PAGE_1G here, because only the radix
		 * path looks at the psize.
		 */
		__ptep_set_access_flags(vma, pudp_ptep(pudp),
					pud_pte(entry), address, MMU_PAGE_1G);
	}
	return changed;
}


int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

int pudp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pud_t *pudp)
{
	return __pudp_test_and_clear_young(vma->vm_mm, address, pudp);
}

/*
 * Set a new huge pmd. We should not be called for updating
 * an existing pmd entry. That should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * Make sure the hardware valid bit is not set. We don't do
	 * a tlb flush for this update.
	 */

	WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
	WARN_ON(!(pmd_leaf(pmd)));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

void set_pud_at(struct mm_struct *mm, unsigned long addr,
		pud_t *pudp, pud_t pud)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * Make sure the hardware valid bit is not set. We don't do
	 * a tlb flush for this update.
	 */

	WARN_ON(pte_hw_valid(pud_pte(*pudp)));
	assert_spin_locked(pud_lockptr(mm, pudp));
	WARN_ON(!(pud_leaf(pud)));
#endif
	trace_hugepage_set_pud(addr, pud_val(pud));
	return set_pte_at(mm, addr, pudp_ptep(pudp), pud_pte(pud));
}

static void do_serialize(void *arg)
{
	/* We've taken the IPI, so try to trim the mask while here */
	if (radix_enabled()) {
		struct mm_struct *mm = arg;
		exit_lazy_flush_tlb(mm, false);
	}
}

/*
 * Serialize against __find_linux_pte(), which does a lock-less lookup in the
 * page tables with local interrupts disabled. For huge pages it casts pmd_t
 * to pte_t. Since the format of pte_t is different from pmd_t, we want to
 * prevent a transition from a pmd pointing to a page table to a pmd pointing
 * to a huge page (and back) while interrupts are disabled. We clear the pmd
 * to possibly replace it with a page table pointer in different code paths.
 * So make sure we wait for the parallel __find_linux_pte() to finish.
 */
void serialize_against_pte_lookup(struct mm_struct *mm)
{
	smp_mb();
	smp_call_function_many(mm_cpumask(mm), do_serialize, mm, 1);
}
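
/*
 * Callers typically clear or invalidate the pmd first, flush the TLB, and
 * only then call this, so that any lock-less __find_linux_pte() walker that
 * might still be dereferencing the old entry has drained before the
 * underlying page table is freed or repurposed.
 */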

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to a regular pmd entry.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	unsigned long old_pmd;

	VM_WARN_ON_ONCE(!pmd_present(*pmdp));
	old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return __pmd(old_pmd);
}

pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pud_t *pudp)
{
	unsigned long old_pud;

	VM_WARN_ON_ONCE(!pud_present(*pudp));
	old_pud = pud_hugepage_update(vma->vm_mm, address, pudp, _PAGE_PRESENT, _PAGE_INVALID);
	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
	return __pud(old_pud);
}
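
/*
 * Both invalidate helpers rely on pmd/pud_hugepage_update() clearing
 * _PAGE_PRESENT while setting the software _PAGE_INVALID bit: hardware can
 * no longer use the entry, yet pmd_present()/pud_present() still report it
 * as present, which is what the generic THP code expects from
 * pmdp_invalidate().
 */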

pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmdp, int full)
{
	pmd_t pmd;
	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
	VM_BUG_ON((pmd_present(*pmdp) && !pmd_trans_huge(*pmdp)) ||
		  !pmd_present(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
	/*
	 * If it is not a fullmm flush, then we can possibly end up converting
	 * this PMD pte entry to a regular level 0 PTE by a parallel page fault.
	 * Make sure we flush the tlb in this case.
	 */
	if (!full)
		flush_pmd_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
	return pmd;
}

pud_t pudp_huge_get_and_clear_full(struct vm_area_struct *vma,
				   unsigned long addr, pud_t *pudp, int full)
{
	pud_t pud;

	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
	VM_BUG_ON(!pud_present(*pudp));
	pud = pudp_huge_get_and_clear(vma->vm_mm, addr, pudp);
	/*
	 * If it is not a fullmm flush, then we can possibly end up converting
	 * this PUD pte entry to a regular level 0 PTE by a parallel page fault.
	 * Make sure we flush the tlb in this case.
	 */
	if (!full)
		flush_pud_tlb_range(vma, addr, addr + HPAGE_PUD_SIZE);
	return pud;
}

static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

static pud_t pud_set_protbits(pud_t pud, pgprot_t pgprot)
{
	return __pud(pud_val(pud) | pgprot_val(pgprot));
}

/*
 * At some point we should be able to get rid of
 * pmd_mkhuge() and mk_huge_pmd() when we update all the
 * other archs to mark the pmd huge in pfn_pmd().
 */
pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pmdv;

	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;

	return __pmd_mkhuge(pmd_set_protbits(__pmd(pmdv), pgprot));
}

pud_t pfn_pud(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pudv;

	pudv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;

	return __pud_mkhuge(pud_set_protbits(__pud(pudv), pgprot));
}

pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long pmdv;

	pmdv = pmd_val(pmd);
	pmdv &= _HPAGE_CHG_MASK;
	return pmd_set_protbits(__pmd(pmdv), newprot);
}

pud_t pud_modify(pud_t pud, pgprot_t newprot)
{
	unsigned long pudv;

	pudv = pud_val(pud);
	pudv &= _HPAGE_CHG_MASK;
	return pud_set_protbits(__pud(pudv), newprot);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec, called with MMU off */
notrace void mmu_cleanup_all(void)
{
	if (radix_enabled())
		radix__mmu_cleanup_all();
	else if (mmu_hash_ops.hpte_clear_all)
		mmu_hash_ops.hpte_clear_all();

	reset_sprs();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int __meminit create_section_mapping(unsigned long start, unsigned long end,
				     int nid, pgprot_t prot)
{
	if (radix_enabled())
		return radix__create_section_mapping(start, end, nid, prot);

	return hash__create_section_mapping(start, end, nid, prot);
}

int __meminit remove_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__remove_section_mapping(start, end);

	return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

void __init mmu_partition_table_init(void)
{
	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
	unsigned long ptcr;

	/* Initialize the Partition Table with no entries */
	partition_tb = memblock_alloc_or_panic(patb_size, patb_size);
	ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
	set_ptcr_when_no_uv(ptcr);
	powernv_set_nmmu_ptcr(ptcr);
}
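
/*
 * The PTCR value written above encodes the physical base address of the
 * partition table together with its size: the low-order field is
 * log2(table size in bytes) - 12, which is exactly what
 * (PATB_SIZE_SHIFT - 12) computes.
 */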

static void flush_partition(unsigned int lpid, bool radix)
{
	if (radix) {
		radix__flush_all_lpid(lpid);
		radix__flush_all_lpid_guest(lpid);
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		/* do we need fixup here? */
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
	}
}

void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
				   unsigned long dw1, bool flush)
{
	unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

	/*
	 * When the ultravisor is enabled, the partition table is stored in
	 * secure memory and can only be accessed by doing an ultravisor call.
	 * However, we maintain a copy of the partition table in normal memory
	 * to allow Nest MMU translations to occur (for normal VMs).
	 *
	 * Therefore, here we always update partition_tb, regardless of whether
	 * we are running under an ultravisor or not.
	 */
	partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	partition_tb[lpid].patb1 = cpu_to_be64(dw1);

	/*
	 * If the ultravisor is enabled, we do an ultravisor call to register
	 * the partition table entry (PATE), which also does a global flush of
	 * TLBs and partition table caches for the lpid. Otherwise, just do the
	 * flush. The type of flush (hash or radix) depends on what the
	 * previous use of the partition ID was, not the new use.
	 */
	if (firmware_has_feature(FW_FEATURE_ULTRAVISOR)) {
		uv_register_pate(lpid, dw0, dw1);
		pr_info("PATE registered by ultravisor: dw0 = 0x%lx, dw1 = 0x%lx\n",
			dw0, dw1);
	} else if (flush) {
		/*
		 * Boot does not need to flush, because the MMU is off and each
		 * CPU does a tlbiel_all() before switching it on, which
		 * flushes everything.
		 */
		flush_partition(lpid, (old & PATB_HR));
	}
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);

static pmd_t *get_pmd_from_cache(struct mm_struct *mm)
{
	void *pmd_frag, *ret;

	if (PMD_FRAG_NR == 1)
		return NULL;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pmd_frag;
	if (ret) {
		pmd_frag = ret + PMD_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments, clear the
		 * cached pmd_frag pointer.
		 */
		if (((unsigned long)pmd_frag & ~PAGE_MASK) == 0)
			pmd_frag = NULL;
		mm->context.pmd_frag = pmd_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pmd_t *)ret;
}

static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
{
	void *ret = NULL;
	struct ptdesc *ptdesc;
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	ptdesc = pagetable_alloc(gfp, 0);
	if (!ptdesc)
		return NULL;
	if (!pagetable_pmd_ctor(mm, ptdesc)) {
		pagetable_free(ptdesc);
		return NULL;
	}

	atomic_set(&ptdesc->pt_frag_refcount, 1);

	ret = ptdesc_address(ptdesc);
	/*
	 * If we support only one fragment, just return the
	 * allocated page.
	 */
	if (PMD_FRAG_NR == 1)
		return ret;

	spin_lock(&mm->page_table_lock);
	/*
	 * If another thread already installed a fragment page while we were
	 * allocating, return our page as a single fragment (refcount stays 1).
	 */
	if (likely(!mm->context.pmd_frag)) {
		atomic_set(&ptdesc->pt_frag_refcount, PMD_FRAG_NR);
		mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pmd_t *)ret;
}

pmd_t *pmd_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	pmd_t *pmd;

	pmd = get_pmd_from_cache(mm);
	if (pmd)
		return pmd;

	return __alloc_for_pmdcache(mm);
}
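
/*
 * PMD fragments: one backing page is carved into PMD_FRAG_NR fragments of
 * PMD_FRAG_SIZE bytes each. mm->context.pmd_frag caches the next unused
 * fragment, and pt_frag_refcount is set to PMD_FRAG_NR when a page is
 * published, so the page is only returned to the allocator once every
 * fragment has been released (via pmd_fragment_free() or at context
 * teardown).
 */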

void pmd_fragment_free(unsigned long *pmd)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pmd);

	if (pagetable_is_reserved(ptdesc))
		return free_reserved_ptdesc(ptdesc);

	BUG_ON(atomic_read(&ptdesc->pt_frag_refcount) <= 0);
	if (atomic_dec_and_test(&ptdesc->pt_frag_refcount)) {
		pagetable_dtor(ptdesc);
		pagetable_free(ptdesc);
	}
}

static inline void pgtable_free(void *table, int index)
{
	switch (index) {
	case PTE_INDEX:
		pte_fragment_free(table, 0);
		break;
	case PMD_INDEX:
		pmd_fragment_free(table);
		break;
	case PUD_INDEX:
		__pud_free(table);
		break;
		/* We don't free pgd table via RCU callback */
	default:
		BUG();
	}
}

void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(index > MAX_PGTABLE_INDEX_SIZE);
	pgf |= index;
	tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned int index = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	return pgtable_free(table, index);
}
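
/*
 * pgtable_free_tlb() smuggles the page-table level through the low bits of
 * the table address: the tables are aligned well beyond
 * MAX_PGTABLE_INDEX_SIZE, so the index can be OR'd in here and masked back
 * out in __tlb_remove_table() once tlb_remove_table()'s grace period has
 * passed.
 */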

#ifdef CONFIG_PROC_FS
atomic_long_t direct_pages_count[MMU_PAGE_COUNT];

void arch_report_meminfo(struct seq_file *m)
{
	/*
	 * Hash maps the memory with one size mmu_linear_psize.
	 * So don't bother to print these on hash.
	 */
	if (!radix_enabled())
		return;
	seq_printf(m, "DirectMap4k: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_4K]) << 2);
	seq_printf(m, "DirectMap64k: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_64K]) << 6);
	seq_printf(m, "DirectMap2M: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_2M]) << 11);
	seq_printf(m, "DirectMap1G: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
}
#endif /* CONFIG_PROC_FS */
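
/*
 * direct_pages_count[] holds the number of direct-map (linear mapping)
 * entries of each page size; the shifts above convert a page count into kB
 * (4K pages << 2, 64K << 6, 2M << 11, 1G << 20).
 */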

pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep)
{
	unsigned long pte_val;

	/*
	 * Clear _PAGE_PRESENT so that no parallel hardware update is
	 * possible. Also keep pte_present() true so that we don't take
	 * a spurious fault.
	 */
	pte_val = pte_update(vma->vm_mm, addr, ptep, _PAGE_PRESENT, _PAGE_INVALID, 0);

	return __pte(pte_val);

}

void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep, pte_t old_pte, pte_t pte)
{
	if (radix_enabled())
		return radix__ptep_modify_prot_commit(vma, addr,
						      ptep, old_pte, pte);
	set_pte_at(vma->vm_mm, addr, ptep, pte);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * For hash translation mode, we use the deposited table to store hash slot
 * information, and it is stored at a PTRS_PER_PMD offset from the related pmd
 * location. Hence a pmd move requires a deposit and withdraw.
 *
 * For radix translation with split pmd ptl, we store the deposited table in
 * the pmd page. Hence if we have a different pmd page we need to withdraw
 * during a pmd move.
 *
 * With hash we use the deposited table always, irrespective of anon or not.
 * With radix we use the deposited table only for anonymous mappings.
 */
int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
			   struct spinlock *old_pmd_ptl,
			   struct vm_area_struct *vma)
{
	if (radix_enabled())
		return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);

	return true;
}
#endif

/*
 * Does the CPU support tlbie?
 */
bool tlbie_capable __read_mostly = IS_ENABLED(CONFIG_PPC_RADIX_BROADCAST_TLBIE);
EXPORT_SYMBOL(tlbie_capable);

/*
 * Should tlbie be used for management of CPU TLBs, for kernel and process
 * address spaces? tlbie may still be used for nMMU accelerators, and for KVM
 * guest address spaces.
 */
bool tlbie_enabled __read_mostly = IS_ENABLED(CONFIG_PPC_RADIX_BROADCAST_TLBIE);

static int __init setup_disable_tlbie(char *str)
{
	if (!radix_enabled()) {
		pr_err("disable_tlbie: Unable to disable TLBIE with Hash MMU.\n");
		return 1;
	}

	tlbie_capable = false;
	tlbie_enabled = false;

	return 1;
}
__setup("disable_tlbie", setup_disable_tlbie);
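
/*
 * Booting with "disable_tlbie" on a radix system clears both flags above, so
 * kernel and process TLB management falls back to tlbiel plus IPIs. The
 * tlbie_enabled knob can also be flipped at runtime through the debugfs file
 * created below.
 */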
605
606
static int __init pgtable_debugfs_setup(void)
607
{
608
if (!tlbie_capable)
609
return 0;
610
611
/*
612
* There is no locking vs tlb flushing when changing this value.
613
* The tlb flushers will see one value or another, and use either
614
* tlbie or tlbiel with IPIs. In both cases the TLBs will be
615
* invalidated as expected.
616
*/
617
debugfs_create_bool("tlbie_enabled", 0600,
618
arch_debugfs_dir,
619
&tlbie_enabled);
620
621
return 0;
622
}
623
arch_initcall(pgtable_debugfs_setup);
624
625
#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN)
626
/*
627
* Override the generic version in mm/memremap.c.
628
*
629
* With hash translation, the direct-map range is mapped with just one
630
* page size selected by htab_init_page_sizes(). Consult
631
* mmu_psize_defs[] to determine the minimum page size alignment.
632
*/
633
unsigned long memremap_compat_align(void)
634
{
635
if (!radix_enabled()) {
636
unsigned int shift = mmu_psize_defs[mmu_linear_psize].shift;
637
return max(SUBSECTION_SIZE, 1UL << shift);
638
}
639
640
return SUBSECTION_SIZE;
641
}
642
EXPORT_SYMBOL_GPL(memremap_compat_align);
643
#endif
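
/*
 * memremap_compat_align() is consulted by the device-dax/pmem namespace
 * alignment checks: under hash the answer is at least the linear-map page
 * size (e.g. 16 MB with a 16M mmu_linear_psize), while radix only needs the
 * generic SUBSECTION_SIZE.
 */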

pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
{
	unsigned long prot;

	/* Radix supports execute-only, but protection_map maps X -> RX */
	if (!radix_enabled() && ((vm_flags & VM_ACCESS_FLAGS) == VM_EXEC))
		vm_flags |= VM_READ;

	prot = pgprot_val(protection_map[vm_flags & (VM_ACCESS_FLAGS | VM_SHARED)]);

	if (vm_flags & VM_SAO)
		prot |= _PAGE_SAO;

#ifdef CONFIG_PPC_MEM_KEYS
	prot |= vmflag_to_pte_pkey_bits(vm_flags);
#endif

	return __pgprot(prot);
}
EXPORT_SYMBOL(vm_get_page_prot);