GitHub repository: torvalds/linux
Path: blob/master/arch/powerpc/include/asm/book3s/32/pgtable.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H

#include <asm-generic/pgtable-nopmd.h>

/*
 * The "classic" 32-bit implementation of the PowerPC MMU uses a hash
 * table containing PTEs, together with a set of 16 segment registers,
 * to define the virtual to physical address mapping.
 *
 * We use the hash table as an extended TLB, i.e. a cache of currently
 * active mappings.  We maintain a two-level page table tree, much
 * like that used by the i386, for the sake of the Linux memory
 * management code.  Low-level assembler code in hash_low_32.S
 * (procedure hash_page) is responsible for extracting ptes from the
 * tree and putting them into the hash table when necessary, and
 * updating the accessed and modified bits in the page table tree.
 */

#define _PAGE_PRESENT   0x001   /* software: pte contains a translation */
#define _PAGE_HASHPTE   0x002   /* hash_page has made an HPTE for this pte */
#define _PAGE_READ      0x004   /* software: read access allowed */
#define _PAGE_GUARDED   0x008   /* G: prohibit speculative access */
#define _PAGE_COHERENT  0x010   /* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE  0x020   /* I: cache inhibit */
#define _PAGE_WRITETHRU 0x040   /* W: cache write-through */
#define _PAGE_DIRTY     0x080   /* C: page changed */
#define _PAGE_ACCESSED  0x100   /* R: page referenced */
#define _PAGE_EXEC      0x200   /* software: exec allowed */
#define _PAGE_WRITE     0x400   /* software: user write access allowed */
#define _PAGE_SPECIAL   0x800   /* software: Special page */

#ifdef CONFIG_PTE_64BIT
/* We never clear the high word of the pte */
#define _PTE_NONE_MASK  (0xffffffff00000000ULL | _PAGE_HASHPTE)
#else
#define _PTE_NONE_MASK  _PAGE_HASHPTE
#endif
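
/*
 * Illustrative example (not part of the upstream header): the flags above
 * compose by bitwise OR, so a present, referenced and changed translation
 * carries
 *
 *	_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY
 *		== 0x001 | 0x100 | 0x080 == 0x181
 *
 * in the low bits of the PTE, and a test such as
 *
 *	if (pte_val(pte) & _PAGE_DIRTY)
 *		handle_dirty_page();		(hypothetical helper)
 *
 * is the pattern behind the accessors (pte_dirty() and friends) further
 * down in this file.
 */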

#define _PMD_PRESENT    0
#define _PMD_PRESENT_MASK (PAGE_MASK)
#define _PMD_BAD        (~PAGE_MASK)

/* We borrow the _PAGE_READ bit to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE     _PAGE_READ

/* And here we include common definitions */

#define _PAGE_HPTEFLAGS _PAGE_HASHPTE

/*
 * Location of the PFN in the PTE. Most 32-bit platforms use the same
 * as PAGE_SHIFT here (ie, naturally aligned).
 * Platforms that don't simply pre-define the value, so we don't
 * override it here.
 */
#define PTE_RPN_SHIFT   (PAGE_SHIFT)

/*
 * The mask covered by the RPN must be a ULL on 32-bit platforms with
 * 64-bit PTEs.
 */
#ifdef CONFIG_PTE_64BIT
#define PTE_RPN_MASK    (~((1ULL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 36
#else
#define PTE_RPN_MASK    (~((1UL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
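
/*
 * Illustrative example (not part of the upstream header): with the
 * definitions above, the page frame number sits in the high bits, so
 *
 *	unsigned long pfn  = pte_val(pte) >> PTE_RPN_SHIFT;
 *	phys_addr_t   phys = pte_val(pte) & PTE_RPN_MASK;
 *
 * recover the PFN and the physical address, while the low PTE_RPN_SHIFT
 * bits are left for the _PAGE_* flags.  pfn_pte() further down performs
 * the inverse construction.
 */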

/*
 * _PAGE_CHG_MASK masks the bits that are to be preserved across
 * pgprot changes.
 */
#define _PAGE_CHG_MASK  (PTE_RPN_MASK | _PAGE_HASHPTE | _PAGE_DIRTY | \
                         _PAGE_ACCESSED | _PAGE_SPECIAL)

/*
 * We define two sets of base prot bits, one for basic pages (ie,
 * cacheable kernel and user pages) and one for non-cacheable
 * pages. We always set _PAGE_COHERENT when SMP is enabled or
 * the processor might need it for DMA coherency.
 */
#define _PAGE_BASE_NC   (_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_BASE      (_PAGE_BASE_NC | _PAGE_COHERENT)

#include <asm/pgtable-masks.h>

/* Permission masks used for kernel mappings */
#define PAGE_KERNEL     __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC  __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X   __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO  __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)

#define PTE_INDEX_SIZE  PTE_SHIFT
#define PMD_INDEX_SIZE  0
#define PUD_INDEX_SIZE  0
#define PGD_INDEX_SIZE  (32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX PMD_INDEX_SIZE
#define PUD_CACHE_INDEX PUD_INDEX_SIZE

#ifndef __ASSEMBLER__
#define PTE_TABLE_SIZE  (sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE  0
#define PUD_TABLE_SIZE  0
#define PGD_TABLE_SIZE  (sizeof(pgd_t) << PGD_INDEX_SIZE)

/* Bits to mask out from a PMD to get to the PTE page */
#define PMD_MASKED_BITS (PTE_TABLE_SIZE - 1)
#endif  /* __ASSEMBLER__ */

#define PTRS_PER_PTE    (1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD    (1 << PGD_INDEX_SIZE)

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two-level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT     (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD       (TASK_SIZE / PGDIR_SIZE)
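
/*
 * Worked example (illustrative, assuming 4KB pages and 32-bit PTEs, i.e.
 * PTE_INDEX_SIZE == 10 as in the "normal case" described above):
 *
 *	PGDIR_SHIFT = 12 + 10 = 22, so PGDIR_SIZE = 4MB
 *	PGD_INDEX_SIZE = 32 - 22 = 10, so PTRS_PER_PGD = 1024
 *	PTRS_PER_PTE = 1024, i.e. one 4KB page of 32-bit PTEs
 *
 * which matches the 1024-entry pgdir pointing to 1024-entry PTE pages
 * in the comment above.
 */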

#ifndef __ASSEMBLER__

int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
void unmap_kernel_page(unsigned long va);
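
/*
 * Usage sketch (illustrative, not from the upstream sources; va, dev_phys
 * and err are placeholders): a device register block would typically be
 * mapped cache-inhibited and guarded, e.g.
 *
 *	err = map_kernel_page(va, dev_phys, PAGE_KERNEL_NCG);
 *
 * whereas ordinary cacheable kernel memory uses PAGE_KERNEL; both masks
 * are defined earlier in this file.
 */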

#endif /* !__ASSEMBLER__ */

/*
 * This is the bottom of the PKMAP area with HIGHMEM, or an arbitrary
 * value (for now) on others; from here we can start laying out kernel
 * virtual space, which goes below PKMAP and FIXMAP.
 */

#define FIXADDR_SIZE    0
#ifdef CONFIG_KASAN
#include <asm/kasan.h>
#define FIXADDR_TOP     (KASAN_SHADOW_START - PAGE_SIZE)
#else
#define FIXADDR_TOP     ((unsigned long)(-PAGE_SIZE))
#endif

/*
 * ioremap_bot starts at that address. Early ioremaps move down from there,
 * until mem_init(), at which point this becomes the top of the vmalloc
 * and ioremap space.
 */
#ifdef CONFIG_HIGHMEM
#define IOREMAP_TOP     PKMAP_BASE
#else
#define IOREMAP_TOP     FIXADDR_START
#endif

/* PPC32 shares the vmalloc area with ioremap */
#define IOREMAP_START   VMALLOC_START
#define IOREMAP_END     VMALLOC_END

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 16MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map more than physical RAM with the BATs, so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from ioremap_base and the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings set up in early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */

#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))

#ifdef CONFIG_KASAN_VMALLOC
#define VMALLOC_END     ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
#define VMALLOC_END     ioremap_bot
#endif
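
/*
 * Worked example (illustrative, assuming PAGE_OFFSET == 0xc0000000 and
 * 768MB of lowmem, so high_memory == 0xf0000000):
 *
 *	VMALLOC_START = (0xf0000000 + 0x1000000) & ~0xffffff = 0xf1000000
 *
 * i.e. the vmalloc area starts at the next VMALLOC_OFFSET boundary at or
 * past the end of lowmem, leaving the guard hole described above.
 */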

#ifndef __ASSEMBLER__
#include <linux/sched.h>
#include <linux/threads.h>

/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS 0

#define pgd_ERROR(e) \
        pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#define pte_clear(mm, addr, ptep) \
        do { pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0); } while (0)

#define pmd_none(pmd)           (!pmd_val(pmd))
#define pmd_bad(pmd)            (pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)        (pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
        *pmdp = __pmd(0);
}

/*
 * When flushing the TLB entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
                            unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
                          unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
static inline void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
{
        if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
                unsigned long ptephys = __pa(ptep) & PAGE_MASK;

                flush_hash_pages(mm->context.id, addr, ptephys, 1);
        }
}

/*
 * PTE updates.  This function is called whenever an existing
 * valid PTE is updated.  This does -not- include set_pte_at(),
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide.  In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 */
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
                                     unsigned long clr, unsigned long set, int huge)
{
        pte_basic_t old;

        if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
                unsigned long tmp;

                /* lwarx/stwcx. retry loop: atomically clear the 'clr' bits
                 * and set the 'set' bits in the (low word of the) PTE */
                asm volatile(
#ifndef CONFIG_PTE_64BIT
        "1:     lwarx   %0, 0, %3\n"
        "       andc    %1, %0, %4\n"
#else
        "1:     lwarx   %L0, 0, %3\n"
        "       lwz     %0, -4(%3)\n"
        "       andc    %1, %L0, %4\n"
#endif
        "       or      %1, %1, %5\n"
        "       stwcx.  %1, 0, %3\n"
        "       bne-    1b"
        : "=&r" (old), "=&r" (tmp), "=m" (*p)
#ifndef CONFIG_PTE_64BIT
        : "r" (p),
#else
        : "b" ((unsigned long)(p) + 4),
#endif
          "r" (clr), "r" (set), "m" (*p)
        : "cc" );
        } else {
                old = pte_val(*p);

                *p = __pte((old & ~(pte_basic_t)clr) | set);
        }

        return old;
}
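
/*
 * Usage sketch (illustrative): the helpers below are thin wrappers around
 * pte_update().  Clearing the dirty bit of a mapped PTE, for instance,
 * would be
 *
 *	pte_basic_t old = pte_update(mm, addr, ptep, _PAGE_DIRTY, 0, 0);
 *
 * which clears _PAGE_DIRTY (atomically when a hash table is in use),
 * leaves every other bit alone and returns the previous value.
 */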

/*
 * 2.6 calls this without flushing the TLB entry; this is wrong
 * for our hash-based implementation, so we fix that up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
                                              unsigned long addr, pte_t *ptep)
{
        unsigned long old;
        old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
        if (old & _PAGE_HASHPTE)
                flush_hash_entry(mm, ptep, addr);

        return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
        __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                       pte_t *ptep)
{
        return __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
                                      pte_t *ptep)
{
        pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
}

static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
                                           pte_t *ptep, pte_t entry,
                                           unsigned long address,
                                           int psize)
{
        unsigned long set = pte_val(entry) &
                            (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);

        pte_update(vma->vm_mm, address, ptep, 0, set, 0);

        flush_tlb_page(vma, address);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)   (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

#define pmd_pfn(pmd)            (pmd_val(pmd) >> PAGE_SHIFT)
#define pmd_page(pmd)           pfn_to_page(pmd_pfn(pmd))

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs (32-bit PTEs):
 *
 *                         1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
 *   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 *   <----------------- offset --------------------> < type -> E H P
 *
 *   E is the exclusive marker that is not stored in swap entries.
 *   _PAGE_PRESENT (P) and _PAGE_HASHPTE (H) must be 0.
 *
 * For 64-bit PTEs, the offset is extended by 32 bits.
 */
#define __swp_type(entry)               ((entry).val & 0x1f)
#define __swp_offset(entry)             ((entry).val >> 5)
#define __swp_entry(type, offset)       ((swp_entry_t) { ((type) & 0x1f) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)         ((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)           ((pte_t) { (x).val << 3 })
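
/*
 * Worked example (illustrative): encoding swap slot 100 of swap type 2
 * and folding it into a PTE:
 *
 *	swp_entry_t e = __swp_entry(2, 100);	e.val == (100 << 5) | 2 == 0xc82
 *	pte_t pte = __swp_entry_to_pte(e);	pte_val(pte) == 0xc82 << 3 == 0x6410
 *
 * The low three PTE bits stay clear, so the result is !pte_present() and
 * carries no _PAGE_HASHPTE, as the format comment above requires;
 * __pte_to_swp_entry()/__swp_type()/__swp_offset() invert the steps.
 */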

static inline bool pte_swp_exclusive(pte_t pte)
{
        return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
}

/* Generic accessors to PTE bits */
static inline bool pte_read(pte_t pte)
{
        return !!(pte_val(pte) & _PAGE_READ);
}

static inline bool pte_write(pte_t pte)
{
        return !!(pte_val(pte) & _PAGE_WRITE);
}

static inline int pte_dirty(pte_t pte)          { return !!(pte_val(pte) & _PAGE_DIRTY); }
static inline int pte_young(pte_t pte)          { return !!(pte_val(pte) & _PAGE_ACCESSED); }
static inline int pte_special(pte_t pte)        { return !!(pte_val(pte) & _PAGE_SPECIAL); }
static inline int pte_none(pte_t pte)           { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline bool pte_exec(pte_t pte)          { return pte_val(pte) & _PAGE_EXEC; }

static inline int pte_present(pte_t pte)
{
        return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hw_valid(pte_t pte)
{
        return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hashpte(pte_t pte)
{
        return !!(pte_val(pte) & _PAGE_HASHPTE);
}

static inline bool pte_ci(pte_t pte)
{
        return !!(pte_val(pte) & _PAGE_NO_CACHE);
}

/*
 * We only find page table entries in the last level,
 * hence no need for other accessors.
 */
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
        /*
         * A read-only access is controlled by the _PAGE_READ bit.
         * We also have _PAGE_READ set for write access (write implies read).
         */
        if (!pte_present(pte) || !pte_read(pte))
                return false;

        if (write && !pte_write(pte))
                return false;

        return true;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
        return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
                     pgprot_val(pgprot));
}
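
/*
 * Worked example (illustrative, assuming 4KB pages so PTE_RPN_SHIFT == 12):
 * mapping physical frame 0x12345 with kernel permissions,
 *
 *	pte_t pte = pfn_pte(0x12345, PAGE_KERNEL);
 *
 * yields pte_val(pte) == 0x12345000 | pgprot_val(PAGE_KERNEL), i.e. the
 * RPN in the high bits with the protection flags OR-ed into the low bits.
 */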
447
448
/* Generic modifiers for PTE bits */
449
static inline pte_t pte_wrprotect(pte_t pte)
450
{
451
return __pte(pte_val(pte) & ~_PAGE_WRITE);
452
}
453
454
static inline pte_t pte_exprotect(pte_t pte)
455
{
456
return __pte(pte_val(pte) & ~_PAGE_EXEC);
457
}
458
459
static inline pte_t pte_mkclean(pte_t pte)
460
{
461
return __pte(pte_val(pte) & ~_PAGE_DIRTY);
462
}
463
464
static inline pte_t pte_mkold(pte_t pte)
465
{
466
return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
467
}
468
469
static inline pte_t pte_mkexec(pte_t pte)
470
{
471
return __pte(pte_val(pte) | _PAGE_EXEC);
472
}
473
474
static inline pte_t pte_mkpte(pte_t pte)
475
{
476
return pte;
477
}
478
479
static inline pte_t pte_mkwrite_novma(pte_t pte)
480
{
481
/*
482
* write implies read, hence set both
483
*/
484
return __pte(pte_val(pte) | _PAGE_RW);
485
}
486
487
static inline pte_t pte_mkdirty(pte_t pte)
488
{
489
return __pte(pte_val(pte) | _PAGE_DIRTY);
490
}
491
492
static inline pte_t pte_mkyoung(pte_t pte)
493
{
494
return __pte(pte_val(pte) | _PAGE_ACCESSED);
495
}
496
497
static inline pte_t pte_mkspecial(pte_t pte)
498
{
499
return __pte(pte_val(pte) | _PAGE_SPECIAL);
500
}
501
502
static inline pte_t pte_mkhuge(pte_t pte)
503
{
504
return pte;
505
}
506
507
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
508
{
509
return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
510
}
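
/*
 * Usage sketch (illustrative): changing the protection of an existing PTE
 * while keeping its frame and state bits,
 *
 *	pte = pte_modify(pte, PAGE_KERNEL_RO);
 *
 * preserves exactly the _PAGE_CHG_MASK bits (RPN, _PAGE_HASHPTE,
 * _PAGE_DIRTY, _PAGE_ACCESSED, _PAGE_SPECIAL) and replaces the rest
 * with the new pgprot.
 */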

/*
 * This low-level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors.
 *
 * The first case is 32-bit in UP mode with 32-bit PTEs: we need to preserve
 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
 * translation in the hash yet (done in a subsequent flush_tlb_xxx())
 * and so we need to keep track that this PTE needs invalidating.
 *
 * The second case is 32-bit with 64-bit PTEs.  In this case, we
 * can just store as long as we do the two halves in the right order
 * with a barrier in between.  This is possible because we take care,
 * in the hash code, to pre-invalidate if the PTE was already hashed,
 * which synchronizes us with any concurrent invalidation.
 * In the percpu case, we fall back to the simple update preserving
 * the hash bits (ie, same as the non-SMP case).
 *
 * The third case is 32-bit in SMP mode with 32-bit PTEs.  We use the
 * helper pte_update() which does an atomic update.  We need to do that
 * because a concurrent invalidation can clear _PAGE_HASHPTE.  If it's a
 * per-CPU PTE such as a kmap_atomic, we also do a simple update preserving
 * the hash bits instead.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep, pte_t pte, int percpu)
{
        if ((!IS_ENABLED(CONFIG_SMP) && !IS_ENABLED(CONFIG_PTE_64BIT)) || percpu) {
                *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) |
                              (pte_val(pte) & ~_PAGE_HASHPTE));
        } else if (IS_ENABLED(CONFIG_PTE_64BIT)) {
                if (pte_val(*ptep) & _PAGE_HASHPTE)
                        flush_hash_entry(mm, ptep, addr);

                asm volatile("stw%X0 %2,%0; eieio; stw%X1 %L2,%1" :
                             "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) :
                             "r" (pte) : "memory");
        } else {
                pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, pte_val(pte), 0);
        }
}

/*
 * Macros to mark a page protection value as "uncacheable" and set
 * related cache attributes.
 */

#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
                         _PAGE_WRITETHRU)

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
        return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
                        _PAGE_NO_CACHE | _PAGE_GUARDED);
}
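
/*
 * Worked example (illustrative): starting from PAGE_KERNEL,
 *
 *	pgprot_t p = pgprot_noncached(PAGE_KERNEL);
 *
 * first clears all of _PAGE_CACHE_CTL and then sets _PAGE_NO_CACHE |
 * _PAGE_GUARDED (I and G), the attributes typically wanted for MMIO;
 * the _wc variant below omits _PAGE_GUARDED so stores may be combined.
 */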

#define pgprot_noncached_wc pgprot_noncached_wc
static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
{
        return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
                        _PAGE_NO_CACHE);
}

#define pgprot_cached pgprot_cached
static inline pgprot_t pgprot_cached(pgprot_t prot)
{
        return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
                        _PAGE_COHERENT);
}

#define pgprot_cached_wthru pgprot_cached_wthru
static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
{
        return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
                        _PAGE_COHERENT | _PAGE_WRITETHRU);
}

#define pgprot_cached_noncoherent pgprot_cached_noncoherent
static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
{
        return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t prot)
{
        return pgprot_noncached_wc(prot);
}

#endif /* !__ASSEMBLER__ */

#endif /* _ASM_POWERPC_BOOK3S_32_PGTABLE_H */