GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/um/include/asm/pgtable.h
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Derived from include/asm-i386/pgtable.h
 * Licensed under the GPL
 */

#ifndef __UM_PGTABLE_H
#define __UM_PGTABLE_H

#include <asm/fixmap.h>

#define _PAGE_PRESENT   0x001
#define _PAGE_NEWPAGE   0x002
#define _PAGE_NEWPROT   0x004
#define _PAGE_RW        0x020
#define _PAGE_USER      0x040
#define _PAGE_ACCESSED  0x080
#define _PAGE_DIRTY     0x100
/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_FILE      0x008   /* nonlinear file mapping, saved PTE; unset:swap */
#define _PAGE_PROTNONE  0x010   /* if the user mapped it with PROT_NONE;
                                   pte_present gives true */

#ifdef CONFIG_3_LEVEL_PGTABLES
#include "asm/pgtable-3level.h"
#else
#include "asm/pgtable-2level.h"
#endif

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* zero page used for uninitialized stuff */
extern unsigned long *empty_zero_page;

#define pgtable_cache_init() do ; while (0)

/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */

extern unsigned long end_iomem;

#define VMALLOC_OFFSET  (__va_space)
#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END    (PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END    (FIXADDR_START-2*PAGE_SIZE)
#endif
#define MODULES_VADDR   VMALLOC_START
#define MODULES_END     VMALLOC_END
#define MODULES_LEN     (MODULES_END - MODULES_VADDR)
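
/*
 * Worked example with illustrative numbers: with end_iomem at 0x20500000
 * and the 8MB __va_space above, VMALLOC_START is
 * (0x20500000 + 0x800000) & ~0x7fffff == 0x20800000, i.e. the first 8MB
 * boundary above the end of the iomem region.  That leaves a hole of 3MB
 * in this example (up to 8MB in general) to catch out-of-bounds accesses.
 */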

#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define __PAGE_KERNEL_EXEC \
        (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)

/*
 * The i386 can't do page protection for execute; it treats execute permission
 * the same as read.
 * Also, write permissions imply read permissions.  This is the closest we can
 * get.
 */
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_READONLY
#define __P101 PAGE_READONLY
#define __P110 PAGE_COPY
#define __P111 PAGE_COPY

#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_READONLY
#define __S101 PAGE_READONLY
#define __S110 PAGE_SHARED
#define __S111 PAGE_SHARED
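
/*
 * The digits follow the protection_map[] index used by the generic mm code:
 * bit 0 is read, bit 1 is write, bit 2 is execute.  For example, __P101 is a
 * private read+execute mapping (PAGE_READONLY here) and __S011 is a shared
 * read+write mapping (PAGE_SHARED).
 */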

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)

#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))

#define pmd_none(x) (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp) do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)

#define pmd_newpage(x) (pmd_val(x) & _PAGE_NEWPAGE)
#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)

#define pud_newpage(x) (pud_val(x) & _PAGE_NEWPAGE)
#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)

#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)

#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))

/*
 * =================================
 * Flags checking section.
 * =================================
 */

static inline int pte_none(pte_t pte)
{
        return pte_is_zero(pte);
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)
{
        return((pte_get_bits(pte, _PAGE_USER)) &&
               !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_exec(pte_t pte)
{
        return((pte_get_bits(pte, _PAGE_USER)) &&
               !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_write(pte_t pte)
{
        return((pte_get_bits(pte, _PAGE_RW)) &&
               !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

/*
 * The following only works if pte_present() is not true.
 */
static inline int pte_file(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_FILE);
}

static inline int pte_dirty(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_DIRTY);
}

static inline int pte_young(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_ACCESSED);
}

static inline int pte_newpage(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_NEWPAGE);
}

static inline int pte_newprot(pte_t pte)
{
        return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
}

static inline int pte_special(pte_t pte)
{
        return 0;
}

/*
 * =================================
 * Flags setting section.
 * =================================
 */

static inline pte_t pte_mknewprot(pte_t pte)
{
        pte_set_bits(pte, _PAGE_NEWPROT);
        return(pte);
}

static inline pte_t pte_mkclean(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_DIRTY);
        return(pte);
}

static inline pte_t pte_mkold(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_ACCESSED);
        return(pte);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_RW);
        return(pte_mknewprot(pte));
}

static inline pte_t pte_mkread(pte_t pte)
{
        pte_set_bits(pte, _PAGE_USER);
        return(pte_mknewprot(pte));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        pte_set_bits(pte, _PAGE_DIRTY);
        return(pte);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        pte_set_bits(pte, _PAGE_ACCESSED);
        return(pte);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        pte_set_bits(pte, _PAGE_RW);
        return(pte_mknewprot(pte));
}

static inline pte_t pte_mkuptodate(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_NEWPAGE);
        if(pte_present(pte))
                pte_clear_bits(pte, _PAGE_NEWPROT);
        return(pte);
}

static inline pte_t pte_mknewpage(pte_t pte)
{
        pte_set_bits(pte, _PAGE_NEWPAGE);
        return(pte);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        return(pte);
}

static inline void set_pte(pte_t *pteptr, pte_t pteval)
{
        pte_copy(*pteptr, pteval);

        /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
         * fix_range knows to unmap it. _PAGE_NEWPROT is specific to
         * mapped pages.
         */

        *pteptr = pte_mknewpage(*pteptr);
        if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
#define __virt_to_page(virt) phys_to_page(__pa(virt))
#define page_to_phys(page) pfn_to_phys((pfn_t) page_to_pfn(page))
#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)

#define mk_pte(page, pgprot) \
        ({ pte_t pte;                                   \
                                                        \
        pte_set_val(pte, page_to_phys(page), (pgprot)); \
        if (pte_present(pte))                           \
                pte_mknewprot(pte_mknewpage(pte));      \
        pte;})

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
        return pte;
}
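
/*
 * A rough usage sketch (hypothetical variables, following the generic
 * fault-path pattern) of how a page is installed with the helpers above:
 *
 *      pte_t entry = mk_pte(page, vma->vm_page_prot);
 *      if (vma->vm_flags & VM_WRITE)
 *              entry = pte_mkwrite(pte_mkdirty(entry));
 *      set_pte_at(vma->vm_mm, address, ptep, entry);
 *
 * set_pte() then marks the entry _PAGE_NEWPAGE (and _PAGE_NEWPROT when it
 * is present), so fix_range knows the host mapping must be updated.
 */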

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
        ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address) \
        ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_unmap(pte) do { } while (0)
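
/*
 * An illustrative sketch of how these macros compose (the helper is
 * hypothetical and kept out of the build with #if 0): walking a kernel
 * virtual address down to its pte.  The virt_to_pte() helper declared
 * below performs a similar lookup for an arbitrary mm.
 */
#if 0
static pte_t *example_kernel_pte(unsigned long addr)
{
        pgd_t *pgd = pgd_offset_k(addr);        /* entry in swapper_pg_dir */
        pud_t *pud;
        pmd_t *pmd;

        if (!pgd_present(*pgd))
                return NULL;
        pud = pud_offset(pgd, addr);            /* folded level on UML */
        if (!pud_present(*pud))
                return NULL;
        pmd = pmd_offset(pud, addr);
        if (!pmd_present(*pmd))
                return NULL;
        return pte_offset_kernel(pmd, addr);    /* pte for this address */
}
#endif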

struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);

#define update_mmu_cache(vma,address,ptep) do ; while (0)

/* Encode and de-code a swap entry */
#define __swp_type(x) (((x).val >> 4) & 0x3f)
#define __swp_offset(x) ((x).val >> 11)

#define __swp_entry(type, offset) \
        ((swp_entry_t) { ((type) << 4) | ((offset) << 11) })
#define __pte_to_swp_entry(pte) \
        ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
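
/*
 * Worked example with illustrative values: __swp_entry(2, 100) packs to
 * (2 << 4) | (100 << 11) == 0x32020.  The low four bits stay clear, so the
 * resulting pte has neither _PAGE_PRESENT nor _PAGE_FILE set and is
 * recognised as a swap entry rather than a present or file mapping.
 */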

#define kern_addr_valid(addr) (1)

#include <asm-generic/pgtable.h>

/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)           \
do {                                            \
        pte_clear(&init_mm, (vaddr), (ptep));   \
        __flush_tlb_one((vaddr));               \
} while (0)

#endif