GitHub Repository: torvalds/linux
Path: blob/master/arch/csky/include/asm/pgtable.h
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_PGTABLE_H
#define __ASM_CSKY_PGTABLE_H

#include <asm/fixmap.h>
#include <asm/memory.h>
#include <asm/addrspace.h>
#include <abi/pgtable-bits.h>
#include <asm-generic/pgtable-nopmd.h>

#define PGDIR_SHIFT 22
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD (PAGE_OFFSET/PGDIR_SIZE)

/*
 * C-SKY uses a two-level paging structure.
 */

#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t))
#define PTRS_PER_PMD 1
#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))
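
/*
 * Sketch of the resulting table geometry, assuming the usual 4 KiB pages
 * and 32-bit table entries (PAGE_SHIFT = 12, sizeof(pgd_t) = 4):
 *
 *   PTRS_PER_PGD = 4096 / 4 = 1024 entries, each covering
 *   PGDIR_SIZE   = 1UL << 22 = 4 MiB, for the full 4 GiB address space;
 *   PTRS_PER_PTE = 4096 / 4 = 1024 PTEs of 4 KiB each.
 *
 * A virtual address then splits as:
 *
 *   pgd_index = addr >> PGDIR_SHIFT;            // top 10 bits
 *   pte_index = (addr >> PAGE_SHIFT) & 0x3ff;   // middle 10 bits
 *   offset    = addr & ~PAGE_MASK;              // low 12 bits
 */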

#define pte_ERROR(e) \
        pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
#define pgd_ERROR(e) \
        pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

#define PFN_PTE_SHIFT PAGE_SHIFT
#define pmd_pfn(pmd) (pmd_phys(pmd) >> PAGE_SHIFT)
#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#define pte_clear(mm, addr, ptep) set_pte((ptep), \
        (((unsigned int) addr >= PAGE_OFFSET) ? __pte(_PAGE_GLOBAL) : __pte(0)))
#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
#define pte_pfn(x) ((unsigned long)((x).pte_low >> PAGE_SHIFT))
#define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) \
        | pgprot_val(prot))
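
/*
 * Note the asymmetry in pte_clear() above: clearing a kernel-space PTE
 * leaves _PAGE_GLOBAL set while a user PTE is zeroed, and pte_none()
 * masks that bit back out. This looks like the trick used by other
 * MIPS-style MMUs, where TLB entries are loaded in even/odd pairs and
 * the global bit must match in both halves of a pair, even when one
 * half is empty.
 */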

#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })

#define pte_page(x) pfn_to_page(pte_pfn(x))
#define __mk_pte(page_nr, pgprot) __pte(((page_nr) << PAGE_SHIFT) | \
        pgprot_val(pgprot))

/*
 * C-SKY only has VALID and DIRTY bits in hardware, so we need to use
 * those two bits to emulate PRESENT, READ, WRITE, EXEC, MODIFIED and
 * ACCESSED.
 */
#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED)
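
/*
 * A rough sketch of that emulation, as inferred from the pte_mk*()
 * helpers below: a freshly installed PTE may carry the software
 * permission bits but not the hardware VALID/DIRTY bits, so the first
 * access (or first write) faults and the handler applies roughly
 *
 *   pte = pte_mkyoung(pte);  // sets _PAGE_ACCESSED, plus _PAGE_VALID
 *                            // (hardware) when _PAGE_READ is present
 *   pte = pte_mkdirty(pte);  // sets _PAGE_MODIFIED, plus _PAGE_DIRTY
 *                            // (hardware) when _PAGE_WRITE is present
 *
 * so VALID effectively means "readable and accessed" and DIRTY means
 * "writable and modified".
 */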

#define PAGE_NONE __pgprot(_PAGE_PROT_NONE)
#define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ | \
        _CACHE_CACHED)
#define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE | \
        _CACHE_CACHED)
#define PAGE_SHARED PAGE_WRITE

#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_VALID | \
        _PAGE_WRITE | _PAGE_DIRTY | _PAGE_MODIFIED | \
        _PAGE_GLOBAL | \
        _CACHE_CACHED)

#define _PAGE_IOREMAP (_PAGE_BASE | _PAGE_READ | _PAGE_VALID | \
        _PAGE_WRITE | _PAGE_DIRTY | _PAGE_MODIFIED | \
        _PAGE_GLOBAL | \
        _CACHE_UNCACHED | _PAGE_SO)

#define _PAGE_CHG_MASK (~(unsigned long) \
        (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
        _CACHE_MASK | _PAGE_GLOBAL))

#define MAX_SWAPFILES_CHECK() \
        BUILD_BUG_ON(MAX_SWAPFILES_SHIFT != 5)
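
/*
 * The BUILD_BUG_ON above pins MAX_SWAPFILES_SHIFT at 5, presumably
 * because the swap-entry layout in <abi/pgtable-bits.h> reserves exactly
 * five PTE bits for the swap type; a change to the generic constant
 * would otherwise silently corrupt the __pte_to_swp_entry() /
 * __swp_entry_to_pte() encoding.
 */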

extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern void load_pgd(unsigned long pg_dir);
extern pte_t invalid_pte_table[PTRS_PER_PTE];

static inline void set_pte(pte_t *p, pte_t pte)
{
        *p = pte;
#if defined(CONFIG_CPU_NEED_TLBSYNC)
        dcache_wb_line((u32)p);
#endif
        /* prevent out-of-order execution */
        smp_mb();
}
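
/*
 * The dcache_wb_line() above is likely needed because, on CPUs that
 * require TLBSYNC, the TLB refill reads page tables from memory without
 * snooping the data cache, so the updated entry must be written back
 * before the barrier makes it observable.
 */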

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
        unsigned long ptr;

        ptr = pmd_val(pmd);

        return __va(ptr);
}

#define pmd_phys(pmd) pmd_val(pmd)

static inline void set_pmd(pmd_t *p, pmd_t pmd)
{
        *p = pmd;
#if defined(CONFIG_CPU_NEED_TLBSYNC)
        dcache_wb_line((u32)p);
#endif
        /* prevent speculative execution */
        smp_mb();
}

static inline int pmd_none(pmd_t pmd)
{
        return pmd_val(pmd) == __pa(invalid_pte_table);
}

#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)

static inline int pmd_present(pmd_t pmd)
{
        return (pmd_val(pmd) != __pa(invalid_pte_table));
}

static inline void pmd_clear(pmd_t *p)
{
        pmd_val(*p) = (__pa(invalid_pte_table));
#if defined(CONFIG_CPU_NEED_TLBSYNC)
        dcache_wb_line((u32)p);
#endif
}
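
/*
 * An empty PMD points at the physical address of invalid_pte_table
 * rather than at 0 (see pmd_none()/pmd_present()/pmd_clear() above);
 * the usual reason for this pattern is that a TLB-refill walk through
 * an unmapped region then still lands on a real page of invalid PTEs
 * instead of dereferencing a null table pointer.
 */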

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_read(pte_t pte)
{
        return pte.pte_low & _PAGE_READ;
}

static inline int pte_write(pte_t pte)
{
        return (pte).pte_low & _PAGE_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
        return (pte).pte_low & _PAGE_MODIFIED;
}

static inline int pte_young(pte_t pte)
{
        return (pte).pte_low & _PAGE_ACCESSED;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
        return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
        pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_DIRTY);
        return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
        pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_VALID);
        return pte;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
        pte_val(pte) |= _PAGE_WRITE;
        if (pte_val(pte) & _PAGE_MODIFIED)
                pte_val(pte) |= _PAGE_DIRTY;
        return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        pte_val(pte) |= _PAGE_MODIFIED;
        if (pte_val(pte) & _PAGE_WRITE)
                pte_val(pte) |= _PAGE_DIRTY;
        return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        pte_val(pte) |= _PAGE_ACCESSED;
        if (pte_val(pte) & _PAGE_READ)
                pte_val(pte) |= _PAGE_VALID;
        return pte;
}
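
/*
 * The pattern in the helpers above: each generic bit is paired with a
 * hardware bit, and the hardware bit is only set when the matching
 * permission is also present (_PAGE_VALID requires _PAGE_READ,
 * _PAGE_DIRTY requires _PAGE_WRITE), while wrprotect/mkclean/mkold
 * clear both halves together. A write-protected page can therefore be
 * tracked as MODIFIED in software without the TLB ever seeing a
 * writable, dirty entry.
 */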

static inline bool pte_swp_exclusive(pte_t pte)
{
        return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
        pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
        return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
        return pte;
}

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot);

/*
 * Macro to mark a page protection value as "uncacheable". Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
        unsigned long prot = pgprot_val(_prot);

        prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED | _PAGE_SO;

        return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
        unsigned long prot = pgprot_val(_prot);

        prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

        return __pgprot(prot);
}
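
/*
 * Both helpers above select _CACHE_UNCACHED; the only difference is
 * that pgprot_noncached() also sets _PAGE_SO (the strong-order bit,
 * judging by the name), so noncached mappings keep device accesses
 * strictly ordered while writecombine mappings leave the CPU free to
 * merge and reorder writes.
 */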

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
                     (pgprot_val(newprot)));
}
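
/*
 * Given _PAGE_CHG_MASK above, pte_modify() preserves the PFN and the
 * accessed/dirty state (_PAGE_ACCESSED, _PAGE_VALID, _PAGE_MODIFIED,
 * _PAGE_DIRTY) and takes PRESENT/READ/WRITE, the cache attributes and
 * _PAGE_GLOBAL from the new protection. A hypothetical mprotect()-style
 * downgrade would be:
 *
 *   pte = pte_modify(pte, PAGE_READ);  // drop write, keep young/dirty
 */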

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);

void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
                unsigned long address, pte_t *pte, unsigned int nr);
#define update_mmu_cache(vma, addr, ptep) \
        update_mmu_cache_range(NULL, vma, addr, ptep, 1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
        remap_pfn_range(vma, vaddr, pfn, size, prot)

#endif /* __ASM_CSKY_PGTABLE_H */