GitHub Repository: torvalds/linux
Path: blob/master/arch/xtensa/mm/tlb.c
/*
 * arch/xtensa/mm/tlb.c
 *
 * Logic that manipulates the Xtensa MMU.  Derived from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2003 Tensilica Inc.
 *
 * Joe Taylor
 * Chris Zankel <[email protected]>
 * Marc Gauthier
 */

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

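/*
 * Walk every entry of every auto-refill (ARF) way of the ITLB/DTLB and
 * invalidate it.  The per-entry invalidations skip the isync; a single
 * isync at the end is enough to make all of them take effect.
 */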
static inline void __flush_itlb_all (void)
{
	int w, i;

	for (w = 0; w < ITLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);
			invalidate_itlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}

static inline void __flush_dtlb_all (void)
{
	int w, i;

	for (w = 0; w < DTLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);
			invalidate_dtlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}

void local_flush_tlb_all(void)
{
	__flush_itlb_all();
	__flush_dtlb_all();
}

/* If mm is current, we simply assign the current task a new ASID, thus,
 * invalidating all previous TLB entries. If mm is someone else's user mapping,
 * we invalidate the context, thus, when that user mapping is swapped in,
 * a new context will be assigned to it.
 */

void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu = smp_processor_id();

	if (mm == current->active_mm) {
		unsigned long flags;
		local_irq_save(flags);
		mm->context.asid[cpu] = NO_CONTEXT;
		activate_context(mm, cpu);
		local_irq_restore(flags);
	} else {
		mm->context.asid[cpu] = NO_CONTEXT;
		mm->context.cpu = -1;
	}
}


#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
#if _ITLB_ENTRIES > _DTLB_ENTRIES
# define _TLB_ENTRIES _ITLB_ENTRIES
#else
# define _TLB_ENTRIES _DTLB_ENTRIES
#endif

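/*
 * Flush a user address range.  When the range spans no more pages than
 * the TLB has entries, each page is invalidated individually under the
 * mm's ASID (ITLB and DTLB for executable mappings, DTLB only
 * otherwise); larger ranges fall back to flushing the whole mm.
 */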
void local_flush_tlb_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	int cpu = smp_processor_id();
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;

	if (mm->context.asid[cpu] == NO_CONTEXT)
		return;

	pr_debug("[tlbrange<%02lx,%08lx,%08lx>]\n",
		 (unsigned long)mm->context.asid[cpu], start, end);
	local_irq_save(flags);

	if (end - start + (PAGE_SIZE - 1) <= _TLB_ENTRIES << PAGE_SHIFT) {
		int oldpid = get_rasid_register();

		set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
		start &= PAGE_MASK;
		if (vma->vm_flags & VM_EXEC)
			while (start < end) {
				invalidate_itlb_mapping(start);
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}
		else
			while (start < end) {
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}

		set_rasid_register(oldpid);
	} else {
		local_flush_tlb_mm(mm);
	}
	local_irq_restore(flags);
}

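/*
 * Flush the ITLB (for executable mappings) and DTLB entries for a
 * single user page, temporarily installing the mm's ASID in the RASID
 * register so the invalidation hits that address space.
 */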
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;
	int oldpid;

	if (mm->context.asid[cpu] == NO_CONTEXT)
		return;

	local_irq_save(flags);

	oldpid = get_rasid_register();
	set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));

	if (vma->vm_flags & VM_EXEC)
		invalidate_itlb_mapping(page);
	invalidate_dtlb_mapping(page);

	set_rasid_register(oldpid);

	local_irq_restore(flags);
}

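/*
 * Flush a kernel virtual range.  Page-by-page invalidation is only
 * attempted for ranges that lie entirely in the kernel's mapped area
 * (between TASK_SIZE and PAGE_OFFSET) and fit within the TLB;
 * anything else falls back to a full TLB flush.
 */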
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET &&
	    end - start < _TLB_ENTRIES << PAGE_SHIFT) {
		start &= PAGE_MASK;
		while (start < end) {
			invalidate_itlb_mapping(start);
			invalidate_dtlb_mapping(start);
			start += PAGE_SIZE;
		}
	} else {
		local_flush_tlb_all();
	}
}

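/*
 * MM hook: invoked after the PTEs for nr consecutive pages starting at
 * address have been updated; simply delegates to the range flush above.
 */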
void update_mmu_tlb_range(struct vm_area_struct *vma,
		unsigned long address, pte_t *ptep, unsigned int nr)
{
	local_flush_tlb_range(vma, address, address + PAGE_SIZE * nr);
}

#ifdef CONFIG_DEBUG_TLB_SANITY

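/*
 * Walk the page tables of the current task (falling back to active_mm
 * for kernel threads) and return the raw PTE value mapping vaddr, or 0
 * if any level of the walk fails.
 */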
static unsigned get_pte_for_vaddr(unsigned vaddr)
{
	struct task_struct *task = get_current();
	struct mm_struct *mm = task->mm;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned int pteval;

	if (!mm)
		mm = task->active_mm;
	pgd = pgd_offset(mm, vaddr);
	if (pgd_none_or_clear_bad(pgd))
		return 0;
	p4d = p4d_offset(pgd, vaddr);
	if (p4d_none_or_clear_bad(p4d))
		return 0;
	pud = pud_offset(p4d, vaddr);
	if (pud_none_or_clear_bad(pud))
		return 0;
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none_or_clear_bad(pmd))
		return 0;
	pte = pte_offset_map(pmd, vaddr);
	if (!pte)
		return 0;
	pteval = pte_val(*pte);
	pte_unmap(pte);
	return pteval;
}

enum {
	TLB_SUSPICIOUS = 1,
	TLB_INSANE = 2,
};

static void tlb_insane(void)
{
	BUG_ON(1);
}

static void tlb_suspicious(void)
{
	WARN_ON(1);
}

/*
 * Check that TLB entries with kernel ASID (1) have kernel VMA (>= TASK_SIZE),
 * and TLB entries with user ASID (>=4) have VMA < TASK_SIZE.
 *
 * Check that valid TLB entries either have the same PA as the PTE, or the PTE
 * is marked as non-present. A non-present PTE with a page that has a non-zero
 * refcount and zero mapcount is normal for a batched TLB flush operation. A
 * zero refcount means that the page was freed prematurely. A non-zero
 * mapcount is unusual, but does not necessarily mean an error, so it is
 * marked as suspicious.
 */
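/*
 * Inspect one auto-refill TLB entry: w selects the way and e the entry
 * within the way, combined into the same w | (e << PAGE_SHIFT) index
 * used by the flush helpers above.  r0 is the entry's virtual page /
 * ASID word, r1 its translation (physical page / attributes) word.
 */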
static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
{
	unsigned tlbidx = w | (e << PAGE_SHIFT);
	unsigned r0 = dtlb ?
		read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
	unsigned r1 = dtlb ?
		read_dtlb_translation(tlbidx) : read_itlb_translation(tlbidx);
	unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
	unsigned pte = get_pte_for_vaddr(vpn);
	unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
	unsigned tlb_asid = r0 & ASID_MASK;
	bool kernel = tlb_asid == 1;
	int rc = 0;

	if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) {
		pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n",
		       dtlb ? 'D' : 'I', w, e, vpn,
		       kernel ? "kernel" : "user");
		rc |= TLB_INSANE;
	}

	if (tlb_asid == mm_asid) {
		if ((pte ^ r1) & PAGE_MASK) {
			pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
			       dtlb ? 'D' : 'I', w, e, r0, r1, pte);
			if (pte == 0 || !pte_present(__pte(pte))) {
				struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);
				struct folio *f = page_folio(p);

				pr_err("folio refcount: %d, mapcount: %d\n",
				       folio_ref_count(f), folio_mapcount(f));
				if (!folio_ref_count(f))
					rc |= TLB_INSANE;
				else if (folio_mapped(f))
					rc |= TLB_SUSPICIOUS;
			} else {
				rc |= TLB_INSANE;
			}
		}
	}
	return rc;
}

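/*
 * Sweep every auto-refill entry of both TLBs with interrupts disabled:
 * insane entries trigger a BUG, merely suspicious ones a WARN.
 */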
void check_tlb_sanity(void)
{
	unsigned long flags;
	unsigned w, e;
	int bug = 0;

	local_irq_save(flags);
	for (w = 0; w < DTLB_ARF_WAYS; ++w)
		for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e)
			bug |= check_tlb_entry(w, e, true);
	for (w = 0; w < ITLB_ARF_WAYS; ++w)
		for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e)
			bug |= check_tlb_entry(w, e, false);
	if (bug & TLB_INSANE)
		tlb_insane();
	if (bug & TLB_SUSPICIOUS)
		tlb_suspicious();
	local_irq_restore(flags);
}

#endif /* CONFIG_DEBUG_TLB_SANITY */