GitHub Repository: torvalds/linux
Path: blob/master/arch/loongarch/mm/tlb.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/export.h>

#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/exception.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

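/*
 * The three primitives below differ only in the invtlb scope selector:
 * INVTLB_CURRENT_ALL drops every entry, INVTLB_CURRENT_GFALSE only the
 * non-global (user, ASID-tagged) entries, and INVTLB_CURRENT_GTRUE only
 * the global (kernel) entries.
 */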
void local_flush_tlb_all(void)
{
	invtlb_all(INVTLB_CURRENT_ALL, 0, 0);
}
EXPORT_SYMBOL(local_flush_tlb_all);

void local_flush_tlb_user(void)
{
	invtlb_all(INVTLB_CURRENT_GFALSE, 0, 0);
}
EXPORT_SYMBOL(local_flush_tlb_user);

void local_flush_tlb_kernel(void)
{
	invtlb_all(INVTLB_CURRENT_GTRUE, 0, 0);
}
EXPORT_SYMBOL(local_flush_tlb_kernel);

/*
 * All entries common to a mm share an asid. To effectively flush
 * these entries, we just bump the asid.
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();

	cpu = smp_processor_id();

	if (asid_valid(mm, cpu))
		drop_mmu_context(mm, cpu);
	else
		cpumask_clear_cpu(cpu, mm_cpumask(mm));

	preempt_enable();
}

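/*
 * A TLB entry maps an even/odd pair of pages (EntryLo0/EntryLo1), so
 * ranges are flushed at double-page granularity. If the range would need
 * more invtlb operations than a sizable fraction of the TLB (an eighth
 * with a set-associative STLB, half otherwise), it is cheaper to
 * invalidate the whole address space by bumping the ASID.
 */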
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (asid_valid(mm, cpu)) {
		unsigned long size, flags;

		local_irq_save(flags);
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
		if (size <= (current_cpu_data.tlbsizestlbsets ?
			     current_cpu_data.tlbsize / 8 :
			     current_cpu_data.tlbsize / 2)) {
			int asid = cpu_asid(cpu, mm);

			while (start < end) {
				invtlb(INVTLB_ADDR_GFALSE_AND_ASID, asid, start);
				start += (PAGE_SIZE << 1);
			}
		} else {
			drop_mmu_context(mm, cpu);
		}
		local_irq_restore(flags);
	} else {
		cpumask_clear_cpu(cpu, mm_cpumask(mm));
	}
}

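/*
 * Kernel mappings are global, so they are invalidated by address alone
 * (INVTLB_ADDR_GTRUE_OR_ASID does not require an ASID match for global
 * entries); the same size cutoff as above decides between per-address
 * invalidation and a full kernel flush.
 */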
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= (current_cpu_data.tlbsizestlbsets ?
		     current_cpu_data.tlbsize / 8 :
		     current_cpu_data.tlbsize / 2)) {

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);

		while (start < end) {
			invtlb_addr(INVTLB_ADDR_GTRUE_OR_ASID, 0, start);
			start += (PAGE_SIZE << 1);
		}
	} else {
		local_flush_tlb_kernel();
	}
	local_irq_restore(flags);
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (asid_valid(vma->vm_mm, cpu)) {
		int newpid;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		invtlb(INVTLB_ADDR_GFALSE_AND_ASID, newpid, page);
	} else {
		cpumask_clear_cpu(cpu, mm_cpumask(vma->vm_mm));
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	page &= (PAGE_MASK << 1);
	invtlb_addr(INVTLB_ADDR_GTRUE_OR_ASID, 0, page);
}

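/*
 * Refill/update a huge-page TLB entry: probe for an existing entry at
 * this address, then write the PMD-derived EntryLo into both halves of
 * the pair, each half covering HPAGE_SIZE / 2. An existing entry is
 * overwritten in place (indexed write); otherwise a random slot is used.
 */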
static void __update_hugetlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
#ifdef CONFIG_HUGETLB_PAGE
	int idx;
	unsigned long lo;
	unsigned long flags;

	local_irq_save(flags);

	address &= (PAGE_MASK << 1);
	write_csr_entryhi(address);
	tlb_probe();
	idx = read_csr_tlbidx();
	write_csr_pagesize(PS_HUGE_SIZE);
	lo = pmd_to_entrylo(pte_val(*ptep));
	write_csr_entrylo0(lo);
	write_csr_entrylo1(lo + (HPAGE_SIZE >> 1));
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	write_csr_pagesize(PS_DEFAULT_SIZE);

	local_irq_restore(flags);
#endif
}

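/*
 * Software TLB update for the base page size. With a hardware page table
 * walker (PTW) the TLB is refilled without software help, so this becomes
 * a no-op.
 */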
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	int idx;
	unsigned long flags;

	if (cpu_has_ptw)
		return;

	/*
	 * Handle debugger faulting in for debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	if (pte_val(*ptep) & _PAGE_HUGE) {
		__update_hugetlb(vma, address, ptep);
		return;
	}

	local_irq_save(flags);

	/*
	 * Step back to the even PTE of the pair so that ptep[0]/ptep[1]
	 * fill EntryLo0/EntryLo1.
	 */
	if ((unsigned long)ptep & sizeof(pte_t))
		ptep--;

	address &= (PAGE_MASK << 1);
	write_csr_entryhi(address);
	tlb_probe();
	idx = read_csr_tlbidx();
	write_csr_pagesize(PS_DEFAULT_SIZE);
	write_csr_entrylo0(pte_val(*ptep++));
	write_csr_entrylo1(pte_val(*ptep));
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();

	local_irq_restore(flags);
}

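/*
 * PWCTL0/PWCTL1 describe the page-table geometry to the walker: for each
 * level, the bit index where that level's index field starts and its
 * width. For example, with 16KB pages (PAGE_SHIFT == 14) and three
 * levels this yields pte_i = 14, pte_w = 11, pmd_i = 25, pmd_w = 11,
 * pgd_i = 36, pgd_w = 11, i.e. 2048 eight-byte entries per table and
 * 47 bits of virtual address.
 */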
static void setup_ptwalker(void)
{
	unsigned long pwctl0, pwctl1;
	unsigned long pgd_i = 0, pgd_w = 0;
	unsigned long pud_i = 0, pud_w = 0;
	unsigned long pmd_i = 0, pmd_w = 0;
	unsigned long pte_i = 0, pte_w = 0;

	pgd_i = PGDIR_SHIFT;
	pgd_w = PAGE_SHIFT - 3;
#if CONFIG_PGTABLE_LEVELS > 3
	pud_i = PUD_SHIFT;
	pud_w = PAGE_SHIFT - 3;
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	pmd_i = PMD_SHIFT;
	pmd_w = PAGE_SHIFT - 3;
#endif
	pte_i = PAGE_SHIFT;
	pte_w = PAGE_SHIFT - 3;

	pwctl0 = pte_i | pte_w << 5 | pmd_i << 10 | pmd_w << 15 | pud_i << 20 | pud_w << 25;
	pwctl1 = pgd_i | pgd_w << 6;

	if (cpu_has_ptw)
		pwctl1 |= CSR_PWCTL1_PTW;

	csr_write64(pwctl0, LOONGARCH_CSR_PWCTL0);
	csr_write64(pwctl1, LOONGARCH_CSR_PWCTL1);
	csr_write64((long)swapper_pg_dir, LOONGARCH_CSR_PGDH);
	csr_write64((long)invalid_pg_dir, LOONGARCH_CSR_PGDL);
	csr_write64((long)smp_processor_id(), LOONGARCH_CSR_TMID);
}

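/*
 * Emit the page-table bit layout as #define lines via pr_debug(), as a
 * debugging aid when working on the assembly TLB handlers.
 */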
static void output_pgtable_bits_defines(void)
{
#define pr_define(fmt, ...) \
	pr_debug("#define " fmt, ##__VA_ARGS__)

	pr_debug("#include <asm/asm.h>\n");
	pr_debug("#include <asm/regdef.h>\n");
	pr_debug("\n");

	pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
	pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT);
	pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
	pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
	pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT);
	pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT);
	pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
	pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
	pr_define("PFN_PTE_SHIFT %d\n", PFN_PTE_SHIFT);
	pr_debug("\n");
}

#ifdef CONFIG_NUMA
unsigned long pcpu_handlers[NR_CPUS];
#endif
extern long exception_handlers[VECSIZE * 128 / sizeof(long)];

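/*
 * On NUMA systems each secondary CPU gets a node-local copy of the
 * exception vectors (copied from eentry) so that handler fetches do not
 * cross nodes; EENTRY/MERRENTRY/TLBRENTRY are then pointed at the copy.
 */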
static void setup_tlb_handler(int cpu)
{
	setup_ptwalker();
	local_flush_tlb_all();

	if (cpu_has_ptw) {
		exception_table[EXCCODE_TLBI] = handle_tlb_load_ptw;
		exception_table[EXCCODE_TLBL] = handle_tlb_load_ptw;
		exception_table[EXCCODE_TLBS] = handle_tlb_store_ptw;
		exception_table[EXCCODE_TLBM] = handle_tlb_modify_ptw;
	}

	/* The tlb handlers are generated only once */
	if (cpu == 0) {
		memcpy((void *)tlbrentry, handle_tlb_refill, 0x80);
		local_flush_icache_range(tlbrentry, tlbrentry + 0x80);

		for (int i = EXCCODE_TLBL; i <= EXCCODE_TLBPE; i++)
			set_handler(i * VECSIZE, exception_table[i], VECSIZE);
	} else {
		int vec_sz __maybe_unused;
		void *addr __maybe_unused;
		struct page *page __maybe_unused;

		/* Avoid lockdep warning */
		rcutree_report_cpu_starting(cpu);

#if defined(CONFIG_NUMA) && !defined(CONFIG_PREEMPT_RT)
		vec_sz = sizeof(exception_handlers);

		if (pcpu_handlers[cpu])
			return;

		page = alloc_pages_node(cpu_to_node(cpu), GFP_ATOMIC, get_order(vec_sz));
		if (!page)
			return;

		addr = page_address(page);
		pcpu_handlers[cpu] = (unsigned long)addr;
		memcpy((void *)addr, (void *)eentry, vec_sz);
		local_flush_icache_range((unsigned long)addr, (unsigned long)addr + vec_sz);
		csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_EENTRY);
		csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_MERRENTRY);
		csr_write64(pcpu_handlers[cpu] + 80*VECSIZE, LOONGARCH_CSR_TLBRENTRY);
#endif
	}
}

void tlb_init(int cpu)
{
	write_csr_pagesize(PS_DEFAULT_SIZE);
	write_csr_stlbpgsize(PS_DEFAULT_SIZE);
	write_csr_tlbrefill_pagesize(PS_DEFAULT_SIZE);

	setup_tlb_handler(cpu);
	output_pgtable_bits_defines();
}