/* arch/loongarch/mm/tlbex.S — LoongArch TLB exception handlers (load/store/modify/refill) */
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <asm/asm.h>
#include <asm/loongarch.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>

/* invtlb op 5: invalidate non-global (G=0) entries matching ASID + vaddr */
#define INVTLB_ADDR_GFALSE_AND_ASID	5

/* Index width (in bits) of each page-table level: one page of pointers per table */
#define PTRS_PER_PGD_BITS	(PAGE_SHIFT - PTRLOG)
#define PTRS_PER_PUD_BITS	(PAGE_SHIFT - PTRLOG)
#define PTRS_PER_PMD_BITS	(PAGE_SHIFT - PTRLOG)
#define PTRS_PER_PTE_BITS	(PAGE_SHIFT - PTRLOG)

/* Native-width LL/SC pair used for atomic in-place PTE updates */
#ifdef CONFIG_32BIT
#define PTE_LL	ll.w
#define PTE_SC	sc.w
#else
#define PTE_LL	ll.d
#define PTE_SC	sc.d
#endif
/*
 * Slow-path trampolines into the C page fault handler.
 * Saves the full register context, then calls
 * do_page_fault(regs, write, badvaddr).
 * \write: 0 = fault on read/exec path, 1 = fault on write path.
 */
	.macro tlb_do_page_fault, write
	SYM_CODE_START(tlb_do_page_fault_\write)
	UNWIND_HINT_UNDEFINED
	SAVE_ALL
	csrrd		a2, LOONGARCH_CSR_BADV	/* faulting virtual address */
	move		a0, sp			/* a0 = struct pt_regs * */
	REG_S		a2, sp, PT_BVADDR	/* record badvaddr in pt_regs */
	li.w		a1, \write		/* a1 = write flag */
	bl		do_page_fault
	RESTORE_ALL_AND_RET
	SYM_CODE_END(tlb_do_page_fault_\write)
	.endm

	tlb_do_page_fault 0
	tlb_do_page_fault 1
/*
 * TLB protection (permission) exception: always goes straight to the
 * C handler; write flag is passed as 0 here (a1 = zero).
 */
SYM_CODE_START(handle_tlb_protect)
	UNWIND_HINT_UNDEFINED
	BACKUP_T0T1
	SAVE_ALL
	move		a0, sp			/* a0 = struct pt_regs * */
	move		a1, zero		/* a1 = write flag (0) */
	csrrd		a2, LOONGARCH_CSR_BADV	/* faulting virtual address */
	REG_S		a2, sp, PT_BVADDR
	la_abs		t0, do_page_fault
	jirl		ra, t0, 0		/* absolute call (may be far) */
	RESTORE_ALL_AND_RET
SYM_CODE_END(handle_tlb_protect)
/*
 * TLB load exception fast path: walk the page tables by hand, set
 * _PAGE_VALID on the PTE (atomically via LL/SC on SMP), and write the
 * even/odd PTE pair into TLBELO0/1 with tlbwr. Falls back to
 * tlb_do_page_fault_0 when the PTE is not present.
 * Scratch: t0/t1/ra, preserved in EXCEPTION_KS0/1/2.
 */
SYM_CODE_START(handle_tlb_load)
	UNWIND_HINT_UNDEFINED
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_load	/* kernel-half address -> swapper_pg_dir */
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_load:
	/* Get PGD offset in bytes */
#ifdef CONFIG_32BIT
	PTR_BSTRPICK	ra, t0, 31, PGDIR_SHIFT
#else
	PTR_BSTRPICK	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
#endif
	PTR_ALSL	t1, ra, t1, _PGD_T_LOG2

#if CONFIG_PGTABLE_LEVELS > 3
	PTR_L		t1, t1, 0
	PTR_BSTRPICK	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	PTR_ALSL	t1, ra, t1, _PMD_T_LOG2
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	PTR_L		t1, t1, 0
	PTR_BSTRPICK	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	PTR_ALSL	t1, ra, t1, _PMD_T_LOG2
#endif
	PTR_L		ra, t1, 0

	/*
	 * For huge tlb entries, pmde doesn't contain an address but
	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
	 * see if we need to jump to huge tlb processing.
	 */
	PTR_ROTRI	ra, ra, _PAGE_HUGE_SHIFT + 1	/* rotate HUGE bit into the sign bit */
	bltz		ra, tlb_huge_update_load

	PTR_ROTRI	ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1)	/* undo rotation */
	PTR_BSTRPICK	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	PTR_ALSL	t1, t0, ra, _PTE_T_LOG2		/* t1 = &pte */

#ifdef CONFIG_SMP
smp_pgtable_change_load:
	PTE_LL		t0, t1, 0
#else
	PTR_L		t0, t1, 0
#endif
	andi		ra, t0, _PAGE_PRESENT
	beqz		ra, nopage_tlb_load

	ori		t0, t0, _PAGE_VALID
#ifdef CONFIG_SMP
	PTE_SC		t0, t1, 0
	beqz		t0, smp_pgtable_change_load	/* SC failed: retry LL/SC */
#else
	PTR_S		t0, t1, 0
#endif
	tlbsrch
	/* Clear the low PTE-index bit: point at the even entry of the pair */
	PTR_BSTRINS	t1, zero, _PTE_T_LOG2, _PTE_T_LOG2
	PTR_L		t0, t1, 0
	PTR_L		t1, t1, _PTE_T_SIZE
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

vmalloc_load:
	la_abs		t1, swapper_pg_dir
	b		vmalloc_done_load

	/* This is the entry point of a huge page. */
tlb_huge_update_load:
#ifdef CONFIG_SMP
	PTE_LL		ra, t1, 0
#else
	PTR_ROTRI	ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1)	/* undo rotation */
#endif
	andi		t0, ra, _PAGE_PRESENT
	beqz		t0, nopage_tlb_load

#ifdef CONFIG_SMP
	ori		t0, ra, _PAGE_VALID
	PTE_SC		t0, t1, 0
	beqz		t0, tlb_huge_update_load	/* SC failed: retry */
	ori		t0, ra, _PAGE_VALID
#else
	ori		t0, ra, _PAGE_VALID
	PTR_S		t0, t1, 0
#endif
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the configured huge
	 * page size. This is twice the size of the large TLB entry we
	 * intend to use: a TLB entry half the size of the configured
	 * huge page size is written into entrylo0 and entrylo1 to cover
	 * the contiguous huge PTE address space.
	 */
	/* Huge page: Move Global bit */
	xori		t0, t0, _PAGE_HUGE
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	PTR_SRLI	t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1 */
	PTR_ADDI	t1, zero, 1
	PTR_SLLI	t1, t1, (HPAGE_SHIFT - 1)	/* second half of the huge page */
	PTR_ADD		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	PTR_LI		t0, (CSR_TLBIDX_PS >> 16) << 16
	PTR_LI		t1, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

	/* Reset default page size */
	PTR_LI		t0, (CSR_TLBIDX_PS >> 16) << 16
	PTR_LI		t1, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_load:
	dbar		0x700	/* ordering barrier before leaving the fast path */
	csrrd		ra, EXCEPTION_KS2
	la_abs		t0, tlb_do_page_fault_0
	jr		t0
SYM_CODE_END(handle_tlb_load)
/*
 * TLB load exception when hardware page-table walking (PTW) is in use:
 * no software walk needed, go straight to the C fault path.
 */
SYM_CODE_START(handle_tlb_load_ptw)
	UNWIND_HINT_UNDEFINED
	csrwr		t0, LOONGARCH_CSR_KS0
	csrwr		t1, LOONGARCH_CSR_KS1
	la_abs		t0, tlb_do_page_fault_0
	jr		t0
SYM_CODE_END(handle_tlb_load_ptw)
/*
 * TLB store exception fast path: like handle_tlb_load, but the PTE must
 * be both present AND writable, and the update sets
 * _PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED. Falls back to
 * tlb_do_page_fault_1 otherwise.
 * Scratch: t0/t1/ra, preserved in EXCEPTION_KS0/1/2.
 */
SYM_CODE_START(handle_tlb_store)
	UNWIND_HINT_UNDEFINED
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_store	/* kernel-half address -> swapper_pg_dir */
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_store:
	/* Get PGD offset in bytes */
#ifdef CONFIG_32BIT
	PTR_BSTRPICK	ra, t0, 31, PGDIR_SHIFT
#else
	PTR_BSTRPICK	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
#endif
	PTR_ALSL	t1, ra, t1, _PGD_T_LOG2

#if CONFIG_PGTABLE_LEVELS > 3
	PTR_L		t1, t1, 0
	PTR_BSTRPICK	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	PTR_ALSL	t1, ra, t1, _PMD_T_LOG2
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	PTR_L		t1, t1, 0
	PTR_BSTRPICK	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	PTR_ALSL	t1, ra, t1, _PMD_T_LOG2
#endif
	PTR_L		ra, t1, 0

	/*
	 * For huge tlb entries, pmde doesn't contain an address but
	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
	 * see if we need to jump to huge tlb processing.
	 */
	PTR_ROTRI	ra, ra, _PAGE_HUGE_SHIFT + 1	/* rotate HUGE bit into the sign bit */
	bltz		ra, tlb_huge_update_store

	PTR_ROTRI	ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1)	/* undo rotation */
	PTR_BSTRPICK	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	PTR_ALSL	t1, t0, ra, _PTE_T_LOG2		/* t1 = &pte */

#ifdef CONFIG_SMP
smp_pgtable_change_store:
	PTE_LL		t0, t1, 0
#else
	PTR_L		t0, t1, 0
#endif

	/* ra == 0 iff the PTE is both present and writable */
#ifdef CONFIG_64BIT
	andi		ra, t0, _PAGE_PRESENT | _PAGE_WRITE
	xori		ra, ra, _PAGE_PRESENT | _PAGE_WRITE
#else
	/*
	 * NOTE(review): this 32-bit sequence computes ~(pte & (P|W)),
	 * which is nonzero for every pte value, so bnez would always be
	 * taken — inconsistent with the 64-bit andi/xori path above.
	 * Verify against upstream before relying on the 32-bit build.
	 */
	PTR_LI		ra, _PAGE_PRESENT | _PAGE_WRITE
	and		ra, ra, t0
	nor		ra, ra, zero
#endif
	bnez		ra, nopage_tlb_store

#ifdef CONFIG_64BIT
	ori		t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
	PTR_LI		ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	or		t0, ra, t0
#endif

#ifdef CONFIG_SMP
	PTE_SC		t0, t1, 0
	beqz		t0, smp_pgtable_change_store	/* SC failed: retry LL/SC */
#else
	PTR_S		t0, t1, 0
#endif
	tlbsrch
	/* Clear the low PTE-index bit: point at the even entry of the pair */
	PTR_BSTRINS	t1, zero, _PTE_T_LOG2, _PTE_T_LOG2
	PTR_L		t0, t1, 0
	PTR_L		t1, t1, _PTE_T_SIZE
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

vmalloc_store:
	la_abs		t1, swapper_pg_dir
	b		vmalloc_done_store

	/* This is the entry point of a huge page. */
tlb_huge_update_store:
#ifdef CONFIG_SMP
	PTE_LL		ra, t1, 0
#else
	PTR_ROTRI	ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1)	/* undo rotation */
#endif

	/* t0 == 0 iff the huge PTE is both present and writable */
#ifdef CONFIG_64BIT
	andi		t0, ra, _PAGE_PRESENT | _PAGE_WRITE
	xori		t0, t0, _PAGE_PRESENT | _PAGE_WRITE
#else
	/* NOTE(review): same ~(pte & (P|W)) concern as the base-page path above */
	PTR_LI		t0, _PAGE_PRESENT | _PAGE_WRITE
	and		t0, t0, ra
	nor		t0, t0, zero
#endif

	bnez		t0, nopage_tlb_store

#ifdef CONFIG_SMP
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	PTE_SC		t0, t1, 0
	beqz		t0, tlb_huge_update_store	/* SC failed: retry */
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
#ifdef CONFIG_64BIT
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
	PTR_LI		t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	or		t0, ra, t0
#endif
	PTR_S		t0, t1, 0
#endif
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the configured huge
	 * page size. This is twice the size of the large TLB entry we
	 * intend to use: a TLB entry half the size of the configured
	 * huge page size is written into entrylo0 and entrylo1 to cover
	 * the contiguous huge PTE address space.
	 */
	/* Huge page: Move Global bit */
	xori		t0, t0, _PAGE_HUGE
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	PTR_SRLI	t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1 */
	PTR_ADDI	t1, zero, 1
	PTR_SLLI	t1, t1, (HPAGE_SHIFT - 1)	/* second half of the huge page */
	PTR_ADD		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	PTR_LI		t0, (CSR_TLBIDX_PS >> 16) << 16
	PTR_LI		t1, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

	/* Reset default page size */
	PTR_LI		t0, (CSR_TLBIDX_PS >> 16) << 16
	PTR_LI		t1, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_store:
	dbar		0x700	/* ordering barrier before leaving the fast path */
	csrrd		ra, EXCEPTION_KS2
	la_abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_CODE_END(handle_tlb_store)
/*
 * TLB store exception when hardware page-table walking (PTW) is in use:
 * no software walk needed, go straight to the C fault path (write=1).
 */
SYM_CODE_START(handle_tlb_store_ptw)
	UNWIND_HINT_UNDEFINED
	csrwr		t0, LOONGARCH_CSR_KS0
	csrwr		t1, LOONGARCH_CSR_KS1
	la_abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_CODE_END(handle_tlb_store_ptw)
/*
 * TLB modify exception fast path: entry exists but is clean; require the
 * PTE to be writable, then set _PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED
 * (atomically via LL/SC on SMP) and rewrite the TLB pair. Falls back to
 * tlb_do_page_fault_1 when not writable.
 * Scratch: t0/t1/ra, preserved in EXCEPTION_KS0/1/2.
 */
SYM_CODE_START(handle_tlb_modify)
	UNWIND_HINT_UNDEFINED
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_modify	/* kernel-half address -> swapper_pg_dir */
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_modify:
	/* Get PGD offset in bytes */
#ifdef CONFIG_32BIT
	PTR_BSTRPICK	ra, t0, 31, PGDIR_SHIFT
#else
	PTR_BSTRPICK	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
#endif
	PTR_ALSL	t1, ra, t1, _PGD_T_LOG2

#if CONFIG_PGTABLE_LEVELS > 3
	PTR_L		t1, t1, 0
	PTR_BSTRPICK	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	PTR_ALSL	t1, ra, t1, _PMD_T_LOG2
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	PTR_L		t1, t1, 0
	PTR_BSTRPICK	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	PTR_ALSL	t1, ra, t1, _PMD_T_LOG2
#endif
	PTR_L		ra, t1, 0

	/*
	 * For huge tlb entries, pmde doesn't contain an address but
	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
	 * see if we need to jump to huge tlb processing.
	 */
	PTR_ROTRI	ra, ra, _PAGE_HUGE_SHIFT + 1	/* rotate HUGE bit into the sign bit */
	bltz		ra, tlb_huge_update_modify

	PTR_ROTRI	ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1)	/* undo rotation */
	PTR_BSTRPICK	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	PTR_ALSL	t1, t0, ra, _PTE_T_LOG2		/* t1 = &pte */

#ifdef CONFIG_SMP
smp_pgtable_change_modify:
	PTE_LL		t0, t1, 0
#else
	PTR_L		t0, t1, 0
#endif

	/* Require the write-permission bit */
#ifdef CONFIG_64BIT
	andi		ra, t0, _PAGE_WRITE
#else
	PTR_LI		ra, _PAGE_WRITE
	and		ra, t0, ra
#endif
	beqz		ra, nopage_tlb_modify

#ifdef CONFIG_64BIT
	ori		t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
	PTR_LI		ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	or		t0, ra, t0
#endif

#ifdef CONFIG_SMP
	PTE_SC		t0, t1, 0
	beqz		t0, smp_pgtable_change_modify	/* SC failed: retry LL/SC */
#else
	PTR_S		t0, t1, 0
#endif
	tlbsrch
	/* Clear the low PTE-index bit: point at the even entry of the pair */
	PTR_BSTRINS	t1, zero, _PTE_T_LOG2, _PTE_T_LOG2
	PTR_L		t0, t1, 0
	PTR_L		t1, t1, _PTE_T_SIZE
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

vmalloc_modify:
	la_abs		t1, swapper_pg_dir
	b		vmalloc_done_modify

	/* This is the entry point of a huge page. */
tlb_huge_update_modify:
#ifdef CONFIG_SMP
	PTE_LL		ra, t1, 0
#else
	PTR_ROTRI	ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1)	/* undo rotation */
#endif

	/* Require the write-permission bit */
#ifdef CONFIG_64BIT
	andi		t0, ra, _PAGE_WRITE
#else
	PTR_LI		t0, _PAGE_WRITE
	and		t0, ra, t0
#endif

	beqz		t0, nopage_tlb_modify

#ifdef CONFIG_SMP
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	PTE_SC		t0, t1, 0
	beqz		t0, tlb_huge_update_modify	/* SC failed: retry */
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
#ifdef CONFIG_64BIT
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
	PTR_LI		t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	or		t0, ra, t0
#endif
	PTR_S		t0, t1, 0
#endif
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the configured huge
	 * page size. This is twice the size of the large TLB entry we
	 * intend to use: a TLB entry half the size of the configured
	 * huge page size is written into entrylo0 and entrylo1 to cover
	 * the contiguous huge PTE address space.
	 */
	/* Huge page: Move Global bit */
	xori		t0, t0, _PAGE_HUGE
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	PTR_SRLI	t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1 */
	PTR_ADDI	t1, zero, 1
	PTR_SLLI	t1, t1, (HPAGE_SHIFT - 1)	/* second half of the huge page */
	PTR_ADD		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	PTR_LI		t0, (CSR_TLBIDX_PS >> 16) << 16
	PTR_LI		t1, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

	/* Reset default page size */
	PTR_LI		t0, (CSR_TLBIDX_PS >> 16) << 16
	PTR_LI		t1, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_modify:
	dbar		0x700	/* ordering barrier before leaving the fast path */
	csrrd		ra, EXCEPTION_KS2
	la_abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_CODE_END(handle_tlb_modify)
/*
 * TLB modify exception when hardware page-table walking (PTW) is in use:
 * no software walk needed, go straight to the C fault path (write=1).
 */
SYM_CODE_START(handle_tlb_modify_ptw)
	UNWIND_HINT_UNDEFINED
	csrwr		t0, LOONGARCH_CSR_KS0
	csrwr		t1, LOONGARCH_CSR_KS1
	la_abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_CODE_END(handle_tlb_modify_ptw)
#ifdef CONFIG_32BIT
/*
 * TLB refill exception, 32-bit: software two-level walk. The PGD/PTE
 * pointers are masked with 0x1fffffff — presumably to form a direct
 * physical window address (TODO confirm against the 32-bit memory map).
 * Loads the even/odd PTE pair into TLBRELO0/1 and fills the TLB.
 */
SYM_CODE_START(handle_tlb_refill)
	UNWIND_HINT_UNDEFINED
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2
	li.w		ra, 0x1fffffff		/* address mask applied to table pointers */

	/* ra-masked PGD entry address: pgd + (badv >> PGDIR_SHIFT) * 4 */
	csrrd		t0, LOONGARCH_CSR_PGD
	csrrd		t1, LOONGARCH_CSR_TLBRBADV
	srli.w		t1, t1, PGDIR_SHIFT
	slli.w		t1, t1, 0x2		/* scale by sizeof(pgd entry) */
	add.w		t0, t0, t1
	and		t0, t0, ra

	/* PTE-pair address: even index (lowest PTE bit dropped, scaled by 8) */
	ld.w		t0, t0, 0
	csrrd		t1, LOONGARCH_CSR_TLBRBADV
	slli.w		t1, t1, (32 - PGDIR_SHIFT)
	srli.w		t1, t1, (32 - PGDIR_SHIFT + PAGE_SHIFT + 1)
	slli.w		t1, t1, (0x2 + 1)
	add.w		t0, t0, t1
	and		t0, t0, ra

	/* Even/odd PTEs of the pair into the refill entrylo registers */
	ld.w		t1, t0, 0x0
	csrwr		t1, LOONGARCH_CSR_TLBRELO0

	ld.w		t1, t0, 0x4
	csrwr		t1, LOONGARCH_CSR_TLBRELO1

	tlbfill
	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn
SYM_CODE_END(handle_tlb_refill)
#endif

#ifdef CONFIG_64BIT
/*
 * TLB refill exception, 64-bit: hardware-assisted walk with lddir/ldpte.
 * Only t0 is touched; it is preserved in the dedicated TLBRSAVE CSR.
 */
SYM_CODE_START(handle_tlb_refill)
	UNWIND_HINT_UNDEFINED
	csrwr		t0, LOONGARCH_CSR_TLBRSAVE
	csrrd		t0, LOONGARCH_CSR_PGD
	lddir		t0, t0, 3		/* walk PGD level */
#if CONFIG_PGTABLE_LEVELS > 3
	lddir		t0, t0, 2		/* walk PUD level */
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	lddir		t0, t0, 1		/* walk PMD level */
#endif
	ldpte		t0, 0			/* load even PTE of the pair */
	ldpte		t0, 1			/* load odd PTE of the pair */
	tlbfill
	csrrd		t0, LOONGARCH_CSR_TLBRSAVE
	ertn
SYM_CODE_END(handle_tlb_refill)
#endif