GitHub Repository: torvalds/linux
Path: blob/master/arch/riscv/kernel/entry.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/scs.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/errata_list.h>
#include <linux/sizes.h>

        .section .irqentry.text, "ax"

.macro new_vmalloc_check
        REG_S a0, TASK_TI_A0(tp)
        csrr a0, CSR_CAUSE
        /* Exclude IRQs */
        blt a0, zero, .Lnew_vmalloc_restore_context_a0

        REG_S a1, TASK_TI_A1(tp)
        /* Only check new_vmalloc if we are in page/protection fault */
        li a1, EXC_LOAD_PAGE_FAULT
        beq a0, a1, .Lnew_vmalloc_kernel_address
        li a1, EXC_STORE_PAGE_FAULT
        beq a0, a1, .Lnew_vmalloc_kernel_address
        li a1, EXC_INST_PAGE_FAULT
        bne a0, a1, .Lnew_vmalloc_restore_context_a1

.Lnew_vmalloc_kernel_address:
        /* Is it a kernel address? */
        csrr a0, CSR_TVAL
        bge a0, zero, .Lnew_vmalloc_restore_context_a1

        /* Check if a new vmalloc mapping appeared that could explain the trap */
        REG_S a2, TASK_TI_A2(tp)
        /*
         * Computes:
         * a0 = &new_vmalloc[BIT_WORD(cpu)]
         * a1 = BIT_MASK(cpu)
         */
        lw a2, TASK_TI_CPU(tp)
        /*
         * Compute the new_vmalloc element position:
         * (cpu / 64) * 8 = (cpu >> 6) << 3
         */
        srli a1, a2, 6
        slli a1, a1, 3
        la a0, new_vmalloc
        add a0, a0, a1
        /*
         * Compute the bit position in the new_vmalloc element:
         * bit_pos = cpu % 64 = cpu - (cpu / 64) * 64 = cpu - ((cpu >> 6) << 6)
         *         = cpu - (((cpu >> 6) << 3) << 3)
         */
        slli a1, a1, 3
        sub a1, a2, a1
        /* Compute the "get mask": 1 << bit_pos */
        li a2, 1
        sll a1, a2, a1
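        /*
         * In C terms, the sequence above computes (illustration only):
         *   a0 = &new_vmalloc[cpu / 64]   (word index, 8 bytes per word)
         *   a1 = 1UL << (cpu % 64)
         * e.g. for cpu = 70: a0 = &new_vmalloc[1] and a1 = 1UL << 6.
         */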

        /* Check the value of new_vmalloc for this cpu */
        REG_L a2, 0(a0)
        and a2, a2, a1
        beq a2, zero, .Lnew_vmalloc_restore_context

        /* Atomically reset the current cpu bit in new_vmalloc */
        amoxor.d a0, a1, (a0)
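        /*
         * The bit in a1 is known to be set at this point, so xoring it in
         * clears exactly that bit. The old word value returned in a0 is not
         * used; a0 is reloaded from the saved context below.
         */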

        /* Only emit a sfence.vma if the uarch caches invalid entries */
        ALTERNATIVE("sfence.vma", "nop", 0, RISCV_ISA_EXT_SVVPTC, 1)

        REG_L a0, TASK_TI_A0(tp)
        REG_L a1, TASK_TI_A1(tp)
        REG_L a2, TASK_TI_A2(tp)
        csrw CSR_SCRATCH, x0
        sret

.Lnew_vmalloc_restore_context:
        REG_L a2, TASK_TI_A2(tp)
.Lnew_vmalloc_restore_context_a1:
        REG_L a1, TASK_TI_A1(tp)
.Lnew_vmalloc_restore_context_a0:
        REG_L a0, TASK_TI_A0(tp)
.endm


SYM_CODE_START(handle_exception)
        /*
         * If coming from userspace, preserve the user thread pointer and load
         * the kernel thread pointer. If we came from the kernel, the scratch
         * register will contain 0, and we should continue on the current TP.
         */
        csrrw tp, CSR_SCRATCH, tp
        bnez tp, .Lsave_context

.Lrestore_kernel_tpsp:
        csrr tp, CSR_SCRATCH

#ifdef CONFIG_64BIT
        /*
         * The RISC-V kernel does not eagerly emit a sfence.vma after each
         * new vmalloc mapping, which may result in exceptions:
         * - if the uarch caches invalid entries, the new mapping would not be
         *   observed by the page table walker and an invalidation is needed.
         * - if the uarch does not cache invalid entries, a reordered access
         *   could "miss" the new mapping and traps: in that case, we only need
         *   to retry the access, no sfence.vma is required.
         */
        new_vmalloc_check
#endif

        REG_S sp, TASK_TI_KERNEL_SP(tp)

#ifdef CONFIG_VMAP_STACK
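        /*
         * Overflow check: with CONFIG_VMAP_STACK the kernel stack is assumed
         * to be THREAD_ALIGN (2 * THREAD_SIZE) aligned, so bit THREAD_SHIFT
         * of any in-bounds stack address is clear. If that bit is set after
         * reserving PT_SIZE_ON_STACK bytes, sp has left the stack.
         */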
        addi sp, sp, -(PT_SIZE_ON_STACK)
        srli sp, sp, THREAD_SHIFT
        andi sp, sp, 0x1
        bnez sp, handle_kernel_stack_overflow
        REG_L sp, TASK_TI_KERNEL_SP(tp)
#endif

.Lsave_context:
        REG_S sp, TASK_TI_USER_SP(tp)
        REG_L sp, TASK_TI_KERNEL_SP(tp)
        addi sp, sp, -(PT_SIZE_ON_STACK)
        REG_S x1, PT_RA(sp)
        REG_S x3, PT_GP(sp)
        REG_S x5, PT_T0(sp)
        save_from_x6_to_x31

        /*
         * Disable user-mode memory access as it should only be set in the
         * actual user copy routines.
         *
         * Disable the FPU/Vector to detect illegal usage of floating point
         * or vector in kernel space.
         */
        li t0, SR_SUM | SR_FS_VS
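        /*
         * The csrrc below atomically clears SR_SUM and SR_FS_VS in CSR_STATUS
         * and returns the previous value in s1, which is saved to PT_STATUS
         * and restored on the way back to the trapped context.
         */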

        REG_L s0, TASK_TI_USER_SP(tp)
        csrrc s1, CSR_STATUS, t0
        csrr s2, CSR_EPC
        csrr s3, CSR_TVAL
        csrr s4, CSR_CAUSE
        csrr s5, CSR_SCRATCH
        REG_S s0, PT_SP(sp)
        REG_S s1, PT_STATUS(sp)
        REG_S s2, PT_EPC(sp)
        REG_S s3, PT_BADADDR(sp)
        REG_S s4, PT_CAUSE(sp)
        REG_S s5, PT_TP(sp)

        /*
         * Set the scratch register to 0, so that if a recursive exception
         * occurs, the exception vector knows it came from the kernel
         */
        csrw CSR_SCRATCH, x0

        /* Load the global pointer */
        load_global_pointer

        /* Load the kernel shadow call stack pointer if coming from userspace */
        scs_load_current_if_task_changed s5

#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
        move a0, sp
        call riscv_v_context_nesting_start
#endif
        move a0, sp /* pt_regs */

        /*
         * MSB of cause differentiates between
         * interrupts and exceptions
         */
        bge s4, zero, 1f

        /* Handle interrupts */
        call do_irq
        j ret_from_exception
1:
        /* Handle other exceptions */
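        /*
         * Dispatch via excp_vect_table: scale the exception code by the
         * pointer size (1 << RISCV_LGPTR) to index the table, falling back
         * to do_trap_unknown for out-of-range codes. Roughly equivalent to
         * excp_vect_table[cause](regs).
         */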
        slli t0, s4, RISCV_LGPTR
        la t1, excp_vect_table
        la t2, excp_vect_table_end
        add t0, t1, t0
        /* Check if exception code lies within bounds */
        bgeu t0, t2, 3f
        REG_L t1, 0(t0)
2:      jalr t1
        j ret_from_exception
3:

        la t1, do_trap_unknown
        j 2b
SYM_CODE_END(handle_exception)
ASM_NOKPROBE(handle_exception)

/*
 * ret_from_exception must be called with interrupts disabled. Here is the
 * caller list:
 * - handle_exception
 * - ret_from_fork
 */
SYM_CODE_START_NOALIGN(ret_from_exception)
        REG_L s0, PT_STATUS(sp)
#ifdef CONFIG_RISCV_M_MODE
        /* the MPP value is too large to be used as an immediate arg for andi */
        li t0, SR_MPP
        and s0, s0, t0
#else
        andi s0, s0, SR_SPP
#endif
        bnez s0, 1f

#ifdef CONFIG_KSTACK_ERASE
        call stackleak_erase_on_task_stack
#endif

        /* Save unwound kernel stack pointer in thread_info */
        addi s0, sp, PT_SIZE_ON_STACK
        REG_S s0, TASK_TI_KERNEL_SP(tp)

        /* Save the kernel shadow call stack pointer */
        scs_save_current

        /*
         * Save TP into the scratch register, so we can find the kernel data
         * structures again.
         */
        csrw CSR_SCRATCH, tp
1:
#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
        move a0, sp
        call riscv_v_context_nesting_end
#endif
        REG_L a0, PT_STATUS(sp)
        /*
         * The current load reservation is effectively part of the processor's
         * state, in the sense that load reservations cannot be shared between
         * different hart contexts. We can't actually save and restore a load
         * reservation, so instead here we clear any existing reservation --
         * it's always legal for implementations to clear load reservations at
         * any point (as long as the forward progress guarantee is kept, but
         * we'll ignore that here).
         *
         * Dangling load reservations can be the result of taking a trap in the
         * middle of an LR/SC sequence, but can also be the result of a taken
         * forward branch around an SC -- which is how we implement CAS. As a
         * result we need to clear reservations between the last CAS and the
         * jump back to the new context. While it is unlikely the store
         * completes, implementations are allowed to expand reservations to be
         * arbitrarily large.
         */
        REG_L a2, PT_EPC(sp)
        REG_SC x0, a2, PT_EPC(sp)
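        /*
         * The store-conditional above simply rewrites the epc value that was
         * just loaded; its only purpose is to clear any dangling reservation,
         * so the success flag is discarded by targeting x0.
         */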

        csrw CSR_STATUS, a0
        csrw CSR_EPC, a2

        REG_L x1, PT_RA(sp)
        REG_L x3, PT_GP(sp)
        REG_L x4, PT_TP(sp)
        REG_L x5, PT_T0(sp)
        restore_from_x6_to_x31

        REG_L x2, PT_SP(sp)

#ifdef CONFIG_RISCV_M_MODE
        mret
#else
        sret
#endif
SYM_INNER_LABEL(ret_from_exception_end, SYM_L_GLOBAL)
SYM_CODE_END(ret_from_exception)
ASM_NOKPROBE(ret_from_exception)

#ifdef CONFIG_VMAP_STACK
SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
        /* we reach here from kernel context, sscratch must be 0 */
        csrrw x31, CSR_SCRATCH, x31
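        /*
         * sscratch holds 0 in kernel context, so the swap above parks the
         * live x31 value in sscratch and frees x31 for use as a temporary.
         */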
        asm_per_cpu sp, overflow_stack, x31
        li x31, OVERFLOW_STACK_SIZE
        add sp, sp, x31
        /* zero x31 again so the swap below restores sscratch to 0 and x31 to its old value */
        xor x31, x31, x31
        csrrw x31, CSR_SCRATCH, x31

        addi sp, sp, -(PT_SIZE_ON_STACK)

        /* save context to the overflow stack */
        REG_S x1, PT_RA(sp)
        REG_S x3, PT_GP(sp)
        REG_S x5, PT_T0(sp)
        save_from_x6_to_x31

        REG_L s0, TASK_TI_KERNEL_SP(tp)
        csrr s1, CSR_STATUS
        csrr s2, CSR_EPC
        csrr s3, CSR_TVAL
        csrr s4, CSR_CAUSE
        csrr s5, CSR_SCRATCH
        REG_S s0, PT_SP(sp)
        REG_S s1, PT_STATUS(sp)
        REG_S s2, PT_EPC(sp)
        REG_S s3, PT_BADADDR(sp)
        REG_S s4, PT_CAUSE(sp)
        REG_S s5, PT_TP(sp)
        move a0, sp
        tail handle_bad_stack
SYM_CODE_END(handle_kernel_stack_overflow)
ASM_NOKPROBE(handle_kernel_stack_overflow)
#endif

SYM_CODE_START(ret_from_fork_kernel_asm)
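        /*
         * s0/s1 carry the kernel thread's fn and fn_arg (see the moves
         * below); they are expected to have been stashed there at fork time,
         * presumably by the arch copy_thread().
         */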
        call schedule_tail
        move a0, s1 /* fn_arg */
        move a1, s0 /* fn */
        move a2, sp /* pt_regs */
        call ret_from_fork_kernel
        j ret_from_exception
SYM_CODE_END(ret_from_fork_kernel_asm)

SYM_CODE_START(ret_from_fork_user_asm)
        call schedule_tail
        move a0, sp /* pt_regs */
        call ret_from_fork_user
        j ret_from_exception
SYM_CODE_END(ret_from_fork_user_asm)

#ifdef CONFIG_IRQ_STACKS
/*
 * void call_on_irq_stack(struct pt_regs *regs,
 *                        void (*func)(struct pt_regs *));
 *
 * Calls func(regs) using the per-CPU IRQ stack.
 */
SYM_FUNC_START(call_on_irq_stack)
        /* Create a frame record to save ra and s0 (fp) */
        addi sp, sp, -STACKFRAME_SIZE_ON_STACK
        REG_S ra, STACKFRAME_RA(sp)
        REG_S s0, STACKFRAME_FP(sp)
        addi s0, sp, STACKFRAME_SIZE_ON_STACK

        /* Switch to the per-CPU shadow call stack */
        scs_save_current
        scs_load_irq_stack t0

        /* Switch to the per-CPU IRQ stack and call the handler */
        load_per_cpu t0, irq_stack_ptr, t1
        li t1, IRQ_STACK_SIZE
        add sp, t0, t1
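        /* sp now points to the top of the IRQ stack; the stack grows down */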
        jalr a1

        /* Switch back to the thread shadow call stack */
        scs_load_current

        /* Switch back to the thread stack and restore ra and s0 */
        addi sp, s0, -STACKFRAME_SIZE_ON_STACK
        REG_L ra, STACKFRAME_RA(sp)
        REG_L s0, STACKFRAME_FP(sp)
        addi sp, sp, STACKFRAME_SIZE_ON_STACK

        ret
SYM_FUNC_END(call_on_irq_stack)
#endif /* CONFIG_IRQ_STACKS */

/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 *  a0: previous task_struct (must be preserved across the switch)
 *  a1: next task_struct
 *
 * The value of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
SYM_FUNC_START(__switch_to)
        /* Save context into prev->thread */
        li a4, TASK_THREAD_RA
        add a3, a0, a4
        add a4, a1, a4
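        /*
         * a3 = &prev->thread.ra, a4 = &next->thread.ra; the TASK_THREAD_*_RA
         * offsets used below are relative to thread.ra.
         */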
        REG_S ra, TASK_THREAD_RA_RA(a3)
        REG_S sp, TASK_THREAD_SP_RA(a3)
        REG_S s0, TASK_THREAD_S0_RA(a3)
        REG_S s1, TASK_THREAD_S1_RA(a3)
        REG_S s2, TASK_THREAD_S2_RA(a3)
        REG_S s3, TASK_THREAD_S3_RA(a3)
        REG_S s4, TASK_THREAD_S4_RA(a3)
        REG_S s5, TASK_THREAD_S5_RA(a3)
        REG_S s6, TASK_THREAD_S6_RA(a3)
        REG_S s7, TASK_THREAD_S7_RA(a3)
        REG_S s8, TASK_THREAD_S8_RA(a3)
        REG_S s9, TASK_THREAD_S9_RA(a3)
        REG_S s10, TASK_THREAD_S10_RA(a3)
        REG_S s11, TASK_THREAD_S11_RA(a3)

        /* save the user space access flag */
        csrr s0, CSR_STATUS
        REG_S s0, TASK_THREAD_SUM_RA(a3)

        /* Save the kernel shadow call stack pointer */
        scs_save_current
        /* Restore context from next->thread */
        REG_L s0, TASK_THREAD_SUM_RA(a4)
        li s1, SR_SUM
        and s0, s0, s1
        csrs CSR_STATUS, s0
        REG_L ra, TASK_THREAD_RA_RA(a4)
        REG_L sp, TASK_THREAD_SP_RA(a4)
        REG_L s0, TASK_THREAD_S0_RA(a4)
        REG_L s1, TASK_THREAD_S1_RA(a4)
        REG_L s2, TASK_THREAD_S2_RA(a4)
        REG_L s3, TASK_THREAD_S3_RA(a4)
        REG_L s4, TASK_THREAD_S4_RA(a4)
        REG_L s5, TASK_THREAD_S5_RA(a4)
        REG_L s6, TASK_THREAD_S6_RA(a4)
        REG_L s7, TASK_THREAD_S7_RA(a4)
        REG_L s8, TASK_THREAD_S8_RA(a4)
        REG_L s9, TASK_THREAD_S9_RA(a4)
        REG_L s10, TASK_THREAD_S10_RA(a4)
        REG_L s11, TASK_THREAD_S11_RA(a4)
        /* The offset of thread_info in task_struct is zero. */
        move tp, a1
        /* Switch to the next shadow call stack */
        scs_load_current
        ret
SYM_FUNC_END(__switch_to)

#ifndef CONFIG_MMU
#define do_page_fault do_trap_unknown
#endif

        .section ".rodata"
        .align LGREG
        /* Exception vector table */
SYM_DATA_START_LOCAL(excp_vect_table)
        RISCV_PTR do_trap_insn_misaligned
        ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault)
        RISCV_PTR do_trap_insn_illegal
        RISCV_PTR do_trap_break
        RISCV_PTR do_trap_load_misaligned
        RISCV_PTR do_trap_load_fault
        RISCV_PTR do_trap_store_misaligned
        RISCV_PTR do_trap_store_fault
        RISCV_PTR do_trap_ecall_u /* system call */
        RISCV_PTR do_trap_ecall_s
        RISCV_PTR do_trap_unknown
        RISCV_PTR do_trap_ecall_m
        /* instruction page fault */
        ALT_PAGE_FAULT(RISCV_PTR do_page_fault)
        RISCV_PTR do_page_fault /* load page fault */
        RISCV_PTR do_trap_unknown
        RISCV_PTR do_page_fault /* store page fault */
SYM_DATA_END_LABEL(excp_vect_table, SYM_L_LOCAL, excp_vect_table_end)

#ifndef CONFIG_MMU
SYM_DATA_START(__user_rt_sigreturn)
        li a7, __NR_rt_sigreturn
        ecall
SYM_DATA_END(__user_rt_sigreturn)
#endif