/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/scs.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/errata_list.h>
#include <linux/sizes.h>

	.section .irqentry.text, "ax"

.macro new_vmalloc_check
	REG_S	a0, TASK_TI_A0(tp)
	csrr	a0, CSR_CAUSE
	/* Exclude IRQs */
	blt	a0, zero, .Lnew_vmalloc_restore_context_a0

	REG_S	a1, TASK_TI_A1(tp)
	/* Only check new_vmalloc if we are in page/protection fault */
	li	a1, EXC_LOAD_PAGE_FAULT
	beq	a0, a1, .Lnew_vmalloc_kernel_address
	li	a1, EXC_STORE_PAGE_FAULT
	beq	a0, a1, .Lnew_vmalloc_kernel_address
	li	a1, EXC_INST_PAGE_FAULT
	bne	a0, a1, .Lnew_vmalloc_restore_context_a1

.Lnew_vmalloc_kernel_address:
	/* Is it a kernel address? */
	csrr	a0, CSR_TVAL
	bge	a0, zero, .Lnew_vmalloc_restore_context_a1

	/* Check if a new vmalloc mapping appeared that could explain the trap */
	REG_S	a2, TASK_TI_A2(tp)
	/*
	 * Computes:
	 * a0 = &new_vmalloc[BIT_WORD(cpu)]
	 * a1 = BIT_MASK(cpu)
	 */
	lw	a2, TASK_TI_CPU(tp)
	/*
	 * Compute the new_vmalloc element position:
	 * (cpu / 64) * 8 = (cpu >> 6) << 3
	 */
	srli	a1, a2, 6
	slli	a1, a1, 3
	la	a0, new_vmalloc
	add	a0, a0, a1
	/*
	 * Compute the bit position in the new_vmalloc element:
	 * bit_pos = cpu % 64 = cpu - (cpu / 64) * 64 = cpu - (cpu >> 6) << 6
	 *	   = cpu - ((cpu >> 6) << 3) << 3
	 */
	slli	a1, a1, 3
	sub	a1, a2, a1
	/* Compute the "get mask": 1 << bit_pos */
	li	a2, 1
	sll	a1, a2, a1

	/* Check the value of new_vmalloc for this cpu */
	REG_L	a2, 0(a0)
	and	a2, a2, a1
	beq	a2, zero, .Lnew_vmalloc_restore_context

	/* Atomically reset the current cpu bit in new_vmalloc */
	amoxor.d	a0, a1, (a0)

	/* Only emit a sfence.vma if the uarch caches invalid entries */
	ALTERNATIVE("sfence.vma", "nop", 0, RISCV_ISA_EXT_SVVPTC, 1)

	REG_L	a0, TASK_TI_A0(tp)
	REG_L	a1, TASK_TI_A1(tp)
	REG_L	a2, TASK_TI_A2(tp)
	csrw	CSR_SCRATCH, x0
	sret

.Lnew_vmalloc_restore_context:
	REG_L	a2, TASK_TI_A2(tp)
.Lnew_vmalloc_restore_context_a1:
	REG_L	a1, TASK_TI_A1(tp)
.Lnew_vmalloc_restore_context_a0:
	REG_L	a0, TASK_TI_A0(tp)
.endm

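/*
 * For readability, the macro above roughly corresponds to the following
 * C-like pseudocode (an illustrative sketch only, not the actual
 * implementation; the helper names are made up, and a0-a2 are temporarily
 * spilled to thread_info around the check):
 *
 *	if (is_page_fault(cause) && is_kernel_address(tval) &&
 *	    test_and_clear_bit(cpu, new_vmalloc)) {
 *		sfence_vma_unless_uarch_does_not_cache_invalid_entries();
 *		sret();		// retry the faulting access
 *	}
 *	// otherwise fall through to the regular exception path
 */
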
SYM_CODE_START(handle_exception)
	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer.  If we came from the kernel, the scratch
	 * register will contain 0, and we should continue on the current TP.
	 */
	csrrw	tp, CSR_SCRATCH, tp
	bnez	tp, .Lsave_context

.Lrestore_kernel_tpsp:
	csrr	tp, CSR_SCRATCH

#ifdef CONFIG_64BIT
	/*
	 * The RISC-V kernel does not eagerly emit a sfence.vma after each
	 * new vmalloc mapping, which may result in exceptions:
	 * - if the uarch caches invalid entries, the new mapping would not be
	 *   observed by the page table walker and an invalidation is needed.
	 * - if the uarch does not cache invalid entries, a reordered access
	 *   could "miss" the new mapping and trap: in that case, we only need
	 *   to retry the access, no sfence.vma is required.
	 */
	new_vmalloc_check
#endif

	REG_S	sp, TASK_TI_KERNEL_SP(tp)

#ifdef CONFIG_VMAP_STACK
	addi	sp, sp, -(PT_SIZE_ON_STACK)
	srli	sp, sp, THREAD_SHIFT
	andi	sp, sp, 0x1
	bnez	sp, handle_kernel_stack_overflow
	REG_L	sp, TASK_TI_KERNEL_SP(tp)
#endif

.Lsave_context:
	REG_S	sp, TASK_TI_USER_SP(tp)
	REG_L	sp, TASK_TI_KERNEL_SP(tp)
	addi	sp, sp, -(PT_SIZE_ON_STACK)
	REG_S	x1, PT_RA(sp)
	REG_S	x3, PT_GP(sp)
	REG_S	x5, PT_T0(sp)
	save_from_x6_to_x31

	/*
	 * Disable user-mode memory access as it should only be set in the
	 * actual user copy routines.
	 *
	 * Disable the FPU/Vector to detect illegal usage of floating point
	 * or vector in kernel space.
	 */
	li	t0, SR_SUM | SR_FS_VS

	REG_L	s0, TASK_TI_USER_SP(tp)
	csrrc	s1, CSR_STATUS, t0
	csrr	s2, CSR_EPC
	csrr	s3, CSR_TVAL
	csrr	s4, CSR_CAUSE
	csrr	s5, CSR_SCRATCH
	REG_S	s0, PT_SP(sp)
	REG_S	s1, PT_STATUS(sp)
	REG_S	s2, PT_EPC(sp)
	REG_S	s3, PT_BADADDR(sp)
	REG_S	s4, PT_CAUSE(sp)
	REG_S	s5, PT_TP(sp)

	/*
	 * Set the scratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel
	 */
	csrw	CSR_SCRATCH, x0

	/* Load the global pointer */
	load_global_pointer

	/* Load the kernel shadow call stack pointer if coming from userspace */
	scs_load_current_if_task_changed s5

#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
	move	a0, sp
	call	riscv_v_context_nesting_start
#endif
	move	a0, sp /* pt_regs */

	/*
	 * MSB of cause differentiates between
	 * interrupts and exceptions
	 */
	bge	s4, zero, 1f

	/* Handle interrupts */
	call	do_irq
	j	ret_from_exception
1:
	/* Handle other exceptions */
	slli	t0, s4, RISCV_LGPTR
	la	t1, excp_vect_table
	la	t2, excp_vect_table_end
	add	t0, t1, t0
	/* Check if exception code lies within bounds */
	bgeu	t0, t2, 3f
	REG_L	t1, 0(t0)
2:	jalr	t1
	j	ret_from_exception
3:

	la	t1, do_trap_unknown
	j	2b
SYM_CODE_END(handle_exception)
ASM_NOKPROBE(handle_exception)
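
/*
 * In C-like pseudocode, the dispatch in handle_exception above amounts to
 * the following (an illustrative sketch only, not the actual implementation):
 *
 *	if ((long)cause < 0)				// MSB set: interrupt
 *		do_irq(regs);
 *	else if (&excp_vect_table[cause] < excp_vect_table_end)
 *		excp_vect_table[cause](regs);
 *	else
 *		do_trap_unknown(regs);
 *	ret_from_exception();
 */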

/*
 * The ret_from_exception must be called with interrupts disabled. Here is the
 * caller list:
 * - handle_exception
 * - ret_from_fork
 */
SYM_CODE_START_NOALIGN(ret_from_exception)
	REG_L	s0, PT_STATUS(sp)
#ifdef CONFIG_RISCV_M_MODE
	/* the MPP value is too large to be used as an immediate arg for addi */
	li	t0, SR_MPP
	and	s0, s0, t0
#else
	andi	s0, s0, SR_SPP
#endif
	bnez	s0, 1f

#ifdef CONFIG_KSTACK_ERASE
	call	stackleak_erase_on_task_stack
#endif

	/* Save unwound kernel stack pointer in thread_info */
	addi	s0, sp, PT_SIZE_ON_STACK
	REG_S	s0, TASK_TI_KERNEL_SP(tp)

	/* Save the kernel shadow call stack pointer */
	scs_save_current

	/*
	 * Save TP into the scratch register, so we can find the kernel data
	 * structures again.
	 */
	csrw	CSR_SCRATCH, tp
1:
#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
	move	a0, sp
	call	riscv_v_context_nesting_end
#endif
	REG_L	a0, PT_STATUS(sp)
	/*
	 * The current load reservation is effectively part of the processor's
	 * state, in the sense that load reservations cannot be shared between
	 * different hart contexts. We can't actually save and restore a load
	 * reservation, so instead here we clear any existing reservation --
	 * it's always legal for implementations to clear load reservations at
	 * any point (as long as the forward progress guarantee is kept, but
	 * we'll ignore that here).
	 *
	 * Dangling load reservations can be the result of taking a trap in the
	 * middle of an LR/SC sequence, but can also be the result of a taken
	 * forward branch around an SC -- which is how we implement CAS. As a
	 * result we need to clear reservations between the last CAS and the
	 * jump back to the new context. While it is unlikely the store
	 * completes, implementations are allowed to expand reservations to be
	 * arbitrarily large.
	 */
	REG_L	a2, PT_EPC(sp)
	REG_SC	x0, a2, PT_EPC(sp)

	csrw	CSR_STATUS, a0
	csrw	CSR_EPC, a2

	REG_L	x1, PT_RA(sp)
	REG_L	x3, PT_GP(sp)
	REG_L	x4, PT_TP(sp)
	REG_L	x5, PT_T0(sp)
	restore_from_x6_to_x31

	REG_L	x2, PT_SP(sp)

#ifdef CONFIG_RISCV_M_MODE
	mret
#else
	sret
#endif
SYM_INNER_LABEL(ret_from_exception_end, SYM_L_GLOBAL)
SYM_CODE_END(ret_from_exception)
ASM_NOKPROBE(ret_from_exception)

#ifdef CONFIG_VMAP_STACK
SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
	/* we reach here from kernel context, sscratch must be 0 */
	csrrw	x31, CSR_SCRATCH, x31
	asm_per_cpu sp, overflow_stack, x31
	li	x31, OVERFLOW_STACK_SIZE
	add	sp, sp, x31
	/* zero out x31 again and restore x31 */
	xor	x31, x31, x31
	csrrw	x31, CSR_SCRATCH, x31

	addi	sp, sp, -(PT_SIZE_ON_STACK)

	//save context to overflow stack
	REG_S	x1, PT_RA(sp)
	REG_S	x3, PT_GP(sp)
	REG_S	x5, PT_T0(sp)
	save_from_x6_to_x31

	REG_L	s0, TASK_TI_KERNEL_SP(tp)
	csrr	s1, CSR_STATUS
	csrr	s2, CSR_EPC
	csrr	s3, CSR_TVAL
	csrr	s4, CSR_CAUSE
	csrr	s5, CSR_SCRATCH
	REG_S	s0, PT_SP(sp)
	REG_S	s1, PT_STATUS(sp)
	REG_S	s2, PT_EPC(sp)
	REG_S	s3, PT_BADADDR(sp)
	REG_S	s4, PT_CAUSE(sp)
	REG_S	s5, PT_TP(sp)
	move	a0, sp
	tail	handle_bad_stack
SYM_CODE_END(handle_kernel_stack_overflow)
ASM_NOKPROBE(handle_kernel_stack_overflow)
#endif

SYM_CODE_START(ret_from_fork_kernel_asm)
	call	schedule_tail
	move	a0, s1 /* fn_arg */
	move	a1, s0 /* fn */
	move	a2, sp /* pt_regs */
	call	ret_from_fork_kernel
	j	ret_from_exception
SYM_CODE_END(ret_from_fork_kernel_asm)

SYM_CODE_START(ret_from_fork_user_asm)
	call	schedule_tail
	move	a0, sp /* pt_regs */
	call	ret_from_fork_user
	j	ret_from_exception
SYM_CODE_END(ret_from_fork_user_asm)

#ifdef CONFIG_IRQ_STACKS
/*
 * void call_on_irq_stack(struct pt_regs *regs,
 *			  void (*func)(struct pt_regs *));
 *
 * Calls func(regs) using the per-CPU IRQ stack.
 */
SYM_FUNC_START(call_on_irq_stack)
	/* Create a frame record to save ra and s0 (fp) */
	addi	sp, sp, -STACKFRAME_SIZE_ON_STACK
	REG_S	ra, STACKFRAME_RA(sp)
	REG_S	s0, STACKFRAME_FP(sp)
	addi	s0, sp, STACKFRAME_SIZE_ON_STACK

	/* Switch to the per-CPU shadow call stack */
	scs_save_current
	scs_load_irq_stack t0

	/* Switch to the per-CPU IRQ stack and call the handler */
	load_per_cpu t0, irq_stack_ptr, t1
	li	t1, IRQ_STACK_SIZE
	add	sp, t0, t1
	jalr	a1

	/* Switch back to the thread shadow call stack */
	scs_load_current

	/* Switch back to the thread stack and restore ra and s0 */
	addi	sp, s0, -STACKFRAME_SIZE_ON_STACK
	REG_L	ra, STACKFRAME_RA(sp)
	REG_L	s0, STACKFRAME_FP(sp)
	addi	sp, sp, STACKFRAME_SIZE_ON_STACK

	ret
SYM_FUNC_END(call_on_irq_stack)
#endif /* CONFIG_IRQ_STACKS */

/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 *   a0: previous task_struct (must be preserved across the switch)
 *   a1: next task_struct
 *
 * The value of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
SYM_FUNC_START(__switch_to)
	/* Save context into prev->thread */
	li	a4, TASK_THREAD_RA
	add	a3, a0, a4
	add	a4, a1, a4
	REG_S	ra, TASK_THREAD_RA_RA(a3)
	REG_S	sp, TASK_THREAD_SP_RA(a3)
	REG_S	s0, TASK_THREAD_S0_RA(a3)
	REG_S	s1, TASK_THREAD_S1_RA(a3)
	REG_S	s2, TASK_THREAD_S2_RA(a3)
	REG_S	s3, TASK_THREAD_S3_RA(a3)
	REG_S	s4, TASK_THREAD_S4_RA(a3)
	REG_S	s5, TASK_THREAD_S5_RA(a3)
	REG_S	s6, TASK_THREAD_S6_RA(a3)
	REG_S	s7, TASK_THREAD_S7_RA(a3)
	REG_S	s8, TASK_THREAD_S8_RA(a3)
	REG_S	s9, TASK_THREAD_S9_RA(a3)
	REG_S	s10, TASK_THREAD_S10_RA(a3)
	REG_S	s11, TASK_THREAD_S11_RA(a3)

	/* save the user space access flag */
	csrr	s0, CSR_STATUS
	REG_S	s0, TASK_THREAD_SUM_RA(a3)

	/* Save the kernel shadow call stack pointer */
	scs_save_current
	/* Restore context from next->thread */
	REG_L	s0, TASK_THREAD_SUM_RA(a4)
	li	s1, SR_SUM
	and	s0, s0, s1
	csrs	CSR_STATUS, s0
	REG_L	ra, TASK_THREAD_RA_RA(a4)
	REG_L	sp, TASK_THREAD_SP_RA(a4)
	REG_L	s0, TASK_THREAD_S0_RA(a4)
	REG_L	s1, TASK_THREAD_S1_RA(a4)
	REG_L	s2, TASK_THREAD_S2_RA(a4)
	REG_L	s3, TASK_THREAD_S3_RA(a4)
	REG_L	s4, TASK_THREAD_S4_RA(a4)
	REG_L	s5, TASK_THREAD_S5_RA(a4)
	REG_L	s6, TASK_THREAD_S6_RA(a4)
	REG_L	s7, TASK_THREAD_S7_RA(a4)
	REG_L	s8, TASK_THREAD_S8_RA(a4)
	REG_L	s9, TASK_THREAD_S9_RA(a4)
	REG_L	s10, TASK_THREAD_S10_RA(a4)
	REG_L	s11, TASK_THREAD_S11_RA(a4)
	/* The offset of thread_info in task_struct is zero. */
	move	tp, a1
	/* Switch to the next shadow call stack */
	scs_load_current
	ret
SYM_FUNC_END(__switch_to)

#ifndef CONFIG_MMU
#define do_page_fault do_trap_unknown
#endif

	.section ".rodata"
	.align LGREG
	/* Exception vector table */
SYM_DATA_START_LOCAL(excp_vect_table)
	RISCV_PTR do_trap_insn_misaligned
	ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault)
	RISCV_PTR do_trap_insn_illegal
	RISCV_PTR do_trap_break
	RISCV_PTR do_trap_load_misaligned
	RISCV_PTR do_trap_load_fault
	RISCV_PTR do_trap_store_misaligned
	RISCV_PTR do_trap_store_fault
	RISCV_PTR do_trap_ecall_u /* system call */
	RISCV_PTR do_trap_ecall_s
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_trap_ecall_m
	/* instruction page fault */
	ALT_PAGE_FAULT(RISCV_PTR do_page_fault)
	RISCV_PTR do_page_fault   /* load page fault */
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_page_fault   /* store page fault */
SYM_DATA_END_LABEL(excp_vect_table, SYM_L_LOCAL, excp_vect_table_end)

#ifndef CONFIG_MMU
SYM_DATA_START(__user_rt_sigreturn)
	li a7, __NR_rt_sigreturn
	ecall
SYM_DATA_END(__user_rt_sigreturn)
#endif