/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/scs.h>
#include <asm/stacktrace/frame.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>

	.macro	clear_gp_regs
	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
	mov	x\n, xzr
	.endr
	.endm

	.macro kernel_ventry, el:req, ht:req, regsize:req, label:req
	.align 7
.Lventry_start\@:
	.if	\el == 0
	/*
	 * This must be the first instruction of the EL0 vector entries. It is
	 * skipped by the trampoline vectors, to trigger the cleanup.
	 */
	b	.Lskip_tramp_vectors_cleanup\@
	.if	\regsize == 64
	mrs	x30, tpidrro_el0
	msr	tpidrro_el0, xzr
	.else
	mov	x30, xzr
	.endif
.Lskip_tramp_vectors_cleanup\@:
	.endif

	sub	sp, sp, #PT_REGS_SIZE
	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
	 * should always be zero.
	 */
	add	sp, sp, x0			// sp' = sp + x0
	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
	tbnz	x0, #THREAD_SHIFT, 0f
	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
	b	el\el\ht\()_\regsize\()_\label

0:
	/*
	 * Either we've just detected an overflow, or we've taken an exception
	 * while on the overflow stack. Either way, we won't return to
	 * userspace, and can clobber EL0 registers to free up GPRs.
	 */

	/* Stash the original SP (minus PT_REGS_SIZE) in tpidr_el0. */
	msr	tpidr_el0, x0

	/* Recover the original x0 value and stash it in tpidrro_el0 */
	sub	x0, sp, x0
	msr	tpidrro_el0, x0

	/* Switch to the overflow stack */
	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0

	/*
	 * Check whether we were already on the overflow stack. This may happen
	 * after panic() re-enables interrupts.
	 */
	mrs	x0, tpidr_el0			// sp of interrupted context
	sub	x0, sp, x0			// delta with top of overflow stack
	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
	b.ne	__bad_stack			// no? -> bad stack pointer

	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
	sub	sp, sp, x0
	mrs	x0, tpidrro_el0
	b	el\el\ht\()_\regsize\()_\label
.org .Lventry_start\@ + 128	// Did we overflow the ventry slot?
	.endm
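/*
 * A worked example of the overflow check above (illustrative only; the
 * numbers assume 16KiB stacks, i.e. THREAD_SHIFT == 14, THREAD_SIZE ==
 * 0x4000, and stacks aligned to twice their size as the comment above
 * requires):
 *
 * A task stack might span [0xffff800012340000, 0xffff800012344000). Every
 * valid SP in that range has bit 14 clear, because the base is aligned to
 * 0x8000 and the offset is below 0x4000. If the stack overflows, SP drops
 * just below the base, e.g. to 0xffff80001233ffd0, where bit 14 is set,
 * and the tbnz above diverts to the overflow path. The add/sub dance
 * exists purely so the test can run without spilling a GPR: x0 is folded
 * into SP, the original SP is recovered into x0 for the test, and both
 * are put back before branching to the real handler.
 */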

	.macro tramp_alias, dst, sym
	.set	.Lalias\@, TRAMP_VALIAS + \sym - .entry.tramp.text
	movz	\dst, :abs_g2_s:.Lalias\@
	movk	\dst, :abs_g1_nc:.Lalias\@
	movk	\dst, :abs_g0_nc:.Lalias\@
	.endm

	/*
	 * This macro corrupts x0-x3. It is the caller's duty to save/restore
	 * them if required.
	 */
	.macro	apply_ssbd, state, tmp1, tmp2
alternative_cb	ARM64_ALWAYS_SYSTEM, spectre_v4_patch_fw_mitigation_enable
	b	.L__asm_ssbd_skip\@		// Patched to NOP
alternative_cb_end
	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
	cbz	\tmp2, .L__asm_ssbd_skip\@
	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	mov	w1, #\state
alternative_cb	ARM64_ALWAYS_SYSTEM, smccc_patch_fw_mitigation_conduit
	nop					// Patched to SMC/HVC #0
alternative_cb_end
.L__asm_ssbd_skip\@:
	.endm

	/* Check for MTE asynchronous tag check faults */
	.macro check_mte_async_tcf, tmp, ti_flags, thread_sctlr
#ifdef CONFIG_ARM64_MTE
	.arch_extension lse
alternative_if_not ARM64_MTE
	b	1f
alternative_else_nop_endif
	/*
	 * Asynchronous tag check faults are only possible in ASYNC (2) or
	 * ASYM (3) modes. In each of these modes bit 1 of SCTLR_EL1.TCF0 is
	 * set, so skip the check if it is unset.
	 */
	tbz	\thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
	mrs_s	\tmp, SYS_TFSRE0_EL1
	tbz	\tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
	/* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
	mov	\tmp, #_TIF_MTE_ASYNC_FAULT
	add	\ti_flags, tsk, #TSK_TI_FLAGS
	stset	\tmp, [\ti_flags]
1:
#endif
	.endm

	/* Clear the MTE asynchronous tag check faults */
	.macro clear_mte_async_tcf thread_sctlr
#ifdef CONFIG_ARM64_MTE
alternative_if ARM64_MTE
	/* See comment in check_mte_async_tcf above. */
	tbz	\thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
	dsb	ish
	msr_s	SYS_TFSRE0_EL1, xzr
1:
alternative_else_nop_endif
#endif
	.endm

	.macro mte_set_gcr, mte_ctrl, tmp
#ifdef CONFIG_ARM64_MTE
	ubfx	\tmp, \mte_ctrl, #MTE_CTRL_GCR_USER_EXCL_SHIFT, #16
	orr	\tmp, \tmp, #SYS_GCR_EL1_RRND
	msr_s	SYS_GCR_EL1, \tmp
#endif
	.endm

	.macro mte_set_kernel_gcr, tmp, tmp2
#ifdef CONFIG_KASAN_HW_TAGS
alternative_cb	ARM64_ALWAYS_SYSTEM, kasan_hw_tags_enable
	b	1f
alternative_cb_end
	mov	\tmp, KERNEL_GCR_EL1
	msr_s	SYS_GCR_EL1, \tmp
1:
#endif
	.endm

	.macro mte_set_user_gcr, tsk, tmp, tmp2
#ifdef CONFIG_KASAN_HW_TAGS
alternative_cb	ARM64_ALWAYS_SYSTEM, kasan_hw_tags_enable
	b	1f
alternative_cb_end
	ldr	\tmp, [\tsk, #THREAD_MTE_CTRL]

	mte_set_gcr \tmp, \tmp2
1:
#endif
	.endm

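/*
 * Rough map of the exception frame that kernel_entry builds below. The
 * offsets are the asm-offsets symbols used by the code; this is an
 * illustrative summary, not an authoritative struct pt_regs layout:
 *
 *	[sp + 16 * 0 .. 16 * 14]	x0..x29, saved in pairs by stp
 *	[sp + S_LR]			lr (x30) and the aborted SP
 *	[sp + S_PC]			aborted PC (ELR_EL1) and PSTATE (SPSR_EL1)
 *	[sp + S_SYSCALLNO]		NO_SYSCALL unless el0_svc overwrites it
 *	[sp + S_PMR]			saved ICC_PMR_EL1 (pseudo-NMI only)
 *	[sp + S_STACKFRAME]		synthetic frame record for the unwinder
 */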
	.macro	kernel_entry, el, regsize = 64
	.if	\el == 0
	alternative_insn nop, SET_PSTATE_DIT(1), ARM64_HAS_DIT
	.endif
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
	clear_gp_regs
	mrs	x21, sp_el0
	ldr_this_cpu	tsk, __entry_task, x20
	msr	sp_el0, tsk

	/*
	 * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
	 * when scheduling.
	 */
	ldr	x19, [tsk, #TSK_TI_FLAGS]
	disable_step_tsk x19, x20

	/* Check for asynchronous tag check faults in user space */
	ldr	x0, [tsk, THREAD_SCTLR_USER]
	check_mte_async_tcf x22, x23, x0

#ifdef CONFIG_ARM64_PTR_AUTH
alternative_if ARM64_HAS_ADDRESS_AUTH
	/*
	 * Enable IA for in-kernel PAC if the task had it disabled. Although
	 * this could be implemented with an unconditional MRS which would avoid
	 * a load, this was measured to be slower on Cortex-A75 and Cortex-A76.
	 *
	 * Install the kernel IA key only if IA was enabled in the task. If IA
	 * was disabled on kernel exit then we would have left the kernel IA
	 * installed so there is no need to install it again.
	 */
	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
	__ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23
	b	2f
1:
	mrs	x0, sctlr_el1
	orr	x0, x0, SCTLR_ELx_ENIA
	msr	sctlr_el1, x0
2:
alternative_else_nop_endif
#endif

	apply_ssbd 1, x22, x23

	mte_set_kernel_gcr x22, x23

	/*
	 * Any non-self-synchronizing system register updates required for
	 * kernel entry should be placed before this point.
	 */
alternative_if ARM64_MTE
	isb
	b	1f
alternative_else_nop_endif
alternative_if ARM64_HAS_ADDRESS_AUTH
	isb
alternative_else_nop_endif
1:

	scs_load_current
	.else
	add	x21, sp, #PT_REGS_SIZE
	get_current_task tsk
	.endif /* \el == 0 */
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]

	/*
	 * Create a metadata frame record. The unwinder will use this to
	 * identify and unwind exception boundaries.
	 */
	stp	xzr, xzr, [sp, #S_STACKFRAME]
	.if \el == 0
	mov	x0, #FRAME_META_TYPE_FINAL
	.else
	mov	x0, #FRAME_META_TYPE_PT_REGS
	.endif
	str	x0, [sp, #S_STACKFRAME_TYPE]
	add	x29, sp, #S_STACKFRAME

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
	bl	__swpan_entry_el\el
alternative_else_nop_endif
#endif

	stp	x22, x23, [sp, #S_PC]

	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
	.if	\el == 0
	mov	w21, #NO_SYSCALL
	str	w21, [sp, #S_SYSCALLNO]
	.endif

#ifdef CONFIG_ARM64_PSEUDO_NMI
alternative_if_not ARM64_HAS_GIC_PRIO_MASKING
	b	.Lskip_pmr_save\@
alternative_else_nop_endif

	mrs_s	x20, SYS_ICC_PMR_EL1
	str	w20, [sp, #S_PMR]
	mov	x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
	msr_s	SYS_ICC_PMR_EL1, x20

.Lskip_pmr_save\@:
#endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x20 - ICC_PMR_EL1
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm

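/*
 * kernel_exit below unwinds kernel_entry in roughly reverse order: the
 * saved PMR is restored first, then ELR_EL1/SPSR_EL1 are reloaded and
 * programmed, then the GPRs, before the final eret. For returns to EL1
 * the DAIF bits are masked explicitly at the top of the macro; for
 * returns to EL0 the C exit-to-user path is expected to have masked them
 * already. Either way, no interrupt can be taken between writing
 * ELR_EL1/SPSR_EL1 and the eret, which would otherwise corrupt the
 * return state.
 */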
	.macro	kernel_exit, el
	.if	\el != 0
	disable_daif
	.endif

#ifdef CONFIG_ARM64_PSEUDO_NMI
alternative_if_not ARM64_HAS_GIC_PRIO_MASKING
	b	.Lskip_pmr_restore\@
alternative_else_nop_endif

	ldr	w20, [sp, #S_PMR]
	msr_s	SYS_ICC_PMR_EL1, x20

	/* Ensure priority change is seen by redistributor */
alternative_if_not ARM64_HAS_GIC_PRIO_RELAXED_SYNC
	dsb	sy
alternative_else_nop_endif

.Lskip_pmr_restore\@:
#endif

	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
	bl	__swpan_exit_el\el
alternative_else_nop_endif
#endif

	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
	tst	x22, #PSR_MODE32_BIT		// native task?
	b.eq	3f

#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr	contextidr_el1, xzr
#endif
alternative_else_nop_endif
#endif
3:
	scs_save tsk

	/* Ignore asynchronous tag check faults in the uaccess routines */
	ldr	x0, [tsk, THREAD_SCTLR_USER]
	clear_mte_async_tcf x0

#ifdef CONFIG_ARM64_PTR_AUTH
alternative_if ARM64_HAS_ADDRESS_AUTH
	/*
	 * IA was enabled for in-kernel PAC. Disable it now if needed, or
	 * alternatively install the user's IA. All other per-task keys and
	 * SCTLR bits were updated on task switch.
	 *
	 * No kernel C function calls after this.
	 */
	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
	__ptrauth_keys_install_user tsk, x0, x1, x2
	b	2f
1:
	mrs	x0, sctlr_el1
	bic	x0, x0, SCTLR_ELx_ENIA
	msr	sctlr_el1, x0
2:
alternative_else_nop_endif
#endif

	mte_set_user_gcr tsk, x0, x1

	apply_ssbd 0, x0, x1
	.endif

	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	alternative_insn "b .L_skip_tramp_exit_\@", nop, ARM64_UNMAP_KERNEL_AT_EL0

	msr	far_el1, x29

	ldr_this_cpu	x30, this_cpu_vector, x29
	tramp_alias	x29, tramp_exit
	msr	vbar_el1, x30			// install vector table
	ldr	lr, [sp, #S_LR]			// restore x30
	add	sp, sp, #PT_REGS_SIZE		// restore sp
	br	x29

.L_skip_tramp_exit_\@:
#endif
	.endif

	ldr	lr, [sp, #S_LR]
	add	sp, sp, #PT_REGS_SIZE		// restore sp

	.if \el == 0
	/* This must be after the last explicit memory access */
alternative_if ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
	tlbi	vale1, xzr
	dsb	nsh
alternative_else_nop_endif
	.else
	/* Ensure any device/NC reads complete */
	alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412
	.endif

	eret
	sb
	.endm

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
	 * EL0, there is no need to check the state of TTBR0_EL1 since
	 * accesses are always enabled.
	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
	 * user mappings.
	 */
SYM_CODE_START_LOCAL(__swpan_entry_el1)
	mrs	x21, ttbr0_el1
	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
	b.eq	1f				// TTBR0 access already disabled
	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
SYM_INNER_LABEL(__swpan_entry_el0, SYM_L_LOCAL)
	__uaccess_ttbr0_disable x21
1:	ret
SYM_CODE_END(__swpan_entry_el1)

	/*
	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
	 * PAN bit checking.
	 */
SYM_CODE_START_LOCAL(__swpan_exit_el1)
	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
	__uaccess_ttbr0_enable x0, x1
1:	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
	ret
SYM_CODE_END(__swpan_exit_el1)

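/*
 * Note on the reserved-ASID test in __swpan_entry_el1 above: with
 * CONFIG_ARM64_SW_TTBR0_PAN, disabling TTBR0 access is expected to work
 * by pointing TTBR0_EL1 at an empty (reserved) page table whose ASID
 * field is zero. A zero ASID in the saved TTBR0_EL1 therefore means the
 * interrupted context had already disabled TTBR0 access, so the emulated
 * PAN bit is left set in the saved SPSR and the disable is skipped.
 */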
SYM_CODE_START_LOCAL(__swpan_exit_el0)
	__uaccess_ttbr0_enable x0, x1
	/*
	 * Enable errata workarounds only if returning to user. The only
	 * workaround currently required for TTBR0_EL1 changes are for the
	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
	 * corruption).
	 */
	b	post_ttbr_update_workaround
SYM_CODE_END(__swpan_exit_el0)
#endif

/* GPRs used by entry code */
tsk	.req	x28		// current thread_info

	.text

/*
 * Exception vectors.
 */
	.pushsection ".entry.text", "ax"

	.align	11
SYM_CODE_START(vectors)
	kernel_ventry	1, t, 64, sync		// Synchronous EL1t
	kernel_ventry	1, t, 64, irq		// IRQ EL1t
	kernel_ventry	1, t, 64, fiq		// FIQ EL1t
	kernel_ventry	1, t, 64, error		// Error EL1t

	kernel_ventry	1, h, 64, sync		// Synchronous EL1h
	kernel_ventry	1, h, 64, irq		// IRQ EL1h
	kernel_ventry	1, h, 64, fiq		// FIQ EL1h
	kernel_ventry	1, h, 64, error		// Error EL1h

	kernel_ventry	0, t, 64, sync		// Synchronous 64-bit EL0
	kernel_ventry	0, t, 64, irq		// IRQ 64-bit EL0
	kernel_ventry	0, t, 64, fiq		// FIQ 64-bit EL0
	kernel_ventry	0, t, 64, error		// Error 64-bit EL0

	kernel_ventry	0, t, 32, sync		// Synchronous 32-bit EL0
	kernel_ventry	0, t, 32, irq		// IRQ 32-bit EL0
	kernel_ventry	0, t, 32, fiq		// FIQ 32-bit EL0
	kernel_ventry	0, t, 32, error		// Error 32-bit EL0
SYM_CODE_END(vectors)

SYM_CODE_START_LOCAL(__bad_stack)
	/*
	 * We detected an overflow in kernel_ventry, which switched to the
	 * overflow stack. Stash the exception regs, and head to our overflow
	 * handler.
	 */

	/* Restore the original x0 value */
	mrs	x0, tpidrro_el0

	/*
	 * Store the original GPRs to the new stack. The original SP (minus
	 * PT_REGS_SIZE) was stashed in tpidr_el0 by kernel_ventry.
	 */
	sub	sp, sp, #PT_REGS_SIZE
	kernel_entry 1
	mrs	x0, tpidr_el0
	add	x0, x0, #PT_REGS_SIZE
	str	x0, [sp, #S_SP]

	/* Stash the regs for handle_bad_stack */
	mov	x0, sp

	/* Time to die */
	bl	handle_bad_stack
	ASM_BUG()
SYM_CODE_END(__bad_stack)


	.macro entry_handler el:req, ht:req, regsize:req, label:req
SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\label)
	kernel_entry \el, \regsize
	mov	x0, sp
	bl	el\el\ht\()_\regsize\()_\label\()_handler
	.if \el == 0
	b	ret_to_user
	.else
	b	ret_to_kernel
	.endif
SYM_CODE_END(el\el\ht\()_\regsize\()_\label)
	.endm

/*
 * Early exception handlers
 */
	entry_handler	1, t, 64, sync
	entry_handler	1, t, 64, irq
	entry_handler	1, t, 64, fiq
	entry_handler	1, t, 64, error

	entry_handler	1, h, 64, sync
	entry_handler	1, h, 64, irq
	entry_handler	1, h, 64, fiq
	entry_handler	1, h, 64, error

	entry_handler	0, t, 64, sync
	entry_handler	0, t, 64, irq
	entry_handler	0, t, 64, fiq
	entry_handler	0, t, 64, error

	entry_handler	0, t, 32, sync
	entry_handler	0, t, 32, irq
	entry_handler	0, t, 32, fiq
	entry_handler	0, t, 32, error

SYM_CODE_START_LOCAL(ret_to_kernel)
	kernel_exit 1
SYM_CODE_END(ret_to_kernel)

SYM_CODE_START_LOCAL(ret_to_user)
	ldr	x19, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
	enable_step_tsk x19, x2
#ifdef CONFIG_KSTACK_ERASE
	bl	stackleak_erase_on_task_stack
#endif
	kernel_exit 0
SYM_CODE_END(ret_to_user)

	.popsection				// .entry.text

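/*
 * Background for the two macros below (a simplified sketch of the KPTI
 * scheme, not a definitive description): with kpti enabled, userspace
 * runs on tramp_pg_dir, which maps little more than the trampoline
 * vectors, while the kernel runs on swapper_pg_dir. The two page tables
 * are expected to sit a fixed distance apart (TRAMP_SWAPPER_OFFSET), so
 * switching is a single add/sub on TTBR1_EL1. ASIDs are likewise handed
 * out in pairs, with USER_ASID_FLAG (bit 0 of the ASID, i.e. bit
 * USER_ASID_BIT of TTBR1_EL1) distinguishing the user half from the
 * kernel half, so no TLB invalidation is needed on the switch.
 */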
// Move from tramp_pg_dir to swapper_pg_dir
	.macro tramp_map_kernel, tmp
	mrs	\tmp, ttbr1_el1
	add	\tmp, \tmp, #TRAMP_SWAPPER_OFFSET
	bic	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
	/* ASID already in \tmp[63:48] */
	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
	/* 2MB boundary containing the vectors, so we nobble the walk cache */
	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
	isb
	tlbi	vae1, \tmp
	dsb	nsh
alternative_else_nop_endif
#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
	.endm

// Move from swapper_pg_dir to tramp_pg_dir
	.macro tramp_unmap_kernel, tmp
	mrs	\tmp, ttbr1_el1
	sub	\tmp, \tmp, #TRAMP_SWAPPER_OFFSET
	orr	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
	/*
	 * We avoid running the post_ttbr_update_workaround here because
	 * it's only needed by Cavium ThunderX, which requires KPTI to be
	 * disabled.
	 */
	.endm

	.macro		tramp_data_read_var	dst, var
#ifdef CONFIG_RELOCATABLE
	ldr		\dst, .L__tramp_data_\var
	.ifndef		.L__tramp_data_\var
	.pushsection	".entry.tramp.rodata", "a", %progbits
	.align		3
.L__tramp_data_\var:
	.quad		\var
	.popsection
	.endif
#else
	/*
	 * As !RELOCATABLE implies !RANDOMIZE_BASE the address is always a
	 * compile time constant (and hence not secret and not worth hiding).
	 *
	 * As statically allocated kernel code and data always live in the top
	 * 47 bits of the address space we can sign-extend bit 47 and avoid an
	 * instruction to load the upper 16 bits (which must be 0xFFFF).
	 */
	movz	\dst, :abs_g2_s:\var
	movk	\dst, :abs_g1_nc:\var
	movk	\dst, :abs_g0_nc:\var
#endif
	.endm

#define BHB_MITIGATION_NONE	0
#define BHB_MITIGATION_LOOP	1
#define BHB_MITIGATION_FW	2
#define BHB_MITIGATION_INSN	3

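/*
 * A note on the slot arithmetic in tramp_ventry below: each trampoline
 * vector computes (1b - \vector_start), the byte offset of its own
 * 128-byte slot within the trampoline vector page, adds it to the address
 * of the real 'vectors', and then adds 4 to step over the first
 * instruction of the EL0 kernel_ventry entries (the branch to
 * .Lskip_tramp_vectors_cleanup\@, which is only meant to be taken when
 * entering via the full-fat vectors). Landing past that branch is what
 * makes kernel_ventry run the cleanup that recovers x30 from tpidrro_el0.
 */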
	.macro tramp_ventry, vector_start, regsize, kpti, bhb
	.align	7
1:
	.if	\regsize == 64
	msr	tpidrro_el0, x30	// Restored in kernel_ventry
	.endif

	.if	\bhb == BHB_MITIGATION_LOOP
	/*
	 * This sequence must appear before the first indirect branch. i.e. the
	 * ret out of tramp_ventry. It appears here because x30 is free.
	 */
	__mitigate_spectre_bhb_loop	x30
	.endif // \bhb == BHB_MITIGATION_LOOP

	.if	\bhb == BHB_MITIGATION_INSN
	clearbhb
	isb
	.endif // \bhb == BHB_MITIGATION_INSN

	.if	\kpti == 1
	/*
	 * Defend against branch aliasing attacks by pushing a dummy
	 * entry onto the return stack and using a RET instruction to
	 * enter the full-fat kernel vectors.
	 */
	bl	2f
	b	.
2:
	tramp_map_kernel	x30
	alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
	tramp_data_read_var	x30, vectors
alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
	prfm	plil1strm, [x30, #(1b - \vector_start)]
alternative_else_nop_endif

	msr	vbar_el1, x30
	isb
	.else
	adr_l	x30, vectors
	.endif // \kpti == 1

	.if	\bhb == BHB_MITIGATION_FW
	/*
	 * The firmware sequence must appear before the first indirect branch.
	 * i.e. the ret out of tramp_ventry. But it also needs the stack to be
	 * mapped to save/restore the registers the SMC clobbers.
	 */
	__mitigate_spectre_bhb_fw
	.endif // \bhb == BHB_MITIGATION_FW

	add	x30, x30, #(1b - \vector_start + 4)
	ret
.org 1b + 128	// Did we overflow the ventry slot?
	.endm

	.macro	generate_tramp_vector,	kpti, bhb
.Lvector_start\@:
	.space	0x400

	.rept	4
	tramp_ventry	.Lvector_start\@, 64, \kpti, \bhb
	.endr
	.rept	4
	tramp_ventry	.Lvector_start\@, 32, \kpti, \bhb
	.endr
	.endm

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * Exception vectors trampoline.
 * The order must match __bp_harden_el1_vectors and the
 * arm64_bp_harden_el1_vectors enum.
 */
	.pushsection ".entry.tramp.text", "ax"
	.align	11
SYM_CODE_START_LOCAL_NOALIGN(tramp_vectors)
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_LOOP
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_FW
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_INSN
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_NONE
SYM_CODE_END(tramp_vectors)

SYM_CODE_START_LOCAL(tramp_exit)
	tramp_unmap_kernel	x29
	mrs	x29, far_el1			// restore x29
	eret
	sb
SYM_CODE_END(tramp_exit)
	.popsection				// .entry.tramp.text
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
 * Exception vectors for spectre mitigations on entry from EL1 when
 * kpti is not in use.
 */
	.macro generate_el1_vector, bhb
.Lvector_start\@:
	kernel_ventry	1, t, 64, sync		// Synchronous EL1t
	kernel_ventry	1, t, 64, irq		// IRQ EL1t
	kernel_ventry	1, t, 64, fiq		// FIQ EL1t
	kernel_ventry	1, t, 64, error		// Error EL1t

	kernel_ventry	1, h, 64, sync		// Synchronous EL1h
	kernel_ventry	1, h, 64, irq		// IRQ EL1h
	kernel_ventry	1, h, 64, fiq		// FIQ EL1h
	kernel_ventry	1, h, 64, error		// Error EL1h

	.rept	4
	tramp_ventry	.Lvector_start\@, 64, 0, \bhb
	.endr
	.rept	4
	tramp_ventry	.Lvector_start\@, 32, 0, \bhb
	.endr
	.endm

/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */
	.pushsection ".entry.text", "ax"
	.align	11
SYM_CODE_START(__bp_harden_el1_vectors)
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	generate_el1_vector	bhb=BHB_MITIGATION_LOOP
	generate_el1_vector	bhb=BHB_MITIGATION_FW
	generate_el1_vector	bhb=BHB_MITIGATION_INSN
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
SYM_CODE_END(__bp_harden_el1_vectors)
	.popsection

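/*
 * cpu_switch_to() below spills the callee-saved state into the previous
 * task's thread.cpu_context and reloads it from the next task's. A rough
 * C-level sketch of the record it reads and writes (illustrative only;
 * the authoritative layout is struct cpu_context in <asm/processor.h>,
 * reached here via the THREAD_CPU_CONTEXT asm-offset):
 *
 *	struct cpu_context {
 *		unsigned long x19, x20, x21, x22, x23;
 *		unsigned long x24, x25, x26, x27, x28;
 *		unsigned long fp, sp, pc;	// pc holds the saved lr
 *	};
 */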
/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *	x0 = previous task_struct (must be preserved across the switch)
 *	x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
SYM_FUNC_START(cpu_switch_to)
	save_and_disable_daif x11
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	msr	sp_el0, x1
	ptrauth_keys_install_kernel x1, x8, x9, x10
	scs_save x0
	scs_load_current
	restore_irq x11
	ret
SYM_FUNC_END(cpu_switch_to)
NOKPROBE(cpu_switch_to)

/*
 * This is how we return from a fork.
 */
SYM_CODE_START(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_current_task tsk
	mov	x0, sp
	bl	asm_exit_to_user_mode
	b	ret_to_user
SYM_CODE_END(ret_from_fork)
NOKPROBE(ret_from_fork)

/*
 * void call_on_irq_stack(struct pt_regs *regs,
 *			  void (*func)(struct pt_regs *));
 *
 * Calls func(regs) using this CPU's irq stack and shadow irq stack.
 */
SYM_FUNC_START(call_on_irq_stack)
	save_and_disable_daif x9
#ifdef CONFIG_SHADOW_CALL_STACK
	get_current_task x16
	scs_save x16
	ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x17
#endif

	/* Create a frame record to save our LR and SP (implicit in FP) */
	stp	x29, x30, [sp, #-16]!
	mov	x29, sp

	ldr_this_cpu x16, irq_stack_ptr, x17

	/* Move to the new stack and call the function there */
	add	sp, x16, #IRQ_STACK_SIZE
	restore_irq x9
	blr	x1

	save_and_disable_daif x9
	/*
	 * Restore the SP from the FP, and restore the FP and LR from the frame
	 * record.
	 */
	mov	sp, x29
	ldp	x29, x30, [sp], #16
	scs_load_current
	restore_irq x9
	ret
SYM_FUNC_END(call_on_irq_stack)
NOKPROBE(call_on_irq_stack)

#ifdef CONFIG_ARM_SDE_INTERFACE

#include <asm/sdei.h>
#include <uapi/linux/arm_sdei.h>

.macro sdei_handler_exit exit_mode
	/* On success, this call never returns... */
	cmp	\exit_mode, #SDEI_EXIT_SMC
	b.ne	99f
	smc	#0
	b	.
99:	hvc	#0
	b	.
.endm

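/*
 * Note on sdei_handler_exit above: the COMPLETE/COMPLETE_AND_RESUME call
 * is issued through whichever conduit (SMC or HVC) is passed in
 * \exit_mode, which the callers take from sdei_exit_mode as recorded by
 * the SDEI code at probe time. On success the firmware does not return
 * here at all: it resumes the interrupted context (or, for
 * COMPLETE_AND_RESUME, the address supplied in x1), hence the "never
 * returns" comment and the trailing `b .` traps.
 */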
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * The regular SDEI entry point may have been unmapped along with the rest of
 * the kernel. This trampoline restores the kernel mapping to make the x1
 * memory argument accessible.
 *
 * This clobbers x4, __sdei_handler() will restore this from firmware's
 * copy.
 */
.pushsection ".entry.tramp.text", "ax"
SYM_CODE_START(__sdei_asm_entry_trampoline)
	mrs	x4, ttbr1_el1
	tbz	x4, #USER_ASID_BIT, 1f

	tramp_map_kernel tmp=x4
	isb
	mov	x4, xzr

	/*
	 * Remember whether to unmap the kernel on exit.
	 */
1:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
	tramp_data_read_var	x4, __sdei_asm_handler
	br	x4
SYM_CODE_END(__sdei_asm_entry_trampoline)
NOKPROBE(__sdei_asm_entry_trampoline)

/*
 * Make the exit call and restore the original ttbr1_el1
 *
 * x0 & x1: setup for the exit API call
 * x2: exit_mode
 * x4: struct sdei_registered_event argument from registration time.
 */
SYM_CODE_START(__sdei_asm_exit_trampoline)
	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
	cbnz	x4, 1f

	tramp_unmap_kernel	tmp=x4

1:	sdei_handler_exit exit_mode=x2
SYM_CODE_END(__sdei_asm_exit_trampoline)
NOKPROBE(__sdei_asm_exit_trampoline)
.popsection		// .entry.tramp.text
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
 * Software Delegated Exception entry point.
 *
 * x0: Event number
 * x1: struct sdei_registered_event argument from registration time.
 * x2: interrupted PC
 * x3: interrupted PSTATE
 * x4: maybe clobbered by the trampoline
 *
 * Firmware has preserved x0->x17 for us, we must save/restore the rest to
 * follow SMC-CC. We save (or retrieve) all the registers as the handler may
 * want them.
 */
SYM_CODE_START(__sdei_asm_handler)
	stp	x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
	stp	x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
	stp	x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
	stp	x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
	stp	x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
	stp	x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
	stp	x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
	stp	x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
	stp	x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
	stp	x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
	stp	x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
	stp	x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
	stp	x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
	stp	x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
	mov	x4, sp
	stp	lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]

	mov	x19, x1

	/* Store the registered-event for crash_smp_send_stop() */
	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
	cbnz	w4, 1f
	adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6
	b	2f
1:	adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6
2:	str	x19, [x5]

	/*
	 * entry.S may have been using sp as a scratch register, find whether
	 * this is a normal or critical event and switch to the appropriate
	 * stack for this CPU.
	 */
	cbnz	w4, 1f
	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
	b	2f
1:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
2:	mov	x6, #SDEI_STACK_SIZE
	add	x5, x5, x6
	mov	sp, x5

#ifdef CONFIG_SHADOW_CALL_STACK
	/* Use a separate shadow call stack for normal and critical events */
	cbnz	w4, 3f
	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal_ptr, tmp=x6
	b	4f
3:	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical_ptr, tmp=x6
4:
#endif

	/*
	 * We may have interrupted userspace, or a guest, or exit-from or
	 * return-to either of these. We can't trust sp_el0, restore it.
	 */
	mrs	x28, sp_el0
	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1
	msr	sp_el0, x0

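	/*
	 * The check below compares the interrupted PSTATE's M[3:2] field
	 * (the exception level the event preempted, passed in x3) against
	 * CurrentEL, which also reports the EL in bits [3:2]. If they match,
	 * the event interrupted the kernel, so the interrupted fp/pc are used
	 * to seed a frame record that lets the unwinder walk back into the
	 * interrupted context; otherwise (userspace or a guest) the record is
	 * terminated with zeros.
	 */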
	/* If we interrupted the kernel point to the previous stack/frame. */
	and	x0, x3, #0xc
	mrs	x1, CurrentEL
	cmp	x0, x1
	csel	x29, x29, xzr, eq	// fp, or zero
	csel	x4, x2, xzr, eq		// elr, or zero

	stp	x29, x4, [sp, #-16]!
	mov	x29, sp

	add	x0, x19, #SDEI_EVENT_INTREGS
	mov	x1, x19
	bl	__sdei_handler

	msr	sp_el0, x28
	/* restore regs >x17 that we clobbered */
	mov	x4, x19		// keep x4 for __sdei_asm_exit_trampoline
	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
	mov	sp, x1

	mov	x1, x0			// address to complete_and_resume
	/* x0 = (x0 <= SDEI_EV_FAILED) ?
	 * EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME
	 */
	cmp	x0, #SDEI_EV_FAILED
	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
	csel	x0, x2, x3, ls

	ldr_l	x2, sdei_exit_mode

	/* Clear the registered-event seen by crash_smp_send_stop() */
	ldrb	w3, [x4, #SDEI_EVENT_PRIORITY]
	cbnz	w3, 1f
	adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6
	b	2f
1:	adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6
2:	str	xzr, [x5]

alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
	sdei_handler_exit exit_mode=x2
alternative_else_nop_endif

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline
	br	x5
#endif
SYM_CODE_END(__sdei_asm_handler)
NOKPROBE(__sdei_asm_handler)

SYM_CODE_START(__sdei_handler_abort)
	mov_q	x0, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
	adr	x1, 1f
	ldr_l	x2, sdei_exit_mode
	sdei_handler_exit exit_mode=x2
	// exit the handler and jump to the next instruction.
	// Exit will stomp x0-x17, PSTATE, ELR_ELx, and SPSR_ELx.
1:	ret
SYM_CODE_END(__sdei_handler_abort)
NOKPROBE(__sdei_handler_abort)
#endif /* CONFIG_ARM_SDE_INTERFACE */