/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020 - Google Inc
 * Author: Andrew Scull <[email protected]>
 */

#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_ptrauth.h>

	.text

SYM_FUNC_START(__host_exit)
	get_host_ctxt	x0, x1

	/* Store the host regs x2 and x3 */
	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(2)]

	/* Retrieve the host regs x0-x1 from the stack */
	ldp	x2, x3, [sp], #16	// x0, x1

	/* Store the host regs x0-x1 and x4-x17 */
	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x0, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x0, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x0, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x0, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x0, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x0, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x0, #CPU_XREG_OFFSET(16)]

	/* Store the host regs x18-x29, lr */
	save_callee_saved_regs x0

	/* Save the host context pointer in x29 across the function call */
	mov	x29, x0

#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
alternative_if_not ARM64_HAS_ADDRESS_AUTH
	b	__skip_pauth_save
alternative_else_nop_endif

alternative_if ARM64_KVM_PROTECTED_MODE
	/* Save kernel ptrauth keys. */
	add	x18, x29, #CPU_APIAKEYLO_EL1
	ptrauth_save_state x18, x19, x20

	/* Use hyp keys. */
	adr_this_cpu x18, kvm_hyp_ctxt, x19
	add	x18, x18, #CPU_APIAKEYLO_EL1
	ptrauth_restore_state x18, x19, x20
	isb
alternative_else_nop_endif
__skip_pauth_save:
#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */

	bl	handle_trap

__host_enter_restore_full:
	/* Restore kernel keys. */
#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
alternative_if_not ARM64_HAS_ADDRESS_AUTH
	b	__skip_pauth_restore
alternative_else_nop_endif

alternative_if ARM64_KVM_PROTECTED_MODE
	add	x18, x29, #CPU_APIAKEYLO_EL1
	ptrauth_restore_state x18, x19, x20
alternative_else_nop_endif
__skip_pauth_restore:
#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */

	/* Restore host regs x0-x17 */
	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x29, #CPU_XREG_OFFSET(6)]

	/* x0-x7 are used for panic arguments */
__host_enter_for_panic:
	ldp	x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]

	/* Restore host regs x18-x29, lr */
	restore_callee_saved_regs x29

	/* Do not touch any register after this! */
__host_enter_without_restoring:
	eret
	sb
SYM_FUNC_END(__host_exit)

/*
 * void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
 */
SYM_FUNC_START(__host_enter)
	mov	x29, x0
	b	__host_enter_restore_full
SYM_FUNC_END(__host_enter)

/*
 * void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
 *				  u64 elr, u64 par);
 */
SYM_FUNC_START(__hyp_do_panic)
	/* Prepare and exit to the host's panic function. */
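	/*
	 * SPSR_EL2 is set to EL1h with all of DAIF masked and ELR_EL2 to
	 * the host's nvhe_hyp_panic_handler (translated back to a kernel
	 * image address), so the final ERET on the host-enter path lands
	 * directly in the host's panic handler.
	 */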
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	adr_l	lr, nvhe_hyp_panic_handler
	hyp_kimg_va lr, x6
	msr	elr_el2, lr

	mov	x29, x0

#ifdef CONFIG_NVHE_EL2_DEBUG
	/* Ensure host stage-2 is disabled */
	mrs	x0, hcr_el2
	bic	x0, x0, #HCR_VM
	msr_hcr_el2 x0
	isb
	tlbi	vmalls12e1
	dsb	nsh
#endif

	/* Load the panic arguments into x0-x7 */
	mrs	x0, esr_el2
	mov	x4, x3
	mov	x3, x2
	hyp_pa	x3, x6
	get_vcpu_ptr x5, x6
	mrs	x6, far_el2
	mrs	x7, hpfar_el2

	/* Enter the host, conditionally restoring the host context. */
	cbz	x29, __host_enter_without_restoring
	b	__host_enter_for_panic
SYM_FUNC_END(__hyp_do_panic)

SYM_FUNC_START(__host_hvc)
	ldp	x0, x1, [sp]		// Don't fixup the stack yet

	/* No stub for you, sonny Jim */
alternative_if ARM64_KVM_PROTECTED_MODE
	b	__host_exit
alternative_else_nop_endif

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	__host_exit

	add	sp, sp, #16
	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there.
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	adr_l	x5, __kvm_handle_stub_hvc
	hyp_pa	x5, x6
	br	x5
SYM_FUNC_END(__host_hvc)

.macro host_el1_sync_vect
	.align 7
.L__vect_start\@:
	stp	x0, x1, [sp, #-16]!
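	/*
	 * Only host HVCs get a dedicated handler; any other exception
	 * class falls through to the generic __host_exit path.
	 */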
	mrs	x0, esr_el2
	ubfx	x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH
	cmp	x0, #ESR_ELx_EC_HVC64
	b.eq	__host_hvc
	b	__host_exit
.L__vect_end\@:
.if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
	.error "host_el1_sync_vect larger than vector entry"
.endif
.endm

.macro invalid_host_el2_vect
	.align 7

	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * nVHE hypervisor stacks are aligned so that the NVHE_STACK_SHIFT bit
	 * of SP should always be 1.
	 */
	add	sp, sp, x0		// sp' = sp + x0
	sub	x0, sp, x0		// x0' = sp' - x0 = (sp + x0) - x0 = sp
	tbz	x0, #NVHE_STACK_SHIFT, .L__hyp_sp_overflow\@
	sub	x0, sp, x0		// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0		// sp'' = sp' - x0 = (sp + x0) - x0 = sp

	/*
	 * The panic may not be clean if the exception is taken before the host
	 * context has been saved by __host_exit or after the hyp context has
	 * been partially clobbered by __host_enter.
	 */
	b	hyp_panic

.L__hyp_sp_overflow\@:
	/* Switch to the overflow stack */
	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0

	b	hyp_panic_bad_stack
	ASM_BUG()
.endm

.macro invalid_host_el1_vect
	.align 7
	mov	x0, xzr		/* restore_host = false */
	mrs	x1, spsr_el2
	mrs	x2, elr_el2
	mrs	x3, par_el1
	b	__hyp_do_panic
.endm

/*
 * The host vector does not use an ESB instruction in order to avoid consuming
 * SErrors that should only be consumed by the host. Guest entry is deferred by
 * __guest_enter if there are any pending asynchronous exceptions, so hyp will
 * always return to the host without having consumed host SErrors.
 *
 * CONFIG_KVM_INDIRECT_VECTORS is not applied to the host vectors because the
 * host knows about the EL2 vectors already, and there is no point in hiding
 * them.
 */
	.align 11
SYM_CODE_START(__kvm_hyp_host_vector)
	invalid_host_el2_vect			// Synchronous EL2t
	invalid_host_el2_vect			// IRQ EL2t
	invalid_host_el2_vect			// FIQ EL2t
	invalid_host_el2_vect			// Error EL2t

	invalid_host_el2_vect			// Synchronous EL2h
	invalid_host_el2_vect			// IRQ EL2h
	invalid_host_el2_vect			// FIQ EL2h
	invalid_host_el2_vect			// Error EL2h

	host_el1_sync_vect			// Synchronous 64-bit EL1/EL0
	invalid_host_el1_vect			// IRQ 64-bit EL1/EL0
	invalid_host_el1_vect			// FIQ 64-bit EL1/EL0
	invalid_host_el1_vect			// Error 64-bit EL1/EL0

	host_el1_sync_vect			// Synchronous 32-bit EL1/EL0
	invalid_host_el1_vect			// IRQ 32-bit EL1/EL0
	invalid_host_el1_vect			// FIQ 32-bit EL1/EL0
	invalid_host_el1_vect			// Error 32-bit EL1/EL0
SYM_CODE_END(__kvm_hyp_host_vector)

/*
 * Forward SMC with arguments in struct kvm_cpu_context, and
 * store the result into the same struct. Assumes SMCCC 1.2 or older.
 *
 * x0: struct kvm_cpu_context*
 */
SYM_CODE_START(__kvm_hyp_host_forward_smc)
	/*
	 * Use x18 to keep the pointer to the host context because
	 * x18 is callee-saved in SMCCC but not in AAPCS64.
	 */
	mov	x18, x0

	ldp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]

	smc	#0

	stp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
	stp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
	stp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]

	ret
SYM_CODE_END(__kvm_hyp_host_forward_smc)

/*
 * kvm_host_psci_cpu_entry is reached through a br instruction, so it must
 * start with a bti j landing pad: compilers (gcc and llvm) only insert
 * bti c, not bti j, for externally visible functions.
 */
SYM_CODE_START(kvm_host_psci_cpu_entry)
	bti j
	b __kvm_host_psci_cpu_entry
SYM_CODE_END(kvm_host_psci_cpu_entry)