/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <[email protected]>
 */

#include <linux/arm-smccc.h>
#include <linux/cfi_types.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/el2_setup.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

	.text
	.pushsection	.idmap.text, "ax"

	/* Vector tables must be 2^11 = 2048-byte aligned for VBAR_EL2. */
	.align	11

/*
 * Bootstrap EL2 vector table, installed while the hypervisor is being
 * initialized. Only the 64-bit EL1 synchronous entry (the init HVC) is
 * serviced; every other vector is '.', a branch-to-self, so an
 * unexpected exception parks the CPU rather than running stale code.
 */
SYM_CODE_START(__kvm_hyp_init)
	ventry	.			// Synchronous EL2t
	ventry	.			// IRQ EL2t
	ventry	.			// FIQ EL2t
	ventry	.			// Error EL2t

	ventry	.			// Synchronous EL2h
	ventry	.			// IRQ EL2h
	ventry	.			// FIQ EL2h
	ventry	.			// Error EL2h

	ventry	__do_hyp_init		// Synchronous 64-bit EL1
	ventry	.			// IRQ 64-bit EL1
	ventry	.			// FIQ 64-bit EL1
	ventry	.			// Error 64-bit EL1

	ventry	.			// Synchronous 32-bit EL1
	ventry	.			// IRQ 32-bit EL1
	ventry	.			// FIQ 32-bit EL1
	ventry	.			// Error 32-bit EL1

/*
 * Handler for the init-time HVC from the host.
 *
 * Only uses x0..x3 so as to not clobber callee-saved SMCCC registers.
 *
 * x0: SMCCC function ID
 * x1: struct kvm_nvhe_init_params PA
 */
__do_hyp_init:
	/* Check for a stub HVC call (stub hcall IDs live below HVC_STUB_HCALL_NR) */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.lo	__kvm_handle_stub_hvc

	/*
	 * Anything else must be the __kvm_hyp_init SMCCC function ID;
	 * mask off the SMCCC hint bits before comparing.
	 */
	bic	x0, x0, #ARM_SMCCC_CALL_HINTS
	mov	x3, #KVM_HOST_SMCCC_FUNC(__kvm_hyp_init)
	cmp	x0, x3
	b.eq	1f

	mov	x0, #SMCCC_RET_NOT_SUPPORTED
	eret

1:	mov	x0, x1			// x0 = init params PA, as ___kvm_hyp_init expects
	mov	x3, lr			// lr must survive the call; x3 is free here
	bl	___kvm_hyp_init		// Clobbers x0..x2
	mov	lr, x3

	/* Hello, World! */
	mov	x0, #SMCCC_RET_SUCCESS
	eret
SYM_CODE_END(__kvm_hyp_init)

/*
 * Initialize EL2 CPU state to sane values.
 *
 * HCR_EL2.E2H must have been initialized already.
 */
SYM_CODE_START_LOCAL(__kvm_init_el2_state)
	init_el2_state			// Clobbers x0..x2
	finalise_el2_state
	ret
SYM_CODE_END(__kvm_init_el2_state)

/*
 * Initialize the hypervisor in EL2.
 *
 * Only uses x0..x2 so as to not clobber callee-saved SMCCC registers
 * and leave x3 for the caller.
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START_LOCAL(___kvm_hyp_init)
	ldr	x1, [x0, #NVHE_INIT_STACK_HYP_VA]
	mov	sp, x1

	ldr	x1, [x0, #NVHE_INIT_MAIR_EL2]
	msr	mair_el2, x1

	ldr	x1, [x0, #NVHE_INIT_HCR_EL2]
	msr_hcr_el2 x1

	/* Did the params ask for an E2H (hVHE) configuration? */
	mov	x2, #HCR_E2H
	and	x2, x1, x2
	cbz	x2, 1f

	// hVHE: Replay the EL2 setup to account for the E2H bit
	// TPIDR_EL2 is used to preserve x0 across the macro maze...
	isb				// Make the HCR_EL2 write visible first
	msr	tpidr_el2, x0
	str	lr, [x0, #NVHE_INIT_TMP]

	bl	__kvm_init_el2_state

	mrs	x0, tpidr_el2		// Recover params PA and return address
	ldr	lr, [x0, #NVHE_INIT_TMP]

1:
	ldr	x1, [x0, #NVHE_INIT_TPIDR_EL2]
	msr	tpidr_el2, x1

	ldr	x1, [x0, #NVHE_INIT_VTTBR]
	msr	vttbr_el2, x1

	ldr	x1, [x0, #NVHE_INIT_VTCR]
	msr	vtcr_el2, x1

	/* Stage-1 page tables, with the CnP bit when the CPUs support it */
	ldr	x1, [x0, #NVHE_INIT_PGD_PA]
	phys_to_ttbr x2, x1
alternative_if ARM64_HAS_CNP
	orr	x2, x2, #TTBR_CNP_BIT
alternative_else_nop_endif
	msr	ttbr0_el2, x2

	ldr	x0, [x0, #NVHE_INIT_TCR_EL2]
	msr	tcr_el2, x0

	isb

	/* Invalidate the stale TLBs from Bootloader */
	tlbi	alle2
	tlbi	alle1
	dsb	sy

	/* Enable the MMU, with pointer auth / BTI bits when available */
	mov_q	x0, INIT_SCTLR_EL2_MMU_ON
alternative_if ARM64_HAS_ADDRESS_AUTH
	mov_q	x1, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
		     SCTLR_ELx_ENDA | SCTLR_ELx_ENDB)
	orr	x0, x0, x1
alternative_else_nop_endif

#ifdef CONFIG_ARM64_BTI_KERNEL
alternative_if ARM64_BTI
	orr	x0, x0, #SCTLR_EL2_BT
alternative_else_nop_endif
#endif /* CONFIG_ARM64_BTI_KERNEL */

	msr	sctlr_el2, x0
	isb				// Synchronize the SCTLR_EL2 write (MMU on)

	/* Set the host vector */
	ldr	x0, =__kvm_hyp_host_vector
	msr	vbar_el2, x0

	ret
SYM_CODE_END(___kvm_hyp_init)

/*
 * PSCI CPU_ON entry point
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START(kvm_hyp_cpu_entry)
	mov	x1, #1				// is_cpu_on = true
	b	__kvm_hyp_init_cpu
SYM_CODE_END(kvm_hyp_cpu_entry)

/*
 * PSCI CPU_SUSPEND / SYSTEM_SUSPEND entry point
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START(kvm_hyp_cpu_resume)
	mov	x1, #0				// is_cpu_on = false
	b	__kvm_hyp_init_cpu
SYM_CODE_END(kvm_hyp_cpu_resume)

/*
 * Common code for CPU entry points. Initializes EL2 state and
 * installs the hypervisor before handing over to a C handler.
 *
 * x0: struct kvm_nvhe_init_params PA
 * x1: bool is_cpu_on
 */
SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
	mov	x28, x0				// Stash arguments
	mov	x29, x1

	/* Check that the core was booted in EL2. */
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2
	b.eq	2f

	/* The core booted in EL1. KVM cannot be initialized on it. */
1:	wfe
	wfi
	b	1b

2:	msr	SPsel, #1			// We want to use SP_EL{1,2}

	init_el2_hcr	0

	bl	__kvm_init_el2_state

	/* Enable MMU, set vectors and stack. */
	mov	x0, x28
	bl	___kvm_hyp_init			// Clobbers x0..x2

	/* Leave idmap. */
	mov	x0, x29				// Pass is_cpu_on to the C handler
	ldr	x1, =kvm_host_psci_cpu_entry
	br	x1
SYM_CODE_END(__kvm_hyp_init_cpu)

/*
 * Stub hypercall dispatcher: HVC_SOFT_RESTART and HVC_RESET_VECTORS
 * both funnel into the common 'reset' path below; anything else
 * returns HVC_STUB_ERR.
 */
SYM_CODE_START(__kvm_handle_stub_hvc)
	/*
	 * __kvm_handle_stub_hvc called from __host_hvc through branch instruction(br) so
	 * we need bti j at beginning.
	 */
	bti	j
	cmp	x0, #HVC_SOFT_RESTART
	b.ne	1f

	/* This is where we're about to jump, staying at EL2 */
	msr	elr_el2, x1
	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT | PSR_MODE_EL2h)
	msr	spsr_el2, x0

	/* Shuffle the arguments, and don't come back */
	mov	x0, x2
	mov	x1, x3
	mov	x2, x4
	b	reset

1:	cmp	x0, #HVC_RESET_VECTORS
	b.ne	1f

	/*
	 * Set the HVC_RESET_VECTORS return code before entering the common
	 * path so that we do not clobber x0-x2 in case we are coming via
	 * HVC_SOFT_RESTART.
	 */
	mov	x0, xzr
reset:
	/* Reset kvm back to the hyp stub. */
	mov_q	x5, INIT_SCTLR_EL2_MMU_OFF
	pre_disable_mmu_workaround
	msr	sctlr_el2, x5
	isb

alternative_if ARM64_KVM_PROTECTED_MODE
	mov_q	x5, HCR_HOST_NVHE_FLAGS
	msr_hcr_el2 x5
alternative_else_nop_endif

	/* Install stub vectors */
	adr_l	x5, __hyp_stub_vectors
	msr	vbar_el2, x5
	eret

1:	/* Bad stub call */
	mov_q	x0, HVC_STUB_ERR
	eret

SYM_CODE_END(__kvm_handle_stub_hvc)

/*
 * void __pkvm_init_switch_pgd(phys_addr_t pgd, unsigned long sp,
 *			       void (*fn)(void));
 *
 * SYM_TYPED_FUNC_START() allows C to call this ID-mapped function indirectly
 * using a physical pointer without triggering a kCFI failure.
 */
SYM_TYPED_FUNC_START(__pkvm_init_switch_pgd)
	/* Turn the MMU off */
	pre_disable_mmu_workaround
	mrs	x3, sctlr_el2
	bic	x4, x3, #SCTLR_ELx_M		// Clear only the M bit; x3 keeps the old value
	msr	sctlr_el2, x4
	isb

	tlbi	alle2

	/* Install the new pgtables */
	phys_to_ttbr x5, x0
alternative_if ARM64_HAS_CNP
	orr	x5, x5, #TTBR_CNP_BIT
alternative_else_nop_endif
	msr	ttbr0_el2, x5

	/* Set the new stack pointer */
	mov	sp, x1

	/* And turn the MMU back on! */
	dsb	nsh
	isb
	set_sctlr_el2	x3			// Restore the saved SCTLR_EL2 (MMU back on)
	ret	x2				// Tail-"return" into fn; we never come back here
SYM_FUNC_END(__pkvm_init_switch_pgd)

	.popsection