Path: blob/master/tools/testing/selftests/kvm/include/arm64/processor.h
49022 views
/* SPDX-License-Identifier: GPL-2.0 */1/*2* AArch64 processor specific defines3*4* Copyright (C) 2018, Red Hat, Inc.5*/6#ifndef SELFTEST_KVM_PROCESSOR_H7#define SELFTEST_KVM_PROCESSOR_H89#include "kvm_util.h"10#include "ucall_common.h"1112#include <linux/stringify.h>13#include <linux/types.h>14#include <asm/brk-imm.h>15#include <asm/esr.h>16#include <asm/sysreg.h>171819#define ARM64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \20KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))2122/*23* KVM_ARM64_SYS_REG(sys_reg_id): Helper macro to convert24* SYS_* register definitions in asm/sysreg.h to use in KVM25* calls such as vcpu_get_reg() and vcpu_set_reg().26*/27#define KVM_ARM64_SYS_REG(sys_reg_id) \28ARM64_SYS_REG(sys_reg_Op0(sys_reg_id), \29sys_reg_Op1(sys_reg_id), \30sys_reg_CRn(sys_reg_id), \31sys_reg_CRm(sys_reg_id), \32sys_reg_Op2(sys_reg_id))3334/*35* Default MAIR36* index attribute37* DEVICE_nGnRnE 0 0000:000038* DEVICE_nGnRE 1 0000:010039* DEVICE_GRE 2 0000:110040* NORMAL_NC 3 0100:010041* NORMAL 4 1111:111142* NORMAL_WT 5 1011:101143*/4445/* Linux doesn't use these memory types, so let's define them. */46#define MAIR_ATTR_DEVICE_GRE UL(0x0c)47#define MAIR_ATTR_NORMAL_WT UL(0xbb)4849#define MT_DEVICE_nGnRnE 050#define MT_DEVICE_nGnRE 151#define MT_DEVICE_GRE 252#define MT_NORMAL_NC 353#define MT_NORMAL 454#define MT_NORMAL_WT 55556#define DEFAULT_MAIR_EL1 \57(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) | \58MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) | \59MAIR_ATTRIDX(MAIR_ATTR_DEVICE_GRE, MT_DEVICE_GRE) | \60MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) | \61MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) | \62MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT))6364/* TCR_EL1 specific flags */65#define TCR_T0SZ_OFFSET 066#define TCR_T0SZ(x) ((UL(64) - (x)) << TCR_T0SZ_OFFSET)6768#define TCR_IRGN0_SHIFT 869#define TCR_IRGN0_MASK (UL(3) << TCR_IRGN0_SHIFT)70#define TCR_IRGN0_NC (UL(0) << TCR_IRGN0_SHIFT)71#define TCR_IRGN0_WBWA (UL(1) << TCR_IRGN0_SHIFT)72#define TCR_IRGN0_WT (UL(2) << TCR_IRGN0_SHIFT)73#define TCR_IRGN0_WBnWA (UL(3) << TCR_IRGN0_SHIFT)7475#define TCR_ORGN0_SHIFT 1076#define TCR_ORGN0_MASK (UL(3) << TCR_ORGN0_SHIFT)77#define TCR_ORGN0_NC (UL(0) << TCR_ORGN0_SHIFT)78#define TCR_ORGN0_WBWA (UL(1) << TCR_ORGN0_SHIFT)79#define TCR_ORGN0_WT (UL(2) << TCR_ORGN0_SHIFT)80#define TCR_ORGN0_WBnWA (UL(3) << TCR_ORGN0_SHIFT)8182#define TCR_SH0_SHIFT 1283#define TCR_SH0_MASK (UL(3) << TCR_SH0_SHIFT)84#define TCR_SH0_INNER (UL(3) << TCR_SH0_SHIFT)8586#define TCR_TG0_SHIFT 1487#define TCR_TG0_MASK (UL(3) << TCR_TG0_SHIFT)88#define TCR_TG0_4K (UL(0) << TCR_TG0_SHIFT)89#define TCR_TG0_64K (UL(1) << TCR_TG0_SHIFT)90#define TCR_TG0_16K (UL(2) << TCR_TG0_SHIFT)9192#define TCR_IPS_SHIFT 3293#define TCR_IPS_MASK (UL(7) << TCR_IPS_SHIFT)94#define TCR_IPS_52_BITS (UL(6) << TCR_IPS_SHIFT)95#define TCR_IPS_48_BITS (UL(5) << TCR_IPS_SHIFT)96#define TCR_IPS_40_BITS (UL(2) << TCR_IPS_SHIFT)97#define TCR_IPS_36_BITS (UL(1) << TCR_IPS_SHIFT)9899#define TCR_HA (UL(1) << 39)100#define TCR_DS (UL(1) << 59)101102/*103* AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).104*/105#define PTE_ATTRINDX(t) ((t) << 2)106#define PTE_ATTRINDX_MASK GENMASK(4, 2)107#define PTE_ATTRINDX_SHIFT 2108109#define PTE_VALID BIT(0)110#define PGD_TYPE_TABLE BIT(1)111#define PUD_TYPE_TABLE BIT(1)112#define PMD_TYPE_TABLE BIT(1)113#define PTE_TYPE_PAGE BIT(1)114115#define PTE_SHARED (UL(3) << 8) /* SH[1:0], inner shareable */116#define PTE_AF BIT(10)117118#define PTE_ADDR_MASK(page_shift) GENMASK(47, (page_shift))119#define PTE_ADDR_51_48 GENMASK(15, 12)120#define PTE_ADDR_51_48_SHIFT 12121#define PTE_ADDR_MASK_LPA2(page_shift) GENMASK(49, (page_shift))122#define PTE_ADDR_51_50_LPA2 GENMASK(9, 8)123#define PTE_ADDR_51_50_LPA2_SHIFT 8124125void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init);126struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,127struct kvm_vcpu_init *init, void *guest_code);128129struct ex_regs {130u64 regs[31];131u64 sp;132u64 pc;133u64 pstate;134};135136#define VECTOR_NUM 16137138enum {139VECTOR_SYNC_CURRENT_SP0,140VECTOR_IRQ_CURRENT_SP0,141VECTOR_FIQ_CURRENT_SP0,142VECTOR_ERROR_CURRENT_SP0,143144VECTOR_SYNC_CURRENT,145VECTOR_IRQ_CURRENT,146VECTOR_FIQ_CURRENT,147VECTOR_ERROR_CURRENT,148149VECTOR_SYNC_LOWER_64,150VECTOR_IRQ_LOWER_64,151VECTOR_FIQ_LOWER_64,152VECTOR_ERROR_LOWER_64,153154VECTOR_SYNC_LOWER_32,155VECTOR_IRQ_LOWER_32,156VECTOR_FIQ_LOWER_32,157VECTOR_ERROR_LOWER_32,158};159160#define VECTOR_IS_SYNC(v) ((v) == VECTOR_SYNC_CURRENT_SP0 || \161(v) == VECTOR_SYNC_CURRENT || \162(v) == VECTOR_SYNC_LOWER_64 || \163(v) == VECTOR_SYNC_LOWER_32)164165void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k,166uint32_t *ipa16k, uint32_t *ipa64k);167168void vm_init_descriptor_tables(struct kvm_vm *vm);169void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu);170171typedef void(*handler_fn)(struct ex_regs *);172void vm_install_exception_handler(struct kvm_vm *vm,173int vector, handler_fn handler);174void vm_install_sync_handler(struct kvm_vm *vm,175int vector, int ec, handler_fn handler);176177uint64_t *virt_get_pte_hva_at_level(struct kvm_vm *vm, vm_vaddr_t gva, int level);178uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva);179180static inline void cpu_relax(void)181{182asm volatile("yield" ::: "memory");183}184185#define isb() asm volatile("isb" : : : "memory")186#define dsb(opt) asm volatile("dsb " #opt : : : "memory")187#define dmb(opt) asm volatile("dmb " #opt : : : "memory")188189#define dma_wmb() dmb(oshst)190#define __iowmb() dma_wmb()191192#define dma_rmb() dmb(oshld)193194#define __iormb(v) \195({ \196unsigned long tmp; \197\198dma_rmb(); \199\200/* \201* Courtesy of arch/arm64/include/asm/io.h: \202* Create a dummy control dependency from the IO read to any \203* later instructions. This ensures that a subsequent call \204* to udelay() will be ordered due to the ISB in __delay(). \205*/ \206asm volatile("eor %0, %1, %1\n" \207"cbnz %0, ." \208: "=r" (tmp) : "r" ((unsigned long)(v)) \209: "memory"); \210})211212static __always_inline void __raw_writel(u32 val, volatile void *addr)213{214asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr));215}216217static __always_inline u32 __raw_readl(const volatile void *addr)218{219u32 val;220asm volatile("ldr %w0, [%1]" : "=r" (val) : "r" (addr));221return val;222}223224static __always_inline void __raw_writeq(u64 val, volatile void *addr)225{226asm volatile("str %0, [%1]" : : "rZ" (val), "r" (addr));227}228229static __always_inline u64 __raw_readq(const volatile void *addr)230{231u64 val;232asm volatile("ldr %0, [%1]" : "=r" (val) : "r" (addr));233return val;234}235236#define writel_relaxed(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))237#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })238#define writeq_relaxed(v,c) ((void)__raw_writeq((__force u64)cpu_to_le64(v),(c)))239#define readq_relaxed(c) ({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })240241#define writel(v,c) ({ __iowmb(); writel_relaxed((v),(c));})242#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(__v); __v; })243#define writeq(v,c) ({ __iowmb(); writeq_relaxed((v),(c));})244#define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(__v); __v; })245246247static inline void local_irq_enable(void)248{249asm volatile("msr daifclr, #3" : : : "memory");250}251252static inline void local_irq_disable(void)253{254asm volatile("msr daifset, #3" : : : "memory");255}256257static inline void local_serror_enable(void)258{259asm volatile("msr daifclr, #4" : : : "memory");260}261262static inline void local_serror_disable(void)263{264asm volatile("msr daifset, #4" : : : "memory");265}266267/**268* struct arm_smccc_res - Result from SMC/HVC call269* @a0-a3 result values from registers 0 to 3270*/271struct arm_smccc_res {272unsigned long a0;273unsigned long a1;274unsigned long a2;275unsigned long a3;276};277278/**279* smccc_hvc - Invoke a SMCCC function using the hvc conduit280* @function_id: the SMCCC function to be called281* @arg0-arg6: SMCCC function arguments, corresponding to registers x1-x7282* @res: pointer to write the return values from registers x0-x3283*284*/285void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1,286uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,287uint64_t arg6, struct arm_smccc_res *res);288289/**290* smccc_smc - Invoke a SMCCC function using the smc conduit291* @function_id: the SMCCC function to be called292* @arg0-arg6: SMCCC function arguments, corresponding to registers x1-x7293* @res: pointer to write the return values from registers x0-x3294*295*/296void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1,297uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,298uint64_t arg6, struct arm_smccc_res *res);299300/* Execute a Wait For Interrupt instruction. */301void wfi(void);302303void test_wants_mte(void);304void test_disable_default_vgic(void);305306bool vm_supports_el2(struct kvm_vm *vm);307308static inline bool test_supports_el2(void)309{310struct kvm_vm *vm = vm_create(1);311bool supported = vm_supports_el2(vm);312313kvm_vm_free(vm);314return supported;315}316317static inline bool vcpu_has_el2(struct kvm_vcpu *vcpu)318{319return vcpu->init.features[0] & BIT(KVM_ARM_VCPU_HAS_EL2);320}321322#define MAPPED_EL2_SYSREG(el2, el1) \323case SYS_##el1: \324if (vcpu_has_el2(vcpu)) \325alias = SYS_##el2; \326break327328329static __always_inline u64 ctxt_reg_alias(struct kvm_vcpu *vcpu, u32 encoding)330{331u32 alias = encoding;332333BUILD_BUG_ON(!__builtin_constant_p(encoding));334335switch (encoding) {336MAPPED_EL2_SYSREG(SCTLR_EL2, SCTLR_EL1);337MAPPED_EL2_SYSREG(CPTR_EL2, CPACR_EL1);338MAPPED_EL2_SYSREG(TTBR0_EL2, TTBR0_EL1);339MAPPED_EL2_SYSREG(TTBR1_EL2, TTBR1_EL1);340MAPPED_EL2_SYSREG(TCR_EL2, TCR_EL1);341MAPPED_EL2_SYSREG(VBAR_EL2, VBAR_EL1);342MAPPED_EL2_SYSREG(AFSR0_EL2, AFSR0_EL1);343MAPPED_EL2_SYSREG(AFSR1_EL2, AFSR1_EL1);344MAPPED_EL2_SYSREG(ESR_EL2, ESR_EL1);345MAPPED_EL2_SYSREG(FAR_EL2, FAR_EL1);346MAPPED_EL2_SYSREG(MAIR_EL2, MAIR_EL1);347MAPPED_EL2_SYSREG(TCR2_EL2, TCR2_EL1);348MAPPED_EL2_SYSREG(PIR_EL2, PIR_EL1);349MAPPED_EL2_SYSREG(PIRE0_EL2, PIRE0_EL1);350MAPPED_EL2_SYSREG(POR_EL2, POR_EL1);351MAPPED_EL2_SYSREG(AMAIR_EL2, AMAIR_EL1);352MAPPED_EL2_SYSREG(ELR_EL2, ELR_EL1);353MAPPED_EL2_SYSREG(SPSR_EL2, SPSR_EL1);354MAPPED_EL2_SYSREG(ZCR_EL2, ZCR_EL1);355MAPPED_EL2_SYSREG(CONTEXTIDR_EL2, CONTEXTIDR_EL1);356MAPPED_EL2_SYSREG(SCTLR2_EL2, SCTLR2_EL1);357MAPPED_EL2_SYSREG(CNTHCTL_EL2, CNTKCTL_EL1);358case SYS_SP_EL1:359if (!vcpu_has_el2(vcpu))360return ARM64_CORE_REG(sp_el1);361362alias = SYS_SP_EL2;363break;364default:365BUILD_BUG();366}367368return KVM_ARM64_SYS_REG(alias);369}370371void kvm_get_default_vcpu_target(struct kvm_vm *vm, struct kvm_vcpu_init *init);372373static inline unsigned int get_current_el(void)374{375return (read_sysreg(CurrentEL) >> 2) & 0x3;376}377378#define do_smccc(...) \379do { \380if (get_current_el() == 2) \381smccc_smc(__VA_ARGS__); \382else \383smccc_hvc(__VA_ARGS__); \384} while (0)385386#endif /* SELFTEST_KVM_PROCESSOR_H */387388389