Path: blob/master/tools/testing/selftests/kvm/arm64/at.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * at - Test for KVM's AT emulation in the EL2&0 and EL1&0 translation regimes.
 */
#include "kvm_util.h"
#include "processor.h"
#include "test_util.h"
#include "ucall.h"

#include <asm/sysreg.h>

#define TEST_ADDR	0x80000000

enum {
	CLEAR_ACCESS_FLAG,
	TEST_ACCESS_FLAG,
};

static u64 *ptep_hva;

/*
 * With E2H set, the _EL1 accessors reach EL2's registers while the _EL12
 * accessors reach the real EL1 registers, hence "copy EL2 to EL1".
 */
#define copy_el2_to_el1(reg) \
	write_sysreg_s(read_sysreg_s(SYS_##reg##_EL1), SYS_##reg##_EL12)

/* Yes, this is an ugly hack */
#define __at(op, addr)	write_sysreg_s(addr, op)

#define test_at_insn(op, expect_fault) \
do { \
	u64 par, fsc; \
	bool fault; \
\
	GUEST_SYNC(CLEAR_ACCESS_FLAG); \
\
	__at(OP_AT_##op, TEST_ADDR); \
	isb(); \
	par = read_sysreg(par_el1); \
\
	fault = par & SYS_PAR_EL1_F; \
	fsc = FIELD_GET(SYS_PAR_EL1_FST, par); \
\
	__GUEST_ASSERT((expect_fault) == fault, \
		       "AT "#op": %sexpected fault (par: %lx)", \
		       (expect_fault) ? "" : "un", par); \
	if ((expect_fault)) { \
		__GUEST_ASSERT(fsc == ESR_ELx_FSC_ACCESS_L(3), \
			       "AT "#op": expected access flag fault (par: %lx)", \
			       par); \
	} else { \
		GUEST_ASSERT_EQ(FIELD_GET(SYS_PAR_EL1_ATTR, par), MAIR_ATTR_NORMAL); \
		GUEST_ASSERT_EQ(FIELD_GET(SYS_PAR_EL1_SH, par), PTE_SHARED >> 8); \
		GUEST_ASSERT_EQ(par & SYS_PAR_EL1_PA, TEST_ADDR); \
		GUEST_SYNC(TEST_ACCESS_FLAG); \
	} \
} while (0)

static void test_at(bool expect_fault)
{
	test_at_insn(S1E2R, expect_fault);
	test_at_insn(S1E2W, expect_fault);

	/* Reuse the stage-1 MMU context from EL2 at EL1 */
	copy_el2_to_el1(SCTLR);
	copy_el2_to_el1(MAIR);
	copy_el2_to_el1(TCR);
	copy_el2_to_el1(TTBR0);
	copy_el2_to_el1(TTBR1);

	/* Disable stage-2 translation and enter a non-host context */
	write_sysreg(0, vtcr_el2);
	write_sysreg(0, vttbr_el2);
	sysreg_clear_set(hcr_el2, HCR_EL2_TGE | HCR_EL2_VM, 0);
	isb();

	test_at_insn(S1E1R, expect_fault);
	test_at_insn(S1E1W, expect_fault);
}

static void guest_code(void)
{
	/* Start with hardware access flag updates disabled: AT must fault */
	sysreg_clear_set(tcr_el1, TCR_HA, 0);
	isb();

	test_at(true);

	if (!SYS_FIELD_GET(ID_AA64MMFR1_EL1, HAFDBS, read_sysreg(id_aa64mmfr1_el1)))
		GUEST_DONE();

	/*
	 * KVM's software PTW makes the implementation choice that the AT
	 * instruction sets the access flag.
	 */
	sysreg_clear_set(tcr_el1, 0, TCR_HA);
	isb();
	test_at(false);

	GUEST_DONE();
}

static void handle_sync(struct kvm_vcpu *vcpu, struct ucall *uc)
{
	switch (uc->args[1]) {
	case CLEAR_ACCESS_FLAG:
		/*
		 * Delete + reinstall the memslot to invalidate stage-2
		 * mappings of the stage-1 page tables, forcing KVM to
		 * use the 'slow' AT emulation path.
		 *
		 * This and clearing the access flag from host userspace
		 * ensures that the access flag cannot be set speculatively
		 * and is reliably cleared at the time of the AT instruction.
		 */
		clear_bit(__ffs(PTE_AF), ptep_hva);
		vm_mem_region_reload(vcpu->vm, vcpu->vm->memslots[MEM_REGION_PT]);
		break;
	case TEST_ACCESS_FLAG:
		TEST_ASSERT(test_bit(__ffs(PTE_AF), ptep_hva),
			    "Expected access flag to be set (desc: %lu)", *ptep_hva);
		break;
	default:
		TEST_FAIL("Unexpected SYNC arg: %lu", uc->args[1]);
	}
}

static void run_test(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	while (true) {
		vcpu_run(vcpu);
		switch (get_ucall(vcpu, &uc)) {
		case UCALL_DONE:
			return;
		case UCALL_SYNC:
			handle_sync(vcpu, &uc);
			continue;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			return;
		default:
			TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
		}
	}
}

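/*
 * Host side: create a vCPU with the EL2 feature, identity-map a single page
 * at TEST_ADDR and keep a host pointer to its leaf (level 3) PTE so that
 * handle_sync() can clear and check the access flag from userspace.
 */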
*vm;149150TEST_REQUIRE(kvm_check_cap(KVM_CAP_ARM_EL2));151152vm = vm_create(1);153154kvm_get_default_vcpu_target(vm, &init);155init.features[0] |= BIT(KVM_ARM_VCPU_HAS_EL2);156vcpu = aarch64_vcpu_add(vm, 0, &init, guest_code);157kvm_arch_vm_finalize_vcpus(vm);158159virt_map(vm, TEST_ADDR, TEST_ADDR, 1);160ptep_hva = virt_get_pte_hva_at_level(vm, TEST_ADDR, 3);161run_test(vcpu);162163kvm_vm_free(vm);164return 0;165}166167168