Path: blob/master/tools/testing/selftests/kvm/arm64/set_id_regs.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * set_id_regs - Test for setting ID register from userspace.
 *
 * Copyright (c) 2023 Google LLC.
 *
 * Test that KVM supports setting ID registers from userspace and handles the
 * feature set correctly.
 */

#include <stdint.h>
#include "kvm_util.h"
#include "processor.h"
#include "test_util.h"
#include <linux/bitfield.h>

enum ftr_type {
	FTR_EXACT,			/* Use a predefined safe value */
	FTR_LOWER_SAFE,			/* Smaller value is safe */
	FTR_HIGHER_SAFE,		/* Bigger value is safe */
	FTR_HIGHER_OR_ZERO_SAFE,	/* Bigger value is safe, but 0 is biggest */
	FTR_END,			/* Mark the last ftr bits */
};

#define FTR_SIGNED	true	/* Value should be treated as signed */
#define FTR_UNSIGNED	false	/* Value should be treated as unsigned */

struct reg_ftr_bits {
	char *name;
	bool sign;
	enum ftr_type type;
	uint8_t shift;
	uint64_t mask;
	/*
	 * For FTR_EXACT, safe_val is used as the exact safe value.
	 * For FTR_LOWER_SAFE, safe_val is used as the minimal safe value.
	 */
	int64_t safe_val;
};

struct test_feature_reg {
	uint32_t reg;
	const struct reg_ftr_bits *ftr_bits;
};

#define __REG_FTR_BITS(NAME, SIGNED, TYPE, SHIFT, MASK, SAFE_VAL) \
	{ \
		.name = #NAME, \
		.sign = SIGNED, \
		.type = TYPE, \
		.shift = SHIFT, \
		.mask = MASK, \
		.safe_val = SAFE_VAL, \
	}

#define REG_FTR_BITS(type, reg, field, safe_val) \
	__REG_FTR_BITS(reg##_##field, FTR_UNSIGNED, type, reg##_##field##_SHIFT, \
		       reg##_##field##_MASK, safe_val)

#define S_REG_FTR_BITS(type, reg, field, safe_val) \
	__REG_FTR_BITS(reg##_##field, FTR_SIGNED, type, reg##_##field##_SHIFT, \
		       reg##_##field##_MASK, safe_val)

#define REG_FTR_END \
	{ \
		.type = FTR_END, \
	}

static const struct reg_ftr_bits ftr_id_aa64dfr0_el1[] = {
	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, DoubleLock, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, WRPs, 0),
	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, PMUVer, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, DebugVer, ID_AA64DFR0_EL1_DebugVer_IMP),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_dfr0_el1[] = {
	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_DFR0_EL1, PerfMon, ID_DFR0_EL1_PerfMon_PMUv3),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_DFR0_EL1, CopDbg, ID_DFR0_EL1_CopDbg_Armv8),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64isar0_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, RNDR, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, TLB, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, TS, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, FHM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, DP, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SM4, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SM3, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA3, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, RDM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, TME, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, ATOMIC, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, CRC32, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA2, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA1, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, AES, 0),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64isar1_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, LS64, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, XS, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, I8MM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, DGH, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, BF16, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, SPECRES, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, SB, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, FRINTTS, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, LRCPC, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, FCMA, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, JSCVT, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, DPB, 0),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64isar2_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR2_EL1, BC, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR2_EL1, RPRES, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR2_EL1, WFxT, 0),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64isar3_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR3_EL1, FPRCVT, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR3_EL1, LSFE, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR3_EL1, FAMINMAX, 0),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64pfr0_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, CSV3, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, CSV2, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, DIT, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, SEL2, 0),
	REG_FTR_BITS(FTR_EXACT, ID_AA64PFR0_EL1, GIC, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL3, 1),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL2, 1),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL1, 1),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL0, 1),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64pfr1_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR1_EL1, DF2, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR1_EL1, CSV2_frac, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR1_EL1, SSBS, ID_AA64PFR1_EL1_SSBS_NI),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR1_EL1, BT, 0),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64mmfr0_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, ECV, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, EXS, 0),
	REG_FTR_BITS(FTR_EXACT, ID_AA64MMFR0_EL1, TGRAN4_2, 1),
	REG_FTR_BITS(FTR_EXACT, ID_AA64MMFR0_EL1, TGRAN64_2, 1),
	REG_FTR_BITS(FTR_EXACT, ID_AA64MMFR0_EL1, TGRAN16_2, 1),
	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN4, 0),
	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN64, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN16, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, BIGENDEL0, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, SNSMEM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, BIGEND, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, PARANGE, 0),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64mmfr1_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, TIDCP1, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, AFP, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, HCX, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, ETS, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, TWED, 0),
	REG_FTR_BITS(FTR_HIGHER_SAFE, ID_AA64MMFR1_EL1, SpecSEI, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, PAN, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, LO, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, HPDS, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, HAFDBS, 0),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64mmfr2_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, E0PD, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, BBM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, TTL, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, AT, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, ST, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, VARange, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, IESB, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, LSM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, UAO, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, CnP, 0),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64mmfr3_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR3_EL1, S1POE, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR3_EL1, S1PIE, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR3_EL1, SCTLRX, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR3_EL1, TCRX, 0),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64zfr0_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, F64MM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, F32MM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, I8MM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, SM4, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, SHA3, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, BF16, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, BitPerm, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, AES, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, SVEver, 0),
	REG_FTR_END,
};

#define TEST_REG(id, table) \
	{ \
		.reg = id, \
		.ftr_bits = &((table)[0]), \
	}

static struct test_feature_reg test_regs[] = {
	TEST_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0_el1),
	TEST_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0_el1),
	TEST_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0_el1),
	TEST_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1_el1),
	TEST_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2_el1),
	TEST_REG(SYS_ID_AA64ISAR3_EL1, ftr_id_aa64isar3_el1),
	TEST_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0_el1),
	TEST_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1_el1),
	TEST_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0_el1),
	TEST_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1_el1),
	TEST_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2_el1),
	TEST_REG(SYS_ID_AA64MMFR3_EL1, ftr_id_aa64mmfr3_el1),
	TEST_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0_el1),
};

#define GUEST_REG_SYNC(id) GUEST_SYNC_ARGS(0, id, read_sysreg_s(id), 0, 0);

static void guest_code(void)
{
	GUEST_REG_SYNC(SYS_ID_AA64DFR0_EL1);
	GUEST_REG_SYNC(SYS_ID_DFR0_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64ISAR0_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64ISAR1_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64ISAR2_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64ISAR3_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64PFR0_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64PFR1_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64MMFR0_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64MMFR1_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64MMFR2_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64MMFR3_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64ZFR0_EL1);
	GUEST_REG_SYNC(SYS_MPIDR_EL1);
	GUEST_REG_SYNC(SYS_CLIDR_EL1);
	GUEST_REG_SYNC(SYS_CTR_EL0);
	GUEST_REG_SYNC(SYS_MIDR_EL1);
	GUEST_REG_SYNC(SYS_REVIDR_EL1);
	GUEST_REG_SYNC(SYS_AIDR_EL1);

	GUEST_DONE();
}

/* Return a safe value for the given ftr_bits and ftr value */
uint64_t get_safe_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
{
	uint64_t ftr_max = ftr_bits->mask >> ftr_bits->shift;

	TEST_ASSERT(ftr_max > 1, "This test doesn't support single bit features");

	if (ftr_bits->sign == FTR_UNSIGNED) {
		switch (ftr_bits->type) {
		case FTR_EXACT:
			ftr = ftr_bits->safe_val;
			break;
		case FTR_LOWER_SAFE:
			if (ftr > ftr_bits->safe_val)
				ftr--;
			break;
		case FTR_HIGHER_SAFE:
			if (ftr < ftr_max)
				ftr++;
			break;
		case FTR_HIGHER_OR_ZERO_SAFE:
			if (ftr == ftr_max)
				ftr = 0;
			else if (ftr != 0)
				ftr++;
			break;
		default:
			break;
		}
	} else if (ftr != ftr_max) {
		switch (ftr_bits->type) {
		case FTR_EXACT:
			ftr = ftr_bits->safe_val;
			break;
		case FTR_LOWER_SAFE:
			if (ftr > ftr_bits->safe_val)
				ftr--;
			break;
		case FTR_HIGHER_SAFE:
			if (ftr < ftr_max - 1)
				ftr++;
			break;
		case FTR_HIGHER_OR_ZERO_SAFE:
			if (ftr != 0 && ftr != ftr_max - 1)
				ftr++;
			break;
		default:
			break;
		}
	}

	return ftr;
}

/* Return an invalid value for the given ftr_bits and ftr value */
uint64_t get_invalid_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
{
	uint64_t ftr_max = ftr_bits->mask >> ftr_bits->shift;

	TEST_ASSERT(ftr_max > 1, "This test doesn't support single bit features");

	if (ftr_bits->sign == FTR_UNSIGNED) {
		switch (ftr_bits->type) {
		case FTR_EXACT:
			ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1);
			break;
		case FTR_LOWER_SAFE:
			ftr++;
			break;
		case FTR_HIGHER_SAFE:
			ftr--;
			break;
		case FTR_HIGHER_OR_ZERO_SAFE:
			if (ftr == 0)
				ftr = ftr_max;
			else
				ftr--;
			break;
		default:
			break;
		}
	} else if (ftr != ftr_max) {
		switch (ftr_bits->type) {
		case FTR_EXACT:
			ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1);
			break;
		case FTR_LOWER_SAFE:
			ftr++;
			break;
		case FTR_HIGHER_SAFE:
			ftr--;
			break;
		case FTR_HIGHER_OR_ZERO_SAFE:
			if (ftr == 0)
				ftr = ftr_max - 1;
			else
				ftr--;
			break;
		default:
			break;
		}
	} else {
		ftr = 0;
	}

	return ftr;
}

static uint64_t test_reg_set_success(struct kvm_vcpu *vcpu, uint64_t reg,
				     const struct reg_ftr_bits *ftr_bits)
{
	uint8_t shift = ftr_bits->shift;
	uint64_t mask = ftr_bits->mask;
	uint64_t val, new_val, ftr;

	val = vcpu_get_reg(vcpu, reg);
	ftr = (val & mask) >> shift;

	ftr = get_safe_value(ftr_bits, ftr);

	ftr <<= shift;
	val &= ~mask;
	val |= ftr;

	vcpu_set_reg(vcpu, reg, val);
	new_val = vcpu_get_reg(vcpu, reg);
	TEST_ASSERT_EQ(new_val, val);

	return new_val;
}

static void test_reg_set_fail(struct kvm_vcpu *vcpu, uint64_t reg,
			      const struct reg_ftr_bits *ftr_bits)
{
	uint8_t shift = ftr_bits->shift;
	uint64_t mask = ftr_bits->mask;
	uint64_t val, old_val, ftr;
	int r;

	val = vcpu_get_reg(vcpu, reg);
	ftr = (val & mask) >> shift;

	ftr = get_invalid_value(ftr_bits, ftr);

	old_val = val;
	ftr <<= shift;
	val &= ~mask;
	val |= ftr;

	r = __vcpu_set_reg(vcpu, reg, val);
	TEST_ASSERT(r < 0 && errno == EINVAL,
		    "Unexpected KVM_SET_ONE_REG error: r=%d, errno=%d", r, errno);

	val = vcpu_get_reg(vcpu, reg);
	TEST_ASSERT_EQ(val, old_val);
}

static uint64_t test_reg_vals[KVM_ARM_FEATURE_ID_RANGE_SIZE];

#define encoding_to_range_idx(encoding) \
	KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(encoding), sys_reg_Op1(encoding), \
				     sys_reg_CRn(encoding), sys_reg_CRm(encoding), \
				     sys_reg_Op2(encoding))

static void test_vm_ftr_id_regs(struct kvm_vcpu *vcpu, bool aarch64_only)
{
	uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
	struct reg_mask_range range = {
		.addr = (__u64)masks,
	};
	int ret;

	/* KVM should return error when reserved field is not zero */
	range.reserved[0] = 1;
	ret = __vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
	TEST_ASSERT(ret, "KVM doesn't check invalid parameters.");

	/* Get writable masks for feature ID registers */
	memset(range.reserved, 0, sizeof(range.reserved));
	vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);

	for (int i = 0; i < ARRAY_SIZE(test_regs); i++) {
		const struct reg_ftr_bits *ftr_bits = test_regs[i].ftr_bits;
		uint32_t reg_id = test_regs[i].reg;
		uint64_t reg = KVM_ARM64_SYS_REG(reg_id);
		int idx;

		/* Get the index to masks array for the idreg */
		idx = encoding_to_range_idx(reg_id);

		for (int j = 0; ftr_bits[j].type != FTR_END; j++) {
			/* Skip aarch32 reg on aarch64 only system, since they are RAZ/WI. */
			if (aarch64_only && sys_reg_CRm(reg_id) < 4) {
				ksft_test_result_skip("%s on AARCH64 only system\n",
						      ftr_bits[j].name);
				continue;
			}

			/* Make sure the feature field is writable */
			TEST_ASSERT_EQ(masks[idx] & ftr_bits[j].mask, ftr_bits[j].mask);

			test_reg_set_fail(vcpu, reg, &ftr_bits[j]);

			test_reg_vals[idx] = test_reg_set_success(vcpu, reg,
								  &ftr_bits[j]);

			ksft_test_result_pass("%s\n", ftr_bits[j].name);
		}
	}
}

#define MPAM_IDREG_TEST	6
static void test_user_set_mpam_reg(struct kvm_vcpu *vcpu)
{
	uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
	struct reg_mask_range range = {
		.addr = (__u64)masks,
	};
	uint64_t val;
	int idx, err;

	/*
	 * If ID_AA64PFR0.MPAM is _not_ officially modifiable and is zero,
	 * check that, if it can be set to 1 (i.e. it is supported by the
	 * hardware), it can't be set to other values.
	 */

	/* Get writable masks for feature ID registers */
	memset(range.reserved, 0, sizeof(range.reserved));
	vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);

	/* Writeable? Nothing to test! */
	idx = encoding_to_range_idx(SYS_ID_AA64PFR0_EL1);
	if ((masks[idx] & ID_AA64PFR0_EL1_MPAM_MASK) == ID_AA64PFR0_EL1_MPAM_MASK) {
		ksft_test_result_skip("ID_AA64PFR0_EL1.MPAM is officially writable, nothing to test\n");
		return;
	}

	/* Get the id register value */
	val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));

	/* Try to set MPAM=0. This should always be possible. */
	val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
	val |= FIELD_PREP(ID_AA64PFR0_EL1_MPAM_MASK, 0);
	err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), val);
	if (err)
		ksft_test_result_fail("ID_AA64PFR0_EL1.MPAM=0 was not accepted\n");
	else
		ksft_test_result_pass("ID_AA64PFR0_EL1.MPAM=0 worked\n");

	/* Try to set MPAM=1 */
	val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
	val |= FIELD_PREP(ID_AA64PFR0_EL1_MPAM_MASK, 1);
	err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), val);
	if (err)
		ksft_test_result_skip("ID_AA64PFR0_EL1.MPAM is not writable, nothing to test\n");
	else
		ksft_test_result_pass("ID_AA64PFR0_EL1.MPAM=1 was writable\n");

	/* Try to set MPAM=2 */
	val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
	val |= FIELD_PREP(ID_AA64PFR0_EL1_MPAM_MASK, 2);
	err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), val);
	if (err)
		ksft_test_result_pass("ID_AA64PFR0_EL1.MPAM not arbitrarily modifiable\n");
	else
		ksft_test_result_fail("ID_AA64PFR0_EL1.MPAM value should not be ignored\n");

	/* And again for ID_AA64PFR1_EL1.MPAM_frac */
	idx = encoding_to_range_idx(SYS_ID_AA64PFR1_EL1);
	if ((masks[idx] & ID_AA64PFR1_EL1_MPAM_frac_MASK) == ID_AA64PFR1_EL1_MPAM_frac_MASK) {
		ksft_test_result_skip("ID_AA64PFR1_EL1.MPAM_frac is officially writable, nothing to test\n");
		return;
	}

	/* Get the id register value */
	val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1));

	/* Try to set MPAM_frac=0. This should always be possible. */
	val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
	val |= FIELD_PREP(ID_AA64PFR1_EL1_MPAM_frac_MASK, 0);
	err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1), val);
	if (err)
		ksft_test_result_fail("ID_AA64PFR1_EL1.MPAM_frac=0 was not accepted\n");
	else
		ksft_test_result_pass("ID_AA64PFR1_EL1.MPAM_frac=0 worked\n");

	/* Try to set MPAM_frac=1 */
	val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
	val |= FIELD_PREP(ID_AA64PFR1_EL1_MPAM_frac_MASK, 1);
	err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1), val);
	if (err)
		ksft_test_result_skip("ID_AA64PFR1_EL1.MPAM_frac is not writable, nothing to test\n");
	else
		ksft_test_result_pass("ID_AA64PFR1_EL1.MPAM_frac=1 was writable\n");

	/* Try to set MPAM_frac=2 */
	val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
	val |= FIELD_PREP(ID_AA64PFR1_EL1_MPAM_frac_MASK, 2);
	err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1), val);
	if (err)
		ksft_test_result_pass("ID_AA64PFR1_EL1.MPAM_frac not arbitrarily modifiable\n");
	else
		ksft_test_result_fail("ID_AA64PFR1_EL1.MPAM_frac value should not be ignored\n");
}

#define MTE_IDREG_TEST 1
static void test_user_set_mte_reg(struct kvm_vcpu *vcpu)
{
	uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
	struct reg_mask_range range = {
		.addr = (__u64)masks,
	};
	uint64_t val;
	uint64_t mte;
	uint64_t mte_frac;
	int idx, err;

	val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1));
	mte = FIELD_GET(ID_AA64PFR1_EL1_MTE, val);
	if (!mte) {
		ksft_test_result_skip("MTE capability not supported, nothing to test\n");
		return;
	}

	/* Get writable masks for feature ID registers */
	memset(range.reserved, 0, sizeof(range.reserved));
	vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);

	idx = encoding_to_range_idx(SYS_ID_AA64PFR1_EL1);
	if ((masks[idx] & ID_AA64PFR1_EL1_MTE_frac_MASK) == ID_AA64PFR1_EL1_MTE_frac_MASK) {
		ksft_test_result_skip("ID_AA64PFR1_EL1.MTE_frac is officially writable, nothing to test\n");
		return;
	}

	/*
	 * When MTE is supported but MTE_ASYMM is not (ID_AA64PFR1_EL1.MTE == 2),
	 * ID_AA64PFR1_EL1.MTE_frac == 0xF indicates MTE_ASYNC is unsupported
	 * and MTE_frac == 0 indicates it is supported.
	 *
	 * As MTE_frac was previously unconditionally read as 0, check
	 * that setting it to 0 succeeds but does not change MTE_frac
	 * from unsupported (0xF) to supported (0).
	 */
	mte_frac = FIELD_GET(ID_AA64PFR1_EL1_MTE_frac, val);
	if (mte != ID_AA64PFR1_EL1_MTE_MTE2 ||
	    mte_frac != ID_AA64PFR1_EL1_MTE_frac_NI) {
		ksft_test_result_skip("MTE_ASYNC or MTE_ASYMM are supported, nothing to test\n");
		return;
	}

	/* Try to set MTE_frac=0. */
	val &= ~ID_AA64PFR1_EL1_MTE_frac_MASK;
	val |= FIELD_PREP(ID_AA64PFR1_EL1_MTE_frac_MASK, 0);
	err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1), val);
	if (err) {
		ksft_test_result_fail("ID_AA64PFR1_EL1.MTE_frac=0 was not accepted\n");
		return;
	}

	val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1));
	mte_frac = FIELD_GET(ID_AA64PFR1_EL1_MTE_frac, val);
	if (mte_frac == ID_AA64PFR1_EL1_MTE_frac_NI)
		ksft_test_result_pass("ID_AA64PFR1_EL1.MTE_frac=0 accepted and still 0xF\n");
	else
		ksft_test_result_pass("ID_AA64PFR1_EL1.MTE_frac no longer 0xF\n");
}

static void test_guest_reg_read(struct kvm_vcpu *vcpu)
{
	bool done = false;
	struct ucall uc;

	while (!done) {
		vcpu_run(vcpu);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			break;
		case UCALL_SYNC:
			/* Make sure the written values are seen by guest */
			TEST_ASSERT_EQ(test_reg_vals[encoding_to_range_idx(uc.args[2])],
				       uc.args[3]);
			break;
		case UCALL_DONE:
			done = true;
			break;
		default:
			TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
		}
	}
}

/* Politely lifted from arch/arm64/include/asm/cache.h */
/* Ctypen, bits[3(n - 1) + 2 : 3(n - 1)], for n = 1 to 7 */
#define CLIDR_CTYPE_SHIFT(level)	(3 * (level - 1))
#define CLIDR_CTYPE_MASK(level)		(7 << CLIDR_CTYPE_SHIFT(level))
#define CLIDR_CTYPE(clidr, level) \
	(((clidr) & CLIDR_CTYPE_MASK(level)) >> CLIDR_CTYPE_SHIFT(level))

static void test_clidr(struct kvm_vcpu *vcpu)
{
	uint64_t clidr;
	int level;

	clidr = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CLIDR_EL1));

	/* find the first empty level in the cache hierarchy */
	for (level = 1; level <= 7; level++) {
		if (!CLIDR_CTYPE(clidr, level))
			break;
	}

	/*
	 * If you have a mind-boggling 7 levels of cache, congratulations, you
	 * get to fix this.
	 */
	TEST_ASSERT(level <= 7, "can't find an empty level in cache hierarchy");

	/* stick in a unified cache level */
	clidr |= BIT(2) << CLIDR_CTYPE_SHIFT(level);

	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CLIDR_EL1), clidr);
	test_reg_vals[encoding_to_range_idx(SYS_CLIDR_EL1)] = clidr;
}

static void test_ctr(struct kvm_vcpu *vcpu)
{
	u64 ctr;

	ctr = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CTR_EL0));
	ctr &= ~CTR_EL0_DIC_MASK;
	if (ctr & CTR_EL0_IminLine_MASK)
		ctr--;

	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CTR_EL0), ctr);
	test_reg_vals[encoding_to_range_idx(SYS_CTR_EL0)] = ctr;
}

static void test_id_reg(struct kvm_vcpu *vcpu, u32 id)
{
	u64 val;

	val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(id));
	val++;
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(id), val);
	test_reg_vals[encoding_to_range_idx(id)] = val;
}

static void test_vcpu_ftr_id_regs(struct kvm_vcpu *vcpu)
{
	test_clidr(vcpu);
	test_ctr(vcpu);

	test_id_reg(vcpu, SYS_MPIDR_EL1);
	ksft_test_result_pass("%s\n", __func__);
}

static void test_vcpu_non_ftr_id_regs(struct kvm_vcpu *vcpu)
{
	test_id_reg(vcpu, SYS_MIDR_EL1);
	test_id_reg(vcpu, SYS_REVIDR_EL1);
	test_id_reg(vcpu, SYS_AIDR_EL1);

	ksft_test_result_pass("%s\n", __func__);
}

static void test_assert_id_reg_unchanged(struct kvm_vcpu *vcpu, uint32_t encoding)
{
	size_t idx = encoding_to_range_idx(encoding);
	uint64_t observed;

	observed = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(encoding));
	TEST_ASSERT_EQ(test_reg_vals[idx], observed);
}

static void test_reset_preserves_id_regs(struct kvm_vcpu *vcpu)
{
	/*
	 * Calls KVM_ARM_VCPU_INIT behind the scenes, which will do an
	 * architectural reset of the vCPU.
	 */
	aarch64_vcpu_setup(vcpu, NULL);

	for (int i = 0; i < ARRAY_SIZE(test_regs); i++)
		test_assert_id_reg_unchanged(vcpu, test_regs[i].reg);

	test_assert_id_reg_unchanged(vcpu, SYS_MPIDR_EL1);
	test_assert_id_reg_unchanged(vcpu, SYS_CLIDR_EL1);
	test_assert_id_reg_unchanged(vcpu, SYS_CTR_EL0);
	test_assert_id_reg_unchanged(vcpu, SYS_MIDR_EL1);
	test_assert_id_reg_unchanged(vcpu, SYS_REVIDR_EL1);
	test_assert_id_reg_unchanged(vcpu, SYS_AIDR_EL1);

	ksft_test_result_pass("%s\n", __func__);
}

int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	bool aarch64_only;
	uint64_t val, el0;
	int test_cnt, i, j;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES));
	TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_WRITABLE_IMP_ID_REGS));

	test_wants_mte();

	vm = vm_create(1);
	vm_enable_cap(vm, KVM_CAP_ARM_WRITABLE_IMP_ID_REGS, 0);
	vcpu = vm_vcpu_add(vm, 0, guest_code);
	kvm_arch_vm_finalize_vcpus(vm);

	/* Check for AARCH64 only system */
	val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
	el0 = FIELD_GET(ID_AA64PFR0_EL1_EL0, val);
	aarch64_only = (el0 == ID_AA64PFR0_EL1_EL0_IMP);

	ksft_print_header();

	test_cnt = 3 + MPAM_IDREG_TEST + MTE_IDREG_TEST;
	for (i = 0; i < ARRAY_SIZE(test_regs); i++)
		for (j = 0; test_regs[i].ftr_bits[j].type != FTR_END; j++)
			test_cnt++;

	ksft_set_plan(test_cnt);

	test_vm_ftr_id_regs(vcpu, aarch64_only);
	test_vcpu_ftr_id_regs(vcpu);
	test_vcpu_non_ftr_id_regs(vcpu);
	test_user_set_mpam_reg(vcpu);
	test_user_set_mte_reg(vcpu);

	test_guest_reg_read(vcpu);

	test_reset_preserves_id_regs(vcpu);

	kvm_vm_free(vm);

	ksft_finished();
}