Path: blob/master/tools/testing/selftests/kvm/x86/msrs_test.c
// SPDX-License-Identifier: GPL-2.0-only
#include <asm/msr-index.h>

#include <stdint.h>

#include "kvm_util.h"
#include "processor.h"

/* Use HYPERVISOR for MSRs that are emulated unconditionally (as is HYPERVISOR). */
#define X86_FEATURE_NONE	X86_FEATURE_HYPERVISOR

struct kvm_msr {
	const struct kvm_x86_cpu_feature feature;
	const struct kvm_x86_cpu_feature feature2;
	const char *name;
	const u64 reset_val;
	const u64 write_val;
	const u64 rsvd_val;
	const u32 index;
	const bool is_kvm_defined;
};

#define ____MSR_TEST(msr, str, val, rsvd, reset, feat, f2, is_kvm)	\
{									\
	.index = msr,							\
	.name = str,							\
	.write_val = val,						\
	.rsvd_val = rsvd,						\
	.reset_val = reset,						\
	.feature = X86_FEATURE_ ##feat,					\
	.feature2 = X86_FEATURE_ ##f2,					\
	.is_kvm_defined = is_kvm,					\
}

#define __MSR_TEST(msr, str, val, rsvd, reset, feat)			\
	____MSR_TEST(msr, str, val, rsvd, reset, feat, feat, false)

#define MSR_TEST_NON_ZERO(msr, val, rsvd, reset, feat)			\
	__MSR_TEST(msr, #msr, val, rsvd, reset, feat)

#define MSR_TEST(msr, val, rsvd, feat)					\
	__MSR_TEST(msr, #msr, val, rsvd, 0, feat)

#define MSR_TEST2(msr, val, rsvd, feat, f2)				\
	____MSR_TEST(msr, #msr, val, rsvd, 0, feat, f2, false)

/*
 * Note, use a page aligned value for the canonical value so that the value
 * is compatible with MSRs that use bits 11:0 for things other than addresses.
 */
static const u64 canonical_val = 0x123456789000ull;

/*
 * Arbitrary value with bits set in every byte, but not all bits set.  This is
 * also a non-canonical value, but that's coincidental (any 64-bit value with
 * an alternating 0s/1s pattern will be non-canonical).
 */
static const u64 u64_val = 0xaaaa5555aaaa5555ull;

#define MSR_TEST_CANONICAL(msr, feat)					\
	__MSR_TEST(msr, #msr, canonical_val, NONCANONICAL, 0, feat)

#define MSR_TEST_KVM(msr, val, rsvd, feat)				\
	____MSR_TEST(KVM_REG_ ##msr, #msr, val, rsvd, 0, feat, feat, true)

/*
 * The main struct must be scoped to a function due to the use of structures to
 * define features.  For the global structure, allocate enough space for the
 * foreseeable future without getting too ridiculous, to minimize maintenance
 * costs (bumping the array size every time an MSR is added is really annoying).
 */
static struct kvm_msr msrs[128];
static int idx;

static bool ignore_unsupported_msrs;

static u64 fixup_rdmsr_val(u32 msr, u64 want)
{
	/*
	 * AMD CPUs drop bits 63:32 on some MSRs that Intel CPUs support.  KVM
	 * is supposed to emulate that behavior based on guest vendor model
	 * (which is the same as the host vendor model for this test).
	 */
	if (!host_cpu_is_amd)
		return want;

	switch (msr) {
	case MSR_IA32_SYSENTER_ESP:
	case MSR_IA32_SYSENTER_EIP:
	case MSR_TSC_AUX:
		return want & GENMASK_ULL(31, 0);
	default:
		return want;
	}
}

static void __rdmsr(u32 msr, u64 want)
{
	u64 val;
	u8 vec;

	vec = rdmsr_safe(msr, &val);
	__GUEST_ASSERT(!vec, "Unexpected %s on RDMSR(0x%x)", ex_str(vec), msr);

	__GUEST_ASSERT(val == want, "Wanted 0x%lx from RDMSR(0x%x), got 0x%lx",
		       want, msr, val);
}

static void __wrmsr(u32 msr, u64 val)
{
	u8 vec;

	vec = wrmsr_safe(msr, val);
	__GUEST_ASSERT(!vec, "Unexpected %s on WRMSR(0x%x, 0x%lx)",
		       ex_str(vec), msr, val);
	__rdmsr(msr, fixup_rdmsr_val(msr, val));
}

static void guest_test_supported_msr(const struct kvm_msr *msr)
{
	__rdmsr(msr->index, msr->reset_val);
	__wrmsr(msr->index, msr->write_val);
	GUEST_SYNC(fixup_rdmsr_val(msr->index, msr->write_val));

	__rdmsr(msr->index, msr->reset_val);
}

static void guest_test_unsupported_msr(const struct kvm_msr *msr)
{
	u64 val;
	u8 vec;

	/*
	 * KVM's ABI with respect to ignore_msrs is a mess and largely beyond
	 * repair, just skip the unsupported MSR tests.
	 */
	if (ignore_unsupported_msrs)
		goto skip_wrmsr_gp;

	/*
	 * {S,U}_CET exist if IBT or SHSTK is supported, but with bits that are
	 * writable only if their associated feature is supported.  Skip the
	 * RDMSR #GP test if the secondary feature is supported, but perform
	 * the WRMSR #GP test as the to-be-written value is tied to the primary
	 * feature.  For all other MSRs, simply do nothing.
	 */
	if (this_cpu_has(msr->feature2)) {
		if (msr->index != MSR_IA32_U_CET &&
		    msr->index != MSR_IA32_S_CET)
			goto skip_wrmsr_gp;

		goto skip_rdmsr_gp;
	}

	vec = rdmsr_safe(msr->index, &val);
	__GUEST_ASSERT(vec == GP_VECTOR, "Wanted #GP on RDMSR(0x%x), got %s",
		       msr->index, ex_str(vec));

skip_rdmsr_gp:
	vec = wrmsr_safe(msr->index, msr->write_val);
	__GUEST_ASSERT(vec == GP_VECTOR, "Wanted #GP on WRMSR(0x%x, 0x%lx), got %s",
		       msr->index, msr->write_val, ex_str(vec));

skip_wrmsr_gp:
	GUEST_SYNC(0);
}

void guest_test_reserved_val(const struct kvm_msr *msr)
{
	/* Skip reserved value checks as well, ignore_msrs is truly a mess. */
	if (ignore_unsupported_msrs)
		return;

	/*
	 * If the CPU will truncate the written value (e.g. SYSENTER on AMD),
	 * expect success and a truncated value, not #GP.
	 */
	if (!this_cpu_has(msr->feature) ||
	    msr->rsvd_val == fixup_rdmsr_val(msr->index, msr->rsvd_val)) {
		u8 vec = wrmsr_safe(msr->index, msr->rsvd_val);

		__GUEST_ASSERT(vec == GP_VECTOR,
			       "Wanted #GP on WRMSR(0x%x, 0x%lx), got %s",
			       msr->index, msr->rsvd_val, ex_str(vec));
	} else {
		__wrmsr(msr->index, msr->rsvd_val);
		__wrmsr(msr->index, msr->reset_val);
	}
}

static void guest_main(void)
{
	/*
	 * Loop forever; the host selects the MSR under test by updating and
	 * syncing 'idx' before each run, and ends the test by destroying the
	 * VM.
	 */
	for (;;) {
		const struct kvm_msr *msr = &msrs[READ_ONCE(idx)];

		if (this_cpu_has(msr->feature))
			guest_test_supported_msr(msr);
		else
			guest_test_unsupported_msr(msr);

		if (msr->rsvd_val)
			guest_test_reserved_val(msr);

		GUEST_SYNC(msr->reset_val);
	}
}

static bool has_one_reg;
static bool use_one_reg;

#define KVM_X86_MAX_NR_REGS	1

static bool vcpu_has_reg(struct kvm_vcpu *vcpu, u64 reg)
{
	struct {
		struct kvm_reg_list list;
		u64 regs[KVM_X86_MAX_NR_REGS];
	} regs = {};
	int r, i;

	/*
	 * If KVM_GET_REG_LIST succeeds with n=0, i.e. there are no supported
	 * regs, then the vCPU obviously doesn't support the reg.
	 */
	r = __vcpu_ioctl(vcpu, KVM_GET_REG_LIST, &regs.list);
	if (!r)
		return false;

	TEST_ASSERT_EQ(errno, E2BIG);

	/*
	 * KVM x86 is expected to support enumerating a relatively small number
	 * of regs.  The majority of registers supported by KVM_{G,S}ET_ONE_REG
	 * are enumerated via other ioctls, e.g. KVM_GET_MSR_INDEX_LIST.  For
	 * simplicity, hardcode the maximum number of regs and manually update
	 * the test as necessary.
	 */
	TEST_ASSERT(regs.list.n <= KVM_X86_MAX_NR_REGS,
		    "KVM reports %llu regs, test expects at most %u regs, stale test?",
		    regs.list.n, KVM_X86_MAX_NR_REGS);

	vcpu_ioctl(vcpu, KVM_GET_REG_LIST, &regs.list);
	for (i = 0; i < regs.list.n; i++) {
		if (regs.regs[i] == reg)
			return true;
	}

	return false;
}

static void host_test_kvm_reg(struct kvm_vcpu *vcpu)
{
	bool has_reg = vcpu_cpuid_has(vcpu, msrs[idx].feature);
	u64 reset_val = msrs[idx].reset_val;
	u64 write_val = msrs[idx].write_val;
	u64 rsvd_val = msrs[idx].rsvd_val;
	u32 reg = msrs[idx].index;
	u64 val;
	int r;

	if (!use_one_reg)
		return;

	TEST_ASSERT_EQ(vcpu_has_reg(vcpu, KVM_X86_REG_KVM(reg)), has_reg);

	if (!has_reg) {
		r = __vcpu_get_reg(vcpu, KVM_X86_REG_KVM(reg), &val);
		TEST_ASSERT(r && errno == EINVAL,
			    "Expected failure on get_reg(0x%x)", reg);
		rsvd_val = 0;
		goto out;
	}

	val = vcpu_get_reg(vcpu, KVM_X86_REG_KVM(reg));
	TEST_ASSERT(val == reset_val, "Wanted 0x%lx from get_reg(0x%x), got 0x%lx",
		    reset_val, reg, val);

	vcpu_set_reg(vcpu, KVM_X86_REG_KVM(reg), write_val);
	val = vcpu_get_reg(vcpu, KVM_X86_REG_KVM(reg));
	TEST_ASSERT(val == write_val, "Wanted 0x%lx from get_reg(0x%x), got 0x%lx",
		    write_val, reg, val);

out:
	r = __vcpu_set_reg(vcpu, KVM_X86_REG_KVM(reg), rsvd_val);
	TEST_ASSERT(r, "Expected failure on set_reg(0x%x, 0x%lx)", reg, rsvd_val);
}

static void host_test_msr(struct kvm_vcpu *vcpu, u64 guest_val)
{
	u64 reset_val = msrs[idx].reset_val;
	u32 msr = msrs[idx].index;
	u64 val;

	if (!kvm_cpu_has(msrs[idx].feature))
		return;

	val = vcpu_get_msr(vcpu, msr);
	TEST_ASSERT(val == guest_val, "Wanted 0x%lx from get_msr(0x%x), got 0x%lx",
		    guest_val, msr, val);

	if (use_one_reg)
		vcpu_set_reg(vcpu, KVM_X86_REG_MSR(msr), reset_val);
	else
		vcpu_set_msr(vcpu, msr, reset_val);

	val = vcpu_get_msr(vcpu, msr);
	TEST_ASSERT(val == reset_val, "Wanted 0x%lx from get_msr(0x%x), got 0x%lx",
		    reset_val, msr, val);

	if (!has_one_reg)
		return;

	val = vcpu_get_reg(vcpu, KVM_X86_REG_MSR(msr));
	TEST_ASSERT(val == reset_val, "Wanted 0x%lx from get_reg(0x%x), got 0x%lx",
		    reset_val, msr, val);
}

static void do_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	for (;;) {
		vcpu_run(vcpu);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			host_test_msr(vcpu, uc.args[1]);
			return;
		case UCALL_PRINTF:
			pr_info("%s", uc.buffer);
			break;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
		case UCALL_DONE:
			TEST_FAIL("Unexpected UCALL_DONE");
		default:
			TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
		}
	}
}

static void vcpus_run(struct kvm_vcpu **vcpus, const int NR_VCPUS)
{
	int i;

	for (i = 0; i < NR_VCPUS; i++)
		do_vcpu_run(vcpus[i]);
}

#define MISC_ENABLES_RESET_VAL (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL | MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)

static void test_msrs(void)
{
	const struct kvm_msr __msrs[] = {
		MSR_TEST_NON_ZERO(MSR_IA32_MISC_ENABLE,
				  MISC_ENABLES_RESET_VAL | MSR_IA32_MISC_ENABLE_FAST_STRING,
				  MSR_IA32_MISC_ENABLE_FAST_STRING, MISC_ENABLES_RESET_VAL, NONE),
		MSR_TEST_NON_ZERO(MSR_IA32_CR_PAT, 0x07070707, 0, 0x7040600070406, NONE),

		/*
		 * TSC_AUX is supported if RDTSCP *or* RDPID is supported.  Add
		 * entries for each feature so that TSC_AUX doesn't exist for
		 * the "unsupported" vCPU, and obviously to test both cases.
		 */
		MSR_TEST2(MSR_TSC_AUX, 0x12345678, u64_val, RDTSCP, RDPID),
		MSR_TEST2(MSR_TSC_AUX, 0x12345678, u64_val, RDPID, RDTSCP),

		MSR_TEST(MSR_IA32_SYSENTER_CS, 0x1234, 0, NONE),
		/*
		 * SYSENTER_{ESP,EIP} are technically non-canonical on Intel,
		 * but KVM doesn't emulate that behavior on emulated writes,
		 * i.e. this test will observe different behavior if the MSR
		 * writes are handled by hardware vs. KVM.  KVM's behavior is
		 * intended (though far from ideal), so don't bother testing
		 * non-canonical values.
		 */
		MSR_TEST(MSR_IA32_SYSENTER_ESP, canonical_val, 0, NONE),
		MSR_TEST(MSR_IA32_SYSENTER_EIP, canonical_val, 0, NONE),

		MSR_TEST_CANONICAL(MSR_FS_BASE, LM),
		MSR_TEST_CANONICAL(MSR_GS_BASE, LM),
		MSR_TEST_CANONICAL(MSR_KERNEL_GS_BASE, LM),
		MSR_TEST_CANONICAL(MSR_LSTAR, LM),
		MSR_TEST_CANONICAL(MSR_CSTAR, LM),
		MSR_TEST(MSR_SYSCALL_MASK, 0xffffffff, 0, LM),

		MSR_TEST2(MSR_IA32_S_CET, CET_SHSTK_EN, CET_RESERVED, SHSTK, IBT),
		MSR_TEST2(MSR_IA32_S_CET, CET_ENDBR_EN, CET_RESERVED, IBT, SHSTK),
		MSR_TEST2(MSR_IA32_U_CET, CET_SHSTK_EN, CET_RESERVED, SHSTK, IBT),
		MSR_TEST2(MSR_IA32_U_CET, CET_ENDBR_EN, CET_RESERVED, IBT, SHSTK),
		MSR_TEST_CANONICAL(MSR_IA32_PL0_SSP, SHSTK),
		MSR_TEST(MSR_IA32_PL0_SSP, canonical_val, canonical_val | 1, SHSTK),
		MSR_TEST_CANONICAL(MSR_IA32_PL1_SSP, SHSTK),
		MSR_TEST(MSR_IA32_PL1_SSP, canonical_val, canonical_val | 1, SHSTK),
		MSR_TEST_CANONICAL(MSR_IA32_PL2_SSP, SHSTK),
		MSR_TEST(MSR_IA32_PL2_SSP, canonical_val, canonical_val | 1, SHSTK),
		MSR_TEST_CANONICAL(MSR_IA32_PL3_SSP, SHSTK),
		MSR_TEST(MSR_IA32_PL3_SSP, canonical_val, canonical_val | 1, SHSTK),

		MSR_TEST_KVM(GUEST_SSP, canonical_val, NONCANONICAL, SHSTK),
	};

	const struct kvm_x86_cpu_feature feat_none = X86_FEATURE_NONE;
	const struct kvm_x86_cpu_feature feat_lm = X86_FEATURE_LM;

	/*
	 * Create three vCPUs, but run them on the same task, to validate KVM's
	 * context switching of MSR state.  Don't pin the task to a pCPU to
	 * also validate KVM's handling of cross-pCPU migration.  Use the full
	 * set of features for the first two vCPUs, but clear all features in
	 * the third vCPU in order to test both positive and negative paths.
	 */
	const int NR_VCPUS = 3;
	struct kvm_vcpu *vcpus[NR_VCPUS];
	struct kvm_vm *vm;
	int i;

	kvm_static_assert(sizeof(__msrs) <= sizeof(msrs));
	kvm_static_assert(ARRAY_SIZE(__msrs) <= ARRAY_SIZE(msrs));
	memcpy(msrs, __msrs, sizeof(__msrs));

	ignore_unsupported_msrs = kvm_is_ignore_msrs();

	vm = vm_create_with_vcpus(NR_VCPUS, guest_main, vcpus);

	sync_global_to_guest(vm, msrs);
	sync_global_to_guest(vm, ignore_unsupported_msrs);

	/*
	 * Clear features in the "unsupported features" vCPU.  This needs to be
	 * done before the first vCPU run as KVM's ABI is that guest CPUID is
	 * immutable once the vCPU has been run.
	 */
	for (idx = 0; idx < ARRAY_SIZE(__msrs); idx++) {
		/*
		 * Don't clear LM; selftests are 64-bit only, and KVM doesn't
		 * honor LM=0 for MSRs that are supposed to exist if and only
		 * if the vCPU is a 64-bit model.  Ditto for NONE; clearing a
		 * fake feature flag will result in false failures.
		 */
		if (memcmp(&msrs[idx].feature, &feat_lm, sizeof(feat_lm)) &&
		    memcmp(&msrs[idx].feature, &feat_none, sizeof(feat_none)))
			vcpu_clear_cpuid_feature(vcpus[2], msrs[idx].feature);
	}

	for (idx = 0; idx < ARRAY_SIZE(__msrs); idx++) {
		struct kvm_msr *msr = &msrs[idx];

		if (msr->is_kvm_defined) {
			for (i = 0; i < NR_VCPUS; i++)
				host_test_kvm_reg(vcpus[i]);
			continue;
		}

		/*
		 * Verify KVM_GET_SUPPORTED_CPUID and KVM_GET_MSR_INDEX_LIST
		 * are consistent with respect to MSRs whose existence is
		 * enumerated via CPUID.  Skip the check for FS/GS.base MSRs,
		 * as they aren't reported in the save/restore list since their
		 * state is managed via SREGS.
		 */
		TEST_ASSERT(msr->index == MSR_FS_BASE || msr->index == MSR_GS_BASE ||
			    kvm_msr_is_in_save_restore_list(msr->index) ==
			    (kvm_cpu_has(msr->feature) || kvm_cpu_has(msr->feature2)),
			    "%s %s in save/restore list, but %s according to CPUID", msr->name,
			    kvm_msr_is_in_save_restore_list(msr->index) ? "is" : "isn't",
			    (kvm_cpu_has(msr->feature) || kvm_cpu_has(msr->feature2)) ?
			    "supported" : "unsupported");

		sync_global_to_guest(vm, idx);

		vcpus_run(vcpus, NR_VCPUS);
		vcpus_run(vcpus, NR_VCPUS);
	}

	kvm_vm_free(vm);
}

int main(void)
{
	has_one_reg = kvm_has_cap(KVM_CAP_ONE_REG);

	test_msrs();

	if (has_one_reg) {
		use_one_reg = true;
		test_msrs();
	}
}