Path: blob/master/tools/testing/selftests/kvm/x86/nested_exceptions_test.c
// SPDX-License-Identifier: GPL-2.0-only
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"
#include "svm_util.h"

#define L2_GUEST_STACK_SIZE 256

/*
 * Arbitrary, never shoved into KVM/hardware, just need to avoid conflict with
 * the "real" exceptions used, #SS/#GP/#DF (12/13/8).
 */
#define FAKE_TRIPLE_FAULT_VECTOR 0xaa

/* Arbitrary 32-bit error code injected by this test. */
#define SS_ERROR_CODE 0xdeadbeef

/*
 * Bit '0' is set on Intel if the exception occurs while delivering a previous
 * event/exception.  AMD's wording is ambiguous, but presumably the bit is set
 * if the exception occurs while delivering an external event, e.g. NMI or INTR,
 * but not for exceptions that occur when delivering other exceptions or
 * software interrupts.
 *
 * Note, Intel's name for it, "External event", is misleading and much more
 * aligned with AMD's behavior, but the SDM is quite clear on its behavior.
 */
#define ERROR_CODE_EXT_FLAG BIT(0)

/*
 * Bit '1' is set if the fault occurred when looking up a descriptor in the
 * IDT, which is the case here as the IDT is empty/NULL.
 */
#define ERROR_CODE_IDT_FLAG BIT(1)

/*
 * The #GP that occurs when vectoring #SS should show the index into the IDT
 * for #SS, plus have the "IDT flag" set.
 */
#define GP_ERROR_CODE_AMD ((SS_VECTOR * 8) | ERROR_CODE_IDT_FLAG)
#define GP_ERROR_CODE_INTEL ((SS_VECTOR * 8) | ERROR_CODE_IDT_FLAG | ERROR_CODE_EXT_FLAG)

/*
 * Intel and AMD both shove '0' into the error code on #DF, regardless of what
 * led to the double fault.
 */
#define DF_ERROR_CODE 0

#define INTERCEPT_SS (BIT_ULL(SS_VECTOR))
#define INTERCEPT_SS_DF (INTERCEPT_SS | BIT_ULL(DF_VECTOR))
#define INTERCEPT_SS_GP_DF (INTERCEPT_SS_DF | BIT_ULL(GP_VECTOR))

static void l2_ss_pending_test(void)
{
        GUEST_SYNC(SS_VECTOR);
}

static void l2_ss_injected_gp_test(void)
{
        GUEST_SYNC(GP_VECTOR);
}

static void l2_ss_injected_df_test(void)
{
        GUEST_SYNC(DF_VECTOR);
}

static void l2_ss_injected_tf_test(void)
{
        GUEST_SYNC(FAKE_TRIPLE_FAULT_VECTOR);
}

static void svm_run_l2(struct svm_test_data *svm, void *l2_code, int vector,
                       uint32_t error_code)
{
        struct vmcb *vmcb = svm->vmcb;
        struct vmcb_control_area *ctrl = &vmcb->control;

        vmcb->save.rip = (u64)l2_code;
        run_guest(vmcb, svm->vmcb_gpa);

        if (vector == FAKE_TRIPLE_FAULT_VECTOR)
                return;

        GUEST_ASSERT_EQ(ctrl->exit_code, (SVM_EXIT_EXCP_BASE + vector));
        GUEST_ASSERT_EQ(ctrl->exit_info_1, error_code);
        GUEST_ASSERT(!ctrl->int_state);
}

static void l1_svm_code(struct svm_test_data *svm)
{
        struct vmcb_control_area *ctrl = &svm->vmcb->control;
        unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

        generic_svm_setup(svm, NULL, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
        svm->vmcb->save.idtr.limit = 0;
        ctrl->intercept |= BIT_ULL(INTERCEPT_SHUTDOWN);

        ctrl->intercept_exceptions = INTERCEPT_SS_GP_DF;
        svm_run_l2(svm, l2_ss_pending_test, SS_VECTOR, SS_ERROR_CODE);
        svm_run_l2(svm, l2_ss_injected_gp_test, GP_VECTOR, GP_ERROR_CODE_AMD);

        ctrl->intercept_exceptions = INTERCEPT_SS_DF;
        svm_run_l2(svm, l2_ss_injected_df_test, DF_VECTOR, DF_ERROR_CODE);

        ctrl->intercept_exceptions = INTERCEPT_SS;
        svm_run_l2(svm, l2_ss_injected_tf_test, FAKE_TRIPLE_FAULT_VECTOR, 0);
        GUEST_ASSERT_EQ(ctrl->exit_code, SVM_EXIT_SHUTDOWN);

        GUEST_DONE();
}
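
/*
 * Note on the VMLAUNCH vs. VMRESUME ternary below: the first VM-Entry on a
 * given VMCS must use VMLAUNCH, all subsequent entries must use VMRESUME.
 * Keying off of the vector works only because the pending-#SS test is
 * guaranteed to be the first L2 run (see l1_vmx_code()).
 */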
static void vmx_run_l2(void *l2_code, int vector, uint32_t error_code)
{
        GUEST_ASSERT(!vmwrite(GUEST_RIP, (u64)l2_code));

        GUEST_ASSERT_EQ(vector == SS_VECTOR ? vmlaunch() : vmresume(), 0);

        if (vector == FAKE_TRIPLE_FAULT_VECTOR)
                return;

        GUEST_ASSERT_EQ(vmreadz(VM_EXIT_REASON), EXIT_REASON_EXCEPTION_NMI);
        GUEST_ASSERT_EQ((vmreadz(VM_EXIT_INTR_INFO) & 0xff), vector);
        GUEST_ASSERT_EQ(vmreadz(VM_EXIT_INTR_ERROR_CODE), error_code);
        GUEST_ASSERT(!vmreadz(GUEST_INTERRUPTIBILITY_INFO));
}

static void l1_vmx_code(struct vmx_pages *vmx)
{
        unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

        GUEST_ASSERT_EQ(prepare_for_vmx_operation(vmx), true);

        GUEST_ASSERT_EQ(load_vmcs(vmx), true);

        prepare_vmcs(vmx, NULL, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
        GUEST_ASSERT_EQ(vmwrite(GUEST_IDTR_LIMIT, 0), 0);

        /*
         * VMX disallows injecting an exception with error_code[31:16] != 0,
         * and hardware will never generate a VM-Exit with bits 31:16 set.
         * KVM should likewise truncate the "bad" userspace value.
         */
        GUEST_ASSERT_EQ(vmwrite(EXCEPTION_BITMAP, INTERCEPT_SS_GP_DF), 0);
        vmx_run_l2(l2_ss_pending_test, SS_VECTOR, (u16)SS_ERROR_CODE);
        vmx_run_l2(l2_ss_injected_gp_test, GP_VECTOR, GP_ERROR_CODE_INTEL);

        GUEST_ASSERT_EQ(vmwrite(EXCEPTION_BITMAP, INTERCEPT_SS_DF), 0);
        vmx_run_l2(l2_ss_injected_df_test, DF_VECTOR, DF_ERROR_CODE);

        GUEST_ASSERT_EQ(vmwrite(EXCEPTION_BITMAP, INTERCEPT_SS), 0);
        vmx_run_l2(l2_ss_injected_tf_test, FAKE_TRIPLE_FAULT_VECTOR, 0);
        GUEST_ASSERT_EQ(vmreadz(VM_EXIT_REASON), EXIT_REASON_TRIPLE_FAULT);

        GUEST_DONE();
}

static void __attribute__((__flatten__)) l1_guest_code(void *test_data)
{
        if (this_cpu_has(X86_FEATURE_SVM))
                l1_svm_code(test_data);
        else
                l1_vmx_code(test_data);
}

static void assert_ucall_vector(struct kvm_vcpu *vcpu, int vector)
{
        struct ucall uc;

        TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

        switch (get_ucall(vcpu, &uc)) {
        case UCALL_SYNC:
                TEST_ASSERT(vector == uc.args[1],
                            "Expected L2 to ask for %d, got %ld", vector, uc.args[1]);
                break;
        case UCALL_DONE:
                TEST_ASSERT(vector == -1,
                            "Expected L2 to ask for %d, L2 says it's done", vector);
                break;
        case UCALL_ABORT:
                REPORT_GUEST_ASSERT(uc);
                break;
        default:
                TEST_FAIL("Expected L2 to ask for %d, got unexpected ucall %lu", vector, uc.cmd);
        }
}
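
/*
 * Queue a #SS for L2 via KVM_SET_VCPU_EVENTS.  Loosely speaking, a "pending"
 * exception has not yet started delivery and so is still subject to L1's
 * exception intercepts, whereas an "injected" exception is already mid
 * delivery and thus bypasses interception of the exception itself; only
 * faults encountered while vectoring it (here, #GP => #DF => SHUTDOWN,
 * courtesy of the zeroed IDT limit) can be intercepted by L1.  Setting
 * KVM_VCPUEVENT_VALID_PAYLOAD opts in to KVM's pending vs. injected split
 * (see KVM_CAP_EXCEPTION_PAYLOAD).
 */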
static void queue_ss_exception(struct kvm_vcpu *vcpu, bool inject)
{
        struct kvm_vcpu_events events;

        vcpu_events_get(vcpu, &events);

        TEST_ASSERT(!events.exception.pending,
                    "Vector %d unexpectedly pending", events.exception.nr);
        TEST_ASSERT(!events.exception.injected,
                    "Vector %d unexpectedly injected", events.exception.nr);

        events.flags = KVM_VCPUEVENT_VALID_PAYLOAD;
        events.exception.pending = !inject;
        events.exception.injected = inject;
        events.exception.nr = SS_VECTOR;
        events.exception.has_error_code = true;
        events.exception.error_code = SS_ERROR_CODE;
        vcpu_events_set(vcpu, &events);
}

/*
 * Verify KVM_{G,S}ET_EVENTS play nice with pending vs. injected exceptions
 * when an exception is being queued for L2.  Specifically, verify that KVM
 * honors L1 exception intercept controls when a #SS is pending/injected,
 * triggers a #GP on vectoring the #SS, morphs to #DF if #GP isn't intercepted
 * by L1, and finally causes (nested) SHUTDOWN if #DF isn't intercepted by L1.
 */
int main(int argc, char *argv[])
{
        vm_vaddr_t nested_test_data_gva;
        struct kvm_vcpu_events events;
        struct kvm_vcpu *vcpu;
        struct kvm_vm *vm;

        TEST_REQUIRE(kvm_has_cap(KVM_CAP_EXCEPTION_PAYLOAD));
        TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM) || kvm_cpu_has(X86_FEATURE_VMX));

        vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
        vm_enable_cap(vm, KVM_CAP_EXCEPTION_PAYLOAD, -2ul);

        if (kvm_cpu_has(X86_FEATURE_SVM))
                vcpu_alloc_svm(vm, &nested_test_data_gva);
        else
                vcpu_alloc_vmx(vm, &nested_test_data_gva);

        vcpu_args_set(vcpu, 1, nested_test_data_gva);

        /* Run L1 => L2.  L2 should sync and request #SS. */
        vcpu_run(vcpu);
        assert_ucall_vector(vcpu, SS_VECTOR);

        /* Pend #SS and request immediate exit.  #SS should still be pending. */
        queue_ss_exception(vcpu, false);
        vcpu->run->immediate_exit = true;
        vcpu_run_complete_io(vcpu);

        /* Verify the pending event comes back out the same as it went in. */
        vcpu_events_get(vcpu, &events);
        TEST_ASSERT_EQ(events.flags & KVM_VCPUEVENT_VALID_PAYLOAD,
                       KVM_VCPUEVENT_VALID_PAYLOAD);
        TEST_ASSERT_EQ(events.exception.pending, true);
        TEST_ASSERT_EQ(events.exception.nr, SS_VECTOR);
        TEST_ASSERT_EQ(events.exception.has_error_code, true);
        TEST_ASSERT_EQ(events.exception.error_code, SS_ERROR_CODE);

        /*
         * Run for real with the pending #SS, L1 should get a VM-Exit due to
         * #SS interception and re-enter L2 to request #GP (via injected #SS).
         */
        vcpu->run->immediate_exit = false;
        vcpu_run(vcpu);
        assert_ucall_vector(vcpu, GP_VECTOR);

        /*
         * Inject #SS, the #SS should bypass interception and cause #GP, which
         * L1 should intercept before KVM morphs it to #DF.  L1 should then
         * disable #GP interception and run L2 to request #DF (via #SS => #GP).
         */
        queue_ss_exception(vcpu, true);
        vcpu_run(vcpu);
        assert_ucall_vector(vcpu, DF_VECTOR);

        /*
         * Inject #SS, the #SS should bypass interception and cause #GP, which
         * L1 is no longer intercepting, and so should see a #DF VM-Exit.  L1
         * should then disable #DF interception and run L2 to request a (fake)
         * TRIPLE_FAULT.
         */
        queue_ss_exception(vcpu, true);
        vcpu_run(vcpu);
        assert_ucall_vector(vcpu, FAKE_TRIPLE_FAULT_VECTOR);

        /*
         * Inject #SS yet again.  L1 is not intercepting #GP or #DF, and so
         * should see nested TRIPLE_FAULT / SHUTDOWN.  L1 should then signal
         * that it's done.
         */
        queue_ss_exception(vcpu, true);
        vcpu_run(vcpu);
        assert_ucall_vector(vcpu, -1);

        kvm_vm_free(vm);
}
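
/*
 * Example build/run flow, using the standard kselftest invocation.  The
 * binary path assumes this file's post-rename x86/ directory and may differ
 * on older trees (which used x86_64/):
 *
 *   make -C tools/testing/selftests TARGETS=kvm
 *   ./tools/testing/selftests/kvm/x86/nested_exceptions_test
 */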