Path: tools/testing/selftests/kvm/x86/smm_test.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018, Red Hat, Inc.
 *
 * Tests for SMM.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"

#include "kvm_util.h"

#include "vmx.h"
#include "svm_util.h"

#define SMRAM_SIZE 65536
#define SMRAM_MEMSLOT ((1 << 16) | 1)
#define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE)
#define SMRAM_GPA 0x1000000
#define SMRAM_STAGE 0xfe

#define STR(x) #x
#define XSTR(s) STR(s)

#define SYNC_PORT 0xe
#define DONE 0xff

/*
 * This is compiled as normal 64-bit code, however, the SMI handler is
 * executed in real-address mode. To stay simple we're limiting ourselves
 * to a mode-independent subset of asm here.
 * The SMI handler always reports back the fixed stage SMRAM_STAGE.
 */
uint8_t smi_handler[] = {
        0xb0, SMRAM_STAGE,    /* mov $SMRAM_STAGE, %al */
        0xe4, SYNC_PORT,      /* in $SYNC_PORT, %al */
        0x0f, 0xaa,           /* rsm */
};

static inline void sync_with_host(uint64_t phase)
{
        asm volatile("in $" XSTR(SYNC_PORT)", %%al \n"
                     : "+a" (phase));
}

static void self_smi(void)
{
        x2apic_write_reg(APIC_ICR,
                         APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI);
}

static void l2_guest_code(void)
{
        sync_with_host(8);

        sync_with_host(10);

        vmcall();
}

static void guest_code(void *arg)
{
#define L2_GUEST_STACK_SIZE 64
        unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
        uint64_t apicbase = rdmsr(MSR_IA32_APICBASE);
        struct svm_test_data *svm = arg;
        struct vmx_pages *vmx_pages = arg;

        sync_with_host(1);

        wrmsr(MSR_IA32_APICBASE, apicbase | X2APIC_ENABLE);

        sync_with_host(2);

        self_smi();

        sync_with_host(4);

        if (arg) {
                if (this_cpu_has(X86_FEATURE_SVM)) {
                        generic_svm_setup(svm, l2_guest_code,
                                          &l2_guest_stack[L2_GUEST_STACK_SIZE]);
                } else {
                        GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
                        GUEST_ASSERT(load_vmcs(vmx_pages));
                        prepare_vmcs(vmx_pages, l2_guest_code,
                                     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
                }

                sync_with_host(5);

                self_smi();

                sync_with_host(7);

                if (this_cpu_has(X86_FEATURE_SVM)) {
                        run_guest(svm->vmcb, svm->vmcb_gpa);
                        run_guest(svm->vmcb, svm->vmcb_gpa);
                } else {
                        vmlaunch();
                        vmresume();
                }

                /* Stages 8-11 are eaten by SMM (SMRAM_STAGE reported instead) */
                sync_with_host(12);
        }

        sync_with_host(DONE);
}

void inject_smi(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_events events;

        vcpu_events_get(vcpu, &events);

        events.smi.pending = 1;
        events.flags |= KVM_VCPUEVENT_VALID_SMM;

        vcpu_events_set(vcpu, &events);
}

int main(int argc, char *argv[])
{
        vm_vaddr_t nested_gva = 0;

        struct kvm_vcpu *vcpu;
        struct kvm_regs regs;
        struct kvm_vm *vm;
        struct kvm_x86_state *state;
        int stage, stage_reported;

        TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_SMM));

        /* Create VM */
        vm = vm_create_with_one_vcpu(&vcpu, guest_code);

        vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, SMRAM_GPA,
                                    SMRAM_MEMSLOT, SMRAM_PAGES, 0);
        TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, SMRAM_GPA, SMRAM_MEMSLOT)
                    == SMRAM_GPA, "could not allocate guest physical addresses?");

        memset(addr_gpa2hva(vm, SMRAM_GPA), 0x0, SMRAM_SIZE);
        /* The SMI entry point is at SMBASE + 0x8000, so place the handler there. */
        memcpy(addr_gpa2hva(vm, SMRAM_GPA) + 0x8000, smi_handler,
               sizeof(smi_handler));

        vcpu_set_msr(vcpu, MSR_IA32_SMBASE, SMRAM_GPA);

        if (kvm_has_cap(KVM_CAP_NESTED_STATE)) {
                if (kvm_cpu_has(X86_FEATURE_SVM))
                        vcpu_alloc_svm(vm, &nested_gva);
                else if (kvm_cpu_has(X86_FEATURE_VMX))
                        vcpu_alloc_vmx(vm, &nested_gva);
        }

        if (!nested_gva)
                pr_info("will skip SMM test with VMX enabled\n");

        vcpu_args_set(vcpu, 1, nested_gva);

        for (stage = 1;; stage++) {
                vcpu_run(vcpu);
                TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

                memset(&regs, 0, sizeof(regs));
                vcpu_regs_get(vcpu, &regs);

                stage_reported = regs.rax & 0xff;

                if (stage_reported == DONE)
                        goto done;

                TEST_ASSERT(stage_reported == stage ||
                            stage_reported == SMRAM_STAGE,
                            "Unexpected stage: #%x, got %x",
                            stage, stage_reported);

                /*
                 * Enter SMM during L2 execution and check that we correctly
                 * return from it. Do not perform save/restore while in SMM yet.
                 */
                if (stage == 8) {
                        inject_smi(vcpu);
                        continue;
                }

                /*
                 * Perform save/restore while the guest is in SMM triggered
                 * during L2 execution.
                 */
                if (stage == 10)
                        inject_smi(vcpu);

                state = vcpu_save_state(vcpu);
                kvm_vm_release(vm);

                vcpu = vm_recreate_with_one_vcpu(vm);
                vcpu_load_state(vcpu, state);
                kvm_x86_state_cleanup(state);
        }

done:
        kvm_vm_free(vm);
}