Path: blob/master/tools/testing/selftests/kvm/include/x86/svm_util.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020, Red Hat, Inc.
 */

#ifndef SELFTEST_KVM_SVM_UTILS_H
#define SELFTEST_KVM_SVM_UTILS_H

#include <asm/svm.h>

#include <stdint.h>
#include "svm.h"
#include "processor.h"

struct svm_test_data {
	/* VMCB */
	struct vmcb *vmcb; /* gva */
	void *vmcb_hva;
	uint64_t vmcb_gpa;

	/* host state-save area */
	struct vmcb_save_area *save_area; /* gva */
	void *save_area_hva;
	uint64_t save_area_gpa;

	/* MSR-Bitmap */
	void *msr; /* gva */
	void *msr_hva;
	uint64_t msr_gpa;
};

static inline void vmmcall(void)
{
	/*
	 * Stuff RAX and RCX with "safe" values to make sure L0 doesn't handle
	 * it as a valid hypercall (e.g. Hyper-V L2 TLB flush) as the intended
	 * use of this function is to exit to L1 from L2.  Clobber all other
	 * GPRs as L1 doesn't correctly preserve them during vmexits.
	 */
	__asm__ __volatile__("push %%rbp; vmmcall; pop %%rbp"
			     : : "a"(0xdeadbeef), "c"(0xbeefdead)
			     : "rbx", "rdx", "rsi", "rdi", "r8", "r9",
			       "r10", "r11", "r12", "r13", "r14", "r15");
}

#define stgi()			\
	__asm__ __volatile__(	\
		"stgi\n"	\
		)

#define clgi()			\
	__asm__ __volatile__(	\
		"clgi\n"	\
		)

struct svm_test_data *vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva);
void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp);
void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa);

int open_sev_dev_path_or_exit(void);

#endif /* SELFTEST_KVM_SVM_UTILS_H */
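
/*
 * Example usage (illustrative sketch only, not part of this header): a
 * typical nested-SVM selftest allocates the tracking structure on the host
 * with vcpu_alloc_svm(), hands the resulting GVA to the guest, and the
 * guest (L1) then uses generic_svm_setup()/run_guest() to enter L2 and
 * inspect the exit reason. guest_main, l2_guest_code and
 * L2_GUEST_STACK_SIZE below are assumed to be provided by the calling
 * test, not by this header.
 *
 *	// Host side:
 *	vm_vaddr_t svm_gva;
 *	struct kvm_vcpu *vcpu;
 *	struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, guest_main);
 *
 *	vcpu_alloc_svm(vm, &svm_gva);
 *	vcpu_args_set(vcpu, 1, svm_gva);
 *
 *	// Guest (L1) side, in guest_main(uint64_t svm_gva):
 *	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
 *	struct svm_test_data *svm = (struct svm_test_data *)svm_gva;
 *
 *	generic_svm_setup(svm, l2_guest_code,
 *			  &l2_guest_stack[L2_GUEST_STACK_SIZE]);
 *	run_guest(svm->vmcb, svm->vmcb_gpa);
 *	GUEST_ASSERT(svm->vmcb->control.exit_code == SVM_EXIT_VMMCALL);
 */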