/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __KVM_X86_VMX_COMMON_H
#define __KVM_X86_VMX_COMMON_H

#include <linux/kvm_host.h>
#include <asm/posted_intr.h>

#include "mmu.h"

union vmx_exit_reason {
	struct {
		u32	basic			: 16;
		u32	reserved16		: 1;
		u32	reserved17		: 1;
		u32	reserved18		: 1;
		u32	reserved19		: 1;
		u32	reserved20		: 1;
		u32	reserved21		: 1;
		u32	reserved22		: 1;
		u32	reserved23		: 1;
		u32	reserved24		: 1;
		u32	reserved25		: 1;
		u32	bus_lock_detected	: 1;
		u32	enclave_mode		: 1;
		u32	smi_pending_mtf		: 1;
		u32	smi_from_vmx_root	: 1;
		u32	reserved30		: 1;
		u32	failed_vmentry		: 1;
	};
	u32 full;
};

struct vcpu_vt {
	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Used if this vCPU is waiting for PI notification wakeup. */
	struct list_head pi_wakeup_list;

	union vmx_exit_reason exit_reason;

	unsigned long	exit_qualification;
	u32		exit_intr_info;

	/*
	 * If true, guest state has been loaded into hardware, and host state
	 * has been saved into vcpu_{vt,vmx,tdx}.  If false, host state is
	 * loaded into hardware.
	 */
	bool		guest_state_loaded;
	bool		emulation_required;

#ifdef CONFIG_X86_64
	u64		msr_host_kernel_gs_base;
#endif
};

#ifdef CONFIG_KVM_INTEL_TDX

static __always_inline bool is_td(struct kvm *kvm)
{
	return kvm->arch.vm_type == KVM_X86_TDX_VM;
}

static __always_inline bool is_td_vcpu(struct kvm_vcpu *vcpu)
{
	return is_td(vcpu->kvm);
}

#else

static __always_inline bool is_td(struct kvm *kvm) { return false; }
static __always_inline bool is_td_vcpu(struct kvm_vcpu *vcpu) { return false; }

#endif

static inline bool vt_is_tdx_private_gpa(struct kvm *kvm, gpa_t gpa)
{
	/* For TDX the direct mask is the shared mask. */
	return !kvm_is_addr_direct(kvm, gpa);
}

static inline int __vmx_handle_ept_violation(struct kvm_vcpu *vcpu, gpa_t gpa,
					     unsigned long exit_qualification)
{
	u64 error_code;

	/* Is it a read fault? */
	error_code = (exit_qualification & EPT_VIOLATION_ACC_READ)
		     ? PFERR_USER_MASK : 0;
	/* Is it a write fault? */
	error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE)
		      ? PFERR_WRITE_MASK : 0;
	/* Is it a fetch fault? */
	error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR)
		      ? PFERR_FETCH_MASK : 0;
	/* Is the EPT page table entry present? */
	error_code |= (exit_qualification & EPT_VIOLATION_PROT_MASK)
		      ? PFERR_PRESENT_MASK : 0;

	if (exit_qualification & EPT_VIOLATION_GVA_IS_VALID)
		error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) ?
			      PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;

	if (vt_is_tdx_private_gpa(vcpu->kvm, gpa))
		error_code |= PFERR_PRIVATE_ACCESS;

	return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
}
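
/*
 * Worked example (editorial illustration, not kernel code): a guest write
 * to a GPA whose EPT entry is present but read-only exits with
 * EPT_VIOLATION_ACC_WRITE and EPT_VIOLATION_PROT_MASK set in the exit
 * qualification.  Assuming the exit does not report a valid GVA, the
 * helper above folds those bits into
 *
 *	error_code = PFERR_WRITE_MASK | PFERR_PRESENT_MASK;
 *
 * and, for a TDX-private GPA, also ORs in PFERR_PRIVATE_ACCESS before
 * handing the fault to kvm_mmu_page_fault().
 */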
static inline void kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
						     int pi_vec)
{
#ifdef CONFIG_SMP
	if (vcpu->mode == IN_GUEST_MODE) {
		/*
		 * The vector of the virtual interrupt has already been set in
		 * the PIR.  Send a notification event to deliver the virtual
		 * interrupt unless the vCPU is the currently running vCPU,
		 * i.e. the event is being sent from a fastpath VM-Exit
		 * handler, in which case the PIR will be synced to the vIRR
		 * before re-entering the guest.
		 *
		 * When the target is not the running vCPU, the following
		 * possibilities emerge:
		 *
		 * Case 1: vCPU stays in non-root mode.  Sending a notification
		 * event posts the interrupt to the vCPU.
		 *
		 * Case 2: vCPU exits to root mode and is still runnable.  The
		 * PIR will be synced to the vIRR before re-entering the guest.
		 * Sending a notification event is ok as the host IRQ handler
		 * will ignore the spurious event.
		 *
		 * Case 3: vCPU exits to root mode and is blocked.  vcpu_block()
		 * has already synced PIR to vIRR and never blocks the vCPU if
		 * the vIRR is not empty.  Therefore, a blocked vCPU here does
		 * not wait for any requested interrupts in PIR, and sending a
		 * notification event also results in a benign, spurious event.
		 */

		if (vcpu != kvm_get_running_vcpu())
			__apic_send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
		return;
	}
#endif
	/*
	 * The vCPU isn't in the guest; wake the vCPU in case it is blocking,
	 * otherwise do nothing as KVM will grab the highest priority pending
	 * IRQ via ->sync_pir_to_irr() in vcpu_enter_guest().
	 */
	kvm_vcpu_wake_up(vcpu);
}

/*
 * Post an interrupt to a vCPU's PIR and trigger the vCPU to process the
 * interrupt if necessary.
 */
static inline void __vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu,
						  struct pi_desc *pi_desc, int vector)
{
	if (pi_test_and_set_pir(vector, pi_desc))
		return;

	/* If a previous notification has sent the IPI, nothing to do. */
	if (pi_test_and_set_on(pi_desc))
		return;

	/*
	 * The implied barrier in pi_test_and_set_on() pairs with the smp_mb_*()
	 * after setting vcpu->mode in vcpu_enter_guest(), thus the vCPU is
	 * guaranteed to see PID.ON=1 and sync the PIR to IRR if triggering a
	 * posted interrupt "fails" because vcpu->mode != IN_GUEST_MODE.
	 */
	kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_VECTOR);
}

noinstr void vmx_handle_nmi(struct kvm_vcpu *vcpu);

#endif /* __KVM_X86_VMX_COMMON_H */
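
/*
 * Usage sketch (illustrative only; example_deliver_interrupt() and
 * to_example_vt(), an accessor for the embedded struct vcpu_vt, are
 * hypothetical): an interrupt-delivery path posts the vector and kicks
 * the target vCPU through the common helper, along these lines:
 *
 *	void example_deliver_interrupt(struct kvm_vcpu *vcpu, int vector)
 *	{
 *		__vmx_deliver_posted_interrupt(vcpu,
 *					       &to_example_vt(vcpu)->pi_desc,
 *					       vector);
 *	}
 */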