Path: blob/master/tools/arch/x86/include/uapi/asm/kvm.h
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_X86_KVM_H
#define _ASM_X86_KVM_H

/*
 * KVM x86 specific structures and definitions
 *
 */

#include <linux/const.h>
#include <linux/bits.h>
#include <linux/types.h>
#include <linux/ioctl.h>
#include <linux/stddef.h>

#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
#define KVM_DIRTY_LOG_PAGE_OFFSET 64

#define DE_VECTOR 0
#define DB_VECTOR 1
#define BP_VECTOR 3
#define OF_VECTOR 4
#define BR_VECTOR 5
#define UD_VECTOR 6
#define NM_VECTOR 7
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14
#define MF_VECTOR 16
#define AC_VECTOR 17
#define MC_VECTOR 18
#define XM_VECTOR 19
#define VE_VECTOR 20
#define CP_VECTOR 21

#define HV_VECTOR 28
#define VC_VECTOR 29
#define SX_VECTOR 30

/* Select x86 specific features in <linux/kvm.h> */
#define __KVM_HAVE_PIT
#define __KVM_HAVE_IOAPIC
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_MSI
#define __KVM_HAVE_USER_NMI
#define __KVM_HAVE_MSIX
#define __KVM_HAVE_MCE
#define __KVM_HAVE_PIT_STATE2
#define __KVM_HAVE_XEN_HVM
#define __KVM_HAVE_VCPU_EVENTS
#define __KVM_HAVE_DEBUGREGS
#define __KVM_HAVE_XSAVE
#define __KVM_HAVE_XCRS

/* Architectural interrupt line count. */
#define KVM_NR_INTERRUPTS 256

/* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */
struct kvm_pic_state {
	__u8 last_irr;	/* edge detection */
	__u8 irr;	/* interrupt request register */
	__u8 imr;	/* interrupt mask register */
	__u8 isr;	/* interrupt service register */
	__u8 priority_add;	/* highest irq priority */
	__u8 irq_base;
	__u8 read_reg_select;
	__u8 poll;
	__u8 special_mask;
	__u8 init_state;
	__u8 auto_eoi;
	__u8 rotate_on_auto_eoi;
	__u8 special_fully_nested_mode;
	__u8 init4;	/* true if 4 byte init */
	__u8 elcr;	/* PIIX edge/trigger selection */
	__u8 elcr_mask;
};

#define KVM_IOAPIC_NUM_PINS 24
struct kvm_ioapic_state {
	__u64 base_address;
	__u32 ioregsel;
	__u32 id;
	__u32 irr;
	__u32 pad;
	union {
		__u64 bits;
		struct {
			__u8 vector;
			__u8 delivery_mode:3;
			__u8 dest_mode:1;
			__u8 delivery_status:1;
			__u8 polarity:1;
			__u8 remote_irr:1;
			__u8 trig_mode:1;
			__u8 mask:1;
			__u8 reserve:7;
			__u8 reserved[4];
			__u8 dest_id;
		} fields;
	} redirtbl[KVM_IOAPIC_NUM_PINS];
};

#define KVM_IRQCHIP_PIC_MASTER 0
#define KVM_IRQCHIP_PIC_SLAVE 1
#define KVM_IRQCHIP_IOAPIC 2
#define KVM_NR_IRQCHIPS 3

#define KVM_RUN_X86_SMM (1 << 0)
#define KVM_RUN_X86_BUS_LOCK (1 << 1)
#define KVM_RUN_X86_GUEST_MODE (1 << 2)

/* for KVM_GET_REGS and KVM_SET_REGS */
struct kvm_regs {
	/* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
	__u64 rax, rbx, rcx, rdx;
	__u64 rsi, rdi, rsp, rbp;
	__u64 r8,  r9,  r10, r11;
	__u64 r12, r13, r14, r15;
	__u64 rip, rflags;
};
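/*
 * Illustrative sketch (not part of this UAPI header): one way userspace can
 * read and modify a vCPU's general-purpose registers with the KVM_GET_REGS
 * and KVM_SET_REGS ioctls from <linux/kvm.h>. Assumes "vcpu_fd" came from
 * KVM_CREATE_VCPU and "guest_entry" is a placeholder address.
 *
 *	struct kvm_regs regs = {};
 *
 *	ioctl(vcpu_fd, KVM_GET_REGS, &regs);
 *	regs.rip = guest_entry;
 *	ioctl(vcpu_fd, KVM_SET_REGS, &regs);
 */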
/* for KVM_GET_LAPIC and KVM_SET_LAPIC */
#define KVM_APIC_REG_SIZE 0x400
struct kvm_lapic_state {
	char regs[KVM_APIC_REG_SIZE];
};

struct kvm_segment {
	__u64 base;
	__u32 limit;
	__u16 selector;
	__u8 type;
	__u8 present, dpl, db, s, l, g, avl;
	__u8 unusable;
	__u8 padding;
};

struct kvm_dtable {
	__u64 base;
	__u16 limit;
	__u16 padding[3];
};


/* for KVM_GET_SREGS and KVM_SET_SREGS */
struct kvm_sregs {
	/* out (KVM_GET_SREGS) / in (KVM_SET_SREGS) */
	struct kvm_segment cs, ds, es, fs, gs, ss;
	struct kvm_segment tr, ldt;
	struct kvm_dtable gdt, idt;
	__u64 cr0, cr2, cr3, cr4, cr8;
	__u64 efer;
	__u64 apic_base;
	__u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64];
};

struct kvm_sregs2 {
	/* out (KVM_GET_SREGS2) / in (KVM_SET_SREGS2) */
	struct kvm_segment cs, ds, es, fs, gs, ss;
	struct kvm_segment tr, ldt;
	struct kvm_dtable gdt, idt;
	__u64 cr0, cr2, cr3, cr4, cr8;
	__u64 efer;
	__u64 apic_base;
	__u64 flags;
	__u64 pdptrs[4];
};
#define KVM_SREGS2_FLAGS_PDPTRS_VALID 1

/* for KVM_GET_FPU and KVM_SET_FPU */
struct kvm_fpu {
	__u8 fpr[8][16];
	__u16 fcw;
	__u16 fsw;
	__u8 ftwx;	/* in fxsave format */
	__u8 pad1;
	__u16 last_opcode;
	__u64 last_ip;
	__u64 last_dp;
	__u8 xmm[16][16];
	__u32 mxcsr;
	__u32 pad2;
};

struct kvm_msr_entry {
	__u32 index;
	__u32 reserved;
	__u64 data;
};

/* for KVM_GET_MSRS and KVM_SET_MSRS */
struct kvm_msrs {
	__u32 nmsrs; /* number of msrs in entries */
	__u32 pad;

	struct kvm_msr_entry entries[];
};
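/*
 * Illustrative sketch (not part of this UAPI header): because "entries" is a
 * flexible array member, userspace allocates the header and its entries in
 * one block before calling the KVM_GET_MSRS/KVM_SET_MSRS vCPU ioctls from
 * <linux/kvm.h>. MSR_IA32_TSC_DEADLINE is only an example index.
 *
 *	struct kvm_msrs *msrs;
 *
 *	msrs = calloc(1, sizeof(*msrs) + sizeof(struct kvm_msr_entry));
 *	msrs->nmsrs = 1;
 *	msrs->entries[0].index = MSR_IA32_TSC_DEADLINE;
 *	ioctl(vcpu_fd, KVM_GET_MSRS, msrs);
 */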
/* for KVM_GET_MSR_INDEX_LIST */
struct kvm_msr_list {
	__u32 nmsrs; /* number of msrs in entries */
	__u32 indices[];
};

/* Maximum size of any access bitmap in bytes */
#define KVM_MSR_FILTER_MAX_BITMAP_SIZE 0x600

/* for KVM_X86_SET_MSR_FILTER */
struct kvm_msr_filter_range {
#define KVM_MSR_FILTER_READ  (1 << 0)
#define KVM_MSR_FILTER_WRITE (1 << 1)
#define KVM_MSR_FILTER_RANGE_VALID_MASK (KVM_MSR_FILTER_READ | \
					 KVM_MSR_FILTER_WRITE)
	__u32 flags;
	__u32 nmsrs; /* number of msrs in bitmap */
	__u32 base;  /* MSR index the bitmap starts at */
	__u8 *bitmap; /* a 1 bit allows the operations in flags, 0 denies */
};

#define KVM_MSR_FILTER_MAX_RANGES 16
struct kvm_msr_filter {
#ifndef __KERNEL__
#define KVM_MSR_FILTER_DEFAULT_ALLOW (0 << 0)
#endif
#define KVM_MSR_FILTER_DEFAULT_DENY (1 << 0)
#define KVM_MSR_FILTER_VALID_MASK (KVM_MSR_FILTER_DEFAULT_DENY)
	__u32 flags;
	struct kvm_msr_filter_range ranges[KVM_MSR_FILTER_MAX_RANGES];
};

struct kvm_cpuid_entry {
	__u32 function;
	__u32 eax;
	__u32 ebx;
	__u32 ecx;
	__u32 edx;
	__u32 padding;
};

/* for KVM_SET_CPUID */
struct kvm_cpuid {
	__u32 nent;
	__u32 padding;
	struct kvm_cpuid_entry entries[];
};

struct kvm_cpuid_entry2 {
	__u32 function;
	__u32 index;
	__u32 flags;
	__u32 eax;
	__u32 ebx;
	__u32 ecx;
	__u32 edx;
	__u32 padding[3];
};

#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX (1 << 0)
#define KVM_CPUID_FLAG_STATEFUL_FUNC (1 << 1)
#define KVM_CPUID_FLAG_STATE_READ_NEXT (1 << 2)

/* for KVM_SET_CPUID2 */
struct kvm_cpuid2 {
	__u32 nent;
	__u32 padding;
	struct kvm_cpuid_entry2 entries[];
};

/* for KVM_GET_PIT and KVM_SET_PIT */
struct kvm_pit_channel_state {
	__u32 count; /* can be 65536 */
	__u16 latched_count;
	__u8 count_latched;
	__u8 status_latched;
	__u8 status;
	__u8 read_state;
	__u8 write_state;
	__u8 write_latch;
	__u8 rw_mode;
	__u8 mode;
	__u8 bcd;
	__u8 gate;
	__s64 count_load_time;
};

struct kvm_debug_exit_arch {
	__u32 exception;
	__u32 pad;
	__u64 pc;
	__u64 dr6;
	__u64 dr7;
};

#define KVM_GUESTDBG_USE_SW_BP 0x00010000
#define KVM_GUESTDBG_USE_HW_BP 0x00020000
#define KVM_GUESTDBG_INJECT_DB 0x00040000
#define KVM_GUESTDBG_INJECT_BP 0x00080000
#define KVM_GUESTDBG_BLOCKIRQ 0x00100000

/* for KVM_SET_GUEST_DEBUG */
struct kvm_guest_debug_arch {
	__u64 debugreg[8];
};

struct kvm_pit_state {
	struct kvm_pit_channel_state channels[3];
};

#define KVM_PIT_FLAGS_HPET_LEGACY 0x00000001
#define KVM_PIT_FLAGS_SPEAKER_DATA_ON 0x00000002

struct kvm_pit_state2 {
	struct kvm_pit_channel_state channels[3];
	__u32 flags;
	__u32 reserved[9];
};

struct kvm_reinject_control {
	__u8 pit_reinject;
	__u8 reserved[31];
};

/* When set in flags, include corresponding fields on KVM_SET_VCPU_EVENTS */
#define KVM_VCPUEVENT_VALID_NMI_PENDING 0x00000001
#define KVM_VCPUEVENT_VALID_SIPI_VECTOR 0x00000002
#define KVM_VCPUEVENT_VALID_SHADOW 0x00000004
#define KVM_VCPUEVENT_VALID_SMM 0x00000008
#define KVM_VCPUEVENT_VALID_PAYLOAD 0x00000010
#define KVM_VCPUEVENT_VALID_TRIPLE_FAULT 0x00000020

/* Interrupt shadow states */
#define KVM_X86_SHADOW_INT_MOV_SS 0x01
#define KVM_X86_SHADOW_INT_STI 0x02

/* for KVM_GET/SET_VCPU_EVENTS */
struct kvm_vcpu_events {
	struct {
		__u8 injected;
		__u8 nr;
		__u8 has_error_code;
		__u8 pending;
		__u32 error_code;
	} exception;
	struct {
		__u8 injected;
		__u8 nr;
		__u8 soft;
		__u8 shadow;
	} interrupt;
	struct {
		__u8 injected;
		__u8 pending;
		__u8 masked;
		__u8 pad;
	} nmi;
	__u32 sipi_vector;
	__u32 flags;
	struct {
		__u8 smm;
		__u8 pending;
		__u8 smm_inside_nmi;
		__u8 latched_init;
	} smi;
	struct {
		__u8 pending;
	} triple_fault;
	__u8 reserved[26];
	__u8 exception_has_payload;
	__u64 exception_payload;
};

/* for KVM_GET/SET_DEBUGREGS */
struct kvm_debugregs {
	__u64 db[4];
	__u64 dr6;
	__u64 dr7;
	__u64 flags;
	__u64 reserved[9];
};

/* for KVM_CAP_XSAVE and KVM_CAP_XSAVE2 */
struct kvm_xsave {
	/*
	 * KVM_GET_XSAVE2 and KVM_SET_XSAVE write and read as many bytes
	 * as are returned by KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2)
	 * respectively, when invoked on the vm file descriptor.
	 *
	 * The size value returned by KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2)
	 * will always be at least 4096. Currently, it is only greater
	 * than 4096 if a dynamic feature has been enabled with
	 * ``arch_prctl()``, but this may change in the future.
	 *
	 * The offsets of the state save areas in struct kvm_xsave follow
	 * the contents of CPUID leaf 0xD on the host.
	 */
	__u32 region[1024];
	__u32 extra[];
};
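/*
 * Illustrative sketch (not part of this UAPI header) of the sizing rule
 * described in struct kvm_xsave above: query the buffer size on the VM fd,
 * then issue KVM_GET_XSAVE2 on the vCPU fd. Assumes vm_fd and vcpu_fd came
 * from KVM_CREATE_VM and KVM_CREATE_VCPU.
 *
 *	int size = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_XSAVE2);
 *	struct kvm_xsave *xsave = calloc(1, size > 4096 ? size : 4096);
 *
 *	ioctl(vcpu_fd, KVM_GET_XSAVE2, xsave);
 */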
#define KVM_MAX_XCRS 16

struct kvm_xcr {
	__u32 xcr;
	__u32 reserved;
	__u64 value;
};

struct kvm_xcrs {
	__u32 nr_xcrs;
	__u32 flags;
	struct kvm_xcr xcrs[KVM_MAX_XCRS];
	__u64 padding[16];
};

#define KVM_X86_REG_TYPE_MSR 2
#define KVM_X86_REG_TYPE_KVM 3

#define KVM_X86_KVM_REG_SIZE(reg)					\
({									\
	reg == KVM_REG_GUEST_SSP ? KVM_REG_SIZE_U64 : 0;		\
})

#define KVM_X86_REG_TYPE_SIZE(type, reg)				\
({									\
	__u64 type_size = (__u64)type << 32;				\
									\
	type_size |= type == KVM_X86_REG_TYPE_MSR ? KVM_REG_SIZE_U64 :	\
		     type == KVM_X86_REG_TYPE_KVM ? KVM_X86_KVM_REG_SIZE(reg) : \
		     0;							\
	type_size;							\
})

#define KVM_X86_REG_ID(type, index) \
	(KVM_REG_X86 | KVM_X86_REG_TYPE_SIZE(type, index) | index)

#define KVM_X86_REG_MSR(index) \
	KVM_X86_REG_ID(KVM_X86_REG_TYPE_MSR, index)
#define KVM_X86_REG_KVM(index) \
	KVM_X86_REG_ID(KVM_X86_REG_TYPE_KVM, index)

/* KVM-defined registers starting from 0 */
#define KVM_REG_GUEST_SSP 0

#define KVM_SYNC_X86_REGS (1UL << 0)
#define KVM_SYNC_X86_SREGS (1UL << 1)
#define KVM_SYNC_X86_EVENTS (1UL << 2)

#define KVM_SYNC_X86_VALID_FIELDS \
	(KVM_SYNC_X86_REGS| \
	 KVM_SYNC_X86_SREGS| \
	 KVM_SYNC_X86_EVENTS)

/* kvm_sync_regs struct included by kvm_run struct */
struct kvm_sync_regs {
	/* Members of this structure are potentially malicious.
	 * Care must be taken by code reading, esp. interpreting,
	 * data fields from them inside KVM to prevent TOCTOU and
	 * double-fetch types of vulnerabilities.
	 */
	struct kvm_regs regs;
	struct kvm_sregs sregs;
	struct kvm_vcpu_events events;
};
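/*
 * Illustrative sketch (not part of this UAPI header): with KVM_CAP_SYNC_REGS,
 * userspace can have KVM mirror this state through the mmap'ed kvm_run page
 * instead of issuing separate KVM_GET_REGS/KVM_SET_REGS calls; kvm_dirty_regs
 * works the same way in the other direction. "run" is assumed to be the
 * mapped struct kvm_run of the vCPU.
 *
 *	run->kvm_valid_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS;
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *	__u64 rip = run->s.regs.regs.rip;
 */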
#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2)
#define KVM_X86_QUIRK_OUT_7E_INC_RIP (1 << 3)
#define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4)
#define KVM_X86_QUIRK_FIX_HYPERCALL_INSN (1 << 5)
#define KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS (1 << 6)
#define KVM_X86_QUIRK_SLOT_ZAP_ALL (1 << 7)
#define KVM_X86_QUIRK_STUFF_FEATURE_MSRS (1 << 8)
#define KVM_X86_QUIRK_IGNORE_GUEST_PAT (1 << 9)

#define KVM_STATE_NESTED_FORMAT_VMX 0
#define KVM_STATE_NESTED_FORMAT_SVM 1

#define KVM_STATE_NESTED_GUEST_MODE 0x00000001
#define KVM_STATE_NESTED_RUN_PENDING 0x00000002
#define KVM_STATE_NESTED_EVMCS 0x00000004
#define KVM_STATE_NESTED_MTF_PENDING 0x00000008
#define KVM_STATE_NESTED_GIF_SET 0x00000100

#define KVM_STATE_NESTED_SMM_GUEST_MODE 0x00000001
#define KVM_STATE_NESTED_SMM_VMXON 0x00000002

#define KVM_STATE_NESTED_VMX_VMCS_SIZE 0x1000

#define KVM_STATE_NESTED_SVM_VMCB_SIZE 0x1000

#define KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE 0x00000001

/* vendor-independent attributes for system fd (group 0) */
#define KVM_X86_GRP_SYSTEM 0
# define KVM_X86_XCOMP_GUEST_SUPP 0

/* vendor-specific groups and attributes for system fd */
#define KVM_X86_GRP_SEV 1
# define KVM_X86_SEV_VMSA_FEATURES 0
# define KVM_X86_SNP_POLICY_BITS 1

struct kvm_vmx_nested_state_data {
	__u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
	__u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
};

struct kvm_vmx_nested_state_hdr {
	__u64 vmxon_pa;
	__u64 vmcs12_pa;

	struct {
		__u16 flags;
	} smm;

	__u16 pad;

	__u32 flags;
	__u64 preemption_timer_deadline;
};

struct kvm_svm_nested_state_data {
	/* Save area only used if KVM_STATE_NESTED_RUN_PENDING. */
	__u8 vmcb12[KVM_STATE_NESTED_SVM_VMCB_SIZE];
};

struct kvm_svm_nested_state_hdr {
	__u64 vmcb_pa;
};

/* for KVM_CAP_NESTED_STATE */
struct kvm_nested_state {
	__u16 flags;
	__u16 format;
	__u32 size;

	union {
		struct kvm_vmx_nested_state_hdr vmx;
		struct kvm_svm_nested_state_hdr svm;

		/* Pad the header to 128 bytes. */
		__u8 pad[120];
	} hdr;

	/*
	 * Define data region as 0 bytes to preserve backwards-compatibility
	 * with the old definition of kvm_nested_state in order to avoid
	 * changing the KVM_{GET,PUT}_NESTED_STATE ioctl values.
	 */
	union {
		__DECLARE_FLEX_ARRAY(struct kvm_vmx_nested_state_data, vmx);
		__DECLARE_FLEX_ARRAY(struct kvm_svm_nested_state_data, svm);
	} data;
};

/* for KVM_CAP_PMU_EVENT_FILTER */
struct kvm_pmu_event_filter {
	__u32 action;
	__u32 nevents;
	__u32 fixed_counter_bitmap;
	__u32 flags;
	__u32 pad[4];
	__u64 events[];
};

#define KVM_PMU_EVENT_ALLOW 0
#define KVM_PMU_EVENT_DENY 1

#define KVM_PMU_EVENT_FLAG_MASKED_EVENTS _BITUL(0)
#define KVM_PMU_EVENT_FLAGS_VALID_MASK (KVM_PMU_EVENT_FLAG_MASKED_EVENTS)
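/*
 * Illustrative sketch (not part of this UAPI header): building an allow-list
 * filter for the KVM_SET_PMU_EVENT_FILTER VM ioctl from <linux/kvm.h>. The
 * single event value 0xc0 is only a placeholder event select; "events" is a
 * flexible array, so it is allocated together with the header.
 *
 *	struct kvm_pmu_event_filter *filter;
 *
 *	filter = calloc(1, sizeof(*filter) + sizeof(__u64));
 *	filter->action = KVM_PMU_EVENT_ALLOW;
 *	filter->nevents = 1;
 *	filter->events[0] = 0xc0;
 *	ioctl(vm_fd, KVM_SET_PMU_EVENT_FILTER, filter);
 */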
/* for KVM_CAP_MCE */
struct kvm_x86_mce {
	__u64 status;
	__u64 addr;
	__u64 misc;
	__u64 mcg_status;
	__u8 bank;
	__u8 pad1[7];
	__u64 pad2[3];
};

/* for KVM_CAP_XEN_HVM */
#define KVM_XEN_HVM_CONFIG_HYPERCALL_MSR (1 << 0)
#define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL (1 << 1)
#define KVM_XEN_HVM_CONFIG_SHARED_INFO (1 << 2)
#define KVM_XEN_HVM_CONFIG_RUNSTATE (1 << 3)
#define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL (1 << 4)
#define KVM_XEN_HVM_CONFIG_EVTCHN_SEND (1 << 5)
#define KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG (1 << 6)
#define KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE (1 << 7)
#define KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA (1 << 8)

#define KVM_XEN_MSR_MIN_INDEX 0x40000000u
#define KVM_XEN_MSR_MAX_INDEX 0x4fffffffu

struct kvm_xen_hvm_config {
	__u32 flags;
	__u32 msr;
	__u64 blob_addr_32;
	__u64 blob_addr_64;
	__u8 blob_size_32;
	__u8 blob_size_64;
	__u8 pad2[30];
};

struct kvm_xen_hvm_attr {
	__u16 type;
	__u16 pad[3];
	union {
		__u8 long_mode;
		__u8 vector;
		__u8 runstate_update_flag;
		union {
			__u64 gfn;
#define KVM_XEN_INVALID_GFN ((__u64)-1)
			__u64 hva;
		} shared_info;
		struct {
			__u32 send_port;
			__u32 type; /* EVTCHNSTAT_ipi / EVTCHNSTAT_interdomain */
			__u32 flags;
#define KVM_XEN_EVTCHN_DEASSIGN (1 << 0)
#define KVM_XEN_EVTCHN_UPDATE (1 << 1)
#define KVM_XEN_EVTCHN_RESET (1 << 2)
			/*
			 * Events sent by the guest are either looped back to
			 * the guest itself (potentially on a different port#)
			 * or signalled via an eventfd.
			 */
			union {
				struct {
					__u32 port;
					__u32 vcpu;
					__u32 priority;
				} port;
				struct {
					__u32 port; /* Zero for eventfd */
					__s32 fd;
				} eventfd;
				__u32 padding[4];
			} deliver;
		} evtchn;
		__u32 xen_version;
		__u64 pad[8];
	} u;
};


/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */
#define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0
#define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x1
#define KVM_XEN_ATTR_TYPE_UPCALL_VECTOR 0x2
/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
#define KVM_XEN_ATTR_TYPE_EVTCHN 0x3
#define KVM_XEN_ATTR_TYPE_XEN_VERSION 0x4
/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG */
#define KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG 0x5
/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA */
#define KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA 0x6

struct kvm_xen_vcpu_attr {
	__u16 type;
	__u16 pad[3];
	union {
		__u64 gpa;
#define KVM_XEN_INVALID_GPA ((__u64)-1)
		__u64 hva;
		__u64 pad[8];
		struct {
			__u64 state;
			__u64 state_entry_time;
			__u64 time_running;
			__u64 time_runnable;
			__u64 time_blocked;
			__u64 time_offline;
		} runstate;
		__u32 vcpu_id;
		struct {
			__u32 port;
			__u32 priority;
			__u64 expires_ns;
		} timer;
		__u8 vector;
	} u;
};

/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */
#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO 0x0
#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO 0x1
#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR 0x2
#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT 0x3
#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA 0x4
#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST 0x5
/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID 0x6
#define KVM_XEN_VCPU_ATTR_TYPE_TIMER 0x7
#define KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR 0x8
/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA */
#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO_HVA 0x9

/* Secure Encrypted Virtualization command */
enum sev_cmd_id {
	/* Guest initialization commands */
	KVM_SEV_INIT = 0,
	KVM_SEV_ES_INIT,
	/* Guest launch commands */
	KVM_SEV_LAUNCH_START,
	KVM_SEV_LAUNCH_UPDATE_DATA,
	KVM_SEV_LAUNCH_UPDATE_VMSA,
	KVM_SEV_LAUNCH_SECRET,
	KVM_SEV_LAUNCH_MEASURE,
	KVM_SEV_LAUNCH_FINISH,
	/* Guest migration commands (outgoing) */
	KVM_SEV_SEND_START,
	KVM_SEV_SEND_UPDATE_DATA,
	KVM_SEV_SEND_UPDATE_VMSA,
	KVM_SEV_SEND_FINISH,
	/* Guest migration commands (incoming) */
	KVM_SEV_RECEIVE_START,
	KVM_SEV_RECEIVE_UPDATE_DATA,
	KVM_SEV_RECEIVE_UPDATE_VMSA,
	KVM_SEV_RECEIVE_FINISH,
	/* Guest status and debug commands */
	KVM_SEV_GUEST_STATUS,
	KVM_SEV_DBG_DECRYPT,
	KVM_SEV_DBG_ENCRYPT,
	/* Guest certificates commands */
	KVM_SEV_CERT_EXPORT,
	/* Attestation report */
	KVM_SEV_GET_ATTESTATION_REPORT,
	/* Guest Migration Extension */
	KVM_SEV_SEND_CANCEL,

	/* Second time is the charm; improved versions of the above ioctls. */
	KVM_SEV_INIT2,

	/* SNP-specific commands */
	KVM_SEV_SNP_LAUNCH_START = 100,
	KVM_SEV_SNP_LAUNCH_UPDATE,
	KVM_SEV_SNP_LAUNCH_FINISH,

	KVM_SEV_NR_MAX,
};

struct kvm_sev_cmd {
	__u32 id;
	__u32 pad0;
	__u64 data;
	__u32 error;
	__u32 sev_fd;
};

struct kvm_sev_init {
	__u64 vmsa_features;
	__u32 flags;
	__u16 ghcb_version;
	__u16 pad1;
	__u32 pad2[8];
};

struct kvm_sev_launch_start {
	__u32 handle;
	__u32 policy;
	__u64 dh_uaddr;
	__u32 dh_len;
	__u32 pad0;
	__u64 session_uaddr;
	__u32 session_len;
	__u32 pad1;
};

struct kvm_sev_launch_update_data {
	__u64 uaddr;
	__u32 len;
	__u32 pad0;
};


struct kvm_sev_launch_secret {
	__u64 hdr_uaddr;
	__u32 hdr_len;
	__u32 pad0;
	__u64 guest_uaddr;
	__u32 guest_len;
	__u32 pad1;
	__u64 trans_uaddr;
	__u32 trans_len;
	__u32 pad2;
};

struct kvm_sev_launch_measure {
	__u64 uaddr;
	__u32 len;
	__u32 pad0;
};

struct kvm_sev_guest_status {
	__u32 handle;
	__u32 policy;
	__u32 state;
};

struct kvm_sev_dbg {
	__u64 src_uaddr;
	__u64 dst_uaddr;
	__u32 len;
	__u32 pad0;
};

struct kvm_sev_attestation_report {
	__u8 mnonce[16];
	__u64 uaddr;
	__u32 len;
	__u32 pad0;
};

struct kvm_sev_send_start {
	__u32 policy;
	__u32 pad0;
	__u64 pdh_cert_uaddr;
	__u32 pdh_cert_len;
	__u32 pad1;
	__u64 plat_certs_uaddr;
	__u32 plat_certs_len;
	__u32 pad2;
	__u64 amd_certs_uaddr;
	__u32 amd_certs_len;
	__u32 pad3;
	__u64 session_uaddr;
	__u32 session_len;
	__u32 pad4;
};

struct kvm_sev_send_update_data {
	__u64 hdr_uaddr;
	__u32 hdr_len;
	__u32 pad0;
	__u64 guest_uaddr;
	__u32 guest_len;
	__u32 pad1;
	__u64 trans_uaddr;
	__u32 trans_len;
	__u32 pad2;
};

struct kvm_sev_receive_start {
	__u32 handle;
	__u32 policy;
	__u64 pdh_uaddr;
	__u32 pdh_len;
	__u32 pad0;
	__u64 session_uaddr;
	__u32 session_len;
	__u32 pad1;
};

struct kvm_sev_receive_update_data {
	__u64 hdr_uaddr;
	__u32 hdr_len;
	__u32 pad0;
	__u64 guest_uaddr;
	__u32 guest_len;
	__u32 pad1;
	__u64 trans_uaddr;
	__u32 trans_len;
	__u32 pad2;
};

struct kvm_sev_snp_launch_start {
	__u64 policy;
	__u8 gosvw[16];
	__u16 flags;
	__u8 pad0[6];
	__u64 pad1[4];
};

/* Kept in sync with firmware values for simplicity. */
#define KVM_SEV_PAGE_TYPE_INVALID 0x0
#define KVM_SEV_SNP_PAGE_TYPE_NORMAL 0x1
#define KVM_SEV_SNP_PAGE_TYPE_ZERO 0x3
#define KVM_SEV_SNP_PAGE_TYPE_UNMEASURED 0x4
#define KVM_SEV_SNP_PAGE_TYPE_SECRETS 0x5
#define KVM_SEV_SNP_PAGE_TYPE_CPUID 0x6

struct kvm_sev_snp_launch_update {
	__u64 gfn_start;
	__u64 uaddr;
	__u64 len;
	__u8 type;
	__u8 pad0;
	__u16 flags;
	__u32 pad1;
	__u64 pad2[4];
};

#define KVM_SEV_SNP_ID_BLOCK_SIZE 96
#define KVM_SEV_SNP_ID_AUTH_SIZE 4096
#define KVM_SEV_SNP_FINISH_DATA_SIZE 32

struct kvm_sev_snp_launch_finish {
	__u64 id_block_uaddr;
	__u64 id_auth_uaddr;
	__u8 id_block_en;
	__u8 auth_key_en;
	__u8 vcek_disabled;
	__u8 host_data[KVM_SEV_SNP_FINISH_DATA_SIZE];
	__u8 pad0[3];
	__u16 flags;
	__u64 pad1[4];
};

#define KVM_X2APIC_API_USE_32BIT_IDS (1ULL << 0)
#define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK (1ULL << 1)

struct kvm_hyperv_eventfd {
	__u32 conn_id;
	__s32 fd;
	__u32 flags;
	__u32 padding[3];
};

#define KVM_HYPERV_CONN_ID_MASK 0x00ffffff
#define KVM_HYPERV_EVENTFD_DEASSIGN (1 << 0)

/*
 * Masked event layout.
 * Bits   Description
 * ----   -----------
 * 7:0    event select (low bits)
 * 15:8   umask match
 * 31:16  unused
 * 35:32  event select (high bits)
 * 36:54  unused
 * 55     exclude bit
 * 63:56  umask mask
 */

#define KVM_PMU_ENCODE_MASKED_ENTRY(event_select, mask, match, exclude) \
	(((event_select) & 0xFFULL) | (((event_select) & 0XF00ULL) << 24) | \
	(((mask) & 0xFFULL) << 56) | \
	(((match) & 0xFFULL) << 8) | \
	((__u64)(!!(exclude)) << 55))

#define KVM_PMU_MASKED_ENTRY_EVENT_SELECT \
	(__GENMASK_ULL(7, 0) | __GENMASK_ULL(35, 32))
#define KVM_PMU_MASKED_ENTRY_UMASK_MASK (__GENMASK_ULL(63, 56))
#define KVM_PMU_MASKED_ENTRY_UMASK_MATCH (__GENMASK_ULL(15, 8))
#define KVM_PMU_MASKED_ENTRY_EXCLUDE (_BITULL(55))
#define KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT (56)
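/*
 * Illustrative sketch (not part of this UAPI header): encoding one entry for
 * a filter created with KVM_PMU_EVENT_FLAG_MASKED_EVENTS. The macro packs the
 * event select, the umask mask/match pair and the exclude bit per the layout
 * above; the specific values here are placeholders.
 *
 *	__u64 entry = KVM_PMU_ENCODE_MASKED_ENTRY(0xc0, 0xff, 0x00, 0);
 */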
/* for KVM_{GET,SET,HAS}_DEVICE_ATTR */
#define KVM_VCPU_TSC_CTRL 0 /* control group for the timestamp counter (TSC) */
#define KVM_VCPU_TSC_OFFSET 0 /* attribute for the TSC offset */

/* x86-specific KVM_EXIT_HYPERCALL flags. */
#define KVM_EXIT_HYPERCALL_LONG_MODE _BITULL(0)

#define KVM_X86_DEFAULT_VM 0
#define KVM_X86_SW_PROTECTED_VM 1
#define KVM_X86_SEV_VM 2
#define KVM_X86_SEV_ES_VM 3
#define KVM_X86_SNP_VM 4
#define KVM_X86_TDX_VM 5
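/*
 * Illustrative sketch (not part of this UAPI header): when KVM_CAP_VM_TYPES
 * advertises one of the VM types above, it is passed as the KVM_CREATE_VM
 * argument. Assumes kvm_fd is an open /dev/kvm descriptor.
 *
 *	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_X86_SW_PROTECTED_VM);
 */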
/* Trust Domain eXtension sub-ioctl() commands. */
enum kvm_tdx_cmd_id {
	KVM_TDX_CAPABILITIES = 0,
	KVM_TDX_INIT_VM,
	KVM_TDX_INIT_VCPU,
	KVM_TDX_INIT_MEM_REGION,
	KVM_TDX_FINALIZE_VM,
	KVM_TDX_GET_CPUID,

	KVM_TDX_CMD_NR_MAX,
};

struct kvm_tdx_cmd {
	/* enum kvm_tdx_cmd_id */
	__u32 id;
	/* flags for the sub-command. If the sub-command doesn't use this, set zero. */
	__u32 flags;
	/*
	 * data for each sub-command. An immediate or a pointer to the actual
	 * data in process virtual address. If the sub-command doesn't use it,
	 * set zero.
	 */
	__u64 data;
	/*
	 * Auxiliary error code. The sub-command may return a TDX SEAMCALL
	 * status code in addition to -Exxx.
	 */
	__u64 hw_error;
};

struct kvm_tdx_capabilities {
	__u64 supported_attrs;
	__u64 supported_xfam;

	__u64 kernel_tdvmcallinfo_1_r11;
	__u64 user_tdvmcallinfo_1_r11;
	__u64 kernel_tdvmcallinfo_1_r12;
	__u64 user_tdvmcallinfo_1_r12;

	__u64 reserved[250];

	/* Configurable CPUID bits for userspace */
	struct kvm_cpuid2 cpuid;
};

struct kvm_tdx_init_vm {
	__u64 attributes;
	__u64 xfam;
	__u64 mrconfigid[6];	/* sha384 digest */
	__u64 mrowner[6];	/* sha384 digest */
	__u64 mrownerconfig[6];	/* sha384 digest */

	/* The total space for TD_PARAMS before the CPUIDs is 256 bytes */
	__u64 reserved[12];

	/*
	 * Call KVM_TDX_INIT_VM before vcpu creation, thus before
	 * KVM_SET_CPUID2.
	 * This configuration supersedes KVM_SET_CPUID2s for VCPUs because the
	 * TDX module directly virtualizes those CPUIDs without VMM. The user
	 * space VMM, e.g. qemu, should make KVM_SET_CPUID2 consistent with
	 * these values. If it doesn't, KVM may have the wrong idea of the
	 * guest's CPUIDs and may wrongly emulate CPUIDs or MSRs that the TDX
	 * module doesn't virtualize.
	 */
	struct kvm_cpuid2 cpuid;
};

#define KVM_TDX_MEASURE_MEMORY_REGION _BITULL(0)

struct kvm_tdx_init_mem_region {
	__u64 source_addr;
	__u64 gpa;
	__u64 nr_pages;
};

#endif /* _ASM_X86_KVM_H */