GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/kvm/vmx/vmx.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/kvm.h>
#include <asm/intel_pt.h>
#include <asm/perf_event.h>
#include <asm/posted_intr.h>

#include "capabilities.h"
#include "../kvm_cache_regs.h"
#include "pmu_intel.h"
#include "vmcs.h"
#include "vmx_ops.h"
#include "../cpuid.h"
#include "run_flags.h"
#include "../mmu.h"
#include "common.h"

#ifdef CONFIG_X86_64
#define MAX_NR_USER_RETURN_MSRS 7
#else
#define MAX_NR_USER_RETURN_MSRS 4
#endif

#define MAX_NR_LOADSTORE_MSRS 8

struct vmx_msrs {
        unsigned int nr;
        struct vmx_msr_entry val[MAX_NR_LOADSTORE_MSRS];
};

struct vmx_uret_msr {
        bool load_into_hardware;
        u64 data;
        u64 mask;
};

enum segment_cache_field {
        SEG_FIELD_SEL = 0,
        SEG_FIELD_BASE = 1,
        SEG_FIELD_LIMIT = 2,
        SEG_FIELD_AR = 3,

        SEG_FIELD_NR = 4
};

#define RTIT_ADDR_RANGE 4

struct pt_ctx {
        u64 ctl;
        u64 status;
        u64 output_base;
        u64 output_mask;
        u64 cr3_match;
        u64 addr_a[RTIT_ADDR_RANGE];
        u64 addr_b[RTIT_ADDR_RANGE];
};

struct pt_desc {
        u64 ctl_bitmask;
        u32 num_address_ranges;
        u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
        struct pt_ctx host;
        struct pt_ctx guest;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
        /* Has the level1 guest done vmxon? */
        bool vmxon;
        gpa_t vmxon_ptr;
        bool pml_full;

        /* The guest-physical address of the current VMCS L1 keeps for L2 */
        gpa_t current_vmptr;
        /*
         * Cache of the guest's VMCS, existing outside of guest memory.
         * Loaded from guest memory during VMPTRLD. Flushed to guest
         * memory during VMCLEAR and VMPTRLD.
         */
        struct vmcs12 *cached_vmcs12;
        /*
         * Cache of the guest's shadow VMCS, existing outside of guest
         * memory. Loaded from guest memory during VM entry. Flushed
         * to guest memory during VM exit.
         */
        struct vmcs12 *cached_shadow_vmcs12;

        /*
         * GPA to HVA cache for accessing vmcs12->vmcs_link_pointer
         */
        struct gfn_to_hva_cache shadow_vmcs12_cache;

        /*
         * GPA to HVA cache for VMCS12
         */
        struct gfn_to_hva_cache vmcs12_cache;

        /*
         * Indicates if the shadow vmcs or enlightened vmcs must be updated
         * with the data held by struct vmcs12.
         */
        bool need_vmcs12_to_shadow_sync;
        bool dirty_vmcs12;

        /*
         * Indicates whether MSR bitmap for L2 needs to be rebuilt due to
         * changes in MSR bitmap for L1 or switching to a different L2. Note,
         * this flag can only be used reliably in conjunction with a paravirt L1
         * which informs L0 whether any changes to MSR bitmap for L2 were done
         * on its side.
         */
        bool force_msr_bitmap_recalc;

        /*
         * Indicates lazily loaded guest state has not yet been decached from
         * vmcs02.
         */
        bool need_sync_vmcs02_to_vmcs12_rare;

        /*
         * vmcs02 has been initialized, i.e. state that is constant for
         * vmcs02 has been written to the backing VMCS. Initialization
         * is delayed until L1 actually attempts to run a nested VM.
         */
        bool vmcs02_initialized;

        bool change_vmcs01_virtual_apic_mode;
        bool reload_vmcs01_apic_access_page;
        bool update_vmcs01_cpu_dirty_logging;
        bool update_vmcs01_apicv_status;
        bool update_vmcs01_hwapic_isr;

        /*
         * Enlightened VMCS has been enabled. It does not mean that L1 has to
         * use it. However, VMX features available to L1 will be limited based
         * on what the enlightened VMCS supports.
         */
        bool enlightened_vmcs_enabled;

        /* L2 must run next, and mustn't decide to exit to L1. */
        bool nested_run_pending;

        /* Pending MTF VM-exit into L1. */
        bool mtf_pending;

        struct loaded_vmcs vmcs02;

        /*
         * Guest pages referred to in the vmcs02 with host-physical
         * pointers, so we must keep them pinned while L2 runs.
         */
        struct kvm_host_map apic_access_page_map;
        struct kvm_host_map virtual_apic_map;
        struct kvm_host_map pi_desc_map;

        struct pi_desc *pi_desc;
        bool pi_pending;
        u16 posted_intr_nv;

        struct hrtimer preemption_timer;
        u64 preemption_timer_deadline;
        bool has_preemption_timer_deadline;
        bool preemption_timer_expired;

        /*
         * Used to snapshot MSRs that are conditionally loaded on VM-Enter in
         * order to propagate the guest's pre-VM-Enter value into vmcs02. For
         * emulation of VMLAUNCH/VMRESUME, the snapshot will be of L1's value.
         * For KVM_SET_NESTED_STATE, the snapshot is of L2's value, _if_
         * userspace restores MSRs before nested state. If userspace restores
         * MSRs after nested state, the snapshot holds garbage, but KVM can't
         * detect that, and the garbage value in vmcs02 will be overwritten by
         * MSR restoration in any case.
         */
        u64 pre_vmenter_debugctl;
        u64 pre_vmenter_bndcfgs;
        u64 pre_vmenter_s_cet;
        u64 pre_vmenter_ssp;
        u64 pre_vmenter_ssp_tbl;

        /* to migrate it to L1 if L2 writes to L1's CR8 directly */
        int l1_tpr_threshold;

        u16 vpid02;
        u16 last_vpid;

        struct nested_vmx_msrs msrs;

        /* SMM related state */
        struct {
                /* in VMX operation on SMM entry? */
                bool vmxon;
                /* in guest mode on SMM entry? */
                bool guest_mode;
        } smm;

#ifdef CONFIG_KVM_HYPERV
        gpa_t hv_evmcs_vmptr;
        struct kvm_host_map hv_evmcs_map;
        struct hv_enlightened_vmcs *hv_evmcs;
#endif
};

struct vcpu_vmx {
        struct kvm_vcpu vcpu;
        struct vcpu_vt vt;
        u8 fail;
        u8 x2apic_msr_bitmap_mode;

        u32 idt_vectoring_info;
        ulong rflags;

        /*
         * User return MSRs are always emulated when enabled in the guest, but
         * only loaded into hardware when necessary, e.g. SYSCALL #UDs outside
         * of 64-bit mode or if EFER.SCE=1, thus the SYSCALL MSRs don't need to
         * be loaded into hardware if those conditions aren't met.
         */
        struct vmx_uret_msr guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
        bool guest_uret_msrs_loaded;
#ifdef CONFIG_X86_64
        u64 msr_guest_kernel_gs_base;
#endif

        u64 spec_ctrl;
        u32 msr_ia32_umwait_control;

        /*
         * loaded_vmcs points to the VMCS currently used in this vcpu. For a
         * non-nested (L1) guest, it always points to vmcs01. For a nested
         * guest (L2), it points to a different VMCS.
         */
        struct loaded_vmcs vmcs01;
        struct loaded_vmcs *loaded_vmcs;

        struct msr_autoload {
                struct vmx_msrs guest;
                struct vmx_msrs host;
        } msr_autoload;

        struct msr_autostore {
                struct vmx_msrs guest;
        } msr_autostore;

        struct {
                int vm86_active;
                ulong save_rflags;
                struct kvm_segment segs[8];
        } rmode;
        struct {
                u32 bitmask; /* 4 bits per segment (1 bit per field) */
                struct kvm_save_segment {
                        u16 selector;
                        unsigned long base;
                        u32 limit;
                        u32 ar;
                } seg[8];
        } segment_cache;
        int vpid;

        /* Support for a guest hypervisor (nested VMX) */
        struct nested_vmx nested;

        /* Dynamic PLE window. */
        unsigned int ple_window;
        bool ple_window_dirty;

        /* Support for PML */
#define PML_LOG_NR_ENTRIES 512
        /* PML is written backwards: this is the first entry written by the CPU */
#define PML_HEAD_INDEX (PML_LOG_NR_ENTRIES-1)

        struct page *pml_pg;

        /* apic deadline value in host tsc */
        u64 hv_deadline_tsc;

        /*
         * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
         * msr_ia32_feature_control. FEAT_CTL_LOCKED is always included
         * in msr_ia32_feature_control_valid_bits.
         */
        u64 msr_ia32_feature_control;
        u64 msr_ia32_feature_control_valid_bits;
        /* SGX Launch Control public key hash */
        u64 msr_ia32_sgxlepubkeyhash[4];
        u64 msr_ia32_mcu_opt_ctrl;
        bool disable_fb_clear;

        struct pt_desc pt_desc;
        struct lbr_desc lbr_desc;

        /* ve_info must be page aligned. */
        struct vmx_ve_information *ve_info;
};

struct kvm_vmx {
        struct kvm kvm;

        unsigned int tss_addr;
        bool ept_identity_pagetable_done;
        gpa_t ept_identity_map_addr;
        /* Posted Interrupt Descriptor (PID) table for IPI virtualization */
        u64 *pid_table;
};

static __always_inline struct vcpu_vt *to_vt(struct kvm_vcpu *vcpu)
{
        return &(container_of(vcpu, struct vcpu_vmx, vcpu)->vt);
}

static __always_inline struct kvm_vcpu *vt_to_vcpu(struct vcpu_vt *vt)
{
        return &(container_of(vt, struct vcpu_vmx, vt)->vcpu);
}

static __always_inline union vmx_exit_reason vmx_get_exit_reason(struct kvm_vcpu *vcpu)
{
        return to_vt(vcpu)->exit_reason;
}

static __always_inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
{
        struct vcpu_vt *vt = to_vt(vcpu);

        if (!kvm_register_test_and_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1) &&
            !WARN_ON_ONCE(is_td_vcpu(vcpu)))
                vt->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);

        return vt->exit_qualification;
}

static __always_inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
{
        struct vcpu_vt *vt = to_vt(vcpu);

        if (!kvm_register_test_and_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2) &&
            !WARN_ON_ONCE(is_td_vcpu(vcpu)))
                vt->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);

        return vt->exit_intr_info;
}

void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
                        unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
int vmx_get_cpl_no_cache(struct kvm_vcpu *vcpu);
bool vmx_emulation_required(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);

bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu);
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
bool __vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
void vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx, unsigned int flags);
unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx);
bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs,
                    unsigned int flags);
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);

void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool set);

static inline void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu,
                                                 u32 msr, int type)
{
        vmx_set_intercept_for_msr(vcpu, msr, type, false);
}

static inline void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu,
                                                u32 msr, int type)
{
        vmx_set_intercept_for_msr(vcpu, msr, type, true);
}
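
/*
 * Illustrative sketch: the wrappers above simply forward to
 * vmx_set_intercept_for_msr() with interception enabled or disabled.  A
 * hypothetical helper (vmx_example_set_msr_intercept() is not part of KVM)
 * that toggles interception based on a boolean could be written as:
 */
static inline void vmx_example_set_msr_intercept(struct kvm_vcpu *vcpu,
                                                 u32 msr, int type,
                                                 bool intercept)
{
        /* Route to the enable/disable wrapper based on @intercept. */
        if (intercept)
                vmx_enable_intercept_for_msr(vcpu, msr, type);
        else
                vmx_disable_intercept_for_msr(vcpu, msr, type);
}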

u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);

gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags);

void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);

u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated);
bool vmx_is_valid_debugctl(struct kvm_vcpu *vcpu, u64 data, bool host_initiated);

#define VMX_HOST_OWNED_DEBUGCTL_BITS (DEBUGCTLMSR_FREEZE_IN_SMM)

static inline void vmx_guest_debugctl_write(struct kvm_vcpu *vcpu, u64 val)
{
        WARN_ON_ONCE(val & VMX_HOST_OWNED_DEBUGCTL_BITS);

        val |= vcpu->arch.host_debugctl & VMX_HOST_OWNED_DEBUGCTL_BITS;
        vmcs_write64(GUEST_IA32_DEBUGCTL, val);
}

static inline u64 vmx_guest_debugctl_read(void)
{
        return vmcs_read64(GUEST_IA32_DEBUGCTL) & ~VMX_HOST_OWNED_DEBUGCTL_BITS;
}

static inline void vmx_reload_guest_debugctl(struct kvm_vcpu *vcpu)
{
        u64 val = vmcs_read64(GUEST_IA32_DEBUGCTL);

        if (!((val ^ vcpu->arch.host_debugctl) & VMX_HOST_OWNED_DEBUGCTL_BITS))
                return;

        vmx_guest_debugctl_write(vcpu, val & ~VMX_HOST_OWNED_DEBUGCTL_BITS);
}
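
/*
 * Illustrative sketch: the helpers above keep the host-owned DEBUGCTL bits
 * (currently only DEBUGCTLMSR_FREEZE_IN_SMM) out of the guest's view.
 * vmx_guest_debugctl_write() ORs the host-owned bits back in before the
 * VMCS write, and vmx_guest_debugctl_read() strips them again.  A
 * hypothetical WRMSR emulation step (vmx_example_emulate_debugctl_wrmsr()
 * is not part of KVM) might therefore mask the host-owned bits off first:
 */
static inline void vmx_example_emulate_debugctl_wrmsr(struct kvm_vcpu *vcpu,
                                                      u64 data)
{
        /* The guest-supplied value must never carry host-owned bits. */
        vmx_guest_debugctl_write(vcpu, data & ~VMX_HOST_OWNED_DEBUGCTL_BITS);
}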

/*
 * Note, early Intel manuals have the write-low and read-high bitmap offsets
 * the wrong way round. The bitmaps control MSRs 0x00000000-0x00001fff and
 * 0xc0000000-0xc0001fff. The former (low) uses bytes 0-0x3ff for reads and
 * 0x800-0xbff for writes. The latter (high) uses 0x400-0x7ff for reads and
 * 0xc00-0xfff for writes. MSRs not covered by either of the ranges always
 * VM-Exit.
 */
#define __BUILD_VMX_MSR_BITMAP_HELPER(rtype, action, bitop, access, base) \
static inline rtype vmx_##action##_msr_bitmap_##access(unsigned long *bitmap, \
                                                        u32 msr) \
{ \
        int f = sizeof(unsigned long); \
\
        if (msr <= 0x1fff) \
                return bitop##_bit(msr, bitmap + base / f); \
        else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) \
                return bitop##_bit(msr & 0x1fff, bitmap + (base + 0x400) / f); \
        return (rtype)true; \
}
#define BUILD_VMX_MSR_BITMAP_HELPERS(ret_type, action, bitop) \
        __BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, read, 0x0) \
        __BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, write, 0x800)

BUILD_VMX_MSR_BITMAP_HELPERS(bool, test, test)
BUILD_VMX_MSR_BITMAP_HELPERS(void, clear, __clear)
BUILD_VMX_MSR_BITMAP_HELPERS(void, set, __set)
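
/*
 * Illustrative sketch: the instantiations above expand to
 * vmx_test_msr_bitmap_{read,write}(), vmx_clear_msr_bitmap_{read,write}()
 * and vmx_set_msr_bitmap_{read,write}().  A hypothetical helper
 * (vmx_example_allow_msr_read() is not part of KVM) that lets the guest
 * read a given MSR without a VM-Exit could be written as:
 */
static inline void vmx_example_allow_msr_read(unsigned long *msr_bitmap, u32 msr)
{
        /* A clear bit in the read bitmap means reads of @msr don't exit. */
        if (vmx_test_msr_bitmap_read(msr_bitmap, msr))
                vmx_clear_msr_bitmap_read(msr_bitmap, msr);
}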

static inline u8 vmx_get_rvi(void)
{
        return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}

#define __KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS \
        (VM_ENTRY_LOAD_DEBUG_CONTROLS)
#ifdef CONFIG_X86_64
#define KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS \
        (__KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS | \
         VM_ENTRY_IA32E_MODE)
#else
#define KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS \
        __KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS
#endif
#define KVM_OPTIONAL_VMX_VM_ENTRY_CONTROLS \
        (VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | \
         VM_ENTRY_LOAD_IA32_PAT | \
         VM_ENTRY_LOAD_IA32_EFER | \
         VM_ENTRY_LOAD_BNDCFGS | \
         VM_ENTRY_PT_CONCEAL_PIP | \
         VM_ENTRY_LOAD_IA32_RTIT_CTL | \
         VM_ENTRY_LOAD_CET_STATE)

#define __KVM_REQUIRED_VMX_VM_EXIT_CONTROLS \
        (VM_EXIT_SAVE_DEBUG_CONTROLS | \
         VM_EXIT_ACK_INTR_ON_EXIT)
#ifdef CONFIG_X86_64
#define KVM_REQUIRED_VMX_VM_EXIT_CONTROLS \
        (__KVM_REQUIRED_VMX_VM_EXIT_CONTROLS | \
         VM_EXIT_HOST_ADDR_SPACE_SIZE)
#else
#define KVM_REQUIRED_VMX_VM_EXIT_CONTROLS \
        __KVM_REQUIRED_VMX_VM_EXIT_CONTROLS
#endif
#define KVM_OPTIONAL_VMX_VM_EXIT_CONTROLS \
        (VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | \
         VM_EXIT_SAVE_IA32_PAT | \
         VM_EXIT_LOAD_IA32_PAT | \
         VM_EXIT_SAVE_IA32_EFER | \
         VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | \
         VM_EXIT_LOAD_IA32_EFER | \
         VM_EXIT_CLEAR_BNDCFGS | \
         VM_EXIT_PT_CONCEAL_PIP | \
         VM_EXIT_CLEAR_IA32_RTIT_CTL | \
         VM_EXIT_LOAD_CET_STATE)

#define KVM_REQUIRED_VMX_PIN_BASED_VM_EXEC_CONTROL \
        (PIN_BASED_EXT_INTR_MASK | \
         PIN_BASED_NMI_EXITING)
#define KVM_OPTIONAL_VMX_PIN_BASED_VM_EXEC_CONTROL \
        (PIN_BASED_VIRTUAL_NMIS | \
         PIN_BASED_POSTED_INTR | \
         PIN_BASED_VMX_PREEMPTION_TIMER)

#define __KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL \
        (CPU_BASED_HLT_EXITING | \
         CPU_BASED_CR3_LOAD_EXITING | \
         CPU_BASED_CR3_STORE_EXITING | \
         CPU_BASED_UNCOND_IO_EXITING | \
         CPU_BASED_MOV_DR_EXITING | \
         CPU_BASED_USE_TSC_OFFSETTING | \
         CPU_BASED_MWAIT_EXITING | \
         CPU_BASED_MONITOR_EXITING | \
         CPU_BASED_INVLPG_EXITING | \
         CPU_BASED_RDPMC_EXITING | \
         CPU_BASED_INTR_WINDOW_EXITING)

#ifdef CONFIG_X86_64
#define KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL \
        (__KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL | \
         CPU_BASED_CR8_LOAD_EXITING | \
         CPU_BASED_CR8_STORE_EXITING)
#else
#define KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL \
        __KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL
#endif

#define KVM_OPTIONAL_VMX_CPU_BASED_VM_EXEC_CONTROL \
        (CPU_BASED_RDTSC_EXITING | \
         CPU_BASED_TPR_SHADOW | \
         CPU_BASED_USE_IO_BITMAPS | \
         CPU_BASED_MONITOR_TRAP_FLAG | \
         CPU_BASED_USE_MSR_BITMAPS | \
         CPU_BASED_NMI_WINDOW_EXITING | \
         CPU_BASED_PAUSE_EXITING | \
         CPU_BASED_ACTIVATE_SECONDARY_CONTROLS | \
         CPU_BASED_ACTIVATE_TERTIARY_CONTROLS)

#define KVM_REQUIRED_VMX_SECONDARY_VM_EXEC_CONTROL 0
#define KVM_OPTIONAL_VMX_SECONDARY_VM_EXEC_CONTROL \
        (SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | \
         SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | \
         SECONDARY_EXEC_WBINVD_EXITING | \
         SECONDARY_EXEC_ENABLE_VPID | \
         SECONDARY_EXEC_ENABLE_EPT | \
         SECONDARY_EXEC_UNRESTRICTED_GUEST | \
         SECONDARY_EXEC_PAUSE_LOOP_EXITING | \
         SECONDARY_EXEC_DESC | \
         SECONDARY_EXEC_ENABLE_RDTSCP | \
         SECONDARY_EXEC_ENABLE_INVPCID | \
         SECONDARY_EXEC_APIC_REGISTER_VIRT | \
         SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | \
         SECONDARY_EXEC_SHADOW_VMCS | \
         SECONDARY_EXEC_ENABLE_XSAVES | \
         SECONDARY_EXEC_RDSEED_EXITING | \
         SECONDARY_EXEC_RDRAND_EXITING | \
         SECONDARY_EXEC_ENABLE_PML | \
         SECONDARY_EXEC_TSC_SCALING | \
         SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | \
         SECONDARY_EXEC_PT_USE_GPA | \
         SECONDARY_EXEC_PT_CONCEAL_VMX | \
         SECONDARY_EXEC_ENABLE_VMFUNC | \
         SECONDARY_EXEC_BUS_LOCK_DETECTION | \
         SECONDARY_EXEC_NOTIFY_VM_EXITING | \
         SECONDARY_EXEC_ENCLS_EXITING | \
         SECONDARY_EXEC_EPT_VIOLATION_VE)

#define KVM_REQUIRED_VMX_TERTIARY_VM_EXEC_CONTROL 0
#define KVM_OPTIONAL_VMX_TERTIARY_VM_EXEC_CONTROL \
        (TERTIARY_EXEC_IPI_VIRT)

#define BUILD_CONTROLS_SHADOW(lname, uname, bits) \
static inline void lname##_controls_set(struct vcpu_vmx *vmx, u##bits val) \
{ \
        if (vmx->loaded_vmcs->controls_shadow.lname != val) { \
                vmcs_write##bits(uname, val); \
                vmx->loaded_vmcs->controls_shadow.lname = val; \
        } \
} \
static inline u##bits __##lname##_controls_get(struct loaded_vmcs *vmcs) \
{ \
        return vmcs->controls_shadow.lname; \
} \
static inline u##bits lname##_controls_get(struct vcpu_vmx *vmx) \
{ \
        return __##lname##_controls_get(vmx->loaded_vmcs); \
} \
static __always_inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u##bits val) \
{ \
        BUILD_BUG_ON(!(val & (KVM_REQUIRED_VMX_##uname | KVM_OPTIONAL_VMX_##uname))); \
        lname##_controls_set(vmx, lname##_controls_get(vmx) | val); \
} \
static __always_inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u##bits val) \
{ \
        BUILD_BUG_ON(!(val & (KVM_REQUIRED_VMX_##uname | KVM_OPTIONAL_VMX_##uname))); \
        lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val); \
} \
static __always_inline void lname##_controls_changebit(struct vcpu_vmx *vmx, u##bits val, \
                                                        bool set) \
{ \
        if (set) \
                lname##_controls_setbit(vmx, val); \
        else \
                lname##_controls_clearbit(vmx, val); \
}
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS, 32)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS, 32)
BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL, 32)
BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL, 32)
BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL, 32)
BUILD_CONTROLS_SHADOW(tertiary_exec, TERTIARY_VM_EXEC_CONTROL, 64)
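
/*
 * Illustrative sketch: each BUILD_CONTROLS_SHADOW() instantiation above
 * generates <name>_controls_{get,set,setbit,clearbit,changebit}() accessors
 * that cache the last value written so redundant VMWRITEs to the control
 * field are skipped.  A hypothetical helper
 * (vmx_example_toggle_preemption_timer() is not part of KVM) toggling the
 * VMX preemption timer pin-based control:
 */
static inline void vmx_example_toggle_preemption_timer(struct vcpu_vmx *vmx,
                                                       bool enable)
{
        /* Only performs a VMWRITE if the cached shadow value changes. */
        pin_controls_changebit(vmx, PIN_BASED_VMX_PREEMPTION_TIMER, enable);
}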

/*
 * VMX_REGS_LAZY_LOAD_SET - The set of registers that will be updated in the
 * cache on demand. Other registers not listed here are synced to
 * the cache immediately after VM-Exit.
 */
#define VMX_REGS_LAZY_LOAD_SET ((1 << VCPU_REGS_RIP) | \
                                (1 << VCPU_REGS_RSP) | \
                                (1 << VCPU_EXREG_RFLAGS) | \
                                (1 << VCPU_EXREG_PDPTR) | \
                                (1 << VCPU_EXREG_SEGMENTS) | \
                                (1 << VCPU_EXREG_CR0) | \
                                (1 << VCPU_EXREG_CR3) | \
                                (1 << VCPU_EXREG_CR4) | \
                                (1 << VCPU_EXREG_EXIT_INFO_1) | \
                                (1 << VCPU_EXREG_EXIT_INFO_2))

static inline unsigned long vmx_l1_guest_owned_cr0_bits(void)
{
        unsigned long bits = KVM_POSSIBLE_CR0_GUEST_BITS;

        /*
         * CR0.WP needs to be intercepted when KVM is shadowing legacy paging
         * in order to construct shadow PTEs with the correct protections.
         * Note! CR0.WP technically can be passed through to the guest if
         * paging is disabled, but checking CR0.PG would generate a cyclical
         * dependency of sorts due to forcing the caller to ensure CR0 holds
         * the correct value prior to determining which CR0 bits can be owned
         * by L1. Keep it simple and limit the optimization to EPT.
         */
        if (!enable_ept)
                bits &= ~X86_CR0_WP;
        return bits;
}

static __always_inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
        return container_of(kvm, struct kvm_vmx, kvm);
}

static __always_inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
        return container_of(vcpu, struct vcpu_vmx, vcpu);
}
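
/*
 * Illustrative sketch: to_kvm_vmx() and to_vmx() are plain container_of()
 * conversions from the generic KVM structures to their VMX-specific
 * wrappers.  A hypothetical accessor (vmx_example_get_ple_window() is not
 * part of KVM) built on top of them:
 */
static inline unsigned int vmx_example_get_ple_window(struct kvm_vcpu *vcpu)
{
        /* Look up the vCPU's current dynamic PLE window via its vcpu_vmx. */
        return to_vmx(vcpu)->ple_window;
}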

void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu);
int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu);

struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);

static inline struct vmcs *alloc_vmcs(bool shadow)
{
        return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
                              GFP_KERNEL_ACCOUNT);
}

static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{
        return secondary_exec_controls_get(vmx) &
               SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
}

static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
{
        if (!enable_ept)
                return true;

        return allow_smaller_maxphyaddr &&
               cpuid_maxphyaddr(vcpu) < kvm_host.maxphyaddr;
}

static inline bool is_unrestricted_guest(struct kvm_vcpu *vcpu)
{
        return enable_unrestricted_guest && (!is_guest_mode(vcpu) ||
            (secondary_exec_controls_get(to_vmx(vcpu)) &
             SECONDARY_EXEC_UNRESTRICTED_GUEST));
}

bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu);
static inline bool vmx_guest_state_valid(struct kvm_vcpu *vcpu)
{
        return is_unrestricted_guest(vcpu) || __vmx_guest_state_valid(vcpu);
}

void dump_vmcs(struct kvm_vcpu *vcpu);

static inline int vmx_get_instr_info_reg(u32 vmx_instr_info)
{
        return (vmx_instr_info >> 3) & 0xf;
}

static inline int vmx_get_instr_info_reg2(u32 vmx_instr_info)
{
        return (vmx_instr_info >> 28) & 0xf;
}
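
/*
 * Illustrative sketch: bits 6:3 of the VM-exit instruction-information
 * field encode the first register operand and bits 31:28 the second, so a
 * value of 0x30000008 decodes to reg == 1 and reg2 == 3.  A hypothetical
 * decode helper (vmx_example_decode_instr_info() is not part of KVM):
 */
static inline void vmx_example_decode_instr_info(u32 vmx_instr_info,
                                                 int *reg, int *reg2)
{
        /* Extract both register operand fields in one call. */
        *reg = vmx_get_instr_info_reg(vmx_instr_info);
        *reg2 = vmx_get_instr_info_reg2(vmx_instr_info);
}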

static inline bool vmx_can_use_ipiv(struct kvm_vcpu *vcpu)
{
        return lapic_in_kernel(vcpu) && enable_ipiv;
}

static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
{
        vmx->segment_cache.bitmask = 0;
}
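
/*
 * Illustrative sketch: per the "4 bits per segment (1 bit per field)"
 * comment on segment_cache.bitmask, a natural encoding (assumed here; the
 * authoritative logic lives in vmx.c) is one validity bit at position
 * (seg * SEG_FIELD_NR + field).  A hypothetical helper
 * (vmx_example_mark_seg_field_cached() is not part of KVM) marking one
 * field of one segment as cached under that assumption:
 */
static inline void vmx_example_mark_seg_field_cached(struct vcpu_vmx *vmx,
                                                     unsigned int seg,
                                                     enum segment_cache_field field)
{
        /* Assumed layout: one bit per (segment, field) pair. */
        vmx->segment_cache.bitmask |= 1u << (seg * SEG_FIELD_NR + field);
}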

int vmx_init(void);
void vmx_exit(void);

#endif /* __KVM_X86_VMX_H */