GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/kvm/x86.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include <asm/fpu/xstate.h>
#include <asm/mce.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include "cpuid.h"

#define KVM_MAX_MCE_BANKS 32

struct kvm_caps {
        /* control of guest tsc rate supported? */
        bool has_tsc_control;
        /* maximum supported tsc_khz for guests */
        u32 max_guest_tsc_khz;
        /* number of bits of the fractional part of the TSC scaling ratio */
        u8 tsc_scaling_ratio_frac_bits;
        /* maximum allowed value of TSC scaling ratio */
        u64 max_tsc_scaling_ratio;
        /* 1ull << kvm_caps.tsc_scaling_ratio_frac_bits */
        u64 default_tsc_scaling_ratio;
        /* bus lock detection supported? */
        bool has_bus_lock_exit;
        /* notify VM exit supported? */
        bool has_notify_vmexit;
        /* bit mask of VM types */
        u32 supported_vm_types;

        u64 supported_mce_cap;
        u64 supported_xcr0;
        u64 supported_xss;
        u64 supported_perf_cap;

        u64 supported_quirks;
        u64 inapplicable_quirks;
};

struct kvm_host_values {
        /*
         * The host's raw MAXPHYADDR, i.e. the number of non-reserved physical
         * address bits irrespective of features that repurpose legal bits,
         * e.g. MKTME.
         */
        u8 maxphyaddr;

        u64 efer;
        u64 xcr0;
        u64 xss;
        u64 arch_capabilities;
};

void kvm_spurious_fault(void);

#define SIZE_OF_MEMSLOTS_HASHTABLE \
        (sizeof(((struct kvm_memslots *)0)->id_hash) * 2 * KVM_MAX_NR_ADDRESS_SPACES)

/* Sanity check the size of the memslot hash tables. */
static_assert(SIZE_OF_MEMSLOTS_HASHTABLE ==
              (1024 * (1 + IS_ENABLED(CONFIG_X86_64)) * (1 + IS_ENABLED(CONFIG_KVM_SMM))));

/*
 * Assert that "struct kvm_{svm,vmx,tdx}" is an order-0 or order-1 allocation.
 * Spilling over to an order-2 allocation isn't fundamentally problematic, but
 * isn't expected to happen in the foreseeable future (O(years)). Assert that
 * the size is an order-0 allocation when ignoring the memslot hash tables, to
 * help detect and debug unexpected size increases.
 */
#define KVM_SANITY_CHECK_VM_STRUCT_SIZE(x)                                      \
do {                                                                            \
        BUILD_BUG_ON(get_order(sizeof(struct x) - SIZE_OF_MEMSLOTS_HASHTABLE) && \
                     !IS_ENABLED(CONFIG_DEBUG_KERNEL) && !IS_ENABLED(CONFIG_KASAN)); \
        BUILD_BUG_ON(get_order(sizeof(struct x)) > 1 &&                         \
                     !IS_ENABLED(CONFIG_DEBUG_KERNEL) && !IS_ENABLED(CONFIG_KASAN)); \
} while (0)
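
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * a vendor module would place the assertion next to its VM structure, inside
 * a setup function, so that an unexpected size increase fails the build.  The
 * structure and function names below are hypothetical.
 */
#if 0
struct kvm_example_vm {
        struct kvm kvm;         /* must be the first member */
        u64 vendor_state[32];
};

static void example_hardware_setup(void)
{
        /* BUILD_BUG_ON() needs function scope, hence the do/while wrapper. */
        KVM_SANITY_CHECK_VM_STRUCT_SIZE(kvm_example_vm);
}
#endif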

#define KVM_NESTED_VMENTER_CONSISTENCY_CHECK(consistency_check)                \
({                                                                              \
        bool failed = (consistency_check);                                      \
        if (failed)                                                             \
                trace_kvm_nested_vmenter_failed(#consistency_check, 0);         \
        failed;                                                                 \
})
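
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * nested VM-Enter code wraps each architectural check in the macro above so
 * that the exact failing expression is traced before the failure is reported.
 * The check below is a simplified, hypothetical example.
 */
#if 0
static int example_check_guest_state(u64 guest_cr4)
{
        if (KVM_NESTED_VMENTER_CONSISTENCY_CHECK(!(guest_cr4 & X86_CR4_PAE)))
                return -EINVAL;

        return 0;
}
#endif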

/*
 * The first...last VMX feature MSRs that are emulated by KVM. This may or may
 * not cover all known VMX MSRs, as KVM doesn't emulate an MSR until there's an
 * associated feature that KVM supports for nested virtualization.
 */
#define KVM_FIRST_EMULATED_VMX_MSR      MSR_IA32_VMX_BASIC
#define KVM_LAST_EMULATED_VMX_MSR       MSR_IA32_VMX_VMFUNC

#define KVM_DEFAULT_PLE_GAP             128
#define KVM_VMX_DEFAULT_PLE_WINDOW      4096
#define KVM_DEFAULT_PLE_WINDOW_GROW     2
#define KVM_DEFAULT_PLE_WINDOW_SHRINK   0
#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX  UINT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW_MAX  USHRT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW      3000

static inline unsigned int __grow_ple_window(unsigned int val,
                unsigned int base, unsigned int modifier, unsigned int max)
{
        u64 ret = val;

        if (modifier < 1)
                return base;

        if (modifier < base)
                ret *= modifier;
        else
                ret += modifier;

        return min(ret, (u64)max);
}

static inline unsigned int __shrink_ple_window(unsigned int val,
                unsigned int base, unsigned int modifier, unsigned int min)
{
        if (modifier < 1)
                return base;

        if (modifier < base)
                val /= modifier;
        else
                val -= modifier;

        return max(val, min);
}
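
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * how a vendor module might drive the helpers above with the VMX defaults.
 * With GROW == 2 (less than the base), the window doubles on each grow; with
 * SHRINK == 0, the window snaps back to the base value.  The real callers and
 * tunables live in vmx.c and svm.c; the function below is hypothetical.
 */
#if 0
static unsigned int example_ple_window_update(unsigned int window, bool grow)
{
        if (grow)       /* 4096 -> 8192 -> 16384 ..., capped at UINT_MAX */
                return __grow_ple_window(window, KVM_VMX_DEFAULT_PLE_WINDOW,
                                         KVM_DEFAULT_PLE_WINDOW_GROW,
                                         KVM_VMX_DEFAULT_PLE_WINDOW_MAX);

        /* A modifier below 1 makes __shrink_ple_window() return the base. */
        return __shrink_ple_window(window, KVM_VMX_DEFAULT_PLE_WINDOW,
                                   KVM_DEFAULT_PLE_WINDOW_SHRINK,
                                   KVM_VMX_DEFAULT_PLE_WINDOW);
}
#endif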

#define MSR_IA32_CR_PAT_DEFAULT \
        PAT_VALUE(WB, WT, UC_MINUS, UC, WB, WT, UC_MINUS, UC)

void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu);
int kvm_check_nested_events(struct kvm_vcpu *vcpu);

/* Forcibly leave the nested mode in cases like a vCPU reset */
static inline void kvm_leave_nested(struct kvm_vcpu *vcpu)
{
        kvm_x86_ops.nested_ops->leave_nested(vcpu);
}

/*
 * If IBRS is advertised to the vCPU, KVM must flush the indirect branch
 * predictors when transitioning from L2 to L1, as L1 expects hardware (KVM in
 * this case) to provide separate predictor modes. Bare metal isolates the host
 * from the guest, but doesn't isolate different guests from one another (in
 * this case L1 and L2). The exception is if bare metal supports same mode IBRS,
 * which offers protection within the same mode, and hence protects L1 from L2.
 */
static inline void kvm_nested_vmexit_handle_ibrs(struct kvm_vcpu *vcpu)
{
        if (cpu_feature_enabled(X86_FEATURE_AMD_IBRS_SAME_MODE))
                return;

        if (guest_cpu_cap_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
            guest_cpu_cap_has(vcpu, X86_FEATURE_AMD_IBRS))
                indirect_branch_prediction_barrier();
}

static inline bool kvm_vcpu_has_run(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.last_vmentry_cpu != -1;
}

static inline void kvm_set_mp_state(struct kvm_vcpu *vcpu, int mp_state)
{
        vcpu->arch.mp_state = mp_state;
        if (mp_state == KVM_MP_STATE_RUNNABLE)
                vcpu->arch.pv.pv_unhalted = false;
}

static inline bool kvm_is_exception_pending(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.exception.pending ||
               vcpu->arch.exception_vmexit.pending ||
               kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu);
}

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
        vcpu->arch.exception.pending = false;
        vcpu->arch.exception.injected = false;
        vcpu->arch.exception_vmexit.pending = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
                                       bool soft)
{
        vcpu->arch.interrupt.injected = true;
        vcpu->arch.interrupt.soft = soft;
        vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
        vcpu->arch.interrupt.injected = false;
}

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.exception.injected || vcpu->arch.interrupt.injected ||
               vcpu->arch.nmi_injected;
}

static inline bool kvm_exception_is_soft(unsigned int nr)
{
        return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
        return kvm_is_cr0_bit_set(vcpu, X86_CR0_PE);
}

static inline bool is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        return !!(vcpu->arch.efer & EFER_LMA);
#else
        return false;
#endif
}

static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
        int cs_db, cs_l;

        WARN_ON_ONCE(vcpu->arch.guest_state_protected);

        if (!is_long_mode(vcpu))
                return false;
        kvm_x86_call(get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
        return cs_l;
}

static inline bool is_64_bit_hypercall(struct kvm_vcpu *vcpu)
{
        /*
         * If running with protected guest state, the CS register is not
         * accessible. The hypercall register values will have had to be
         * provided in 64-bit mode, so assume the guest is in 64-bit.
         */
        return vcpu->arch.guest_state_protected || is_64_bit_mode(vcpu);
}

static inline bool x86_exception_has_error_code(unsigned int vector)
{
        static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
                        BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
                        BIT(PF_VECTOR) | BIT(AC_VECTOR);

        return (1U << vector) & exception_has_error_code;
}

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline bool is_pae(struct kvm_vcpu *vcpu)
{
        return kvm_is_cr4_bit_set(vcpu, X86_CR4_PAE);
}

static inline bool is_pse(struct kvm_vcpu *vcpu)
{
        return kvm_is_cr4_bit_set(vcpu, X86_CR4_PSE);
}

static inline bool is_paging(struct kvm_vcpu *vcpu)
{
        return likely(kvm_is_cr0_bit_set(vcpu, X86_CR0_PG));
}

static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
{
        return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);
}

static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
        return kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 57 : 48;
}

static inline u8 max_host_virt_addr_bits(void)
{
        return kvm_cpu_cap_has(X86_FEATURE_LA57) ? 57 : 48;
}

/*
 * x86 MSRs which contain linear addresses, x86 hidden segment bases, and
 * IDT/GDT bases have static canonicality checks, the size of which depends
 * only on the CPU's support for 5-level paging, rather than on the state of
 * CR4.LA57. This applies to both WRMSR and to other instructions that set
 * their values, e.g. SGDT.
 *
 * KVM passes through most of these MSRs and also doesn't intercept the
 * instructions that set the hidden segment bases.
 *
 * Because of this, to be consistent with hardware, even if the guest doesn't
 * have LA57 enabled in its CPUID, perform canonicality checks based on *host*
 * support for 5-level paging.
 *
 * Finally, instructions that invalidate the MMU for a given linear address
 * have a similar static canonicality check on the address. This allows, for
 * example, invalidating 5-level addresses of a guest from a host which uses
 * 4-level paging.
 */
static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu,
                                           unsigned int flags)
{
        if (flags & (X86EMUL_F_INVLPG | X86EMUL_F_MSR | X86EMUL_F_DT_LOAD))
                return !__is_canonical_address(la, max_host_virt_addr_bits());
        else
                return !__is_canonical_address(la, vcpu_virt_addr_bits(vcpu));
}

static inline bool is_noncanonical_msr_address(u64 la, struct kvm_vcpu *vcpu)
{
        return is_noncanonical_address(la, vcpu, X86EMUL_F_MSR);
}

static inline bool is_noncanonical_base_address(u64 la, struct kvm_vcpu *vcpu)
{
        return is_noncanonical_address(la, vcpu, X86EMUL_F_DT_LOAD);
}

static inline bool is_noncanonical_invlpg_address(u64 la, struct kvm_vcpu *vcpu)
{
        return is_noncanonical_address(la, vcpu, X86EMUL_F_INVLPG);
}
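
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * per the comment above, the MSR/base/INVLPG variants follow *host* LA57
 * support, while the plain check follows the vCPU's CR4.LA57.  The value
 * below is canonical for 57 virtual address bits but not for 48.
 */
#if 0
static void example_canonical_checks(struct kvm_vcpu *vcpu)
{
        u64 la = 0xff00000000000000ull;

        /* Sized by host support for 5-level paging. */
        bool msr_bad  = is_noncanonical_msr_address(la, vcpu);

        /* Sized by the vCPU's CR4.LA57 via vcpu_virt_addr_bits(). */
        bool insn_bad = is_noncanonical_address(la, vcpu, 0);
}
#endif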

static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
                                        gva_t gva, gfn_t gfn, unsigned access)
{
        u64 gen = kvm_memslots(vcpu->kvm)->generation;

        if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
                return;

        /*
         * If this is a shadow nested page table, the "GVA" is
         * actually a nGPA.
         */
        vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
        vcpu->arch.mmio_access = access;
        vcpu->arch.mmio_gfn = gfn;
        vcpu->arch.mmio_gen = gen;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
        if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
                return;

        vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
        if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
            vcpu->arch.mmio_gva == (gva & PAGE_MASK))
                return true;

        return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
            vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
                return true;

        return false;
}

static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
        unsigned long val = kvm_register_read_raw(vcpu, reg);

        return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu,
                                      int reg, unsigned long val)
{
        if (!is_64_bit_mode(vcpu))
                val = (u32)val;
        return kvm_register_write_raw(vcpu, reg, val);
}

static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
        return !(kvm->arch.disabled_quirks & quirk);
}

void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

u64 get_kvmclock_ns(struct kvm *kvm);
uint64_t kvm_get_wall_clock_epoch(struct kvm *kvm);
bool kvm_get_monotonic_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp);
int kvm_guest_time_update(struct kvm_vcpu *v);

int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
                        gva_t addr, void *val, unsigned int bytes,
                        struct x86_exception *exception);

int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
                                gva_t addr, void *val, unsigned int bytes,
                                struct x86_exception *exception);

int handle_ud(struct kvm_vcpu *vcpu);

void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu,
                                   struct kvm_queued_exception *ex);

int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_vector_hashing_enabled(void);
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
                                    void *insn, int insn_len);
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                            int emulation_type, void *insn, int insn_len);
fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);
fastpath_t handle_fastpath_hlt(struct kvm_vcpu *vcpu);

extern struct kvm_caps kvm_caps;
extern struct kvm_host_values kvm_host;

extern bool enable_pmu;

/*
 * Get a filtered version of KVM's supported XCR0 that strips out dynamic
 * features for which the current process doesn't (yet) have permission to use.
 * This is intended to be used only when enumerating support to userspace,
 * e.g. in KVM_GET_SUPPORTED_CPUID and KVM_CAP_XSAVE2; it does NOT need to be
 * used to check/restrict guest behavior, as KVM rejects KVM_SET_CPUID{2} if
 * userspace attempts to enable unpermitted features.
 */
static inline u64 kvm_get_filtered_xcr0(void)
{
        u64 permitted_xcr0 = kvm_caps.supported_xcr0;

        BUILD_BUG_ON(XFEATURE_MASK_USER_DYNAMIC != XFEATURE_MASK_XTILE_DATA);

        if (permitted_xcr0 & XFEATURE_MASK_USER_DYNAMIC) {
                permitted_xcr0 &= xstate_get_guest_group_perm();

                /*
                 * Treat XTILE_CFG as unsupported if the current process isn't
                 * allowed to use XTILE_DATA, as attempting to set XTILE_CFG in
                 * XCR0 without setting XTILE_DATA is architecturally illegal.
                 */
                if (!(permitted_xcr0 & XFEATURE_MASK_XTILE_DATA))
                        permitted_xcr0 &= ~XFEATURE_MASK_XTILE_CFG;
        }
        return permitted_xcr0;
}
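
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * when enumerating XSAVE support to userspace, the filtered mask is used so
 * that dynamically permissioned features (XTILE_DATA) are only reported once
 * the process has requested guest permission via arch_prctl().
 */
#if 0
static u64 example_supported_xcr0_for_userspace(void)
{
        /* e.g. the mask backing KVM_GET_SUPPORTED_CPUID's XSAVE leaf. */
        return kvm_get_filtered_xcr0();
}
#endif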

static inline bool kvm_mpx_supported(void)
{
        return (kvm_caps.supported_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
                == (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
}

extern unsigned int min_timer_period_us;

extern bool enable_vmware_backdoor;

extern int pi_inject_timer;

extern bool report_ignored_msrs;

extern bool eager_page_split;

static inline void kvm_pr_unimpl_wrmsr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        if (report_ignored_msrs)
                vcpu_unimpl(vcpu, "Unhandled WRMSR(0x%x) = 0x%llx\n", msr, data);
}

static inline void kvm_pr_unimpl_rdmsr(struct kvm_vcpu *vcpu, u32 msr)
{
        if (report_ignored_msrs)
                vcpu_unimpl(vcpu, "Unhandled RDMSR(0x%x)\n", msr);
}

static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
        return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
                                   vcpu->arch.virtual_tsc_shift);
}

/* Same "calling convention" as do_div:
 * - divide (n << 32) by base
 * - put result in n
 * - return remainder
 */
#define do_shl32_div32(n, base)                                 \
({                                                              \
        u32 __quot, __rem;                                      \
        asm("divl %2" : "=a" (__quot), "=d" (__rem)             \
                : "rm" (base), "0" (0), "1" ((u32) n));         \
        n = __quot;                                             \
        __rem;                                                  \
})
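
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * dividing (1 << 32) by 3 with the macro above leaves the 32-bit quotient
 * 0x55555555 (a 0.32 fixed-point encoding of 1/3) in 'n' and yields the
 * remainder 1 as the expression value.  Note that the quotient must fit in
 * 32 bits, i.e. 'n' must be smaller than 'base', or divl will fault.
 */
#if 0
static void example_do_shl32_div32(void)
{
        u32 n = 1;
        u32 rem = do_shl32_div32(n, 3); /* n == 0x55555555, rem == 1 */
}
#endif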

static inline void kvm_disable_exits(struct kvm *kvm, u64 mask)
{
        kvm->arch.disabled_exits |= mask;
}

static inline bool kvm_mwait_in_guest(struct kvm *kvm)
{
        return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_MWAIT;
}

static inline bool kvm_hlt_in_guest(struct kvm *kvm)
{
        return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_HLT;
}

static inline bool kvm_pause_in_guest(struct kvm *kvm)
{
        return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_PAUSE;
}

static inline bool kvm_cstate_in_guest(struct kvm *kvm)
{
        return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_CSTATE;
}

static inline bool kvm_aperfmperf_in_guest(struct kvm *kvm)
{
        return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_APERFMPERF;
}

static inline bool kvm_notify_vmexit_enabled(struct kvm *kvm)
{
        return kvm->arch.notify_vmexit_flags & KVM_X86_NOTIFY_VMEXIT_ENABLED;
}

static __always_inline void kvm_before_interrupt(struct kvm_vcpu *vcpu,
                                                 enum kvm_intr_type intr)
{
        WRITE_ONCE(vcpu->arch.handling_intr_from_guest, (u8)intr);
}

static __always_inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
{
        WRITE_ONCE(vcpu->arch.handling_intr_from_guest, 0);
}

static inline bool kvm_handling_nmi_from_guest(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.handling_intr_from_guest == KVM_HANDLING_NMI;
}

static inline bool kvm_pat_valid(u64 data)
{
        if (data & 0xF8F8F8F8F8F8F8F8ull)
                return false;
        /* 0, 1, 4, 5, 6, 7 are valid values. */
        return (data | ((data & 0x0202020202020202ull) << 1)) == data;
}
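
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * kvm_pat_valid() accepts a PAT MSR value only if every byte encodes one of
 * the defined memory types 0, 1, 4, 5, 6 or 7; types 2 and 3 and bits 7:3 of
 * each entry are reserved.
 */
#if 0
static void example_kvm_pat_valid(void)
{
        WARN_ON(!kvm_pat_valid(MSR_IA32_CR_PAT_DEFAULT));       /* valid */
        WARN_ON(kvm_pat_valid(0x0000000000000002ull));          /* type 2 is reserved */
        WARN_ON(kvm_pat_valid(0x0000000000000008ull));          /* bit 3 is reserved */
}
#endif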

static inline bool kvm_dr7_valid(u64 data)
{
        /* Bits [63:32] are reserved */
        return !(data >> 32);
}
static inline bool kvm_dr6_valid(u64 data)
{
        /* Bits [63:32] are reserved */
        return !(data >> 32);
}

/*
 * Trigger machine check on the host. We assume all the MSRs are already set up
 * by the CPU and that we still run on the same CPU as the MCE occurred on.
 * We pass a fake environment to the machine check handler because we want
 * the guest to be always treated like user space, no matter what context
 * it used internally.
 */
static inline void kvm_machine_check(void)
{
#if defined(CONFIG_X86_MCE)
        struct pt_regs regs = {
                .cs = 3, /* Fake ring 3 no matter what the guest ran on */
                .flags = X86_EFLAGS_IF,
        };

        do_machine_check(&regs);
#endif
}

void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
int kvm_spec_ctrl_test_value(u64 value);
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
                              struct x86_exception *e);
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);

enum kvm_msr_access {
        MSR_TYPE_R      = BIT(0),
        MSR_TYPE_W      = BIT(1),
        MSR_TYPE_RW     = MSR_TYPE_R | MSR_TYPE_W,
};

/*
 * Internal error codes that are used to indicate that MSR emulation encountered
 * an error that should result in #GP in the guest, unless userspace handles it.
 * Note, '1', '0', and negative numbers are off limits, as they are used by KVM
 * as part of KVM's lightly documented internal KVM_RUN return codes.
 *
 * UNSUPPORTED - The MSR isn't supported, either because it is completely
 *               unknown to KVM, or because the MSR should not exist according
 *               to the vCPU model.
 *
 * FILTERED    - Access to the MSR is denied by a userspace MSR filter.
 */
#define KVM_MSR_RET_UNSUPPORTED 2
#define KVM_MSR_RET_FILTERED    3
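
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * an MSR emulation helper reports "does not exist for this vCPU model" with
 * KVM_MSR_RET_UNSUPPORTED rather than with 1, 0, or a negative value, which
 * are reserved for KVM_RUN's return-code plumbing.  The MSR index below is
 * hypothetical.
 */
#if 0
static int example_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
{
        switch (index) {
        case 0xdeadbeef:        /* hypothetical MSR index */
                *data = 0;
                return 0;
        default:
                return KVM_MSR_RET_UNSUPPORTED;
        }
}
#endif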

static inline bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        return !(cr4 & vcpu->arch.cr4_guest_rsvd_bits);
}

#define __cr4_reserved_bits(__cpu_has, __c)             \
({                                                      \
        u64 __reserved_bits = CR4_RESERVED_BITS;        \
                                                        \
        if (!__cpu_has(__c, X86_FEATURE_XSAVE))         \
                __reserved_bits |= X86_CR4_OSXSAVE;     \
        if (!__cpu_has(__c, X86_FEATURE_SMEP))          \
                __reserved_bits |= X86_CR4_SMEP;        \
        if (!__cpu_has(__c, X86_FEATURE_SMAP))          \
                __reserved_bits |= X86_CR4_SMAP;        \
        if (!__cpu_has(__c, X86_FEATURE_FSGSBASE))      \
                __reserved_bits |= X86_CR4_FSGSBASE;    \
        if (!__cpu_has(__c, X86_FEATURE_PKU))           \
                __reserved_bits |= X86_CR4_PKE;         \
        if (!__cpu_has(__c, X86_FEATURE_LA57))          \
                __reserved_bits |= X86_CR4_LA57;        \
        if (!__cpu_has(__c, X86_FEATURE_UMIP))          \
                __reserved_bits |= X86_CR4_UMIP;        \
        if (!__cpu_has(__c, X86_FEATURE_VMX))           \
                __reserved_bits |= X86_CR4_VMXE;        \
        if (!__cpu_has(__c, X86_FEATURE_PCID))          \
                __reserved_bits |= X86_CR4_PCIDE;       \
        if (!__cpu_has(__c, X86_FEATURE_LAM))           \
                __reserved_bits |= X86_CR4_LAM_SUP;     \
        __reserved_bits;                                \
})

int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
                          void *dst);
int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
                         void *dst);
int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
                         unsigned int port, void *data, unsigned int count,
                         int in);

static inline bool user_exit_on_hypercall(struct kvm *kvm, unsigned long hc_nr)
{
        return kvm->arch.hypercall_exit_enabled & BIT(hc_nr);
}

int ____kvm_emulate_hypercall(struct kvm_vcpu *vcpu, int cpl,
                              int (*complete_hypercall)(struct kvm_vcpu *));

#define __kvm_emulate_hypercall(_vcpu, cpl, complete_hypercall)                 \
({                                                                              \
        int __ret;                                                              \
        __ret = ____kvm_emulate_hypercall(_vcpu, cpl, complete_hypercall);      \
                                                                                \
        if (__ret > 0)                                                          \
                __ret = complete_hypercall(_vcpu);                              \
        __ret;                                                                  \
})
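
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * a vendor exit handler invokes the wrapper with the guest's CPL and a
 * completion callback; when the inner helper returns a positive value, the
 * wrapper calls the callback to finish the hypercall.  The names below are
 * hypothetical, and a real caller derives the CPL from guest segment state.
 */
#if 0
static int example_complete_hypercall(struct kvm_vcpu *vcpu)
{
        return 1;       /* resume the guest */
}

static int example_handle_vmcall(struct kvm_vcpu *vcpu)
{
        int cpl = 0;    /* would come from the vendor's view of the guest */

        return __kvm_emulate_hypercall(vcpu, cpl, example_complete_hypercall);
}
#endif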

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

#endif