Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/kvm/x86.c
26444 views
1
// SPDX-License-Identifier: GPL-2.0-only
2
/*
3
* Kernel-based Virtual Machine driver for Linux
4
*
5
* derived from drivers/kvm/kvm_main.c
6
*
7
* Copyright (C) 2006 Qumranet, Inc.
8
* Copyright (C) 2008 Qumranet, Inc.
9
* Copyright IBM Corporation, 2008
10
* Copyright 2010 Red Hat, Inc. and/or its affiliates.
11
*
12
* Authors:
13
* Avi Kivity <[email protected]>
14
* Yaniv Kamay <[email protected]>
15
* Amit Shah <[email protected]>
16
* Ben-Ami Yassour <[email protected]>
17
*/
18
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20
#include <linux/kvm_host.h>
21
#include "irq.h"
22
#include "ioapic.h"
23
#include "mmu.h"
24
#include "i8254.h"
25
#include "tss.h"
26
#include "kvm_cache_regs.h"
27
#include "kvm_emulate.h"
28
#include "mmu/page_track.h"
29
#include "x86.h"
30
#include "cpuid.h"
31
#include "pmu.h"
32
#include "hyperv.h"
33
#include "lapic.h"
34
#include "xen.h"
35
#include "smm.h"
36
37
#include <linux/clocksource.h>
38
#include <linux/interrupt.h>
39
#include <linux/kvm.h>
40
#include <linux/fs.h>
41
#include <linux/vmalloc.h>
42
#include <linux/export.h>
43
#include <linux/moduleparam.h>
44
#include <linux/mman.h>
45
#include <linux/highmem.h>
46
#include <linux/iommu.h>
47
#include <linux/cpufreq.h>
48
#include <linux/user-return-notifier.h>
49
#include <linux/srcu.h>
50
#include <linux/slab.h>
51
#include <linux/perf_event.h>
52
#include <linux/uaccess.h>
53
#include <linux/hash.h>
54
#include <linux/pci.h>
55
#include <linux/timekeeper_internal.h>
56
#include <linux/pvclock_gtod.h>
57
#include <linux/kvm_irqfd.h>
58
#include <linux/irqbypass.h>
59
#include <linux/sched/stat.h>
60
#include <linux/sched/isolation.h>
61
#include <linux/mem_encrypt.h>
62
#include <linux/entry-kvm.h>
63
#include <linux/suspend.h>
64
#include <linux/smp.h>
65
66
#include <trace/events/ipi.h>
67
#include <trace/events/kvm.h>
68
69
#include <asm/debugreg.h>
70
#include <asm/msr.h>
71
#include <asm/desc.h>
72
#include <asm/mce.h>
73
#include <asm/pkru.h>
74
#include <linux/kernel_stat.h>
75
#include <asm/fpu/api.h>
76
#include <asm/fpu/xcr.h>
77
#include <asm/fpu/xstate.h>
78
#include <asm/pvclock.h>
79
#include <asm/div64.h>
80
#include <asm/irq_remapping.h>
81
#include <asm/mshyperv.h>
82
#include <asm/hypervisor.h>
83
#include <asm/tlbflush.h>
84
#include <asm/intel_pt.h>
85
#include <asm/emulate_prefix.h>
86
#include <asm/sgx.h>
87
#include <clocksource/hyperv_timer.h>
88
89
#define CREATE_TRACE_POINTS
90
#include "trace.h"
91
92
#define MAX_IO_MSRS 256
93
94
/*
95
* Note, kvm_caps fields should *never* have default values, all fields must be
96
* recomputed from scratch during vendor module load, e.g. to account for a
97
* vendor module being reloaded with different module parameters.
98
*/
99
struct kvm_caps kvm_caps __read_mostly;
100
EXPORT_SYMBOL_GPL(kvm_caps);
101
102
struct kvm_host_values kvm_host __read_mostly;
103
EXPORT_SYMBOL_GPL(kvm_host);
104
105
#define ERR_PTR_USR(e) ((void __user *)ERR_PTR(e))
106
107
#define emul_to_vcpu(ctxt) \
108
((struct kvm_vcpu *)(ctxt)->vcpu)
109
110
/* EFER defaults:
111
* - enable syscall per default because its emulated by KVM
112
* - enable LME and LMA per default on 64 bit KVM
113
*/
114
#ifdef CONFIG_X86_64
115
static
116
u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
117
#else
118
static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
119
#endif
120
121
#define KVM_EXIT_HYPERCALL_VALID_MASK (1 << KVM_HC_MAP_GPA_RANGE)
122
123
#define KVM_CAP_PMU_VALID_MASK KVM_PMU_CAP_DISABLE
124
125
#define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
126
KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
127
128
static void update_cr8_intercept(struct kvm_vcpu *vcpu);
129
static void process_nmi(struct kvm_vcpu *vcpu);
130
static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
131
static void store_regs(struct kvm_vcpu *vcpu);
132
static int sync_regs(struct kvm_vcpu *vcpu);
133
static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu);
134
135
static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
136
static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
137
138
static DEFINE_MUTEX(vendor_module_lock);
139
struct kvm_x86_ops kvm_x86_ops __read_mostly;
140
141
#define KVM_X86_OP(func) \
142
DEFINE_STATIC_CALL_NULL(kvm_x86_##func, \
143
*(((struct kvm_x86_ops *)0)->func));
144
#define KVM_X86_OP_OPTIONAL KVM_X86_OP
145
#define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP
146
#include <asm/kvm-x86-ops.h>
147
EXPORT_STATIC_CALL_GPL(kvm_x86_get_cs_db_l_bits);
148
EXPORT_STATIC_CALL_GPL(kvm_x86_cache_reg);
149
150
static bool __read_mostly ignore_msrs = 0;
151
module_param(ignore_msrs, bool, 0644);
152
153
bool __read_mostly report_ignored_msrs = true;
154
module_param(report_ignored_msrs, bool, 0644);
155
EXPORT_SYMBOL_GPL(report_ignored_msrs);
156
157
unsigned int min_timer_period_us = 200;
158
module_param(min_timer_period_us, uint, 0644);
159
160
static bool __read_mostly kvmclock_periodic_sync = true;
161
module_param(kvmclock_periodic_sync, bool, 0444);
162
163
/* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
164
static u32 __read_mostly tsc_tolerance_ppm = 250;
165
module_param(tsc_tolerance_ppm, uint, 0644);
166
167
static bool __read_mostly vector_hashing = true;
168
module_param(vector_hashing, bool, 0444);
169
170
bool __read_mostly enable_vmware_backdoor = false;
171
module_param(enable_vmware_backdoor, bool, 0444);
172
EXPORT_SYMBOL_GPL(enable_vmware_backdoor);
173
174
/*
175
* Flags to manipulate forced emulation behavior (any non-zero value will
176
* enable forced emulation).
177
*/
178
#define KVM_FEP_CLEAR_RFLAGS_RF BIT(1)
179
static int __read_mostly force_emulation_prefix;
180
module_param(force_emulation_prefix, int, 0644);
181
182
int __read_mostly pi_inject_timer = -1;
183
module_param(pi_inject_timer, bint, 0644);
184
185
/* Enable/disable PMU virtualization */
186
bool __read_mostly enable_pmu = true;
187
EXPORT_SYMBOL_GPL(enable_pmu);
188
module_param(enable_pmu, bool, 0444);
189
190
bool __read_mostly eager_page_split = true;
191
module_param(eager_page_split, bool, 0644);
192
193
/* Enable/disable SMT_RSB bug mitigation */
194
static bool __read_mostly mitigate_smt_rsb;
195
module_param(mitigate_smt_rsb, bool, 0444);
196
197
/*
198
* Restoring the host value for MSRs that are only consumed when running in
199
* usermode, e.g. SYSCALL MSRs and TSC_AUX, can be deferred until the CPU
200
* returns to userspace, i.e. the kernel can run with the guest's value.
201
*/
202
#define KVM_MAX_NR_USER_RETURN_MSRS 16
203
204
struct kvm_user_return_msrs {
205
struct user_return_notifier urn;
206
bool registered;
207
struct kvm_user_return_msr_values {
208
u64 host;
209
u64 curr;
210
} values[KVM_MAX_NR_USER_RETURN_MSRS];
211
};
212
213
u32 __read_mostly kvm_nr_uret_msrs;
214
EXPORT_SYMBOL_GPL(kvm_nr_uret_msrs);
215
static u32 __read_mostly kvm_uret_msrs_list[KVM_MAX_NR_USER_RETURN_MSRS];
216
static struct kvm_user_return_msrs __percpu *user_return_msrs;
217
218
#define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
219
| XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
220
| XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
221
| XFEATURE_MASK_PKRU | XFEATURE_MASK_XTILE)
222
223
bool __read_mostly allow_smaller_maxphyaddr = 0;
224
EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr);
225
226
bool __read_mostly enable_apicv = true;
227
EXPORT_SYMBOL_GPL(enable_apicv);
228
229
bool __read_mostly enable_ipiv = true;
230
EXPORT_SYMBOL_GPL(enable_ipiv);
231
232
bool __read_mostly enable_device_posted_irqs = true;
233
EXPORT_SYMBOL_GPL(enable_device_posted_irqs);
234
235
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
236
KVM_GENERIC_VM_STATS(),
237
STATS_DESC_COUNTER(VM, mmu_shadow_zapped),
238
STATS_DESC_COUNTER(VM, mmu_pte_write),
239
STATS_DESC_COUNTER(VM, mmu_pde_zapped),
240
STATS_DESC_COUNTER(VM, mmu_flooded),
241
STATS_DESC_COUNTER(VM, mmu_recycled),
242
STATS_DESC_COUNTER(VM, mmu_cache_miss),
243
STATS_DESC_ICOUNTER(VM, mmu_unsync),
244
STATS_DESC_ICOUNTER(VM, pages_4k),
245
STATS_DESC_ICOUNTER(VM, pages_2m),
246
STATS_DESC_ICOUNTER(VM, pages_1g),
247
STATS_DESC_ICOUNTER(VM, nx_lpage_splits),
248
STATS_DESC_PCOUNTER(VM, max_mmu_rmap_size),
249
STATS_DESC_PCOUNTER(VM, max_mmu_page_hash_collisions)
250
};
251
252
const struct kvm_stats_header kvm_vm_stats_header = {
253
.name_size = KVM_STATS_NAME_SIZE,
254
.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
255
.id_offset = sizeof(struct kvm_stats_header),
256
.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
257
.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
258
sizeof(kvm_vm_stats_desc),
259
};
260
261
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
262
KVM_GENERIC_VCPU_STATS(),
263
STATS_DESC_COUNTER(VCPU, pf_taken),
264
STATS_DESC_COUNTER(VCPU, pf_fixed),
265
STATS_DESC_COUNTER(VCPU, pf_emulate),
266
STATS_DESC_COUNTER(VCPU, pf_spurious),
267
STATS_DESC_COUNTER(VCPU, pf_fast),
268
STATS_DESC_COUNTER(VCPU, pf_mmio_spte_created),
269
STATS_DESC_COUNTER(VCPU, pf_guest),
270
STATS_DESC_COUNTER(VCPU, tlb_flush),
271
STATS_DESC_COUNTER(VCPU, invlpg),
272
STATS_DESC_COUNTER(VCPU, exits),
273
STATS_DESC_COUNTER(VCPU, io_exits),
274
STATS_DESC_COUNTER(VCPU, mmio_exits),
275
STATS_DESC_COUNTER(VCPU, signal_exits),
276
STATS_DESC_COUNTER(VCPU, irq_window_exits),
277
STATS_DESC_COUNTER(VCPU, nmi_window_exits),
278
STATS_DESC_COUNTER(VCPU, l1d_flush),
279
STATS_DESC_COUNTER(VCPU, halt_exits),
280
STATS_DESC_COUNTER(VCPU, request_irq_exits),
281
STATS_DESC_COUNTER(VCPU, irq_exits),
282
STATS_DESC_COUNTER(VCPU, host_state_reload),
283
STATS_DESC_COUNTER(VCPU, fpu_reload),
284
STATS_DESC_COUNTER(VCPU, insn_emulation),
285
STATS_DESC_COUNTER(VCPU, insn_emulation_fail),
286
STATS_DESC_COUNTER(VCPU, hypercalls),
287
STATS_DESC_COUNTER(VCPU, irq_injections),
288
STATS_DESC_COUNTER(VCPU, nmi_injections),
289
STATS_DESC_COUNTER(VCPU, req_event),
290
STATS_DESC_COUNTER(VCPU, nested_run),
291
STATS_DESC_COUNTER(VCPU, directed_yield_attempted),
292
STATS_DESC_COUNTER(VCPU, directed_yield_successful),
293
STATS_DESC_COUNTER(VCPU, preemption_reported),
294
STATS_DESC_COUNTER(VCPU, preemption_other),
295
STATS_DESC_IBOOLEAN(VCPU, guest_mode),
296
STATS_DESC_COUNTER(VCPU, notify_window_exits),
297
};
298
299
const struct kvm_stats_header kvm_vcpu_stats_header = {
300
.name_size = KVM_STATS_NAME_SIZE,
301
.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
302
.id_offset = sizeof(struct kvm_stats_header),
303
.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
304
.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
305
sizeof(kvm_vcpu_stats_desc),
306
};
307
308
static struct kmem_cache *x86_emulator_cache;
309
310
/*
311
* The three MSR lists(msrs_to_save, emulated_msrs, msr_based_features) track
312
* the set of MSRs that KVM exposes to userspace through KVM_GET_MSRS,
313
* KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. msrs_to_save holds MSRs that
314
* require host support, i.e. should be probed via RDMSR. emulated_msrs holds
315
* MSRs that KVM emulates without strictly requiring host support.
316
* msr_based_features holds MSRs that enumerate features, i.e. are effectively
317
* CPUID leafs. Note, msr_based_features isn't mutually exclusive with
318
* msrs_to_save and emulated_msrs.
319
*/
320
321
static const u32 msrs_to_save_base[] = {
322
MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
323
MSR_STAR,
324
#ifdef CONFIG_X86_64
325
MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
326
#endif
327
MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
328
MSR_IA32_FEAT_CTL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
329
MSR_IA32_SPEC_CTRL, MSR_IA32_TSX_CTRL,
330
MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH,
331
MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK,
332
MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B,
333
MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B,
334
MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B,
335
MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B,
336
MSR_IA32_UMWAIT_CONTROL,
337
338
MSR_IA32_XFD, MSR_IA32_XFD_ERR,
339
};
340
341
static const u32 msrs_to_save_pmu[] = {
342
MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1,
343
MSR_ARCH_PERFMON_FIXED_CTR0 + 2,
344
MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS,
345
MSR_CORE_PERF_GLOBAL_CTRL,
346
MSR_IA32_PEBS_ENABLE, MSR_IA32_DS_AREA, MSR_PEBS_DATA_CFG,
347
348
/* This part of MSRs should match KVM_MAX_NR_INTEL_GP_COUNTERS. */
349
MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1,
350
MSR_ARCH_PERFMON_PERFCTR0 + 2, MSR_ARCH_PERFMON_PERFCTR0 + 3,
351
MSR_ARCH_PERFMON_PERFCTR0 + 4, MSR_ARCH_PERFMON_PERFCTR0 + 5,
352
MSR_ARCH_PERFMON_PERFCTR0 + 6, MSR_ARCH_PERFMON_PERFCTR0 + 7,
353
MSR_ARCH_PERFMON_EVENTSEL0, MSR_ARCH_PERFMON_EVENTSEL1,
354
MSR_ARCH_PERFMON_EVENTSEL0 + 2, MSR_ARCH_PERFMON_EVENTSEL0 + 3,
355
MSR_ARCH_PERFMON_EVENTSEL0 + 4, MSR_ARCH_PERFMON_EVENTSEL0 + 5,
356
MSR_ARCH_PERFMON_EVENTSEL0 + 6, MSR_ARCH_PERFMON_EVENTSEL0 + 7,
357
358
MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3,
359
MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3,
360
361
/* This part of MSRs should match KVM_MAX_NR_AMD_GP_COUNTERS. */
362
MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2,
363
MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5,
364
MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2,
365
MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5,
366
367
MSR_AMD64_PERF_CNTR_GLOBAL_CTL,
368
MSR_AMD64_PERF_CNTR_GLOBAL_STATUS,
369
MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
370
};
371
372
static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_base) +
373
ARRAY_SIZE(msrs_to_save_pmu)];
374
static unsigned num_msrs_to_save;
375
376
static const u32 emulated_msrs_all[] = {
377
MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
378
MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
379
380
#ifdef CONFIG_KVM_HYPERV
381
HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
382
HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
383
HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY,
384
HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2,
385
HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
386
HV_X64_MSR_RESET,
387
HV_X64_MSR_VP_INDEX,
388
HV_X64_MSR_VP_RUNTIME,
389
HV_X64_MSR_SCONTROL,
390
HV_X64_MSR_STIMER0_CONFIG,
391
HV_X64_MSR_VP_ASSIST_PAGE,
392
HV_X64_MSR_REENLIGHTENMENT_CONTROL, HV_X64_MSR_TSC_EMULATION_CONTROL,
393
HV_X64_MSR_TSC_EMULATION_STATUS, HV_X64_MSR_TSC_INVARIANT_CONTROL,
394
HV_X64_MSR_SYNDBG_OPTIONS,
395
HV_X64_MSR_SYNDBG_CONTROL, HV_X64_MSR_SYNDBG_STATUS,
396
HV_X64_MSR_SYNDBG_SEND_BUFFER, HV_X64_MSR_SYNDBG_RECV_BUFFER,
397
HV_X64_MSR_SYNDBG_PENDING_BUFFER,
398
#endif
399
400
MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
401
MSR_KVM_PV_EOI_EN, MSR_KVM_ASYNC_PF_INT, MSR_KVM_ASYNC_PF_ACK,
402
403
MSR_IA32_TSC_ADJUST,
404
MSR_IA32_TSC_DEADLINE,
405
MSR_IA32_ARCH_CAPABILITIES,
406
MSR_IA32_PERF_CAPABILITIES,
407
MSR_IA32_MISC_ENABLE,
408
MSR_IA32_MCG_STATUS,
409
MSR_IA32_MCG_CTL,
410
MSR_IA32_MCG_EXT_CTL,
411
MSR_IA32_SMBASE,
412
MSR_SMI_COUNT,
413
MSR_PLATFORM_INFO,
414
MSR_MISC_FEATURES_ENABLES,
415
MSR_AMD64_VIRT_SPEC_CTRL,
416
MSR_AMD64_TSC_RATIO,
417
MSR_IA32_POWER_CTL,
418
MSR_IA32_UCODE_REV,
419
420
/*
421
* KVM always supports the "true" VMX control MSRs, even if the host
422
* does not. The VMX MSRs as a whole are considered "emulated" as KVM
423
* doesn't strictly require them to exist in the host (ignoring that
424
* KVM would refuse to load in the first place if the core set of MSRs
425
* aren't supported).
426
*/
427
MSR_IA32_VMX_BASIC,
428
MSR_IA32_VMX_TRUE_PINBASED_CTLS,
429
MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
430
MSR_IA32_VMX_TRUE_EXIT_CTLS,
431
MSR_IA32_VMX_TRUE_ENTRY_CTLS,
432
MSR_IA32_VMX_MISC,
433
MSR_IA32_VMX_CR0_FIXED0,
434
MSR_IA32_VMX_CR4_FIXED0,
435
MSR_IA32_VMX_VMCS_ENUM,
436
MSR_IA32_VMX_PROCBASED_CTLS2,
437
MSR_IA32_VMX_EPT_VPID_CAP,
438
MSR_IA32_VMX_VMFUNC,
439
440
MSR_K7_HWCR,
441
MSR_KVM_POLL_CONTROL,
442
};
443
444
static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)];
445
static unsigned num_emulated_msrs;
446
447
/*
448
* List of MSRs that control the existence of MSR-based features, i.e. MSRs
449
* that are effectively CPUID leafs. VMX MSRs are also included in the set of
450
* feature MSRs, but are handled separately to allow expedited lookups.
451
*/
452
static const u32 msr_based_features_all_except_vmx[] = {
453
MSR_AMD64_DE_CFG,
454
MSR_IA32_UCODE_REV,
455
MSR_IA32_ARCH_CAPABILITIES,
456
MSR_IA32_PERF_CAPABILITIES,
457
MSR_PLATFORM_INFO,
458
};
459
460
static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all_except_vmx) +
461
(KVM_LAST_EMULATED_VMX_MSR - KVM_FIRST_EMULATED_VMX_MSR + 1)];
462
static unsigned int num_msr_based_features;
463
464
/*
465
* All feature MSRs except uCode revID, which tracks the currently loaded uCode
466
* patch, are immutable once the vCPU model is defined.
467
*/
468
static bool kvm_is_immutable_feature_msr(u32 msr)
469
{
470
int i;
471
472
if (msr >= KVM_FIRST_EMULATED_VMX_MSR && msr <= KVM_LAST_EMULATED_VMX_MSR)
473
return true;
474
475
for (i = 0; i < ARRAY_SIZE(msr_based_features_all_except_vmx); i++) {
476
if (msr == msr_based_features_all_except_vmx[i])
477
return msr != MSR_IA32_UCODE_REV;
478
}
479
480
return false;
481
}
482
483
static bool kvm_is_advertised_msr(u32 msr_index)
484
{
485
unsigned int i;
486
487
for (i = 0; i < num_msrs_to_save; i++) {
488
if (msrs_to_save[i] == msr_index)
489
return true;
490
}
491
492
for (i = 0; i < num_emulated_msrs; i++) {
493
if (emulated_msrs[i] == msr_index)
494
return true;
495
}
496
497
return false;
498
}
499
500
typedef int (*msr_access_t)(struct kvm_vcpu *vcpu, u32 index, u64 *data,
501
bool host_initiated);
502
503
static __always_inline int kvm_do_msr_access(struct kvm_vcpu *vcpu, u32 msr,
504
u64 *data, bool host_initiated,
505
enum kvm_msr_access rw,
506
msr_access_t msr_access_fn)
507
{
508
const char *op = rw == MSR_TYPE_W ? "wrmsr" : "rdmsr";
509
int ret;
510
511
BUILD_BUG_ON(rw != MSR_TYPE_R && rw != MSR_TYPE_W);
512
513
/*
514
* Zero the data on read failures to avoid leaking stack data to the
515
* guest and/or userspace, e.g. if the failure is ignored below.
516
*/
517
ret = msr_access_fn(vcpu, msr, data, host_initiated);
518
if (ret && rw == MSR_TYPE_R)
519
*data = 0;
520
521
if (ret != KVM_MSR_RET_UNSUPPORTED)
522
return ret;
523
524
/*
525
* Userspace is allowed to read MSRs, and write '0' to MSRs, that KVM
526
* advertises to userspace, even if an MSR isn't fully supported.
527
* Simply check that @data is '0', which covers both the write '0' case
528
* and all reads (in which case @data is zeroed on failure; see above).
529
*/
530
if (host_initiated && !*data && kvm_is_advertised_msr(msr))
531
return 0;
532
533
if (!ignore_msrs) {
534
kvm_debug_ratelimited("unhandled %s: 0x%x data 0x%llx\n",
535
op, msr, *data);
536
return ret;
537
}
538
539
if (report_ignored_msrs)
540
kvm_pr_unimpl("ignored %s: 0x%x data 0x%llx\n", op, msr, *data);
541
542
return 0;
543
}
544
545
static struct kmem_cache *kvm_alloc_emulator_cache(void)
546
{
547
unsigned int useroffset = offsetof(struct x86_emulate_ctxt, src);
548
unsigned int size = sizeof(struct x86_emulate_ctxt);
549
550
return kmem_cache_create_usercopy("x86_emulator", size,
551
__alignof__(struct x86_emulate_ctxt),
552
SLAB_ACCOUNT, useroffset,
553
size - useroffset, NULL);
554
}
555
556
static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
557
558
static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
559
{
560
int i;
561
for (i = 0; i < ASYNC_PF_PER_VCPU; i++)
562
vcpu->arch.apf.gfns[i] = ~0;
563
}
564
565
static void kvm_on_user_return(struct user_return_notifier *urn)
566
{
567
unsigned slot;
568
struct kvm_user_return_msrs *msrs
569
= container_of(urn, struct kvm_user_return_msrs, urn);
570
struct kvm_user_return_msr_values *values;
571
unsigned long flags;
572
573
/*
574
* Disabling irqs at this point since the following code could be
575
* interrupted and executed through kvm_arch_disable_virtualization_cpu()
576
*/
577
local_irq_save(flags);
578
if (msrs->registered) {
579
msrs->registered = false;
580
user_return_notifier_unregister(urn);
581
}
582
local_irq_restore(flags);
583
for (slot = 0; slot < kvm_nr_uret_msrs; ++slot) {
584
values = &msrs->values[slot];
585
if (values->host != values->curr) {
586
wrmsrq(kvm_uret_msrs_list[slot], values->host);
587
values->curr = values->host;
588
}
589
}
590
}
591
592
static int kvm_probe_user_return_msr(u32 msr)
593
{
594
u64 val;
595
int ret;
596
597
preempt_disable();
598
ret = rdmsrq_safe(msr, &val);
599
if (ret)
600
goto out;
601
ret = wrmsrq_safe(msr, val);
602
out:
603
preempt_enable();
604
return ret;
605
}
606
607
int kvm_add_user_return_msr(u32 msr)
608
{
609
BUG_ON(kvm_nr_uret_msrs >= KVM_MAX_NR_USER_RETURN_MSRS);
610
611
if (kvm_probe_user_return_msr(msr))
612
return -1;
613
614
kvm_uret_msrs_list[kvm_nr_uret_msrs] = msr;
615
return kvm_nr_uret_msrs++;
616
}
617
EXPORT_SYMBOL_GPL(kvm_add_user_return_msr);
618
619
int kvm_find_user_return_msr(u32 msr)
620
{
621
int i;
622
623
for (i = 0; i < kvm_nr_uret_msrs; ++i) {
624
if (kvm_uret_msrs_list[i] == msr)
625
return i;
626
}
627
return -1;
628
}
629
EXPORT_SYMBOL_GPL(kvm_find_user_return_msr);
630
631
static void kvm_user_return_msr_cpu_online(void)
632
{
633
struct kvm_user_return_msrs *msrs = this_cpu_ptr(user_return_msrs);
634
u64 value;
635
int i;
636
637
for (i = 0; i < kvm_nr_uret_msrs; ++i) {
638
rdmsrq_safe(kvm_uret_msrs_list[i], &value);
639
msrs->values[i].host = value;
640
msrs->values[i].curr = value;
641
}
642
}
643
644
static void kvm_user_return_register_notifier(struct kvm_user_return_msrs *msrs)
645
{
646
if (!msrs->registered) {
647
msrs->urn.on_user_return = kvm_on_user_return;
648
user_return_notifier_register(&msrs->urn);
649
msrs->registered = true;
650
}
651
}
652
653
int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
654
{
655
struct kvm_user_return_msrs *msrs = this_cpu_ptr(user_return_msrs);
656
int err;
657
658
value = (value & mask) | (msrs->values[slot].host & ~mask);
659
if (value == msrs->values[slot].curr)
660
return 0;
661
err = wrmsrq_safe(kvm_uret_msrs_list[slot], value);
662
if (err)
663
return 1;
664
665
msrs->values[slot].curr = value;
666
kvm_user_return_register_notifier(msrs);
667
return 0;
668
}
669
EXPORT_SYMBOL_GPL(kvm_set_user_return_msr);
670
671
void kvm_user_return_msr_update_cache(unsigned int slot, u64 value)
672
{
673
struct kvm_user_return_msrs *msrs = this_cpu_ptr(user_return_msrs);
674
675
msrs->values[slot].curr = value;
676
kvm_user_return_register_notifier(msrs);
677
}
678
EXPORT_SYMBOL_GPL(kvm_user_return_msr_update_cache);
679
680
static void drop_user_return_notifiers(void)
681
{
682
struct kvm_user_return_msrs *msrs = this_cpu_ptr(user_return_msrs);
683
684
if (msrs->registered)
685
kvm_on_user_return(&msrs->urn);
686
}
687
688
/*
689
* Handle a fault on a hardware virtualization (VMX or SVM) instruction.
690
*
691
* Hardware virtualization extension instructions may fault if a reboot turns
692
* off virtualization while processes are running. Usually after catching the
693
* fault we just panic; during reboot instead the instruction is ignored.
694
*/
695
noinstr void kvm_spurious_fault(void)
696
{
697
/* Fault while not rebooting. We want the trace. */
698
BUG_ON(!kvm_rebooting);
699
}
700
EXPORT_SYMBOL_GPL(kvm_spurious_fault);
701
702
#define EXCPT_BENIGN 0
703
#define EXCPT_CONTRIBUTORY 1
704
#define EXCPT_PF 2
705
706
static int exception_class(int vector)
707
{
708
switch (vector) {
709
case PF_VECTOR:
710
return EXCPT_PF;
711
case DE_VECTOR:
712
case TS_VECTOR:
713
case NP_VECTOR:
714
case SS_VECTOR:
715
case GP_VECTOR:
716
return EXCPT_CONTRIBUTORY;
717
default:
718
break;
719
}
720
return EXCPT_BENIGN;
721
}
722
723
#define EXCPT_FAULT 0
724
#define EXCPT_TRAP 1
725
#define EXCPT_ABORT 2
726
#define EXCPT_INTERRUPT 3
727
#define EXCPT_DB 4
728
729
static int exception_type(int vector)
730
{
731
unsigned int mask;
732
733
if (WARN_ON(vector > 31 || vector == NMI_VECTOR))
734
return EXCPT_INTERRUPT;
735
736
mask = 1 << vector;
737
738
/*
739
* #DBs can be trap-like or fault-like, the caller must check other CPU
740
* state, e.g. DR6, to determine whether a #DB is a trap or fault.
741
*/
742
if (mask & (1 << DB_VECTOR))
743
return EXCPT_DB;
744
745
if (mask & ((1 << BP_VECTOR) | (1 << OF_VECTOR)))
746
return EXCPT_TRAP;
747
748
if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR)))
749
return EXCPT_ABORT;
750
751
/* Reserved exceptions will result in fault */
752
return EXCPT_FAULT;
753
}
754
755
void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu,
756
struct kvm_queued_exception *ex)
757
{
758
if (!ex->has_payload)
759
return;
760
761
switch (ex->vector) {
762
case DB_VECTOR:
763
/*
764
* "Certain debug exceptions may clear bit 0-3. The
765
* remaining contents of the DR6 register are never
766
* cleared by the processor".
767
*/
768
vcpu->arch.dr6 &= ~DR_TRAP_BITS;
769
/*
770
* In order to reflect the #DB exception payload in guest
771
* dr6, three components need to be considered: active low
772
* bit, FIXED_1 bits and active high bits (e.g. DR6_BD,
773
* DR6_BS and DR6_BT)
774
* DR6_ACTIVE_LOW contains the FIXED_1 and active low bits.
775
* In the target guest dr6:
776
* FIXED_1 bits should always be set.
777
* Active low bits should be cleared if 1-setting in payload.
778
* Active high bits should be set if 1-setting in payload.
779
*
780
* Note, the payload is compatible with the pending debug
781
* exceptions/exit qualification under VMX, that active_low bits
782
* are active high in payload.
783
* So they need to be flipped for DR6.
784
*/
785
vcpu->arch.dr6 |= DR6_ACTIVE_LOW;
786
vcpu->arch.dr6 |= ex->payload;
787
vcpu->arch.dr6 ^= ex->payload & DR6_ACTIVE_LOW;
788
789
/*
790
* The #DB payload is defined as compatible with the 'pending
791
* debug exceptions' field under VMX, not DR6. While bit 12 is
792
* defined in the 'pending debug exceptions' field (enabled
793
* breakpoint), it is reserved and must be zero in DR6.
794
*/
795
vcpu->arch.dr6 &= ~BIT(12);
796
break;
797
case PF_VECTOR:
798
vcpu->arch.cr2 = ex->payload;
799
break;
800
}
801
802
ex->has_payload = false;
803
ex->payload = 0;
804
}
805
EXPORT_SYMBOL_GPL(kvm_deliver_exception_payload);
806
807
static void kvm_queue_exception_vmexit(struct kvm_vcpu *vcpu, unsigned int vector,
808
bool has_error_code, u32 error_code,
809
bool has_payload, unsigned long payload)
810
{
811
struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit;
812
813
ex->vector = vector;
814
ex->injected = false;
815
ex->pending = true;
816
ex->has_error_code = has_error_code;
817
ex->error_code = error_code;
818
ex->has_payload = has_payload;
819
ex->payload = payload;
820
}
821
822
static void kvm_multiple_exception(struct kvm_vcpu *vcpu, unsigned int nr,
823
bool has_error, u32 error_code,
824
bool has_payload, unsigned long payload)
825
{
826
u32 prev_nr;
827
int class1, class2;
828
829
kvm_make_request(KVM_REQ_EVENT, vcpu);
830
831
/*
832
* If the exception is destined for L2, morph it to a VM-Exit if L1
833
* wants to intercept the exception.
834
*/
835
if (is_guest_mode(vcpu) &&
836
kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, nr, error_code)) {
837
kvm_queue_exception_vmexit(vcpu, nr, has_error, error_code,
838
has_payload, payload);
839
return;
840
}
841
842
if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
843
queue:
844
vcpu->arch.exception.pending = true;
845
vcpu->arch.exception.injected = false;
846
847
vcpu->arch.exception.has_error_code = has_error;
848
vcpu->arch.exception.vector = nr;
849
vcpu->arch.exception.error_code = error_code;
850
vcpu->arch.exception.has_payload = has_payload;
851
vcpu->arch.exception.payload = payload;
852
if (!is_guest_mode(vcpu))
853
kvm_deliver_exception_payload(vcpu,
854
&vcpu->arch.exception);
855
return;
856
}
857
858
/* to check exception */
859
prev_nr = vcpu->arch.exception.vector;
860
if (prev_nr == DF_VECTOR) {
861
/* triple fault -> shutdown */
862
kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
863
return;
864
}
865
class1 = exception_class(prev_nr);
866
class2 = exception_class(nr);
867
if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY) ||
868
(class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
869
/*
870
* Synthesize #DF. Clear the previously injected or pending
871
* exception so as not to incorrectly trigger shutdown.
872
*/
873
vcpu->arch.exception.injected = false;
874
vcpu->arch.exception.pending = false;
875
876
kvm_queue_exception_e(vcpu, DF_VECTOR, 0);
877
} else {
878
/* replace previous exception with a new one in a hope
879
that instruction re-execution will regenerate lost
880
exception */
881
goto queue;
882
}
883
}
884
885
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
886
{
887
kvm_multiple_exception(vcpu, nr, false, 0, false, 0);
888
}
889
EXPORT_SYMBOL_GPL(kvm_queue_exception);
890
891
892
void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
893
unsigned long payload)
894
{
895
kvm_multiple_exception(vcpu, nr, false, 0, true, payload);
896
}
897
EXPORT_SYMBOL_GPL(kvm_queue_exception_p);
898
899
static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr,
900
u32 error_code, unsigned long payload)
901
{
902
kvm_multiple_exception(vcpu, nr, true, error_code, true, payload);
903
}
904
905
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned int nr,
906
bool has_error_code, u32 error_code)
907
{
908
909
/*
910
* On VM-Entry, an exception can be pending if and only if event
911
* injection was blocked by nested_run_pending. In that case, however,
912
* vcpu_enter_guest() requests an immediate exit, and the guest
913
* shouldn't proceed far enough to need reinjection.
914
*/
915
WARN_ON_ONCE(kvm_is_exception_pending(vcpu));
916
917
/*
918
* Do not check for interception when injecting an event for L2, as the
919
* exception was checked for intercept when it was original queued, and
920
* re-checking is incorrect if _L1_ injected the exception, in which
921
* case it's exempt from interception.
922
*/
923
kvm_make_request(KVM_REQ_EVENT, vcpu);
924
925
vcpu->arch.exception.injected = true;
926
vcpu->arch.exception.has_error_code = has_error_code;
927
vcpu->arch.exception.vector = nr;
928
vcpu->arch.exception.error_code = error_code;
929
vcpu->arch.exception.has_payload = false;
930
vcpu->arch.exception.payload = 0;
931
}
932
EXPORT_SYMBOL_GPL(kvm_requeue_exception);
933
934
int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
935
{
936
if (err)
937
kvm_inject_gp(vcpu, 0);
938
else
939
return kvm_skip_emulated_instruction(vcpu);
940
941
return 1;
942
}
943
EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
944
945
static int complete_emulated_insn_gp(struct kvm_vcpu *vcpu, int err)
946
{
947
if (err) {
948
kvm_inject_gp(vcpu, 0);
949
return 1;
950
}
951
952
return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE | EMULTYPE_SKIP |
953
EMULTYPE_COMPLETE_USER_EXIT);
954
}
955
956
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
957
{
958
++vcpu->stat.pf_guest;
959
960
/*
961
* Async #PF in L2 is always forwarded to L1 as a VM-Exit regardless of
962
* whether or not L1 wants to intercept "regular" #PF.
963
*/
964
if (is_guest_mode(vcpu) && fault->async_page_fault)
965
kvm_queue_exception_vmexit(vcpu, PF_VECTOR,
966
true, fault->error_code,
967
true, fault->address);
968
else
969
kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code,
970
fault->address);
971
}
972
973
void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
974
struct x86_exception *fault)
975
{
976
struct kvm_mmu *fault_mmu;
977
WARN_ON_ONCE(fault->vector != PF_VECTOR);
978
979
fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu :
980
vcpu->arch.walk_mmu;
981
982
/*
983
* Invalidate the TLB entry for the faulting address, if it exists,
984
* else the access will fault indefinitely (and to emulate hardware).
985
*/
986
if ((fault->error_code & PFERR_PRESENT_MASK) &&
987
!(fault->error_code & PFERR_RSVD_MASK))
988
kvm_mmu_invalidate_addr(vcpu, fault_mmu, fault->address,
989
KVM_MMU_ROOT_CURRENT);
990
991
fault_mmu->inject_page_fault(vcpu, fault);
992
}
993
EXPORT_SYMBOL_GPL(kvm_inject_emulated_page_fault);
994
995
void kvm_inject_nmi(struct kvm_vcpu *vcpu)
996
{
997
atomic_inc(&vcpu->arch.nmi_queued);
998
kvm_make_request(KVM_REQ_NMI, vcpu);
999
}
1000
1001
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
1002
{
1003
kvm_multiple_exception(vcpu, nr, true, error_code, false, 0);
1004
}
1005
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
1006
1007
/*
1008
* Checks if cpl <= required_cpl; if true, return true. Otherwise queue
1009
* a #GP and return false.
1010
*/
1011
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
1012
{
1013
if (kvm_x86_call(get_cpl)(vcpu) <= required_cpl)
1014
return true;
1015
kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
1016
return false;
1017
}
1018
1019
bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
1020
{
1021
if ((dr != 4 && dr != 5) || !kvm_is_cr4_bit_set(vcpu, X86_CR4_DE))
1022
return true;
1023
1024
kvm_queue_exception(vcpu, UD_VECTOR);
1025
return false;
1026
}
1027
EXPORT_SYMBOL_GPL(kvm_require_dr);
1028
1029
static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
1030
{
1031
return vcpu->arch.reserved_gpa_bits | rsvd_bits(5, 8) | rsvd_bits(1, 2);
1032
}
1033
1034
/*
1035
* Load the pae pdptrs. Return 1 if they are all valid, 0 otherwise.
1036
*/
1037
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
1038
{
1039
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
1040
gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
1041
gpa_t real_gpa;
1042
int i;
1043
int ret;
1044
u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];
1045
1046
/*
1047
* If the MMU is nested, CR3 holds an L2 GPA and needs to be translated
1048
* to an L1 GPA.
1049
*/
1050
real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(pdpt_gfn),
1051
PFERR_USER_MASK | PFERR_WRITE_MASK, NULL);
1052
if (real_gpa == INVALID_GPA)
1053
return 0;
1054
1055
/* Note the offset, PDPTRs are 32 byte aligned when using PAE paging. */
1056
ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(real_gpa), pdpte,
1057
cr3 & GENMASK(11, 5), sizeof(pdpte));
1058
if (ret < 0)
1059
return 0;
1060
1061
for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
1062
if ((pdpte[i] & PT_PRESENT_MASK) &&
1063
(pdpte[i] & pdptr_rsvd_bits(vcpu))) {
1064
return 0;
1065
}
1066
}
1067
1068
/*
1069
* Marking VCPU_EXREG_PDPTR dirty doesn't work for !tdp_enabled.
1070
* Shadow page roots need to be reconstructed instead.
1071
*/
1072
if (!tdp_enabled && memcmp(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)))
1073
kvm_mmu_free_roots(vcpu->kvm, mmu, KVM_MMU_ROOT_CURRENT);
1074
1075
memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
1076
kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
1077
kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
1078
vcpu->arch.pdptrs_from_userspace = false;
1079
1080
return 1;
1081
}
1082
EXPORT_SYMBOL_GPL(load_pdptrs);
1083
1084
static bool kvm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1085
{
1086
#ifdef CONFIG_X86_64
1087
if (cr0 & 0xffffffff00000000UL)
1088
return false;
1089
#endif
1090
1091
if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
1092
return false;
1093
1094
if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
1095
return false;
1096
1097
return kvm_x86_call(is_valid_cr0)(vcpu, cr0);
1098
}
1099
1100
void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0)
1101
{
1102
/*
1103
* CR0.WP is incorporated into the MMU role, but only for non-nested,
1104
* indirect shadow MMUs. If paging is disabled, no updates are needed
1105
* as there are no permission bits to emulate. If TDP is enabled, the
1106
* MMU's metadata needs to be updated, e.g. so that emulating guest
1107
* translations does the right thing, but there's no need to unload the
1108
* root as CR0.WP doesn't affect SPTEs.
1109
*/
1110
if ((cr0 ^ old_cr0) == X86_CR0_WP) {
1111
if (!(cr0 & X86_CR0_PG))
1112
return;
1113
1114
if (tdp_enabled) {
1115
kvm_init_mmu(vcpu);
1116
return;
1117
}
1118
}
1119
1120
if ((cr0 ^ old_cr0) & X86_CR0_PG) {
1121
kvm_clear_async_pf_completion_queue(vcpu);
1122
kvm_async_pf_hash_reset(vcpu);
1123
1124
/*
1125
* Clearing CR0.PG is defined to flush the TLB from the guest's
1126
* perspective.
1127
*/
1128
if (!(cr0 & X86_CR0_PG))
1129
kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
1130
}
1131
1132
if ((cr0 ^ old_cr0) & KVM_MMU_CR0_ROLE_BITS)
1133
kvm_mmu_reset_context(vcpu);
1134
}
1135
EXPORT_SYMBOL_GPL(kvm_post_set_cr0);
1136
1137
int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1138
{
1139
unsigned long old_cr0 = kvm_read_cr0(vcpu);
1140
1141
if (!kvm_is_valid_cr0(vcpu, cr0))
1142
return 1;
1143
1144
cr0 |= X86_CR0_ET;
1145
1146
/* Write to CR0 reserved bits are ignored, even on Intel. */
1147
cr0 &= ~CR0_RESERVED_BITS;
1148
1149
#ifdef CONFIG_X86_64
1150
if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) &&
1151
(cr0 & X86_CR0_PG)) {
1152
int cs_db, cs_l;
1153
1154
if (!is_pae(vcpu))
1155
return 1;
1156
kvm_x86_call(get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
1157
if (cs_l)
1158
return 1;
1159
}
1160
#endif
1161
if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) &&
1162
is_pae(vcpu) && ((cr0 ^ old_cr0) & X86_CR0_PDPTR_BITS) &&
1163
!load_pdptrs(vcpu, kvm_read_cr3(vcpu)))
1164
return 1;
1165
1166
if (!(cr0 & X86_CR0_PG) &&
1167
(is_64_bit_mode(vcpu) || kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)))
1168
return 1;
1169
1170
kvm_x86_call(set_cr0)(vcpu, cr0);
1171
1172
kvm_post_set_cr0(vcpu, old_cr0, cr0);
1173
1174
return 0;
1175
}
1176
EXPORT_SYMBOL_GPL(kvm_set_cr0);
1177
1178
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
1179
{
1180
(void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
1181
}
1182
EXPORT_SYMBOL_GPL(kvm_lmsw);
1183
1184
void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
1185
{
1186
if (vcpu->arch.guest_state_protected)
1187
return;
1188
1189
if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
1190
1191
if (vcpu->arch.xcr0 != kvm_host.xcr0)
1192
xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
1193
1194
if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) &&
1195
vcpu->arch.ia32_xss != kvm_host.xss)
1196
wrmsrq(MSR_IA32_XSS, vcpu->arch.ia32_xss);
1197
}
1198
1199
if (cpu_feature_enabled(X86_FEATURE_PKU) &&
1200
vcpu->arch.pkru != vcpu->arch.host_pkru &&
1201
((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
1202
kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE)))
1203
wrpkru(vcpu->arch.pkru);
1204
}
1205
EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state);
1206
1207
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
1208
{
1209
if (vcpu->arch.guest_state_protected)
1210
return;
1211
1212
if (cpu_feature_enabled(X86_FEATURE_PKU) &&
1213
((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
1214
kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE))) {
1215
vcpu->arch.pkru = rdpkru();
1216
if (vcpu->arch.pkru != vcpu->arch.host_pkru)
1217
wrpkru(vcpu->arch.host_pkru);
1218
}
1219
1220
if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
1221
1222
if (vcpu->arch.xcr0 != kvm_host.xcr0)
1223
xsetbv(XCR_XFEATURE_ENABLED_MASK, kvm_host.xcr0);
1224
1225
if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) &&
1226
vcpu->arch.ia32_xss != kvm_host.xss)
1227
wrmsrq(MSR_IA32_XSS, kvm_host.xss);
1228
}
1229
1230
}
1231
EXPORT_SYMBOL_GPL(kvm_load_host_xsave_state);
1232
1233
#ifdef CONFIG_X86_64
1234
static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu)
1235
{
1236
return vcpu->arch.guest_supported_xcr0 & XFEATURE_MASK_USER_DYNAMIC;
1237
}
1238
#endif
1239
1240
static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
1241
{
1242
u64 xcr0 = xcr;
1243
u64 old_xcr0 = vcpu->arch.xcr0;
1244
u64 valid_bits;
1245
1246
/* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */
1247
if (index != XCR_XFEATURE_ENABLED_MASK)
1248
return 1;
1249
if (!(xcr0 & XFEATURE_MASK_FP))
1250
return 1;
1251
if ((xcr0 & XFEATURE_MASK_YMM) && !(xcr0 & XFEATURE_MASK_SSE))
1252
return 1;
1253
1254
/*
1255
* Do not allow the guest to set bits that we do not support
1256
* saving. However, xcr0 bit 0 is always set, even if the
1257
* emulated CPU does not support XSAVE (see kvm_vcpu_reset()).
1258
*/
1259
valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
1260
if (xcr0 & ~valid_bits)
1261
return 1;
1262
1263
if ((!(xcr0 & XFEATURE_MASK_BNDREGS)) !=
1264
(!(xcr0 & XFEATURE_MASK_BNDCSR)))
1265
return 1;
1266
1267
if (xcr0 & XFEATURE_MASK_AVX512) {
1268
if (!(xcr0 & XFEATURE_MASK_YMM))
1269
return 1;
1270
if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
1271
return 1;
1272
}
1273
1274
if ((xcr0 & XFEATURE_MASK_XTILE) &&
1275
((xcr0 & XFEATURE_MASK_XTILE) != XFEATURE_MASK_XTILE))
1276
return 1;
1277
1278
vcpu->arch.xcr0 = xcr0;
1279
1280
if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
1281
vcpu->arch.cpuid_dynamic_bits_dirty = true;
1282
return 0;
1283
}
1284
1285
int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu)
1286
{
1287
/* Note, #UD due to CR4.OSXSAVE=0 has priority over the intercept. */
1288
if (kvm_x86_call(get_cpl)(vcpu) != 0 ||
1289
__kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) {
1290
kvm_inject_gp(vcpu, 0);
1291
return 1;
1292
}
1293
1294
return kvm_skip_emulated_instruction(vcpu);
1295
}
1296
EXPORT_SYMBOL_GPL(kvm_emulate_xsetbv);
1297
1298
static bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1299
{
1300
return __kvm_is_valid_cr4(vcpu, cr4) &&
1301
kvm_x86_call(is_valid_cr4)(vcpu, cr4);
1302
}
1303
1304
void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4)
1305
{
1306
if ((cr4 ^ old_cr4) & KVM_MMU_CR4_ROLE_BITS)
1307
kvm_mmu_reset_context(vcpu);
1308
1309
/*
1310
* If CR4.PCIDE is changed 0 -> 1, there is no need to flush the TLB
1311
* according to the SDM; however, stale prev_roots could be reused
1312
* incorrectly in the future after a MOV to CR3 with NOFLUSH=1, so we
1313
* free them all. This is *not* a superset of KVM_REQ_TLB_FLUSH_GUEST
1314
* or KVM_REQ_TLB_FLUSH_CURRENT, because the hardware TLB is not flushed,
1315
* so fall through.
1316
*/
1317
if (!tdp_enabled &&
1318
(cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE))
1319
kvm_mmu_unload(vcpu);
1320
1321
/*
1322
* The TLB has to be flushed for all PCIDs if any of the following
1323
* (architecturally required) changes happen:
1324
* - CR4.PCIDE is changed from 1 to 0
1325
* - CR4.PGE is toggled
1326
*
1327
* This is a superset of KVM_REQ_TLB_FLUSH_CURRENT.
1328
*/
1329
if (((cr4 ^ old_cr4) & X86_CR4_PGE) ||
1330
(!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
1331
kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
1332
1333
/*
1334
* The TLB has to be flushed for the current PCID if any of the
1335
* following (architecturally required) changes happen:
1336
* - CR4.SMEP is changed from 0 to 1
1337
* - CR4.PAE is toggled
1338
*/
1339
else if (((cr4 ^ old_cr4) & X86_CR4_PAE) ||
1340
((cr4 & X86_CR4_SMEP) && !(old_cr4 & X86_CR4_SMEP)))
1341
kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
1342
1343
}
1344
EXPORT_SYMBOL_GPL(kvm_post_set_cr4);
1345
1346
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1347
{
1348
unsigned long old_cr4 = kvm_read_cr4(vcpu);
1349
1350
if (!kvm_is_valid_cr4(vcpu, cr4))
1351
return 1;
1352
1353
if (is_long_mode(vcpu)) {
1354
if (!(cr4 & X86_CR4_PAE))
1355
return 1;
1356
if ((cr4 ^ old_cr4) & X86_CR4_LA57)
1357
return 1;
1358
} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
1359
&& ((cr4 ^ old_cr4) & X86_CR4_PDPTR_BITS)
1360
&& !load_pdptrs(vcpu, kvm_read_cr3(vcpu)))
1361
return 1;
1362
1363
if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
1364
/* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
1365
if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
1366
return 1;
1367
}
1368
1369
kvm_x86_call(set_cr4)(vcpu, cr4);
1370
1371
kvm_post_set_cr4(vcpu, old_cr4, cr4);
1372
1373
return 0;
1374
}
1375
EXPORT_SYMBOL_GPL(kvm_set_cr4);
1376
1377
static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid)
1378
{
1379
struct kvm_mmu *mmu = vcpu->arch.mmu;
1380
unsigned long roots_to_free = 0;
1381
int i;
1382
1383
/*
1384
* MOV CR3 and INVPCID are usually not intercepted when using TDP, but
1385
* this is reachable when running EPT=1 and unrestricted_guest=0, and
1386
* also via the emulator. KVM's TDP page tables are not in the scope of
1387
* the invalidation, but the guest's TLB entries need to be flushed as
1388
* the CPU may have cached entries in its TLB for the target PCID.
1389
*/
1390
if (unlikely(tdp_enabled)) {
1391
kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
1392
return;
1393
}
1394
1395
/*
1396
* If neither the current CR3 nor any of the prev_roots use the given
1397
* PCID, then nothing needs to be done here because a resync will
1398
* happen anyway before switching to any other CR3.
1399
*/
1400
if (kvm_get_active_pcid(vcpu) == pcid) {
1401
kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
1402
kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
1403
}
1404
1405
/*
1406
* If PCID is disabled, there is no need to free prev_roots even if the
1407
* PCIDs for them are also 0, because MOV to CR3 always flushes the TLB
1408
* with PCIDE=0.
1409
*/
1410
if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE))
1411
return;
1412
1413
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
1414
if (kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd) == pcid)
1415
roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
1416
1417
kvm_mmu_free_roots(vcpu->kvm, mmu, roots_to_free);
1418
}
1419
1420
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
1421
{
1422
bool skip_tlb_flush = false;
1423
unsigned long pcid = 0;
1424
#ifdef CONFIG_X86_64
1425
if (kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)) {
1426
skip_tlb_flush = cr3 & X86_CR3_PCID_NOFLUSH;
1427
cr3 &= ~X86_CR3_PCID_NOFLUSH;
1428
pcid = cr3 & X86_CR3_PCID_MASK;
1429
}
1430
#endif
1431
1432
/* PDPTRs are always reloaded for PAE paging. */
1433
if (cr3 == kvm_read_cr3(vcpu) && !is_pae_paging(vcpu))
1434
goto handle_tlb_flush;
1435
1436
/*
1437
* Do not condition the GPA check on long mode, this helper is used to
1438
* stuff CR3, e.g. for RSM emulation, and there is no guarantee that
1439
* the current vCPU mode is accurate.
1440
*/
1441
if (!kvm_vcpu_is_legal_cr3(vcpu, cr3))
1442
return 1;
1443
1444
if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, cr3))
1445
return 1;
1446
1447
if (cr3 != kvm_read_cr3(vcpu))
1448
kvm_mmu_new_pgd(vcpu, cr3);
1449
1450
vcpu->arch.cr3 = cr3;
1451
kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
1452
/* Do not call post_set_cr3, we do not get here for confidential guests. */
1453
1454
handle_tlb_flush:
1455
/*
1456
* A load of CR3 that flushes the TLB flushes only the current PCID,
1457
* even if PCID is disabled, in which case PCID=0 is flushed. It's a
1458
* moot point in the end because _disabling_ PCID will flush all PCIDs,
1459
* and it's impossible to use a non-zero PCID when PCID is disabled,
1460
* i.e. only PCID=0 can be relevant.
1461
*/
1462
if (!skip_tlb_flush)
1463
kvm_invalidate_pcid(vcpu, pcid);
1464
1465
return 0;
1466
}
1467
EXPORT_SYMBOL_GPL(kvm_set_cr3);
1468
1469
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
1470
{
1471
if (cr8 & CR8_RESERVED_BITS)
1472
return 1;
1473
if (lapic_in_kernel(vcpu))
1474
kvm_lapic_set_tpr(vcpu, cr8);
1475
else
1476
vcpu->arch.cr8 = cr8;
1477
return 0;
1478
}
1479
EXPORT_SYMBOL_GPL(kvm_set_cr8);
1480
1481
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
1482
{
1483
if (lapic_in_kernel(vcpu))
1484
return kvm_lapic_get_cr8(vcpu);
1485
else
1486
return vcpu->arch.cr8;
1487
}
1488
EXPORT_SYMBOL_GPL(kvm_get_cr8);
1489
1490
static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
1491
{
1492
int i;
1493
1494
if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
1495
for (i = 0; i < KVM_NR_DB_REGS; i++)
1496
vcpu->arch.eff_db[i] = vcpu->arch.db[i];
1497
}
1498
}
1499
1500
void kvm_update_dr7(struct kvm_vcpu *vcpu)
1501
{
1502
unsigned long dr7;
1503
1504
if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
1505
dr7 = vcpu->arch.guest_debug_dr7;
1506
else
1507
dr7 = vcpu->arch.dr7;
1508
kvm_x86_call(set_dr7)(vcpu, dr7);
1509
vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED;
1510
if (dr7 & DR7_BP_EN_MASK)
1511
vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
1512
}
1513
EXPORT_SYMBOL_GPL(kvm_update_dr7);
1514
1515
static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
1516
{
1517
u64 fixed = DR6_FIXED_1;
1518
1519
if (!guest_cpu_cap_has(vcpu, X86_FEATURE_RTM))
1520
fixed |= DR6_RTM;
1521
1522
if (!guest_cpu_cap_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT))
1523
fixed |= DR6_BUS_LOCK;
1524
return fixed;
1525
}
1526
1527
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
1528
{
1529
size_t size = ARRAY_SIZE(vcpu->arch.db);
1530
1531
switch (dr) {
1532
case 0 ... 3:
1533
vcpu->arch.db[array_index_nospec(dr, size)] = val;
1534
if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
1535
vcpu->arch.eff_db[dr] = val;
1536
break;
1537
case 4:
1538
case 6:
1539
if (!kvm_dr6_valid(val))
1540
return 1; /* #GP */
1541
vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
1542
break;
1543
case 5:
1544
default: /* 7 */
1545
if (!kvm_dr7_valid(val))
1546
return 1; /* #GP */
1547
vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
1548
kvm_update_dr7(vcpu);
1549
break;
1550
}
1551
1552
return 0;
1553
}
1554
EXPORT_SYMBOL_GPL(kvm_set_dr);
1555
1556
unsigned long kvm_get_dr(struct kvm_vcpu *vcpu, int dr)
1557
{
1558
size_t size = ARRAY_SIZE(vcpu->arch.db);
1559
1560
switch (dr) {
1561
case 0 ... 3:
1562
return vcpu->arch.db[array_index_nospec(dr, size)];
1563
case 4:
1564
case 6:
1565
return vcpu->arch.dr6;
1566
case 5:
1567
default: /* 7 */
1568
return vcpu->arch.dr7;
1569
}
1570
}
1571
EXPORT_SYMBOL_GPL(kvm_get_dr);
1572
1573
int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu)
1574
{
1575
u32 ecx = kvm_rcx_read(vcpu);
1576
u64 data;
1577
1578
if (kvm_pmu_rdpmc(vcpu, ecx, &data)) {
1579
kvm_inject_gp(vcpu, 0);
1580
return 1;
1581
}
1582
1583
kvm_rax_write(vcpu, (u32)data);
1584
kvm_rdx_write(vcpu, data >> 32);
1585
return kvm_skip_emulated_instruction(vcpu);
1586
}
1587
EXPORT_SYMBOL_GPL(kvm_emulate_rdpmc);
1588
1589
/*
1590
* Some IA32_ARCH_CAPABILITIES bits have dependencies on MSRs that KVM
1591
* does not yet virtualize. These include:
1592
* 10 - MISC_PACKAGE_CTRLS
1593
* 11 - ENERGY_FILTERING_CTL
1594
* 12 - DOITM
1595
* 18 - FB_CLEAR_CTRL
1596
* 21 - XAPIC_DISABLE_STATUS
1597
* 23 - OVERCLOCKING_STATUS
1598
*/
1599
1600
#define KVM_SUPPORTED_ARCH_CAP \
1601
(ARCH_CAP_RDCL_NO | ARCH_CAP_IBRS_ALL | ARCH_CAP_RSBA | \
1602
ARCH_CAP_SKIP_VMENTRY_L1DFLUSH | ARCH_CAP_SSB_NO | ARCH_CAP_MDS_NO | \
1603
ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \
1604
ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \
1605
ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO | \
1606
ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR | ARCH_CAP_BHI_NO | ARCH_CAP_ITS_NO)
1607
1608
static u64 kvm_get_arch_capabilities(void)
1609
{
1610
u64 data = kvm_host.arch_capabilities & KVM_SUPPORTED_ARCH_CAP;
1611
1612
/*
1613
* If nx_huge_pages is enabled, KVM's shadow paging will ensure that
1614
* the nested hypervisor runs with NX huge pages. If it is not,
1615
* L1 is anyway vulnerable to ITLB_MULTIHIT exploits from other
1616
* L1 guests, so it need not worry about its own (L2) guests.
1617
*/
1618
data |= ARCH_CAP_PSCHANGE_MC_NO;
1619
1620
/*
1621
* If we're doing cache flushes (either "always" or "cond")
1622
* we will do one whenever the guest does a vmlaunch/vmresume.
1623
* If an outer hypervisor is doing the cache flush for us
1624
* (ARCH_CAP_SKIP_VMENTRY_L1DFLUSH), we can safely pass that
1625
* capability to the guest too, and if EPT is disabled we're not
1626
* vulnerable. Overall, only VMENTER_L1D_FLUSH_NEVER will
1627
* require a nested hypervisor to do a flush of its own.
1628
*/
1629
if (l1tf_vmx_mitigation != VMENTER_L1D_FLUSH_NEVER)
1630
data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH;
1631
1632
if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
1633
data |= ARCH_CAP_RDCL_NO;
1634
if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
1635
data |= ARCH_CAP_SSB_NO;
1636
if (!boot_cpu_has_bug(X86_BUG_MDS))
1637
data |= ARCH_CAP_MDS_NO;
1638
if (!boot_cpu_has_bug(X86_BUG_RFDS))
1639
data |= ARCH_CAP_RFDS_NO;
1640
if (!boot_cpu_has_bug(X86_BUG_ITS))
1641
data |= ARCH_CAP_ITS_NO;
1642
1643
if (!boot_cpu_has(X86_FEATURE_RTM)) {
1644
/*
1645
* If RTM=0 because the kernel has disabled TSX, the host might
1646
* have TAA_NO or TSX_CTRL. Clear TAA_NO (the guest sees RTM=0
1647
* and therefore knows that there cannot be TAA) but keep
1648
* TSX_CTRL: some buggy userspaces leave it set on tsx=on hosts,
1649
* and we want to allow migrating those guests to tsx=off hosts.
1650
*/
1651
data &= ~ARCH_CAP_TAA_NO;
1652
} else if (!boot_cpu_has_bug(X86_BUG_TAA)) {
1653
data |= ARCH_CAP_TAA_NO;
1654
} else {
1655
/*
1656
* Nothing to do here; we emulate TSX_CTRL if present on the
1657
* host so the guest can choose between disabling TSX or
1658
* using VERW to clear CPU buffers.
1659
*/
1660
}
1661
1662
if (!boot_cpu_has_bug(X86_BUG_GDS) || gds_ucode_mitigated())
1663
data |= ARCH_CAP_GDS_NO;
1664
1665
return data;
1666
}
1667
1668
static int kvm_get_feature_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
1669
bool host_initiated)
1670
{
1671
WARN_ON_ONCE(!host_initiated);
1672
1673
switch (index) {
1674
case MSR_IA32_ARCH_CAPABILITIES:
1675
*data = kvm_get_arch_capabilities();
1676
break;
1677
case MSR_IA32_PERF_CAPABILITIES:
1678
*data = kvm_caps.supported_perf_cap;
1679
break;
1680
case MSR_PLATFORM_INFO:
1681
*data = MSR_PLATFORM_INFO_CPUID_FAULT;
1682
break;
1683
case MSR_IA32_UCODE_REV:
1684
rdmsrq_safe(index, data);
1685
break;
1686
default:
1687
return kvm_x86_call(get_feature_msr)(index, data);
1688
}
1689
return 0;
1690
}
1691
1692
static int do_get_feature_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
1693
{
1694
return kvm_do_msr_access(vcpu, index, data, true, MSR_TYPE_R,
1695
kvm_get_feature_msr);
1696
}
1697
1698
static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
1699
{
1700
if (efer & EFER_AUTOIBRS && !guest_cpu_cap_has(vcpu, X86_FEATURE_AUTOIBRS))
1701
return false;
1702
1703
if (efer & EFER_FFXSR && !guest_cpu_cap_has(vcpu, X86_FEATURE_FXSR_OPT))
1704
return false;
1705
1706
if (efer & EFER_SVME && !guest_cpu_cap_has(vcpu, X86_FEATURE_SVM))
1707
return false;
1708
1709
if (efer & (EFER_LME | EFER_LMA) &&
1710
!guest_cpu_cap_has(vcpu, X86_FEATURE_LM))
1711
return false;
1712
1713
if (efer & EFER_NX && !guest_cpu_cap_has(vcpu, X86_FEATURE_NX))
1714
return false;
1715
1716
return true;
1717
1718
}
1719
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
1720
{
1721
if (efer & efer_reserved_bits)
1722
return false;
1723
1724
return __kvm_valid_efer(vcpu, efer);
1725
}
1726
EXPORT_SYMBOL_GPL(kvm_valid_efer);
1727
1728
static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1729
{
1730
u64 old_efer = vcpu->arch.efer;
1731
u64 efer = msr_info->data;
1732
int r;
1733
1734
if (efer & efer_reserved_bits)
1735
return 1;
1736
1737
if (!msr_info->host_initiated) {
1738
if (!__kvm_valid_efer(vcpu, efer))
1739
return 1;
1740
1741
if (is_paging(vcpu) &&
1742
(vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
1743
return 1;
1744
}
1745
1746
efer &= ~EFER_LMA;
1747
efer |= vcpu->arch.efer & EFER_LMA;
1748
1749
r = kvm_x86_call(set_efer)(vcpu, efer);
1750
if (r) {
1751
WARN_ON(r > 0);
1752
return r;
1753
}
1754
1755
if ((efer ^ old_efer) & KVM_MMU_EFER_ROLE_BITS)
1756
kvm_mmu_reset_context(vcpu);
1757
1758
if (!static_cpu_has(X86_FEATURE_XSAVES) &&
1759
(efer & EFER_SVME))
1760
kvm_hv_xsaves_xsavec_maybe_warn(vcpu);
1761
1762
return 0;
1763
}
1764
1765
void kvm_enable_efer_bits(u64 mask)
1766
{
1767
efer_reserved_bits &= ~mask;
1768
}
1769
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
1770
1771
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type)
1772
{
1773
struct kvm_x86_msr_filter *msr_filter;
1774
struct msr_bitmap_range *ranges;
1775
struct kvm *kvm = vcpu->kvm;
1776
bool allowed;
1777
int idx;
1778
u32 i;
1779
1780
/* x2APIC MSRs do not support filtering. */
1781
if (index >= 0x800 && index <= 0x8ff)
1782
return true;
1783
1784
idx = srcu_read_lock(&kvm->srcu);
1785
1786
msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu);
1787
if (!msr_filter) {
1788
allowed = true;
1789
goto out;
1790
}
1791
1792
allowed = msr_filter->default_allow;
1793
ranges = msr_filter->ranges;
1794
1795
for (i = 0; i < msr_filter->count; i++) {
1796
u32 start = ranges[i].base;
1797
u32 end = start + ranges[i].nmsrs;
1798
u32 flags = ranges[i].flags;
1799
unsigned long *bitmap = ranges[i].bitmap;
1800
1801
if ((index >= start) && (index < end) && (flags & type)) {
1802
allowed = test_bit(index - start, bitmap);
1803
break;
1804
}
1805
}
1806
1807
out:
1808
srcu_read_unlock(&kvm->srcu, idx);
1809
1810
return allowed;
1811
}
1812
EXPORT_SYMBOL_GPL(kvm_msr_allowed);
1813
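/*
* Illustrative example (hypothetical filter, values chosen for
* illustration only): a deny-by-default filter installed via
* KVM_X86_SET_MSR_FILTER that only allows guest reads of
* MSR_IA32_UCODE_REV (0x8b) would carry a single range roughly like
*
*    base = 0x8b, nmsrs = 1, flags = KVM_MSR_FILTER_READ, bitmap bit 0 = 1
*
* With that installed, kvm_msr_allowed(vcpu, 0x8b, KVM_MSR_FILTER_READ)
* matches the range, tests bit 0 of the bitmap and returns true, while
* any other non-x2APIC MSR falls through to default_allow (false for a
* deny-by-default filter).
*/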
1814
/*
1815
* Write @data into the MSR specified by @index. Select MSR specific fault
1816
* checks are bypassed if @host_initiated is %true.
1817
* Returns 0 on success, non-0 otherwise.
1818
* Assumes vcpu_load() was already called.
1819
*/
1820
static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
1821
bool host_initiated)
1822
{
1823
struct msr_data msr;
1824
1825
switch (index) {
1826
case MSR_FS_BASE:
1827
case MSR_GS_BASE:
1828
case MSR_KERNEL_GS_BASE:
1829
case MSR_CSTAR:
1830
case MSR_LSTAR:
1831
if (is_noncanonical_msr_address(data, vcpu))
1832
return 1;
1833
break;
1834
case MSR_IA32_SYSENTER_EIP:
1835
case MSR_IA32_SYSENTER_ESP:
1836
/*
1837
* IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
1838
* non-canonical address is written on Intel but not on
1839
* AMD (which ignores the top 32-bits, because it does
1840
* not implement 64-bit SYSENTER).
1841
*
1842
* 64-bit code should hence be able to write a non-canonical
1843
* value on AMD. Making the address canonical ensures that
1844
* vmentry does not fail on Intel after writing a non-canonical
1845
* value, and that something deterministic happens if the guest
1846
* invokes 64-bit SYSENTER.
1847
*/
1848
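/*
* Worked example (assuming 48-bit virtual addresses for illustration):
* __canonical_address() sign-extends from bit 47, so a non-canonical
* write of 0x0000800000000000 is stored as 0xffff800000000000, which is
* canonical and therefore safe to load at VM-Enter.
*/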
data = __canonical_address(data, max_host_virt_addr_bits());
1849
break;
1850
case MSR_TSC_AUX:
1851
if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
1852
return 1;
1853
1854
if (!host_initiated &&
1855
!guest_cpu_cap_has(vcpu, X86_FEATURE_RDTSCP) &&
1856
!guest_cpu_cap_has(vcpu, X86_FEATURE_RDPID))
1857
return 1;
1858
1859
/*
1860
* Per Intel's SDM, bits 63:32 are reserved, but AMD's APM has
1861
* incomplete and conflicting architectural behavior. Current
1862
* AMD CPUs completely ignore bits 63:32, i.e. they aren't
1863
* reserved and always read as zeros. Enforce Intel's reserved
1864
* bits check if the guest CPU is Intel compatible, otherwise
1865
* clear the bits. This ensures cross-vendor migration will
1866
* provide consistent behavior for the guest.
1867
*/
1868
if (guest_cpuid_is_intel_compatible(vcpu) && (data >> 32) != 0)
1869
return 1;
1870
1871
data = (u32)data;
1872
break;
1873
}
1874
1875
msr.data = data;
1876
msr.index = index;
1877
msr.host_initiated = host_initiated;
1878
1879
return kvm_x86_call(set_msr)(vcpu, &msr);
1880
}
1881
1882
static int _kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
1883
bool host_initiated)
1884
{
1885
return __kvm_set_msr(vcpu, index, *data, host_initiated);
1886
}
1887
1888
static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu,
1889
u32 index, u64 data, bool host_initiated)
1890
{
1891
return kvm_do_msr_access(vcpu, index, &data, host_initiated, MSR_TYPE_W,
1892
_kvm_set_msr);
1893
}
1894
1895
/*
1896
* Read the MSR specified by @index into @data. Select MSR specific fault
1897
* checks are bypassed if @host_initiated is %true.
1898
* Returns 0 on success, non-0 otherwise.
1899
* Assumes vcpu_load() was already called.
1900
*/
1901
int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
1902
bool host_initiated)
1903
{
1904
struct msr_data msr;
1905
int ret;
1906
1907
switch (index) {
1908
case MSR_TSC_AUX:
1909
if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
1910
return 1;
1911
1912
if (!host_initiated &&
1913
!guest_cpu_cap_has(vcpu, X86_FEATURE_RDTSCP) &&
1914
!guest_cpu_cap_has(vcpu, X86_FEATURE_RDPID))
1915
return 1;
1916
break;
1917
}
1918
1919
msr.index = index;
1920
msr.host_initiated = host_initiated;
1921
1922
ret = kvm_x86_call(get_msr)(vcpu, &msr);
1923
if (!ret)
1924
*data = msr.data;
1925
return ret;
1926
}
1927
1928
static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
1929
u32 index, u64 *data, bool host_initiated)
1930
{
1931
return kvm_do_msr_access(vcpu, index, data, host_initiated, MSR_TYPE_R,
1932
__kvm_get_msr);
1933
}
1934
1935
int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data)
1936
{
1937
if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ))
1938
return KVM_MSR_RET_FILTERED;
1939
return kvm_get_msr_ignored_check(vcpu, index, data, false);
1940
}
1941
EXPORT_SYMBOL_GPL(kvm_get_msr_with_filter);
1942
1943
int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data)
1944
{
1945
if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE))
1946
return KVM_MSR_RET_FILTERED;
1947
return kvm_set_msr_ignored_check(vcpu, index, data, false);
1948
}
1949
EXPORT_SYMBOL_GPL(kvm_set_msr_with_filter);
1950
1951
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
1952
{
1953
return kvm_get_msr_ignored_check(vcpu, index, data, false);
1954
}
1955
EXPORT_SYMBOL_GPL(kvm_get_msr);
1956
1957
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
1958
{
1959
return kvm_set_msr_ignored_check(vcpu, index, data, false);
1960
}
1961
EXPORT_SYMBOL_GPL(kvm_set_msr);
1962
1963
static void complete_userspace_rdmsr(struct kvm_vcpu *vcpu)
1964
{
1965
if (!vcpu->run->msr.error) {
1966
kvm_rax_write(vcpu, (u32)vcpu->run->msr.data);
1967
kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32);
1968
}
1969
}
1970
1971
static int complete_emulated_msr_access(struct kvm_vcpu *vcpu)
1972
{
1973
return complete_emulated_insn_gp(vcpu, vcpu->run->msr.error);
1974
}
1975
1976
static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu)
1977
{
1978
complete_userspace_rdmsr(vcpu);
1979
return complete_emulated_msr_access(vcpu);
1980
}
1981
1982
static int complete_fast_msr_access(struct kvm_vcpu *vcpu)
1983
{
1984
return kvm_x86_call(complete_emulated_msr)(vcpu, vcpu->run->msr.error);
1985
}
1986
1987
static int complete_fast_rdmsr(struct kvm_vcpu *vcpu)
1988
{
1989
complete_userspace_rdmsr(vcpu);
1990
return complete_fast_msr_access(vcpu);
1991
}
1992
1993
static u64 kvm_msr_reason(int r)
1994
{
1995
switch (r) {
1996
case KVM_MSR_RET_UNSUPPORTED:
1997
return KVM_MSR_EXIT_REASON_UNKNOWN;
1998
case KVM_MSR_RET_FILTERED:
1999
return KVM_MSR_EXIT_REASON_FILTER;
2000
default:
2001
return KVM_MSR_EXIT_REASON_INVAL;
2002
}
2003
}
2004
2005
static int kvm_msr_user_space(struct kvm_vcpu *vcpu, u32 index,
2006
u32 exit_reason, u64 data,
2007
int (*completion)(struct kvm_vcpu *vcpu),
2008
int r)
2009
{
2010
u64 msr_reason = kvm_msr_reason(r);
2011
2012
/* Check if the user wanted to know about this MSR fault */
2013
if (!(vcpu->kvm->arch.user_space_msr_mask & msr_reason))
2014
return 0;
2015
2016
vcpu->run->exit_reason = exit_reason;
2017
vcpu->run->msr.error = 0;
2018
memset(vcpu->run->msr.pad, 0, sizeof(vcpu->run->msr.pad));
2019
vcpu->run->msr.reason = msr_reason;
2020
vcpu->run->msr.index = index;
2021
vcpu->run->msr.data = data;
2022
vcpu->arch.complete_userspace_io = completion;
2023
2024
return 1;
2025
}
2026
2027
int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu)
2028
{
2029
u32 ecx = kvm_rcx_read(vcpu);
2030
u64 data;
2031
int r;
2032
2033
r = kvm_get_msr_with_filter(vcpu, ecx, &data);
2034
2035
if (!r) {
2036
trace_kvm_msr_read(ecx, data);
2037
2038
kvm_rax_write(vcpu, data & -1u);
2039
kvm_rdx_write(vcpu, (data >> 32) & -1u);
2040
} else {
2041
/* MSR read failed? See if we should ask user space */
2042
if (kvm_msr_user_space(vcpu, ecx, KVM_EXIT_X86_RDMSR, 0,
2043
complete_fast_rdmsr, r))
2044
return 0;
2045
trace_kvm_msr_read_ex(ecx);
2046
}
2047
2048
return kvm_x86_call(complete_emulated_msr)(vcpu, r);
2049
}
2050
EXPORT_SYMBOL_GPL(kvm_emulate_rdmsr);
2051
2052
int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
2053
{
2054
u32 ecx = kvm_rcx_read(vcpu);
2055
u64 data = kvm_read_edx_eax(vcpu);
2056
int r;
2057
2058
r = kvm_set_msr_with_filter(vcpu, ecx, data);
2059
2060
if (!r) {
2061
trace_kvm_msr_write(ecx, data);
2062
} else {
2063
/* MSR write failed? See if we should ask user space */
2064
if (kvm_msr_user_space(vcpu, ecx, KVM_EXIT_X86_WRMSR, data,
2065
complete_fast_msr_access, r))
2066
return 0;
2067
/* Signal all other negative errors to userspace */
2068
if (r < 0)
2069
return r;
2070
trace_kvm_msr_write_ex(ecx, data);
2071
}
2072
2073
return kvm_x86_call(complete_emulated_msr)(vcpu, r);
2074
}
2075
EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);
2076
2077
int kvm_emulate_as_nop(struct kvm_vcpu *vcpu)
2078
{
2079
return kvm_skip_emulated_instruction(vcpu);
2080
}
2081
2082
int kvm_emulate_invd(struct kvm_vcpu *vcpu)
2083
{
2084
/* Treat an INVD instruction as a NOP and just skip it. */
2085
return kvm_emulate_as_nop(vcpu);
2086
}
2087
EXPORT_SYMBOL_GPL(kvm_emulate_invd);
2088
2089
int kvm_handle_invalid_op(struct kvm_vcpu *vcpu)
2090
{
2091
kvm_queue_exception(vcpu, UD_VECTOR);
2092
return 1;
2093
}
2094
EXPORT_SYMBOL_GPL(kvm_handle_invalid_op);
2095
2096
2097
static int kvm_emulate_monitor_mwait(struct kvm_vcpu *vcpu, const char *insn)
2098
{
2099
bool enabled;
2100
2101
if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS))
2102
goto emulate_as_nop;
2103
2104
if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT))
2105
enabled = guest_cpu_cap_has(vcpu, X86_FEATURE_MWAIT);
2106
else
2107
enabled = vcpu->arch.ia32_misc_enable_msr & MSR_IA32_MISC_ENABLE_MWAIT;
2108
2109
if (!enabled)
2110
return kvm_handle_invalid_op(vcpu);
2111
2112
emulate_as_nop:
2113
pr_warn_once("%s instruction emulated as NOP!\n", insn);
2114
return kvm_emulate_as_nop(vcpu);
2115
}
2116
int kvm_emulate_mwait(struct kvm_vcpu *vcpu)
2117
{
2118
return kvm_emulate_monitor_mwait(vcpu, "MWAIT");
2119
}
2120
EXPORT_SYMBOL_GPL(kvm_emulate_mwait);
2121
2122
int kvm_emulate_monitor(struct kvm_vcpu *vcpu)
2123
{
2124
return kvm_emulate_monitor_mwait(vcpu, "MONITOR");
2125
}
2126
EXPORT_SYMBOL_GPL(kvm_emulate_monitor);
2127
2128
static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
2129
{
2130
xfer_to_guest_mode_prepare();
2131
2132
return READ_ONCE(vcpu->mode) == EXITING_GUEST_MODE ||
2133
kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending();
2134
}
2135
2136
/*
* The fast path for frequent and performance-sensitive WRMSR emulation,
* i.e. sending an IPI. Handling the IPI early in the VM-Exit flow reduces
* the latency of virtual IPIs by avoiding the expensive bits of transitioning
* from guest to host, e.g. reacquiring KVM's SRCU lock, in contrast to the
* other cases, which must be handled after interrupts are enabled on the host.
*/
2143
static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data)
2144
{
2145
if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic))
2146
return 1;
2147
2148
if (((data & APIC_SHORT_MASK) == APIC_DEST_NOSHORT) &&
2149
((data & APIC_DEST_MASK) == APIC_DEST_PHYSICAL) &&
2150
((data & APIC_MODE_MASK) == APIC_DM_FIXED) &&
2151
((u32)(data >> 32) != X2APIC_BROADCAST))
2152
return kvm_x2apic_icr_write(vcpu->arch.apic, data);
2153
2154
return 1;
2155
}
2156
2157
static int handle_fastpath_set_tscdeadline(struct kvm_vcpu *vcpu, u64 data)
2158
{
2159
if (!kvm_can_use_hv_timer(vcpu))
2160
return 1;
2161
2162
kvm_set_lapic_tscdeadline_msr(vcpu, data);
2163
return 0;
2164
}
2165
2166
fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
2167
{
2168
u32 msr = kvm_rcx_read(vcpu);
2169
u64 data;
2170
fastpath_t ret;
2171
bool handled;
2172
2173
kvm_vcpu_srcu_read_lock(vcpu);
2174
2175
switch (msr) {
2176
case APIC_BASE_MSR + (APIC_ICR >> 4):
2177
data = kvm_read_edx_eax(vcpu);
2178
handled = !handle_fastpath_set_x2apic_icr_irqoff(vcpu, data);
2179
break;
2180
case MSR_IA32_TSC_DEADLINE:
2181
data = kvm_read_edx_eax(vcpu);
2182
handled = !handle_fastpath_set_tscdeadline(vcpu, data);
2183
break;
2184
default:
2185
handled = false;
2186
break;
2187
}
2188
2189
if (handled) {
2190
if (!kvm_skip_emulated_instruction(vcpu))
2191
ret = EXIT_FASTPATH_EXIT_USERSPACE;
2192
else
2193
ret = EXIT_FASTPATH_REENTER_GUEST;
2194
trace_kvm_msr_write(msr, data);
2195
} else {
2196
ret = EXIT_FASTPATH_NONE;
2197
}
2198
2199
kvm_vcpu_srcu_read_unlock(vcpu);
2200
2201
return ret;
2202
}
2203
EXPORT_SYMBOL_GPL(handle_fastpath_set_msr_irqoff);
2204
2205
/*
2206
* Adapt set_msr() to msr_io()'s calling convention
2207
*/
2208
static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
2209
{
2210
return kvm_get_msr_ignored_check(vcpu, index, data, true);
2211
}
2212
2213
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
2214
{
2215
u64 val;
2216
2217
/*
2218
* Disallow writes to immutable feature MSRs after KVM_RUN. KVM does
2219
* not support modifying the guest vCPU model on the fly, e.g. changing
2220
* the nVMX capabilities while L2 is running is nonsensical. Allow
2221
* writes of the same value, e.g. to allow userspace to blindly stuff
2222
* all MSRs when emulating RESET.
2223
*/
2224
if (kvm_vcpu_has_run(vcpu) && kvm_is_immutable_feature_msr(index) &&
2225
(do_get_msr(vcpu, index, &val) || *data != val))
2226
return -EINVAL;
2227
2228
return kvm_set_msr_ignored_check(vcpu, index, *data, true);
2229
}
2230
2231
#ifdef CONFIG_X86_64
2232
struct pvclock_clock {
2233
int vclock_mode;
2234
u64 cycle_last;
2235
u64 mask;
2236
u32 mult;
2237
u32 shift;
2238
u64 base_cycles;
2239
u64 offset;
2240
};
2241
2242
struct pvclock_gtod_data {
2243
seqcount_t seq;
2244
2245
struct pvclock_clock clock; /* extract of a clocksource struct */
2246
struct pvclock_clock raw_clock; /* extract of a clocksource struct */
2247
2248
ktime_t offs_boot;
2249
u64 wall_time_sec;
2250
};
2251
2252
static struct pvclock_gtod_data pvclock_gtod_data;
2253
2254
static void update_pvclock_gtod(struct timekeeper *tk)
2255
{
2256
struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
2257
2258
write_seqcount_begin(&vdata->seq);
2259
2260
/* copy pvclock gtod data */
2261
vdata->clock.vclock_mode = tk->tkr_mono.clock->vdso_clock_mode;
2262
vdata->clock.cycle_last = tk->tkr_mono.cycle_last;
2263
vdata->clock.mask = tk->tkr_mono.mask;
2264
vdata->clock.mult = tk->tkr_mono.mult;
2265
vdata->clock.shift = tk->tkr_mono.shift;
2266
vdata->clock.base_cycles = tk->tkr_mono.xtime_nsec;
2267
vdata->clock.offset = tk->tkr_mono.base;
2268
2269
vdata->raw_clock.vclock_mode = tk->tkr_raw.clock->vdso_clock_mode;
2270
vdata->raw_clock.cycle_last = tk->tkr_raw.cycle_last;
2271
vdata->raw_clock.mask = tk->tkr_raw.mask;
2272
vdata->raw_clock.mult = tk->tkr_raw.mult;
2273
vdata->raw_clock.shift = tk->tkr_raw.shift;
2274
vdata->raw_clock.base_cycles = tk->tkr_raw.xtime_nsec;
2275
vdata->raw_clock.offset = tk->tkr_raw.base;
2276
2277
vdata->wall_time_sec = tk->xtime_sec;
2278
2279
vdata->offs_boot = tk->offs_boot;
2280
2281
write_seqcount_end(&vdata->seq);
2282
}
2283
2284
static s64 get_kvmclock_base_ns(void)
2285
{
2286
/* Count up from boot time, but with the frequency of the raw clock. */
2287
return ktime_to_ns(ktime_add(ktime_get_raw(), pvclock_gtod_data.offs_boot));
2288
}
2289
#else
2290
static s64 get_kvmclock_base_ns(void)
2291
{
2292
/* Master clock not used, so we can just use CLOCK_BOOTTIME. */
2293
return ktime_get_boottime_ns();
2294
}
2295
#endif
2296
2297
static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs)
2298
{
2299
int version;
2300
int r;
2301
struct pvclock_wall_clock wc;
2302
u32 wc_sec_hi;
2303
u64 wall_nsec;
2304
2305
if (!wall_clock)
2306
return;
2307
2308
r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
2309
if (r)
2310
return;
2311
2312
if (version & 1)
2313
++version; /* first time write, random junk */
2314
2315
++version;
2316
2317
if (kvm_write_guest(kvm, wall_clock, &version, sizeof(version)))
2318
return;
2319
2320
wall_nsec = kvm_get_wall_clock_epoch(kvm);
2321
2322
wc.nsec = do_div(wall_nsec, NSEC_PER_SEC);
2323
wc.sec = (u32)wall_nsec; /* overflow in 2106 guest time */
2324
wc.version = version;
2325
2326
kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
2327
2328
if (sec_hi_ofs) {
2329
wc_sec_hi = wall_nsec >> 32;
2330
kvm_write_guest(kvm, wall_clock + sec_hi_ofs,
2331
&wc_sec_hi, sizeof(wc_sec_hi));
2332
}
2333
2334
version++;
2335
kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
2336
}
2337
2338
static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time,
2339
bool old_msr, bool host_initiated)
2340
{
2341
struct kvm_arch *ka = &vcpu->kvm->arch;
2342
2343
if (vcpu->vcpu_id == 0 && !host_initiated) {
2344
if (ka->boot_vcpu_runs_old_kvmclock != old_msr)
2345
kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
2346
2347
ka->boot_vcpu_runs_old_kvmclock = old_msr;
2348
}
2349
2350
vcpu->arch.time = system_time;
2351
kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
2352
2353
/* Verify whether the enable bit is set... */
2354
if (system_time & 1)
2355
kvm_gpc_activate(&vcpu->arch.pv_time, system_time & ~1ULL,
2356
sizeof(struct pvclock_vcpu_time_info));
2357
else
2358
kvm_gpc_deactivate(&vcpu->arch.pv_time);
2359
2360
return;
2361
}
2362
2363
static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
2364
{
2365
do_shl32_div32(dividend, divisor);
2366
return dividend;
2367
}
2368
2369
static void kvm_get_time_scale(uint64_t scaled_hz, uint64_t base_hz,
2370
s8 *pshift, u32 *pmultiplier)
2371
{
2372
uint64_t scaled64;
2373
int32_t shift = 0;
2374
uint64_t tps64;
2375
uint32_t tps32;
2376
2377
tps64 = base_hz;
2378
scaled64 = scaled_hz;
2379
while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
2380
tps64 >>= 1;
2381
shift--;
2382
}
2383
2384
tps32 = (uint32_t)tps64;
2385
while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
2386
if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
2387
scaled64 >>= 1;
2388
else
2389
tps32 <<= 1;
2390
shift++;
2391
}
2392
2393
*pshift = shift;
2394
*pmultiplier = div_frac(scaled64, tps32);
2395
}
2396
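/*
* Worked example (hypothetical numbers, for illustration): for
* scaled_hz = NSEC_PER_SEC and base_hz = 2,000,000,000 (a 2 GHz clock),
* the loops above terminate with *pshift = 0 and
* *pmultiplier = (10^9 << 32) / (2 * 10^9) = 0x80000000, i.e. 0.5 in
* 32.32 fixed point. A consumer such as pvclock then computes
*
*    ns = (cycles * 0x80000000) >> 32 = cycles / 2
*
* which matches 0.5 ns per cycle at 2 GHz.
*/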
2397
#ifdef CONFIG_X86_64
2398
static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
2399
#endif
2400
2401
static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
2402
static unsigned long max_tsc_khz;
2403
2404
static u32 adjust_tsc_khz(u32 khz, s32 ppm)
2405
{
2406
u64 v = (u64)khz * (1000000 + ppm);
2407
do_div(v, 1000000);
2408
return v;
2409
}
2410
2411
static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier);
2412
2413
static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
2414
{
2415
u64 ratio;
2416
2417
/* Guest TSC same frequency as host TSC? */
2418
if (!scale) {
2419
kvm_vcpu_write_tsc_multiplier(vcpu, kvm_caps.default_tsc_scaling_ratio);
2420
return 0;
2421
}
2422
2423
/* TSC scaling supported? */
2424
if (!kvm_caps.has_tsc_control) {
2425
if (user_tsc_khz > tsc_khz) {
2426
vcpu->arch.tsc_catchup = 1;
2427
vcpu->arch.tsc_always_catchup = 1;
2428
return 0;
2429
} else {
2430
pr_warn_ratelimited("user requested TSC rate below hardware speed\n");
2431
return -1;
2432
}
2433
}
2434
2435
/* TSC scaling required - calculate ratio */
2436
ratio = mul_u64_u32_div(1ULL << kvm_caps.tsc_scaling_ratio_frac_bits,
2437
user_tsc_khz, tsc_khz);
2438
2439
if (ratio == 0 || ratio >= kvm_caps.max_tsc_scaling_ratio) {
2440
pr_warn_ratelimited("Invalid TSC scaling ratio - virtual-tsc-khz=%u\n",
2441
user_tsc_khz);
2442
return -1;
2443
}
2444
2445
kvm_vcpu_write_tsc_multiplier(vcpu, ratio);
2446
return 0;
2447
}
2448
2449
static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
2450
{
2451
u32 thresh_lo, thresh_hi;
2452
int use_scaling = 0;
2453
2454
/* tsc_khz can be zero if TSC calibration fails */
2455
if (user_tsc_khz == 0) {
2456
/* set tsc_scaling_ratio to a safe value */
2457
kvm_vcpu_write_tsc_multiplier(vcpu, kvm_caps.default_tsc_scaling_ratio);
2458
return -1;
2459
}
2460
2461
/* Compute a scale to convert nanoseconds in TSC cycles */
2462
kvm_get_time_scale(user_tsc_khz * 1000LL, NSEC_PER_SEC,
2463
&vcpu->arch.virtual_tsc_shift,
2464
&vcpu->arch.virtual_tsc_mult);
2465
vcpu->arch.virtual_tsc_khz = user_tsc_khz;
2466
2467
/*
* Compute the variation in TSC rate which is acceptable
* within the range of tolerance and decide whether the
* rate being applied is within those bounds of the hardware
* rate. If so, no scaling or compensation need be done.
*/
2473
thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
2474
thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
2475
if (user_tsc_khz < thresh_lo || user_tsc_khz > thresh_hi) {
2476
pr_debug("requested TSC rate %u falls outside tolerance [%u,%u]\n",
2477
user_tsc_khz, thresh_lo, thresh_hi);
2478
use_scaling = 1;
2479
}
2480
return set_tsc_khz(vcpu, user_tsc_khz, use_scaling);
2481
}
2482
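/*
* Worked example (hypothetical numbers, for illustration): with the
* default tsc_tolerance_ppm of 250 and a host tsc_khz of 2,000,000
* (2 GHz), the window computed above is
*
*    thresh_lo = 2,000,000 * (1,000,000 - 250) / 1,000,000 = 1,999,500
*    thresh_hi = 2,000,000 * (1,000,000 + 250) / 1,000,000 = 2,000,500
*
* so a requested virtual TSC of 1,999,800 kHz is accepted as-is, while
* 2,100,000 kHz falls outside the window and forces use_scaling
* (hardware TSC scaling if available, catch-up mode otherwise).
*/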
2483
static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
2484
{
2485
u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec,
2486
vcpu->arch.virtual_tsc_mult,
2487
vcpu->arch.virtual_tsc_shift);
2488
tsc += vcpu->arch.this_tsc_write;
2489
return tsc;
2490
}
2491
2492
#ifdef CONFIG_X86_64
2493
static inline bool gtod_is_based_on_tsc(int mode)
2494
{
2495
return mode == VDSO_CLOCKMODE_TSC || mode == VDSO_CLOCKMODE_HVCLOCK;
2496
}
2497
#endif
2498
2499
static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu, bool new_generation)
2500
{
2501
#ifdef CONFIG_X86_64
2502
struct kvm_arch *ka = &vcpu->kvm->arch;
2503
struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
2504
2505
/*
2506
* To use the masterclock, the host clocksource must be based on TSC
2507
* and all vCPUs must have matching TSCs. Note, the count for matching
2508
* vCPUs doesn't include the reference vCPU, hence "+1".
2509
*/
2510
bool use_master_clock = (ka->nr_vcpus_matched_tsc + 1 ==
2511
atomic_read(&vcpu->kvm->online_vcpus)) &&
2512
gtod_is_based_on_tsc(gtod->clock.vclock_mode);
2513
2514
/*
2515
* Request a masterclock update if the masterclock needs to be toggled
2516
* on/off, or when starting a new generation and the masterclock is
2517
* enabled (compute_guest_tsc() requires the masterclock snapshot to be
2518
* taken _after_ the new generation is created).
2519
*/
2520
if ((ka->use_master_clock && new_generation) ||
2521
(ka->use_master_clock != use_master_clock))
2522
kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
2523
2524
trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
2525
atomic_read(&vcpu->kvm->online_vcpus),
2526
ka->use_master_clock, gtod->clock.vclock_mode);
2527
#endif
2528
}
2529
2530
/*
2531
* Multiply tsc by a fixed point number represented by ratio.
2532
*
2533
* The most significant 64-N bits (mult) of ratio represent the
2534
* integral part of the fixed point number; the remaining N bits
2535
* (frac) represent the fractional part, ie. ratio represents a fixed
2536
* point number (mult + frac * 2^(-N)).
2537
*
2538
* N equals to kvm_caps.tsc_scaling_ratio_frac_bits.
2539
*/
2540
static inline u64 __scale_tsc(u64 ratio, u64 tsc)
2541
{
2542
return mul_u64_u64_shr(tsc, ratio, kvm_caps.tsc_scaling_ratio_frac_bits);
2543
}
2544
2545
u64 kvm_scale_tsc(u64 tsc, u64 ratio)
2546
{
2547
u64 _tsc = tsc;
2548
2549
if (ratio != kvm_caps.default_tsc_scaling_ratio)
2550
_tsc = __scale_tsc(ratio, tsc);
2551
2552
return _tsc;
2553
}
2554
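/*
* Worked example (assuming 48 fractional bits, i.e. a configuration
* where kvm_caps.tsc_scaling_ratio_frac_bits is 48): the default ratio
* is 1ULL << 48, i.e. 1.0 in fixed point. A guest that should see the
* TSC at half the host rate uses ratio = 1ULL << 47, so
*
*    kvm_scale_tsc(host_tsc, 1ULL << 47)
*        = (host_tsc * (1ULL << 47)) >> 48 = host_tsc / 2
*
* as computed by mul_u64_u64_shr() above.
*/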
2555
static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
2556
{
2557
u64 tsc;
2558
2559
tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio);
2560
2561
return target_tsc - tsc;
2562
}
2563
2564
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
2565
{
2566
return vcpu->arch.l1_tsc_offset +
2567
kvm_scale_tsc(host_tsc, vcpu->arch.l1_tsc_scaling_ratio);
2568
}
2569
EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
2570
2571
u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier)
2572
{
2573
u64 nested_offset;
2574
2575
if (l2_multiplier == kvm_caps.default_tsc_scaling_ratio)
2576
nested_offset = l1_offset;
2577
else
2578
nested_offset = mul_s64_u64_shr((s64) l1_offset, l2_multiplier,
2579
kvm_caps.tsc_scaling_ratio_frac_bits);
2580
2581
nested_offset += l2_offset;
2582
return nested_offset;
2583
}
2584
EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_offset);
2585
2586
u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier)
2587
{
2588
if (l2_multiplier != kvm_caps.default_tsc_scaling_ratio)
2589
return mul_u64_u64_shr(l1_multiplier, l2_multiplier,
2590
kvm_caps.tsc_scaling_ratio_frac_bits);
2591
2592
return l1_multiplier;
2593
}
2594
EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_multiplier);
2595
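/*
* Worked equation (illustration of how the two helpers above compose,
* modulo fixed-point rounding): with frac = tsc_scaling_ratio_frac_bits,
* the TSC value L2 observes is
*
*    L2_tsc = host_tsc * ((l1_ratio * l2_ratio) >> frac) >> frac
*             + ((l1_offset * l2_ratio) >> frac) + l2_offset
*
* i.e. the same value L2 would see if L1 applied its own offset and
* multiplier to the TSC that L1 itself observes.
*/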
2596
static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset)
2597
{
2598
if (vcpu->arch.guest_tsc_protected)
2599
return;
2600
2601
trace_kvm_write_tsc_offset(vcpu->vcpu_id,
2602
vcpu->arch.l1_tsc_offset,
2603
l1_offset);
2604
2605
vcpu->arch.l1_tsc_offset = l1_offset;
2606
2607
/*
2608
* If we are here because L1 chose not to trap WRMSR to TSC then
2609
* according to the spec this should set L1's TSC (as opposed to
2610
* setting L1's offset for L2).
2611
*/
2612
if (is_guest_mode(vcpu))
2613
vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
2614
l1_offset,
2615
kvm_x86_call(get_l2_tsc_offset)(vcpu),
2616
kvm_x86_call(get_l2_tsc_multiplier)(vcpu));
2617
else
2618
vcpu->arch.tsc_offset = l1_offset;
2619
2620
kvm_x86_call(write_tsc_offset)(vcpu);
2621
}
2622
2623
static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier)
2624
{
2625
vcpu->arch.l1_tsc_scaling_ratio = l1_multiplier;
2626
2627
/* Userspace is changing the multiplier while L2 is active */
2628
if (is_guest_mode(vcpu))
2629
vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier(
2630
l1_multiplier,
2631
kvm_x86_call(get_l2_tsc_multiplier)(vcpu));
2632
else
2633
vcpu->arch.tsc_scaling_ratio = l1_multiplier;
2634
2635
if (kvm_caps.has_tsc_control)
2636
kvm_x86_call(write_tsc_multiplier)(vcpu);
2637
}
2638
2639
static inline bool kvm_check_tsc_unstable(void)
2640
{
2641
#ifdef CONFIG_X86_64
2642
/*
* The TSC is marked unstable when we're running on Hyper-V,
* but the 'TSC page' clocksource is still good.
*/
2646
if (pvclock_gtod_data.clock.vclock_mode == VDSO_CLOCKMODE_HVCLOCK)
2647
return false;
2648
#endif
2649
return check_tsc_unstable();
2650
}
2651
2652
/*
2653
* Infers attempts to synchronize the guest's tsc from host writes. Sets the
2654
* offset for the vcpu and tracks the TSC matching generation that the vcpu
2655
* participates in.
2656
*/
2657
static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
2658
u64 ns, bool matched, bool user_set_tsc)
2659
{
2660
struct kvm *kvm = vcpu->kvm;
2661
2662
lockdep_assert_held(&kvm->arch.tsc_write_lock);
2663
2664
if (vcpu->arch.guest_tsc_protected)
2665
return;
2666
2667
if (user_set_tsc)
2668
vcpu->kvm->arch.user_set_tsc = true;
2669
2670
/*
* We also track the most recent recorded KHZ, write and time to
* allow the matching interval to be extended at each write.
*/
2674
kvm->arch.last_tsc_nsec = ns;
2675
kvm->arch.last_tsc_write = tsc;
2676
kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
2677
kvm->arch.last_tsc_offset = offset;
2678
2679
vcpu->arch.last_guest_tsc = tsc;
2680
2681
kvm_vcpu_write_tsc_offset(vcpu, offset);
2682
2683
if (!matched) {
2684
/*
2685
* We split periods of matched TSC writes into generations.
2686
* For each generation, we track the original measured
2687
* nanosecond time, offset, and write, so if TSCs are in
2688
* sync, we can match exact offset, and if not, we can match
2689
* exact software computation in compute_guest_tsc().
2690
*
2691
* These values are tracked in kvm->arch.cur_xxx variables.
2692
*/
2693
kvm->arch.cur_tsc_generation++;
2694
kvm->arch.cur_tsc_nsec = ns;
2695
kvm->arch.cur_tsc_write = tsc;
2696
kvm->arch.cur_tsc_offset = offset;
2697
kvm->arch.nr_vcpus_matched_tsc = 0;
2698
} else if (vcpu->arch.this_tsc_generation != kvm->arch.cur_tsc_generation) {
2699
kvm->arch.nr_vcpus_matched_tsc++;
2700
}
2701
2702
/* Keep track of which generation this VCPU has synchronized to */
2703
vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
2704
vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
2705
vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
2706
2707
kvm_track_tsc_matching(vcpu, !matched);
2708
}
2709
2710
static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 *user_value)
2711
{
2712
u64 data = user_value ? *user_value : 0;
2713
struct kvm *kvm = vcpu->kvm;
2714
u64 offset, ns, elapsed;
2715
unsigned long flags;
2716
bool matched = false;
2717
bool synchronizing = false;
2718
2719
raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
2720
offset = kvm_compute_l1_tsc_offset(vcpu, data);
2721
ns = get_kvmclock_base_ns();
2722
elapsed = ns - kvm->arch.last_tsc_nsec;
2723
2724
if (vcpu->arch.virtual_tsc_khz) {
2725
if (data == 0) {
2726
/*
2727
* Force synchronization when creating a vCPU, or when
2728
* userspace explicitly writes a zero value.
2729
*/
2730
synchronizing = true;
2731
} else if (kvm->arch.user_set_tsc) {
2732
u64 tsc_exp = kvm->arch.last_tsc_write +
2733
nsec_to_cycles(vcpu, elapsed);
2734
u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL;
2735
/*
2736
* Here lies UAPI baggage: when a user-initiated TSC write has
2737
* a small delta (1 second) of virtual cycle time against the
2738
* previously set vCPU, we assume that they were intended to be
2739
* in sync and the delta was only due to the racy nature of the
2740
* legacy API.
2741
*
2742
* This trick falls down when restoring a guest which genuinely
2743
* has been running for less time than the 1 second of imprecision
2744
* which we allow for in the legacy API. In this case, the first
2745
* value written by userspace (on any vCPU) should not be subject
2746
* to this 'correction' to make it sync up with values that only
2747
* come from the kernel's default vCPU creation. Make the 1-second
2748
* slop hack only trigger if the user_set_tsc flag is already set.
2749
*/
2750
synchronizing = data < tsc_exp + tsc_hz &&
2751
data + tsc_hz > tsc_exp;
2752
}
2753
}
2754
2755
2756
/*
2757
* For a reliable TSC, we can match TSC offsets, and for an unstable
2758
* TSC, we add elapsed time in this computation. We could let the
2759
* compensation code attempt to catch up if we fall behind, but
2760
* it's better to try to match offsets from the beginning.
2761
*/
2762
if (synchronizing &&
2763
vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
2764
if (!kvm_check_tsc_unstable()) {
2765
offset = kvm->arch.cur_tsc_offset;
2766
} else {
2767
u64 delta = nsec_to_cycles(vcpu, elapsed);
2768
data += delta;
2769
offset = kvm_compute_l1_tsc_offset(vcpu, data);
2770
}
2771
matched = true;
2772
}
2773
2774
__kvm_synchronize_tsc(vcpu, offset, data, ns, matched, !!user_value);
2775
raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
2776
}
2777
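/*
* Worked example (hypothetical numbers, for illustration): with
* virtual_tsc_khz = 1,000,000 (a 1 GHz guest TSC), tsc_hz above is 10^9
* cycles. If the previous write was 100 ms ago, tsc_exp =
* last_tsc_write + 10^8, and, provided user_set_tsc was already set by
* an earlier write, any user-initiated value in the open window
*
*    (tsc_exp - 10^9, tsc_exp + 10^9)
*
* i.e. within one second's worth of guest cycles, is treated as an
* attempt to synchronize with the earlier write rather than as a brand
* new TSC value.
*/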
2778
static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
2779
s64 adjustment)
2780
{
2781
u64 tsc_offset = vcpu->arch.l1_tsc_offset;
2782
kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment);
2783
}
2784
2785
static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
2786
{
2787
if (vcpu->arch.l1_tsc_scaling_ratio != kvm_caps.default_tsc_scaling_ratio)
2788
WARN_ON(adjustment < 0);
2789
adjustment = kvm_scale_tsc((u64) adjustment,
2790
vcpu->arch.l1_tsc_scaling_ratio);
2791
adjust_tsc_offset_guest(vcpu, adjustment);
2792
}
2793
2794
#ifdef CONFIG_X86_64
2795
2796
static u64 read_tsc(void)
2797
{
2798
u64 ret = (u64)rdtsc_ordered();
2799
u64 last = pvclock_gtod_data.clock.cycle_last;
2800
2801
if (likely(ret >= last))
2802
return ret;
2803
2804
/*
2805
* GCC likes to generate cmov here, but this branch is extremely
2806
* predictable (it's just a function of time and the likely is
2807
* very likely) and there's a data dependence, so force GCC
2808
* to generate a branch instead. I don't barrier() because
2809
* we don't actually need a barrier, and if this function
2810
* ever gets inlined it will generate worse code.
2811
*/
2812
asm volatile ("");
2813
return last;
2814
}
2815
2816
static inline u64 vgettsc(struct pvclock_clock *clock, u64 *tsc_timestamp,
2817
int *mode)
2818
{
2819
u64 tsc_pg_val;
2820
long v;
2821
2822
switch (clock->vclock_mode) {
2823
case VDSO_CLOCKMODE_HVCLOCK:
2824
if (hv_read_tsc_page_tsc(hv_get_tsc_page(),
2825
tsc_timestamp, &tsc_pg_val)) {
2826
/* TSC page valid */
2827
*mode = VDSO_CLOCKMODE_HVCLOCK;
2828
v = (tsc_pg_val - clock->cycle_last) &
2829
clock->mask;
2830
} else {
2831
/* TSC page invalid */
2832
*mode = VDSO_CLOCKMODE_NONE;
2833
}
2834
break;
2835
case VDSO_CLOCKMODE_TSC:
2836
*mode = VDSO_CLOCKMODE_TSC;
2837
*tsc_timestamp = read_tsc();
2838
v = (*tsc_timestamp - clock->cycle_last) &
2839
clock->mask;
2840
break;
2841
default:
2842
*mode = VDSO_CLOCKMODE_NONE;
2843
}
2844
2845
if (*mode == VDSO_CLOCKMODE_NONE)
2846
*tsc_timestamp = v = 0;
2847
2848
return v * clock->mult;
2849
}
2850
2851
/*
* As with get_kvmclock_base_ns(), this counts from boot time, at the
* frequency of CLOCK_MONOTONIC_RAW (hence adding gtod->offs_boot).
*/
2855
static int do_kvmclock_base(s64 *t, u64 *tsc_timestamp)
2856
{
2857
struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
2858
unsigned long seq;
2859
int mode;
2860
u64 ns;
2861
2862
do {
2863
seq = read_seqcount_begin(&gtod->seq);
2864
ns = gtod->raw_clock.base_cycles;
2865
ns += vgettsc(&gtod->raw_clock, tsc_timestamp, &mode);
2866
ns >>= gtod->raw_clock.shift;
2867
ns += ktime_to_ns(ktime_add(gtod->raw_clock.offset, gtod->offs_boot));
2868
} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
2869
*t = ns;
2870
2871
return mode;
2872
}
2873
2874
/*
2875
* This calculates CLOCK_MONOTONIC at the time of the TSC snapshot, with
2876
* no boot time offset.
2877
*/
2878
static int do_monotonic(s64 *t, u64 *tsc_timestamp)
2879
{
2880
struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
2881
unsigned long seq;
2882
int mode;
2883
u64 ns;
2884
2885
do {
2886
seq = read_seqcount_begin(&gtod->seq);
2887
ns = gtod->clock.base_cycles;
2888
ns += vgettsc(&gtod->clock, tsc_timestamp, &mode);
2889
ns >>= gtod->clock.shift;
2890
ns += ktime_to_ns(gtod->clock.offset);
2891
} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
2892
*t = ns;
2893
2894
return mode;
2895
}
2896
2897
static int do_realtime(struct timespec64 *ts, u64 *tsc_timestamp)
2898
{
2899
struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
2900
unsigned long seq;
2901
int mode;
2902
u64 ns;
2903
2904
do {
2905
seq = read_seqcount_begin(&gtod->seq);
2906
ts->tv_sec = gtod->wall_time_sec;
2907
ns = gtod->clock.base_cycles;
2908
ns += vgettsc(&gtod->clock, tsc_timestamp, &mode);
2909
ns >>= gtod->clock.shift;
2910
} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
2911
2912
ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
2913
ts->tv_nsec = ns;
2914
2915
return mode;
2916
}
2917
2918
/*
* Calculates the kvmclock_base_ns (CLOCK_MONOTONIC_RAW + boot time) and
* reports the TSC value from which it did so. Returns true if the host
* is using a TSC-based clocksource.
*/
2923
static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp)
2924
{
2925
/* checked again under seqlock below */
2926
if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
2927
return false;
2928
2929
return gtod_is_based_on_tsc(do_kvmclock_base(kernel_ns,
2930
tsc_timestamp));
2931
}
2932
2933
/*
2934
* Calculates CLOCK_MONOTONIC and reports the TSC value from which it did
2935
* so. Returns true if host is using TSC based clocksource.
2936
*/
2937
bool kvm_get_monotonic_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp)
2938
{
2939
/* checked again under seqlock below */
2940
if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
2941
return false;
2942
2943
return gtod_is_based_on_tsc(do_monotonic(kernel_ns,
2944
tsc_timestamp));
2945
}
2946
2947
/*
2948
* Calculates CLOCK_REALTIME and reports the TSC value from which it did
2949
* so. Returns true if host is using TSC based clocksource.
2950
*
2951
* DO NOT USE this for anything related to migration. You want CLOCK_TAI
2952
* for that.
2953
*/
2954
static bool kvm_get_walltime_and_clockread(struct timespec64 *ts,
2955
u64 *tsc_timestamp)
2956
{
2957
/* checked again under seqlock below */
2958
if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
2959
return false;
2960
2961
return gtod_is_based_on_tsc(do_realtime(ts, tsc_timestamp));
2962
}
2963
#endif
2964
2965
/*
2966
*
2967
* Assuming a stable TSC across physical CPUs, and a stable TSC
* across virtual CPUs, the following condition is possible.
2969
* Each numbered line represents an event visible to both
2970
* CPUs at the next numbered event.
2971
*
2972
* "timespecX" represents host monotonic time. "tscX" represents
2973
* RDTSC value.
2974
*
2975
* VCPU0 on CPU0 | VCPU1 on CPU1
2976
*
2977
* 1. read timespec0,tsc0
2978
* 2. | timespec1 = timespec0 + N
2979
* | tsc1 = tsc0 + M
2980
* 3. transition to guest | transition to guest
2981
* 4. ret0 = timespec0 + (rdtsc - tsc0) |
2982
* 5. | ret1 = timespec1 + (rdtsc - tsc1)
2983
* | ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
2984
*
2985
* Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity:
2986
*
2987
* - ret0 < ret1
2988
* - timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
2989
* ...
2990
* - 0 < N - M => M < N
2991
*
2992
* That is, when timespec0 != timespec1, M < N. Unfortunately that is not
2993
* always the case (the difference between two distinct xtime instances
2994
* might be smaller than the difference between corresponding TSC reads,
2995
* when updating guest vcpus pvclock areas).
2996
*
2997
* To avoid that problem, do not allow visibility of distinct
2998
* system_timestamp/tsc_timestamp values simultaneously: use a master
2999
* copy of host monotonic time values. Update that master copy
3000
* in lockstep.
3001
*
3002
* Rely on synchronization of host TSCs and guest TSCs for monotonicity.
3003
*
3004
*/
3005
3006
static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
3007
{
3008
#ifdef CONFIG_X86_64
3009
struct kvm_arch *ka = &kvm->arch;
3010
int vclock_mode;
3011
bool host_tsc_clocksource, vcpus_matched;
3012
3013
lockdep_assert_held(&kvm->arch.tsc_write_lock);
3014
vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
3015
atomic_read(&kvm->online_vcpus));
3016
3017
/*
3018
* If the host uses TSC clock, then passthrough TSC as stable
3019
* to the guest.
3020
*/
3021
host_tsc_clocksource = kvm_get_time_and_clockread(
3022
&ka->master_kernel_ns,
3023
&ka->master_cycle_now);
3024
3025
ka->use_master_clock = host_tsc_clocksource && vcpus_matched
3026
&& !ka->backwards_tsc_observed
3027
&& !ka->boot_vcpu_runs_old_kvmclock;
3028
3029
if (ka->use_master_clock)
3030
atomic_set(&kvm_guest_has_master_clock, 1);
3031
3032
vclock_mode = pvclock_gtod_data.clock.vclock_mode;
3033
trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode,
3034
vcpus_matched);
3035
#endif
3036
}
3037
3038
static void kvm_make_mclock_inprogress_request(struct kvm *kvm)
3039
{
3040
kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
3041
}
3042
3043
static void __kvm_start_pvclock_update(struct kvm *kvm)
3044
{
3045
raw_spin_lock_irq(&kvm->arch.tsc_write_lock);
3046
write_seqcount_begin(&kvm->arch.pvclock_sc);
3047
}
3048
3049
static void kvm_start_pvclock_update(struct kvm *kvm)
3050
{
3051
kvm_make_mclock_inprogress_request(kvm);
3052
3053
/* no guest entries from this point */
3054
__kvm_start_pvclock_update(kvm);
3055
}
3056
3057
static void kvm_end_pvclock_update(struct kvm *kvm)
3058
{
3059
struct kvm_arch *ka = &kvm->arch;
3060
struct kvm_vcpu *vcpu;
3061
unsigned long i;
3062
3063
write_seqcount_end(&ka->pvclock_sc);
3064
raw_spin_unlock_irq(&ka->tsc_write_lock);
3065
kvm_for_each_vcpu(i, vcpu, kvm)
3066
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
3067
3068
/* guest entries allowed */
3069
kvm_for_each_vcpu(i, vcpu, kvm)
3070
kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
3071
}
3072
3073
static void kvm_update_masterclock(struct kvm *kvm)
3074
{
3075
kvm_hv_request_tsc_page_update(kvm);
3076
kvm_start_pvclock_update(kvm);
3077
pvclock_update_vm_gtod_copy(kvm);
3078
kvm_end_pvclock_update(kvm);
3079
}
3080
3081
/*
3082
* Use the kernel's tsc_khz directly if the TSC is constant, otherwise use KVM's
3083
* per-CPU value (which may be zero if a CPU is going offline). Note, tsc_khz
3084
* can change during boot even if the TSC is constant, as it's possible for KVM
3085
* to be loaded before TSC calibration completes. Ideally, KVM would get a
3086
* notification when calibration completes, but practically speaking calibration
3087
* will complete before userspace is alive enough to create VMs.
3088
*/
3089
static unsigned long get_cpu_tsc_khz(void)
3090
{
3091
if (static_cpu_has(X86_FEATURE_CONSTANT_TSC))
3092
return tsc_khz;
3093
else
3094
return __this_cpu_read(cpu_tsc_khz);
3095
}
3096
3097
/* Called within read_seqcount_begin/retry for kvm->pvclock_sc. */
3098
static void __get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data)
3099
{
3100
struct kvm_arch *ka = &kvm->arch;
3101
struct pvclock_vcpu_time_info hv_clock;
3102
3103
/* both __this_cpu_read() and rdtsc() should be on the same cpu */
3104
get_cpu();
3105
3106
data->flags = 0;
3107
if (ka->use_master_clock &&
3108
(static_cpu_has(X86_FEATURE_CONSTANT_TSC) || __this_cpu_read(cpu_tsc_khz))) {
3109
#ifdef CONFIG_X86_64
3110
struct timespec64 ts;
3111
3112
if (kvm_get_walltime_and_clockread(&ts, &data->host_tsc)) {
3113
data->realtime = ts.tv_nsec + NSEC_PER_SEC * ts.tv_sec;
3114
data->flags |= KVM_CLOCK_REALTIME | KVM_CLOCK_HOST_TSC;
3115
} else
3116
#endif
3117
data->host_tsc = rdtsc();
3118
3119
data->flags |= KVM_CLOCK_TSC_STABLE;
3120
hv_clock.tsc_timestamp = ka->master_cycle_now;
3121
hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
3122
kvm_get_time_scale(NSEC_PER_SEC, get_cpu_tsc_khz() * 1000LL,
3123
&hv_clock.tsc_shift,
3124
&hv_clock.tsc_to_system_mul);
3125
data->clock = __pvclock_read_cycles(&hv_clock, data->host_tsc);
3126
} else {
3127
data->clock = get_kvmclock_base_ns() + ka->kvmclock_offset;
3128
}
3129
3130
put_cpu();
3131
}
3132
3133
static void get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data)
3134
{
3135
struct kvm_arch *ka = &kvm->arch;
3136
unsigned seq;
3137
3138
do {
3139
seq = read_seqcount_begin(&ka->pvclock_sc);
3140
__get_kvmclock(kvm, data);
3141
} while (read_seqcount_retry(&ka->pvclock_sc, seq));
3142
}
3143
3144
u64 get_kvmclock_ns(struct kvm *kvm)
3145
{
3146
struct kvm_clock_data data;
3147
3148
get_kvmclock(kvm, &data);
3149
return data.clock;
3150
}
3151
3152
static void kvm_setup_guest_pvclock(struct pvclock_vcpu_time_info *ref_hv_clock,
3153
struct kvm_vcpu *vcpu,
3154
struct gfn_to_pfn_cache *gpc,
3155
unsigned int offset)
3156
{
3157
struct pvclock_vcpu_time_info *guest_hv_clock;
3158
struct pvclock_vcpu_time_info hv_clock;
3159
unsigned long flags;
3160
3161
memcpy(&hv_clock, ref_hv_clock, sizeof(hv_clock));
3162
3163
read_lock_irqsave(&gpc->lock, flags);
3164
while (!kvm_gpc_check(gpc, offset + sizeof(*guest_hv_clock))) {
3165
read_unlock_irqrestore(&gpc->lock, flags);
3166
3167
if (kvm_gpc_refresh(gpc, offset + sizeof(*guest_hv_clock)))
3168
return;
3169
3170
read_lock_irqsave(&gpc->lock, flags);
3171
}
3172
3173
guest_hv_clock = (void *)(gpc->khva + offset);
3174
3175
/*
3176
* This VCPU is paused, but it's legal for a guest to read another
3177
* VCPU's kvmclock, so we really have to follow the specification where
3178
* it says that version is odd if data is being modified, and even after
3179
* it is consistent.
3180
*/
3181
3182
guest_hv_clock->version = hv_clock.version = (guest_hv_clock->version + 1) | 1;
3183
smp_wmb();
3184
3185
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
3186
hv_clock.flags |= (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
3187
3188
memcpy(guest_hv_clock, &hv_clock, sizeof(*guest_hv_clock));
3189
3190
smp_wmb();
3191
3192
guest_hv_clock->version = ++hv_clock.version;
3193
3194
kvm_gpc_mark_dirty_in_slot(gpc);
3195
read_unlock_irqrestore(&gpc->lock, flags);
3196
3197
trace_kvm_pvclock_update(vcpu->vcpu_id, &hv_clock);
3198
}
3199
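/*
* Illustrative guest-side sketch (simplified, not KVM code): a guest
* that honours the version protocol maintained above reads its
* pvclock_vcpu_time_info roughly like
*
*    do {
*        version = pvti->version;
*        rmb();
*        tsc = rdtsc();
*        ... snapshot tsc_timestamp, system_time, mul and shift ...
*        rmb();
*    } while ((version & 1) || version != pvti->version);
*
* An odd version, or a version that changed across the reads, means the
* hypervisor was updating the structure and the snapshot must be retried.
*/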
3200
int kvm_guest_time_update(struct kvm_vcpu *v)
3201
{
3202
struct pvclock_vcpu_time_info hv_clock = {};
3203
unsigned long flags, tgt_tsc_khz;
3204
unsigned seq;
3205
struct kvm_vcpu_arch *vcpu = &v->arch;
3206
struct kvm_arch *ka = &v->kvm->arch;
3207
s64 kernel_ns;
3208
u64 tsc_timestamp, host_tsc;
3209
bool use_master_clock;
3210
3211
kernel_ns = 0;
3212
host_tsc = 0;
3213
3214
/*
3215
* If the host uses TSC clock, then passthrough TSC as stable
3216
* to the guest.
3217
*/
3218
do {
3219
seq = read_seqcount_begin(&ka->pvclock_sc);
3220
use_master_clock = ka->use_master_clock;
3221
if (use_master_clock) {
3222
host_tsc = ka->master_cycle_now;
3223
kernel_ns = ka->master_kernel_ns;
3224
}
3225
} while (read_seqcount_retry(&ka->pvclock_sc, seq));
3226
3227
/* Keep irq disabled to prevent changes to the clock */
3228
local_irq_save(flags);
3229
tgt_tsc_khz = get_cpu_tsc_khz();
3230
if (unlikely(tgt_tsc_khz == 0)) {
3231
local_irq_restore(flags);
3232
kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
3233
return 1;
3234
}
3235
if (!use_master_clock) {
3236
host_tsc = rdtsc();
3237
kernel_ns = get_kvmclock_base_ns();
3238
}
3239
3240
tsc_timestamp = kvm_read_l1_tsc(v, host_tsc);
3241
3242
/*
3243
* We may have to catch up the TSC to match elapsed wall clock
3244
* time for two reasons, even if kvmclock is used.
3245
* 1) CPU could have been running below the maximum TSC rate
3246
* 2) Broken TSC compensation resets the base at each VCPU
3247
* entry to avoid unknown leaps of TSC even when running
3248
* again on the same CPU. This may cause apparent elapsed
3249
* time to disappear, and the guest to stand still or run
3250
* very slowly.
3251
*/
3252
if (vcpu->tsc_catchup) {
3253
u64 tsc = compute_guest_tsc(v, kernel_ns);
3254
if (tsc > tsc_timestamp) {
3255
adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
3256
tsc_timestamp = tsc;
3257
}
3258
}
3259
3260
local_irq_restore(flags);
3261
3262
/* With all the info we got, fill in the values */
3263
3264
if (kvm_caps.has_tsc_control) {
3265
tgt_tsc_khz = kvm_scale_tsc(tgt_tsc_khz,
3266
v->arch.l1_tsc_scaling_ratio);
3267
tgt_tsc_khz = tgt_tsc_khz ? : 1;
3268
}
3269
3270
if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) {
3271
kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL,
3272
&vcpu->pvclock_tsc_shift,
3273
&vcpu->pvclock_tsc_mul);
3274
vcpu->hw_tsc_khz = tgt_tsc_khz;
3275
}
3276
3277
hv_clock.tsc_shift = vcpu->pvclock_tsc_shift;
3278
hv_clock.tsc_to_system_mul = vcpu->pvclock_tsc_mul;
3279
hv_clock.tsc_timestamp = tsc_timestamp;
3280
hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
3281
vcpu->last_guest_tsc = tsc_timestamp;
3282
3283
/* If the host uses TSC clocksource, then it is stable */
3284
hv_clock.flags = 0;
3285
if (use_master_clock)
3286
hv_clock.flags |= PVCLOCK_TSC_STABLE_BIT;
3287
3288
if (vcpu->pv_time.active) {
3289
/*
3290
* GUEST_STOPPED is only supported by kvmclock, and KVM's
3291
* historic behavior is to only process the request if kvmclock
3292
* is active/enabled.
3293
*/
3294
if (vcpu->pvclock_set_guest_stopped_request) {
3295
hv_clock.flags |= PVCLOCK_GUEST_STOPPED;
3296
vcpu->pvclock_set_guest_stopped_request = false;
3297
}
3298
kvm_setup_guest_pvclock(&hv_clock, v, &vcpu->pv_time, 0);
3299
3300
hv_clock.flags &= ~PVCLOCK_GUEST_STOPPED;
3301
}
3302
3303
kvm_hv_setup_tsc_page(v->kvm, &hv_clock);
3304
3305
#ifdef CONFIG_KVM_XEN
3306
/*
* For Xen guests we may need to override PVCLOCK_TSC_STABLE_BIT, as Xen
* will not set this bit unless it is explicitly told to use the TSC as
* its clocksource. This default behaviour led to bugs in some guest
* kernels, which misbehave if they observe PVCLOCK_TSC_STABLE_BIT in the
* pvclock flags.
*
* Note! Clear TSC_STABLE only for Xen clocks, i.e. the order matters!
*/
3314
if (ka->xen.hvm_config.flags & KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE)
3315
hv_clock.flags &= ~PVCLOCK_TSC_STABLE_BIT;
3316
3317
if (vcpu->xen.vcpu_info_cache.active)
3318
kvm_setup_guest_pvclock(&hv_clock, v, &vcpu->xen.vcpu_info_cache,
3319
offsetof(struct compat_vcpu_info, time));
3320
if (vcpu->xen.vcpu_time_info_cache.active)
3321
kvm_setup_guest_pvclock(&hv_clock, v, &vcpu->xen.vcpu_time_info_cache, 0);
3322
#endif
3323
return 0;
3324
}
3325
3326
/*
3327
* The pvclock_wall_clock ABI tells the guest the wall clock time at
3328
* which it started (i.e. its epoch, when its kvmclock was zero).
3329
*
3330
* In fact those clocks are subtly different; wall clock frequency is
3331
* adjusted by NTP and has leap seconds, while the kvmclock is a
3332
* simple function of the TSC without any such adjustment.
3333
*
3334
* Perhaps the ABI should have exposed CLOCK_TAI and a ratio between
3335
* that and kvmclock, but even that would be subject to change over
3336
* time.
3337
*
3338
* Attempt to calculate the epoch at a given moment using the *same*
3339
* TSC reading via kvm_get_walltime_and_clockread() to obtain both
3340
* wallclock and kvmclock times, and subtracting one from the other.
3341
*
3342
* Fall back to using their values at slightly different moments by
3343
* calling ktime_get_real_ns() and get_kvmclock_ns() separately.
3344
*/
3345
uint64_t kvm_get_wall_clock_epoch(struct kvm *kvm)
3346
{
3347
#ifdef CONFIG_X86_64
3348
struct pvclock_vcpu_time_info hv_clock;
3349
struct kvm_arch *ka = &kvm->arch;
3350
unsigned long seq, local_tsc_khz;
3351
struct timespec64 ts;
3352
uint64_t host_tsc;
3353
3354
do {
3355
seq = read_seqcount_begin(&ka->pvclock_sc);
3356
3357
local_tsc_khz = 0;
3358
if (!ka->use_master_clock)
3359
break;
3360
3361
/*
3362
* The TSC read and the call to get_cpu_tsc_khz() must happen
3363
* on the same CPU.
3364
*/
3365
get_cpu();
3366
3367
local_tsc_khz = get_cpu_tsc_khz();
3368
3369
if (local_tsc_khz &&
3370
!kvm_get_walltime_and_clockread(&ts, &host_tsc))
3371
local_tsc_khz = 0; /* Fall back to old method */
3372
3373
put_cpu();
3374
3375
/*
3376
* These values must be snapshotted within the seqcount loop.
3377
* After that, it's just mathematics which can happen on any
3378
* CPU at any time.
3379
*/
3380
hv_clock.tsc_timestamp = ka->master_cycle_now;
3381
hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
3382
3383
} while (read_seqcount_retry(&ka->pvclock_sc, seq));
3384
3385
/*
3386
* If the conditions were right, and obtaining the wallclock+TSC was
3387
* successful, calculate the KVM clock at the corresponding time and
3388
* subtract one from the other to get the guest's epoch in nanoseconds
3389
* since 1970-01-01.
3390
*/
3391
if (local_tsc_khz) {
3392
kvm_get_time_scale(NSEC_PER_SEC, local_tsc_khz * NSEC_PER_USEC,
3393
&hv_clock.tsc_shift,
3394
&hv_clock.tsc_to_system_mul);
3395
return ts.tv_nsec + NSEC_PER_SEC * ts.tv_sec -
3396
__pvclock_read_cycles(&hv_clock, host_tsc);
3397
}
3398
#endif
3399
return ktime_get_real_ns() - get_kvmclock_ns(kvm);
3400
}
3401
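/*
* Worked example (hypothetical numbers, for illustration): with a single
* TSC reading T, the fast path above computes
*
*    epoch = wallclock_ns(T) - kvmclock_ns(T)
*
* e.g. if CLOCK_REALTIME at T is 1,700,000,000.5 s and the KVM clock at
* the same T reads 3,600 s (the guest has been up for an hour), the
* guest's epoch is 1,699,996,400.5 s after 1970-01-01.
*/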
3402
/*
3403
* kvmclock updates which are isolated to a given vcpu, such as
3404
* vcpu->cpu migration, should not allow system_timestamp from
3405
* the rest of the vcpus to remain static. Otherwise ntp frequency
3406
* correction applies to one vcpu's system_timestamp but not
3407
* the others.
3408
*
3409
* So in those cases, request a kvmclock update for all vcpus.
3410
* We need to rate-limit these requests though, as they can
3411
* considerably slow guests that have a large number of vcpus.
3412
* The time for a remote vcpu to update its kvmclock is bound
3413
* by the delay we use to rate-limit the updates.
3414
*/
3415
3416
#define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100)
3417
3418
static void kvmclock_update_fn(struct work_struct *work)
3419
{
3420
unsigned long i;
3421
struct delayed_work *dwork = to_delayed_work(work);
3422
struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
3423
kvmclock_update_work);
3424
struct kvm *kvm = container_of(ka, struct kvm, arch);
3425
struct kvm_vcpu *vcpu;
3426
3427
kvm_for_each_vcpu(i, vcpu, kvm) {
3428
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
3429
kvm_vcpu_kick(vcpu);
3430
}
3431
}
3432
3433
static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
3434
{
3435
struct kvm *kvm = v->kvm;
3436
3437
kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
3438
schedule_delayed_work(&kvm->arch.kvmclock_update_work,
3439
KVMCLOCK_UPDATE_DELAY);
3440
}
3441
3442
#define KVMCLOCK_SYNC_PERIOD (300 * HZ)
3443
3444
static void kvmclock_sync_fn(struct work_struct *work)
3445
{
3446
struct delayed_work *dwork = to_delayed_work(work);
3447
struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
3448
kvmclock_sync_work);
3449
struct kvm *kvm = container_of(ka, struct kvm, arch);
3450
3451
schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0);
3452
schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
3453
KVMCLOCK_SYNC_PERIOD);
3454
}
3455
3456
/* These helpers are safe iff @msr is known to be an MCx bank MSR. */
3457
static bool is_mci_control_msr(u32 msr)
3458
{
3459
return (msr & 3) == 0;
3460
}
3461
static bool is_mci_status_msr(u32 msr)
3462
{
3463
return (msr & 3) == 1;
3464
}
3465
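/*
* For reference (architectural MCA bank layout): each bank occupies four
* consecutive MSRs starting at MSR_IA32_MC0_CTL (0x400):
*
*    0x400 + 4*i   MCi_CTL      ((msr & 3) == 0)
*    0x401 + 4*i   MCi_STATUS   ((msr & 3) == 1)
*    0x402 + 4*i   MCi_ADDR
*    0x403 + 4*i   MCi_MISC
*
* which is why the low two bits are enough to classify a bank MSR once
* the index is known to fall within the MCx range.
*/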
3466
/*
3467
* On AMD, HWCR[McStatusWrEn] controls whether setting MCi_STATUS results in #GP.
3468
*/
3469
static bool can_set_mci_status(struct kvm_vcpu *vcpu)
3470
{
3471
/* McStatusWrEn enabled? */
3472
if (guest_cpuid_is_amd_compatible(vcpu))
3473
return !!(vcpu->arch.msr_hwcr & BIT_ULL(18));
3474
3475
return false;
3476
}
3477
3478
static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3479
{
3480
u64 mcg_cap = vcpu->arch.mcg_cap;
3481
unsigned bank_num = mcg_cap & 0xff;
3482
u32 msr = msr_info->index;
3483
u64 data = msr_info->data;
3484
u32 offset, last_msr;
3485
3486
switch (msr) {
3487
case MSR_IA32_MCG_STATUS:
3488
vcpu->arch.mcg_status = data;
3489
break;
3490
case MSR_IA32_MCG_CTL:
3491
if (!(mcg_cap & MCG_CTL_P) &&
3492
(data || !msr_info->host_initiated))
3493
return 1;
3494
if (data != 0 && data != ~(u64)0)
3495
return 1;
3496
vcpu->arch.mcg_ctl = data;
3497
break;
3498
case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1:
3499
last_msr = MSR_IA32_MCx_CTL2(bank_num) - 1;
3500
if (msr > last_msr)
3501
return 1;
3502
3503
if (!(mcg_cap & MCG_CMCI_P) && (data || !msr_info->host_initiated))
3504
return 1;
3505
/* An attempt to write a 1 to a reserved bit raises #GP */
3506
if (data & ~(MCI_CTL2_CMCI_EN | MCI_CTL2_CMCI_THRESHOLD_MASK))
3507
return 1;
3508
offset = array_index_nospec(msr - MSR_IA32_MC0_CTL2,
3509
last_msr + 1 - MSR_IA32_MC0_CTL2);
3510
vcpu->arch.mci_ctl2_banks[offset] = data;
3511
break;
3512
case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
3513
last_msr = MSR_IA32_MCx_CTL(bank_num) - 1;
3514
if (msr > last_msr)
3515
return 1;
3516
3517
/*
* Only 0 or all 1s can be written to IA32_MCi_CTL; all other
* values are architecturally undefined. But some Linux kernels
* clear bit 10 in bank 4 to work around a BIOS/GART TLB issue on
* AMD K8s, so allow bit 10 to be clear when setting all other
* bits in order to avoid an uncaught #GP in the guest.
*
* UNIXWARE clears bit 0 of MC1_CTL to ignore correctable,
* single-bit ECC data errors.
*/
3527
if (is_mci_control_msr(msr) &&
3528
data != 0 && (data | (1 << 10) | 1) != ~(u64)0)
3529
return 1;
3530
3531
/*
3532
* All CPUs allow writing 0 to MCi_STATUS MSRs to clear the MSR.
3533
* AMD-based CPUs allow non-zero values, but if and only if
3534
* HWCR[McStatusWrEn] is set.
3535
*/
3536
if (!msr_info->host_initiated && is_mci_status_msr(msr) &&
3537
data != 0 && !can_set_mci_status(vcpu))
3538
return 1;
3539
3540
offset = array_index_nospec(msr - MSR_IA32_MC0_CTL,
3541
last_msr + 1 - MSR_IA32_MC0_CTL);
3542
vcpu->arch.mce_banks[offset] = data;
3543
break;
3544
default:
3545
return 1;
3546
}
3547
return 0;
3548
}
3549
3550
static inline bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu)
3551
{
3552
u64 mask = KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;
3553
3554
return (vcpu->arch.apf.msr_en_val & mask) == mask;
3555
}
3556
3557
static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
3558
{
3559
gpa_t gpa = data & ~0x3f;
3560
3561
/* Bits 4:5 are reserved, should be zero */
3562
if (data & 0x30)
3563
return 1;
3564
3565
if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_VMEXIT) &&
3566
(data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT))
3567
return 1;
3568
3569
if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT) &&
3570
(data & KVM_ASYNC_PF_DELIVERY_AS_INT))
3571
return 1;
3572
3573
if (!lapic_in_kernel(vcpu))
3574
return data ? 1 : 0;
3575
3576
vcpu->arch.apf.msr_en_val = data;
3577
3578
if (!kvm_pv_async_pf_enabled(vcpu)) {
3579
kvm_clear_async_pf_completion_queue(vcpu);
3580
kvm_async_pf_hash_reset(vcpu);
3581
return 0;
3582
}
3583
3584
if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
3585
sizeof(u64)))
3586
return 1;
3587
3588
vcpu->arch.apf.send_always = (data & KVM_ASYNC_PF_SEND_ALWAYS);
3589
vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
3590
3591
kvm_async_pf_wakeup_all(vcpu);
3592
3593
return 0;
3594
}
3595
3596
static int kvm_pv_enable_async_pf_int(struct kvm_vcpu *vcpu, u64 data)
3597
{
3598
/* Bits 8-63 are reserved */
3599
if (data >> 8)
3600
return 1;
3601
3602
if (!lapic_in_kernel(vcpu))
3603
return 1;
3604
3605
vcpu->arch.apf.msr_int_val = data;
3606
3607
vcpu->arch.apf.vec = data & KVM_ASYNC_PF_VEC_MASK;
3608
3609
return 0;
3610
}
3611
3612
static void kvmclock_reset(struct kvm_vcpu *vcpu)
3613
{
3614
kvm_gpc_deactivate(&vcpu->arch.pv_time);
3615
vcpu->arch.time = 0;
3616
}
3617
3618
static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu)
3619
{
3620
++vcpu->stat.tlb_flush;
3621
kvm_x86_call(flush_tlb_all)(vcpu);
3622
3623
/* Flushing all ASIDs flushes the current ASID... */
3624
kvm_clear_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
3625
}
3626
3627
static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
3628
{
3629
++vcpu->stat.tlb_flush;
3630
3631
if (!tdp_enabled) {
3632
/*
3633
* A TLB flush on behalf of the guest is equivalent to
3634
* INVPCID(all), toggling CR4.PGE, etc., which requires
3635
* a forced sync of the shadow page tables. Ensure all the
3636
* roots are synced and the guest TLB in hardware is clean.
3637
*/
3638
kvm_mmu_sync_roots(vcpu);
3639
kvm_mmu_sync_prev_roots(vcpu);
3640
}
3641
3642
kvm_x86_call(flush_tlb_guest)(vcpu);
3643
3644
/*
3645
* Flushing all "guest" TLB is always a superset of Hyper-V's fine
3646
* grained flushing.
3647
*/
3648
kvm_hv_vcpu_purge_flush_tlb(vcpu);
3649
}
3650
3651
3652
static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
3653
{
3654
++vcpu->stat.tlb_flush;
3655
kvm_x86_call(flush_tlb_current)(vcpu);
3656
}
3657
3658
/*
3659
* Service "local" TLB flush requests, which are specific to the current MMU
3660
* context. In addition to the generic event handling in vcpu_enter_guest(),
3661
* TLB flushes that are targeted at an MMU context also need to be serviced
3662
* prior before nested VM-Enter/VM-Exit.
3663
*/
3664
void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu)
3665
{
3666
if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
3667
kvm_vcpu_flush_tlb_current(vcpu);
3668
3669
if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
3670
kvm_vcpu_flush_tlb_guest(vcpu);
3671
}
3672
EXPORT_SYMBOL_GPL(kvm_service_local_tlb_flush_requests);
3673
3674
static void record_steal_time(struct kvm_vcpu *vcpu)
3675
{
3676
struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
3677
struct kvm_steal_time __user *st;
3678
struct kvm_memslots *slots;
3679
gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
3680
u64 steal;
3681
u32 version;
3682
3683
if (kvm_xen_msr_enabled(vcpu->kvm)) {
3684
kvm_xen_runstate_set_running(vcpu);
3685
return;
3686
}
3687
3688
if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
3689
return;
3690
3691
if (WARN_ON_ONCE(current->mm != vcpu->kvm->mm))
3692
return;
3693
3694
slots = kvm_memslots(vcpu->kvm);
3695
3696
if (unlikely(slots->generation != ghc->generation ||
3697
gpa != ghc->gpa ||
3698
kvm_is_error_hva(ghc->hva) || !ghc->memslot)) {
3699
/* We rely on the fact that it fits in a single page. */
3700
BUILD_BUG_ON((sizeof(*st) - 1) & KVM_STEAL_VALID_BITS);
3701
3702
if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st)) ||
3703
kvm_is_error_hva(ghc->hva) || !ghc->memslot)
3704
return;
3705
}
3706
3707
st = (struct kvm_steal_time __user *)ghc->hva;
3708
/*
3709
* Doing a TLB flush here, on the guest's behalf, can avoid
3710
* expensive IPIs.
3711
*/
3712
if (guest_pv_has(vcpu, KVM_FEATURE_PV_TLB_FLUSH)) {
3713
u8 st_preempted = 0;
3714
int err = -EFAULT;
3715
3716
if (!user_access_begin(st, sizeof(*st)))
3717
return;
3718
3719
asm volatile("1: xchgb %0, %2\n"
3720
"xor %1, %1\n"
3721
"2:\n"
3722
_ASM_EXTABLE_UA(1b, 2b)
3723
: "+q" (st_preempted),
3724
"+&r" (err),
3725
"+m" (st->preempted));
3726
if (err)
3727
goto out;
3728
3729
user_access_end();
3730
3731
vcpu->arch.st.preempted = 0;
3732
3733
trace_kvm_pv_tlb_flush(vcpu->vcpu_id,
3734
st_preempted & KVM_VCPU_FLUSH_TLB);
3735
if (st_preempted & KVM_VCPU_FLUSH_TLB)
3736
kvm_vcpu_flush_tlb_guest(vcpu);
3737
3738
if (!user_access_begin(st, sizeof(*st)))
3739
goto dirty;
3740
} else {
3741
if (!user_access_begin(st, sizeof(*st)))
3742
return;
3743
3744
unsafe_put_user(0, &st->preempted, out);
3745
vcpu->arch.st.preempted = 0;
3746
}
3747
3748
unsafe_get_user(version, &st->version, out);
3749
if (version & 1)
3750
version += 1; /* first time write, random junk */
3751
3752
version += 1;
3753
unsafe_put_user(version, &st->version, out);
3754
3755
smp_wmb();
3756
3757
unsafe_get_user(steal, &st->steal, out);
3758
steal += current->sched_info.run_delay -
3759
vcpu->arch.st.last_steal;
3760
vcpu->arch.st.last_steal = current->sched_info.run_delay;
3761
unsafe_put_user(steal, &st->steal, out);
3762
3763
version += 1;
3764
unsafe_put_user(version, &st->version, out);
3765
3766
out:
3767
user_access_end();
3768
dirty:
3769
mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
3770
}
3771
3772
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3773
{
3774
u32 msr = msr_info->index;
3775
u64 data = msr_info->data;
3776
3777
/*
3778
* Do not allow host-initiated writes to trigger the Xen hypercall
3779
* page setup; it could incur locking paths which are not expected
3780
* if userspace sets the MSR in an unusual location.
3781
*/
3782
if (kvm_xen_is_hypercall_page_msr(vcpu->kvm, msr) &&
3783
!msr_info->host_initiated)
3784
return kvm_xen_write_hypercall_page(vcpu, data);
3785
3786
switch (msr) {
3787
case MSR_AMD64_NB_CFG:
3788
case MSR_IA32_UCODE_WRITE:
3789
case MSR_VM_HSAVE_PA:
3790
case MSR_AMD64_PATCH_LOADER:
3791
case MSR_AMD64_BU_CFG2:
3792
case MSR_AMD64_DC_CFG:
3793
case MSR_AMD64_TW_CFG:
3794
case MSR_F15H_EX_CFG:
3795
break;
3796
3797
case MSR_IA32_UCODE_REV:
3798
if (msr_info->host_initiated)
3799
vcpu->arch.microcode_version = data;
3800
break;
3801
case MSR_IA32_ARCH_CAPABILITIES:
3802
if (!msr_info->host_initiated ||
3803
!guest_cpu_cap_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
3804
return KVM_MSR_RET_UNSUPPORTED;
3805
vcpu->arch.arch_capabilities = data;
3806
break;
3807
case MSR_IA32_PERF_CAPABILITIES:
3808
if (!msr_info->host_initiated ||
3809
!guest_cpu_cap_has(vcpu, X86_FEATURE_PDCM))
3810
return KVM_MSR_RET_UNSUPPORTED;
3811
3812
if (data & ~kvm_caps.supported_perf_cap)
3813
return 1;
3814
3815
/*
3816
* Note, this is not just a performance optimization! KVM
3817
* disallows changing feature MSRs after the vCPU has run; PMU
3818
* refresh will bug the VM if called after the vCPU has run.
3819
*/
3820
if (vcpu->arch.perf_capabilities == data)
3821
break;
3822
3823
vcpu->arch.perf_capabilities = data;
3824
kvm_pmu_refresh(vcpu);
3825
break;
3826
case MSR_IA32_PRED_CMD: {
3827
u64 reserved_bits = ~(PRED_CMD_IBPB | PRED_CMD_SBPB);
3828
3829
if (!msr_info->host_initiated) {
3830
if ((!guest_has_pred_cmd_msr(vcpu)))
3831
return 1;
3832
3833
if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
3834
!guest_cpu_cap_has(vcpu, X86_FEATURE_AMD_IBPB))
3835
reserved_bits |= PRED_CMD_IBPB;
3836
3837
if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SBPB))
3838
reserved_bits |= PRED_CMD_SBPB;
3839
}
3840
3841
if (!boot_cpu_has(X86_FEATURE_IBPB))
3842
reserved_bits |= PRED_CMD_IBPB;
3843
3844
if (!boot_cpu_has(X86_FEATURE_SBPB))
3845
reserved_bits |= PRED_CMD_SBPB;
3846
3847
if (data & reserved_bits)
3848
return 1;
3849
3850
if (!data)
3851
break;
3852
3853
wrmsrq(MSR_IA32_PRED_CMD, data);
3854
break;
3855
}
3856
case MSR_IA32_FLUSH_CMD:
3857
if (!msr_info->host_initiated &&
3858
!guest_cpu_cap_has(vcpu, X86_FEATURE_FLUSH_L1D))
3859
return 1;
3860
3861
if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D) || (data & ~L1D_FLUSH))
3862
return 1;
3863
if (!data)
3864
break;
3865
3866
wrmsrq(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
3867
break;
3868
case MSR_EFER:
3869
return set_efer(vcpu, msr_info);
3870
case MSR_K7_HWCR:
3871
data &= ~(u64)0x40; /* ignore flush filter disable */
3872
data &= ~(u64)0x100; /* ignore ignne emulation enable */
3873
data &= ~(u64)0x8; /* ignore TLB cache disable */
3874
3875
/*
3876
* Allow McStatusWrEn and TscFreqSel. (Linux guests from v3.2
3877
* through at least v6.6 whine if TscFreqSel is clear,
3878
* depending on F/M/S.
3879
*/
3880
if (data & ~(BIT_ULL(18) | BIT_ULL(24))) {
3881
kvm_pr_unimpl_wrmsr(vcpu, msr, data);
3882
return 1;
3883
}
3884
vcpu->arch.msr_hwcr = data;
3885
break;
3886
case MSR_FAM10H_MMIO_CONF_BASE:
3887
if (data != 0) {
3888
kvm_pr_unimpl_wrmsr(vcpu, msr, data);
3889
return 1;
3890
}
3891
break;
3892
case MSR_IA32_CR_PAT:
3893
if (!kvm_pat_valid(data))
3894
return 1;
3895
3896
vcpu->arch.pat = data;
3897
break;
3898
case MTRRphysBase_MSR(0) ... MSR_MTRRfix4K_F8000:
3899
case MSR_MTRRdefType:
3900
return kvm_mtrr_set_msr(vcpu, msr, data);
3901
case MSR_IA32_APICBASE:
3902
return kvm_apic_set_base(vcpu, data, msr_info->host_initiated);
3903
case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
3904
return kvm_x2apic_msr_write(vcpu, msr, data);
3905
case MSR_IA32_TSC_DEADLINE:
3906
kvm_set_lapic_tscdeadline_msr(vcpu, data);
3907
break;
3908
case MSR_IA32_TSC_ADJUST:
3909
if (guest_cpu_cap_has(vcpu, X86_FEATURE_TSC_ADJUST)) {
3910
if (!msr_info->host_initiated) {
3911
s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
3912
adjust_tsc_offset_guest(vcpu, adj);
3913
/* Before back to guest, tsc_timestamp must be adjusted
3914
* as well, otherwise guest's percpu pvclock time could jump.
3915
*/
3916
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
3917
}
3918
vcpu->arch.ia32_tsc_adjust_msr = data;
3919
}
3920
break;
3921
case MSR_IA32_MISC_ENABLE: {
3922
u64 old_val = vcpu->arch.ia32_misc_enable_msr;
3923
3924
if (!msr_info->host_initiated) {
3925
/* RO bits */
3926
if ((old_val ^ data) & MSR_IA32_MISC_ENABLE_PMU_RO_MASK)
3927
return 1;
3928
3929
/* R bits, i.e. writes are ignored, but don't fault. */
3930
data = data & ~MSR_IA32_MISC_ENABLE_EMON;
3931
data |= old_val & MSR_IA32_MISC_ENABLE_EMON;
3932
}
3933
3934
if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) &&
3935
((old_val ^ data) & MSR_IA32_MISC_ENABLE_MWAIT)) {
3936
if (!guest_cpu_cap_has(vcpu, X86_FEATURE_XMM3))
3937
return 1;
3938
vcpu->arch.ia32_misc_enable_msr = data;
3939
vcpu->arch.cpuid_dynamic_bits_dirty = true;
3940
} else {
3941
vcpu->arch.ia32_misc_enable_msr = data;
3942
}
3943
break;
3944
}
3945
case MSR_IA32_SMBASE:
3946
if (!IS_ENABLED(CONFIG_KVM_SMM) || !msr_info->host_initiated)
3947
return 1;
3948
vcpu->arch.smbase = data;
3949
break;
3950
case MSR_IA32_POWER_CTL:
3951
vcpu->arch.msr_ia32_power_ctl = data;
3952
break;
3953
case MSR_IA32_TSC:
3954
if (msr_info->host_initiated) {
3955
kvm_synchronize_tsc(vcpu, &data);
3956
} else if (!vcpu->arch.guest_tsc_protected) {
3957
u64 adj = kvm_compute_l1_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset;
3958
adjust_tsc_offset_guest(vcpu, adj);
3959
vcpu->arch.ia32_tsc_adjust_msr += adj;
3960
}
3961
break;
3962
case MSR_IA32_XSS:
3963
if (!msr_info->host_initiated &&
3964
!guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
3965
return 1;
3966
/*
3967
* KVM supports exposing PT to the guest, but does not support
3968
* IA32_XSS[bit 8]. Guests have to use RDMSR/WRMSR rather than
3969
* XSAVES/XRSTORS to save/restore PT MSRs.
3970
*/
3971
if (data & ~kvm_caps.supported_xss)
3972
return 1;
3973
vcpu->arch.ia32_xss = data;
3974
vcpu->arch.cpuid_dynamic_bits_dirty = true;
3975
break;
3976
case MSR_SMI_COUNT:
3977
if (!msr_info->host_initiated)
3978
return 1;
3979
vcpu->arch.smi_count = data;
3980
break;
3981
case MSR_KVM_WALL_CLOCK_NEW:
3982
if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
3983
return 1;
3984
3985
vcpu->kvm->arch.wall_clock = data;
3986
kvm_write_wall_clock(vcpu->kvm, data, 0);
3987
break;
3988
case MSR_KVM_WALL_CLOCK:
3989
if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
3990
return 1;
3991
3992
vcpu->kvm->arch.wall_clock = data;
3993
kvm_write_wall_clock(vcpu->kvm, data, 0);
3994
break;
3995
case MSR_KVM_SYSTEM_TIME_NEW:
3996
if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
3997
return 1;
3998
3999
kvm_write_system_time(vcpu, data, false, msr_info->host_initiated);
4000
break;
4001
case MSR_KVM_SYSTEM_TIME:
4002
if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
4003
return 1;
4004
4005
kvm_write_system_time(vcpu, data, true, msr_info->host_initiated);
4006
break;
4007
case MSR_KVM_ASYNC_PF_EN:
4008
if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
4009
return 1;
4010
4011
if (kvm_pv_enable_async_pf(vcpu, data))
4012
return 1;
4013
break;
4014
case MSR_KVM_ASYNC_PF_INT:
4015
if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
4016
return 1;
4017
4018
if (kvm_pv_enable_async_pf_int(vcpu, data))
4019
return 1;
4020
break;
4021
case MSR_KVM_ASYNC_PF_ACK:
4022
if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
4023
return 1;
4024
if (data & 0x1) {
4025
vcpu->arch.apf.pageready_pending = false;
4026
kvm_check_async_pf_completion(vcpu);
4027
}
4028
break;
4029
case MSR_KVM_STEAL_TIME:
4030
if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME))
4031
return 1;
4032
4033
if (unlikely(!sched_info_on()))
4034
return 1;
4035
4036
if (data & KVM_STEAL_RESERVED_MASK)
4037
return 1;
4038
4039
vcpu->arch.st.msr_val = data;
4040
4041
if (!(data & KVM_MSR_ENABLED))
4042
break;
4043
4044
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
4045
4046
break;
4047
case MSR_KVM_PV_EOI_EN:
4048
if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI))
4049
return 1;
4050
4051
if (kvm_lapic_set_pv_eoi(vcpu, data, sizeof(u8)))
4052
return 1;
4053
break;
4054
4055
case MSR_KVM_POLL_CONTROL:
4056
if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL))
4057
return 1;
4058
4059
/* only enable bit supported */
4060
if (data & (-1ULL << 1))
4061
return 1;
4062
4063
vcpu->arch.msr_kvm_poll_control = data;
4064
break;
4065
4066
case MSR_IA32_MCG_CTL:
4067
case MSR_IA32_MCG_STATUS:
4068
case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
4069
case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1:
4070
return set_msr_mce(vcpu, msr_info);
4071
4072
case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
4073
case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
4074
case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
4075
case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
4076
if (kvm_pmu_is_valid_msr(vcpu, msr))
4077
return kvm_pmu_set_msr(vcpu, msr_info);
4078
4079
if (data)
4080
kvm_pr_unimpl_wrmsr(vcpu, msr, data);
4081
break;
4082
case MSR_K7_CLK_CTL:
4083
/*
4084
* Ignore all writes to this no longer documented MSR.
4085
* Writes are only relevant for old K7 processors,
4086
* all pre-dating SVM, but a recommended workaround from
4087
* AMD for these chips. It is possible to specify the
4088
* affected processor models on the command line, hence
4089
* the need to ignore the workaround.
4090
*/
4091
break;
4092
#ifdef CONFIG_KVM_HYPERV
4093
case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
4094
case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
4095
case HV_X64_MSR_SYNDBG_OPTIONS:
4096
case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
4097
case HV_X64_MSR_CRASH_CTL:
4098
case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
4099
case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
4100
case HV_X64_MSR_TSC_EMULATION_CONTROL:
4101
case HV_X64_MSR_TSC_EMULATION_STATUS:
4102
case HV_X64_MSR_TSC_INVARIANT_CONTROL:
4103
return kvm_hv_set_msr_common(vcpu, msr, data,
4104
msr_info->host_initiated);
4105
#endif
4106
case MSR_IA32_BBL_CR_CTL3:
4107
/* Drop writes to this legacy MSR -- see rdmsr
4108
* counterpart for further detail.
4109
*/
4110
kvm_pr_unimpl_wrmsr(vcpu, msr, data);
4111
break;
4112
case MSR_AMD64_OSVW_ID_LENGTH:
4113
if (!guest_cpu_cap_has(vcpu, X86_FEATURE_OSVW))
4114
return 1;
4115
vcpu->arch.osvw.length = data;
4116
break;
4117
case MSR_AMD64_OSVW_STATUS:
4118
if (!guest_cpu_cap_has(vcpu, X86_FEATURE_OSVW))
4119
return 1;
4120
vcpu->arch.osvw.status = data;
4121
break;
4122
case MSR_PLATFORM_INFO:
4123
if (!msr_info->host_initiated)
4124
return 1;
4125
vcpu->arch.msr_platform_info = data;
4126
break;
4127
case MSR_MISC_FEATURES_ENABLES:
4128
if (data & ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT ||
4129
(data & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
4130
!supports_cpuid_fault(vcpu)))
4131
return 1;
4132
vcpu->arch.msr_misc_features_enables = data;
4133
break;
4134
#ifdef CONFIG_X86_64
4135
case MSR_IA32_XFD:
4136
if (!msr_info->host_initiated &&
4137
!guest_cpu_cap_has(vcpu, X86_FEATURE_XFD))
4138
return 1;
4139
4140
if (data & ~kvm_guest_supported_xfd(vcpu))
4141
return 1;
4142
4143
fpu_update_guest_xfd(&vcpu->arch.guest_fpu, data);
4144
break;
4145
case MSR_IA32_XFD_ERR:
4146
if (!msr_info->host_initiated &&
4147
!guest_cpu_cap_has(vcpu, X86_FEATURE_XFD))
4148
return 1;
4149
4150
if (data & ~kvm_guest_supported_xfd(vcpu))
4151
return 1;
4152
4153
vcpu->arch.guest_fpu.xfd_err = data;
4154
break;
4155
#endif
4156
default:
4157
if (kvm_pmu_is_valid_msr(vcpu, msr))
4158
return kvm_pmu_set_msr(vcpu, msr_info);
4159
4160
return KVM_MSR_RET_UNSUPPORTED;
4161
}
4162
return 0;
4163
}
4164
EXPORT_SYMBOL_GPL(kvm_set_msr_common);
4165
4166
static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
4167
{
4168
u64 data;
4169
u64 mcg_cap = vcpu->arch.mcg_cap;
4170
unsigned bank_num = mcg_cap & 0xff;
4171
u32 offset, last_msr;
4172
4173
switch (msr) {
4174
case MSR_IA32_P5_MC_ADDR:
4175
case MSR_IA32_P5_MC_TYPE:
4176
data = 0;
4177
break;
4178
case MSR_IA32_MCG_CAP:
4179
data = vcpu->arch.mcg_cap;
4180
break;
4181
case MSR_IA32_MCG_CTL:
4182
if (!(mcg_cap & MCG_CTL_P) && !host)
4183
return 1;
4184
data = vcpu->arch.mcg_ctl;
4185
break;
4186
case MSR_IA32_MCG_STATUS:
4187
data = vcpu->arch.mcg_status;
4188
break;
4189
case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1:
4190
last_msr = MSR_IA32_MCx_CTL2(bank_num) - 1;
4191
if (msr > last_msr)
4192
return 1;
4193
4194
if (!(mcg_cap & MCG_CMCI_P) && !host)
4195
return 1;
4196
offset = array_index_nospec(msr - MSR_IA32_MC0_CTL2,
4197
last_msr + 1 - MSR_IA32_MC0_CTL2);
4198
data = vcpu->arch.mci_ctl2_banks[offset];
4199
break;
4200
case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
4201
last_msr = MSR_IA32_MCx_CTL(bank_num) - 1;
4202
if (msr > last_msr)
4203
return 1;
4204
4205
offset = array_index_nospec(msr - MSR_IA32_MC0_CTL,
4206
last_msr + 1 - MSR_IA32_MC0_CTL);
4207
data = vcpu->arch.mce_banks[offset];
4208
break;
4209
default:
4210
return 1;
4211
}
4212
*pdata = data;
4213
return 0;
4214
}
4215
4216
int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
4217
{
4218
switch (msr_info->index) {
4219
case MSR_IA32_PLATFORM_ID:
4220
case MSR_IA32_EBL_CR_POWERON:
4221
case MSR_IA32_LASTBRANCHFROMIP:
4222
case MSR_IA32_LASTBRANCHTOIP:
4223
case MSR_IA32_LASTINTFROMIP:
4224
case MSR_IA32_LASTINTTOIP:
4225
case MSR_AMD64_SYSCFG:
4226
case MSR_K8_TSEG_ADDR:
4227
case MSR_K8_TSEG_MASK:
4228
case MSR_VM_HSAVE_PA:
4229
case MSR_K8_INT_PENDING_MSG:
4230
case MSR_AMD64_NB_CFG:
4231
case MSR_FAM10H_MMIO_CONF_BASE:
4232
case MSR_AMD64_BU_CFG2:
4233
case MSR_IA32_PERF_CTL:
4234
case MSR_AMD64_DC_CFG:
4235
case MSR_AMD64_TW_CFG:
4236
case MSR_F15H_EX_CFG:
4237
/*
4238
* Intel Sandy Bridge CPUs must support the RAPL (running average power
4239
* limit) MSRs. Just return 0, as we do not want to expose the host
4240
* data here. Do not conditionalize this on CPUID, as KVM does not do
4241
* so for existing CPU-specific MSRs.
4242
*/
4243
case MSR_RAPL_POWER_UNIT:
4244
case MSR_PP0_ENERGY_STATUS: /* Power plane 0 (core) */
4245
case MSR_PP1_ENERGY_STATUS: /* Power plane 1 (graphics uncore) */
4246
case MSR_PKG_ENERGY_STATUS: /* Total package */
4247
case MSR_DRAM_ENERGY_STATUS: /* DRAM controller */
4248
msr_info->data = 0;
4249
break;
4250
case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
4251
case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
4252
case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
4253
case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
4254
if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
4255
return kvm_pmu_get_msr(vcpu, msr_info);
4256
msr_info->data = 0;
4257
break;
4258
case MSR_IA32_UCODE_REV:
4259
msr_info->data = vcpu->arch.microcode_version;
4260
break;
4261
case MSR_IA32_ARCH_CAPABILITIES:
4262
if (!guest_cpu_cap_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
4263
return KVM_MSR_RET_UNSUPPORTED;
4264
msr_info->data = vcpu->arch.arch_capabilities;
4265
break;
4266
case MSR_IA32_PERF_CAPABILITIES:
4267
if (!guest_cpu_cap_has(vcpu, X86_FEATURE_PDCM))
4268
return KVM_MSR_RET_UNSUPPORTED;
4269
msr_info->data = vcpu->arch.perf_capabilities;
4270
break;
4271
case MSR_IA32_POWER_CTL:
4272
msr_info->data = vcpu->arch.msr_ia32_power_ctl;
4273
break;
4274
case MSR_IA32_TSC: {
4275
/*
4276
* Intel SDM states that MSR_IA32_TSC read adds the TSC offset
4277
* even when not intercepted. AMD manual doesn't explicitly
4278
* state this but appears to behave the same.
4279
*
4280
* On userspace reads and writes, however, we unconditionally
4281
* return L1's TSC value to ensure backwards-compatible
4282
* behavior for migration.
4283
*/
4284
u64 offset, ratio;
4285
4286
if (msr_info->host_initiated) {
4287
offset = vcpu->arch.l1_tsc_offset;
4288
ratio = vcpu->arch.l1_tsc_scaling_ratio;
4289
} else {
4290
offset = vcpu->arch.tsc_offset;
4291
ratio = vcpu->arch.tsc_scaling_ratio;
4292
}
4293
4294
msr_info->data = kvm_scale_tsc(rdtsc(), ratio) + offset;
4295
break;
4296
}
4297
case MSR_IA32_CR_PAT:
4298
msr_info->data = vcpu->arch.pat;
4299
break;
4300
case MSR_MTRRcap:
4301
case MTRRphysBase_MSR(0) ... MSR_MTRRfix4K_F8000:
4302
case MSR_MTRRdefType:
4303
return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
4304
case 0xcd: /* fsb frequency */
4305
msr_info->data = 3;
4306
break;
4307
/*
4308
* MSR_EBC_FREQUENCY_ID
4309
* Conservative value valid for even the basic CPU models.
4310
* Models 0,1: 000 in bits 23:21 indicating a bus speed of
4311
* 100MHz, model 2 000 in bits 18:16 indicating 100MHz,
4312
* and 266MHz for model 3, or 4. Set Core Clock
4313
* Frequency to System Bus Frequency Ratio to 1 (bits
4314
* 31:24) even though these are only valid for CPU
4315
* models > 2, however guests may end up dividing or
4316
* multiplying by zero otherwise.
4317
*/
4318
case MSR_EBC_FREQUENCY_ID:
4319
msr_info->data = 1 << 24;
4320
break;
4321
case MSR_IA32_APICBASE:
4322
msr_info->data = vcpu->arch.apic_base;
4323
break;
4324
case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
4325
return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
4326
case MSR_IA32_TSC_DEADLINE:
4327
msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
4328
break;
4329
case MSR_IA32_TSC_ADJUST:
4330
msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
4331
break;
4332
case MSR_IA32_MISC_ENABLE:
4333
msr_info->data = vcpu->arch.ia32_misc_enable_msr;
4334
break;
4335
case MSR_IA32_SMBASE:
4336
if (!IS_ENABLED(CONFIG_KVM_SMM) || !msr_info->host_initiated)
4337
return 1;
4338
msr_info->data = vcpu->arch.smbase;
4339
break;
4340
case MSR_SMI_COUNT:
4341
msr_info->data = vcpu->arch.smi_count;
4342
break;
4343
case MSR_IA32_PERF_STATUS:
4344
/* TSC increment by tick */
4345
msr_info->data = 1000ULL;
4346
/* CPU multiplier */
4347
msr_info->data |= (((uint64_t)4ULL) << 40);
4348
break;
4349
case MSR_EFER:
4350
msr_info->data = vcpu->arch.efer;
4351
break;
4352
case MSR_KVM_WALL_CLOCK:
4353
if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
4354
return 1;
4355
4356
msr_info->data = vcpu->kvm->arch.wall_clock;
4357
break;
4358
case MSR_KVM_WALL_CLOCK_NEW:
4359
if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
4360
return 1;
4361
4362
msr_info->data = vcpu->kvm->arch.wall_clock;
4363
break;
4364
case MSR_KVM_SYSTEM_TIME:
4365
if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
4366
return 1;
4367
4368
msr_info->data = vcpu->arch.time;
4369
break;
4370
case MSR_KVM_SYSTEM_TIME_NEW:
4371
if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
4372
return 1;
4373
4374
msr_info->data = vcpu->arch.time;
4375
break;
4376
case MSR_KVM_ASYNC_PF_EN:
4377
if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
4378
return 1;
4379
4380
msr_info->data = vcpu->arch.apf.msr_en_val;
4381
break;
4382
case MSR_KVM_ASYNC_PF_INT:
4383
if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
4384
return 1;
4385
4386
msr_info->data = vcpu->arch.apf.msr_int_val;
4387
break;
4388
case MSR_KVM_ASYNC_PF_ACK:
4389
if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
4390
return 1;
4391
4392
msr_info->data = 0;
4393
break;
4394
case MSR_KVM_STEAL_TIME:
4395
if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME))
4396
return 1;
4397
4398
msr_info->data = vcpu->arch.st.msr_val;
4399
break;
4400
case MSR_KVM_PV_EOI_EN:
4401
if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI))
4402
return 1;
4403
4404
msr_info->data = vcpu->arch.pv_eoi.msr_val;
4405
break;
4406
case MSR_KVM_POLL_CONTROL:
4407
if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL))
4408
return 1;
4409
4410
msr_info->data = vcpu->arch.msr_kvm_poll_control;
4411
break;
4412
case MSR_IA32_P5_MC_ADDR:
4413
case MSR_IA32_P5_MC_TYPE:
4414
case MSR_IA32_MCG_CAP:
4415
case MSR_IA32_MCG_CTL:
4416
case MSR_IA32_MCG_STATUS:
4417
case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
4418
case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1:
4419
return get_msr_mce(vcpu, msr_info->index, &msr_info->data,
4420
msr_info->host_initiated);
4421
case MSR_IA32_XSS:
4422
if (!msr_info->host_initiated &&
4423
!guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
4424
return 1;
4425
msr_info->data = vcpu->arch.ia32_xss;
4426
break;
4427
case MSR_K7_CLK_CTL:
4428
/*
4429
* Provide expected ramp-up count for K7. All other
4430
* are set to zero, indicating minimum divisors for
4431
* every field.
4432
*
4433
* This prevents guest kernels on AMD host with CPU
4434
* type 6, model 8 and higher from exploding due to
4435
* the rdmsr failing.
4436
*/
4437
msr_info->data = 0x20000000;
4438
break;
4439
#ifdef CONFIG_KVM_HYPERV
4440
case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
4441
case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
4442
case HV_X64_MSR_SYNDBG_OPTIONS:
4443
case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
4444
case HV_X64_MSR_CRASH_CTL:
4445
case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
4446
case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
4447
case HV_X64_MSR_TSC_EMULATION_CONTROL:
4448
case HV_X64_MSR_TSC_EMULATION_STATUS:
4449
case HV_X64_MSR_TSC_INVARIANT_CONTROL:
4450
return kvm_hv_get_msr_common(vcpu,
4451
msr_info->index, &msr_info->data,
4452
msr_info->host_initiated);
4453
#endif
4454
case MSR_IA32_BBL_CR_CTL3:
4455
/* This legacy MSR exists but isn't fully documented in current
4456
* silicon. It is however accessed by winxp in very narrow
4457
* scenarios where it sets bit #19, itself documented as
4458
* a "reserved" bit. Best effort attempt to source coherent
4459
* read data here should the balance of the register be
4460
* interpreted by the guest:
4461
*
4462
* L2 cache control register 3: 64GB range, 256KB size,
4463
* enabled, latency 0x1, configured
4464
*/
4465
msr_info->data = 0xbe702111;
4466
break;
4467
case MSR_AMD64_OSVW_ID_LENGTH:
4468
if (!guest_cpu_cap_has(vcpu, X86_FEATURE_OSVW))
4469
return 1;
4470
msr_info->data = vcpu->arch.osvw.length;
4471
break;
4472
case MSR_AMD64_OSVW_STATUS:
4473
if (!guest_cpu_cap_has(vcpu, X86_FEATURE_OSVW))
4474
return 1;
4475
msr_info->data = vcpu->arch.osvw.status;
4476
break;
4477
case MSR_PLATFORM_INFO:
4478
if (!msr_info->host_initiated &&
4479
!vcpu->kvm->arch.guest_can_read_msr_platform_info)
4480
return 1;
4481
msr_info->data = vcpu->arch.msr_platform_info;
4482
break;
4483
case MSR_MISC_FEATURES_ENABLES:
4484
msr_info->data = vcpu->arch.msr_misc_features_enables;
4485
break;
4486
case MSR_K7_HWCR:
4487
msr_info->data = vcpu->arch.msr_hwcr;
4488
break;
4489
#ifdef CONFIG_X86_64
4490
case MSR_IA32_XFD:
4491
if (!msr_info->host_initiated &&
4492
!guest_cpu_cap_has(vcpu, X86_FEATURE_XFD))
4493
return 1;
4494
4495
msr_info->data = vcpu->arch.guest_fpu.fpstate->xfd;
4496
break;
4497
case MSR_IA32_XFD_ERR:
4498
if (!msr_info->host_initiated &&
4499
!guest_cpu_cap_has(vcpu, X86_FEATURE_XFD))
4500
return 1;
4501
4502
msr_info->data = vcpu->arch.guest_fpu.xfd_err;
4503
break;
4504
#endif
4505
default:
4506
if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
4507
return kvm_pmu_get_msr(vcpu, msr_info);
4508
4509
return KVM_MSR_RET_UNSUPPORTED;
4510
}
4511
return 0;
4512
}
4513
EXPORT_SYMBOL_GPL(kvm_get_msr_common);
4514
4515
/*
4516
* Read or write a bunch of msrs. All parameters are kernel addresses.
4517
*
4518
* @return number of msrs set successfully.
4519
*/
4520
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
4521
struct kvm_msr_entry *entries,
4522
int (*do_msr)(struct kvm_vcpu *vcpu,
4523
unsigned index, u64 *data))
4524
{
4525
int i;
4526
4527
for (i = 0; i < msrs->nmsrs; ++i)
4528
if (do_msr(vcpu, entries[i].index, &entries[i].data))
4529
break;
4530
4531
return i;
4532
}
4533
4534
/*
4535
* Read or write a bunch of msrs. Parameters are user addresses.
4536
*
4537
* @return number of msrs set successfully.
4538
*/
4539
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
4540
int (*do_msr)(struct kvm_vcpu *vcpu,
4541
unsigned index, u64 *data),
4542
int writeback)
4543
{
4544
struct kvm_msrs msrs;
4545
struct kvm_msr_entry *entries;
4546
unsigned size;
4547
int r;
4548
4549
r = -EFAULT;
4550
if (copy_from_user(&msrs, user_msrs, sizeof(msrs)))
4551
goto out;
4552
4553
r = -E2BIG;
4554
if (msrs.nmsrs >= MAX_IO_MSRS)
4555
goto out;
4556
4557
size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
4558
entries = memdup_user(user_msrs->entries, size);
4559
if (IS_ERR(entries)) {
4560
r = PTR_ERR(entries);
4561
goto out;
4562
}
4563
4564
r = __msr_io(vcpu, &msrs, entries, do_msr);
4565
4566
if (writeback && copy_to_user(user_msrs->entries, entries, size))
4567
r = -EFAULT;
4568
4569
kfree(entries);
4570
out:
4571
return r;
4572
}
4573
4574
static inline bool kvm_can_mwait_in_guest(void)
4575
{
4576
return boot_cpu_has(X86_FEATURE_MWAIT) &&
4577
!boot_cpu_has_bug(X86_BUG_MONITOR) &&
4578
boot_cpu_has(X86_FEATURE_ARAT);
4579
}
4580
4581
static u64 kvm_get_allowed_disable_exits(void)
4582
{
4583
u64 r = KVM_X86_DISABLE_EXITS_PAUSE;
4584
4585
if (boot_cpu_has(X86_FEATURE_APERFMPERF))
4586
r |= KVM_X86_DISABLE_EXITS_APERFMPERF;
4587
4588
if (!mitigate_smt_rsb) {
4589
r |= KVM_X86_DISABLE_EXITS_HLT |
4590
KVM_X86_DISABLE_EXITS_CSTATE;
4591
4592
if (kvm_can_mwait_in_guest())
4593
r |= KVM_X86_DISABLE_EXITS_MWAIT;
4594
}
4595
return r;
4596
}
4597
4598
#ifdef CONFIG_KVM_HYPERV
4599
static int kvm_ioctl_get_supported_hv_cpuid(struct kvm_vcpu *vcpu,
4600
struct kvm_cpuid2 __user *cpuid_arg)
4601
{
4602
struct kvm_cpuid2 cpuid;
4603
int r;
4604
4605
r = -EFAULT;
4606
if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
4607
return r;
4608
4609
r = kvm_get_hv_cpuid(vcpu, &cpuid, cpuid_arg->entries);
4610
if (r)
4611
return r;
4612
4613
r = -EFAULT;
4614
if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))
4615
return r;
4616
4617
return 0;
4618
}
4619
#endif
4620
4621
static bool kvm_is_vm_type_supported(unsigned long type)
4622
{
4623
return type < 32 && (kvm_caps.supported_vm_types & BIT(type));
4624
}
4625
4626
static inline u64 kvm_sync_valid_fields(struct kvm *kvm)
4627
{
4628
return kvm && kvm->arch.has_protected_state ? 0 : KVM_SYNC_X86_VALID_FIELDS;
4629
}
4630
4631
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
4632
{
4633
int r = 0;
4634
4635
switch (ext) {
4636
case KVM_CAP_IRQCHIP:
4637
case KVM_CAP_HLT:
4638
case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
4639
case KVM_CAP_SET_TSS_ADDR:
4640
case KVM_CAP_EXT_CPUID:
4641
case KVM_CAP_EXT_EMUL_CPUID:
4642
case KVM_CAP_CLOCKSOURCE:
4643
#ifdef CONFIG_KVM_IOAPIC
4644
case KVM_CAP_PIT:
4645
case KVM_CAP_PIT2:
4646
case KVM_CAP_PIT_STATE2:
4647
case KVM_CAP_REINJECT_CONTROL:
4648
#endif
4649
case KVM_CAP_NOP_IO_DELAY:
4650
case KVM_CAP_MP_STATE:
4651
case KVM_CAP_SYNC_MMU:
4652
case KVM_CAP_USER_NMI:
4653
case KVM_CAP_IRQ_INJECT_STATUS:
4654
case KVM_CAP_IOEVENTFD:
4655
case KVM_CAP_IOEVENTFD_NO_LENGTH:
4656
4657
case KVM_CAP_SET_IDENTITY_MAP_ADDR:
4658
case KVM_CAP_VCPU_EVENTS:
4659
#ifdef CONFIG_KVM_HYPERV
4660
case KVM_CAP_HYPERV:
4661
case KVM_CAP_HYPERV_VAPIC:
4662
case KVM_CAP_HYPERV_SPIN:
4663
case KVM_CAP_HYPERV_TIME:
4664
case KVM_CAP_HYPERV_SYNIC:
4665
case KVM_CAP_HYPERV_SYNIC2:
4666
case KVM_CAP_HYPERV_VP_INDEX:
4667
case KVM_CAP_HYPERV_EVENTFD:
4668
case KVM_CAP_HYPERV_TLBFLUSH:
4669
case KVM_CAP_HYPERV_SEND_IPI:
4670
case KVM_CAP_HYPERV_CPUID:
4671
case KVM_CAP_HYPERV_ENFORCE_CPUID:
4672
case KVM_CAP_SYS_HYPERV_CPUID:
4673
#endif
4674
case KVM_CAP_PCI_SEGMENT:
4675
case KVM_CAP_DEBUGREGS:
4676
case KVM_CAP_X86_ROBUST_SINGLESTEP:
4677
case KVM_CAP_XSAVE:
4678
case KVM_CAP_ASYNC_PF:
4679
case KVM_CAP_ASYNC_PF_INT:
4680
case KVM_CAP_GET_TSC_KHZ:
4681
case KVM_CAP_KVMCLOCK_CTRL:
4682
case KVM_CAP_IOAPIC_POLARITY_IGNORED:
4683
case KVM_CAP_TSC_DEADLINE_TIMER:
4684
case KVM_CAP_DISABLE_QUIRKS:
4685
case KVM_CAP_SET_BOOT_CPU_ID:
4686
case KVM_CAP_SPLIT_IRQCHIP:
4687
case KVM_CAP_IMMEDIATE_EXIT:
4688
case KVM_CAP_PMU_EVENT_FILTER:
4689
case KVM_CAP_PMU_EVENT_MASKED_EVENTS:
4690
case KVM_CAP_GET_MSR_FEATURES:
4691
case KVM_CAP_MSR_PLATFORM_INFO:
4692
case KVM_CAP_EXCEPTION_PAYLOAD:
4693
case KVM_CAP_X86_TRIPLE_FAULT_EVENT:
4694
case KVM_CAP_SET_GUEST_DEBUG:
4695
case KVM_CAP_LAST_CPU:
4696
case KVM_CAP_X86_USER_SPACE_MSR:
4697
case KVM_CAP_X86_MSR_FILTER:
4698
case KVM_CAP_ENFORCE_PV_FEATURE_CPUID:
4699
#ifdef CONFIG_X86_SGX_KVM
4700
case KVM_CAP_SGX_ATTRIBUTE:
4701
#endif
4702
case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM:
4703
case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM:
4704
case KVM_CAP_SREGS2:
4705
case KVM_CAP_EXIT_ON_EMULATION_FAILURE:
4706
case KVM_CAP_VCPU_ATTRIBUTES:
4707
case KVM_CAP_SYS_ATTRIBUTES:
4708
case KVM_CAP_VAPIC:
4709
case KVM_CAP_ENABLE_CAP:
4710
case KVM_CAP_VM_DISABLE_NX_HUGE_PAGES:
4711
case KVM_CAP_IRQFD_RESAMPLE:
4712
case KVM_CAP_MEMORY_FAULT_INFO:
4713
case KVM_CAP_X86_GUEST_MODE:
4714
r = 1;
4715
break;
4716
case KVM_CAP_PRE_FAULT_MEMORY:
4717
r = tdp_enabled;
4718
break;
4719
case KVM_CAP_X86_APIC_BUS_CYCLES_NS:
4720
r = APIC_BUS_CYCLE_NS_DEFAULT;
4721
break;
4722
case KVM_CAP_EXIT_HYPERCALL:
4723
r = KVM_EXIT_HYPERCALL_VALID_MASK;
4724
break;
4725
case KVM_CAP_SET_GUEST_DEBUG2:
4726
return KVM_GUESTDBG_VALID_MASK;
4727
#ifdef CONFIG_KVM_XEN
4728
case KVM_CAP_XEN_HVM:
4729
r = KVM_XEN_HVM_CONFIG_HYPERCALL_MSR |
4730
KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL |
4731
KVM_XEN_HVM_CONFIG_SHARED_INFO |
4732
KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL |
4733
KVM_XEN_HVM_CONFIG_EVTCHN_SEND |
4734
KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE |
4735
KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA;
4736
if (sched_info_on())
4737
r |= KVM_XEN_HVM_CONFIG_RUNSTATE |
4738
KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG;
4739
break;
4740
#endif
4741
case KVM_CAP_SYNC_REGS:
4742
r = kvm_sync_valid_fields(kvm);
4743
break;
4744
case KVM_CAP_ADJUST_CLOCK:
4745
r = KVM_CLOCK_VALID_FLAGS;
4746
break;
4747
case KVM_CAP_X86_DISABLE_EXITS:
4748
r = kvm_get_allowed_disable_exits();
4749
break;
4750
case KVM_CAP_X86_SMM:
4751
if (!IS_ENABLED(CONFIG_KVM_SMM))
4752
break;
4753
4754
/* SMBASE is usually relocated above 1M on modern chipsets,
4755
* and SMM handlers might indeed rely on 4G segment limits,
4756
* so do not report SMM to be available if real mode is
4757
* emulated via vm86 mode. Still, do not go to great lengths
4758
* to avoid userspace's usage of the feature, because it is a
4759
* fringe case that is not enabled except via specific settings
4760
* of the module parameters.
4761
*/
4762
r = kvm_x86_call(has_emulated_msr)(kvm, MSR_IA32_SMBASE);
4763
break;
4764
case KVM_CAP_NR_VCPUS:
4765
r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
4766
break;
4767
case KVM_CAP_MAX_VCPUS:
4768
r = KVM_MAX_VCPUS;
4769
if (kvm)
4770
r = kvm->max_vcpus;
4771
break;
4772
case KVM_CAP_MAX_VCPU_ID:
4773
r = KVM_MAX_VCPU_IDS;
4774
break;
4775
case KVM_CAP_PV_MMU: /* obsolete */
4776
r = 0;
4777
break;
4778
case KVM_CAP_MCE:
4779
r = KVM_MAX_MCE_BANKS;
4780
break;
4781
case KVM_CAP_XCRS:
4782
r = boot_cpu_has(X86_FEATURE_XSAVE);
4783
break;
4784
case KVM_CAP_TSC_CONTROL:
4785
case KVM_CAP_VM_TSC_CONTROL:
4786
r = kvm_caps.has_tsc_control;
4787
break;
4788
case KVM_CAP_X2APIC_API:
4789
r = KVM_X2APIC_API_VALID_FLAGS;
4790
break;
4791
case KVM_CAP_NESTED_STATE:
4792
r = kvm_x86_ops.nested_ops->get_state ?
4793
kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0) : 0;
4794
break;
4795
#ifdef CONFIG_KVM_HYPERV
4796
case KVM_CAP_HYPERV_DIRECT_TLBFLUSH:
4797
r = kvm_x86_ops.enable_l2_tlb_flush != NULL;
4798
break;
4799
case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
4800
r = kvm_x86_ops.nested_ops->enable_evmcs != NULL;
4801
break;
4802
#endif
4803
case KVM_CAP_SMALLER_MAXPHYADDR:
4804
r = (int) allow_smaller_maxphyaddr;
4805
break;
4806
case KVM_CAP_STEAL_TIME:
4807
r = sched_info_on();
4808
break;
4809
case KVM_CAP_X86_BUS_LOCK_EXIT:
4810
if (kvm_caps.has_bus_lock_exit)
4811
r = KVM_BUS_LOCK_DETECTION_OFF |
4812
KVM_BUS_LOCK_DETECTION_EXIT;
4813
else
4814
r = 0;
4815
break;
4816
case KVM_CAP_XSAVE2: {
4817
r = xstate_required_size(kvm_get_filtered_xcr0(), false);
4818
if (r < sizeof(struct kvm_xsave))
4819
r = sizeof(struct kvm_xsave);
4820
break;
4821
}
4822
case KVM_CAP_PMU_CAPABILITY:
4823
r = enable_pmu ? KVM_CAP_PMU_VALID_MASK : 0;
4824
break;
4825
case KVM_CAP_DISABLE_QUIRKS2:
4826
r = kvm_caps.supported_quirks;
4827
break;
4828
case KVM_CAP_X86_NOTIFY_VMEXIT:
4829
r = kvm_caps.has_notify_vmexit;
4830
break;
4831
case KVM_CAP_VM_TYPES:
4832
r = kvm_caps.supported_vm_types;
4833
break;
4834
case KVM_CAP_READONLY_MEM:
4835
r = kvm ? kvm_arch_has_readonly_mem(kvm) : 1;
4836
break;
4837
default:
4838
break;
4839
}
4840
return r;
4841
}
4842
4843
static int __kvm_x86_dev_get_attr(struct kvm_device_attr *attr, u64 *val)
4844
{
4845
if (attr->group) {
4846
if (kvm_x86_ops.dev_get_attr)
4847
return kvm_x86_call(dev_get_attr)(attr->group, attr->attr, val);
4848
return -ENXIO;
4849
}
4850
4851
switch (attr->attr) {
4852
case KVM_X86_XCOMP_GUEST_SUPP:
4853
*val = kvm_caps.supported_xcr0;
4854
return 0;
4855
default:
4856
return -ENXIO;
4857
}
4858
}
4859
4860
static int kvm_x86_dev_get_attr(struct kvm_device_attr *attr)
4861
{
4862
u64 __user *uaddr = u64_to_user_ptr(attr->addr);
4863
int r;
4864
u64 val;
4865
4866
r = __kvm_x86_dev_get_attr(attr, &val);
4867
if (r < 0)
4868
return r;
4869
4870
if (put_user(val, uaddr))
4871
return -EFAULT;
4872
4873
return 0;
4874
}
4875
4876
static int kvm_x86_dev_has_attr(struct kvm_device_attr *attr)
4877
{
4878
u64 val;
4879
4880
return __kvm_x86_dev_get_attr(attr, &val);
4881
}
4882
4883
long kvm_arch_dev_ioctl(struct file *filp,
4884
unsigned int ioctl, unsigned long arg)
4885
{
4886
void __user *argp = (void __user *)arg;
4887
long r;
4888
4889
switch (ioctl) {
4890
case KVM_GET_MSR_INDEX_LIST: {
4891
struct kvm_msr_list __user *user_msr_list = argp;
4892
struct kvm_msr_list msr_list;
4893
unsigned n;
4894
4895
r = -EFAULT;
4896
if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list)))
4897
goto out;
4898
n = msr_list.nmsrs;
4899
msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs;
4900
if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list)))
4901
goto out;
4902
r = -E2BIG;
4903
if (n < msr_list.nmsrs)
4904
goto out;
4905
r = -EFAULT;
4906
if (copy_to_user(user_msr_list->indices, &msrs_to_save,
4907
num_msrs_to_save * sizeof(u32)))
4908
goto out;
4909
if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
4910
&emulated_msrs,
4911
num_emulated_msrs * sizeof(u32)))
4912
goto out;
4913
r = 0;
4914
break;
4915
}
4916
case KVM_GET_SUPPORTED_CPUID:
4917
case KVM_GET_EMULATED_CPUID: {
4918
struct kvm_cpuid2 __user *cpuid_arg = argp;
4919
struct kvm_cpuid2 cpuid;
4920
4921
r = -EFAULT;
4922
if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
4923
goto out;
4924
4925
r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries,
4926
ioctl);
4927
if (r)
4928
goto out;
4929
4930
r = -EFAULT;
4931
if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))
4932
goto out;
4933
r = 0;
4934
break;
4935
}
4936
case KVM_X86_GET_MCE_CAP_SUPPORTED:
4937
r = -EFAULT;
4938
if (copy_to_user(argp, &kvm_caps.supported_mce_cap,
4939
sizeof(kvm_caps.supported_mce_cap)))
4940
goto out;
4941
r = 0;
4942
break;
4943
case KVM_GET_MSR_FEATURE_INDEX_LIST: {
4944
struct kvm_msr_list __user *user_msr_list = argp;
4945
struct kvm_msr_list msr_list;
4946
unsigned int n;
4947
4948
r = -EFAULT;
4949
if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list)))
4950
goto out;
4951
n = msr_list.nmsrs;
4952
msr_list.nmsrs = num_msr_based_features;
4953
if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list)))
4954
goto out;
4955
r = -E2BIG;
4956
if (n < msr_list.nmsrs)
4957
goto out;
4958
r = -EFAULT;
4959
if (copy_to_user(user_msr_list->indices, &msr_based_features,
4960
num_msr_based_features * sizeof(u32)))
4961
goto out;
4962
r = 0;
4963
break;
4964
}
4965
case KVM_GET_MSRS:
4966
r = msr_io(NULL, argp, do_get_feature_msr, 1);
4967
break;
4968
#ifdef CONFIG_KVM_HYPERV
4969
case KVM_GET_SUPPORTED_HV_CPUID:
4970
r = kvm_ioctl_get_supported_hv_cpuid(NULL, argp);
4971
break;
4972
#endif
4973
case KVM_GET_DEVICE_ATTR: {
4974
struct kvm_device_attr attr;
4975
r = -EFAULT;
4976
if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
4977
break;
4978
r = kvm_x86_dev_get_attr(&attr);
4979
break;
4980
}
4981
case KVM_HAS_DEVICE_ATTR: {
4982
struct kvm_device_attr attr;
4983
r = -EFAULT;
4984
if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
4985
break;
4986
r = kvm_x86_dev_has_attr(&attr);
4987
break;
4988
}
4989
default:
4990
r = -EINVAL;
4991
break;
4992
}
4993
out:
4994
return r;
4995
}
4996
4997
static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
4998
{
4999
return kvm_arch_has_noncoherent_dma(vcpu->kvm);
5000
}
5001
5002
static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu);
5003
5004
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
5005
{
5006
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
5007
5008
vcpu->arch.l1tf_flush_l1d = true;
5009
5010
if (vcpu->scheduled_out && pmu->version && pmu->event_count) {
5011
pmu->need_cleanup = true;
5012
kvm_make_request(KVM_REQ_PMU, vcpu);
5013
}
5014
5015
/* Address WBINVD may be executed by guest */
5016
if (need_emulate_wbinvd(vcpu)) {
5017
if (kvm_x86_call(has_wbinvd_exit)())
5018
cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
5019
else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
5020
wbinvd_on_cpu(vcpu->cpu);
5021
}
5022
5023
kvm_x86_call(vcpu_load)(vcpu, cpu);
5024
5025
if (vcpu != per_cpu(last_vcpu, cpu)) {
5026
/*
5027
* Flush the branch predictor when switching vCPUs on the same
5028
* physical CPU, as each vCPU needs its own branch prediction
5029
* domain. No IBPB is needed when switching between L1 and L2
5030
* on the same vCPU unless IBRS is advertised to the vCPU; that
5031
* is handled on the nested VM-Exit path.
5032
*/
5033
if (static_branch_likely(&switch_vcpu_ibpb))
5034
indirect_branch_prediction_barrier();
5035
per_cpu(last_vcpu, cpu) = vcpu;
5036
}
5037
5038
/* Save host pkru register if supported */
5039
vcpu->arch.host_pkru = read_pkru();
5040
5041
/* Apply any externally detected TSC adjustments (due to suspend) */
5042
if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
5043
adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
5044
vcpu->arch.tsc_offset_adjustment = 0;
5045
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
5046
}
5047
5048
if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) {
5049
s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
5050
rdtsc() - vcpu->arch.last_host_tsc;
5051
if (tsc_delta < 0)
5052
mark_tsc_unstable("KVM discovered backwards TSC");
5053
5054
if (kvm_check_tsc_unstable()) {
5055
u64 offset = kvm_compute_l1_tsc_offset(vcpu,
5056
vcpu->arch.last_guest_tsc);
5057
kvm_vcpu_write_tsc_offset(vcpu, offset);
5058
if (!vcpu->arch.guest_tsc_protected)
5059
vcpu->arch.tsc_catchup = 1;
5060
}
5061
5062
if (kvm_lapic_hv_timer_in_use(vcpu))
5063
kvm_lapic_restart_hv_timer(vcpu);
5064
5065
/*
5066
* On a host with synchronized TSC, there is no need to update
5067
* kvmclock on vcpu->cpu migration
5068
*/
5069
if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
5070
kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
5071
if (vcpu->cpu != cpu)
5072
kvm_make_request(KVM_REQ_MIGRATE_TIMER, vcpu);
5073
vcpu->cpu = cpu;
5074
}
5075
5076
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
5077
}
5078
5079
static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
5080
{
5081
struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
5082
struct kvm_steal_time __user *st;
5083
struct kvm_memslots *slots;
5084
static const u8 preempted = KVM_VCPU_PREEMPTED;
5085
gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
5086
5087
/*
5088
* The vCPU can be marked preempted if and only if the VM-Exit was on
5089
* an instruction boundary and will not trigger guest emulation of any
5090
* kind (see vcpu_run). Vendor specific code controls (conservatively)
5091
* when this is true, for example allowing the vCPU to be marked
5092
* preempted if and only if the VM-Exit was due to a host interrupt.
5093
*/
5094
if (!vcpu->arch.at_instruction_boundary) {
5095
vcpu->stat.preemption_other++;
5096
return;
5097
}
5098
5099
vcpu->stat.preemption_reported++;
5100
if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
5101
return;
5102
5103
if (vcpu->arch.st.preempted)
5104
return;
5105
5106
/* This happens on process exit */
5107
if (unlikely(current->mm != vcpu->kvm->mm))
5108
return;
5109
5110
slots = kvm_memslots(vcpu->kvm);
5111
5112
if (unlikely(slots->generation != ghc->generation ||
5113
gpa != ghc->gpa ||
5114
kvm_is_error_hva(ghc->hva) || !ghc->memslot))
5115
return;
5116
5117
st = (struct kvm_steal_time __user *)ghc->hva;
5118
BUILD_BUG_ON(sizeof(st->preempted) != sizeof(preempted));
5119
5120
if (!copy_to_user_nofault(&st->preempted, &preempted, sizeof(preempted)))
5121
vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
5122
5123
mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
5124
}
5125
5126
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
5127
{
5128
int idx;
5129
5130
if (vcpu->preempted) {
5131
/*
5132
* Assume protected guests are in-kernel. Inefficient yielding
5133
* due to false positives is preferable to never yielding due
5134
* to false negatives.
5135
*/
5136
vcpu->arch.preempted_in_kernel = vcpu->arch.guest_state_protected ||
5137
!kvm_x86_call(get_cpl_no_cache)(vcpu);
5138
5139
/*
5140
* Take the srcu lock as memslots will be accessed to check the gfn
5141
* cache generation against the memslots generation.
5142
*/
5143
idx = srcu_read_lock(&vcpu->kvm->srcu);
5144
if (kvm_xen_msr_enabled(vcpu->kvm))
5145
kvm_xen_runstate_set_preempted(vcpu);
5146
else
5147
kvm_steal_time_set_preempted(vcpu);
5148
srcu_read_unlock(&vcpu->kvm->srcu, idx);
5149
}
5150
5151
kvm_x86_call(vcpu_put)(vcpu);
5152
vcpu->arch.last_host_tsc = rdtsc();
5153
}
5154
5155
static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
5156
struct kvm_lapic_state *s)
5157
{
5158
if (vcpu->arch.apic->guest_apic_protected)
5159
return -EINVAL;
5160
5161
kvm_x86_call(sync_pir_to_irr)(vcpu);
5162
5163
return kvm_apic_get_state(vcpu, s);
5164
}
5165
5166
static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
5167
struct kvm_lapic_state *s)
5168
{
5169
int r;
5170
5171
if (vcpu->arch.apic->guest_apic_protected)
5172
return -EINVAL;
5173
5174
r = kvm_apic_set_state(vcpu, s);
5175
if (r)
5176
return r;
5177
update_cr8_intercept(vcpu);
5178
5179
return 0;
5180
}
5181
5182
static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
5183
{
5184
/*
5185
* We can accept userspace's request for interrupt injection
5186
* as long as we have a place to store the interrupt number.
5187
* The actual injection will happen when the CPU is able to
5188
* deliver the interrupt.
5189
*/
5190
if (kvm_cpu_has_extint(vcpu))
5191
return false;
5192
5193
/* Acknowledging ExtINT does not happen if LINT0 is masked. */
5194
return (!lapic_in_kernel(vcpu) ||
5195
kvm_apic_accept_pic_intr(vcpu));
5196
}
5197
5198
static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
5199
{
5200
/*
5201
* Do not cause an interrupt window exit if an exception
5202
* is pending or an event needs reinjection; userspace
5203
* might want to inject the interrupt manually using KVM_SET_REGS
5204
* or KVM_SET_SREGS. For that to work, we must be at an
5205
* instruction boundary and with no events half-injected.
5206
*/
5207
return (kvm_arch_interrupt_allowed(vcpu) &&
5208
kvm_cpu_accept_dm_intr(vcpu) &&
5209
!kvm_event_needs_reinjection(vcpu) &&
5210
!kvm_is_exception_pending(vcpu));
5211
}
5212
5213
static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
5214
struct kvm_interrupt *irq)
5215
{
5216
if (irq->irq >= KVM_NR_INTERRUPTS)
5217
return -EINVAL;
5218
5219
if (!irqchip_in_kernel(vcpu->kvm)) {
5220
kvm_queue_interrupt(vcpu, irq->irq, false);
5221
kvm_make_request(KVM_REQ_EVENT, vcpu);
5222
return 0;
5223
}
5224
5225
/*
5226
* With in-kernel LAPIC, we only use this to inject EXTINT, so
5227
* fail for in-kernel 8259.
5228
*/
5229
if (pic_in_kernel(vcpu->kvm))
5230
return -ENXIO;
5231
5232
if (vcpu->arch.pending_external_vector != -1)
5233
return -EEXIST;
5234
5235
vcpu->arch.pending_external_vector = irq->irq;
5236
kvm_make_request(KVM_REQ_EVENT, vcpu);
5237
return 0;
5238
}
5239
5240
static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
5241
{
5242
kvm_inject_nmi(vcpu);
5243
5244
return 0;
5245
}
5246
5247
static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
5248
struct kvm_tpr_access_ctl *tac)
5249
{
5250
if (tac->flags)
5251
return -EINVAL;
5252
vcpu->arch.tpr_access_reporting = !!tac->enabled;
5253
return 0;
5254
}
5255
5256
static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
5257
u64 mcg_cap)
5258
{
5259
int r;
5260
unsigned bank_num = mcg_cap & 0xff, bank;
5261
5262
r = -EINVAL;
5263
if (!bank_num || bank_num > KVM_MAX_MCE_BANKS)
5264
goto out;
5265
if (mcg_cap & ~(kvm_caps.supported_mce_cap | 0xff | 0xff0000))
5266
goto out;
5267
r = 0;
5268
vcpu->arch.mcg_cap = mcg_cap;
5269
/* Init IA32_MCG_CTL to all 1s */
5270
if (mcg_cap & MCG_CTL_P)
5271
vcpu->arch.mcg_ctl = ~(u64)0;
5272
/* Init IA32_MCi_CTL to all 1s, IA32_MCi_CTL2 to all 0s */
5273
for (bank = 0; bank < bank_num; bank++) {
5274
vcpu->arch.mce_banks[bank*4] = ~(u64)0;
5275
if (mcg_cap & MCG_CMCI_P)
5276
vcpu->arch.mci_ctl2_banks[bank] = 0;
5277
}
5278
5279
kvm_apic_after_set_mcg_cap(vcpu);
5280
5281
kvm_x86_call(setup_mce)(vcpu);
5282
out:
5283
return r;
5284
}
5285
5286
/*
5287
* Validate this is an UCNA (uncorrectable no action) error by checking the
5288
* MCG_STATUS and MCi_STATUS registers:
5289
* - none of the bits for Machine Check Exceptions are set
5290
* - both the VAL (valid) and UC (uncorrectable) bits are set
5291
* MCI_STATUS_PCC - Processor Context Corrupted
5292
* MCI_STATUS_S - Signaled as a Machine Check Exception
5293
* MCI_STATUS_AR - Software recoverable Action Required
5294
*/
5295
static bool is_ucna(struct kvm_x86_mce *mce)
5296
{
5297
return !mce->mcg_status &&
5298
!(mce->status & (MCI_STATUS_PCC | MCI_STATUS_S | MCI_STATUS_AR)) &&
5299
(mce->status & MCI_STATUS_VAL) &&
5300
(mce->status & MCI_STATUS_UC);
5301
}
5302
5303
static int kvm_vcpu_x86_set_ucna(struct kvm_vcpu *vcpu, struct kvm_x86_mce *mce, u64* banks)
5304
{
5305
u64 mcg_cap = vcpu->arch.mcg_cap;
5306
5307
banks[1] = mce->status;
5308
banks[2] = mce->addr;
5309
banks[3] = mce->misc;
5310
vcpu->arch.mcg_status = mce->mcg_status;
5311
5312
if (!(mcg_cap & MCG_CMCI_P) ||
5313
!(vcpu->arch.mci_ctl2_banks[mce->bank] & MCI_CTL2_CMCI_EN))
5314
return 0;
5315
5316
if (lapic_in_kernel(vcpu))
5317
kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTCMCI);
5318
5319
return 0;
5320
}
5321
5322
static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
5323
struct kvm_x86_mce *mce)
5324
{
5325
u64 mcg_cap = vcpu->arch.mcg_cap;
5326
unsigned bank_num = mcg_cap & 0xff;
5327
u64 *banks = vcpu->arch.mce_banks;
5328
5329
if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
5330
return -EINVAL;
5331
5332
banks += array_index_nospec(4 * mce->bank, 4 * bank_num);
5333
5334
if (is_ucna(mce))
5335
return kvm_vcpu_x86_set_ucna(vcpu, mce, banks);
5336
5337
/*
5338
* if IA32_MCG_CTL is not all 1s, the uncorrected error
5339
* reporting is disabled
5340
*/
5341
if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
5342
vcpu->arch.mcg_ctl != ~(u64)0)
5343
return 0;
5344
/*
5345
* if IA32_MCi_CTL is not all 1s, the uncorrected error
5346
* reporting is disabled for the bank
5347
*/
5348
if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
5349
return 0;
5350
if (mce->status & MCI_STATUS_UC) {
5351
if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
5352
!kvm_is_cr4_bit_set(vcpu, X86_CR4_MCE)) {
5353
kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
5354
return 0;
5355
}
5356
if (banks[1] & MCI_STATUS_VAL)
5357
mce->status |= MCI_STATUS_OVER;
5358
banks[2] = mce->addr;
5359
banks[3] = mce->misc;
5360
vcpu->arch.mcg_status = mce->mcg_status;
5361
banks[1] = mce->status;
5362
kvm_queue_exception(vcpu, MC_VECTOR);
5363
} else if (!(banks[1] & MCI_STATUS_VAL)
5364
|| !(banks[1] & MCI_STATUS_UC)) {
5365
if (banks[1] & MCI_STATUS_VAL)
5366
mce->status |= MCI_STATUS_OVER;
5367
banks[2] = mce->addr;
5368
banks[3] = mce->misc;
5369
banks[1] = mce->status;
5370
} else
5371
banks[1] |= MCI_STATUS_OVER;
5372
return 0;
5373
}
5374
5375
static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
5376
struct kvm_vcpu_events *events)
5377
{
5378
struct kvm_queued_exception *ex;
5379
5380
process_nmi(vcpu);
5381
5382
#ifdef CONFIG_KVM_SMM
5383
if (kvm_check_request(KVM_REQ_SMI, vcpu))
5384
process_smi(vcpu);
5385
#endif
5386
5387
/*
5388
* KVM's ABI only allows for one exception to be migrated. Luckily,
5389
* the only time there can be two queued exceptions is if there's a
5390
* non-exiting _injected_ exception, and a pending exiting exception.
5391
* In that case, ignore the VM-Exiting exception as it's an extension
5392
* of the injected exception.
5393
*/
5394
if (vcpu->arch.exception_vmexit.pending &&
5395
!vcpu->arch.exception.pending &&
5396
!vcpu->arch.exception.injected)
5397
ex = &vcpu->arch.exception_vmexit;
5398
else
5399
ex = &vcpu->arch.exception;
5400
5401
/*
5402
* In guest mode, payload delivery should be deferred if the exception
5403
* will be intercepted by L1, e.g. KVM should not modifying CR2 if L1
5404
* intercepts #PF, ditto for DR6 and #DBs. If the per-VM capability,
5405
* KVM_CAP_EXCEPTION_PAYLOAD, is not set, userspace may or may not
5406
* propagate the payload and so it cannot be safely deferred. Deliver
5407
* the payload if the capability hasn't been requested.
5408
*/
5409
if (!vcpu->kvm->arch.exception_payload_enabled &&
5410
ex->pending && ex->has_payload)
5411
kvm_deliver_exception_payload(vcpu, ex);
5412
5413
memset(events, 0, sizeof(*events));
5414
5415
/*
5416
* The API doesn't provide the instruction length for software
5417
* exceptions, so don't report them. As long as the guest RIP
5418
* isn't advanced, we should expect to encounter the exception
5419
* again.
5420
*/
5421
if (!kvm_exception_is_soft(ex->vector)) {
5422
events->exception.injected = ex->injected;
5423
events->exception.pending = ex->pending;
5424
/*
5425
* For ABI compatibility, deliberately conflate
5426
* pending and injected exceptions when
5427
* KVM_CAP_EXCEPTION_PAYLOAD isn't enabled.
5428
*/
5429
if (!vcpu->kvm->arch.exception_payload_enabled)
5430
events->exception.injected |= ex->pending;
5431
}
5432
events->exception.nr = ex->vector;
5433
events->exception.has_error_code = ex->has_error_code;
5434
events->exception.error_code = ex->error_code;
5435
events->exception_has_payload = ex->has_payload;
5436
events->exception_payload = ex->payload;
5437
5438
events->interrupt.injected =
5439
vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft;
5440
events->interrupt.nr = vcpu->arch.interrupt.nr;
5441
events->interrupt.shadow = kvm_x86_call(get_interrupt_shadow)(vcpu);
5442
5443
events->nmi.injected = vcpu->arch.nmi_injected;
5444
events->nmi.pending = kvm_get_nr_pending_nmis(vcpu);
5445
events->nmi.masked = kvm_x86_call(get_nmi_mask)(vcpu);
5446
5447
/* events->sipi_vector is never valid when reporting to user space */
5448
5449
#ifdef CONFIG_KVM_SMM
5450
events->smi.smm = is_smm(vcpu);
5451
events->smi.pending = vcpu->arch.smi_pending;
5452
events->smi.smm_inside_nmi =
5453
!!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK);
5454
#endif
5455
events->smi.latched_init = kvm_lapic_latched_init(vcpu);
5456
5457
events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
5458
| KVM_VCPUEVENT_VALID_SHADOW
5459
| KVM_VCPUEVENT_VALID_SMM);
5460
if (vcpu->kvm->arch.exception_payload_enabled)
5461
events->flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
5462
if (vcpu->kvm->arch.triple_fault_event) {
5463
events->triple_fault.pending = kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu);
5464
events->flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT;
5465
}
5466
}
5467
5468
static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
5469
struct kvm_vcpu_events *events)
5470
{
5471
if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
5472
| KVM_VCPUEVENT_VALID_SIPI_VECTOR
5473
| KVM_VCPUEVENT_VALID_SHADOW
5474
| KVM_VCPUEVENT_VALID_SMM
5475
| KVM_VCPUEVENT_VALID_PAYLOAD
5476
| KVM_VCPUEVENT_VALID_TRIPLE_FAULT))
5477
return -EINVAL;
5478
5479
if (events->flags & KVM_VCPUEVENT_VALID_PAYLOAD) {
5480
if (!vcpu->kvm->arch.exception_payload_enabled)
5481
return -EINVAL;
5482
if (events->exception.pending)
5483
events->exception.injected = 0;
5484
else
5485
events->exception_has_payload = 0;
5486
} else {
5487
events->exception.pending = 0;
5488
events->exception_has_payload = 0;
5489
}
5490
5491
if ((events->exception.injected || events->exception.pending) &&
5492
(events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
5493
return -EINVAL;
5494
5495
process_nmi(vcpu);
5496
5497
/*
5498
* Flag that userspace is stuffing an exception, the next KVM_RUN will
5499
* morph the exception to a VM-Exit if appropriate. Do this only for
5500
* pending exceptions, already-injected exceptions are not subject to
5501
* interception. Note, userspace that conflates pending and injected
5502
* is hosed, and will incorrectly convert an injected exception into a
5503
* pending exception, which in turn may cause a spurious VM-Exit.
5504
*/
5505
vcpu->arch.exception_from_userspace = events->exception.pending;
5506
5507
vcpu->arch.exception_vmexit.pending = false;
5508
5509
vcpu->arch.exception.injected = events->exception.injected;
5510
vcpu->arch.exception.pending = events->exception.pending;
5511
vcpu->arch.exception.vector = events->exception.nr;
5512
vcpu->arch.exception.has_error_code = events->exception.has_error_code;
5513
vcpu->arch.exception.error_code = events->exception.error_code;
5514
vcpu->arch.exception.has_payload = events->exception_has_payload;
5515
vcpu->arch.exception.payload = events->exception_payload;
5516
5517
vcpu->arch.interrupt.injected = events->interrupt.injected;
5518
vcpu->arch.interrupt.nr = events->interrupt.nr;
5519
vcpu->arch.interrupt.soft = events->interrupt.soft;
5520
if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
5521
kvm_x86_call(set_interrupt_shadow)(vcpu,
5522
events->interrupt.shadow);
5523
5524
vcpu->arch.nmi_injected = events->nmi.injected;
5525
if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) {
5526
vcpu->arch.nmi_pending = 0;
5527
atomic_set(&vcpu->arch.nmi_queued, events->nmi.pending);
5528
if (events->nmi.pending)
5529
kvm_make_request(KVM_REQ_NMI, vcpu);
5530
}
5531
kvm_x86_call(set_nmi_mask)(vcpu, events->nmi.masked);
5532
5533
if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR &&
5534
lapic_in_kernel(vcpu))
5535
vcpu->arch.apic->sipi_vector = events->sipi_vector;
5536
5537
if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
5538
#ifdef CONFIG_KVM_SMM
5539
if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
5540
kvm_leave_nested(vcpu);
5541
kvm_smm_changed(vcpu, events->smi.smm);
5542
}
5543
5544
vcpu->arch.smi_pending = events->smi.pending;
5545
5546
if (events->smi.smm) {
5547
if (events->smi.smm_inside_nmi)
5548
vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
5549
else
5550
vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK;
5551
}
5552
5553
#else
5554
if (events->smi.smm || events->smi.pending ||
5555
events->smi.smm_inside_nmi)
5556
return -EINVAL;
5557
#endif
5558
5559
if (lapic_in_kernel(vcpu)) {
5560
if (events->smi.latched_init)
5561
set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
5562
else
5563
clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
5564
}
5565
}
5566
5567
if (events->flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT) {
5568
if (!vcpu->kvm->arch.triple_fault_event)
5569
return -EINVAL;
5570
if (events->triple_fault.pending)
5571
kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
5572
else
5573
kvm_clear_request(KVM_REQ_TRIPLE_FAULT, vcpu);
5574
}
5575
5576
kvm_make_request(KVM_REQ_EVENT, vcpu);
5577
5578
return 0;
5579
}
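/*
 * Illustrative userspace sketch (assumptions, not part of KVM): a VMM
 * typically round-trips the state handled above across save/restore by
 * pairing the two vCPU ioctls. 'vcpu_fd' is an assumed vCPU file
 * descriptor obtained from KVM_CREATE_VCPU; unknown bits in
 * events->flags are rejected with -EINVAL by the validation above.
 *
 *	struct kvm_vcpu_events events;
 *
 *	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
 *		err(1, "KVM_GET_VCPU_EVENTS");
 *	// ... serialize/transfer 'events' ...
 *	if (ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events) < 0)
 *		err(1, "KVM_SET_VCPU_EVENTS");
 */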
5580
5581
static int kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
5582
struct kvm_debugregs *dbgregs)
5583
{
5584
unsigned int i;
5585
5586
if (vcpu->kvm->arch.has_protected_state &&
5587
vcpu->arch.guest_state_protected)
5588
return -EINVAL;
5589
5590
memset(dbgregs, 0, sizeof(*dbgregs));
5591
5592
BUILD_BUG_ON(ARRAY_SIZE(vcpu->arch.db) != ARRAY_SIZE(dbgregs->db));
5593
for (i = 0; i < ARRAY_SIZE(vcpu->arch.db); i++)
5594
dbgregs->db[i] = vcpu->arch.db[i];
5595
5596
dbgregs->dr6 = vcpu->arch.dr6;
5597
dbgregs->dr7 = vcpu->arch.dr7;
5598
return 0;
5599
}
5600
5601
static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
5602
struct kvm_debugregs *dbgregs)
5603
{
5604
unsigned int i;
5605
5606
if (vcpu->kvm->arch.has_protected_state &&
5607
vcpu->arch.guest_state_protected)
5608
return -EINVAL;
5609
5610
if (dbgregs->flags)
5611
return -EINVAL;
5612
5613
if (!kvm_dr6_valid(dbgregs->dr6))
5614
return -EINVAL;
5615
if (!kvm_dr7_valid(dbgregs->dr7))
5616
return -EINVAL;
5617
5618
for (i = 0; i < ARRAY_SIZE(vcpu->arch.db); i++)
5619
vcpu->arch.db[i] = dbgregs->db[i];
5620
5621
kvm_update_dr0123(vcpu);
5622
vcpu->arch.dr6 = dbgregs->dr6;
5623
vcpu->arch.dr7 = dbgregs->dr7;
5624
kvm_update_dr7(vcpu);
5625
5626
return 0;
5627
}
5628
5629
5630
static int kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu,
5631
u8 *state, unsigned int size)
5632
{
5633
/*
5634
* Only copy state for features that are enabled for the guest. The
5635
* state itself isn't problematic, but setting bits in the header for
5636
* features that are supported in *this* host but not exposed to the
5637
* guest can result in KVM_SET_XSAVE failing when live migrating to a
5638
* compatible host without the features that are NOT exposed to the
5639
* guest.
5640
*
5641
* FP+SSE can always be saved/restored via KVM_{G,S}ET_XSAVE, even if
5642
* XSAVE/XCR0 are not exposed to the guest, and even if XSAVE isn't
5643
* supported by the host.
5644
*/
5645
u64 supported_xcr0 = vcpu->arch.guest_supported_xcr0 |
5646
XFEATURE_MASK_FPSSE;
5647
5648
if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
5649
return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
5650
5651
fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, state, size,
5652
supported_xcr0, vcpu->arch.pkru);
5653
return 0;
5654
}
5655
5656
static int kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
5657
struct kvm_xsave *guest_xsave)
5658
{
5659
return kvm_vcpu_ioctl_x86_get_xsave2(vcpu, (void *)guest_xsave->region,
5660
sizeof(guest_xsave->region));
5661
}
5662
5663
static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
5664
struct kvm_xsave *guest_xsave)
5665
{
5666
if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
5667
return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
5668
5669
return fpu_copy_uabi_to_guest_fpstate(&vcpu->arch.guest_fpu,
5670
guest_xsave->region,
5671
kvm_caps.supported_xcr0,
5672
&vcpu->arch.pkru);
5673
}
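/*
 * Illustrative userspace sketch (assumptions, not part of KVM): the buffer
 * size expected by KVM_GET_XSAVE2 is discovered via
 * KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2) on the VM fd; 'vm_fd' and 'vcpu_fd'
 * are assumed descriptors.
 *
 *	int size = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_XSAVE2);
 *	struct kvm_xsave *xsave = calloc(1, size > 0 ? size : sizeof(*xsave));
 *
 *	ioctl(vcpu_fd, KVM_GET_XSAVE2, xsave);
 *	// ... transfer the buffer ...
 *	ioctl(vcpu_fd, KVM_SET_XSAVE, xsave);
 *
 * KVM_SET_XSAVE accepts the larger buffer as well; see the uabi_size
 * handling in kvm_arch_vcpu_ioctl() below.
 */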
5674
5675
static int kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
5676
struct kvm_xcrs *guest_xcrs)
5677
{
5678
if (vcpu->kvm->arch.has_protected_state &&
5679
vcpu->arch.guest_state_protected)
5680
return -EINVAL;
5681
5682
if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
5683
guest_xcrs->nr_xcrs = 0;
5684
return 0;
5685
}
5686
5687
guest_xcrs->nr_xcrs = 1;
5688
guest_xcrs->flags = 0;
5689
guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
5690
guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
5691
return 0;
5692
}
5693
5694
static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
5695
struct kvm_xcrs *guest_xcrs)
5696
{
5697
int i, r = 0;
5698
5699
if (vcpu->kvm->arch.has_protected_state &&
5700
vcpu->arch.guest_state_protected)
5701
return -EINVAL;
5702
5703
if (!boot_cpu_has(X86_FEATURE_XSAVE))
5704
return -EINVAL;
5705
5706
if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
5707
return -EINVAL;
5708
5709
for (i = 0; i < guest_xcrs->nr_xcrs; i++)
5710
/* Only support XCR0 currently */
5711
if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) {
5712
r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
5713
guest_xcrs->xcrs[i].value);
5714
break;
5715
}
5716
if (r)
5717
r = -EINVAL;
5718
return r;
5719
}
5720
5721
/*
5722
* kvm_set_guest_paused() indicates to the guest kernel that it has been
5723
* stopped by the hypervisor. This function will be called from the host only.
5724
* EINVAL is returned when the host attempts to set the flag for a guest that
5725
* does not support pv clocks.
5726
*/
5727
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
5728
{
5729
if (!vcpu->arch.pv_time.active)
5730
return -EINVAL;
5731
vcpu->arch.pvclock_set_guest_stopped_request = true;
5732
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
5733
return 0;
5734
}
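/*
 * Illustrative sketch (assumption, not part of KVM): userspace reaches this
 * via the KVM_KVMCLOCK_CTRL vCPU ioctl after the VM has been stopped for a
 * long time (e.g. while being debugged), so the guest's soft-lockup watchdog
 * does not fire when it resumes. 'vcpu_fd' is an assumed vCPU descriptor.
 *
 *	if (ioctl(vcpu_fd, KVM_KVMCLOCK_CTRL, 0) < 0 && errno == EINVAL)
 *		; // guest is not using the paravirtual clock, nothing to do
 */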
5735
5736
static int kvm_arch_tsc_has_attr(struct kvm_vcpu *vcpu,
5737
struct kvm_device_attr *attr)
5738
{
5739
int r;
5740
5741
switch (attr->attr) {
5742
case KVM_VCPU_TSC_OFFSET:
5743
r = 0;
5744
break;
5745
default:
5746
r = -ENXIO;
5747
}
5748
5749
return r;
5750
}
5751
5752
static int kvm_arch_tsc_get_attr(struct kvm_vcpu *vcpu,
5753
struct kvm_device_attr *attr)
5754
{
5755
u64 __user *uaddr = u64_to_user_ptr(attr->addr);
5756
int r;
5757
5758
switch (attr->attr) {
5759
case KVM_VCPU_TSC_OFFSET:
5760
r = -EFAULT;
5761
if (put_user(vcpu->arch.l1_tsc_offset, uaddr))
5762
break;
5763
r = 0;
5764
break;
5765
default:
5766
r = -ENXIO;
5767
}
5768
5769
return r;
5770
}
5771
5772
static int kvm_arch_tsc_set_attr(struct kvm_vcpu *vcpu,
5773
struct kvm_device_attr *attr)
5774
{
5775
u64 __user *uaddr = u64_to_user_ptr(attr->addr);
5776
struct kvm *kvm = vcpu->kvm;
5777
int r;
5778
5779
switch (attr->attr) {
5780
case KVM_VCPU_TSC_OFFSET: {
5781
u64 offset, tsc, ns;
5782
unsigned long flags;
5783
bool matched;
5784
5785
r = -EFAULT;
5786
if (get_user(offset, uaddr))
5787
break;
5788
5789
raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
5790
5791
matched = (vcpu->arch.virtual_tsc_khz &&
5792
kvm->arch.last_tsc_khz == vcpu->arch.virtual_tsc_khz &&
5793
kvm->arch.last_tsc_offset == offset);
5794
5795
tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio) + offset;
5796
ns = get_kvmclock_base_ns();
5797
5798
__kvm_synchronize_tsc(vcpu, offset, tsc, ns, matched, true);
5799
raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
5800
5801
r = 0;
5802
break;
5803
}
5804
default:
5805
r = -ENXIO;
5806
}
5807
5808
return r;
5809
}
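/*
 * Illustrative userspace sketch (assumptions, not part of KVM): the L1 TSC
 * offset handled above is exposed as a vCPU device attribute, so a VMM can
 * read it on the source and restore it on the destination. 'src_vcpu_fd'
 * and 'dst_vcpu_fd' are assumed vCPU descriptors.
 *
 *	__u64 offset;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_VCPU_TSC_CTRL,
 *		.attr  = KVM_VCPU_TSC_OFFSET,
 *		.addr  = (__u64)(unsigned long)&offset,
 *	};
 *
 *	ioctl(src_vcpu_fd, KVM_GET_DEVICE_ATTR, &attr);	// read the offset
 *	ioctl(dst_vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);	// restore the offset
 */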
5810
5811
static int kvm_vcpu_ioctl_device_attr(struct kvm_vcpu *vcpu,
5812
unsigned int ioctl,
5813
void __user *argp)
5814
{
5815
struct kvm_device_attr attr;
5816
int r;
5817
5818
if (copy_from_user(&attr, argp, sizeof(attr)))
5819
return -EFAULT;
5820
5821
if (attr.group != KVM_VCPU_TSC_CTRL)
5822
return -ENXIO;
5823
5824
switch (ioctl) {
5825
case KVM_HAS_DEVICE_ATTR:
5826
r = kvm_arch_tsc_has_attr(vcpu, &attr);
5827
break;
5828
case KVM_GET_DEVICE_ATTR:
5829
r = kvm_arch_tsc_get_attr(vcpu, &attr);
5830
break;
5831
case KVM_SET_DEVICE_ATTR:
5832
r = kvm_arch_tsc_set_attr(vcpu, &attr);
5833
break;
5834
}
5835
5836
return r;
5837
}
5838
5839
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
5840
struct kvm_enable_cap *cap)
5841
{
5842
if (cap->flags)
5843
return -EINVAL;
5844
5845
switch (cap->cap) {
5846
#ifdef CONFIG_KVM_HYPERV
5847
case KVM_CAP_HYPERV_SYNIC2:
5848
if (cap->args[0])
5849
return -EINVAL;
5850
fallthrough;
5851
5852
case KVM_CAP_HYPERV_SYNIC:
5853
if (!irqchip_in_kernel(vcpu->kvm))
5854
return -EINVAL;
5855
return kvm_hv_activate_synic(vcpu, cap->cap ==
5856
KVM_CAP_HYPERV_SYNIC2);
5857
case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
5858
{
5859
int r;
5860
uint16_t vmcs_version;
5861
void __user *user_ptr;
5862
5863
if (!kvm_x86_ops.nested_ops->enable_evmcs)
5864
return -ENOTTY;
5865
r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version);
5866
if (!r) {
5867
user_ptr = (void __user *)(uintptr_t)cap->args[0];
5868
if (copy_to_user(user_ptr, &vmcs_version,
5869
sizeof(vmcs_version)))
5870
r = -EFAULT;
5871
}
5872
return r;
5873
}
5874
case KVM_CAP_HYPERV_DIRECT_TLBFLUSH:
5875
if (!kvm_x86_ops.enable_l2_tlb_flush)
5876
return -ENOTTY;
5877
5878
return kvm_x86_call(enable_l2_tlb_flush)(vcpu);
5879
5880
case KVM_CAP_HYPERV_ENFORCE_CPUID:
5881
return kvm_hv_set_enforce_cpuid(vcpu, cap->args[0]);
5882
#endif
5883
5884
case KVM_CAP_ENFORCE_PV_FEATURE_CPUID:
5885
vcpu->arch.pv_cpuid.enforce = cap->args[0];
5886
return 0;
5887
default:
5888
return -EINVAL;
5889
}
5890
}
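/*
 * Illustrative sketch (assumptions, not part of KVM): the per-vCPU
 * capabilities handled above are enabled with the KVM_ENABLE_CAP ioctl on
 * an assumed vCPU descriptor 'vcpu_fd', e.g.:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_ENFORCE_PV_FEATURE_CPUID,
 *		.args[0] = 1,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */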
5891
5892
long kvm_arch_vcpu_ioctl(struct file *filp,
5893
unsigned int ioctl, unsigned long arg)
5894
{
5895
struct kvm_vcpu *vcpu = filp->private_data;
5896
void __user *argp = (void __user *)arg;
5897
int r;
5898
union {
5899
struct kvm_sregs2 *sregs2;
5900
struct kvm_lapic_state *lapic;
5901
struct kvm_xsave *xsave;
5902
struct kvm_xcrs *xcrs;
5903
void *buffer;
5904
} u;
5905
5906
vcpu_load(vcpu);
5907
5908
u.buffer = NULL;
5909
switch (ioctl) {
5910
case KVM_GET_LAPIC: {
5911
r = -EINVAL;
5912
if (!lapic_in_kernel(vcpu))
5913
goto out;
5914
u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
5915
5916
r = -ENOMEM;
5917
if (!u.lapic)
5918
goto out;
5919
r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
5920
if (r)
5921
goto out;
5922
r = -EFAULT;
5923
if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
5924
goto out;
5925
r = 0;
5926
break;
5927
}
5928
case KVM_SET_LAPIC: {
5929
r = -EINVAL;
5930
if (!lapic_in_kernel(vcpu))
5931
goto out;
5932
u.lapic = memdup_user(argp, sizeof(*u.lapic));
5933
if (IS_ERR(u.lapic)) {
5934
r = PTR_ERR(u.lapic);
5935
goto out_nofree;
5936
}
5937
5938
r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
5939
break;
5940
}
5941
case KVM_INTERRUPT: {
5942
struct kvm_interrupt irq;
5943
5944
r = -EFAULT;
5945
if (copy_from_user(&irq, argp, sizeof(irq)))
5946
goto out;
5947
r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
5948
break;
5949
}
5950
case KVM_NMI: {
5951
r = kvm_vcpu_ioctl_nmi(vcpu);
5952
break;
5953
}
5954
case KVM_SMI: {
5955
r = kvm_inject_smi(vcpu);
5956
break;
5957
}
5958
case KVM_SET_CPUID: {
5959
struct kvm_cpuid __user *cpuid_arg = argp;
5960
struct kvm_cpuid cpuid;
5961
5962
r = -EFAULT;
5963
if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
5964
goto out;
5965
r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
5966
break;
5967
}
5968
case KVM_SET_CPUID2: {
5969
struct kvm_cpuid2 __user *cpuid_arg = argp;
5970
struct kvm_cpuid2 cpuid;
5971
5972
r = -EFAULT;
5973
if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
5974
goto out;
5975
r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
5976
cpuid_arg->entries);
5977
break;
5978
}
5979
case KVM_GET_CPUID2: {
5980
struct kvm_cpuid2 __user *cpuid_arg = argp;
5981
struct kvm_cpuid2 cpuid;
5982
5983
r = -EFAULT;
5984
if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
5985
goto out;
5986
r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
5987
cpuid_arg->entries);
5988
if (r)
5989
goto out;
5990
r = -EFAULT;
5991
if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))
5992
goto out;
5993
r = 0;
5994
break;
5995
}
5996
case KVM_GET_MSRS: {
5997
int idx = srcu_read_lock(&vcpu->kvm->srcu);
5998
r = msr_io(vcpu, argp, do_get_msr, 1);
5999
srcu_read_unlock(&vcpu->kvm->srcu, idx);
6000
break;
6001
}
6002
case KVM_SET_MSRS: {
6003
int idx = srcu_read_lock(&vcpu->kvm->srcu);
6004
r = msr_io(vcpu, argp, do_set_msr, 0);
6005
srcu_read_unlock(&vcpu->kvm->srcu, idx);
6006
break;
6007
}
6008
case KVM_TPR_ACCESS_REPORTING: {
6009
struct kvm_tpr_access_ctl tac;
6010
6011
r = -EFAULT;
6012
if (copy_from_user(&tac, argp, sizeof(tac)))
6013
goto out;
6014
r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
6015
if (r)
6016
goto out;
6017
r = -EFAULT;
6018
if (copy_to_user(argp, &tac, sizeof(tac)))
6019
goto out;
6020
r = 0;
6021
break;
6022
};
6023
case KVM_SET_VAPIC_ADDR: {
6024
struct kvm_vapic_addr va;
6025
int idx;
6026
6027
r = -EINVAL;
6028
if (!lapic_in_kernel(vcpu))
6029
goto out;
6030
r = -EFAULT;
6031
if (copy_from_user(&va, argp, sizeof(va)))
6032
goto out;
6033
idx = srcu_read_lock(&vcpu->kvm->srcu);
6034
r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
6035
srcu_read_unlock(&vcpu->kvm->srcu, idx);
6036
break;
6037
}
6038
case KVM_X86_SETUP_MCE: {
6039
u64 mcg_cap;
6040
6041
r = -EFAULT;
6042
if (copy_from_user(&mcg_cap, argp, sizeof(mcg_cap)))
6043
goto out;
6044
r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
6045
break;
6046
}
6047
case KVM_X86_SET_MCE: {
6048
struct kvm_x86_mce mce;
6049
6050
r = -EFAULT;
6051
if (copy_from_user(&mce, argp, sizeof(mce)))
6052
goto out;
6053
r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
6054
break;
6055
}
6056
case KVM_GET_VCPU_EVENTS: {
6057
struct kvm_vcpu_events events;
6058
6059
kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
6060
6061
r = -EFAULT;
6062
if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
6063
break;
6064
r = 0;
6065
break;
6066
}
6067
case KVM_SET_VCPU_EVENTS: {
6068
struct kvm_vcpu_events events;
6069
6070
r = -EFAULT;
6071
if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
6072
break;
6073
6074
kvm_vcpu_srcu_read_lock(vcpu);
6075
r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
6076
kvm_vcpu_srcu_read_unlock(vcpu);
6077
break;
6078
}
6079
case KVM_GET_DEBUGREGS: {
6080
struct kvm_debugregs dbgregs;
6081
6082
r = kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
6083
if (r < 0)
6084
break;
6085
6086
r = -EFAULT;
6087
if (copy_to_user(argp, &dbgregs,
6088
sizeof(struct kvm_debugregs)))
6089
break;
6090
r = 0;
6091
break;
6092
}
6093
case KVM_SET_DEBUGREGS: {
6094
struct kvm_debugregs dbgregs;
6095
6096
r = -EFAULT;
6097
if (copy_from_user(&dbgregs, argp,
6098
sizeof(struct kvm_debugregs)))
6099
break;
6100
6101
r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
6102
break;
6103
}
6104
case KVM_GET_XSAVE: {
6105
r = -EINVAL;
6106
if (vcpu->arch.guest_fpu.uabi_size > sizeof(struct kvm_xsave))
6107
break;
6108
6109
u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
6110
r = -ENOMEM;
6111
if (!u.xsave)
6112
break;
6113
6114
r = kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
6115
if (r < 0)
6116
break;
6117
6118
r = -EFAULT;
6119
if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
6120
break;
6121
r = 0;
6122
break;
6123
}
6124
case KVM_SET_XSAVE: {
6125
int size = vcpu->arch.guest_fpu.uabi_size;
6126
6127
u.xsave = memdup_user(argp, size);
6128
if (IS_ERR(u.xsave)) {
6129
r = PTR_ERR(u.xsave);
6130
goto out_nofree;
6131
}
6132
6133
r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
6134
break;
6135
}
6136
6137
case KVM_GET_XSAVE2: {
6138
int size = vcpu->arch.guest_fpu.uabi_size;
6139
6140
u.xsave = kzalloc(size, GFP_KERNEL);
6141
r = -ENOMEM;
6142
if (!u.xsave)
6143
break;
6144
6145
r = kvm_vcpu_ioctl_x86_get_xsave2(vcpu, u.buffer, size);
6146
if (r < 0)
6147
break;
6148
6149
r = -EFAULT;
6150
if (copy_to_user(argp, u.xsave, size))
6151
break;
6152
6153
r = 0;
6154
break;
6155
}
6156
6157
case KVM_GET_XCRS: {
6158
u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
6159
r = -ENOMEM;
6160
if (!u.xcrs)
6161
break;
6162
6163
r = kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
6164
if (r < 0)
6165
break;
6166
6167
r = -EFAULT;
6168
if (copy_to_user(argp, u.xcrs,
6169
sizeof(struct kvm_xcrs)))
6170
break;
6171
r = 0;
6172
break;
6173
}
6174
case KVM_SET_XCRS: {
6175
u.xcrs = memdup_user(argp, sizeof(*u.xcrs));
6176
if (IS_ERR(u.xcrs)) {
6177
r = PTR_ERR(u.xcrs);
6178
goto out_nofree;
6179
}
6180
6181
r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
6182
break;
6183
}
6184
case KVM_SET_TSC_KHZ: {
6185
u32 user_tsc_khz;
6186
6187
r = -EINVAL;
6188
6189
if (vcpu->arch.guest_tsc_protected)
6190
goto out;
6191
6192
user_tsc_khz = (u32)arg;
6193
6194
if (kvm_caps.has_tsc_control &&
6195
user_tsc_khz >= kvm_caps.max_guest_tsc_khz)
6196
goto out;
6197
6198
if (user_tsc_khz == 0)
6199
user_tsc_khz = tsc_khz;
6200
6201
if (!kvm_set_tsc_khz(vcpu, user_tsc_khz))
6202
r = 0;
6203
6204
goto out;
6205
}
6206
case KVM_GET_TSC_KHZ: {
6207
r = vcpu->arch.virtual_tsc_khz;
6208
goto out;
6209
}
6210
case KVM_KVMCLOCK_CTRL: {
6211
r = kvm_set_guest_paused(vcpu);
6212
goto out;
6213
}
6214
case KVM_ENABLE_CAP: {
6215
struct kvm_enable_cap cap;
6216
6217
r = -EFAULT;
6218
if (copy_from_user(&cap, argp, sizeof(cap)))
6219
goto out;
6220
r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
6221
break;
6222
}
6223
case KVM_GET_NESTED_STATE: {
6224
struct kvm_nested_state __user *user_kvm_nested_state = argp;
6225
u32 user_data_size;
6226
6227
r = -EINVAL;
6228
if (!kvm_x86_ops.nested_ops->get_state)
6229
break;
6230
6231
BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size));
6232
r = -EFAULT;
6233
if (get_user(user_data_size, &user_kvm_nested_state->size))
6234
break;
6235
6236
r = kvm_x86_ops.nested_ops->get_state(vcpu, user_kvm_nested_state,
6237
user_data_size);
6238
if (r < 0)
6239
break;
6240
6241
if (r > user_data_size) {
6242
if (put_user(r, &user_kvm_nested_state->size))
6243
r = -EFAULT;
6244
else
6245
r = -E2BIG;
6246
break;
6247
}
6248
6249
r = 0;
6250
break;
6251
}
6252
case KVM_SET_NESTED_STATE: {
6253
struct kvm_nested_state __user *user_kvm_nested_state = argp;
6254
struct kvm_nested_state kvm_state;
6255
int idx;
6256
6257
r = -EINVAL;
6258
if (!kvm_x86_ops.nested_ops->set_state)
6259
break;
6260
6261
r = -EFAULT;
6262
if (copy_from_user(&kvm_state, user_kvm_nested_state, sizeof(kvm_state)))
6263
break;
6264
6265
r = -EINVAL;
6266
if (kvm_state.size < sizeof(kvm_state))
6267
break;
6268
6269
if (kvm_state.flags &
6270
~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE
6271
| KVM_STATE_NESTED_EVMCS | KVM_STATE_NESTED_MTF_PENDING
6272
| KVM_STATE_NESTED_GIF_SET))
6273
break;
6274
6275
/* nested_run_pending implies guest_mode. */
6276
if ((kvm_state.flags & KVM_STATE_NESTED_RUN_PENDING)
6277
&& !(kvm_state.flags & KVM_STATE_NESTED_GUEST_MODE))
6278
break;
6279
6280
idx = srcu_read_lock(&vcpu->kvm->srcu);
6281
r = kvm_x86_ops.nested_ops->set_state(vcpu, user_kvm_nested_state, &kvm_state);
6282
srcu_read_unlock(&vcpu->kvm->srcu, idx);
6283
break;
6284
}
6285
#ifdef CONFIG_KVM_HYPERV
6286
case KVM_GET_SUPPORTED_HV_CPUID:
6287
r = kvm_ioctl_get_supported_hv_cpuid(vcpu, argp);
6288
break;
6289
#endif
6290
#ifdef CONFIG_KVM_XEN
6291
case KVM_XEN_VCPU_GET_ATTR: {
6292
struct kvm_xen_vcpu_attr xva;
6293
6294
r = -EFAULT;
6295
if (copy_from_user(&xva, argp, sizeof(xva)))
6296
goto out;
6297
r = kvm_xen_vcpu_get_attr(vcpu, &xva);
6298
if (!r && copy_to_user(argp, &xva, sizeof(xva)))
6299
r = -EFAULT;
6300
break;
6301
}
6302
case KVM_XEN_VCPU_SET_ATTR: {
6303
struct kvm_xen_vcpu_attr xva;
6304
6305
r = -EFAULT;
6306
if (copy_from_user(&xva, argp, sizeof(xva)))
6307
goto out;
6308
r = kvm_xen_vcpu_set_attr(vcpu, &xva);
6309
break;
6310
}
6311
#endif
6312
case KVM_GET_SREGS2: {
6313
r = -EINVAL;
6314
if (vcpu->kvm->arch.has_protected_state &&
6315
vcpu->arch.guest_state_protected)
6316
goto out;
6317
6318
u.sregs2 = kzalloc(sizeof(struct kvm_sregs2), GFP_KERNEL);
6319
r = -ENOMEM;
6320
if (!u.sregs2)
6321
goto out;
6322
__get_sregs2(vcpu, u.sregs2);
6323
r = -EFAULT;
6324
if (copy_to_user(argp, u.sregs2, sizeof(struct kvm_sregs2)))
6325
goto out;
6326
r = 0;
6327
break;
6328
}
6329
case KVM_SET_SREGS2: {
6330
r = -EINVAL;
6331
if (vcpu->kvm->arch.has_protected_state &&
6332
vcpu->arch.guest_state_protected)
6333
goto out;
6334
6335
u.sregs2 = memdup_user(argp, sizeof(struct kvm_sregs2));
6336
if (IS_ERR(u.sregs2)) {
6337
r = PTR_ERR(u.sregs2);
6338
u.sregs2 = NULL;
6339
goto out;
6340
}
6341
r = __set_sregs2(vcpu, u.sregs2);
6342
break;
6343
}
6344
case KVM_HAS_DEVICE_ATTR:
6345
case KVM_GET_DEVICE_ATTR:
6346
case KVM_SET_DEVICE_ATTR:
6347
r = kvm_vcpu_ioctl_device_attr(vcpu, ioctl, argp);
6348
break;
6349
case KVM_MEMORY_ENCRYPT_OP:
6350
r = -ENOTTY;
6351
if (!kvm_x86_ops.vcpu_mem_enc_ioctl)
6352
goto out;
6353
r = kvm_x86_ops.vcpu_mem_enc_ioctl(vcpu, argp);
6354
break;
6355
default:
6356
r = -EINVAL;
6357
}
6358
out:
6359
kfree(u.buffer);
6360
out_nofree:
6361
vcpu_put(vcpu);
6362
return r;
6363
}
6364
6365
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
6366
{
6367
return VM_FAULT_SIGBUS;
6368
}
6369
6370
static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
6371
{
6372
int ret;
6373
6374
if (addr > (unsigned int)(-3 * PAGE_SIZE))
6375
return -EINVAL;
6376
ret = kvm_x86_call(set_tss_addr)(kvm, addr);
6377
return ret;
6378
}
6379
6380
static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
6381
u64 ident_addr)
6382
{
6383
return kvm_x86_call(set_identity_map_addr)(kvm, ident_addr);
6384
}
6385
6386
static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
6387
unsigned long kvm_nr_mmu_pages)
6388
{
6389
if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
6390
return -EINVAL;
6391
6392
mutex_lock(&kvm->slots_lock);
6393
6394
kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
6395
kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
6396
6397
mutex_unlock(&kvm->slots_lock);
6398
return 0;
6399
}
6400
6401
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
6402
{
6403
6404
/*
6405
* Flush all CPUs' dirty log buffers to the dirty_bitmap. Called
6406
* before reporting dirty_bitmap to userspace. KVM flushes the buffers
6407
* on all VM-Exits, thus we only need to kick running vCPUs to force a
6408
* VM-Exit.
6409
*/
6410
struct kvm_vcpu *vcpu;
6411
unsigned long i;
6412
6413
if (!kvm->arch.cpu_dirty_log_size)
6414
return;
6415
6416
kvm_for_each_vcpu(i, vcpu, kvm)
6417
kvm_vcpu_kick(vcpu);
6418
}
6419
6420
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
6421
struct kvm_enable_cap *cap)
6422
{
6423
int r;
6424
6425
if (cap->flags)
6426
return -EINVAL;
6427
6428
switch (cap->cap) {
6429
case KVM_CAP_DISABLE_QUIRKS2:
6430
r = -EINVAL;
6431
if (cap->args[0] & ~kvm_caps.supported_quirks)
6432
break;
6433
fallthrough;
6434
case KVM_CAP_DISABLE_QUIRKS:
6435
kvm->arch.disabled_quirks |= cap->args[0] & kvm_caps.supported_quirks;
6436
r = 0;
6437
break;
6438
case KVM_CAP_SPLIT_IRQCHIP: {
6439
mutex_lock(&kvm->lock);
6440
r = -EINVAL;
6441
if (cap->args[0] > MAX_NR_RESERVED_IOAPIC_PINS)
6442
goto split_irqchip_unlock;
6443
r = -EEXIST;
6444
if (irqchip_in_kernel(kvm))
6445
goto split_irqchip_unlock;
6446
if (kvm->created_vcpus)
6447
goto split_irqchip_unlock;
6448
/* Pairs with irqchip_in_kernel. */
6449
smp_wmb();
6450
kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT;
6451
kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
6452
kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT);
6453
r = 0;
6454
split_irqchip_unlock:
6455
mutex_unlock(&kvm->lock);
6456
break;
6457
}
6458
case KVM_CAP_X2APIC_API:
6459
r = -EINVAL;
6460
if (cap->args[0] & ~KVM_X2APIC_API_VALID_FLAGS)
6461
break;
6462
6463
if (cap->args[0] & KVM_X2APIC_API_USE_32BIT_IDS)
6464
kvm->arch.x2apic_format = true;
6465
if (cap->args[0] & KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
6466
kvm->arch.x2apic_broadcast_quirk_disabled = true;
6467
6468
r = 0;
6469
break;
6470
case KVM_CAP_X86_DISABLE_EXITS:
6471
r = -EINVAL;
6472
if (cap->args[0] & ~kvm_get_allowed_disable_exits())
6473
break;
6474
6475
mutex_lock(&kvm->lock);
6476
if (kvm->created_vcpus)
6477
goto disable_exits_unlock;
6478
6479
#define SMT_RSB_MSG "This processor is affected by the Cross-Thread Return Predictions vulnerability. " \
6480
"KVM_CAP_X86_DISABLE_EXITS should only be used with SMT disabled or trusted guests."
6481
6482
if (!mitigate_smt_rsb && boot_cpu_has_bug(X86_BUG_SMT_RSB) &&
6483
cpu_smt_possible() &&
6484
(cap->args[0] & ~(KVM_X86_DISABLE_EXITS_PAUSE |
6485
KVM_X86_DISABLE_EXITS_APERFMPERF)))
6486
pr_warn_once(SMT_RSB_MSG);
6487
6488
kvm_disable_exits(kvm, cap->args[0]);
6489
r = 0;
6490
disable_exits_unlock:
6491
mutex_unlock(&kvm->lock);
6492
break;
6493
case KVM_CAP_MSR_PLATFORM_INFO:
6494
kvm->arch.guest_can_read_msr_platform_info = cap->args[0];
6495
r = 0;
6496
break;
6497
case KVM_CAP_EXCEPTION_PAYLOAD:
6498
kvm->arch.exception_payload_enabled = cap->args[0];
6499
r = 0;
6500
break;
6501
case KVM_CAP_X86_TRIPLE_FAULT_EVENT:
6502
kvm->arch.triple_fault_event = cap->args[0];
6503
r = 0;
6504
break;
6505
case KVM_CAP_X86_USER_SPACE_MSR:
6506
r = -EINVAL;
6507
if (cap->args[0] & ~KVM_MSR_EXIT_REASON_VALID_MASK)
6508
break;
6509
kvm->arch.user_space_msr_mask = cap->args[0];
6510
r = 0;
6511
break;
6512
case KVM_CAP_X86_BUS_LOCK_EXIT:
6513
r = -EINVAL;
6514
if (cap->args[0] & ~KVM_BUS_LOCK_DETECTION_VALID_MODE)
6515
break;
6516
6517
if ((cap->args[0] & KVM_BUS_LOCK_DETECTION_OFF) &&
6518
(cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT))
6519
break;
6520
6521
if (kvm_caps.has_bus_lock_exit &&
6522
cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT)
6523
kvm->arch.bus_lock_detection_enabled = true;
6524
r = 0;
6525
break;
6526
#ifdef CONFIG_X86_SGX_KVM
6527
case KVM_CAP_SGX_ATTRIBUTE: {
6528
unsigned long allowed_attributes = 0;
6529
6530
r = sgx_set_attribute(&allowed_attributes, cap->args[0]);
6531
if (r)
6532
break;
6533
6534
/* KVM only supports the PROVISIONKEY privileged attribute. */
6535
if ((allowed_attributes & SGX_ATTR_PROVISIONKEY) &&
6536
!(allowed_attributes & ~SGX_ATTR_PROVISIONKEY))
6537
kvm->arch.sgx_provisioning_allowed = true;
6538
else
6539
r = -EINVAL;
6540
break;
6541
}
6542
#endif
6543
case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM:
6544
r = -EINVAL;
6545
if (!kvm_x86_ops.vm_copy_enc_context_from)
6546
break;
6547
6548
r = kvm_x86_call(vm_copy_enc_context_from)(kvm, cap->args[0]);
6549
break;
6550
case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM:
6551
r = -EINVAL;
6552
if (!kvm_x86_ops.vm_move_enc_context_from)
6553
break;
6554
6555
r = kvm_x86_call(vm_move_enc_context_from)(kvm, cap->args[0]);
6556
break;
6557
case KVM_CAP_EXIT_HYPERCALL:
6558
if (cap->args[0] & ~KVM_EXIT_HYPERCALL_VALID_MASK) {
6559
r = -EINVAL;
6560
break;
6561
}
6562
kvm->arch.hypercall_exit_enabled = cap->args[0];
6563
r = 0;
6564
break;
6565
case KVM_CAP_EXIT_ON_EMULATION_FAILURE:
6566
r = -EINVAL;
6567
if (cap->args[0] & ~1)
6568
break;
6569
kvm->arch.exit_on_emulation_error = cap->args[0];
6570
r = 0;
6571
break;
6572
case KVM_CAP_PMU_CAPABILITY:
6573
r = -EINVAL;
6574
if (!enable_pmu || (cap->args[0] & ~KVM_CAP_PMU_VALID_MASK))
6575
break;
6576
6577
mutex_lock(&kvm->lock);
6578
if (!kvm->created_vcpus) {
6579
kvm->arch.enable_pmu = !(cap->args[0] & KVM_PMU_CAP_DISABLE);
6580
r = 0;
6581
}
6582
mutex_unlock(&kvm->lock);
6583
break;
6584
case KVM_CAP_MAX_VCPU_ID:
6585
r = -EINVAL;
6586
if (cap->args[0] > KVM_MAX_VCPU_IDS)
6587
break;
6588
6589
mutex_lock(&kvm->lock);
6590
if (kvm->arch.bsp_vcpu_id > cap->args[0]) {
6591
;
6592
} else if (kvm->arch.max_vcpu_ids == cap->args[0]) {
6593
r = 0;
6594
} else if (!kvm->arch.max_vcpu_ids) {
6595
kvm->arch.max_vcpu_ids = cap->args[0];
6596
r = 0;
6597
}
6598
mutex_unlock(&kvm->lock);
6599
break;
6600
case KVM_CAP_X86_NOTIFY_VMEXIT:
6601
r = -EINVAL;
6602
if ((u32)cap->args[0] & ~KVM_X86_NOTIFY_VMEXIT_VALID_BITS)
6603
break;
6604
if (!kvm_caps.has_notify_vmexit)
6605
break;
6606
if (!((u32)cap->args[0] & KVM_X86_NOTIFY_VMEXIT_ENABLED))
6607
break;
6608
mutex_lock(&kvm->lock);
6609
if (!kvm->created_vcpus) {
6610
kvm->arch.notify_window = cap->args[0] >> 32;
6611
kvm->arch.notify_vmexit_flags = (u32)cap->args[0];
6612
r = 0;
6613
}
6614
mutex_unlock(&kvm->lock);
6615
break;
6616
case KVM_CAP_VM_DISABLE_NX_HUGE_PAGES:
6617
r = -EINVAL;
6618
6619
/*
6620
* Since the risk of disabling NX hugepages is a guest crashing
6621
* the system, ensure the userspace process has permission to
6622
* reboot the system.
6623
*
6624
* Note that unlike the reboot() syscall, the process must have
6625
* this capability in the root namespace because exposing
6626
* /dev/kvm into a container does not limit the scope of the
6627
* iTLB multihit bug to that container. In other words,
6628
* this must use capable(), not ns_capable().
6629
*/
6630
if (!capable(CAP_SYS_BOOT)) {
6631
r = -EPERM;
6632
break;
6633
}
6634
6635
if (cap->args[0])
6636
break;
6637
6638
mutex_lock(&kvm->lock);
6639
if (!kvm->created_vcpus) {
6640
kvm->arch.disable_nx_huge_pages = true;
6641
r = 0;
6642
}
6643
mutex_unlock(&kvm->lock);
6644
break;
6645
case KVM_CAP_X86_APIC_BUS_CYCLES_NS: {
6646
u64 bus_cycle_ns = cap->args[0];
6647
u64 unused;
6648
6649
/*
6650
* Guard against overflow in tmict_to_ns(). 128 is the highest
6651
* divide value that can be programmed in APIC_TDCR.
6652
*/
6653
r = -EINVAL;
6654
if (!bus_cycle_ns ||
6655
check_mul_overflow((u64)U32_MAX * 128, bus_cycle_ns, &unused))
6656
break;
6657
6658
r = 0;
6659
mutex_lock(&kvm->lock);
6660
if (!irqchip_in_kernel(kvm))
6661
r = -ENXIO;
6662
else if (kvm->created_vcpus)
6663
r = -EINVAL;
6664
else
6665
kvm->arch.apic_bus_cycle_ns = bus_cycle_ns;
6666
mutex_unlock(&kvm->lock);
6667
break;
6668
}
6669
default:
6670
r = -EINVAL;
6671
break;
6672
}
6673
return r;
6674
}
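/*
 * Illustrative sketch (assumptions, not part of KVM): the VM-wide
 * capabilities handled above use the same KVM_ENABLE_CAP ioctl, but on an
 * assumed VM descriptor 'vm_fd', and before any vCPUs are created where the
 * code above requires it, e.g.:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_EXCEPTION_PAYLOAD,
 *		.args[0] = 1,
 *	};
 *
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */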
6675
6676
static struct kvm_x86_msr_filter *kvm_alloc_msr_filter(bool default_allow)
6677
{
6678
struct kvm_x86_msr_filter *msr_filter;
6679
6680
msr_filter = kzalloc(sizeof(*msr_filter), GFP_KERNEL_ACCOUNT);
6681
if (!msr_filter)
6682
return NULL;
6683
6684
msr_filter->default_allow = default_allow;
6685
return msr_filter;
6686
}
6687
6688
static void kvm_free_msr_filter(struct kvm_x86_msr_filter *msr_filter)
6689
{
6690
u32 i;
6691
6692
if (!msr_filter)
6693
return;
6694
6695
for (i = 0; i < msr_filter->count; i++)
6696
kfree(msr_filter->ranges[i].bitmap);
6697
6698
kfree(msr_filter);
6699
}
6700
6701
static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter,
6702
struct kvm_msr_filter_range *user_range)
6703
{
6704
unsigned long *bitmap;
6705
size_t bitmap_size;
6706
6707
if (!user_range->nmsrs)
6708
return 0;
6709
6710
if (user_range->flags & ~KVM_MSR_FILTER_RANGE_VALID_MASK)
6711
return -EINVAL;
6712
6713
if (!user_range->flags)
6714
return -EINVAL;
6715
6716
bitmap_size = BITS_TO_LONGS(user_range->nmsrs) * sizeof(long);
6717
if (!bitmap_size || bitmap_size > KVM_MSR_FILTER_MAX_BITMAP_SIZE)
6718
return -EINVAL;
6719
6720
bitmap = memdup_user((__user u8*)user_range->bitmap, bitmap_size);
6721
if (IS_ERR(bitmap))
6722
return PTR_ERR(bitmap);
6723
6724
msr_filter->ranges[msr_filter->count] = (struct msr_bitmap_range) {
6725
.flags = user_range->flags,
6726
.base = user_range->base,
6727
.nmsrs = user_range->nmsrs,
6728
.bitmap = bitmap,
6729
};
6730
6731
msr_filter->count++;
6732
return 0;
6733
}
6734
6735
static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm,
6736
struct kvm_msr_filter *filter)
6737
{
6738
struct kvm_x86_msr_filter *new_filter, *old_filter;
6739
bool default_allow;
6740
bool empty = true;
6741
int r;
6742
u32 i;
6743
6744
if (filter->flags & ~KVM_MSR_FILTER_VALID_MASK)
6745
return -EINVAL;
6746
6747
for (i = 0; i < ARRAY_SIZE(filter->ranges); i++)
6748
empty &= !filter->ranges[i].nmsrs;
6749
6750
default_allow = !(filter->flags & KVM_MSR_FILTER_DEFAULT_DENY);
6751
if (empty && !default_allow)
6752
return -EINVAL;
6753
6754
new_filter = kvm_alloc_msr_filter(default_allow);
6755
if (!new_filter)
6756
return -ENOMEM;
6757
6758
for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) {
6759
r = kvm_add_msr_filter(new_filter, &filter->ranges[i]);
6760
if (r) {
6761
kvm_free_msr_filter(new_filter);
6762
return r;
6763
}
6764
}
6765
6766
mutex_lock(&kvm->lock);
6767
old_filter = rcu_replace_pointer(kvm->arch.msr_filter, new_filter,
6768
mutex_is_locked(&kvm->lock));
6769
mutex_unlock(&kvm->lock);
6770
synchronize_srcu(&kvm->srcu);
6771
6772
kvm_free_msr_filter(old_filter);
6773
6774
kvm_make_all_cpus_request(kvm, KVM_REQ_MSR_FILTER_CHANGED);
6775
6776
return 0;
6777
}
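/*
 * Illustrative userspace sketch (assumptions, not part of KVM): a
 * deny-by-default filter that still allows one small MSR range could be
 * installed with the KVM_X86_SET_MSR_FILTER VM ioctl roughly as follows
 * (one bitmap bit per MSR, set bit == allowed). 'vm_fd', the base index
 * 0x10 and the range size are assumed example values.
 *
 *	__u8 allow_all[1] = { 0xff };		// 8 MSRs, all allowed
 *	struct kvm_msr_filter filter = {
 *		.flags = KVM_MSR_FILTER_DEFAULT_DENY,
 *		.ranges[0] = {
 *			.flags  = KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE,
 *			.base   = 0x10,
 *			.nmsrs  = 8,
 *			.bitmap = allow_all,
 *		},
 *	};
 *
 *	ioctl(vm_fd, KVM_X86_SET_MSR_FILTER, &filter);
 *
 * Accesses denied by the filter are forwarded to userspace only if the
 * corresponding KVM_CAP_X86_USER_SPACE_MSR exit reasons were enabled.
 */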
6778
6779
#ifdef CONFIG_KVM_COMPAT
6780
/* for KVM_X86_SET_MSR_FILTER */
6781
struct kvm_msr_filter_range_compat {
6782
__u32 flags;
6783
__u32 nmsrs;
6784
__u32 base;
6785
__u32 bitmap;
6786
};
6787
6788
struct kvm_msr_filter_compat {
6789
__u32 flags;
6790
struct kvm_msr_filter_range_compat ranges[KVM_MSR_FILTER_MAX_RANGES];
6791
};
6792
6793
#define KVM_X86_SET_MSR_FILTER_COMPAT _IOW(KVMIO, 0xc6, struct kvm_msr_filter_compat)
6794
6795
long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
6796
unsigned long arg)
6797
{
6798
void __user *argp = (void __user *)arg;
6799
struct kvm *kvm = filp->private_data;
6800
long r = -ENOTTY;
6801
6802
switch (ioctl) {
6803
case KVM_X86_SET_MSR_FILTER_COMPAT: {
6804
struct kvm_msr_filter __user *user_msr_filter = argp;
6805
struct kvm_msr_filter_compat filter_compat;
6806
struct kvm_msr_filter filter;
6807
int i;
6808
6809
if (copy_from_user(&filter_compat, user_msr_filter,
6810
sizeof(filter_compat)))
6811
return -EFAULT;
6812
6813
filter.flags = filter_compat.flags;
6814
for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) {
6815
struct kvm_msr_filter_range_compat *cr;
6816
6817
cr = &filter_compat.ranges[i];
6818
filter.ranges[i] = (struct kvm_msr_filter_range) {
6819
.flags = cr->flags,
6820
.nmsrs = cr->nmsrs,
6821
.base = cr->base,
6822
.bitmap = (__u8 *)(ulong)cr->bitmap,
6823
};
6824
}
6825
6826
r = kvm_vm_ioctl_set_msr_filter(kvm, &filter);
6827
break;
6828
}
6829
}
6830
6831
return r;
6832
}
6833
#endif
6834
6835
#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
6836
static int kvm_arch_suspend_notifier(struct kvm *kvm)
6837
{
6838
struct kvm_vcpu *vcpu;
6839
unsigned long i;
6840
6841
/*
6842
* Ignore the return, marking the guest paused only "fails" if the vCPU
6843
* isn't using kvmclock; continuing on is correct and desirable.
6844
*/
6845
kvm_for_each_vcpu(i, vcpu, kvm)
6846
(void)kvm_set_guest_paused(vcpu);
6847
6848
return NOTIFY_DONE;
6849
}
6850
6851
int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state)
6852
{
6853
switch (state) {
6854
case PM_HIBERNATION_PREPARE:
6855
case PM_SUSPEND_PREPARE:
6856
return kvm_arch_suspend_notifier(kvm);
6857
}
6858
6859
return NOTIFY_DONE;
6860
}
6861
#endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */
6862
6863
static int kvm_vm_ioctl_get_clock(struct kvm *kvm, void __user *argp)
6864
{
6865
struct kvm_clock_data data = { 0 };
6866
6867
get_kvmclock(kvm, &data);
6868
if (copy_to_user(argp, &data, sizeof(data)))
6869
return -EFAULT;
6870
6871
return 0;
6872
}
6873
6874
static int kvm_vm_ioctl_set_clock(struct kvm *kvm, void __user *argp)
6875
{
6876
struct kvm_arch *ka = &kvm->arch;
6877
struct kvm_clock_data data;
6878
u64 now_raw_ns;
6879
6880
if (copy_from_user(&data, argp, sizeof(data)))
6881
return -EFAULT;
6882
6883
/*
6884
* Only KVM_CLOCK_REALTIME is used, but allow passing the
6885
* result of KVM_GET_CLOCK back to KVM_SET_CLOCK.
6886
*/
6887
if (data.flags & ~KVM_CLOCK_VALID_FLAGS)
6888
return -EINVAL;
6889
6890
kvm_hv_request_tsc_page_update(kvm);
6891
kvm_start_pvclock_update(kvm);
6892
pvclock_update_vm_gtod_copy(kvm);
6893
6894
/*
6895
* This pairs with kvm_guest_time_update(): when masterclock is
6896
* in use, we use master_kernel_ns + kvmclock_offset to set
6897
* unsigned 'system_time' so if we use get_kvmclock_ns() (which
6898
* is slightly ahead) here we risk going negative on unsigned
6899
* 'system_time' when 'data.clock' is very small.
6900
*/
6901
if (data.flags & KVM_CLOCK_REALTIME) {
6902
u64 now_real_ns = ktime_get_real_ns();
6903
6904
/*
6905
* Avoid stepping the kvmclock backwards.
6906
*/
6907
if (now_real_ns > data.realtime)
6908
data.clock += now_real_ns - data.realtime;
6909
}
6910
6911
if (ka->use_master_clock)
6912
now_raw_ns = ka->master_kernel_ns;
6913
else
6914
now_raw_ns = get_kvmclock_base_ns();
6915
ka->kvmclock_offset = data.clock - now_raw_ns;
6916
kvm_end_pvclock_update(kvm);
6917
return 0;
6918
}
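/*
 * Illustrative sketch (assumptions, not part of KVM): across a migration a
 * VMM typically captures the clock on the source and replays it on the
 * destination, letting the KVM_CLOCK_REALTIME handling above advance the
 * value by the wall-clock time that elapsed in between. 'src_vm_fd' and
 * 'dst_vm_fd' are assumed VM descriptors.
 *
 *	struct kvm_clock_data data;
 *
 *	ioctl(src_vm_fd, KVM_GET_CLOCK, &data);		// on the source
 *	// ... transfer 'data' ...
 *	ioctl(dst_vm_fd, KVM_SET_CLOCK, &data);		// on the destination
 */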
6919
6920
int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
6921
{
6922
struct kvm *kvm = filp->private_data;
6923
void __user *argp = (void __user *)arg;
6924
int r = -ENOTTY;
6925
6926
#ifdef CONFIG_KVM_IOAPIC
6927
/*
6928
* This union makes it completely explicit to gcc-3.x
6929
* that these three variables' stack usage should be
6930
* combined, not added together.
6931
*/
6932
union {
6933
struct kvm_pit_state ps;
6934
struct kvm_pit_state2 ps2;
6935
struct kvm_pit_config pit_config;
6936
} u;
6937
#endif
6938
6939
switch (ioctl) {
6940
case KVM_SET_TSS_ADDR:
6941
r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
6942
break;
6943
case KVM_SET_IDENTITY_MAP_ADDR: {
6944
u64 ident_addr;
6945
6946
mutex_lock(&kvm->lock);
6947
r = -EINVAL;
6948
if (kvm->created_vcpus)
6949
goto set_identity_unlock;
6950
r = -EFAULT;
6951
if (copy_from_user(&ident_addr, argp, sizeof(ident_addr)))
6952
goto set_identity_unlock;
6953
r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
6954
set_identity_unlock:
6955
mutex_unlock(&kvm->lock);
6956
break;
6957
}
6958
case KVM_SET_NR_MMU_PAGES:
6959
r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
6960
break;
6961
#ifdef CONFIG_KVM_IOAPIC
6962
case KVM_CREATE_IRQCHIP: {
6963
mutex_lock(&kvm->lock);
6964
6965
r = -EEXIST;
6966
if (irqchip_in_kernel(kvm))
6967
goto create_irqchip_unlock;
6968
6969
r = -EINVAL;
6970
if (kvm->created_vcpus)
6971
goto create_irqchip_unlock;
6972
6973
r = kvm_pic_init(kvm);
6974
if (r)
6975
goto create_irqchip_unlock;
6976
6977
r = kvm_ioapic_init(kvm);
6978
if (r) {
6979
kvm_pic_destroy(kvm);
6980
goto create_irqchip_unlock;
6981
}
6982
6983
r = kvm_setup_default_ioapic_and_pic_routing(kvm);
6984
if (r) {
6985
kvm_ioapic_destroy(kvm);
6986
kvm_pic_destroy(kvm);
6987
goto create_irqchip_unlock;
6988
}
6989
/* Write kvm->irq_routing before enabling irqchip_in_kernel. */
6990
smp_wmb();
6991
kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL;
6992
kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT);
6993
create_irqchip_unlock:
6994
mutex_unlock(&kvm->lock);
6995
break;
6996
}
6997
case KVM_CREATE_PIT:
6998
u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
6999
goto create_pit;
7000
case KVM_CREATE_PIT2:
7001
r = -EFAULT;
7002
if (copy_from_user(&u.pit_config, argp,
7003
sizeof(struct kvm_pit_config)))
7004
goto out;
7005
create_pit:
7006
mutex_lock(&kvm->lock);
7007
r = -EEXIST;
7008
if (kvm->arch.vpit)
7009
goto create_pit_unlock;
7010
r = -ENOENT;
7011
if (!pic_in_kernel(kvm))
7012
goto create_pit_unlock;
7013
r = -ENOMEM;
7014
kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
7015
if (kvm->arch.vpit)
7016
r = 0;
7017
create_pit_unlock:
7018
mutex_unlock(&kvm->lock);
7019
break;
7020
case KVM_GET_IRQCHIP: {
7021
/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
7022
struct kvm_irqchip *chip;
7023
7024
chip = memdup_user(argp, sizeof(*chip));
7025
if (IS_ERR(chip)) {
7026
r = PTR_ERR(chip);
7027
goto out;
7028
}
7029
7030
r = -ENXIO;
7031
if (!irqchip_full(kvm))
7032
goto get_irqchip_out;
7033
r = kvm_vm_ioctl_get_irqchip(kvm, chip);
7034
if (r)
7035
goto get_irqchip_out;
7036
r = -EFAULT;
7037
if (copy_to_user(argp, chip, sizeof(*chip)))
7038
goto get_irqchip_out;
7039
r = 0;
7040
get_irqchip_out:
7041
kfree(chip);
7042
break;
7043
}
7044
case KVM_SET_IRQCHIP: {
7045
/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
7046
struct kvm_irqchip *chip;
7047
7048
chip = memdup_user(argp, sizeof(*chip));
7049
if (IS_ERR(chip)) {
7050
r = PTR_ERR(chip);
7051
goto out;
7052
}
7053
7054
r = -ENXIO;
7055
if (!irqchip_full(kvm))
7056
goto set_irqchip_out;
7057
r = kvm_vm_ioctl_set_irqchip(kvm, chip);
7058
set_irqchip_out:
7059
kfree(chip);
7060
break;
7061
}
7062
case KVM_GET_PIT: {
7063
r = -EFAULT;
7064
if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
7065
goto out;
7066
r = -ENXIO;
7067
if (!kvm->arch.vpit)
7068
goto out;
7069
r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
7070
if (r)
7071
goto out;
7072
r = -EFAULT;
7073
if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
7074
goto out;
7075
r = 0;
7076
break;
7077
}
7078
case KVM_SET_PIT: {
7079
r = -EFAULT;
7080
if (copy_from_user(&u.ps, argp, sizeof(u.ps)))
7081
goto out;
7082
mutex_lock(&kvm->lock);
7083
r = -ENXIO;
7084
if (!kvm->arch.vpit)
7085
goto set_pit_out;
7086
r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
7087
set_pit_out:
7088
mutex_unlock(&kvm->lock);
7089
break;
7090
}
7091
case KVM_GET_PIT2: {
7092
r = -ENXIO;
7093
if (!kvm->arch.vpit)
7094
goto out;
7095
r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
7096
if (r)
7097
goto out;
7098
r = -EFAULT;
7099
if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
7100
goto out;
7101
r = 0;
7102
break;
7103
}
7104
case KVM_SET_PIT2: {
7105
r = -EFAULT;
7106
if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
7107
goto out;
7108
mutex_lock(&kvm->lock);
7109
r = -ENXIO;
7110
if (!kvm->arch.vpit)
7111
goto set_pit2_out;
7112
r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
7113
set_pit2_out:
7114
mutex_unlock(&kvm->lock);
7115
break;
7116
}
7117
case KVM_REINJECT_CONTROL: {
7118
struct kvm_reinject_control control;
7119
r = -EFAULT;
7120
if (copy_from_user(&control, argp, sizeof(control)))
7121
goto out;
7122
r = -ENXIO;
7123
if (!kvm->arch.vpit)
7124
goto out;
7125
r = kvm_vm_ioctl_reinject(kvm, &control);
7126
break;
7127
}
7128
#endif
7129
case KVM_SET_BOOT_CPU_ID:
7130
r = 0;
7131
mutex_lock(&kvm->lock);
7132
if (kvm->created_vcpus)
7133
r = -EBUSY;
7134
else if (arg > KVM_MAX_VCPU_IDS ||
7135
(kvm->arch.max_vcpu_ids && arg > kvm->arch.max_vcpu_ids))
7136
r = -EINVAL;
7137
else
7138
kvm->arch.bsp_vcpu_id = arg;
7139
mutex_unlock(&kvm->lock);
7140
break;
7141
#ifdef CONFIG_KVM_XEN
7142
case KVM_XEN_HVM_CONFIG: {
7143
struct kvm_xen_hvm_config xhc;
7144
r = -EFAULT;
7145
if (copy_from_user(&xhc, argp, sizeof(xhc)))
7146
goto out;
7147
r = kvm_xen_hvm_config(kvm, &xhc);
7148
break;
7149
}
7150
case KVM_XEN_HVM_GET_ATTR: {
7151
struct kvm_xen_hvm_attr xha;
7152
7153
r = -EFAULT;
7154
if (copy_from_user(&xha, argp, sizeof(xha)))
7155
goto out;
7156
r = kvm_xen_hvm_get_attr(kvm, &xha);
7157
if (!r && copy_to_user(argp, &xha, sizeof(xha)))
7158
r = -EFAULT;
7159
break;
7160
}
7161
case KVM_XEN_HVM_SET_ATTR: {
7162
struct kvm_xen_hvm_attr xha;
7163
7164
r = -EFAULT;
7165
if (copy_from_user(&xha, argp, sizeof(xha)))
7166
goto out;
7167
r = kvm_xen_hvm_set_attr(kvm, &xha);
7168
break;
7169
}
7170
case KVM_XEN_HVM_EVTCHN_SEND: {
7171
struct kvm_irq_routing_xen_evtchn uxe;
7172
7173
r = -EFAULT;
7174
if (copy_from_user(&uxe, argp, sizeof(uxe)))
7175
goto out;
7176
r = kvm_xen_hvm_evtchn_send(kvm, &uxe);
7177
break;
7178
}
7179
#endif
7180
case KVM_SET_CLOCK:
7181
r = kvm_vm_ioctl_set_clock(kvm, argp);
7182
break;
7183
case KVM_GET_CLOCK:
7184
r = kvm_vm_ioctl_get_clock(kvm, argp);
7185
break;
7186
case KVM_SET_TSC_KHZ: {
7187
u32 user_tsc_khz;
7188
7189
r = -EINVAL;
7190
user_tsc_khz = (u32)arg;
7191
7192
if (kvm_caps.has_tsc_control &&
7193
user_tsc_khz >= kvm_caps.max_guest_tsc_khz)
7194
goto out;
7195
7196
if (user_tsc_khz == 0)
7197
user_tsc_khz = tsc_khz;
7198
7199
mutex_lock(&kvm->lock);
7200
if (!kvm->created_vcpus) {
7201
WRITE_ONCE(kvm->arch.default_tsc_khz, user_tsc_khz);
7202
r = 0;
7203
}
7204
mutex_unlock(&kvm->lock);
7205
goto out;
7206
}
7207
case KVM_GET_TSC_KHZ: {
7208
r = READ_ONCE(kvm->arch.default_tsc_khz);
7209
goto out;
7210
}
7211
case KVM_MEMORY_ENCRYPT_OP:
7212
r = -ENOTTY;
7213
if (!kvm_x86_ops.mem_enc_ioctl)
7214
goto out;
7215
7216
r = kvm_x86_call(mem_enc_ioctl)(kvm, argp);
7217
break;
7218
case KVM_MEMORY_ENCRYPT_REG_REGION: {
7219
struct kvm_enc_region region;
7220
7221
r = -EFAULT;
7222
if (copy_from_user(&region, argp, sizeof(region)))
7223
goto out;
7224
7225
r = -ENOTTY;
7226
if (!kvm_x86_ops.mem_enc_register_region)
7227
goto out;
7228
7229
r = kvm_x86_call(mem_enc_register_region)(kvm, &region);
7230
break;
7231
}
7232
case KVM_MEMORY_ENCRYPT_UNREG_REGION: {
7233
struct kvm_enc_region region;
7234
7235
r = -EFAULT;
7236
if (copy_from_user(&region, argp, sizeof(region)))
7237
goto out;
7238
7239
r = -ENOTTY;
7240
if (!kvm_x86_ops.mem_enc_unregister_region)
7241
goto out;
7242
7243
r = kvm_x86_call(mem_enc_unregister_region)(kvm, &region);
7244
break;
7245
}
7246
#ifdef CONFIG_KVM_HYPERV
7247
case KVM_HYPERV_EVENTFD: {
7248
struct kvm_hyperv_eventfd hvevfd;
7249
7250
r = -EFAULT;
7251
if (copy_from_user(&hvevfd, argp, sizeof(hvevfd)))
7252
goto out;
7253
r = kvm_vm_ioctl_hv_eventfd(kvm, &hvevfd);
7254
break;
7255
}
7256
#endif
7257
case KVM_SET_PMU_EVENT_FILTER:
7258
r = kvm_vm_ioctl_set_pmu_event_filter(kvm, argp);
7259
break;
7260
case KVM_X86_SET_MSR_FILTER: {
7261
struct kvm_msr_filter __user *user_msr_filter = argp;
7262
struct kvm_msr_filter filter;
7263
7264
if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))
7265
return -EFAULT;
7266
7267
r = kvm_vm_ioctl_set_msr_filter(kvm, &filter);
7268
break;
7269
}
7270
default:
7271
r = -ENOTTY;
7272
}
7273
out:
7274
return r;
7275
}
7276
7277
static void kvm_probe_feature_msr(u32 msr_index)
7278
{
7279
u64 data;
7280
7281
if (kvm_get_feature_msr(NULL, msr_index, &data, true))
7282
return;
7283
7284
msr_based_features[num_msr_based_features++] = msr_index;
7285
}
7286
7287
static void kvm_probe_msr_to_save(u32 msr_index)
7288
{
7289
u32 dummy[2];
7290
7291
if (rdmsr_safe(msr_index, &dummy[0], &dummy[1]))
7292
return;
7293
7294
/*
7295
* Even MSRs that are valid in the host may not be exposed to guests in
7296
* some cases.
7297
*/
7298
switch (msr_index) {
7299
case MSR_IA32_BNDCFGS:
7300
if (!kvm_mpx_supported())
7301
return;
7302
break;
7303
case MSR_TSC_AUX:
7304
if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP) &&
7305
!kvm_cpu_cap_has(X86_FEATURE_RDPID))
7306
return;
7307
break;
7308
case MSR_IA32_UMWAIT_CONTROL:
7309
if (!kvm_cpu_cap_has(X86_FEATURE_WAITPKG))
7310
return;
7311
break;
7312
case MSR_IA32_RTIT_CTL:
7313
case MSR_IA32_RTIT_STATUS:
7314
if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT))
7315
return;
7316
break;
7317
case MSR_IA32_RTIT_CR3_MATCH:
7318
if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
7319
!intel_pt_validate_hw_cap(PT_CAP_cr3_filtering))
7320
return;
7321
break;
7322
case MSR_IA32_RTIT_OUTPUT_BASE:
7323
case MSR_IA32_RTIT_OUTPUT_MASK:
7324
if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
7325
(!intel_pt_validate_hw_cap(PT_CAP_topa_output) &&
7326
!intel_pt_validate_hw_cap(PT_CAP_single_range_output)))
7327
return;
7328
break;
7329
case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
7330
if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
7331
(msr_index - MSR_IA32_RTIT_ADDR0_A >=
7332
intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2))
7333
return;
7334
break;
7335
case MSR_ARCH_PERFMON_PERFCTR0 ...
7336
MSR_ARCH_PERFMON_PERFCTR0 + KVM_MAX_NR_GP_COUNTERS - 1:
7337
if (msr_index - MSR_ARCH_PERFMON_PERFCTR0 >=
7338
kvm_pmu_cap.num_counters_gp)
7339
return;
7340
break;
7341
case MSR_ARCH_PERFMON_EVENTSEL0 ...
7342
MSR_ARCH_PERFMON_EVENTSEL0 + KVM_MAX_NR_GP_COUNTERS - 1:
7343
if (msr_index - MSR_ARCH_PERFMON_EVENTSEL0 >=
7344
kvm_pmu_cap.num_counters_gp)
7345
return;
7346
break;
7347
case MSR_ARCH_PERFMON_FIXED_CTR0 ...
7348
MSR_ARCH_PERFMON_FIXED_CTR0 + KVM_MAX_NR_FIXED_COUNTERS - 1:
7349
if (msr_index - MSR_ARCH_PERFMON_FIXED_CTR0 >=
7350
kvm_pmu_cap.num_counters_fixed)
7351
return;
7352
break;
7353
case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
7354
case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
7355
case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
7356
if (!kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2))
7357
return;
7358
break;
7359
case MSR_IA32_XFD:
7360
case MSR_IA32_XFD_ERR:
7361
if (!kvm_cpu_cap_has(X86_FEATURE_XFD))
7362
return;
7363
break;
7364
case MSR_IA32_TSX_CTRL:
7365
if (!(kvm_get_arch_capabilities() & ARCH_CAP_TSX_CTRL_MSR))
7366
return;
7367
break;
7368
default:
7369
break;
7370
}
7371
7372
msrs_to_save[num_msrs_to_save++] = msr_index;
7373
}
7374
7375
static void kvm_init_msr_lists(void)
7376
{
7377
unsigned i;
7378
7379
BUILD_BUG_ON_MSG(KVM_MAX_NR_FIXED_COUNTERS != 3,
7380
"Please update the fixed PMCs in msrs_to_save_pmu[]");
7381
7382
num_msrs_to_save = 0;
7383
num_emulated_msrs = 0;
7384
num_msr_based_features = 0;
7385
7386
for (i = 0; i < ARRAY_SIZE(msrs_to_save_base); i++)
7387
kvm_probe_msr_to_save(msrs_to_save_base[i]);
7388
7389
if (enable_pmu) {
7390
for (i = 0; i < ARRAY_SIZE(msrs_to_save_pmu); i++)
7391
kvm_probe_msr_to_save(msrs_to_save_pmu[i]);
7392
}
7393
7394
for (i = 0; i < ARRAY_SIZE(emulated_msrs_all); i++) {
7395
if (!kvm_x86_call(has_emulated_msr)(NULL,
7396
emulated_msrs_all[i]))
7397
continue;
7398
7399
emulated_msrs[num_emulated_msrs++] = emulated_msrs_all[i];
7400
}
7401
7402
for (i = KVM_FIRST_EMULATED_VMX_MSR; i <= KVM_LAST_EMULATED_VMX_MSR; i++)
7403
kvm_probe_feature_msr(i);
7404
7405
for (i = 0; i < ARRAY_SIZE(msr_based_features_all_except_vmx); i++)
7406
kvm_probe_feature_msr(msr_based_features_all_except_vmx[i]);
7407
}
7408
7409
static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
7410
const void *v)
7411
{
7412
int handled = 0;
7413
int n;
7414
7415
do {
7416
n = min(len, 8);
7417
if (!(lapic_in_kernel(vcpu) &&
7418
!kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v))
7419
&& kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v))
7420
break;
7421
handled += n;
7422
addr += n;
7423
len -= n;
7424
v += n;
7425
} while (len);
7426
7427
return handled;
7428
}
7429
7430
static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
7431
{
7432
int handled = 0;
7433
int n;
7434
7435
do {
7436
n = min(len, 8);
7437
if (!(lapic_in_kernel(vcpu) &&
7438
!kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev,
7439
addr, n, v))
7440
&& kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
7441
break;
7442
trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, v);
7443
handled += n;
7444
addr += n;
7445
len -= n;
7446
v += n;
7447
} while (len);
7448
7449
return handled;
7450
}
7451
7452
void kvm_set_segment(struct kvm_vcpu *vcpu,
7453
struct kvm_segment *var, int seg)
7454
{
7455
kvm_x86_call(set_segment)(vcpu, var, seg);
7456
}
7457
7458
void kvm_get_segment(struct kvm_vcpu *vcpu,
7459
struct kvm_segment *var, int seg)
7460
{
7461
kvm_x86_call(get_segment)(vcpu, var, seg);
7462
}
7463
7464
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
7465
struct x86_exception *exception)
7466
{
7467
struct kvm_mmu *mmu = vcpu->arch.mmu;
7468
gpa_t t_gpa;
7469
7470
BUG_ON(!mmu_is_nested(vcpu));
7471
7472
/* NPT walks are always user-walks */
7473
access |= PFERR_USER_MASK;
7474
t_gpa = mmu->gva_to_gpa(vcpu, mmu, gpa, access, exception);
7475
7476
return t_gpa;
7477
}
7478
7479
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
7480
struct x86_exception *exception)
7481
{
7482
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7483
7484
u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
7485
return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
7486
}
7487
EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read);
7488
7489
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
7490
struct x86_exception *exception)
7491
{
7492
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7493
7494
u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
7495
access |= PFERR_WRITE_MASK;
7496
return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
7497
}
7498
EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_write);
7499
7500
/* Used to access a guest's mapped memory without checking CPL. */
7501
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
7502
struct x86_exception *exception)
7503
{
7504
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7505
7506
return mmu->gva_to_gpa(vcpu, mmu, gva, 0, exception);
7507
}
7508
7509
static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
				      struct kvm_vcpu *vcpu, u64 access,
				      struct x86_exception *exception)
{
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
	void *data = val;
	int r = X86EMUL_CONTINUE;

	while (bytes) {
		gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
		int ret;

		if (gpa == INVALID_GPA)
			return X86EMUL_PROPAGATE_FAULT;
		ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data,
					       offset, toread);
		if (ret < 0) {
			r = X86EMUL_IO_NEEDED;
			goto out;
		}

		bytes -= toread;
		data += toread;
		addr += toread;
	}
out:
	return r;
}

/* used for instruction fetching */
static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
				gva_t addr, void *val, unsigned int bytes,
				struct x86_exception *exception)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
	u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
	unsigned offset;
	int ret;

	/* Inline kvm_read_guest_virt_helper for speed. */
	gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access|PFERR_FETCH_MASK,
				    exception);
	if (unlikely(gpa == INVALID_GPA))
		return X86EMUL_PROPAGATE_FAULT;

	offset = addr & (PAGE_SIZE-1);
	if (WARN_ON(offset + bytes > PAGE_SIZE))
		bytes = (unsigned)PAGE_SIZE - offset;
	ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val,
				       offset, bytes);
	if (unlikely(ret < 0))
		return X86EMUL_IO_NEEDED;

	return X86EMUL_CONTINUE;
}

int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
			gva_t addr, void *val, unsigned int bytes,
			struct x86_exception *exception)
{
	u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;

	/*
	 * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
	 * is returned, but our callers are not ready for that and they blindly
	 * call kvm_inject_page_fault. Ensure that they at least do not leak
	 * uninitialized kernel stack memory into cr2 and error code.
	 */
	memset(exception, 0, sizeof(*exception));
	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
					  exception);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_virt);

static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
			     gva_t addr, void *val, unsigned int bytes,
			     struct x86_exception *exception, bool system)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	u64 access = 0;

	if (system)
		access |= PFERR_IMPLICIT_ACCESS;
	else if (kvm_x86_call(get_cpl)(vcpu) == 3)
		access |= PFERR_USER_MASK;

	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception);
}

static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
				       struct kvm_vcpu *vcpu, u64 access,
				       struct x86_exception *exception)
{
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
	void *data = val;
	int r = X86EMUL_CONTINUE;

	while (bytes) {
		gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
		int ret;

		if (gpa == INVALID_GPA)
			return X86EMUL_PROPAGATE_FAULT;
		ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite);
		if (ret < 0) {
			r = X86EMUL_IO_NEEDED;
			goto out;
		}

		bytes -= towrite;
		data += towrite;
		addr += towrite;
	}
out:
	return r;
}

static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val,
			      unsigned int bytes, struct x86_exception *exception,
			      bool system)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	u64 access = PFERR_WRITE_MASK;

	if (system)
		access |= PFERR_IMPLICIT_ACCESS;
	else if (kvm_x86_call(get_cpl)(vcpu) == 3)
		access |= PFERR_USER_MASK;

	return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
					   access, exception);
}

int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
				unsigned int bytes, struct x86_exception *exception)
{
	/* kvm_write_guest_virt_system can pull in tons of pages. */
	vcpu->arch.l1tf_flush_l1d = true;

	return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
					   PFERR_WRITE_MASK, exception);
}
EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);

static int kvm_check_emulate_insn(struct kvm_vcpu *vcpu, int emul_type,
				  void *insn, int insn_len)
{
	return kvm_x86_call(check_emulate_instruction)(vcpu, emul_type,
						       insn, insn_len);
}

int handle_ud(struct kvm_vcpu *vcpu)
{
	static const char kvm_emulate_prefix[] = { __KVM_EMULATE_PREFIX };
	int fep_flags = READ_ONCE(force_emulation_prefix);
	int emul_type = EMULTYPE_TRAP_UD;
	char sig[5]; /* ud2; .ascii "kvm" */
	struct x86_exception e;
	int r;

	r = kvm_check_emulate_insn(vcpu, emul_type, NULL, 0);
	if (r != X86EMUL_CONTINUE)
		return 1;

	if (fep_flags &&
	    kvm_read_guest_virt(vcpu, kvm_get_linear_rip(vcpu),
				sig, sizeof(sig), &e) == 0 &&
	    memcmp(sig, kvm_emulate_prefix, sizeof(sig)) == 0) {
		if (fep_flags & KVM_FEP_CLEAR_RFLAGS_RF)
			kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) & ~X86_EFLAGS_RF);
		kvm_rip_write(vcpu, kvm_rip_read(vcpu) + sizeof(sig));
		emul_type = EMULTYPE_TRAP_UD_FORCED;
	}

	return kvm_emulate_instruction(vcpu, emul_type);
}
EXPORT_SYMBOL_GPL(handle_ud);

static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
			    gpa_t gpa, bool write)
{
	/* For APIC access vmexit */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		return 1;

	if (vcpu_match_mmio_gpa(vcpu, gpa)) {
		trace_vcpu_match_mmio(gva, gpa, write, true);
		return 1;
	}

	return 0;
}

static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
				gpa_t *gpa, struct x86_exception *exception,
				bool write)
{
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
	u64 access = ((kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0)
		     | (write ? PFERR_WRITE_MASK : 0);

	/*
	 * currently PKRU is only applied to ept enabled guest so
	 * there is no pkey in EPT page table for L1 guest or EPT
	 * shadow page table for L2 guest.
	 */
	if (vcpu_match_mmio_gva(vcpu, gva) && (!is_paging(vcpu) ||
	    !permission_fault(vcpu, vcpu->arch.walk_mmu,
			      vcpu->arch.mmio_access, 0, access))) {
		*gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
			(gva & (PAGE_SIZE - 1));
		trace_vcpu_match_mmio(gva, *gpa, write, false);
		return 1;
	}

	*gpa = mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);

	if (*gpa == INVALID_GPA)
		return -1;

	return vcpu_is_mmio_gpa(vcpu, gva, *gpa, write);
}

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			const void *val, int bytes)
{
	int ret;

	ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes);
	if (ret < 0)
		return 0;
	kvm_page_track_write(vcpu, gpa, val, bytes);
	return 1;
}

struct read_write_emulator_ops {
	int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
				  int bytes);
	int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
				  void *val, int bytes);
	int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
			       int bytes, void *val);
	int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
				    void *val, int bytes);
	bool write;
};

static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
{
	if (vcpu->mmio_read_completed) {
		trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
			       vcpu->mmio_fragments[0].gpa, val);
		vcpu->mmio_read_completed = 0;
		return 1;
	}

	return 0;
}

static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
			void *val, int bytes)
{
	return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes);
}

static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
			 void *val, int bytes)
{
	return emulator_write_phys(vcpu, gpa, val, bytes);
}

static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
{
	trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, val);
	return vcpu_mmio_write(vcpu, gpa, bytes, val);
}

static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
			  void *val, int bytes)
{
	trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, NULL);
	return X86EMUL_IO_NEEDED;
}

static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
			   void *val, int bytes)
{
	struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0];

	memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
	return X86EMUL_CONTINUE;
}

static const struct read_write_emulator_ops read_emultor = {
	.read_write_prepare = read_prepare,
	.read_write_emulate = read_emulate,
	.read_write_mmio = vcpu_mmio_read,
	.read_write_exit_mmio = read_exit_mmio,
};

static const struct read_write_emulator_ops write_emultor = {
	.read_write_emulate = write_emulate,
	.read_write_mmio = write_mmio,
	.read_write_exit_mmio = write_exit_mmio,
	.write = true,
};

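/*
 * Handle the portion of an emulated access that fits in a single page:
 * regular memory is read/written directly, while MMIO that cannot be
 * completed in the kernel is queued as a fragment for userspace.
 */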
static int emulator_read_write_onepage(unsigned long addr, void *val,
				       unsigned int bytes,
				       struct x86_exception *exception,
				       struct kvm_vcpu *vcpu,
				       const struct read_write_emulator_ops *ops)
{
	gpa_t gpa;
	int handled, ret;
	bool write = ops->write;
	struct kvm_mmio_fragment *frag;
	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;

	/*
	 * If the exit was due to a NPF we may already have a GPA.
	 * If the GPA is present, use it to avoid the GVA to GPA table walk.
	 * Note, this cannot be used on string operations since string
	 * operation using rep will only have the initial GPA from the NPF
	 * occurred.
	 */
	if (ctxt->gpa_available && emulator_can_use_gpa(ctxt) &&
	    (addr & ~PAGE_MASK) == (ctxt->gpa_val & ~PAGE_MASK)) {
		gpa = ctxt->gpa_val;
		ret = vcpu_is_mmio_gpa(vcpu, addr, gpa, write);
	} else {
		ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
		if (ret < 0)
			return X86EMUL_PROPAGATE_FAULT;
	}

	if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes))
		return X86EMUL_CONTINUE;

	/*
	 * Is this MMIO handled locally?
	 */
	handled = ops->read_write_mmio(vcpu, gpa, bytes, val);
	if (handled == bytes)
		return X86EMUL_CONTINUE;

	gpa += handled;
	bytes -= handled;
	val += handled;

	WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS);
	frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
	frag->gpa = gpa;
	frag->data = val;
	frag->len = bytes;
	return X86EMUL_CONTINUE;
}

static int emulator_read_write(struct x86_emulate_ctxt *ctxt,
			       unsigned long addr,
			       void *val, unsigned int bytes,
			       struct x86_exception *exception,
			       const struct read_write_emulator_ops *ops)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	gpa_t gpa;
	int rc;

	if (ops->read_write_prepare &&
	    ops->read_write_prepare(vcpu, val, bytes))
		return X86EMUL_CONTINUE;

	vcpu->mmio_nr_fragments = 0;

	/* Crossing a page boundary? */
	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
		int now;

		now = -addr & ~PAGE_MASK;
		rc = emulator_read_write_onepage(addr, val, now, exception,
						 vcpu, ops);

		if (rc != X86EMUL_CONTINUE)
			return rc;
		addr += now;
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			addr = (u32)addr;
		val += now;
		bytes -= now;
	}

	rc = emulator_read_write_onepage(addr, val, bytes, exception,
					 vcpu, ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (!vcpu->mmio_nr_fragments)
		return X86EMUL_CONTINUE;

	gpa = vcpu->mmio_fragments[0].gpa;

	vcpu->mmio_needed = 1;
	vcpu->mmio_cur_fragment = 0;

	vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len);
	vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
	vcpu->run->exit_reason = KVM_EXIT_MMIO;
	vcpu->run->mmio.phys_addr = gpa;

	return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
}

static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
				  unsigned long addr,
				  void *val,
				  unsigned int bytes,
				  struct x86_exception *exception)
{
	return emulator_read_write(ctxt, addr, val, bytes,
				   exception, &read_emultor);
}

static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
				   unsigned long addr,
				   const void *val,
				   unsigned int bytes,
				   struct x86_exception *exception)
{
	return emulator_read_write(ctxt, addr, (void *)val, bytes,
				   exception, &write_emultor);
}

#define emulator_try_cmpxchg_user(t, ptr, old, new) \
	(__try_cmpxchg_user((t __user *)(ptr), (t *)(old), *(t *)(new), efault ## t))

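/*
 * Emulate CMPXCHG atomically via the host userspace mapping when possible;
 * accesses that cannot be handled atomically (unsupported size, page or
 * cache-line splits, or no usable host mapping) fall back to being emulated
 * as a plain write via the emul_write label below.
 */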
static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
				     unsigned long addr,
				     const void *old,
				     const void *new,
				     unsigned int bytes,
				     struct x86_exception *exception)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	u64 page_line_mask;
	unsigned long hva;
	gpa_t gpa;
	int r;

	/* a guest's cmpxchg8b has to be emulated atomically */
	if (bytes > 8 || (bytes & (bytes - 1)))
		goto emul_write;

	gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);

	if (gpa == INVALID_GPA ||
	    (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto emul_write;

	/*
	 * Emulate the atomic as a straight write to avoid #AC if SLD is
	 * enabled in the host and the access splits a cache line.
	 */
	if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
		page_line_mask = ~(cache_line_size() - 1);
	else
		page_line_mask = PAGE_MASK;

	if (((gpa + bytes - 1) & page_line_mask) != (gpa & page_line_mask))
		goto emul_write;

	hva = kvm_vcpu_gfn_to_hva(vcpu, gpa_to_gfn(gpa));
	if (kvm_is_error_hva(hva))
		goto emul_write;

	hva += offset_in_page(gpa);

	switch (bytes) {
	case 1:
		r = emulator_try_cmpxchg_user(u8, hva, old, new);
		break;
	case 2:
		r = emulator_try_cmpxchg_user(u16, hva, old, new);
		break;
	case 4:
		r = emulator_try_cmpxchg_user(u32, hva, old, new);
		break;
	case 8:
		r = emulator_try_cmpxchg_user(u64, hva, old, new);
		break;
	default:
		BUG();
	}

	if (r < 0)
		return X86EMUL_UNHANDLEABLE;

	/*
	 * Mark the page dirty _before_ checking whether or not the CMPXCHG was
	 * successful, as the old value is written back on failure. Note, for
	 * live migration, this is unnecessarily conservative as CMPXCHG writes
	 * back the original value and the access is atomic, but KVM's ABI is
	 * that all writes are dirty logged, regardless of the value written.
	 */
	kvm_vcpu_mark_page_dirty(vcpu, gpa_to_gfn(gpa));

	if (r)
		return X86EMUL_CMPXCHG_FAILED;

	kvm_page_track_write(vcpu, gpa, new, bytes);

	return X86EMUL_CONTINUE;

emul_write:
	pr_warn_once("emulating exchange as write\n");

	return emulator_write_emulated(ctxt, addr, new, bytes, exception);
}

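/*
 * Perform up to @count port I/O transactions against in-kernel devices.
 * Returns 1 if the I/O was fully handled in KVM, 0 if an exit to userspace
 * is required (with the outstanding state saved in vcpu->arch.pio).
 */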
static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
			       unsigned short port, void *data,
			       unsigned int count, bool in)
{
	unsigned i;
	int r;

	WARN_ON_ONCE(vcpu->arch.pio.count);
	for (i = 0; i < count; i++) {
		if (in)
			r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, port, size, data);
		else
			r = kvm_io_bus_write(vcpu, KVM_PIO_BUS, port, size, data);

		if (r) {
			if (i == 0)
				goto userspace_io;

			/*
			 * Userspace must have unregistered the device while PIO
			 * was running.  Drop writes / read as 0.
			 */
			if (in)
				memset(data, 0, size * (count - i));
			break;
		}

		data += size;
	}
	return 1;

userspace_io:
	vcpu->arch.pio.port = port;
	vcpu->arch.pio.in = in;
	vcpu->arch.pio.count = count;
	vcpu->arch.pio.size = size;

	if (in)
		memset(vcpu->arch.pio_data, 0, size * count);
	else
		memcpy(vcpu->arch.pio_data, data, size * count);

	vcpu->run->exit_reason = KVM_EXIT_IO;
	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
	vcpu->run->io.size = size;
	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
	vcpu->run->io.count = count;
	vcpu->run->io.port = port;
	return 0;
}

static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
			   unsigned short port, void *val, unsigned int count)
{
	int r = emulator_pio_in_out(vcpu, size, port, val, count, true);
	if (r)
		trace_kvm_pio(KVM_PIO_IN, port, size, count, val);

	return r;
}

static void complete_emulator_pio_in(struct kvm_vcpu *vcpu, void *val)
{
	int size = vcpu->arch.pio.size;
	unsigned int count = vcpu->arch.pio.count;
	memcpy(val, vcpu->arch.pio_data, size * count);
	trace_kvm_pio(KVM_PIO_IN, vcpu->arch.pio.port, size, count, vcpu->arch.pio_data);
	vcpu->arch.pio.count = 0;
}

static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
				    int size, unsigned short port, void *val,
				    unsigned int count)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	if (vcpu->arch.pio.count) {
		/*
		 * Complete a previous iteration that required userspace I/O.
		 * Note, @count isn't guaranteed to match pio.count as userspace
		 * can modify ECX before rerunning the vCPU.  Ignore any such
		 * shenanigans as KVM doesn't support modifying the rep count,
		 * and the emulator ensures @count doesn't overflow the buffer.
		 */
		complete_emulator_pio_in(vcpu, val);
		return 1;
	}

	return emulator_pio_in(vcpu, size, port, val, count);
}

static int emulator_pio_out(struct kvm_vcpu *vcpu, int size,
			    unsigned short port, const void *val,
			    unsigned int count)
{
	trace_kvm_pio(KVM_PIO_OUT, port, size, count, val);
	return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
}

static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
				     int size, unsigned short port,
				     const void *val, unsigned int count)
{
	return emulator_pio_out(emul_to_vcpu(ctxt), size, port, val, count);
}

static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	return kvm_x86_call(get_segment_base)(vcpu, seg);
}

static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
{
	kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
}

static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
{
	if (!need_emulate_wbinvd(vcpu))
		return X86EMUL_CONTINUE;

	if (kvm_x86_call(has_wbinvd_exit)()) {
		int cpu = get_cpu();

		cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
		wbinvd_on_cpus_mask(vcpu->arch.wbinvd_dirty_mask);
		put_cpu();
		cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
	} else
		wbinvd();
	return X86EMUL_CONTINUE;
}

int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
{
	kvm_emulate_wbinvd_noskip(vcpu);
	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);

static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
{
	kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt));
}

static unsigned long emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr)
{
	return kvm_get_dr(emul_to_vcpu(ctxt), dr);
}

static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
			   unsigned long value)
{

	return kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
}

static u64 mk_cr_64(u64 curr_cr, u32 new_val)
{
	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
}

static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	unsigned long value;

	switch (cr) {
	case 0:
		value = kvm_read_cr0(vcpu);
		break;
	case 2:
		value = vcpu->arch.cr2;
		break;
	case 3:
		value = kvm_read_cr3(vcpu);
		break;
	case 4:
		value = kvm_read_cr4(vcpu);
		break;
	case 8:
		value = kvm_get_cr8(vcpu);
		break;
	default:
		kvm_err("%s: unexpected cr %u\n", __func__, cr);
		return 0;
	}

	return value;
}

static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	int res = 0;

	switch (cr) {
	case 0:
		res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
		break;
	case 2:
		vcpu->arch.cr2 = val;
		break;
	case 3:
		res = kvm_set_cr3(vcpu, val);
		break;
	case 4:
		res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
		break;
	case 8:
		res = kvm_set_cr8(vcpu, val);
		break;
	default:
		kvm_err("%s: unexpected cr %u\n", __func__, cr);
		res = -1;
	}

	return res;
}

static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
{
	return kvm_x86_call(get_cpl)(emul_to_vcpu(ctxt));
}

static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
	kvm_x86_call(get_gdt)(emul_to_vcpu(ctxt), dt);
}

static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
	kvm_x86_call(get_idt)(emul_to_vcpu(ctxt), dt);
}

static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
	kvm_x86_call(set_gdt)(emul_to_vcpu(ctxt), dt);
}

static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
	kvm_x86_call(set_idt)(emul_to_vcpu(ctxt), dt);
}

static unsigned long emulator_get_cached_segment_base(
	struct x86_emulate_ctxt *ctxt, int seg)
{
	return get_segment_base(emul_to_vcpu(ctxt), seg);
}

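/*
 * Translate KVM's cached segment state into the emulator's descriptor
 * layout (a desc_struct plus the upper base bits for 64-bit segments).
 */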
static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
				 struct desc_struct *desc, u32 *base3,
				 int seg)
{
	struct kvm_segment var;

	kvm_get_segment(emul_to_vcpu(ctxt), &var, seg);
	*selector = var.selector;

	if (var.unusable) {
		memset(desc, 0, sizeof(*desc));
		if (base3)
			*base3 = 0;
		return false;
	}

	if (var.g)
		var.limit >>= 12;
	set_desc_limit(desc, var.limit);
	set_desc_base(desc, (unsigned long)var.base);
#ifdef CONFIG_X86_64
	if (base3)
		*base3 = var.base >> 32;
#endif
	desc->type = var.type;
	desc->s = var.s;
	desc->dpl = var.dpl;
	desc->p = var.present;
	desc->avl = var.avl;
	desc->l = var.l;
	desc->d = var.db;
	desc->g = var.g;

	return true;
}

static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
				 struct desc_struct *desc, u32 base3,
				 int seg)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	struct kvm_segment var;

	var.selector = selector;
	var.base = get_desc_base(desc);
#ifdef CONFIG_X86_64
	var.base |= ((u64)base3) << 32;
#endif
	var.limit = get_desc_limit(desc);
	if (desc->g)
		var.limit = (var.limit << 12) | 0xfff;
	var.type = desc->type;
	var.dpl = desc->dpl;
	var.db = desc->d;
	var.s = desc->s;
	var.l = desc->l;
	var.g = desc->g;
	var.avl = desc->avl;
	var.present = desc->p;
	var.unusable = !var.present;
	var.padding = 0;

	kvm_set_segment(vcpu, &var, seg);
	return;
}

static int emulator_get_msr_with_filter(struct x86_emulate_ctxt *ctxt,
					u32 msr_index, u64 *pdata)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	int r;

	r = kvm_get_msr_with_filter(vcpu, msr_index, pdata);
	if (r < 0)
		return X86EMUL_UNHANDLEABLE;

	if (r) {
		if (kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_RDMSR, 0,
				       complete_emulated_rdmsr, r))
			return X86EMUL_IO_NEEDED;

		trace_kvm_msr_read_ex(msr_index);
		return X86EMUL_PROPAGATE_FAULT;
	}

	trace_kvm_msr_read(msr_index, *pdata);
	return X86EMUL_CONTINUE;
}

static int emulator_set_msr_with_filter(struct x86_emulate_ctxt *ctxt,
					u32 msr_index, u64 data)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	int r;

	r = kvm_set_msr_with_filter(vcpu, msr_index, data);
	if (r < 0)
		return X86EMUL_UNHANDLEABLE;

	if (r) {
		if (kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_WRMSR, data,
				       complete_emulated_msr_access, r))
			return X86EMUL_IO_NEEDED;

		trace_kvm_msr_write_ex(msr_index, data);
		return X86EMUL_PROPAGATE_FAULT;
	}

	trace_kvm_msr_write(msr_index, data);
	return X86EMUL_CONTINUE;
}

static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
			    u32 msr_index, u64 *pdata)
{
	return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);
}

static int emulator_check_rdpmc_early(struct x86_emulate_ctxt *ctxt, u32 pmc)
{
	return kvm_pmu_check_rdpmc_early(emul_to_vcpu(ctxt), pmc);
}

static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
			     u32 pmc, u64 *pdata)
{
	return kvm_pmu_rdpmc(emul_to_vcpu(ctxt), pmc, pdata);
}

static void emulator_halt(struct x86_emulate_ctxt *ctxt)
{
	emul_to_vcpu(ctxt)->arch.halt_request = 1;
}

static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
			      struct x86_instruction_info *info,
			      enum x86_intercept_stage stage)
{
	return kvm_x86_call(check_intercept)(emul_to_vcpu(ctxt), info, stage,
					     &ctxt->exception);
}

static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
			       u32 *eax, u32 *ebx, u32 *ecx, u32 *edx,
			       bool exact_only)
{
	return kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx, exact_only);
}

static bool emulator_guest_has_movbe(struct x86_emulate_ctxt *ctxt)
{
	return guest_cpu_cap_has(emul_to_vcpu(ctxt), X86_FEATURE_MOVBE);
}

static bool emulator_guest_has_fxsr(struct x86_emulate_ctxt *ctxt)
{
	return guest_cpu_cap_has(emul_to_vcpu(ctxt), X86_FEATURE_FXSR);
}

static bool emulator_guest_has_rdpid(struct x86_emulate_ctxt *ctxt)
{
	return guest_cpu_cap_has(emul_to_vcpu(ctxt), X86_FEATURE_RDPID);
}

static bool emulator_guest_cpuid_is_intel_compatible(struct x86_emulate_ctxt *ctxt)
{
	return guest_cpuid_is_intel_compatible(emul_to_vcpu(ctxt));
}

static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
{
	return kvm_register_read_raw(emul_to_vcpu(ctxt), reg);
}

static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val)
{
	kvm_register_write_raw(emul_to_vcpu(ctxt), reg, val);
}

static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
{
	kvm_x86_call(set_nmi_mask)(emul_to_vcpu(ctxt), masked);
}

static bool emulator_is_smm(struct x86_emulate_ctxt *ctxt)
{
	return is_smm(emul_to_vcpu(ctxt));
}

static bool emulator_is_guest_mode(struct x86_emulate_ctxt *ctxt)
{
	return is_guest_mode(emul_to_vcpu(ctxt));
}

#ifndef CONFIG_KVM_SMM
static int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
{
	WARN_ON_ONCE(1);
	return X86EMUL_UNHANDLEABLE;
}
#endif

static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt)
{
	kvm_make_request(KVM_REQ_TRIPLE_FAULT, emul_to_vcpu(ctxt));
}

static int emulator_set_xcr(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr)
{
	return __kvm_set_xcr(emul_to_vcpu(ctxt), index, xcr);
}

static void emulator_vm_bugged(struct x86_emulate_ctxt *ctxt)
{
	struct kvm *kvm = emul_to_vcpu(ctxt)->kvm;

	if (!kvm->vm_bugged)
		kvm_vm_bugged(kvm);
}

static gva_t emulator_get_untagged_addr(struct x86_emulate_ctxt *ctxt,
					gva_t addr, unsigned int flags)
{
	if (!kvm_x86_ops.get_untagged_addr)
		return addr;

	return kvm_x86_call(get_untagged_addr)(emul_to_vcpu(ctxt),
					       addr, flags);
}

static bool emulator_is_canonical_addr(struct x86_emulate_ctxt *ctxt,
				       gva_t addr, unsigned int flags)
{
	return !is_noncanonical_address(addr, emul_to_vcpu(ctxt), flags);
}

static const struct x86_emulate_ops emulate_ops = {
	.vm_bugged           = emulator_vm_bugged,
	.read_gpr            = emulator_read_gpr,
	.write_gpr           = emulator_write_gpr,
	.read_std            = emulator_read_std,
	.write_std           = emulator_write_std,
	.fetch               = kvm_fetch_guest_virt,
	.read_emulated       = emulator_read_emulated,
	.write_emulated      = emulator_write_emulated,
	.cmpxchg_emulated    = emulator_cmpxchg_emulated,
	.invlpg              = emulator_invlpg,
	.pio_in_emulated     = emulator_pio_in_emulated,
	.pio_out_emulated    = emulator_pio_out_emulated,
	.get_segment         = emulator_get_segment,
	.set_segment         = emulator_set_segment,
	.get_cached_segment_base = emulator_get_cached_segment_base,
	.get_gdt             = emulator_get_gdt,
	.get_idt             = emulator_get_idt,
	.set_gdt             = emulator_set_gdt,
	.set_idt             = emulator_set_idt,
	.get_cr              = emulator_get_cr,
	.set_cr              = emulator_set_cr,
	.cpl                 = emulator_get_cpl,
	.get_dr              = emulator_get_dr,
	.set_dr              = emulator_set_dr,
	.set_msr_with_filter = emulator_set_msr_with_filter,
	.get_msr_with_filter = emulator_get_msr_with_filter,
	.get_msr             = emulator_get_msr,
	.check_rdpmc_early   = emulator_check_rdpmc_early,
	.read_pmc            = emulator_read_pmc,
	.halt                = emulator_halt,
	.wbinvd              = emulator_wbinvd,
	.fix_hypercall       = emulator_fix_hypercall,
	.intercept           = emulator_intercept,
	.get_cpuid           = emulator_get_cpuid,
	.guest_has_movbe     = emulator_guest_has_movbe,
	.guest_has_fxsr      = emulator_guest_has_fxsr,
	.guest_has_rdpid     = emulator_guest_has_rdpid,
	.guest_cpuid_is_intel_compatible = emulator_guest_cpuid_is_intel_compatible,
	.set_nmi_mask        = emulator_set_nmi_mask,
	.is_smm              = emulator_is_smm,
	.is_guest_mode       = emulator_is_guest_mode,
	.leave_smm           = emulator_leave_smm,
	.triple_fault        = emulator_triple_fault,
	.set_xcr             = emulator_set_xcr,
	.get_untagged_addr   = emulator_get_untagged_addr,
	.is_canonical_addr   = emulator_is_canonical_addr,
};

static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
{
	u32 int_shadow = kvm_x86_call(get_interrupt_shadow)(vcpu);
	/*
	 * An "sti; sti;" sequence only disables interrupts for the first
	 * instruction.  So, if the last instruction, be it emulated or
	 * not, left the system with the INT_STI flag enabled, it
	 * means that the last instruction was an STI.  We should not
	 * leave the flag on in this case.  The same goes for MOV SS.
	 */
	if (int_shadow & mask)
		mask = 0;
	if (unlikely(int_shadow || mask)) {
		kvm_x86_call(set_interrupt_shadow)(vcpu, mask);
		if (!mask)
			kvm_make_request(KVM_REQ_EVENT, vcpu);
	}
}

static void inject_emulated_exception(struct kvm_vcpu *vcpu)
{
	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;

	if (ctxt->exception.vector == PF_VECTOR)
		kvm_inject_emulated_page_fault(vcpu, &ctxt->exception);
	else if (ctxt->exception.error_code_valid)
		kvm_queue_exception_e(vcpu, ctxt->exception.vector,
				      ctxt->exception.error_code);
	else
		kvm_queue_exception(vcpu, ctxt->exception.vector);
}

static struct x86_emulate_ctxt *alloc_emulate_ctxt(struct kvm_vcpu *vcpu)
{
	struct x86_emulate_ctxt *ctxt;

	ctxt = kmem_cache_zalloc(x86_emulator_cache, GFP_KERNEL_ACCOUNT);
	if (!ctxt) {
		pr_err("failed to allocate vcpu's emulator\n");
		return NULL;
	}

	ctxt->vcpu = vcpu;
	ctxt->ops = &emulate_ops;
	vcpu->arch.emulate_ctxt = ctxt;

	return ctxt;
}

static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
{
	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
	int cs_db, cs_l;

	kvm_x86_call(get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);

	ctxt->gpa_available = false;
	ctxt->eflags = kvm_get_rflags(vcpu);
	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;

	ctxt->eip = kvm_rip_read(vcpu);
	ctxt->mode = (!is_protmode(vcpu))		? X86EMUL_MODE_REAL :
		     (ctxt->eflags & X86_EFLAGS_VM)	? X86EMUL_MODE_VM86 :
		     (cs_l && is_long_mode(vcpu))	? X86EMUL_MODE_PROT64 :
		     cs_db				? X86EMUL_MODE_PROT32 :
							  X86EMUL_MODE_PROT16;
	ctxt->interruptibility = 0;
	ctxt->have_exception = false;
	ctxt->exception.vector = -1;
	ctxt->perm_ok = false;

	init_decode_cache(ctxt);
	vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
}

void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
{
	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
	int ret;

	init_emulate_ctxt(vcpu);

	ctxt->op_bytes = 2;
	ctxt->ad_bytes = 2;
	ctxt->_eip = ctxt->eip + inc_eip;
	ret = emulate_int_real(ctxt, irq);

	if (ret != X86EMUL_CONTINUE) {
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
	} else {
		ctxt->eip = ctxt->_eip;
		kvm_rip_write(vcpu, ctxt->eip);
		kvm_set_rflags(vcpu, ctxt->eflags);
	}
}
EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);

static void prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data,
					   u8 ndata, u8 *insn_bytes, u8 insn_size)
{
	struct kvm_run *run = vcpu->run;
	u64 info[5];
	u8 info_start;

	/*
	 * Zero the whole array used to retrieve the exit info, as casting to
	 * u32 for select entries will leave some chunks uninitialized.
	 */
	memset(&info, 0, sizeof(info));

	kvm_x86_call(get_exit_info)(vcpu, (u32 *)&info[0], &info[1], &info[2],
				    (u32 *)&info[3], (u32 *)&info[4]);

	run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	run->emulation_failure.suberror = KVM_INTERNAL_ERROR_EMULATION;

	/*
	 * There's currently space for 13 entries, but 5 are used for the exit
	 * reason and info.  Restrict to 4 to reduce the maintenance burden
	 * when expanding kvm_run.emulation_failure in the future.
	 */
	if (WARN_ON_ONCE(ndata > 4))
		ndata = 4;

	/* Always include the flags as a 'data' entry. */
	info_start = 1;
	run->emulation_failure.flags = 0;

	if (insn_size) {
		BUILD_BUG_ON((sizeof(run->emulation_failure.insn_size) +
			      sizeof(run->emulation_failure.insn_bytes) != 16));
		info_start += 2;
		run->emulation_failure.flags |=
			KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES;
		run->emulation_failure.insn_size = insn_size;
		memset(run->emulation_failure.insn_bytes, 0x90,
		       sizeof(run->emulation_failure.insn_bytes));
		memcpy(run->emulation_failure.insn_bytes, insn_bytes, insn_size);
	}

	memcpy(&run->internal.data[info_start], info, sizeof(info));
	memcpy(&run->internal.data[info_start + ARRAY_SIZE(info)], data,
	       ndata * sizeof(data[0]));

	run->emulation_failure.ndata = info_start + ARRAY_SIZE(info) + ndata;
}

static void prepare_emulation_ctxt_failure_exit(struct kvm_vcpu *vcpu)
{
	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;

	prepare_emulation_failure_exit(vcpu, NULL, 0, ctxt->fetch.data,
				       ctxt->fetch.end - ctxt->fetch.data);
}

void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data,
					  u8 ndata)
{
	prepare_emulation_failure_exit(vcpu, data, ndata, NULL, 0);
}
EXPORT_SYMBOL_GPL(__kvm_prepare_emulation_failure_exit);

void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu)
{
	__kvm_prepare_emulation_failure_exit(vcpu, NULL, 0);
}
EXPORT_SYMBOL_GPL(kvm_prepare_emulation_failure_exit);

void kvm_prepare_event_vectoring_exit(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	u32 reason, intr_info, error_code;
	struct kvm_run *run = vcpu->run;
	u64 info1, info2;
	int ndata = 0;

	kvm_x86_call(get_exit_info)(vcpu, &reason, &info1, &info2,
				    &intr_info, &error_code);

	run->internal.data[ndata++] = info2;
	run->internal.data[ndata++] = reason;
	run->internal.data[ndata++] = info1;
	run->internal.data[ndata++] = gpa;
	run->internal.data[ndata++] = vcpu->arch.last_vmentry_cpu;

	run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
	run->internal.ndata = ndata;
}
EXPORT_SYMBOL_GPL(kvm_prepare_event_vectoring_exit);

static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
{
	struct kvm *kvm = vcpu->kvm;

	++vcpu->stat.insn_emulation_fail;
	trace_kvm_emulate_insn_failed(vcpu);

	if (emulation_type & EMULTYPE_VMWARE_GP) {
		kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
		return 1;
	}

	if (kvm->arch.exit_on_emulation_error ||
	    (emulation_type & EMULTYPE_SKIP)) {
		prepare_emulation_ctxt_failure_exit(vcpu);
		return 0;
	}

	kvm_queue_exception(vcpu, UD_VECTOR);

	if (!is_guest_mode(vcpu) && kvm_x86_call(get_cpl)(vcpu) == 0) {
		prepare_emulation_ctxt_failure_exit(vcpu);
		return 0;
	}

	return 1;
}

static bool kvm_unprotect_and_retry_on_failure(struct kvm_vcpu *vcpu,
					       gpa_t cr2_or_gpa,
					       int emulation_type)
{
	if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF))
		return false;

	/*
	 * If the failed instruction faulted on an access to page tables that
	 * are used to translate any part of the instruction, KVM can't resolve
	 * the issue by unprotecting the gfn, as zapping the shadow page will
	 * result in the instruction taking a !PRESENT page fault and thus put
	 * the vCPU into an infinite loop of page faults.  E.g. KVM will create
	 * a SPTE and write-protect the gfn to resolve the !PRESENT fault, and
	 * then zap the SPTE to unprotect the gfn, and then do it all over
	 * again.  Report the error to userspace.
	 */
	if (emulation_type & EMULTYPE_WRITE_PF_TO_SP)
		return false;

	/*
	 * If emulation may have been triggered by a write to a shadowed page
	 * table, unprotect the gfn (zap any relevant SPTEs) and re-enter the
	 * guest to let the CPU re-execute the instruction in the hope that the
	 * CPU can cleanly execute the instruction that KVM failed to emulate.
	 */
	__kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa, true);

	/*
	 * Retry even if _this_ vCPU didn't unprotect the gfn, as it's possible
	 * all SPTEs were already zapped by a different task.  The alternative
	 * is to report the error to userspace and likely terminate the guest,
	 * and the last_retry_{eip,addr} checks will prevent retrying the page
	 * fault indefinitely, i.e. there's nothing to lose by retrying.
	 */
	return true;
}

static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
static int complete_emulated_pio(struct kvm_vcpu *vcpu);

static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
				unsigned long *db)
{
	u32 dr6 = 0;
	int i;
	u32 enable, rwlen;

	enable = dr7;
	rwlen = dr7 >> 16;
	for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4)
		if ((enable & 3) && (rwlen & 15) == type && db[i] == addr)
			dr6 |= (1 << i);
	return dr6;
}

static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		kvm_run->debug.arch.dr6 = DR6_BS | DR6_ACTIVE_LOW;
		kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu);
		kvm_run->debug.arch.exception = DB_VECTOR;
		kvm_run->exit_reason = KVM_EXIT_DEBUG;
		return 0;
	}
	kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BS);
	return 1;
}

int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	unsigned long rflags = kvm_x86_call(get_rflags)(vcpu);
	int r;

	r = kvm_x86_call(skip_emulated_instruction)(vcpu);
	if (unlikely(!r))
		return 0;

	kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.INSTRUCTIONS_RETIRED);

	/*
	 * rflags is the old, "raw" value of the flags.  The new value has
	 * not been saved yet.
	 *
	 * This is correct even for TF set by the guest, because "the
	 * processor will not generate this exception after the instruction
	 * that sets the TF flag".
	 */
	if (unlikely(rflags & X86_EFLAGS_TF))
		r = kvm_vcpu_do_singlestep(vcpu);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);

static bool kvm_is_code_breakpoint_inhibited(struct kvm_vcpu *vcpu)
{
	if (kvm_get_rflags(vcpu) & X86_EFLAGS_RF)
		return true;

	/*
	 * Intel compatible CPUs inhibit code #DBs when MOV/POP SS blocking is
	 * active, but AMD compatible CPUs do not.
	 */
	if (!guest_cpuid_is_intel_compatible(vcpu))
		return false;

	return kvm_x86_call(get_interrupt_shadow)(vcpu) & KVM_X86_SHADOW_INT_MOV_SS;
}

static bool kvm_vcpu_check_code_breakpoint(struct kvm_vcpu *vcpu,
					   int emulation_type, int *r)
{
	WARN_ON_ONCE(emulation_type & EMULTYPE_NO_DECODE);

	/*
	 * Do not check for code breakpoints if hardware has already done the
	 * checks, as inferred from the emulation type.  On NO_DECODE and SKIP,
	 * the instruction has passed all exception checks, and all intercepted
	 * exceptions that trigger emulation have lower priority than code
	 * breakpoints, i.e. the fact that the intercepted exception occurred
	 * means any code breakpoints have already been serviced.
	 *
	 * Note, KVM needs to check for code #DBs on EMULTYPE_TRAP_UD_FORCED as
	 * hardware has checked the RIP of the magic prefix, but not the RIP of
	 * the instruction being emulated.  The intent of forced emulation is
	 * to behave as if KVM intercepted the instruction without an exception
	 * and without a prefix.
	 */
	if (emulation_type & (EMULTYPE_NO_DECODE | EMULTYPE_SKIP |
			      EMULTYPE_TRAP_UD | EMULTYPE_VMWARE_GP | EMULTYPE_PF))
		return false;

	if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
	    (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
		struct kvm_run *kvm_run = vcpu->run;
		unsigned long eip = kvm_get_linear_rip(vcpu);
		u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
					       vcpu->arch.guest_debug_dr7,
					       vcpu->arch.eff_db);

		if (dr6 != 0) {
			kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW;
			kvm_run->debug.arch.pc = eip;
			kvm_run->debug.arch.exception = DB_VECTOR;
			kvm_run->exit_reason = KVM_EXIT_DEBUG;
			*r = 0;
			return true;
		}
	}

	if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) &&
	    !kvm_is_code_breakpoint_inhibited(vcpu)) {
		unsigned long eip = kvm_get_linear_rip(vcpu);
		u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
					       vcpu->arch.dr7,
					       vcpu->arch.db);

		if (dr6 != 0) {
			kvm_queue_exception_p(vcpu, DB_VECTOR, dr6);
			*r = 1;
			return true;
		}
	}

	return false;
}

static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->opcode_len) {
	case 1:
		switch (ctxt->b) {
		case 0xe4:	/* IN */
		case 0xe5:
		case 0xec:
		case 0xed:
		case 0xe6:	/* OUT */
		case 0xe7:
		case 0xee:
		case 0xef:
		case 0x6c:	/* INS */
		case 0x6d:
		case 0x6e:	/* OUTS */
		case 0x6f:
			return true;
		}
		break;
	case 2:
		switch (ctxt->b) {
		case 0x33:	/* RDPMC */
			return true;
		}
		break;
	}

	return false;
}

/*
 * Decode an instruction for emulation.  The caller is responsible for handling
 * code breakpoints.  Note, manually detecting code breakpoints is unnecessary
 * (and wrong) when emulating on an intercepted fault-like exception[*], as
 * code breakpoints have higher priority and thus have already been done by
 * hardware.
 *
 * [*] Except #MC, which is higher priority, but KVM should never emulate in
 *     response to a machine check.
 */
int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
				    void *insn, int insn_len)
{
	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
	int r;

	init_emulate_ctxt(vcpu);

	r = x86_decode_insn(ctxt, insn, insn_len, emulation_type);

	trace_kvm_emulate_insn_start(vcpu);
	++vcpu->stat.insn_emulation;

	return r;
}
EXPORT_SYMBOL_GPL(x86_decode_emulated_instruction);

int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			    int emulation_type, void *insn, int insn_len)
{
	int r;
	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
	bool writeback = true;

	if ((emulation_type & EMULTYPE_ALLOW_RETRY_PF) &&
	    (WARN_ON_ONCE(is_guest_mode(vcpu)) ||
	     WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF))))
		emulation_type &= ~EMULTYPE_ALLOW_RETRY_PF;

	r = kvm_check_emulate_insn(vcpu, emulation_type, insn, insn_len);
	if (r != X86EMUL_CONTINUE) {
		if (r == X86EMUL_RETRY_INSTR || r == X86EMUL_PROPAGATE_FAULT)
			return 1;

		if (kvm_unprotect_and_retry_on_failure(vcpu, cr2_or_gpa,
						       emulation_type))
			return 1;

		if (r == X86EMUL_UNHANDLEABLE_VECTORING) {
			kvm_prepare_event_vectoring_exit(vcpu, cr2_or_gpa);
			return 0;
		}

		WARN_ON_ONCE(r != X86EMUL_UNHANDLEABLE);
		return handle_emulation_failure(vcpu, emulation_type);
	}

	vcpu->arch.l1tf_flush_l1d = true;

	if (!(emulation_type & EMULTYPE_NO_DECODE)) {
		kvm_clear_exception_queue(vcpu);

		/*
		 * Return immediately if RIP hits a code breakpoint, such #DBs
		 * are fault-like and are higher priority than any faults on
		 * the code fetch itself.
		 */
		if (kvm_vcpu_check_code_breakpoint(vcpu, emulation_type, &r))
			return r;

		r = x86_decode_emulated_instruction(vcpu, emulation_type,
						    insn, insn_len);
		if (r != EMULATION_OK) {
			if ((emulation_type & EMULTYPE_TRAP_UD) ||
			    (emulation_type & EMULTYPE_TRAP_UD_FORCED)) {
				kvm_queue_exception(vcpu, UD_VECTOR);
				return 1;
			}
			if (kvm_unprotect_and_retry_on_failure(vcpu, cr2_or_gpa,
							       emulation_type))
				return 1;

			if (ctxt->have_exception &&
			    !(emulation_type & EMULTYPE_SKIP)) {
				/*
				 * #UD should result in just EMULATION_FAILED, and trap-like
				 * exception should not be encountered during decode.
				 */
				WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR ||
					     exception_type(ctxt->exception.vector) == EXCPT_TRAP);
				inject_emulated_exception(vcpu);
				return 1;
			}
			return handle_emulation_failure(vcpu, emulation_type);
		}
	}

	if ((emulation_type & EMULTYPE_VMWARE_GP) &&
	    !is_vmware_backdoor_opcode(ctxt)) {
		kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
		return 1;
	}

	/*
	 * EMULTYPE_SKIP without EMULTYPE_COMPLETE_USER_EXIT is intended for
	 * use *only* by vendor callbacks for kvm_skip_emulated_instruction().
	 * The caller is responsible for updating interruptibility state and
	 * injecting single-step #DBs.
	 */
	if (emulation_type & EMULTYPE_SKIP) {
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			ctxt->eip = (u32)ctxt->_eip;
		else
			ctxt->eip = ctxt->_eip;

		if (emulation_type & EMULTYPE_COMPLETE_USER_EXIT) {
			r = 1;
			goto writeback;
		}

		kvm_rip_write(vcpu, ctxt->eip);
		if (ctxt->eflags & X86_EFLAGS_RF)
			kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF);
		return 1;
	}

	/*
	 * If emulation was caused by a write-protection #PF on a non-page_table
	 * writing instruction, try to unprotect the gfn, i.e. zap shadow pages,
	 * and retry the instruction, as the vCPU is likely no longer using the
	 * gfn as a page table.
	 */
	if ((emulation_type & EMULTYPE_ALLOW_RETRY_PF) &&
	    !x86_page_table_writing_insn(ctxt) &&
	    kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa))
		return 1;

	/*
	 * This is needed for the VMware backdoor interface to work since it
	 * changes register values during the I/O operation.
	 */
	if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
		vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
		emulator_invalidate_register_cache(ctxt);
	}

restart:
	if (emulation_type & EMULTYPE_PF) {
		/* Save the faulting GPA (cr2) in the address field */
		ctxt->exception.address = cr2_or_gpa;

		/* With shadow page tables, cr2 contains a GVA or nGPA. */
		if (vcpu->arch.mmu->root_role.direct) {
			ctxt->gpa_available = true;
			ctxt->gpa_val = cr2_or_gpa;
		}
	} else {
		/* Sanitize the address out of an abundance of paranoia. */
		ctxt->exception.address = 0;
	}

	r = x86_emulate_insn(ctxt);

	if (r == EMULATION_INTERCEPTED)
		return 1;

	if (r == EMULATION_FAILED) {
		if (kvm_unprotect_and_retry_on_failure(vcpu, cr2_or_gpa,
						       emulation_type))
			return 1;

		return handle_emulation_failure(vcpu, emulation_type);
	}

	if (ctxt->have_exception) {
		WARN_ON_ONCE(vcpu->mmio_needed && !vcpu->mmio_is_write);
		vcpu->mmio_needed = false;
		r = 1;
		inject_emulated_exception(vcpu);
	} else if (vcpu->arch.pio.count) {
		if (!vcpu->arch.pio.in) {
			/* FIXME: return into emulator if single-stepping.  */
			vcpu->arch.pio.count = 0;
		} else {
			writeback = false;
			vcpu->arch.complete_userspace_io = complete_emulated_pio;
		}
		r = 0;
	} else if (vcpu->mmio_needed) {
		++vcpu->stat.mmio_exits;

		if (!vcpu->mmio_is_write)
			writeback = false;
		r = 0;
		vcpu->arch.complete_userspace_io = complete_emulated_mmio;
	} else if (vcpu->arch.complete_userspace_io) {
		writeback = false;
		r = 0;
	} else if (r == EMULATION_RESTART)
		goto restart;
	else
		r = 1;

writeback:
	if (writeback) {
		unsigned long rflags = kvm_x86_call(get_rflags)(vcpu);
		toggle_interruptibility(vcpu, ctxt->interruptibility);
		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;

		/*
		 * Note, EXCPT_DB is assumed to be fault-like as the emulator
		 * only supports code breakpoints and general detect #DB, both
		 * of which are fault-like.
		 */
		if (!ctxt->have_exception ||
		    exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
			kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.INSTRUCTIONS_RETIRED);
			if (ctxt->is_branch)
				kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED);
			kvm_rip_write(vcpu, ctxt->eip);
			if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
				r = kvm_vcpu_do_singlestep(vcpu);
			kvm_x86_call(update_emulated_instruction)(vcpu);
			__kvm_set_rflags(vcpu, ctxt->eflags);
		}

		/*
		 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
		 * do nothing, and it will be requested again as soon as
		 * the shadow expires.  But we still need to check here,
		 * because POPF has no interrupt shadow.
		 */
		if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF))
			kvm_make_request(KVM_REQ_EVENT, vcpu);
	} else
		vcpu->arch.emulate_regs_need_sync_to_vcpu = true;

	return r;
}

int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type)
{
	return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
}
EXPORT_SYMBOL_GPL(kvm_emulate_instruction);

int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
					void *insn, int insn_len)
{
	return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len);
}
EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer);

static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pio.count = 0;
	return 1;
}

static int complete_fast_pio_out(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pio.count = 0;

	if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.cui_linear_rip)))
		return 1;

	return kvm_skip_emulated_instruction(vcpu);
}

static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
			    unsigned short port)
{
	unsigned long val = kvm_rax_read(vcpu);
	int ret = emulator_pio_out(vcpu, size, port, &val, 1);

	if (ret)
		return ret;

	/*
	 * Workaround userspace that relies on old KVM behavior of %rip being
	 * incremented prior to exiting to userspace to handle "OUT 0x7e".
	 */
	if (port == 0x7e &&
	    kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) {
		vcpu->arch.complete_userspace_io =
			complete_fast_pio_out_port_0x7e;
		kvm_skip_emulated_instruction(vcpu);
	} else {
		vcpu->arch.cui_linear_rip = kvm_get_linear_rip(vcpu);
		vcpu->arch.complete_userspace_io = complete_fast_pio_out;
	}
	return 0;
}

static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
{
	unsigned long val;

	/* We should only ever be called with arch.pio.count equal to 1 */
	BUG_ON(vcpu->arch.pio.count != 1);

	if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.cui_linear_rip))) {
		vcpu->arch.pio.count = 0;
		return 1;
	}

	/* For size less than 4 we merge, else we zero extend */
	val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0;

	complete_emulator_pio_in(vcpu, &val);
	kvm_rax_write(vcpu, val);

	return kvm_skip_emulated_instruction(vcpu);
}

static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
			   unsigned short port)
{
	unsigned long val;
	int ret;

	/* For size less than 4 we merge, else we zero extend */
	val = (size < 4) ? kvm_rax_read(vcpu) : 0;

	ret = emulator_pio_in(vcpu, size, port, &val, 1);
	if (ret) {
		kvm_rax_write(vcpu, val);
		return ret;
	}

	vcpu->arch.cui_linear_rip = kvm_get_linear_rip(vcpu);
	vcpu->arch.complete_userspace_io = complete_fast_pio_in;

	return 0;
}

int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in)
{
	int ret;

	if (in)
		ret = kvm_fast_pio_in(vcpu, size, port);
	else
		ret = kvm_fast_pio_out(vcpu, size, port);
	return ret && kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_fast_pio);

static int kvmclock_cpu_down_prep(unsigned int cpu)
{
	__this_cpu_write(cpu_tsc_khz, 0);
	return 0;
}

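/*
 * Refresh this CPU's cached cpu_tsc_khz, either from the cpufreq transition
 * data passed in @data or, if none was provided, by querying cpufreq.
 */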
static void tsc_khz_changed(void *data)
9340
{
9341
struct cpufreq_freqs *freq = data;
9342
unsigned long khz;
9343
9344
WARN_ON_ONCE(boot_cpu_has(X86_FEATURE_CONSTANT_TSC));
9345
9346
if (data)
9347
khz = freq->new;
9348
else
9349
khz = cpufreq_quick_get(raw_smp_processor_id());
9350
if (!khz)
9351
khz = tsc_khz;
9352
__this_cpu_write(cpu_tsc_khz, khz);
9353
}
9354
9355
#ifdef CONFIG_X86_64
9356
static void kvm_hyperv_tsc_notifier(void)
9357
{
9358
struct kvm *kvm;
9359
int cpu;
9360
9361
mutex_lock(&kvm_lock);
9362
list_for_each_entry(kvm, &vm_list, vm_list)
9363
kvm_make_mclock_inprogress_request(kvm);
9364
9365
/* no guest entries from this point */
9366
hyperv_stop_tsc_emulation();
9367
9368
/* TSC frequency always matches when on Hyper-V */
9369
if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
9370
for_each_present_cpu(cpu)
9371
per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
9372
}
9373
kvm_caps.max_guest_tsc_khz = tsc_khz;
9374
9375
list_for_each_entry(kvm, &vm_list, vm_list) {
9376
__kvm_start_pvclock_update(kvm);
9377
pvclock_update_vm_gtod_copy(kvm);
9378
kvm_end_pvclock_update(kvm);
9379
}
9380
9381
mutex_unlock(&kvm_lock);
9382
}
9383
#endif
9384
9385
static void __kvmclock_cpufreq_notifier(struct cpufreq_freqs *freq, int cpu)
9386
{
9387
struct kvm *kvm;
9388
struct kvm_vcpu *vcpu;
9389
int send_ipi = 0;
9390
unsigned long i;
9391
9392
/*
9393
* We allow guests to temporarily run on slowing clocks,
9394
* provided we notify them after, or to run on accelerating
9395
* clocks, provided we notify them before. Thus time never
9396
* goes backwards.
9397
*
9398
* However, we have a problem. We can't atomically update
9399
* the frequency of a given CPU from this function; it is
9400
* merely a notifier, which can be called from any CPU.
9401
* Changing the TSC frequency at arbitrary points in time
9402
* requires a recomputation of local variables related to
9403
* the TSC for each VCPU. We must flag these local variables
9404
* to be updated and be sure the update takes place with the
9405
* new frequency before any guests proceed.
9406
*
9407
* Unfortunately, the combination of hotplug CPU and frequency
9408
* change creates an intractable locking scenario; the order
9409
* of when these callouts happen is undefined with respect to
9410
* CPU hotplug, and they can race with each other. As such,
9411
* merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is
9412
* undefined; you can actually have a CPU frequency change take
9413
* place in between the computation of X and the setting of the
9414
* variable. To protect against this problem, all updates of
9415
* the per_cpu tsc_khz variable are done in an interrupt
9416
* protected IPI, and all callers wishing to update the value
9417
* must wait for a synchronous IPI to complete (which is trivial
9418
* if the caller is on the CPU already). This establishes the
9419
* necessary total order on variable updates.
9420
*
9421
* Note that because a guest time update may take place
9422
* anytime after the setting of the VCPU's request bit, the
9423
* correct TSC value must be set before the request. However,
9424
* to ensure the update actually makes it to any guest which
9425
* starts running in hardware virtualization between the set
9426
* and the acquisition of the spinlock, we must also ping the
9427
* CPU after setting the request bit.
9428
*
9429
*/
9430
9431
smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
9432
9433
mutex_lock(&kvm_lock);
9434
list_for_each_entry(kvm, &vm_list, vm_list) {
9435
kvm_for_each_vcpu(i, vcpu, kvm) {
9436
if (vcpu->cpu != cpu)
9437
continue;
9438
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
9439
if (vcpu->cpu != raw_smp_processor_id())
9440
send_ipi = 1;
9441
}
9442
}
9443
mutex_unlock(&kvm_lock);
9444
9445
if (freq->old < freq->new && send_ipi) {
9446
/*
9447
* We upscale the frequency. Must make sure the guest
9448
* doesn't see old kvmclock values while running with
9449
* the new frequency, otherwise we risk the guest seeing
9450
* time go backwards.
9451
*
9452
* In case we update the frequency for another cpu
9453
* (which might be in guest context) send an interrupt
9454
* to kick the cpu out of guest context. Next time
9455
* guest context is entered kvmclock will be updated,
9456
* so the guest will not see stale values.
9457
*/
9458
smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
9459
}
9460
}
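/*
 * Editorial sketch (not part of this file): the "update only via a
 * synchronous IPI" pattern the comment above relies on.  Funneling every
 * write of the per-CPU value through smp_call_function_single() with
 * wait=1 totally orders the updates and guarantees the new value is
 * visible on the target CPU before any caller proceeds, which is what
 * closes the hotplug vs. cpufreq race.  All names below are hypothetical.
 *
 *	static DEFINE_PER_CPU(unsigned long, example_khz);
 *
 *	static void example_write_khz(void *data)	// runs on the target CPU
 *	{
 *		__this_cpu_write(example_khz, *(unsigned long *)data);
 *	}
 *
 *	static void example_update_khz(int cpu, unsigned long khz)
 *	{
 *		// wait=1: block until the target CPU has done the write
 *		smp_call_function_single(cpu, example_write_khz, &khz, 1);
 *	}
 */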
9461
9462
static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
9463
void *data)
9464
{
9465
struct cpufreq_freqs *freq = data;
9466
int cpu;
9467
9468
if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
9469
return 0;
9470
if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
9471
return 0;
9472
9473
for_each_cpu(cpu, freq->policy->cpus)
9474
__kvmclock_cpufreq_notifier(freq, cpu);
9475
9476
return 0;
9477
}
9478
9479
static struct notifier_block kvmclock_cpufreq_notifier_block = {
9480
.notifier_call = kvmclock_cpufreq_notifier
9481
};
9482
9483
static int kvmclock_cpu_online(unsigned int cpu)
9484
{
9485
tsc_khz_changed(NULL);
9486
return 0;
9487
}
9488
9489
static void kvm_timer_init(void)
9490
{
9491
if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
9492
max_tsc_khz = tsc_khz;
9493
9494
if (IS_ENABLED(CONFIG_CPU_FREQ)) {
9495
struct cpufreq_policy *policy;
9496
int cpu;
9497
9498
cpu = get_cpu();
9499
policy = cpufreq_cpu_get(cpu);
9500
if (policy) {
9501
if (policy->cpuinfo.max_freq)
9502
max_tsc_khz = policy->cpuinfo.max_freq;
9503
cpufreq_cpu_put(policy);
9504
}
9505
put_cpu();
9506
}
9507
cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
9508
CPUFREQ_TRANSITION_NOTIFIER);
9509
9510
cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "x86/kvm/clk:online",
9511
kvmclock_cpu_online, kvmclock_cpu_down_prep);
9512
}
9513
}
9514
9515
#ifdef CONFIG_X86_64
9516
static void pvclock_gtod_update_fn(struct work_struct *work)
9517
{
9518
struct kvm *kvm;
9519
struct kvm_vcpu *vcpu;
9520
unsigned long i;
9521
9522
mutex_lock(&kvm_lock);
9523
list_for_each_entry(kvm, &vm_list, vm_list)
9524
kvm_for_each_vcpu(i, vcpu, kvm)
9525
kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
9526
atomic_set(&kvm_guest_has_master_clock, 0);
9527
mutex_unlock(&kvm_lock);
9528
}
9529
9530
static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
9531
9532
/*
9533
* Indirection to move queue_work() out of the tk_core.seq write held
9534
* region to prevent possible deadlocks against time accessors which
9535
* are invoked with work related locks held.
9536
*/
9537
static void pvclock_irq_work_fn(struct irq_work *w)
9538
{
9539
queue_work(system_long_wq, &pvclock_gtod_work);
9540
}
9541
9542
static DEFINE_IRQ_WORK(pvclock_irq_work, pvclock_irq_work_fn);
9543
9544
/*
9545
* Notification about pvclock gtod data update.
9546
*/
9547
static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
9548
void *priv)
9549
{
9550
struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
9551
struct timekeeper *tk = priv;
9552
9553
update_pvclock_gtod(tk);
9554
9555
/*
9556
* Disable master clock if host does not trust, or does not use,
9557
* TSC based clocksource. Delegate queue_work() to irq_work as
9558
* this is invoked with tk_core.seq write held.
9559
*/
9560
if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) &&
9561
atomic_read(&kvm_guest_has_master_clock) != 0)
9562
irq_work_queue(&pvclock_irq_work);
9563
return 0;
9564
}
9565
9566
static struct notifier_block pvclock_gtod_notifier = {
9567
.notifier_call = pvclock_gtod_notify,
9568
};
9569
#endif
9570
9571
static inline void kvm_ops_update(struct kvm_x86_init_ops *ops)
9572
{
9573
memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops));
9574
9575
#define __KVM_X86_OP(func) \
9576
static_call_update(kvm_x86_##func, kvm_x86_ops.func);
9577
#define KVM_X86_OP(func) \
9578
WARN_ON(!kvm_x86_ops.func); __KVM_X86_OP(func)
9579
#define KVM_X86_OP_OPTIONAL __KVM_X86_OP
9580
#define KVM_X86_OP_OPTIONAL_RET0(func) \
9581
static_call_update(kvm_x86_##func, (void *)kvm_x86_ops.func ? : \
9582
(void *)__static_call_return0);
9583
#include <asm/kvm-x86-ops.h>
9584
#undef __KVM_X86_OP
9585
9586
kvm_pmu_ops_update(ops->pmu_ops);
9587
}
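/*
 * Editorial note: asm/kvm-x86-ops.h is an X-macro list of KVM_X86_OP(name)
 * (and the _OPTIONAL variants) invocations, so with the temporary macro
 * definitions above a representative entry such as KVM_X86_OP(vcpu_create)
 * expands to roughly:
 *
 *	WARN_ON(!kvm_x86_ops.vcpu_create);
 *	static_call_update(kvm_x86_vcpu_create, kvm_x86_ops.vcpu_create);
 *
 * i.e. every vendor callback copied into kvm_x86_ops is also patched into
 * its static call site, mandatory hooks are sanity-checked against NULL,
 * and OPTIONAL_RET0 hooks fall back to __static_call_return0 when the
 * vendor module leaves them unimplemented.
 */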
9588
9589
static int kvm_x86_check_processor_compatibility(void)
9590
{
9591
int cpu = smp_processor_id();
9592
struct cpuinfo_x86 *c = &cpu_data(cpu);
9593
9594
/*
9595
* Compatibility checks are done when loading KVM and when enabling
9596
* hardware, e.g. during CPU hotplug, to ensure all online CPUs are
9597
* compatible, i.e. KVM should never perform a compatibility check on
9598
* an offline CPU.
9599
*/
9600
WARN_ON(!cpu_online(cpu));
9601
9602
if (__cr4_reserved_bits(cpu_has, c) !=
9603
__cr4_reserved_bits(cpu_has, &boot_cpu_data))
9604
return -EIO;
9605
9606
return kvm_x86_call(check_processor_compatibility)();
9607
}
9608
9609
static void kvm_x86_check_cpu_compat(void *ret)
9610
{
9611
*(int *)ret = kvm_x86_check_processor_compatibility();
9612
}
9613
9614
int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
9615
{
9616
u64 host_pat;
9617
int r, cpu;
9618
9619
guard(mutex)(&vendor_module_lock);
9620
9621
if (kvm_x86_ops.enable_virtualization_cpu) {
9622
pr_err("already loaded vendor module '%s'\n", kvm_x86_ops.name);
9623
return -EEXIST;
9624
}
9625
9626
/*
9627
* KVM explicitly assumes that the guest has an FPU and
9628
* FXSAVE/FXRSTOR. For example, the KVM_GET_FPU ioctl explicitly casts the
9629
* vCPU's FPU state as a fxregs_state struct.
9630
*/
9631
if (!boot_cpu_has(X86_FEATURE_FPU) || !boot_cpu_has(X86_FEATURE_FXSR)) {
9632
pr_err("inadequate fpu\n");
9633
return -EOPNOTSUPP;
9634
}
9635
9636
if (IS_ENABLED(CONFIG_PREEMPT_RT) && !boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
9637
pr_err("RT requires X86_FEATURE_CONSTANT_TSC\n");
9638
return -EOPNOTSUPP;
9639
}
9640
9641
/*
9642
* KVM assumes that PAT entry '0' encodes WB memtype and simply zeroes
9643
* the PAT bits in SPTEs. Bail if PAT[0] is programmed to something
9644
* other than WB. Note, EPT doesn't utilize the PAT, but don't bother
9645
* with an exception. PAT[0] is set to WB on RESET and also by the
9646
* kernel, i.e. failure indicates a kernel bug or broken firmware.
9647
*/
9648
if (rdmsrq_safe(MSR_IA32_CR_PAT, &host_pat) ||
9649
(host_pat & GENMASK(2, 0)) != 6) {
9650
pr_err("host PAT[0] is not WB\n");
9651
return -EIO;
9652
}
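/*
 * Worked example (editorial note): with the architectural reset value of
 * IA32_PAT, 0x0007040600070406, GENMASK(2, 0) isolates the low three bits
 * of PAT entry 0, i.e. 0x6, which is the Write-Back encoding, so the
 * check above passes.  Firmware or a kernel that reprogrammed PAT0 to a
 * non-WB memtype would land in the -EIO path instead.
 */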
9653
9654
memset(&kvm_caps, 0, sizeof(kvm_caps));
9655
9656
x86_emulator_cache = kvm_alloc_emulator_cache();
9657
if (!x86_emulator_cache) {
9658
pr_err("failed to allocate cache for x86 emulator\n");
9659
return -ENOMEM;
9660
}
9661
9662
user_return_msrs = alloc_percpu(struct kvm_user_return_msrs);
9663
if (!user_return_msrs) {
9664
pr_err("failed to allocate percpu kvm_user_return_msrs\n");
9665
r = -ENOMEM;
9666
goto out_free_x86_emulator_cache;
9667
}
9668
kvm_nr_uret_msrs = 0;
9669
9670
r = kvm_mmu_vendor_module_init();
9671
if (r)
9672
goto out_free_percpu;
9673
9674
kvm_caps.supported_vm_types = BIT(KVM_X86_DEFAULT_VM);
9675
kvm_caps.supported_mce_cap = MCG_CTL_P | MCG_SER_P;
9676
9677
if (boot_cpu_has(X86_FEATURE_XSAVE)) {
9678
kvm_host.xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
9679
kvm_caps.supported_xcr0 = kvm_host.xcr0 & KVM_SUPPORTED_XCR0;
9680
}
9681
kvm_caps.supported_quirks = KVM_X86_VALID_QUIRKS;
9682
kvm_caps.inapplicable_quirks = KVM_X86_CONDITIONAL_QUIRKS;
9683
9684
rdmsrq_safe(MSR_EFER, &kvm_host.efer);
9685
9686
if (boot_cpu_has(X86_FEATURE_XSAVES))
9687
rdmsrq(MSR_IA32_XSS, kvm_host.xss);
9688
9689
kvm_init_pmu_capability(ops->pmu_ops);
9690
9691
if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
9692
rdmsrq(MSR_IA32_ARCH_CAPABILITIES, kvm_host.arch_capabilities);
9693
9694
r = ops->hardware_setup();
9695
if (r != 0)
9696
goto out_mmu_exit;
9697
9698
enable_device_posted_irqs &= enable_apicv &&
9699
irq_remapping_cap(IRQ_POSTING_CAP);
9700
9701
kvm_ops_update(ops);
9702
9703
for_each_online_cpu(cpu) {
9704
smp_call_function_single(cpu, kvm_x86_check_cpu_compat, &r, 1);
9705
if (r < 0)
9706
goto out_unwind_ops;
9707
}
9708
9709
/*
9710
* Point of no return! DO NOT add error paths below this point unless
9711
* absolutely necessary, as most operations from this point forward
9712
* require unwinding.
9713
*/
9714
kvm_timer_init();
9715
9716
if (pi_inject_timer == -1)
9717
pi_inject_timer = housekeeping_enabled(HK_TYPE_TIMER);
9718
#ifdef CONFIG_X86_64
9719
pvclock_gtod_register_notifier(&pvclock_gtod_notifier);
9720
9721
if (hypervisor_is_type(X86_HYPER_MS_HYPERV))
9722
set_hv_tscchange_cb(kvm_hyperv_tsc_notifier);
9723
#endif
9724
9725
kvm_register_perf_callbacks(ops->handle_intel_pt_intr);
9726
9727
if (IS_ENABLED(CONFIG_KVM_SW_PROTECTED_VM) && tdp_mmu_enabled)
9728
kvm_caps.supported_vm_types |= BIT(KVM_X86_SW_PROTECTED_VM);
9729
9730
/* KVM always ignores guest PAT for shadow paging. */
9731
if (!tdp_enabled)
9732
kvm_caps.supported_quirks &= ~KVM_X86_QUIRK_IGNORE_GUEST_PAT;
9733
9734
if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES))
9735
kvm_caps.supported_xss = 0;
9736
9737
if (kvm_caps.has_tsc_control) {
9738
/*
9739
* Make sure the user can only configure tsc_khz values that
9740
* fit into a signed integer.
9741
* A min value is not calculated because it will always
9742
* be 1 on all machines.
9743
*/
9744
u64 max = min(0x7fffffffULL,
9745
__scale_tsc(kvm_caps.max_tsc_scaling_ratio, tsc_khz));
9746
kvm_caps.max_guest_tsc_khz = max;
9747
}
9748
kvm_caps.default_tsc_scaling_ratio = 1ULL << kvm_caps.tsc_scaling_ratio_frac_bits;
9749
kvm_init_msr_lists();
9750
return 0;
9751
9752
out_unwind_ops:
9753
kvm_x86_ops.enable_virtualization_cpu = NULL;
9754
kvm_x86_call(hardware_unsetup)();
9755
out_mmu_exit:
9756
kvm_mmu_vendor_module_exit();
9757
out_free_percpu:
9758
free_percpu(user_return_msrs);
9759
out_free_x86_emulator_cache:
9760
kmem_cache_destroy(x86_emulator_cache);
9761
return r;
9762
}
9763
EXPORT_SYMBOL_GPL(kvm_x86_vendor_init);
9764
9765
void kvm_x86_vendor_exit(void)
9766
{
9767
kvm_unregister_perf_callbacks();
9768
9769
#ifdef CONFIG_X86_64
9770
if (hypervisor_is_type(X86_HYPER_MS_HYPERV))
9771
clear_hv_tscchange_cb();
9772
#endif
9773
kvm_lapic_exit();
9774
9775
if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
9776
cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
9777
CPUFREQ_TRANSITION_NOTIFIER);
9778
cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
9779
}
9780
#ifdef CONFIG_X86_64
9781
pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
9782
irq_work_sync(&pvclock_irq_work);
9783
cancel_work_sync(&pvclock_gtod_work);
9784
#endif
9785
kvm_x86_call(hardware_unsetup)();
9786
kvm_mmu_vendor_module_exit();
9787
free_percpu(user_return_msrs);
9788
kmem_cache_destroy(x86_emulator_cache);
9789
#ifdef CONFIG_KVM_XEN
9790
static_key_deferred_flush(&kvm_xen_enabled);
9791
WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key));
9792
#endif
9793
mutex_lock(&vendor_module_lock);
9794
kvm_x86_ops.enable_virtualization_cpu = NULL;
9795
mutex_unlock(&vendor_module_lock);
9796
}
9797
EXPORT_SYMBOL_GPL(kvm_x86_vendor_exit);
9798
9799
#ifdef CONFIG_X86_64
9800
static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
9801
unsigned long clock_type)
9802
{
9803
struct kvm_clock_pairing clock_pairing;
9804
struct timespec64 ts;
9805
u64 cycle;
9806
int ret;
9807
9808
if (clock_type != KVM_CLOCK_PAIRING_WALLCLOCK)
9809
return -KVM_EOPNOTSUPP;
9810
9811
/*
9812
* When the TSC is in permanent catchup mode guests won't be able to use the
9813
* pvclock_read_retry loop to get a consistent view of pvclock.
9814
*/
9815
if (vcpu->arch.tsc_always_catchup)
9816
return -KVM_EOPNOTSUPP;
9817
9818
if (!kvm_get_walltime_and_clockread(&ts, &cycle))
9819
return -KVM_EOPNOTSUPP;
9820
9821
clock_pairing.sec = ts.tv_sec;
9822
clock_pairing.nsec = ts.tv_nsec;
9823
clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle);
9824
clock_pairing.flags = 0;
9825
memset(&clock_pairing.pad, 0, sizeof(clock_pairing.pad));
9826
9827
ret = 0;
9828
if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing,
9829
sizeof(struct kvm_clock_pairing)))
9830
ret = -KVM_EFAULT;
9831
9832
return ret;
9833
}
9834
#endif
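/*
 * Editorial sketch (guest-side code, not part of this file): a guest
 * typically invokes the handler above much like the KVM PTP clock driver
 * does, passing the guest-physical address of a struct kvm_clock_pairing
 * in RBX and KVM_CLOCK_PAIRING_WALLCLOCK in RCX.  The "vmcall" below
 * assumes an Intel guest (AMD guests use "vmmcall") and that "pairing"
 * lives in the direct map so __pa() is valid.
 *
 *	struct kvm_clock_pairing pairing;
 *	long ret;
 *
 *	asm volatile("vmcall"
 *		     : "=a" (ret)
 *		     : "a" (KVM_HC_CLOCK_PAIRING),
 *		       "b" (__pa(&pairing)),
 *		       "c" (KVM_CLOCK_PAIRING_WALLCLOCK)
 *		     : "memory");
 *
 * On success (ret == 0), pairing.sec/nsec and pairing.tsc form a
 * host-consistent (CLOCK_REALTIME, guest TSC) sample.
 */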
9835
9836
/*
9837
* kvm_pv_kick_cpu_op: Kick a vcpu.
9838
*
9839
* @apicid - apicid of vcpu to be kicked.
9840
*/
9841
static void kvm_pv_kick_cpu_op(struct kvm *kvm, int apicid)
9842
{
9843
/*
9844
* All other fields are unused for APIC_DM_REMRD, but may be consumed by
9845
* common code, e.g. for tracing. Defer initialization to the compiler.
9846
*/
9847
struct kvm_lapic_irq lapic_irq = {
9848
.delivery_mode = APIC_DM_REMRD,
9849
.dest_mode = APIC_DEST_PHYSICAL,
9850
.shorthand = APIC_DEST_NOSHORT,
9851
.dest_id = apicid,
9852
};
9853
9854
kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL);
9855
}
9856
9857
bool kvm_apicv_activated(struct kvm *kvm)
9858
{
9859
return (READ_ONCE(kvm->arch.apicv_inhibit_reasons) == 0);
9860
}
9861
EXPORT_SYMBOL_GPL(kvm_apicv_activated);
9862
9863
bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu)
9864
{
9865
ulong vm_reasons = READ_ONCE(vcpu->kvm->arch.apicv_inhibit_reasons);
9866
ulong vcpu_reasons =
9867
kvm_x86_call(vcpu_get_apicv_inhibit_reasons)(vcpu);
9868
9869
return (vm_reasons | vcpu_reasons) == 0;
9870
}
9871
EXPORT_SYMBOL_GPL(kvm_vcpu_apicv_activated);
9872
9873
static void set_or_clear_apicv_inhibit(unsigned long *inhibits,
9874
enum kvm_apicv_inhibit reason, bool set)
9875
{
9876
const struct trace_print_flags apicv_inhibits[] = { APICV_INHIBIT_REASONS };
9877
9878
BUILD_BUG_ON(ARRAY_SIZE(apicv_inhibits) != NR_APICV_INHIBIT_REASONS);
9879
9880
if (set)
9881
__set_bit(reason, inhibits);
9882
else
9883
__clear_bit(reason, inhibits);
9884
9885
trace_kvm_apicv_inhibit_changed(reason, set, *inhibits);
9886
}
9887
9888
static void kvm_apicv_init(struct kvm *kvm)
9889
{
9890
enum kvm_apicv_inhibit reason = enable_apicv ? APICV_INHIBIT_REASON_ABSENT :
9891
APICV_INHIBIT_REASON_DISABLED;
9892
9893
set_or_clear_apicv_inhibit(&kvm->arch.apicv_inhibit_reasons, reason, true);
9894
9895
init_rwsem(&kvm->arch.apicv_update_lock);
9896
}
9897
9898
static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
9899
{
9900
struct kvm_vcpu *target = NULL;
9901
struct kvm_apic_map *map;
9902
9903
vcpu->stat.directed_yield_attempted++;
9904
9905
if (single_task_running())
9906
goto no_yield;
9907
9908
rcu_read_lock();
9909
map = rcu_dereference(vcpu->kvm->arch.apic_map);
9910
9911
if (likely(map) && dest_id <= map->max_apic_id) {
9912
dest_id = array_index_nospec(dest_id, map->max_apic_id + 1);
9913
if (map->phys_map[dest_id])
9914
target = map->phys_map[dest_id]->vcpu;
9915
}
9916
9917
rcu_read_unlock();
9918
9919
if (!target || !READ_ONCE(target->ready))
9920
goto no_yield;
9921
9922
/* Ignore requests to yield to self */
9923
if (vcpu == target)
9924
goto no_yield;
9925
9926
if (kvm_vcpu_yield_to(target) <= 0)
9927
goto no_yield;
9928
9929
vcpu->stat.directed_yield_successful++;
9930
9931
no_yield:
9932
return;
9933
}
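/*
 * Example (editorial note): this is the boost path behind the PV unhalt /
 * sched-yield hypercalls handled below.  A vCPU spinning on a lock (or
 * waiting on an IPI target) passes the destination APIC ID, and
 * kvm_sched_yield() resolves it through the APIC map and donates its
 * timeslice via kvm_vcpu_yield_to(), so an overcommitted target vCPU gets
 * to run sooner.  The directed_yield_attempted/successful counters expose
 * the hit rate through the vCPU stats interface.
 */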
9934
9935
static int complete_hypercall_exit(struct kvm_vcpu *vcpu)
9936
{
9937
u64 ret = vcpu->run->hypercall.ret;
9938
9939
if (!is_64_bit_hypercall(vcpu))
9940
ret = (u32)ret;
9941
kvm_rax_write(vcpu, ret);
9942
return kvm_skip_emulated_instruction(vcpu);
9943
}
9944
9945
int ____kvm_emulate_hypercall(struct kvm_vcpu *vcpu, int cpl,
9946
int (*complete_hypercall)(struct kvm_vcpu *))
9947
{
9948
unsigned long ret;
9949
unsigned long nr = kvm_rax_read(vcpu);
9950
unsigned long a0 = kvm_rbx_read(vcpu);
9951
unsigned long a1 = kvm_rcx_read(vcpu);
9952
unsigned long a2 = kvm_rdx_read(vcpu);
9953
unsigned long a3 = kvm_rsi_read(vcpu);
9954
int op_64_bit = is_64_bit_hypercall(vcpu);
9955
9956
++vcpu->stat.hypercalls;
9957
9958
trace_kvm_hypercall(nr, a0, a1, a2, a3);
9959
9960
if (!op_64_bit) {
9961
nr &= 0xFFFFFFFF;
9962
a0 &= 0xFFFFFFFF;
9963
a1 &= 0xFFFFFFFF;
9964
a2 &= 0xFFFFFFFF;
9965
a3 &= 0xFFFFFFFF;
9966
}
9967
9968
if (cpl) {
9969
ret = -KVM_EPERM;
9970
goto out;
9971
}
9972
9973
ret = -KVM_ENOSYS;
9974
9975
switch (nr) {
9976
case KVM_HC_VAPIC_POLL_IRQ:
9977
ret = 0;
9978
break;
9979
case KVM_HC_KICK_CPU:
9980
if (!guest_pv_has(vcpu, KVM_FEATURE_PV_UNHALT))
9981
break;
9982
9983
kvm_pv_kick_cpu_op(vcpu->kvm, a1);
9984
kvm_sched_yield(vcpu, a1);
9985
ret = 0;
9986
break;
9987
#ifdef CONFIG_X86_64
9988
case KVM_HC_CLOCK_PAIRING:
9989
ret = kvm_pv_clock_pairing(vcpu, a0, a1);
9990
break;
9991
#endif
9992
case KVM_HC_SEND_IPI:
9993
if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SEND_IPI))
9994
break;
9995
9996
ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit);
9997
break;
9998
case KVM_HC_SCHED_YIELD:
9999
if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SCHED_YIELD))
10000
break;
10001
10002
kvm_sched_yield(vcpu, a0);
10003
ret = 0;
10004
break;
10005
case KVM_HC_MAP_GPA_RANGE: {
10006
u64 gpa = a0, npages = a1, attrs = a2;
10007
10008
ret = -KVM_ENOSYS;
10009
if (!user_exit_on_hypercall(vcpu->kvm, KVM_HC_MAP_GPA_RANGE))
10010
break;
10011
10012
if (!PAGE_ALIGNED(gpa) || !npages ||
10013
gpa_to_gfn(gpa) + npages <= gpa_to_gfn(gpa)) {
10014
ret = -KVM_EINVAL;
10015
break;
10016
}
10017
10018
vcpu->run->exit_reason = KVM_EXIT_HYPERCALL;
10019
vcpu->run->hypercall.nr = KVM_HC_MAP_GPA_RANGE;
10020
/*
10021
* In principle this should have been -KVM_ENOSYS, but userspace (QEMU <=9.2)
10022
* assumed that vcpu->run->hypercall.ret is never changed by KVM and thus that
10023
* it was always zero on KVM_EXIT_HYPERCALL. Since KVM is now overwriting
10024
* vcpu->run->hypercall.ret, ensure that it is zero so as not to break QEMU.
10025
*/
10026
vcpu->run->hypercall.ret = 0;
10027
vcpu->run->hypercall.args[0] = gpa;
10028
vcpu->run->hypercall.args[1] = npages;
10029
vcpu->run->hypercall.args[2] = attrs;
10030
vcpu->run->hypercall.flags = 0;
10031
if (op_64_bit)
10032
vcpu->run->hypercall.flags |= KVM_EXIT_HYPERCALL_LONG_MODE;
10033
10034
WARN_ON_ONCE(vcpu->run->hypercall.flags & KVM_EXIT_HYPERCALL_MBZ);
10035
vcpu->arch.complete_userspace_io = complete_hypercall;
10036
return 0;
10037
}
10038
default:
10039
ret = -KVM_ENOSYS;
10040
break;
10041
}
10042
10043
out:
10044
vcpu->run->hypercall.ret = ret;
10045
return 1;
10046
}
10047
EXPORT_SYMBOL_GPL(____kvm_emulate_hypercall);
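/*
 * Editorial sketch (VMM/userspace side, not part of this file): the
 * KVM_HC_MAP_GPA_RANGE exit set up above is consumed from the mmap()ed
 * kvm_run page roughly as follows; error handling is elided and
 * vmm_update_gpa_range() is a hypothetical VMM helper.
 *
 *	case KVM_EXIT_HYPERCALL:
 *		if (run->hypercall.nr == KVM_HC_MAP_GPA_RANGE) {
 *			__u64 gpa    = run->hypercall.args[0];
 *			__u64 npages = run->hypercall.args[1];
 *			__u64 attrs  = run->hypercall.args[2];
 *
 *			// e.g. flip the range between shared and private
 *			vmm_update_gpa_range(gpa, npages, attrs);
 *
 *			run->hypercall.ret = 0;	// returned to the guest in RAX
 *		}
 *		break;
 *
 * On the next KVM_RUN, the registered completion callback (e.g.
 * complete_hypercall_exit()) copies run->hypercall.ret into the guest's
 * RAX and skips the VMCALL/VMMCALL.
 */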
10048
10049
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
10050
{
10051
if (kvm_xen_hypercall_enabled(vcpu->kvm))
10052
return kvm_xen_hypercall(vcpu);
10053
10054
if (kvm_hv_hypercall_enabled(vcpu))
10055
return kvm_hv_hypercall(vcpu);
10056
10057
return __kvm_emulate_hypercall(vcpu, kvm_x86_call(get_cpl)(vcpu),
10058
complete_hypercall_exit);
10059
}
10060
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
10061
10062
static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
10063
{
10064
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
10065
char instruction[3];
10066
unsigned long rip = kvm_rip_read(vcpu);
10067
10068
/*
10069
* If the quirk is disabled, synthesize a #UD and let the guest pick up
10070
* the pieces.
10071
*/
10072
if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_FIX_HYPERCALL_INSN)) {
10073
ctxt->exception.error_code_valid = false;
10074
ctxt->exception.vector = UD_VECTOR;
10075
ctxt->have_exception = true;
10076
return X86EMUL_PROPAGATE_FAULT;
10077
}
10078
10079
kvm_x86_call(patch_hypercall)(vcpu, instruction);
10080
10081
return emulator_write_emulated(ctxt, rip, instruction, 3,
10082
&ctxt->exception);
10083
}
10084
10085
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
10086
{
10087
return vcpu->run->request_interrupt_window &&
10088
likely(!pic_in_kernel(vcpu->kvm));
10089
}
10090
10091
/* Called within kvm->srcu read side. */
10092
static void post_kvm_run_save(struct kvm_vcpu *vcpu)
10093
{
10094
struct kvm_run *kvm_run = vcpu->run;
10095
10096
kvm_run->if_flag = kvm_x86_call(get_if_flag)(vcpu);
10097
kvm_run->cr8 = kvm_get_cr8(vcpu);
10098
kvm_run->apic_base = vcpu->arch.apic_base;
10099
10100
kvm_run->ready_for_interrupt_injection =
10101
pic_in_kernel(vcpu->kvm) ||
10102
kvm_vcpu_ready_for_interrupt_injection(vcpu);
10103
10104
if (is_smm(vcpu))
10105
kvm_run->flags |= KVM_RUN_X86_SMM;
10106
if (is_guest_mode(vcpu))
10107
kvm_run->flags |= KVM_RUN_X86_GUEST_MODE;
10108
}
10109
10110
static void update_cr8_intercept(struct kvm_vcpu *vcpu)
10111
{
10112
int max_irr, tpr;
10113
10114
if (!kvm_x86_ops.update_cr8_intercept)
10115
return;
10116
10117
if (!lapic_in_kernel(vcpu))
10118
return;
10119
10120
if (vcpu->arch.apic->apicv_active)
10121
return;
10122
10123
if (!vcpu->arch.apic->vapic_addr)
10124
max_irr = kvm_lapic_find_highest_irr(vcpu);
10125
else
10126
max_irr = -1;
10127
10128
if (max_irr != -1)
10129
max_irr >>= 4;
10130
10131
tpr = kvm_lapic_get_cr8(vcpu);
10132
10133
kvm_x86_call(update_cr8_intercept)(vcpu, tpr, max_irr);
10134
}
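/*
 * Worked example (editorial note): if the highest pending IRR vector is
 * 0x61, max_irr >> 4 gives priority class 6.  The vendor hook then
 * programs the TPR/CR8 threshold so that the guest may move TPR anywhere
 * in [6, 15] without exiting, while dropping TPR below 6 (which would
 * unmask the pending vector) triggers a VM-Exit so KVM can evaluate and
 * inject the interrupt.
 */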
10135
10136
10137
int kvm_check_nested_events(struct kvm_vcpu *vcpu)
10138
{
10139
if (kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
10140
kvm_x86_ops.nested_ops->triple_fault(vcpu);
10141
return 1;
10142
}
10143
10144
return kvm_x86_ops.nested_ops->check_events(vcpu);
10145
}
10146
10147
static void kvm_inject_exception(struct kvm_vcpu *vcpu)
10148
{
10149
/*
10150
* Suppress the error code if the vCPU is in Real Mode, as Real Mode
10151
* exceptions don't report error codes. The presence of an error code
10152
* is carried with the exception and only stripped when the exception
10153
* is injected, as intercepted #PF VM-Exits for AMD's Paged Real Mode do
10154
* report an error code despite the CPU being in Real Mode.
10155
*/
10156
vcpu->arch.exception.has_error_code &= is_protmode(vcpu);
10157
10158
trace_kvm_inj_exception(vcpu->arch.exception.vector,
10159
vcpu->arch.exception.has_error_code,
10160
vcpu->arch.exception.error_code,
10161
vcpu->arch.exception.injected);
10162
10163
kvm_x86_call(inject_exception)(vcpu);
10164
}
10165
10166
/*
10167
* Check for any event (interrupt or exception) that is ready to be injected,
10168
* and if there is at least one event, inject the event with the highest
10169
* priority. This handles both "pending" events, i.e. events that have never
10170
* been injected into the guest, and "injected" events, i.e. events that were
10171
* injected as part of a previous VM-Enter, but weren't successfully delivered
10172
* and need to be re-injected.
10173
*
10174
* Note, this is not guaranteed to be invoked on a guest instruction boundary,
10175
* i.e. doesn't guarantee that there's an event window in the guest. KVM must
10176
* be able to inject exceptions in the "middle" of an instruction, and so must
10177
* also be able to re-inject NMIs and IRQs in the middle of an instruction.
10178
* I.e. for exceptions and re-injected events, NOT invoking this on instruction
10179
* boundaries is necessary and correct.
10180
*
10181
* For simplicity, KVM uses a single path to inject all events (except events
10182
* that are injected directly from L1 to L2) and doesn't explicitly track
10183
* instruction boundaries for asynchronous events. However, because VM-Exits
10184
* that can occur during instruction execution typically result in KVM skipping
10185
* the instruction or injecting an exception, e.g. instruction and exception
10186
* intercepts, and because pending exceptions have higher priority than pending
10187
* interrupts, KVM still honors instruction boundaries in most scenarios.
10188
*
10189
* But, if a VM-Exit occurs during instruction execution, and KVM does NOT skip
10190
* the instruction or inject an exception, then KVM can incorrectly inject a new
10191
* asynchronous event if the event became pending after the CPU fetched the
10192
* instruction (in the guest). E.g. if a page fault (#PF, #NPF, EPT violation)
10193
* occurs and is resolved by KVM, a coincident NMI, SMI, IRQ, etc... can be
10194
* injected on the restarted instruction instead of being deferred until the
10195
* instruction completes.
10196
*
10197
* In practice, this virtualization hole is unlikely to be observed by the
10198
* guest, and even less likely to cause functional problems. To detect the
10199
* hole, the guest would have to trigger an event on a side effect of an early
10200
* phase of instruction execution, e.g. on the instruction fetch from memory.
10201
* And for it to be a functional problem, the guest would need to depend on the
10202
* ordering between that side effect, the instruction completing, _and_ the
10203
* delivery of the asynchronous event.
10204
*/
10205
static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
10206
bool *req_immediate_exit)
10207
{
10208
bool can_inject;
10209
int r;
10210
10211
/*
10212
* Process nested events first, as nested VM-Exit supersedes event
10213
* re-injection. If there's an event queued for re-injection, it will
10214
* be saved into the appropriate vmc{b,s}12 fields on nested VM-Exit.
10215
*/
10216
if (is_guest_mode(vcpu))
10217
r = kvm_check_nested_events(vcpu);
10218
else
10219
r = 0;
10220
10221
/*
10222
* Re-inject exceptions and events *especially* if immediate entry+exit
10223
* to/from L2 is needed, as any event that has already been injected
10224
* into L2 needs to complete its lifecycle before injecting a new event.
10225
*
10226
* Don't re-inject an NMI or interrupt if there is a pending exception.
10227
* This collision arises if an exception occurred while vectoring the
10228
* injected event, KVM intercepted said exception, and KVM ultimately
10229
* determined the fault belongs to the guest and queues the exception
10230
* for injection back into the guest.
10231
*
10232
* "Injected" interrupts can also collide with pending exceptions if
10233
* userspace ignores the "ready for injection" flag and blindly queues
10234
* an interrupt. In that case, prioritizing the exception is correct,
10235
* as the exception "occurred" before the exit to userspace. Trap-like
10236
* exceptions, e.g. most #DBs, have higher priority than interrupts.
10237
* And while fault-like exceptions, e.g. #GP and #PF, are the lowest
10238
* priority, they're only generated (pended) during instruction
10239
* execution, and interrupts are recognized at instruction boundaries.
10240
* Thus a pending fault-like exception means the fault occurred on the
10241
* *previous* instruction and must be serviced prior to recognizing any
10242
* new events in order to fully complete the previous instruction.
10243
*/
10244
if (vcpu->arch.exception.injected)
10245
kvm_inject_exception(vcpu);
10246
else if (kvm_is_exception_pending(vcpu))
10247
; /* see above */
10248
else if (vcpu->arch.nmi_injected)
10249
kvm_x86_call(inject_nmi)(vcpu);
10250
else if (vcpu->arch.interrupt.injected)
10251
kvm_x86_call(inject_irq)(vcpu, true);
10252
10253
/*
10254
* Exceptions that morph to VM-Exits are handled above, and pending
10255
* exceptions on top of injected exceptions that do not VM-Exit should
10256
* either morph to #DF or, sadly, override the injected exception.
10257
*/
10258
WARN_ON_ONCE(vcpu->arch.exception.injected &&
10259
vcpu->arch.exception.pending);
10260
10261
/*
10262
* Bail if immediate entry+exit to/from the guest is needed to complete
10263
* nested VM-Enter or event re-injection so that a different pending
10264
* event can be serviced (or if KVM needs to exit to userspace).
10265
*
10266
* Otherwise, continue processing events even if VM-Exit occurred. The
10267
* VM-Exit will have cleared exceptions that were meant for L2, but
10268
* there may now be events that can be injected into L1.
10269
*/
10270
if (r < 0)
10271
goto out;
10272
10273
/*
10274
* A pending exception VM-Exit should either result in nested VM-Exit
10275
* or force an immediate re-entry and exit to/from L2, and exception
10276
* VM-Exits cannot be injected (flag should _never_ be set).
10277
*/
10278
WARN_ON_ONCE(vcpu->arch.exception_vmexit.injected ||
10279
vcpu->arch.exception_vmexit.pending);
10280
10281
/*
10282
* New events, other than exceptions, cannot be injected if KVM needs
10283
* to re-inject a previous event. See above comments on re-injecting
10284
* for why pending exceptions get priority.
10285
*/
10286
can_inject = !kvm_event_needs_reinjection(vcpu);
10287
10288
if (vcpu->arch.exception.pending) {
10289
/*
10290
* Fault-class exceptions, except #DBs, set RF=1 in the RFLAGS
10291
* value pushed on the stack. Trap-like exceptions and all #DBs
10292
* leave RF as-is (KVM follows Intel's behavior in this regard;
10293
* AMD states that code breakpoint #DBs explicitly clear RF).
10294
*
10295
* Note, most versions of Intel's SDM and AMD's APM incorrectly
10296
* describe the behavior of General Detect #DBs, which are
10297
* fault-like. They do _not_ set RF, a la code breakpoints.
10298
*/
10299
if (exception_type(vcpu->arch.exception.vector) == EXCPT_FAULT)
10300
__kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) |
10301
X86_EFLAGS_RF);
10302
10303
if (vcpu->arch.exception.vector == DB_VECTOR) {
10304
kvm_deliver_exception_payload(vcpu, &vcpu->arch.exception);
10305
if (vcpu->arch.dr7 & DR7_GD) {
10306
vcpu->arch.dr7 &= ~DR7_GD;
10307
kvm_update_dr7(vcpu);
10308
}
10309
}
10310
10311
kvm_inject_exception(vcpu);
10312
10313
vcpu->arch.exception.pending = false;
10314
vcpu->arch.exception.injected = true;
10315
10316
can_inject = false;
10317
}
10318
10319
/* Don't inject interrupts if the user asked to avoid doing so */
10320
if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ)
10321
return 0;
10322
10323
/*
10324
* Finally, inject interrupt events. If an event cannot be injected
10325
* due to architectural conditions (e.g. IF=0) a window-open exit
10326
* will re-request KVM_REQ_EVENT. Sometimes however an event is pending
10327
* and can architecturally be injected, but we cannot do it right now:
10328
* an interrupt could have arrived just now and we have to inject it
10329
* as a vmexit, or there could already be an event in the queue, which is
10330
* indicated by can_inject. In that case we request an immediate exit
10331
* in order to make progress and get back here for another iteration.
10332
* The kvm_x86_ops hooks communicate this by returning -EBUSY.
10333
*/
10334
#ifdef CONFIG_KVM_SMM
10335
if (vcpu->arch.smi_pending) {
10336
r = can_inject ? kvm_x86_call(smi_allowed)(vcpu, true) :
10337
-EBUSY;
10338
if (r < 0)
10339
goto out;
10340
if (r) {
10341
vcpu->arch.smi_pending = false;
10342
++vcpu->arch.smi_count;
10343
enter_smm(vcpu);
10344
can_inject = false;
10345
} else
10346
kvm_x86_call(enable_smi_window)(vcpu);
10347
}
10348
#endif
10349
10350
if (vcpu->arch.nmi_pending) {
10351
r = can_inject ? kvm_x86_call(nmi_allowed)(vcpu, true) :
10352
-EBUSY;
10353
if (r < 0)
10354
goto out;
10355
if (r) {
10356
--vcpu->arch.nmi_pending;
10357
vcpu->arch.nmi_injected = true;
10358
kvm_x86_call(inject_nmi)(vcpu);
10359
can_inject = false;
10360
WARN_ON(kvm_x86_call(nmi_allowed)(vcpu, true) < 0);
10361
}
10362
if (vcpu->arch.nmi_pending)
10363
kvm_x86_call(enable_nmi_window)(vcpu);
10364
}
10365
10366
if (kvm_cpu_has_injectable_intr(vcpu)) {
10367
r = can_inject ? kvm_x86_call(interrupt_allowed)(vcpu, true) :
10368
-EBUSY;
10369
if (r < 0)
10370
goto out;
10371
if (r) {
10372
int irq = kvm_cpu_get_interrupt(vcpu);
10373
10374
if (!WARN_ON_ONCE(irq == -1)) {
10375
kvm_queue_interrupt(vcpu, irq, false);
10376
kvm_x86_call(inject_irq)(vcpu, false);
10377
WARN_ON(kvm_x86_call(interrupt_allowed)(vcpu, true) < 0);
10378
}
10379
}
10380
if (kvm_cpu_has_injectable_intr(vcpu))
10381
kvm_x86_call(enable_irq_window)(vcpu);
10382
}
10383
10384
if (is_guest_mode(vcpu) &&
10385
kvm_x86_ops.nested_ops->has_events &&
10386
kvm_x86_ops.nested_ops->has_events(vcpu, true))
10387
*req_immediate_exit = true;
10388
10389
/*
10390
* KVM must never queue a new exception while injecting an event; KVM
10391
* is done emulating and should only propagate the to-be-injected event
10392
* to the VMCS/VMCB. Queueing a new exception can put the vCPU into an
10393
* infinite loop as KVM will bail from VM-Enter to inject the pending
10394
* exception and start the cycle all over.
10395
*
10396
* Exempt triple faults as they have special handling and won't put the
10397
* vCPU into an infinite loop. Triple fault can be queued when running
10398
* VMX without unrestricted guest, as that requires KVM to emulate Real
10399
* Mode events (see kvm_inject_realmode_interrupt()).
10400
*/
10401
WARN_ON_ONCE(vcpu->arch.exception.pending ||
10402
vcpu->arch.exception_vmexit.pending);
10403
return 0;
10404
10405
out:
10406
if (r == -EBUSY) {
10407
*req_immediate_exit = true;
10408
r = 0;
10409
}
10410
return r;
10411
}
10412
10413
static void process_nmi(struct kvm_vcpu *vcpu)
10414
{
10415
unsigned int limit;
10416
10417
/*
10418
* x86 is limited to one NMI pending, but because KVM can't react to
10419
* incoming NMIs as quickly as bare metal, e.g. if the vCPU is
10420
* scheduled out, KVM needs to play nice with two queued NMIs showing
10421
* up at the same time. To handle this scenario, allow two NMIs to be
10422
* (temporarily) pending so long as NMIs are not blocked and KVM is not
10423
* waiting for a previous NMI injection to complete (which effectively
10424
* blocks NMIs). KVM will immediately inject one of the two NMIs, and
10425
* will request an NMI window to handle the second NMI.
10426
*/
10427
if (kvm_x86_call(get_nmi_mask)(vcpu) || vcpu->arch.nmi_injected)
10428
limit = 1;
10429
else
10430
limit = 2;
10431
10432
/*
10433
* Adjust the limit to account for pending virtual NMIs, which aren't
10434
* tracked in vcpu->arch.nmi_pending.
10435
*/
10436
if (kvm_x86_call(is_vnmi_pending)(vcpu))
10437
limit--;
10438
10439
vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
10440
vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit);
10441
10442
if (vcpu->arch.nmi_pending &&
10443
(kvm_x86_call(set_vnmi_pending)(vcpu)))
10444
vcpu->arch.nmi_pending--;
10445
10446
if (vcpu->arch.nmi_pending)
10447
kvm_make_request(KVM_REQ_EVENT, vcpu);
10448
}
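/*
 * Worked example (editorial note) of the limit above: with NMIs unmasked
 * and no injection in flight, the limit is 2, so two NMIs that raced in
 * via nmi_queued are both kept; one is injected right away and the second
 * is delivered from the ensuing NMI-window VM-Exit.  If an injection is
 * already pending or NMIs are masked (or a virtual NMI is already latched
 * in hardware), the limit drops and the surplus NMI is collapsed, which
 * matches bare metal where at most one NMI can be latched while another
 * is being handled.
 */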
10449
10450
/* Return total number of NMIs pending injection to the VM */
10451
int kvm_get_nr_pending_nmis(struct kvm_vcpu *vcpu)
10452
{
10453
return vcpu->arch.nmi_pending +
10454
kvm_x86_call(is_vnmi_pending)(vcpu);
10455
}
10456
10457
void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
10458
unsigned long *vcpu_bitmap)
10459
{
10460
kvm_make_vcpus_request_mask(kvm, KVM_REQ_SCAN_IOAPIC, vcpu_bitmap);
10461
}
10462
10463
void kvm_make_scan_ioapic_request(struct kvm *kvm)
10464
{
10465
kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
10466
}
10467
10468
void __kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
10469
{
10470
struct kvm_lapic *apic = vcpu->arch.apic;
10471
bool activate;
10472
10473
if (!lapic_in_kernel(vcpu))
10474
return;
10475
10476
down_read(&vcpu->kvm->arch.apicv_update_lock);
10477
preempt_disable();
10478
10479
/* Do not activate APICV when APIC is disabled */
10480
activate = kvm_vcpu_apicv_activated(vcpu) &&
10481
(kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED);
10482
10483
if (apic->apicv_active == activate)
10484
goto out;
10485
10486
apic->apicv_active = activate;
10487
kvm_apic_update_apicv(vcpu);
10488
kvm_x86_call(refresh_apicv_exec_ctrl)(vcpu);
10489
10490
/*
10491
* When APICv gets disabled, we may still have injected interrupts
10492
* pending. At the same time, KVM_REQ_EVENT may not be set as APICv was
10493
* still active when the interrupt got accepted. Make sure
10494
* kvm_check_and_inject_events() is called to check for that.
10495
*/
10496
if (!apic->apicv_active)
10497
kvm_make_request(KVM_REQ_EVENT, vcpu);
10498
10499
out:
10500
preempt_enable();
10501
up_read(&vcpu->kvm->arch.apicv_update_lock);
10502
}
10503
EXPORT_SYMBOL_GPL(__kvm_vcpu_update_apicv);
10504
10505
static void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
10506
{
10507
if (!lapic_in_kernel(vcpu))
10508
return;
10509
10510
/*
10511
* Due to sharing page tables across vCPUs, the xAPIC memslot must be
10512
* deleted if any vCPU has xAPIC virtualization and x2APIC enabled, but
10513
* hardware doesn't support x2APIC virtualization. E.g. some AMD
10514
* CPUs support AVIC but not x2APIC. KVM still allows enabling AVIC in
10515
* this case so that KVM can use the AVIC doorbell to inject interrupts
10516
* to running vCPUs, but KVM must not create SPTEs for the APIC base as
10517
* the vCPU would incorrectly be able to access the vAPIC page via MMIO
10518
* despite being in x2APIC mode. For simplicity, inhibiting the APIC
10519
* access page is sticky.
10520
*/
10521
if (apic_x2apic_mode(vcpu->arch.apic) &&
10522
kvm_x86_ops.allow_apicv_in_x2apic_without_x2apic_virtualization)
10523
kvm_inhibit_apic_access_page(vcpu);
10524
10525
__kvm_vcpu_update_apicv(vcpu);
10526
}
10527
10528
void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
10529
enum kvm_apicv_inhibit reason, bool set)
10530
{
10531
unsigned long old, new;
10532
10533
lockdep_assert_held_write(&kvm->arch.apicv_update_lock);
10534
10535
if (!(kvm_x86_ops.required_apicv_inhibits & BIT(reason)))
10536
return;
10537
10538
old = new = kvm->arch.apicv_inhibit_reasons;
10539
10540
set_or_clear_apicv_inhibit(&new, reason, set);
10541
10542
if (!!old != !!new) {
10543
/*
10544
* Kick all vCPUs before setting apicv_inhibit_reasons to avoid
10545
* false positives in the sanity check WARN in vcpu_enter_guest().
10546
* This task will wait for all vCPUs to ack the kick IRQ before
10547
* updating apicv_inhibit_reasons, and all other vCPUs will
10548
* block on acquiring apicv_update_lock so that vCPUs can't
10549
* redo vcpu_enter_guest() without seeing the new inhibit state.
10550
*
10551
* Note, holding apicv_update_lock and taking it in the read
10552
* side (handling the request) also prevents other vCPUs from
10553
* servicing the request with a stale apicv_inhibit_reasons.
10554
*/
10555
kvm_make_all_cpus_request(kvm, KVM_REQ_APICV_UPDATE);
10556
kvm->arch.apicv_inhibit_reasons = new;
10557
if (new) {
10558
unsigned long gfn = gpa_to_gfn(APIC_DEFAULT_PHYS_BASE);
10559
int idx = srcu_read_lock(&kvm->srcu);
10560
10561
kvm_zap_gfn_range(kvm, gfn, gfn+1);
10562
srcu_read_unlock(&kvm->srcu, idx);
10563
}
10564
} else {
10565
kvm->arch.apicv_inhibit_reasons = new;
10566
}
10567
}
10568
10569
void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
10570
enum kvm_apicv_inhibit reason, bool set)
10571
{
10572
if (!enable_apicv)
10573
return;
10574
10575
down_write(&kvm->arch.apicv_update_lock);
10576
__kvm_set_or_clear_apicv_inhibit(kvm, reason, set);
10577
up_write(&kvm->arch.apicv_update_lock);
10578
}
10579
EXPORT_SYMBOL_GPL(kvm_set_or_clear_apicv_inhibit);
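/*
 * Example usage (editorial note): one caller is the in-kernel PIT; when
 * reinjection is enabled it sets APICV_INHIBIT_REASON_PIT_REINJ via this
 * helper.  On vendors that list that reason in required_apicv_inhibits
 * (AVIC does), all vCPUs are kicked and must ack before the new inhibit
 * mask is published, and the APIC-base GFN is zapped so vCPUs re-fault
 * and rebuild their mappings with APICv/AVIC disabled.
 */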
10580
10581
static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
10582
{
10583
if (!kvm_apic_present(vcpu))
10584
return;
10585
10586
bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
10587
vcpu->arch.highest_stale_pending_ioapic_eoi = -1;
10588
10589
kvm_x86_call(sync_pir_to_irr)(vcpu);
10590
10591
if (irqchip_split(vcpu->kvm))
10592
kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
10593
#ifdef CONFIG_KVM_IOAPIC
10594
else if (ioapic_in_kernel(vcpu->kvm))
10595
kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
10596
#endif
10597
10598
if (is_guest_mode(vcpu))
10599
vcpu->arch.load_eoi_exitmap_pending = true;
10600
else
10601
kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
10602
}
10603
10604
static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
10605
{
10606
if (!kvm_apic_hw_enabled(vcpu->arch.apic))
10607
return;
10608
10609
#ifdef CONFIG_KVM_HYPERV
10610
if (to_hv_vcpu(vcpu)) {
10611
u64 eoi_exit_bitmap[4];
10612
10613
bitmap_or((ulong *)eoi_exit_bitmap,
10614
vcpu->arch.ioapic_handled_vectors,
10615
to_hv_synic(vcpu)->vec_bitmap, 256);
10616
kvm_x86_call(load_eoi_exitmap)(vcpu, eoi_exit_bitmap);
10617
return;
10618
}
10619
#endif
10620
kvm_x86_call(load_eoi_exitmap)(
10621
vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors);
10622
}
10623
10624
void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
10625
{
10626
kvm_x86_call(guest_memory_reclaimed)(kvm);
10627
}
10628
10629
static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
10630
{
10631
if (!lapic_in_kernel(vcpu))
10632
return;
10633
10634
kvm_x86_call(set_apic_access_page_addr)(vcpu);
10635
}
10636
10637
/*
10638
* Called within kvm->srcu read side.
10639
* Returns 1 to let vcpu_run() continue the guest execution loop without
10640
* exiting to the userspace. Otherwise, the value will be returned to the
10641
* userspace.
10642
*/
10643
static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
10644
{
10645
int r;
10646
bool req_int_win =
10647
dm_request_for_irq_injection(vcpu) &&
10648
kvm_cpu_accept_dm_intr(vcpu);
10649
fastpath_t exit_fastpath;
10650
u64 run_flags, debug_ctl;
10651
10652
bool req_immediate_exit = false;
10653
10654
if (kvm_request_pending(vcpu)) {
10655
if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu)) {
10656
r = -EIO;
10657
goto out;
10658
}
10659
10660
if (kvm_dirty_ring_check_request(vcpu)) {
10661
r = 0;
10662
goto out;
10663
}
10664
10665
if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
10666
if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
10667
r = 0;
10668
goto out;
10669
}
10670
}
10671
if (kvm_check_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu))
10672
kvm_mmu_free_obsolete_roots(vcpu);
10673
if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
10674
__kvm_migrate_timers(vcpu);
10675
if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu))
10676
kvm_update_masterclock(vcpu->kvm);
10677
if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu))
10678
kvm_gen_kvmclock_update(vcpu);
10679
if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
10680
r = kvm_guest_time_update(vcpu);
10681
if (unlikely(r))
10682
goto out;
10683
}
10684
if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
10685
kvm_mmu_sync_roots(vcpu);
10686
if (kvm_check_request(KVM_REQ_LOAD_MMU_PGD, vcpu))
10687
kvm_mmu_load_pgd(vcpu);
10688
10689
/*
10690
* Note, the order matters here, as flushing "all" TLB entries
10691
* also flushes the "current" TLB entries, i.e. servicing the
10692
* flush "all" will clear any request to flush "current".
10693
*/
10694
if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
10695
kvm_vcpu_flush_tlb_all(vcpu);
10696
10697
kvm_service_local_tlb_flush_requests(vcpu);
10698
10699
/*
10700
* Fall back to a "full" guest flush if Hyper-V's precise
10701
* flushing fails. Note, Hyper-V's flushing is per-vCPU, but
10702
* the flushes are considered "remote" and not "local" because
10703
* the requests can be initiated from other vCPUs.
10704
*/
10705
#ifdef CONFIG_KVM_HYPERV
10706
if (kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu) &&
10707
kvm_hv_vcpu_flush_tlb(vcpu))
10708
kvm_vcpu_flush_tlb_guest(vcpu);
10709
#endif
10710
10711
if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
10712
vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
10713
r = 0;
10714
goto out;
10715
}
10716
if (kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
10717
if (is_guest_mode(vcpu))
10718
kvm_x86_ops.nested_ops->triple_fault(vcpu);
10719
10720
if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
10721
vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
10722
vcpu->mmio_needed = 0;
10723
r = 0;
10724
goto out;
10725
}
10726
}
10727
if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
10728
/* Page is swapped out. Do synthetic halt */
10729
vcpu->arch.apf.halted = true;
10730
r = 1;
10731
goto out;
10732
}
10733
if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
10734
record_steal_time(vcpu);
10735
if (kvm_check_request(KVM_REQ_PMU, vcpu))
10736
kvm_pmu_handle_event(vcpu);
10737
if (kvm_check_request(KVM_REQ_PMI, vcpu))
10738
kvm_pmu_deliver_pmi(vcpu);
10739
#ifdef CONFIG_KVM_SMM
10740
if (kvm_check_request(KVM_REQ_SMI, vcpu))
10741
process_smi(vcpu);
10742
#endif
10743
if (kvm_check_request(KVM_REQ_NMI, vcpu))
10744
process_nmi(vcpu);
10745
if (kvm_check_request(KVM_REQ_IOAPIC_EOI_EXIT, vcpu)) {
10746
BUG_ON(vcpu->arch.pending_ioapic_eoi > 255);
10747
if (test_bit(vcpu->arch.pending_ioapic_eoi,
10748
vcpu->arch.ioapic_handled_vectors)) {
10749
vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI;
10750
vcpu->run->eoi.vector =
10751
vcpu->arch.pending_ioapic_eoi;
10752
r = 0;
10753
goto out;
10754
}
10755
}
10756
if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
10757
vcpu_scan_ioapic(vcpu);
10758
if (kvm_check_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu))
10759
vcpu_load_eoi_exitmap(vcpu);
10760
if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
10761
kvm_vcpu_reload_apic_access_page(vcpu);
10762
#ifdef CONFIG_KVM_HYPERV
10763
if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) {
10764
vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
10765
vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH;
10766
vcpu->run->system_event.ndata = 0;
10767
r = 0;
10768
goto out;
10769
}
10770
if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) {
10771
vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
10772
vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET;
10773
vcpu->run->system_event.ndata = 0;
10774
r = 0;
10775
goto out;
10776
}
10777
if (kvm_check_request(KVM_REQ_HV_EXIT, vcpu)) {
10778
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
10779
10780
vcpu->run->exit_reason = KVM_EXIT_HYPERV;
10781
vcpu->run->hyperv = hv_vcpu->exit;
10782
r = 0;
10783
goto out;
10784
}
10785
10786
/*
10787
* KVM_REQ_HV_STIMER has to be processed after
10788
* KVM_REQ_CLOCK_UPDATE, because Hyper-V SynIC timers
10789
* depend on the guest clock being up-to-date
10790
*/
10791
if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu))
10792
kvm_hv_process_stimers(vcpu);
10793
#endif
10794
if (kvm_check_request(KVM_REQ_APICV_UPDATE, vcpu))
10795
kvm_vcpu_update_apicv(vcpu);
10796
if (kvm_check_request(KVM_REQ_APF_READY, vcpu))
10797
kvm_check_async_pf_completion(vcpu);
10798
10799
/*
10800
* Recalc MSR intercepts as userspace may want to intercept
10801
* accesses to MSRs that KVM would otherwise pass through to
10802
* the guest.
10803
*/
10804
if (kvm_check_request(KVM_REQ_MSR_FILTER_CHANGED, vcpu))
10805
kvm_x86_call(recalc_msr_intercepts)(vcpu);
10806
10807
if (kvm_check_request(KVM_REQ_UPDATE_CPU_DIRTY_LOGGING, vcpu))
10808
kvm_x86_call(update_cpu_dirty_logging)(vcpu);
10809
10810
if (kvm_check_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, vcpu)) {
10811
kvm_vcpu_reset(vcpu, true);
10812
if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE) {
10813
r = 1;
10814
goto out;
10815
}
10816
}
10817
}
10818
10819
if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win ||
10820
kvm_xen_has_interrupt(vcpu)) {
10821
++vcpu->stat.req_event;
10822
r = kvm_apic_accept_events(vcpu);
10823
if (r < 0) {
10824
r = 0;
10825
goto out;
10826
}
10827
if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
10828
r = 1;
10829
goto out;
10830
}
10831
10832
r = kvm_check_and_inject_events(vcpu, &req_immediate_exit);
10833
if (r < 0) {
10834
r = 0;
10835
goto out;
10836
}
10837
if (req_int_win)
10838
kvm_x86_call(enable_irq_window)(vcpu);
10839
10840
if (kvm_lapic_enabled(vcpu)) {
10841
update_cr8_intercept(vcpu);
10842
kvm_lapic_sync_to_vapic(vcpu);
10843
}
10844
}
10845
10846
r = kvm_mmu_reload(vcpu);
10847
if (unlikely(r)) {
10848
goto cancel_injection;
10849
}
10850
10851
preempt_disable();
10852
10853
kvm_x86_call(prepare_switch_to_guest)(vcpu);
10854
10855
/*
10856
* Disable IRQs before setting IN_GUEST_MODE. Posted interrupt
10857
* IPIs are then delayed until after guest entry, which ensures that they
10858
* result in virtual interrupt delivery.
10859
*/
10860
local_irq_disable();
10861
10862
/* Store vcpu->apicv_active before vcpu->mode. */
10863
smp_store_release(&vcpu->mode, IN_GUEST_MODE);
10864
10865
kvm_vcpu_srcu_read_unlock(vcpu);
10866
10867
/*
10868
* 1) We should set ->mode before checking ->requests. Please see
10869
* the comment in kvm_vcpu_exiting_guest_mode().
10870
*
10871
* 2) For APICv, we should set ->mode before checking PID.ON. This
10872
* pairs with the memory barrier implicit in pi_test_and_set_on
10873
* (see vmx_deliver_posted_interrupt).
10874
*
10875
* 3) This also orders the write to mode from any reads to the page
10876
* tables done while the VCPU is running. Please see the comment
10877
* in kvm_flush_remote_tlbs.
10878
*/
10879
smp_mb__after_srcu_read_unlock();
10880
10881
/*
10882
* Process pending posted interrupts to handle the case where the
10883
* notification IRQ arrived in the host, or was never sent (because the
10884
* target vCPU wasn't running). Do this regardless of the vCPU's APICv
10885
* status; KVM doesn't update assigned devices when APICv is inhibited,
10886
* i.e. they can post interrupts even if APICv is temporarily disabled.
10887
*/
10888
if (kvm_lapic_enabled(vcpu))
10889
kvm_x86_call(sync_pir_to_irr)(vcpu);
10890
10891
if (kvm_vcpu_exit_request(vcpu)) {
10892
vcpu->mode = OUTSIDE_GUEST_MODE;
10893
smp_wmb();
10894
local_irq_enable();
10895
preempt_enable();
10896
kvm_vcpu_srcu_read_lock(vcpu);
10897
r = 1;
10898
goto cancel_injection;
10899
}
10900
10901
run_flags = 0;
10902
if (req_immediate_exit) {
10903
run_flags |= KVM_RUN_FORCE_IMMEDIATE_EXIT;
10904
kvm_make_request(KVM_REQ_EVENT, vcpu);
10905
}
10906
10907
fpregs_assert_state_consistent();
10908
if (test_thread_flag(TIF_NEED_FPU_LOAD))
10909
switch_fpu_return();
10910
10911
if (vcpu->arch.guest_fpu.xfd_err)
10912
wrmsrq(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
10913
10914
if (unlikely(vcpu->arch.switch_db_regs &&
10915
!(vcpu->arch.switch_db_regs & KVM_DEBUGREG_AUTO_SWITCH))) {
10916
set_debugreg(DR7_FIXED_1, 7);
10917
set_debugreg(vcpu->arch.eff_db[0], 0);
10918
set_debugreg(vcpu->arch.eff_db[1], 1);
10919
set_debugreg(vcpu->arch.eff_db[2], 2);
10920
set_debugreg(vcpu->arch.eff_db[3], 3);
10921
/* When KVM_DEBUGREG_WONT_EXIT, dr6 is accessible in guest. */
10922
if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
10923
run_flags |= KVM_RUN_LOAD_GUEST_DR6;
10924
} else if (unlikely(hw_breakpoint_active())) {
10925
set_debugreg(DR7_FIXED_1, 7);
10926
}
10927
10928
/*
10929
* Refresh the host DEBUGCTL snapshot after disabling IRQs, as DEBUGCTL
10930
* can be modified in IRQ context, e.g. via SMP function calls. Inform
10931
* vendor code if any host-owned bits were changed, e.g. so that the
10932
* value loaded into hardware while running the guest can be updated.
10933
*/
10934
debug_ctl = get_debugctlmsr();
10935
if ((debug_ctl ^ vcpu->arch.host_debugctl) & kvm_x86_ops.HOST_OWNED_DEBUGCTL &&
10936
!vcpu->arch.guest_state_protected)
10937
run_flags |= KVM_RUN_LOAD_DEBUGCTL;
10938
vcpu->arch.host_debugctl = debug_ctl;
10939
10940
guest_timing_enter_irqoff();
10941
10942
for (;;) {
10943
/*
10944
* Assert that vCPU vs. VM APICv state is consistent. An APICv
10945
* update must kick and wait for all vCPUs before toggling the
10946
* per-VM state, and responding vCPUs must wait for the update
10947
* to complete before servicing KVM_REQ_APICV_UPDATE.
10948
*/
10949
WARN_ON_ONCE((kvm_vcpu_apicv_activated(vcpu) != kvm_vcpu_apicv_active(vcpu)) &&
10950
(kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED));
10951
10952
exit_fastpath = kvm_x86_call(vcpu_run)(vcpu, run_flags);
10953
if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
10954
break;
10955
10956
if (kvm_lapic_enabled(vcpu))
10957
kvm_x86_call(sync_pir_to_irr)(vcpu);
10958
10959
if (unlikely(kvm_vcpu_exit_request(vcpu))) {
10960
exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
10961
break;
10962
}
10963
10964
run_flags = 0;
10965
10966
/* Note, VM-Exits that go down the "slow" path are accounted below. */
10967
++vcpu->stat.exits;
10968
}
10969
10970
/*
10971
* Do this here before restoring debug registers on the host. And
10972
* since we do this before handling the vmexit, a DR access vmexit
10973
* can (a) read the correct value of the debug registers, (b) set
10974
* KVM_DEBUGREG_WONT_EXIT again.
10975
*/
10976
if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
10977
WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
10978
WARN_ON(vcpu->arch.switch_db_regs & KVM_DEBUGREG_AUTO_SWITCH);
10979
kvm_x86_call(sync_dirty_debug_regs)(vcpu);
10980
kvm_update_dr0123(vcpu);
10981
kvm_update_dr7(vcpu);
10982
}
10983
10984
/*
10985
* If the guest has used debug registers, at least dr7
10986
* will be disabled while returning to the host.
10987
* If we don't have active breakpoints in the host, we don't
10988
* care about the messed up debug address registers. But if
10989
* we have some of them active, restore the old state.
10990
*/
10991
if (hw_breakpoint_active())
10992
hw_breakpoint_restore();
10993
10994
vcpu->arch.last_vmentry_cpu = vcpu->cpu;
10995
vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
10996
10997
vcpu->mode = OUTSIDE_GUEST_MODE;
10998
smp_wmb();
10999
11000
/*
11001
* Sync xfd before calling handle_exit_irqoff() which may
11002
* rely on the fact that guest_fpu::xfd is up-to-date (e.g.
11003
* in #NM irqoff handler).
11004
*/
11005
if (vcpu->arch.xfd_no_write_intercept)
11006
fpu_sync_guest_vmexit_xfd_state();
11007
11008
kvm_x86_call(handle_exit_irqoff)(vcpu);
11009
11010
if (vcpu->arch.guest_fpu.xfd_err)
11011
wrmsrq(MSR_IA32_XFD_ERR, 0);
11012
11013
/*
11014
* Consume any pending interrupts, including the possible source of
11015
* VM-Exit on SVM and any ticks that occur between VM-Exit and now.
11016
* An instruction is required after local_irq_enable() to fully unblock
11017
* interrupts on processors that implement an interrupt shadow; the
11018
* stat.exits increment will do nicely.
11019
*/
11020
kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ);
11021
local_irq_enable();
11022
++vcpu->stat.exits;
11023
local_irq_disable();
11024
kvm_after_interrupt(vcpu);
11025
11026
/*
11027
* Wait until after servicing IRQs to account guest time so that any
11028
* ticks that occurred while running the guest are properly accounted
11029
* to the guest. Waiting until IRQs are enabled degrades the accuracy
11030
* of accounting via context tracking, but the loss of accuracy is
11031
* acceptable for all known use cases.
11032
*/
11033
guest_timing_exit_irqoff();
11034
11035
local_irq_enable();
11036
preempt_enable();
11037
11038
kvm_vcpu_srcu_read_lock(vcpu);
11039
11040
/*
11041
* Call this to ensure WC buffers in guest are evicted after each VM
11042
* Exit, so that the evicted WC writes can be snooped across all cpus
11043
*/
11044
smp_mb__after_srcu_read_lock();
11045
11046
/*
11047
* Profile KVM exit RIPs:
11048
*/
11049
if (unlikely(prof_on == KVM_PROFILING &&
11050
!vcpu->arch.guest_state_protected)) {
11051
unsigned long rip = kvm_rip_read(vcpu);
11052
profile_hit(KVM_PROFILING, (void *)rip);
11053
}
11054
11055
if (unlikely(vcpu->arch.tsc_always_catchup))
11056
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
11057
11058
if (vcpu->arch.apic_attention)
11059
kvm_lapic_sync_from_vapic(vcpu);
11060
11061
if (unlikely(exit_fastpath == EXIT_FASTPATH_EXIT_USERSPACE))
11062
return 0;
11063
11064
r = kvm_x86_call(handle_exit)(vcpu, exit_fastpath);
11065
return r;
11066
11067
cancel_injection:
11068
if (req_immediate_exit)
11069
kvm_make_request(KVM_REQ_EVENT, vcpu);
11070
kvm_x86_call(cancel_injection)(vcpu);
11071
if (unlikely(vcpu->arch.apic_attention))
11072
kvm_lapic_sync_from_vapic(vcpu);
11073
out:
11074
return r;
11075
}
11076
11077
static bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
11078
{
11079
return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
11080
!vcpu->arch.apf.halted);
11081
}
11082
11083
bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
11084
{
11085
if (!list_empty_careful(&vcpu->async_pf.done))
11086
return true;
11087
11088
if (kvm_apic_has_pending_init_or_sipi(vcpu) &&
11089
kvm_apic_init_sipi_allowed(vcpu))
11090
return true;
11091
11092
if (kvm_is_exception_pending(vcpu))
11093
return true;
11094
11095
if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
11096
(vcpu->arch.nmi_pending &&
11097
kvm_x86_call(nmi_allowed)(vcpu, false)))
11098
return true;
11099
11100
#ifdef CONFIG_KVM_SMM
11101
if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
11102
(vcpu->arch.smi_pending &&
11103
kvm_x86_call(smi_allowed)(vcpu, false)))
11104
return true;
11105
#endif
11106
11107
if (kvm_test_request(KVM_REQ_PMI, vcpu))
11108
return true;
11109
11110
if (kvm_test_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, vcpu))
11111
return true;
11112
11113
if (kvm_arch_interrupt_allowed(vcpu) && kvm_cpu_has_interrupt(vcpu))
11114
return true;
11115
11116
if (kvm_hv_has_stimer_pending(vcpu))
11117
return true;
11118
11119
if (is_guest_mode(vcpu) &&
11120
kvm_x86_ops.nested_ops->has_events &&
11121
kvm_x86_ops.nested_ops->has_events(vcpu, false))
11122
return true;
11123
11124
if (kvm_xen_has_pending_events(vcpu))
11125
return true;
11126
11127
return false;
11128
}
11129
EXPORT_SYMBOL_GPL(kvm_vcpu_has_events);
11130
11131
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
11132
{
11133
return kvm_vcpu_running(vcpu) || vcpu->arch.pv.pv_unhalted ||
11134
kvm_vcpu_has_events(vcpu);
11135
}
11136
11137
/* Called within kvm->srcu read side. */
11138
static inline int vcpu_block(struct kvm_vcpu *vcpu)
11139
{
11140
bool hv_timer;
11141
11142
if (!kvm_arch_vcpu_runnable(vcpu)) {
11143
/*
11144
* Switch to the software timer before halt-polling/blocking as
11145
* the guest's timer may be a break event for the vCPU, and the
11146
* hypervisor timer runs only when the CPU is in guest mode.
11147
* Switch before halt-polling so that KVM recognizes an expired
11148
* timer before blocking.
11149
*/
11150
hv_timer = kvm_lapic_hv_timer_in_use(vcpu);
11151
if (hv_timer)
11152
kvm_lapic_switch_to_sw_timer(vcpu);
11153
11154
kvm_vcpu_srcu_read_unlock(vcpu);
11155
if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
11156
kvm_vcpu_halt(vcpu);
11157
else
11158
kvm_vcpu_block(vcpu);
11159
kvm_vcpu_srcu_read_lock(vcpu);
11160
11161
if (hv_timer)
11162
kvm_lapic_switch_to_hv_timer(vcpu);
11163
11164
/*
11165
* If the vCPU is not runnable, a signal or another host event
11166
* of some kind is pending; service it without changing the
11167
* vCPU's activity state.
11168
*/
11169
if (!kvm_arch_vcpu_runnable(vcpu))
11170
return 1;
11171
}
11172
11173
/*
11174
* Evaluate nested events before exiting the halted state. This allows
11175
* the halt state to be recorded properly in the VMCS12's activity
11176
* state field (AMD does not have a similar field and a VM-Exit always
11177
* causes a spurious wakeup from HLT).
11178
*/
11179
if (is_guest_mode(vcpu)) {
11180
int r = kvm_check_nested_events(vcpu);
11181
11182
WARN_ON_ONCE(r == -EBUSY);
11183
if (r < 0)
11184
return 0;
11185
}
11186
11187
if (kvm_apic_accept_events(vcpu) < 0)
11188
return 0;
11189
switch(vcpu->arch.mp_state) {
11190
case KVM_MP_STATE_HALTED:
11191
case KVM_MP_STATE_AP_RESET_HOLD:
11192
kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
11193
fallthrough;
11194
case KVM_MP_STATE_RUNNABLE:
11195
vcpu->arch.apf.halted = false;
11196
break;
11197
case KVM_MP_STATE_INIT_RECEIVED:
11198
break;
11199
default:
11200
WARN_ON_ONCE(1);
11201
break;
11202
}
11203
return 1;
11204
}
11205
11206
/* Called within kvm->srcu read side. */
11207
static int vcpu_run(struct kvm_vcpu *vcpu)
11208
{
11209
int r;
11210
11211
vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
11212
11213
for (;;) {
11214
/*
11215
* If another guest vCPU requests a PV TLB flush in the middle
11216
* of instruction emulation, the rest of the emulation could
11217
* use a stale page translation. Assume that any code after
11218
* this point can start executing an instruction.
11219
*/
11220
vcpu->arch.at_instruction_boundary = false;
11221
if (kvm_vcpu_running(vcpu)) {
11222
r = vcpu_enter_guest(vcpu);
11223
} else {
11224
r = vcpu_block(vcpu);
11225
}
11226
11227
if (r <= 0)
11228
break;
11229
11230
kvm_clear_request(KVM_REQ_UNBLOCK, vcpu);
11231
if (kvm_xen_has_pending_events(vcpu))
11232
kvm_xen_inject_pending_events(vcpu);
11233
11234
if (kvm_cpu_has_pending_timer(vcpu))
11235
kvm_inject_pending_timer_irqs(vcpu);
11236
11237
if (dm_request_for_irq_injection(vcpu) &&
11238
kvm_vcpu_ready_for_interrupt_injection(vcpu)) {
11239
r = 0;
11240
vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
11241
++vcpu->stat.request_irq_exits;
11242
break;
11243
}
11244
11245
if (__xfer_to_guest_mode_work_pending()) {
11246
kvm_vcpu_srcu_read_unlock(vcpu);
11247
r = xfer_to_guest_mode_handle_work(vcpu);
11248
kvm_vcpu_srcu_read_lock(vcpu);
11249
if (r)
11250
return r;
11251
}
11252
}
11253
11254
return r;
11255
}
11256
11257
static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
11258
{
11259
/*
11260
* The vCPU has halted, e.g. executed HLT. Update the run state if the
11261
* local APIC is in-kernel, the run loop will detect the non-runnable
11262
* state and halt the vCPU. Exit to userspace if the local APIC is
11263
* managed by userspace, in which case userspace is responsible for
11264
* handling wake events.
11265
*/
11266
++vcpu->stat.halt_exits;
11267
if (lapic_in_kernel(vcpu)) {
11268
if (kvm_vcpu_has_events(vcpu) || vcpu->arch.pv.pv_unhalted)
11269
state = KVM_MP_STATE_RUNNABLE;
11270
kvm_set_mp_state(vcpu, state);
11271
return 1;
11272
} else {
11273
vcpu->run->exit_reason = reason;
11274
return 0;
11275
}
11276
}
11277
11278
int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu)
11279
{
11280
return __kvm_emulate_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT);
11281
}
11282
EXPORT_SYMBOL_GPL(kvm_emulate_halt_noskip);
11283
11284
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
11285
{
11286
int ret = kvm_skip_emulated_instruction(vcpu);
11287
/*
11288
* TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered
11289
* KVM_EXIT_DEBUG here.
11290
*/
11291
return kvm_emulate_halt_noskip(vcpu) && ret;
11292
}
11293
EXPORT_SYMBOL_GPL(kvm_emulate_halt);
11294
11295
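/*
* Rough summary of the fastpath HLT outcomes (the body below is
* authoritative): if kvm_emulate_halt() returns 0, the local APIC is
* managed by userspace and the exit must be forwarded there
* (EXIT_FASTPATH_EXIT_USERSPACE). If the vCPU is still runnable, e.g. a
* wake event arrived while emulating HLT, re-enter the guest immediately
* (EXIT_FASTPATH_REENTER_GUEST). Otherwise fall back to the normal run
* loop, which will block the vCPU (EXIT_FASTPATH_EXIT_HANDLED).
*/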
fastpath_t handle_fastpath_hlt(struct kvm_vcpu *vcpu)
11296
{
11297
int ret;
11298
11299
kvm_vcpu_srcu_read_lock(vcpu);
11300
ret = kvm_emulate_halt(vcpu);
11301
kvm_vcpu_srcu_read_unlock(vcpu);
11302
11303
if (!ret)
11304
return EXIT_FASTPATH_EXIT_USERSPACE;
11305
11306
if (kvm_vcpu_running(vcpu))
11307
return EXIT_FASTPATH_REENTER_GUEST;
11308
11309
return EXIT_FASTPATH_EXIT_HANDLED;
11310
}
11311
EXPORT_SYMBOL_GPL(handle_fastpath_hlt);
11312
11313
int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu)
11314
{
11315
int ret = kvm_skip_emulated_instruction(vcpu);
11316
11317
return __kvm_emulate_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD,
11318
KVM_EXIT_AP_RESET_HOLD) && ret;
11319
}
11320
EXPORT_SYMBOL_GPL(kvm_emulate_ap_reset_hold);
11321
11322
bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
11323
{
11324
return kvm_vcpu_apicv_active(vcpu) &&
11325
kvm_x86_call(dy_apicv_has_pending_interrupt)(vcpu);
11326
}
11327
11328
bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
11329
{
11330
return vcpu->arch.preempted_in_kernel;
11331
}
11332
11333
bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
11334
{
11335
if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
11336
return true;
11337
11338
if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
11339
#ifdef CONFIG_KVM_SMM
11340
kvm_test_request(KVM_REQ_SMI, vcpu) ||
11341
#endif
11342
kvm_test_request(KVM_REQ_EVENT, vcpu))
11343
return true;
11344
11345
return kvm_arch_dy_has_pending_interrupt(vcpu);
11346
}
11347
11348
static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
11349
{
11350
return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
11351
}
11352
11353
static int complete_emulated_pio(struct kvm_vcpu *vcpu)
11354
{
11355
BUG_ON(!vcpu->arch.pio.count);
11356
11357
return complete_emulated_io(vcpu);
11358
}
11359
11360
/*
11361
* Implements the following, as a state machine:
11362
*
11363
* read:
11364
* for each fragment
11365
* for each mmio piece in the fragment
11366
* write gpa, len
11367
* exit
11368
* copy data
11369
* execute insn
11370
*
11371
* write:
11372
* for each fragment
11373
* for each mmio piece in the fragment
11374
* write gpa, len
11375
* copy data
11376
* exit
11377
*/
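/*
* Illustrative walk-through (the 16-byte size is an example only): a
* 16-byte emulated read is queued as a single fragment with len = 16.
* The first pass through complete_emulated_mmio() copies the 8 bytes
* userspace supplied, advances gpa/data by 8, shrinks the fragment to 8
* bytes and exits again for the second piece. The second pass copies the
* remaining 8 bytes, moves past the last fragment, sets
* mmio_read_completed and re-enters the emulator to finish the
* instruction.
*/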
11378
static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
11379
{
11380
struct kvm_run *run = vcpu->run;
11381
struct kvm_mmio_fragment *frag;
11382
unsigned len;
11383
11384
BUG_ON(!vcpu->mmio_needed);
11385
11386
/* Complete previous fragment */
11387
frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
11388
len = min(8u, frag->len);
11389
if (!vcpu->mmio_is_write)
11390
memcpy(frag->data, run->mmio.data, len);
11391
11392
if (frag->len <= 8) {
11393
/* Switch to the next fragment. */
11394
frag++;
11395
vcpu->mmio_cur_fragment++;
11396
} else {
11397
/* Go forward to the next mmio piece. */
11398
frag->data += len;
11399
frag->gpa += len;
11400
frag->len -= len;
11401
}
11402
11403
if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
11404
vcpu->mmio_needed = 0;
11405
11406
/* FIXME: return into emulator if single-stepping. */
11407
if (vcpu->mmio_is_write)
11408
return 1;
11409
vcpu->mmio_read_completed = 1;
11410
return complete_emulated_io(vcpu);
11411
}
11412
11413
run->exit_reason = KVM_EXIT_MMIO;
11414
run->mmio.phys_addr = frag->gpa;
11415
if (vcpu->mmio_is_write)
11416
memcpy(run->mmio.data, frag->data, min(8u, frag->len));
11417
run->mmio.len = min(8u, frag->len);
11418
run->mmio.is_write = vcpu->mmio_is_write;
11419
vcpu->arch.complete_userspace_io = complete_emulated_mmio;
11420
return 0;
11421
}
11422
11423
/* Swap (qemu) user FPU context for the guest FPU context. */
11424
static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
11425
{
11426
/* Exclude PKRU, it's restored separately immediately after VM-Exit. */
11427
fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, true);
11428
trace_kvm_fpu(1);
11429
}
11430
11431
/* When vcpu_run ends, restore user space FPU context. */
11432
static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
11433
{
11434
fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, false);
11435
++vcpu->stat.fpu_reload;
11436
trace_kvm_fpu(0);
11437
}
11438
11439
static int kvm_x86_vcpu_pre_run(struct kvm_vcpu *vcpu)
11440
{
11441
/*
11442
* SIPI_RECEIVED is obsolete; KVM leaves the vCPU in Wait-For-SIPI and
11443
* tracks the pending SIPI separately. SIPI_RECEIVED is still accepted
11444
* by KVM_SET_VCPU_EVENTS for backwards compatibility, but should be
11445
* converted to INIT_RECEIVED.
11446
*/
11447
if (WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED))
11448
return -EINVAL;
11449
11450
/*
11451
* Disallow running the vCPU if userspace forced it into an impossible
11452
* MP_STATE, e.g. if the vCPU is in WFS but SIPI is blocked.
11453
*/
11454
if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED &&
11455
!kvm_apic_init_sipi_allowed(vcpu))
11456
return -EINVAL;
11457
11458
return kvm_x86_call(vcpu_pre_run)(vcpu);
11459
}
11460
11461
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
11462
{
11463
struct kvm_queued_exception *ex = &vcpu->arch.exception;
11464
struct kvm_run *kvm_run = vcpu->run;
11465
u64 sync_valid_fields;
11466
int r;
11467
11468
r = kvm_mmu_post_init_vm(vcpu->kvm);
11469
if (r)
11470
return r;
11471
11472
vcpu_load(vcpu);
11473
kvm_sigset_activate(vcpu);
11474
kvm_run->flags = 0;
11475
kvm_load_guest_fpu(vcpu);
11476
11477
kvm_vcpu_srcu_read_lock(vcpu);
11478
if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
11479
if (!vcpu->wants_to_run) {
11480
r = -EINTR;
11481
goto out;
11482
}
11483
11484
/*
11485
* Don't bother switching APIC timer emulation from the
11486
* hypervisor timer to the software timer; the only way for the
11487
* APIC timer to be active is if userspace stuffed vCPU state,
11488
* i.e. put the vCPU into a nonsensical state. Only an INIT
11489
* will transition the vCPU out of UNINITIALIZED (without more
11490
* state stuffing from userspace), which will reset the local
11491
* APIC and thus cancel the timer or drop the IRQ (if the timer
11492
* already expired).
11493
*/
11494
kvm_vcpu_srcu_read_unlock(vcpu);
11495
kvm_vcpu_block(vcpu);
11496
kvm_vcpu_srcu_read_lock(vcpu);
11497
11498
if (kvm_apic_accept_events(vcpu) < 0) {
11499
r = 0;
11500
goto out;
11501
}
11502
r = -EAGAIN;
11503
if (signal_pending(current)) {
11504
r = -EINTR;
11505
kvm_run->exit_reason = KVM_EXIT_INTR;
11506
++vcpu->stat.signal_exits;
11507
}
11508
goto out;
11509
}
11510
11511
sync_valid_fields = kvm_sync_valid_fields(vcpu->kvm);
11512
if ((kvm_run->kvm_valid_regs & ~sync_valid_fields) ||
11513
(kvm_run->kvm_dirty_regs & ~sync_valid_fields)) {
11514
r = -EINVAL;
11515
goto out;
11516
}
11517
11518
if (kvm_run->kvm_dirty_regs) {
11519
r = sync_regs(vcpu);
11520
if (r != 0)
11521
goto out;
11522
}
11523
11524
/* re-sync apic's tpr */
11525
if (!lapic_in_kernel(vcpu)) {
11526
if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
11527
r = -EINVAL;
11528
goto out;
11529
}
11530
}
11531
11532
/*
11533
* If userspace set a pending exception and L2 is active, convert it to
11534
* a pending VM-Exit if L1 wants to intercept the exception.
11535
*/
11536
if (vcpu->arch.exception_from_userspace && is_guest_mode(vcpu) &&
11537
kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, ex->vector,
11538
ex->error_code)) {
11539
kvm_queue_exception_vmexit(vcpu, ex->vector,
11540
ex->has_error_code, ex->error_code,
11541
ex->has_payload, ex->payload);
11542
ex->injected = false;
11543
ex->pending = false;
11544
}
11545
vcpu->arch.exception_from_userspace = false;
11546
11547
if (unlikely(vcpu->arch.complete_userspace_io)) {
11548
int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
11549
vcpu->arch.complete_userspace_io = NULL;
11550
r = cui(vcpu);
11551
if (r <= 0)
11552
goto out;
11553
} else {
11554
WARN_ON_ONCE(vcpu->arch.pio.count);
11555
WARN_ON_ONCE(vcpu->mmio_needed);
11556
}
11557
11558
if (!vcpu->wants_to_run) {
11559
r = -EINTR;
11560
goto out;
11561
}
11562
11563
r = kvm_x86_vcpu_pre_run(vcpu);
11564
if (r <= 0)
11565
goto out;
11566
11567
r = vcpu_run(vcpu);
11568
11569
out:
11570
kvm_put_guest_fpu(vcpu);
11571
if (kvm_run->kvm_valid_regs && likely(!vcpu->arch.guest_state_protected))
11572
store_regs(vcpu);
11573
post_kvm_run_save(vcpu);
11574
kvm_vcpu_srcu_read_unlock(vcpu);
11575
11576
kvm_sigset_deactivate(vcpu);
11577
vcpu_put(vcpu);
11578
return r;
11579
}
11580
11581
static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
11582
{
11583
if (vcpu->arch.emulate_regs_need_sync_to_vcpu) {
11584
/*
11585
* We are here if userspace calls get_regs() in the middle of
11586
* instruction emulation. Register state needs to be copied
11587
* back from the emulation context to the vcpu. Userspace shouldn't
11588
* usually do that, but some badly designed PV devices (the VMware
11589
* backdoor interface) need this to work.
11590
*/
11591
emulator_writeback_register_cache(vcpu->arch.emulate_ctxt);
11592
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
11593
}
11594
regs->rax = kvm_rax_read(vcpu);
11595
regs->rbx = kvm_rbx_read(vcpu);
11596
regs->rcx = kvm_rcx_read(vcpu);
11597
regs->rdx = kvm_rdx_read(vcpu);
11598
regs->rsi = kvm_rsi_read(vcpu);
11599
regs->rdi = kvm_rdi_read(vcpu);
11600
regs->rsp = kvm_rsp_read(vcpu);
11601
regs->rbp = kvm_rbp_read(vcpu);
11602
#ifdef CONFIG_X86_64
11603
regs->r8 = kvm_r8_read(vcpu);
11604
regs->r9 = kvm_r9_read(vcpu);
11605
regs->r10 = kvm_r10_read(vcpu);
11606
regs->r11 = kvm_r11_read(vcpu);
11607
regs->r12 = kvm_r12_read(vcpu);
11608
regs->r13 = kvm_r13_read(vcpu);
11609
regs->r14 = kvm_r14_read(vcpu);
11610
regs->r15 = kvm_r15_read(vcpu);
11611
#endif
11612
11613
regs->rip = kvm_rip_read(vcpu);
11614
regs->rflags = kvm_get_rflags(vcpu);
11615
}
11616
11617
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
11618
{
11619
if (vcpu->kvm->arch.has_protected_state &&
11620
vcpu->arch.guest_state_protected)
11621
return -EINVAL;
11622
11623
vcpu_load(vcpu);
11624
__get_regs(vcpu, regs);
11625
vcpu_put(vcpu);
11626
return 0;
11627
}
11628
11629
static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
11630
{
11631
vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
11632
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
11633
11634
kvm_rax_write(vcpu, regs->rax);
11635
kvm_rbx_write(vcpu, regs->rbx);
11636
kvm_rcx_write(vcpu, regs->rcx);
11637
kvm_rdx_write(vcpu, regs->rdx);
11638
kvm_rsi_write(vcpu, regs->rsi);
11639
kvm_rdi_write(vcpu, regs->rdi);
11640
kvm_rsp_write(vcpu, regs->rsp);
11641
kvm_rbp_write(vcpu, regs->rbp);
11642
#ifdef CONFIG_X86_64
11643
kvm_r8_write(vcpu, regs->r8);
11644
kvm_r9_write(vcpu, regs->r9);
11645
kvm_r10_write(vcpu, regs->r10);
11646
kvm_r11_write(vcpu, regs->r11);
11647
kvm_r12_write(vcpu, regs->r12);
11648
kvm_r13_write(vcpu, regs->r13);
11649
kvm_r14_write(vcpu, regs->r14);
11650
kvm_r15_write(vcpu, regs->r15);
11651
#endif
11652
11653
kvm_rip_write(vcpu, regs->rip);
11654
kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED);
11655
11656
vcpu->arch.exception.pending = false;
11657
vcpu->arch.exception_vmexit.pending = false;
11658
11659
kvm_make_request(KVM_REQ_EVENT, vcpu);
11660
}
11661
11662
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
11663
{
11664
if (vcpu->kvm->arch.has_protected_state &&
11665
vcpu->arch.guest_state_protected)
11666
return -EINVAL;
11667
11668
vcpu_load(vcpu);
11669
__set_regs(vcpu, regs);
11670
vcpu_put(vcpu);
11671
return 0;
11672
}
11673
11674
static void __get_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
11675
{
11676
struct desc_ptr dt;
11677
11678
if (vcpu->arch.guest_state_protected)
11679
goto skip_protected_regs;
11680
11681
kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
11682
kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
11683
kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
11684
kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
11685
kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
11686
kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
11687
11688
kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
11689
kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
11690
11691
kvm_x86_call(get_idt)(vcpu, &dt);
11692
sregs->idt.limit = dt.size;
11693
sregs->idt.base = dt.address;
11694
kvm_x86_call(get_gdt)(vcpu, &dt);
11695
sregs->gdt.limit = dt.size;
11696
sregs->gdt.base = dt.address;
11697
11698
sregs->cr2 = vcpu->arch.cr2;
11699
sregs->cr3 = kvm_read_cr3(vcpu);
11700
11701
skip_protected_regs:
11702
sregs->cr0 = kvm_read_cr0(vcpu);
11703
sregs->cr4 = kvm_read_cr4(vcpu);
11704
sregs->cr8 = kvm_get_cr8(vcpu);
11705
sregs->efer = vcpu->arch.efer;
11706
sregs->apic_base = vcpu->arch.apic_base;
11707
}
11708
11709
static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
11710
{
11711
__get_sregs_common(vcpu, sregs);
11712
11713
if (vcpu->arch.guest_state_protected)
11714
return;
11715
11716
if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft)
11717
set_bit(vcpu->arch.interrupt.nr,
11718
(unsigned long *)sregs->interrupt_bitmap);
11719
}
11720
11721
static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2)
11722
{
11723
int i;
11724
11725
__get_sregs_common(vcpu, (struct kvm_sregs *)sregs2);
11726
11727
if (vcpu->arch.guest_state_protected)
11728
return;
11729
11730
if (is_pae_paging(vcpu)) {
11731
for (i = 0 ; i < 4 ; i++)
11732
sregs2->pdptrs[i] = kvm_pdptr_read(vcpu, i);
11733
sregs2->flags |= KVM_SREGS2_FLAGS_PDPTRS_VALID;
11734
}
11735
}
11736
11737
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
11738
struct kvm_sregs *sregs)
11739
{
11740
if (vcpu->kvm->arch.has_protected_state &&
11741
vcpu->arch.guest_state_protected)
11742
return -EINVAL;
11743
11744
vcpu_load(vcpu);
11745
__get_sregs(vcpu, sregs);
11746
vcpu_put(vcpu);
11747
return 0;
11748
}
11749
11750
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
11751
struct kvm_mp_state *mp_state)
11752
{
11753
int r;
11754
11755
vcpu_load(vcpu);
11756
if (kvm_mpx_supported())
11757
kvm_load_guest_fpu(vcpu);
11758
11759
kvm_vcpu_srcu_read_lock(vcpu);
11760
11761
r = kvm_apic_accept_events(vcpu);
11762
if (r < 0)
11763
goto out;
11764
r = 0;
11765
11766
if ((vcpu->arch.mp_state == KVM_MP_STATE_HALTED ||
11767
vcpu->arch.mp_state == KVM_MP_STATE_AP_RESET_HOLD) &&
11768
vcpu->arch.pv.pv_unhalted)
11769
mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
11770
else
11771
mp_state->mp_state = vcpu->arch.mp_state;
11772
11773
out:
11774
kvm_vcpu_srcu_read_unlock(vcpu);
11775
11776
if (kvm_mpx_supported())
11777
kvm_put_guest_fpu(vcpu);
11778
vcpu_put(vcpu);
11779
return r;
11780
}
11781
11782
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
11783
struct kvm_mp_state *mp_state)
11784
{
11785
int ret = -EINVAL;
11786
11787
vcpu_load(vcpu);
11788
11789
switch (mp_state->mp_state) {
11790
case KVM_MP_STATE_UNINITIALIZED:
11791
case KVM_MP_STATE_HALTED:
11792
case KVM_MP_STATE_AP_RESET_HOLD:
11793
case KVM_MP_STATE_INIT_RECEIVED:
11794
case KVM_MP_STATE_SIPI_RECEIVED:
11795
if (!lapic_in_kernel(vcpu))
11796
goto out;
11797
break;
11798
11799
case KVM_MP_STATE_RUNNABLE:
11800
break;
11801
11802
default:
11803
goto out;
11804
}
11805
11806
/*
11807
* SIPI_RECEIVED is obsolete and no longer used internally; KVM instead
11808
* leaves the vCPU in INIT_RECEIVED (Wait-For-SIPI) and pends the SIPI.
11809
* Translate SIPI_RECEIVED as appropriate for backwards compatibility.
11810
*/
11811
if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
11812
mp_state->mp_state = KVM_MP_STATE_INIT_RECEIVED;
11813
set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
11814
}
11815
11816
kvm_set_mp_state(vcpu, mp_state->mp_state);
11817
kvm_make_request(KVM_REQ_EVENT, vcpu);
11818
11819
ret = 0;
11820
out:
11821
vcpu_put(vcpu);
11822
return ret;
11823
}
11824
11825
int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
11826
int reason, bool has_error_code, u32 error_code)
11827
{
11828
struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
11829
int ret;
11830
11831
init_emulate_ctxt(vcpu);
11832
11833
ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason,
11834
has_error_code, error_code);
11835
11836
/*
11837
* Report an error to userspace if MMIO is needed, as KVM doesn't support
11838
* MMIO during a task switch (or any other complex operation).
11839
*/
11840
if (ret || vcpu->mmio_needed) {
11841
vcpu->mmio_needed = false;
11842
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
11843
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
11844
vcpu->run->internal.ndata = 0;
11845
return 0;
11846
}
11847
11848
kvm_rip_write(vcpu, ctxt->eip);
11849
kvm_set_rflags(vcpu, ctxt->eflags);
11850
return 1;
11851
}
11852
EXPORT_SYMBOL_GPL(kvm_task_switch);
11853
11854
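/*
* For example (illustrative values only), userspace-provided long mode
* state must keep EFER.LME/LMA, CR0.PG and CR4.PAE consistent: CR0 with
* PE and PG set, EFER with LME and LMA set, and CR4 with PAE set is
* accepted, whereas the same state with CR4.PAE or EFER.LMA clear is
* rejected, as is any non-long-mode state that leaves EFER.LMA set or
* marks CS as a 64-bit segment (cs.l).
*/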
static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
11855
{
11856
if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
11857
/*
11858
* When EFER.LME and CR0.PG are set, the processor is in
11859
* 64-bit mode (though maybe in a 32-bit code segment).
11860
* CR4.PAE and EFER.LMA must be set.
11861
*/
11862
if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA))
11863
return false;
11864
if (!kvm_vcpu_is_legal_cr3(vcpu, sregs->cr3))
11865
return false;
11866
} else {
11867
/*
11868
* Not in 64-bit mode: EFER.LMA is clear and the code
11869
* segment cannot be 64-bit.
11870
*/
11871
if (sregs->efer & EFER_LMA || sregs->cs.l)
11872
return false;
11873
}
11874
11875
return kvm_is_valid_cr4(vcpu, sregs->cr4) &&
11876
kvm_is_valid_cr0(vcpu, sregs->cr0);
11877
}
11878
11879
static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs,
11880
int *mmu_reset_needed, bool update_pdptrs)
11881
{
11882
int idx;
11883
struct desc_ptr dt;
11884
11885
if (!kvm_is_valid_sregs(vcpu, sregs))
11886
return -EINVAL;
11887
11888
if (kvm_apic_set_base(vcpu, sregs->apic_base, true))
11889
return -EINVAL;
11890
11891
if (vcpu->arch.guest_state_protected)
11892
return 0;
11893
11894
dt.size = sregs->idt.limit;
11895
dt.address = sregs->idt.base;
11896
kvm_x86_call(set_idt)(vcpu, &dt);
11897
dt.size = sregs->gdt.limit;
11898
dt.address = sregs->gdt.base;
11899
kvm_x86_call(set_gdt)(vcpu, &dt);
11900
11901
vcpu->arch.cr2 = sregs->cr2;
11902
*mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
11903
vcpu->arch.cr3 = sregs->cr3;
11904
kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
11905
kvm_x86_call(post_set_cr3)(vcpu, sregs->cr3);
11906
11907
kvm_set_cr8(vcpu, sregs->cr8);
11908
11909
*mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
11910
kvm_x86_call(set_efer)(vcpu, sregs->efer);
11911
11912
*mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
11913
kvm_x86_call(set_cr0)(vcpu, sregs->cr0);
11914
11915
*mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
11916
kvm_x86_call(set_cr4)(vcpu, sregs->cr4);
11917
11918
if (update_pdptrs) {
11919
idx = srcu_read_lock(&vcpu->kvm->srcu);
11920
if (is_pae_paging(vcpu)) {
11921
load_pdptrs(vcpu, kvm_read_cr3(vcpu));
11922
*mmu_reset_needed = 1;
11923
}
11924
srcu_read_unlock(&vcpu->kvm->srcu, idx);
11925
}
11926
11927
kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
11928
kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
11929
kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
11930
kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
11931
kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
11932
kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
11933
11934
kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
11935
kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
11936
11937
update_cr8_intercept(vcpu);
11938
11939
/* Older userspace won't unhalt the vcpu on reset. */
11940
if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
11941
sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
11942
!is_protmode(vcpu))
11943
kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
11944
11945
return 0;
11946
}
11947
11948
static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
11949
{
11950
int pending_vec, max_bits;
11951
int mmu_reset_needed = 0;
11952
int ret = __set_sregs_common(vcpu, sregs, &mmu_reset_needed, true);
11953
11954
if (ret)
11955
return ret;
11956
11957
if (mmu_reset_needed) {
11958
kvm_mmu_reset_context(vcpu);
11959
kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
11960
}
11961
11962
max_bits = KVM_NR_INTERRUPTS;
11963
pending_vec = find_first_bit(
11964
(const unsigned long *)sregs->interrupt_bitmap, max_bits);
11965
11966
if (pending_vec < max_bits) {
11967
kvm_queue_interrupt(vcpu, pending_vec, false);
11968
pr_debug("Set back pending irq %d\n", pending_vec);
11969
kvm_make_request(KVM_REQ_EVENT, vcpu);
11970
}
11971
return 0;
11972
}
11973
11974
static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2)
11975
{
11976
int mmu_reset_needed = 0;
11977
bool valid_pdptrs = sregs2->flags & KVM_SREGS2_FLAGS_PDPTRS_VALID;
11978
bool pae = (sregs2->cr0 & X86_CR0_PG) && (sregs2->cr4 & X86_CR4_PAE) &&
11979
!(sregs2->efer & EFER_LMA);
11980
int i, ret;
11981
11982
if (sregs2->flags & ~KVM_SREGS2_FLAGS_PDPTRS_VALID)
11983
return -EINVAL;
11984
11985
if (valid_pdptrs && (!pae || vcpu->arch.guest_state_protected))
11986
return -EINVAL;
11987
11988
ret = __set_sregs_common(vcpu, (struct kvm_sregs *)sregs2,
11989
&mmu_reset_needed, !valid_pdptrs);
11990
if (ret)
11991
return ret;
11992
11993
if (valid_pdptrs) {
11994
for (i = 0; i < 4 ; i++)
11995
kvm_pdptr_write(vcpu, i, sregs2->pdptrs[i]);
11996
11997
kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
11998
mmu_reset_needed = 1;
11999
vcpu->arch.pdptrs_from_userspace = true;
12000
}
12001
if (mmu_reset_needed) {
12002
kvm_mmu_reset_context(vcpu);
12003
kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
12004
}
12005
return 0;
12006
}
12007
12008
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
12009
struct kvm_sregs *sregs)
12010
{
12011
int ret;
12012
12013
if (vcpu->kvm->arch.has_protected_state &&
12014
vcpu->arch.guest_state_protected)
12015
return -EINVAL;
12016
12017
vcpu_load(vcpu);
12018
ret = __set_sregs(vcpu, sregs);
12019
vcpu_put(vcpu);
12020
return ret;
12021
}
12022
12023
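/*
* KVM_GUESTDBG_BLOCKIRQ requires that no interrupts be delivered to the
* guest behind the debugger's back. Hardware APIC virtualization can
* inject interrupts without a VM-Exit, so inhibit APICv/AVIC while any
* vCPU in the VM has the flag set.
*/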
static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm)
12024
{
12025
bool set = false;
12026
struct kvm_vcpu *vcpu;
12027
unsigned long i;
12028
12029
if (!enable_apicv)
12030
return;
12031
12032
down_write(&kvm->arch.apicv_update_lock);
12033
12034
kvm_for_each_vcpu(i, vcpu, kvm) {
12035
if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ) {
12036
set = true;
12037
break;
12038
}
12039
}
12040
__kvm_set_or_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_BLOCKIRQ, set);
12041
up_write(&kvm->arch.apicv_update_lock);
12042
}
12043
12044
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
12045
struct kvm_guest_debug *dbg)
12046
{
12047
unsigned long rflags;
12048
int i, r;
12049
12050
if (vcpu->arch.guest_state_protected)
12051
return -EINVAL;
12052
12053
vcpu_load(vcpu);
12054
12055
if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
12056
r = -EBUSY;
12057
if (kvm_is_exception_pending(vcpu))
12058
goto out;
12059
if (dbg->control & KVM_GUESTDBG_INJECT_DB)
12060
kvm_queue_exception(vcpu, DB_VECTOR);
12061
else
12062
kvm_queue_exception(vcpu, BP_VECTOR);
12063
}
12064
12065
/*
12066
* Read rflags as long as potentially injected trace flags are still
12067
* filtered out.
12068
*/
12069
rflags = kvm_get_rflags(vcpu);
12070
12071
vcpu->guest_debug = dbg->control;
12072
if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
12073
vcpu->guest_debug = 0;
12074
12075
if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
12076
for (i = 0; i < KVM_NR_DB_REGS; ++i)
12077
vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
12078
vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7];
12079
} else {
12080
for (i = 0; i < KVM_NR_DB_REGS; i++)
12081
vcpu->arch.eff_db[i] = vcpu->arch.db[i];
12082
}
12083
kvm_update_dr7(vcpu);
12084
12085
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
12086
vcpu->arch.singlestep_rip = kvm_get_linear_rip(vcpu);
12087
12088
/*
12089
* Trigger an rflags update that will inject or remove the trace
12090
* flags.
12091
*/
12092
kvm_set_rflags(vcpu, rflags);
12093
12094
kvm_x86_call(update_exception_bitmap)(vcpu);
12095
12096
kvm_arch_vcpu_guestdbg_update_apicv_inhibit(vcpu->kvm);
12097
12098
r = 0;
12099
12100
out:
12101
vcpu_put(vcpu);
12102
return r;
12103
}
12104
12105
/*
12106
* Translate a guest virtual address to a guest physical address.
12107
*/
12108
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
12109
struct kvm_translation *tr)
12110
{
12111
unsigned long vaddr = tr->linear_address;
12112
gpa_t gpa;
12113
int idx;
12114
12115
vcpu_load(vcpu);
12116
12117
idx = srcu_read_lock(&vcpu->kvm->srcu);
12118
gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
12119
srcu_read_unlock(&vcpu->kvm->srcu, idx);
12120
tr->physical_address = gpa;
12121
tr->valid = gpa != INVALID_GPA;
12122
tr->writeable = 1;
12123
tr->usermode = 0;
12124
12125
vcpu_put(vcpu);
12126
return 0;
12127
}
12128
12129
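/*
* KVM_GET_FPU and KVM_SET_FPU exchange the legacy FXSAVE image: the eight
* x87/MMX registers in 16-byte slots (hence the 128-byte st_space copy),
* the x87 control/status/tag words, the last instruction/operand
* pointers, and the XMM registers. Extended state (YMM, AVX-512, etc.)
* is not covered here and is exchanged via KVM_GET_XSAVE/KVM_SET_XSAVE.
*/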
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
12130
{
12131
struct fxregs_state *fxsave;
12132
12133
if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
12134
return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
12135
12136
vcpu_load(vcpu);
12137
12138
fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave;
12139
memcpy(fpu->fpr, fxsave->st_space, 128);
12140
fpu->fcw = fxsave->cwd;
12141
fpu->fsw = fxsave->swd;
12142
fpu->ftwx = fxsave->twd;
12143
fpu->last_opcode = fxsave->fop;
12144
fpu->last_ip = fxsave->rip;
12145
fpu->last_dp = fxsave->rdp;
12146
memcpy(fpu->xmm, fxsave->xmm_space, sizeof(fxsave->xmm_space));
12147
12148
vcpu_put(vcpu);
12149
return 0;
12150
}
12151
12152
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
12153
{
12154
struct fxregs_state *fxsave;
12155
12156
if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
12157
return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
12158
12159
vcpu_load(vcpu);
12160
12161
fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave;
12162
12163
memcpy(fxsave->st_space, fpu->fpr, 128);
12164
fxsave->cwd = fpu->fcw;
12165
fxsave->swd = fpu->fsw;
12166
fxsave->twd = fpu->ftwx;
12167
fxsave->fop = fpu->last_opcode;
12168
fxsave->rip = fpu->last_ip;
12169
fxsave->rdp = fpu->last_dp;
12170
memcpy(fxsave->xmm_space, fpu->xmm, sizeof(fxsave->xmm_space));
12171
12172
vcpu_put(vcpu);
12173
return 0;
12174
}
12175
12176
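/*
* store_regs() and sync_regs() back the KVM_CAP_SYNC_REGS fast path: when
* userspace sets bits in kvm_run->kvm_valid_regs, KVM_RUN copies the
* corresponding GPRs, sregs and vCPU events into kvm_run->s.regs on exit,
* and when userspace sets kvm_run->kvm_dirty_regs, the same state is
* pushed back into the vCPU before entering the guest, avoiding separate
* KVM_GET_REGS/KVM_SET_REGS round trips.
*/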
static void store_regs(struct kvm_vcpu *vcpu)
12177
{
12178
BUILD_BUG_ON(sizeof(struct kvm_sync_regs) > SYNC_REGS_SIZE_BYTES);
12179
12180
if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_REGS)
12181
__get_regs(vcpu, &vcpu->run->s.regs.regs);
12182
12183
if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_SREGS)
12184
__get_sregs(vcpu, &vcpu->run->s.regs.sregs);
12185
12186
if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_EVENTS)
12187
kvm_vcpu_ioctl_x86_get_vcpu_events(
12188
vcpu, &vcpu->run->s.regs.events);
12189
}
12190
12191
static int sync_regs(struct kvm_vcpu *vcpu)
12192
{
12193
if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_REGS) {
12194
__set_regs(vcpu, &vcpu->run->s.regs.regs);
12195
vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS;
12196
}
12197
12198
if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) {
12199
struct kvm_sregs sregs = vcpu->run->s.regs.sregs;
12200
12201
if (__set_sregs(vcpu, &sregs))
12202
return -EINVAL;
12203
12204
vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS;
12205
}
12206
12207
if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) {
12208
struct kvm_vcpu_events events = vcpu->run->s.regs.events;
12209
12210
if (kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events))
12211
return -EINVAL;
12212
12213
vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS;
12214
}
12215
12216
return 0;
12217
}
12218
12219
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
12220
{
12221
if (kvm_check_tsc_unstable() && kvm->created_vcpus)
12222
pr_warn_once("SMP vm created on host with unstable TSC; "
12223
"guest TSC will not be reliable\n");
12224
12225
if (!kvm->arch.max_vcpu_ids)
12226
kvm->arch.max_vcpu_ids = KVM_MAX_VCPU_IDS;
12227
12228
if (id >= kvm->arch.max_vcpu_ids)
12229
return -EINVAL;
12230
12231
return kvm_x86_call(vcpu_precreate)(kvm);
12232
}
12233
12234
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
12235
{
12236
struct page *page;
12237
int r;
12238
12239
vcpu->arch.last_vmentry_cpu = -1;
12240
vcpu->arch.regs_avail = ~0;
12241
vcpu->arch.regs_dirty = ~0;
12242
12243
kvm_gpc_init(&vcpu->arch.pv_time, vcpu->kvm);
12244
12245
if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
12246
kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
12247
else
12248
kvm_set_mp_state(vcpu, KVM_MP_STATE_UNINITIALIZED);
12249
12250
r = kvm_mmu_create(vcpu);
12251
if (r < 0)
12252
return r;
12253
12254
r = kvm_create_lapic(vcpu);
12255
if (r < 0)
12256
goto fail_mmu_destroy;
12257
12258
r = -ENOMEM;
12259
12260
page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
12261
if (!page)
12262
goto fail_free_lapic;
12263
vcpu->arch.pio_data = page_address(page);
12264
12265
vcpu->arch.mce_banks = kcalloc(KVM_MAX_MCE_BANKS * 4, sizeof(u64),
12266
GFP_KERNEL_ACCOUNT);
12267
vcpu->arch.mci_ctl2_banks = kcalloc(KVM_MAX_MCE_BANKS, sizeof(u64),
12268
GFP_KERNEL_ACCOUNT);
12269
if (!vcpu->arch.mce_banks || !vcpu->arch.mci_ctl2_banks)
12270
goto fail_free_mce_banks;
12271
vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
12272
12273
if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask,
12274
GFP_KERNEL_ACCOUNT))
12275
goto fail_free_mce_banks;
12276
12277
if (!alloc_emulate_ctxt(vcpu))
12278
goto free_wbinvd_dirty_mask;
12279
12280
if (!fpu_alloc_guest_fpstate(&vcpu->arch.guest_fpu)) {
12281
pr_err("failed to allocate vcpu's fpu\n");
12282
goto free_emulate_ctxt;
12283
}
12284
12285
kvm_async_pf_hash_reset(vcpu);
12286
12287
if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_STUFF_FEATURE_MSRS)) {
12288
vcpu->arch.arch_capabilities = kvm_get_arch_capabilities();
12289
vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
12290
vcpu->arch.perf_capabilities = kvm_caps.supported_perf_cap;
12291
}
12292
kvm_pmu_init(vcpu);
12293
12294
vcpu->arch.pending_external_vector = -1;
12295
vcpu->arch.preempted_in_kernel = false;
12296
12297
#if IS_ENABLED(CONFIG_HYPERV)
12298
vcpu->arch.hv_root_tdp = INVALID_PAGE;
12299
#endif
12300
12301
r = kvm_x86_call(vcpu_create)(vcpu);
12302
if (r)
12303
goto free_guest_fpu;
12304
12305
kvm_xen_init_vcpu(vcpu);
12306
vcpu_load(vcpu);
12307
kvm_vcpu_after_set_cpuid(vcpu);
12308
kvm_set_tsc_khz(vcpu, vcpu->kvm->arch.default_tsc_khz);
12309
kvm_vcpu_reset(vcpu, false);
12310
kvm_init_mmu(vcpu);
12311
vcpu_put(vcpu);
12312
return 0;
12313
12314
free_guest_fpu:
12315
fpu_free_guest_fpstate(&vcpu->arch.guest_fpu);
12316
free_emulate_ctxt:
12317
kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt);
12318
free_wbinvd_dirty_mask:
12319
free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
12320
fail_free_mce_banks:
12321
kfree(vcpu->arch.mce_banks);
12322
kfree(vcpu->arch.mci_ctl2_banks);
12323
free_page((unsigned long)vcpu->arch.pio_data);
12324
fail_free_lapic:
12325
kvm_free_lapic(vcpu);
12326
fail_mmu_destroy:
12327
kvm_mmu_destroy(vcpu);
12328
return r;
12329
}
12330
12331
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
12332
{
12333
struct kvm *kvm = vcpu->kvm;
12334
12335
if (mutex_lock_killable(&vcpu->mutex))
12336
return;
12337
vcpu_load(vcpu);
12338
kvm_synchronize_tsc(vcpu, NULL);
12339
vcpu_put(vcpu);
12340
12341
/* poll control enabled by default */
12342
vcpu->arch.msr_kvm_poll_control = 1;
12343
12344
mutex_unlock(&vcpu->mutex);
12345
12346
if (kvmclock_periodic_sync && vcpu->vcpu_idx == 0)
12347
schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
12348
KVMCLOCK_SYNC_PERIOD);
12349
}
12350
12351
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
12352
{
12353
int idx, cpu;
12354
12355
kvm_clear_async_pf_completion_queue(vcpu);
12356
kvm_mmu_unload(vcpu);
12357
12358
kvmclock_reset(vcpu);
12359
12360
for_each_possible_cpu(cpu)
12361
cmpxchg(per_cpu_ptr(&last_vcpu, cpu), vcpu, NULL);
12362
12363
kvm_x86_call(vcpu_free)(vcpu);
12364
12365
kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt);
12366
free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
12367
fpu_free_guest_fpstate(&vcpu->arch.guest_fpu);
12368
12369
kvm_xen_destroy_vcpu(vcpu);
12370
kvm_hv_vcpu_uninit(vcpu);
12371
kvm_pmu_destroy(vcpu);
12372
kfree(vcpu->arch.mce_banks);
12373
kfree(vcpu->arch.mci_ctl2_banks);
12374
kvm_free_lapic(vcpu);
12375
idx = srcu_read_lock(&vcpu->kvm->srcu);
12376
kvm_mmu_destroy(vcpu);
12377
srcu_read_unlock(&vcpu->kvm->srcu, idx);
12378
free_page((unsigned long)vcpu->arch.pio_data);
12379
kvfree(vcpu->arch.cpuid_entries);
12380
}
12381
12382
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
12383
{
12384
struct kvm_cpuid_entry2 *cpuid_0x1;
12385
unsigned long old_cr0 = kvm_read_cr0(vcpu);
12386
unsigned long new_cr0;
12387
12388
/*
12389
* Several of the "set" flows, e.g. ->set_cr0(), read other registers
12390
* to handle side effects. RESET emulation hits those flows and relies
12391
* on emulated/virtualized registers, including those that are loaded
12392
* into hardware, to be zeroed at vCPU creation. Use CRs as a sentinel
12393
* to detect improper or missing initialization.
12394
*/
12395
WARN_ON_ONCE(!init_event &&
12396
(old_cr0 || kvm_read_cr3(vcpu) || kvm_read_cr4(vcpu)));
12397
12398
/*
12399
* SVM doesn't unconditionally VM-Exit on INIT and SHUTDOWN, thus it's
12400
* possible to INIT the vCPU while L2 is active. Force the vCPU back
12401
* into L1 as EFER.SVME is cleared on INIT (along with all other EFER
12402
* bits), i.e. virtualization is disabled.
12403
*/
12404
if (is_guest_mode(vcpu))
12405
kvm_leave_nested(vcpu);
12406
12407
kvm_lapic_reset(vcpu, init_event);
12408
12409
WARN_ON_ONCE(is_guest_mode(vcpu) || is_smm(vcpu));
12410
vcpu->arch.hflags = 0;
12411
12412
vcpu->arch.smi_pending = 0;
12413
vcpu->arch.smi_count = 0;
12414
atomic_set(&vcpu->arch.nmi_queued, 0);
12415
vcpu->arch.nmi_pending = 0;
12416
vcpu->arch.nmi_injected = false;
12417
kvm_clear_interrupt_queue(vcpu);
12418
kvm_clear_exception_queue(vcpu);
12419
12420
memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
12421
kvm_update_dr0123(vcpu);
12422
vcpu->arch.dr6 = DR6_ACTIVE_LOW;
12423
vcpu->arch.dr7 = DR7_FIXED_1;
12424
kvm_update_dr7(vcpu);
12425
12426
vcpu->arch.cr2 = 0;
12427
12428
kvm_make_request(KVM_REQ_EVENT, vcpu);
12429
vcpu->arch.apf.msr_en_val = 0;
12430
vcpu->arch.apf.msr_int_val = 0;
12431
vcpu->arch.st.msr_val = 0;
12432
12433
kvmclock_reset(vcpu);
12434
12435
kvm_clear_async_pf_completion_queue(vcpu);
12436
kvm_async_pf_hash_reset(vcpu);
12437
vcpu->arch.apf.halted = false;
12438
12439
if (vcpu->arch.guest_fpu.fpstate && kvm_mpx_supported()) {
12440
struct fpstate *fpstate = vcpu->arch.guest_fpu.fpstate;
12441
12442
/*
12443
* All paths that lead to INIT are required to load the guest's
12444
* FPU state (because most paths are buried in KVM_RUN).
12445
*/
12446
if (init_event)
12447
kvm_put_guest_fpu(vcpu);
12448
12449
fpstate_clear_xstate_component(fpstate, XFEATURE_BNDREGS);
12450
fpstate_clear_xstate_component(fpstate, XFEATURE_BNDCSR);
12451
12452
if (init_event)
12453
kvm_load_guest_fpu(vcpu);
12454
}
12455
12456
if (!init_event) {
12457
vcpu->arch.smbase = 0x30000;
12458
12459
vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT;
12460
12461
vcpu->arch.msr_misc_features_enables = 0;
12462
vcpu->arch.ia32_misc_enable_msr = MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL |
12463
MSR_IA32_MISC_ENABLE_BTS_UNAVAIL;
12464
12465
__kvm_set_xcr(vcpu, 0, XFEATURE_MASK_FP);
12466
__kvm_set_msr(vcpu, MSR_IA32_XSS, 0, true);
12467
}
12468
12469
/* All GPRs except RDX (handled below) are zeroed on RESET/INIT. */
12470
memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
12471
kvm_register_mark_dirty(vcpu, VCPU_REGS_RSP);
12472
12473
/*
12474
* Fall back to KVM's default Family/Model/Stepping of 0x600 (P6/Athlon)
12475
* if no CPUID match is found. Note, it's impossible to get a match at
12476
* RESET since KVM emulates RESET before exposing the vCPU to userspace,
12477
* i.e. it's impossible for kvm_find_cpuid_entry() to find a valid entry
12478
* on RESET. But, go through the motions in case that's ever remedied.
12479
*/
12480
cpuid_0x1 = kvm_find_cpuid_entry(vcpu, 1);
12481
kvm_rdx_write(vcpu, cpuid_0x1 ? cpuid_0x1->eax : 0x600);
12482
12483
kvm_x86_call(vcpu_reset)(vcpu, init_event);
12484
12485
kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
12486
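/*
* Architectural reset vector: combined with the CS base of 0xffff0000
* established by vendor code on RESET, RIP = 0xfff0 makes the first
* instruction fetch hit physical address 0xfffffff0.
*/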
kvm_rip_write(vcpu, 0xfff0);
12487
12488
vcpu->arch.cr3 = 0;
12489
kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
12490
12491
/*
12492
* CR0.CD/NW are set on RESET, preserved on INIT. Note, some versions
12493
* of Intel's SDM list CD/NW as being set on INIT, but they contradict
12494
* (or qualify) that with a footnote stating that CD/NW are preserved.
12495
*/
12496
new_cr0 = X86_CR0_ET;
12497
if (init_event)
12498
new_cr0 |= (old_cr0 & (X86_CR0_NW | X86_CR0_CD));
12499
else
12500
new_cr0 |= X86_CR0_NW | X86_CR0_CD;
12501
12502
kvm_x86_call(set_cr0)(vcpu, new_cr0);
12503
kvm_x86_call(set_cr4)(vcpu, 0);
12504
kvm_x86_call(set_efer)(vcpu, 0);
12505
kvm_x86_call(update_exception_bitmap)(vcpu);
12506
12507
/*
12508
* On the standard CR0/CR4/EFER modification paths, there are several
12509
* complex conditions determining whether the MMU has to be reset and/or
12510
* which PCIDs have to be flushed. However, CR0.WP and the paging-related
12511
* bits in CR4 and EFER are irrelevant if CR0.PG was '0'; and a reset+flush
12512
* is needed anyway if CR0.PG was '1' (which can only happen for INIT, as
12513
* CR0 will be '0' prior to RESET). So we only need to check CR0.PG here.
12514
*/
12515
if (old_cr0 & X86_CR0_PG) {
12516
kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
12517
kvm_mmu_reset_context(vcpu);
12518
}
12519
12520
/*
12521
* Intel's SDM states that all TLB entries are flushed on INIT. AMD's
12522
* APM states the TLBs are untouched by INIT, but it also states that
12523
* the TLBs are flushed on "External initialization of the processor."
12524
* Flush the guest TLB regardless of vendor, there is no meaningful
12525
* benefit in relying on the guest to flush the TLB immediately after
12526
* INIT. A spurious TLB flush is benign and likely negligible from a
12527
* performance perspective.
12528
*/
12529
if (init_event)
12530
kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
12531
}
12532
EXPORT_SYMBOL_GPL(kvm_vcpu_reset);
12533
12534
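/*
* Deliver a Startup IPI: the 8-bit vector selects a 4KiB-aligned real-mode
* entry point. For example, vector 0x9a yields CS.selector = 0x9a00,
* CS.base = 0x9a000 and RIP = 0, so the AP starts executing at physical
* address 0x9a000.
*/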
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
12535
{
12536
struct kvm_segment cs;
12537
12538
kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
12539
cs.selector = vector << 8;
12540
cs.base = vector << 12;
12541
kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
12542
kvm_rip_write(vcpu, 0);
12543
}
12544
EXPORT_SYMBOL_GPL(kvm_vcpu_deliver_sipi_vector);
12545
12546
void kvm_arch_enable_virtualization(void)
12547
{
12548
cpu_emergency_register_virt_callback(kvm_x86_ops.emergency_disable_virtualization_cpu);
12549
}
12550
12551
void kvm_arch_disable_virtualization(void)
12552
{
12553
cpu_emergency_unregister_virt_callback(kvm_x86_ops.emergency_disable_virtualization_cpu);
12554
}
12555
12556
int kvm_arch_enable_virtualization_cpu(void)
12557
{
12558
struct kvm *kvm;
12559
struct kvm_vcpu *vcpu;
12560
unsigned long i;
12561
int ret;
12562
u64 local_tsc;
12563
u64 max_tsc = 0;
12564
bool stable, backwards_tsc = false;
12565
12566
kvm_user_return_msr_cpu_online();
12567
12568
ret = kvm_x86_check_processor_compatibility();
12569
if (ret)
12570
return ret;
12571
12572
ret = kvm_x86_call(enable_virtualization_cpu)();
12573
if (ret != 0)
12574
return ret;
12575
12576
local_tsc = rdtsc();
12577
stable = !kvm_check_tsc_unstable();
12578
list_for_each_entry(kvm, &vm_list, vm_list) {
12579
kvm_for_each_vcpu(i, vcpu, kvm) {
12580
if (!stable && vcpu->cpu == smp_processor_id())
12581
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
12582
if (stable && vcpu->arch.last_host_tsc > local_tsc) {
12583
backwards_tsc = true;
12584
if (vcpu->arch.last_host_tsc > max_tsc)
12585
max_tsc = vcpu->arch.last_host_tsc;
12586
}
12587
}
12588
}
12589
12590
/*
12591
* Sometimes, even reliable TSCs go backwards. This happens on
12592
* platforms that reset TSC during suspend or hibernate actions, but
12593
* maintain synchronization. We must compensate. Fortunately, we can
12594
* detect that condition here, which happens early in CPU bringup,
12595
* before any KVM threads can be running. Unfortunately, we can't
12596
* bring the TSCs fully up to date with real time, as we aren't yet far
12597
* enough into CPU bringup that we know how much real time has actually
12598
* elapsed; our helper function, ktime_get_boottime_ns() will be using boot
12599
* variables that haven't been updated yet.
12600
*
12601
* So we simply find the maximum observed TSC above, then record the
12602
* adjustment to TSC in each VCPU. When the VCPU later gets loaded,
12603
* the adjustment will be applied. Note that we accumulate
12604
* adjustments, in case multiple suspend cycles happen before some VCPU
12605
* gets a chance to run again. In the event that no KVM threads get a
12606
* chance to run, we will miss the entire elapsed period, as we'll have
12607
* reset last_host_tsc, so VCPUs will not have the TSC adjusted and may
12608
* loose cycle time. This isn't too big a deal, since the loss will be
12609
* uniform across all VCPUs (not to mention the scenario is extremely
12610
* unlikely). It is possible that a second hibernate recovery happens
12611
* much faster than a first, causing the observed TSC here to be
12612
* smaller; this would require additional padding adjustment, which is
12613
* why we set last_host_tsc to the local tsc observed here.
12614
*
12615
* N.B. - this code below runs only on platforms with reliable TSC,
12616
* as that is the only way backwards_tsc is set above. Also note
12617
* that this runs for ALL vcpus, which is not a bug; all VCPUs should
12618
* have the same delta_cyc adjustment applied if backwards_tsc
12619
* is detected. Note further, this adjustment is only done once,
12620
* as we reset last_host_tsc on all VCPUs to stop this from being
12621
* called multiple times (one for each physical CPU bringup).
12622
*
12623
* Platforms with unreliable TSCs don't have to deal with this, they
12624
* will be compensated by the logic in vcpu_load, which sets the TSC to
12625
* catchup mode. This will catchup all VCPUs to real time, but cannot
12626
* guarantee that they stay in perfect synchronization.
12627
*/
12628
if (backwards_tsc) {
12629
u64 delta_cyc = max_tsc - local_tsc;
12630
list_for_each_entry(kvm, &vm_list, vm_list) {
12631
kvm->arch.backwards_tsc_observed = true;
12632
kvm_for_each_vcpu(i, vcpu, kvm) {
12633
vcpu->arch.tsc_offset_adjustment += delta_cyc;
12634
vcpu->arch.last_host_tsc = local_tsc;
12635
kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
12636
}
12637
12638
/*
12639
* We have to disable TSC offset matching; if you were
12640
* booting a VM while issuing an S4 host suspend,
12641
* you may have a problem. Solving this issue is
12642
* left as an exercise to the reader.
12643
*/
12644
kvm->arch.last_tsc_nsec = 0;
12645
kvm->arch.last_tsc_write = 0;
12646
}
12647
12648
}
12649
return 0;
12650
}
12651
12652
void kvm_arch_disable_virtualization_cpu(void)
12653
{
12654
kvm_x86_call(disable_virtualization_cpu)();
12655
drop_user_return_notifiers();
12656
}
12657
12658
bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
12659
{
12660
return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id;
12661
}
12662
EXPORT_SYMBOL_GPL(kvm_vcpu_is_reset_bsp);
12663
12664
bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
12665
{
12666
return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
12667
}
12668
12669
void kvm_arch_free_vm(struct kvm *kvm)
12670
{
12671
#if IS_ENABLED(CONFIG_HYPERV)
12672
kfree(kvm->arch.hv_pa_pg);
12673
#endif
12674
__kvm_arch_free_vm(kvm);
12675
}
12676
12677
12678
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
12679
{
12680
int ret;
12681
unsigned long flags;
12682
12683
if (!kvm_is_vm_type_supported(type))
12684
return -EINVAL;
12685
12686
kvm->arch.vm_type = type;
12687
kvm->arch.has_private_mem =
12688
(type == KVM_X86_SW_PROTECTED_VM);
12689
/* Decided by the vendor code for other VM types. */
12690
kvm->arch.pre_fault_allowed =
12691
type == KVM_X86_DEFAULT_VM || type == KVM_X86_SW_PROTECTED_VM;
12692
kvm->arch.disabled_quirks = kvm_caps.inapplicable_quirks & kvm_caps.supported_quirks;
12693
12694
ret = kvm_page_track_init(kvm);
12695
if (ret)
12696
goto out;
12697
12698
ret = kvm_mmu_init_vm(kvm);
12699
if (ret)
12700
goto out_cleanup_page_track;
12701
12702
ret = kvm_x86_call(vm_init)(kvm);
12703
if (ret)
12704
goto out_uninit_mmu;
12705
12706
atomic_set(&kvm->arch.noncoherent_dma_count, 0);
12707
12708
raw_spin_lock_init(&kvm->arch.tsc_write_lock);
12709
mutex_init(&kvm->arch.apic_map_lock);
12710
seqcount_raw_spinlock_init(&kvm->arch.pvclock_sc, &kvm->arch.tsc_write_lock);
12711
kvm->arch.kvmclock_offset = -get_kvmclock_base_ns();
12712
12713
raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
12714
pvclock_update_vm_gtod_copy(kvm);
12715
raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
12716
12717
kvm->arch.default_tsc_khz = max_tsc_khz ? : tsc_khz;
12718
kvm->arch.apic_bus_cycle_ns = APIC_BUS_CYCLE_NS_DEFAULT;
12719
kvm->arch.guest_can_read_msr_platform_info = true;
12720
kvm->arch.enable_pmu = enable_pmu;
12721
12722
#if IS_ENABLED(CONFIG_HYPERV)
12723
spin_lock_init(&kvm->arch.hv_root_tdp_lock);
12724
kvm->arch.hv_root_tdp = INVALID_PAGE;
12725
#endif
12726
12727
INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
12728
INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
12729
12730
kvm_apicv_init(kvm);
12731
kvm_hv_init_vm(kvm);
12732
kvm_xen_init_vm(kvm);
12733
12734
if (ignore_msrs && !report_ignored_msrs) {
12735
pr_warn_once("Running KVM with ignore_msrs=1 and report_ignored_msrs=0 is not a\n"
12736
"a supported configuration. Lying to the guest about the existence of MSRs\n"
12737
"may cause the guest operating system to hang or produce errors. If a guest\n"
12738
"does not run without ignore_msrs=1, please report it to [email protected].\n");
12739
}
12740
12741
once_init(&kvm->arch.nx_once);
12742
return 0;
12743
12744
out_uninit_mmu:
12745
kvm_mmu_uninit_vm(kvm);
12746
out_cleanup_page_track:
12747
kvm_page_track_cleanup(kvm);
12748
out:
12749
return ret;
12750
}
12751
12752
/**
12753
* __x86_set_memory_region: Setup KVM internal memory slot
12754
*
12755
* @kvm: the kvm pointer to the VM.
12756
* @id: the slot ID to setup.
12757
* @gpa: the GPA to install the slot (unused when @size == 0).
12758
* @size: the size of the slot. Set to zero to uninstall a slot.
12759
*
12760
* This function helps to setup a KVM internal memory slot. Specify
12761
* @size > 0 to install a new slot, while @size == 0 to uninstall a
12762
* slot. The return code can be one of the following:
12763
*
12764
* HVA: on success (uninstall will return a bogus HVA)
12765
* -errno: on error
12766
*
12767
* The caller should always use IS_ERR() to check the return value
12768
* before use. Note, the KVM internal memory slots are guaranteed to
12769
* remain valid and unchanged until the VM is destroyed, i.e., the
12770
* GPA->HVA translation will not change. However, the HVA is a user
12771
* address, i.e. its accessibility is not guaranteed, and must be
12772
* accessed via __copy_{to,from}_user().
12773
*/
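/*
* Typical usage, as a sketch only (the slot ID, GPA and size are examples;
* the real callers live in vendor code and in kvm_arch_destroy_vm()):
*
*	hva = __x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, gpa, size);
*	if (IS_ERR(hva))
*		return PTR_ERR(hva);
*	...
*	__x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
*/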
12774
void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
12775
u32 size)
12776
{
12777
int i, r;
12778
unsigned long hva, old_npages;
12779
struct kvm_memslots *slots = kvm_memslots(kvm);
12780
struct kvm_memory_slot *slot;
12781
12782
lockdep_assert_held(&kvm->slots_lock);
12783
12784
if (WARN_ON(id >= KVM_MEM_SLOTS_NUM))
12785
return ERR_PTR_USR(-EINVAL);
12786
12787
slot = id_to_memslot(slots, id);
12788
if (size) {
12789
if (slot && slot->npages)
12790
return ERR_PTR_USR(-EEXIST);
12791
12792
/*
12793
* MAP_SHARED to prevent internal slot pages from being moved
12794
* by fork()/COW.
12795
*/
12796
hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE,
12797
MAP_SHARED | MAP_ANONYMOUS, 0);
12798
if (IS_ERR_VALUE(hva))
12799
return (void __user *)hva;
12800
} else {
12801
if (!slot || !slot->npages)
12802
return NULL;
12803
12804
old_npages = slot->npages;
12805
hva = slot->userspace_addr;
12806
}
12807
12808
for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
12809
struct kvm_userspace_memory_region2 m;
12810
12811
m.slot = id | (i << 16);
12812
m.flags = 0;
12813
m.guest_phys_addr = gpa;
12814
m.userspace_addr = hva;
12815
m.memory_size = size;
12816
r = kvm_set_internal_memslot(kvm, &m);
12817
if (r < 0)
12818
return ERR_PTR_USR(r);
12819
}
12820
12821
if (!size)
12822
vm_munmap(hva, old_npages * PAGE_SIZE);
12823
12824
return (void __user *)hva;
12825
}
12826
EXPORT_SYMBOL_GPL(__x86_set_memory_region);
12827
12828
void kvm_arch_pre_destroy_vm(struct kvm *kvm)
12829
{
12830
/*
12831
* Stop all background workers and kthreads before destroying vCPUs, as
12832
* iterating over vCPUs in a different task while vCPUs are being freed
12833
* is unsafe, i.e. will lead to use-after-free. The PIT also needs to
12834
* be stopped before IRQ routing is freed.
12835
*/
12836
cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work);
12837
cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
12838
12839
#ifdef CONFIG_KVM_IOAPIC
12840
kvm_free_pit(kvm);
12841
#endif
12842
12843
kvm_mmu_pre_destroy_vm(kvm);
12844
static_call_cond(kvm_x86_vm_pre_destroy)(kvm);
12845
}
12846
12847
void kvm_arch_destroy_vm(struct kvm *kvm)
12848
{
12849
if (current->mm == kvm->mm) {
12850
/*
12851
* Free memory regions allocated on behalf of userspace,
12852
* unless the memory map has changed due to process exit
12853
* or fd copying.
12854
*/
12855
mutex_lock(&kvm->slots_lock);
12856
__x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
12857
0, 0);
12858
__x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
12859
0, 0);
12860
__x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
12861
mutex_unlock(&kvm->slots_lock);
12862
}
12863
kvm_destroy_vcpus(kvm);
12864
kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));
12865
#ifdef CONFIG_KVM_IOAPIC
12866
kvm_pic_destroy(kvm);
12867
kvm_ioapic_destroy(kvm);
12868
#endif
12869
kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
12870
kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1));
12871
kvm_mmu_uninit_vm(kvm);
12872
kvm_page_track_cleanup(kvm);
12873
kvm_xen_destroy_vm(kvm);
12874
kvm_hv_destroy_vm(kvm);
12875
kvm_x86_call(vm_destroy)(kvm);
12876
}
12877
12878
static void memslot_rmap_free(struct kvm_memory_slot *slot)
12879
{
12880
int i;
12881
12882
for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
12883
vfree(slot->arch.rmap[i]);
12884
slot->arch.rmap[i] = NULL;
12885
}
12886
}
12887
12888
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
12889
{
12890
int i;
12891
12892
memslot_rmap_free(slot);
12893
12894
for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) {
12895
vfree(slot->arch.lpage_info[i - 1]);
12896
slot->arch.lpage_info[i - 1] = NULL;
12897
}
12898
12899
kvm_page_track_free_memslot(slot);
12900
}
12901
12902
int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages)
12903
{
12904
const int sz = sizeof(*slot->arch.rmap[0]);
12905
int i;
12906
12907
for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
12908
int level = i + 1;
12909
int lpages = __kvm_mmu_slot_lpages(slot, npages, level);
12910
12911
if (slot->arch.rmap[i])
12912
continue;
12913
12914
slot->arch.rmap[i] = __vcalloc(lpages, sz, GFP_KERNEL_ACCOUNT);
12915
if (!slot->arch.rmap[i]) {
12916
memslot_rmap_free(slot);
12917
return -ENOMEM;
12918
}
12919
}
12920
12921
return 0;
12922
}
12923
12924
static int kvm_alloc_memslot_metadata(struct kvm *kvm,
12925
struct kvm_memory_slot *slot)
12926
{
12927
unsigned long npages = slot->npages;
12928
int i, r;
12929
12930
/*
12931
* Clear out the previous array pointers for the KVM_MR_MOVE case. The
12932
* old arrays will be freed by kvm_set_memory_region() if installing
12933
* the new memslot is successful.
12934
*/
12935
memset(&slot->arch, 0, sizeof(slot->arch));
12936
12937
if (kvm_memslots_have_rmaps(kvm)) {
12938
r = memslot_rmap_alloc(slot, npages);
12939
if (r)
12940
return r;
12941
}
12942
12943
for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) {
12944
struct kvm_lpage_info *linfo;
12945
unsigned long ugfn;
12946
int lpages;
12947
int level = i + 1;
12948
12949
lpages = __kvm_mmu_slot_lpages(slot, npages, level);
12950
12951
linfo = __vcalloc(lpages, sizeof(*linfo), GFP_KERNEL_ACCOUNT);
12952
if (!linfo)
12953
goto out_free;
12954
12955
slot->arch.lpage_info[i - 1] = linfo;
12956
12957
if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
12958
linfo[0].disallow_lpage = 1;
12959
if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
12960
linfo[lpages - 1].disallow_lpage = 1;
12961
ugfn = slot->userspace_addr >> PAGE_SHIFT;
12962
/*
12963
* If the gfn and userspace address are not aligned wrt each
12964
* other, disable large page support for this slot.
12965
*/
12966
if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1)) {
12967
unsigned long j;
12968
12969
for (j = 0; j < lpages; ++j)
12970
linfo[j].disallow_lpage = 1;
12971
}
12972
}
12973
12974
#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
12975
kvm_mmu_init_memslot_memory_attributes(kvm, slot);
12976
#endif
12977
12978
if (kvm_page_track_create_memslot(kvm, slot, npages))
12979
goto out_free;
12980
12981
return 0;
12982
12983
out_free:
12984
memslot_rmap_free(slot);
12985
12986
for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) {
12987
vfree(slot->arch.lpage_info[i - 1]);
12988
slot->arch.lpage_info[i - 1] = NULL;
12989
}
12990
return -ENOMEM;
12991
}
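/*
 * Illustrative, standalone sketch (not kernel code) of the alignment math
 * used above for a 2MiB level, where one huge page covers 512 4KiB pages.
 * The real code only marks the boundary lpage_info entries (or all of them
 * when the gfn and host address are misaligned relative to each other);
 * this sketch simply reports whether the whole slot could be mapped with
 * 2MiB pages without any boundary carve-outs.  All demo_ names and DEMO_
 * constants are hypothetical.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGES_PER_2M 512ULL	/* 2MiB / 4KiB */

static bool demo_slot_allows_2m(uint64_t base_gfn, uint64_t npages,
				uint64_t userspace_addr)
{
	uint64_t ugfn = userspace_addr >> 12;	/* host address in 4KiB pages */
	uint64_t mask = DEMO_PAGES_PER_2M - 1;

	/* The head or tail of the slot cuts a huge page in half. */
	if ((base_gfn & mask) || ((base_gfn + npages) & mask))
		return false;

	/* gfn and host address disagree on their offset within 2MiB. */
	if ((base_gfn ^ ugfn) & mask)
		return false;

	return true;
}

int main(void)
{
	printf("%d\n", demo_slot_allows_2m(0x100000, 0x40000, 0x7f0000000000ULL));
	printf("%d\n", demo_slot_allows_2m(0x100001, 0x40000, 0x7f0000000000ULL));
	return 0;
}
#endif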
12992
12993
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
12994
{
12995
struct kvm_vcpu *vcpu;
12996
unsigned long i;
12997
12998
/*
12999
* memslots->generation has been incremented.
13000
* mmio generation may have reached its maximum value.
13001
*/
13002
kvm_mmu_invalidate_mmio_sptes(kvm, gen);
13003
13004
/* Force re-initialization of steal_time cache */
13005
kvm_for_each_vcpu(i, vcpu, kvm)
13006
kvm_vcpu_kick(vcpu);
13007
}
13008
13009
int kvm_arch_prepare_memory_region(struct kvm *kvm,
13010
const struct kvm_memory_slot *old,
13011
struct kvm_memory_slot *new,
13012
enum kvm_mr_change change)
13013
{
13014
/*
13015
* KVM doesn't support moving memslots when there are external page
13016
* trackers attached to the VM, i.e. if KVMGT is in use.
13017
*/
13018
if (change == KVM_MR_MOVE && kvm_page_track_has_external_user(kvm))
13019
return -EINVAL;
13020
13021
if (change == KVM_MR_CREATE || change == KVM_MR_MOVE) {
13022
if ((new->base_gfn + new->npages - 1) > kvm_mmu_max_gfn())
13023
return -EINVAL;
13024
13025
if (kvm_is_gfn_alias(kvm, new->base_gfn + new->npages - 1))
13026
return -EINVAL;
13027
13028
return kvm_alloc_memslot_metadata(kvm, new);
13029
}
13030
13031
if (change == KVM_MR_FLAGS_ONLY)
13032
memcpy(&new->arch, &old->arch, sizeof(old->arch));
13033
else if (WARN_ON_ONCE(change != KVM_MR_DELETE))
13034
return -EIO;
13035
13036
return 0;
13037
}
13038
13039
13040
static void kvm_mmu_update_cpu_dirty_logging(struct kvm *kvm, bool enable)
13041
{
13042
int nr_slots;
13043
13044
if (!kvm->arch.cpu_dirty_log_size)
13045
return;
13046
13047
nr_slots = atomic_read(&kvm->nr_memslots_dirty_logging);
13048
if ((enable && nr_slots == 1) || !nr_slots)
13049
kvm_make_all_cpus_request(kvm, KVM_REQ_UPDATE_CPU_DIRTY_LOGGING);
13050
}
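/*
 * Tiny standalone sketch (not kernel code) of the edge detection above.
 * The nr_memslots_dirty_logging counter evidently already includes the slot
 * being changed, so the request is only broadcast when the first slot turns
 * dirty logging on or the last slot turns it off.  demo_ names are
 * hypothetical.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

static bool demo_needs_cpu_dirty_log_update(bool enable, int nr_slots)
{
	/* First slot turned logging on, or last slot turned it off. */
	return (enable && nr_slots == 1) || !nr_slots;
}

int main(void)
{
	printf("%d\n", demo_needs_cpu_dirty_log_update(true, 1));	/* 1 */
	printf("%d\n", demo_needs_cpu_dirty_log_update(true, 2));	/* 0 */
	printf("%d\n", demo_needs_cpu_dirty_log_update(false, 1));	/* 0 */
	printf("%d\n", demo_needs_cpu_dirty_log_update(false, 0));	/* 1 */
	return 0;
}
#endif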
13051
13052
static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
13053
struct kvm_memory_slot *old,
13054
const struct kvm_memory_slot *new,
13055
enum kvm_mr_change change)
13056
{
13057
u32 old_flags = old ? old->flags : 0;
13058
u32 new_flags = new ? new->flags : 0;
13059
bool log_dirty_pages = new_flags & KVM_MEM_LOG_DIRTY_PAGES;
13060
13061
/*
13062
* Update CPU dirty logging if dirty logging is being toggled. This
13063
* applies to all operations.
13064
*/
13065
if ((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES)
13066
kvm_mmu_update_cpu_dirty_logging(kvm, log_dirty_pages);
13067
13068
/*
13069
* Nothing more to do for RO slots (which can't be dirtied and can't be
13070
* made writable) or CREATE/MOVE/DELETE of a slot.
13071
*
13072
* For a memslot with dirty logging disabled:
13073
* CREATE: No dirty mappings will already exist.
13074
* MOVE/DELETE: The old mappings will already have been cleaned up by
13075
* kvm_arch_flush_shadow_memslot()
13076
*
13077
* For a memslot with dirty logging enabled:
13078
* CREATE: No shadow pages exist, thus nothing to write-protect
13079
* and no dirty bits to clear.
13080
* MOVE/DELETE: The old mappings will already have been cleaned up by
13081
* kvm_arch_flush_shadow_memslot().
13082
*/
13083
if ((change != KVM_MR_FLAGS_ONLY) || (new_flags & KVM_MEM_READONLY))
13084
return;
13085
13086
/*
13087
* READONLY and non-flags changes were filtered out above, and the only
13088
* other flag is LOG_DIRTY_PAGES, i.e. something is wrong if dirty
13089
* logging isn't being toggled on or off.
13090
*/
13091
if (WARN_ON_ONCE(!((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES)))
13092
return;
13093
13094
if (!log_dirty_pages) {
13095
/*
13096
* Recover huge page mappings in the slot now that dirty logging
13097
* is disabled, i.e. now that KVM does not have to track guest
13098
* writes at 4KiB granularity.
13099
*
13100
* Dirty logging might be disabled by userspace if an ongoing VM
13101
* live migration is cancelled and the VM must continue running
13102
* on the source.
13103
*/
13104
kvm_mmu_recover_huge_pages(kvm, new);
13105
} else {
13106
/*
13107
* Initially-all-set does not require write protecting any page,
13108
* because all pages are assumed to be dirty.
13109
*/
13110
if (kvm_dirty_log_manual_protect_and_init_set(kvm))
13111
return;
13112
13113
if (READ_ONCE(eager_page_split))
13114
kvm_mmu_slot_try_split_huge_pages(kvm, new, PG_LEVEL_4K);
13115
13116
if (kvm->arch.cpu_dirty_log_size) {
13117
kvm_mmu_slot_leaf_clear_dirty(kvm, new);
13118
kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_2M);
13119
} else {
13120
kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_4K);
13121
}
13122
13123
/*
13124
* Unconditionally flush the TLBs after enabling dirty logging.
13125
* A flush is almost always going to be necessary (see below),
13126
* and unconditionally flushing allows the helpers to omit
13127
* the subtly complex checks when removing write access.
13128
*
13129
* Do the flush outside of mmu_lock to reduce the amount of
13130
* time mmu_lock is held. Flushing after dropping mmu_lock is
13131
* safe as KVM only needs to guarantee the slot is fully
13132
* write-protected before returning to userspace, i.e. before
13133
* userspace can consume the dirty status.
13134
*
13135
* Flushing outside of mmu_lock requires KVM to be careful when
13136
* making decisions based on writable status of an SPTE, e.g. a
13137
* !writable SPTE doesn't guarantee a CPU can't perform writes.
13138
*
13139
* Specifically, KVM also write-protects guest page tables to
13140
* monitor changes when using shadow paging, and must guarantee
13141
* no CPUs can write to those pages before mmu_lock is dropped.
13142
* Because CPUs may have stale TLB entries at this point, a
13143
* !writable SPTE doesn't guarantee CPUs can't perform writes.
13144
*
13145
* KVM also allows making SPTES writable outside of mmu_lock,
13146
* e.g. to allow dirty logging without taking mmu_lock.
13147
*
13148
* To handle these scenarios, KVM uses a separate software-only
13149
* bit (MMU-writable) to track if a SPTE is !writable due to
13150
* a guest page table being write-protected (KVM clears the
13151
* MMU-writable flag when write-protecting for shadow paging).
13152
*
13153
* The use of MMU-writable is also the primary motivation for
13154
* the unconditional flush. Because KVM must guarantee that a
13155
* CPU doesn't contain stale, writable TLB entries for a
13156
* !MMU-writable SPTE, KVM must flush if it encounters any
13157
* MMU-writable SPTE regardless of whether the actual hardware
13158
* writable bit was set. I.e. KVM is almost guaranteed to need
13159
* to flush, while unconditionally flushing allows the "remove
13160
* write access" helpers to ignore MMU-writable entirely.
13161
*
13162
* See is_writable_pte() for more details (the case involving
13163
* access-tracked SPTEs is particularly relevant).
13164
*/
13165
kvm_flush_remote_tlbs_memslot(kvm, new);
13166
}
13167
}
13168
13169
void kvm_arch_commit_memory_region(struct kvm *kvm,
13170
struct kvm_memory_slot *old,
13171
const struct kvm_memory_slot *new,
13172
enum kvm_mr_change change)
13173
{
13174
if (change == KVM_MR_DELETE)
13175
kvm_page_track_delete_slot(kvm, old);
13176
13177
if (!kvm->arch.n_requested_mmu_pages &&
13178
(change == KVM_MR_CREATE || change == KVM_MR_DELETE)) {
13179
unsigned long nr_mmu_pages;
13180
13181
nr_mmu_pages = kvm->nr_memslot_pages / KVM_MEMSLOT_PAGES_TO_MMU_PAGES_RATIO;
13182
nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);
13183
kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
13184
}
13185
13186
kvm_mmu_slot_apply_flags(kvm, old, new, change);
13187
13188
/* Free the arrays associated with the old memslot. */
13189
if (change == KVM_MR_MOVE)
13190
kvm_arch_free_memslot(kvm, old);
13191
}
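/*
 * Illustrative, standalone sketch (not kernel code) of the shadow-MMU page
 * limit computed above: the number of memslot-backed guest pages divided by
 * a fixed ratio, clamped to a minimum.  The ratio and floor below are
 * assumed example values; see KVM_MEMSLOT_PAGES_TO_MMU_PAGES_RATIO and
 * KVM_MIN_ALLOC_MMU_PAGES for the real ones.  All demo_ and DEMO_ names are
 * hypothetical.
 */
#if 0
#include <stdio.h>

#define DEMO_RATIO	50UL	/* assumed memslot-pages-per-MMU-page ratio */
#define DEMO_MIN_PAGES	64UL	/* assumed floor */

static unsigned long demo_nr_mmu_pages(unsigned long nr_memslot_pages)
{
	unsigned long n = nr_memslot_pages / DEMO_RATIO;

	return n > DEMO_MIN_PAGES ? n : DEMO_MIN_PAGES;
}

int main(void)
{
	/* A 4GiB guest exposes 1048576 4KiB pages through its memslots. */
	printf("%lu\n", demo_nr_mmu_pages(1048576));	/* 20971 */
	printf("%lu\n", demo_nr_mmu_pages(1024));	/* clamped to 64 */
	return 0;
}
#endif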
13192
13193
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
13194
{
13195
WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu));
13196
13197
if (vcpu->arch.guest_state_protected)
13198
return true;
13199
13200
return kvm_x86_call(get_cpl)(vcpu) == 0;
13201
}
13202
13203
unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
13204
{
13205
WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu));
13206
13207
if (vcpu->arch.guest_state_protected)
13208
return 0;
13209
13210
return kvm_rip_read(vcpu);
13211
}
13212
13213
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
13214
{
13215
return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
13216
}
13217
13218
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
13219
{
13220
return kvm_x86_call(interrupt_allowed)(vcpu, false);
13221
}
13222
13223
unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
13224
{
13225
/* Can't read the RIP when guest state is protected, just return 0 */
13226
if (vcpu->arch.guest_state_protected)
13227
return 0;
13228
13229
if (is_64_bit_mode(vcpu))
13230
return kvm_rip_read(vcpu);
13231
return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) +
13232
kvm_rip_read(vcpu));
13233
}
13234
EXPORT_SYMBOL_GPL(kvm_get_linear_rip);
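/*
 * Small standalone sketch (not kernel code) of the linear RIP calculation
 * above: in 64-bit mode the CS base is ignored and RIP is used as-is,
 * otherwise the result is the 32-bit truncation of CS.base + RIP.  demo_
 * names are hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint64_t demo_linear_rip(int is_64_bit, uint64_t cs_base, uint64_t rip)
{
	if (is_64_bit)
		return rip;
	return (uint32_t)(cs_base + rip);
}

int main(void)
{
	/* Real-mode reset vector: CS.base 0xf0000, IP 0xfff0 -> 0xffff0. */
	printf("0x%llx\n", (unsigned long long)demo_linear_rip(0, 0xf0000, 0xfff0));
	printf("0x%llx\n", (unsigned long long)demo_linear_rip(1, 0, 0xffffffff81000000ULL));
	return 0;
}
#endif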
13235
13236
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
13237
{
13238
return kvm_get_linear_rip(vcpu) == linear_rip;
13239
}
13240
EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
13241
13242
unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
13243
{
13244
unsigned long rflags;
13245
13246
rflags = kvm_x86_call(get_rflags)(vcpu);
13247
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
13248
rflags &= ~X86_EFLAGS_TF;
13249
return rflags;
13250
}
13251
EXPORT_SYMBOL_GPL(kvm_get_rflags);
13252
13253
static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
13254
{
13255
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
13256
kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
13257
rflags |= X86_EFLAGS_TF;
13258
kvm_x86_call(set_rflags)(vcpu, rflags);
13259
}
13260
13261
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
13262
{
13263
__kvm_set_rflags(vcpu, rflags);
13264
kvm_make_request(KVM_REQ_EVENT, vcpu);
13265
}
13266
EXPORT_SYMBOL_GPL(kvm_set_rflags);
13267
13268
static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
13269
{
13270
BUILD_BUG_ON(!is_power_of_2(ASYNC_PF_PER_VCPU));
13271
13272
return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
13273
}
13274
13275
static inline u32 kvm_async_pf_next_probe(u32 key)
13276
{
13277
return (key + 1) & (ASYNC_PF_PER_VCPU - 1);
13278
}
13279
13280
static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
13281
{
13282
u32 key = kvm_async_pf_hash_fn(gfn);
13283
13284
while (vcpu->arch.apf.gfns[key] != ~0)
13285
key = kvm_async_pf_next_probe(key);
13286
13287
vcpu->arch.apf.gfns[key] = gfn;
13288
}
13289
13290
static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
13291
{
13292
int i;
13293
u32 key = kvm_async_pf_hash_fn(gfn);
13294
13295
for (i = 0; i < ASYNC_PF_PER_VCPU &&
13296
(vcpu->arch.apf.gfns[key] != gfn &&
13297
vcpu->arch.apf.gfns[key] != ~0); i++)
13298
key = kvm_async_pf_next_probe(key);
13299
13300
return key;
13301
}
13302
13303
bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
13304
{
13305
return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
13306
}
13307
13308
static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
13309
{
13310
u32 i, j, k;
13311
13312
i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
13313
13314
if (WARN_ON_ONCE(vcpu->arch.apf.gfns[i] != gfn))
13315
return;
13316
13317
while (true) {
13318
vcpu->arch.apf.gfns[i] = ~0;
13319
do {
13320
j = kvm_async_pf_next_probe(j);
13321
if (vcpu->arch.apf.gfns[j] == ~0)
13322
return;
13323
k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
13324
/*
13325
* k lies cyclically in ]i,j]
13326
* | i.k.j |
13327
* |....j i.k.| or |.k..j i...|
13328
*/
13329
} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
13330
vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
13331
i = j;
13332
}
13333
}
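/*
 * Self-contained userspace sketch (not kernel code) of the open-addressed
 * gfn table used by the async page fault code above: linear probing on
 * insert and lookup, ~0 as the empty marker, and the same backward-shift
 * deletion so that no later colliding entry becomes unreachable.  The table
 * size, the multiplicative hash (a stand-in for hash_32()) and all demo_
 * names are hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define DEMO_SLOTS 8u			/* must be a power of two */
#define DEMO_EMPTY UINT64_MAX

static uint64_t table[DEMO_SLOTS];

static uint32_t demo_hash(uint64_t gfn)
{
	return (uint32_t)(gfn * 2654435761u) & (DEMO_SLOTS - 1);
}

static uint32_t demo_next(uint32_t key)
{
	return (key + 1) & (DEMO_SLOTS - 1);
}

static void demo_add(uint64_t gfn)
{
	uint32_t key = demo_hash(gfn);

	while (table[key] != DEMO_EMPTY)
		key = demo_next(key);
	table[key] = gfn;
}

static uint32_t demo_slot(uint64_t gfn)
{
	uint32_t key = demo_hash(gfn);

	for (uint32_t n = 0; n < DEMO_SLOTS &&
	     table[key] != gfn && table[key] != DEMO_EMPTY; n++)
		key = demo_next(key);
	return key;
}

static void demo_del(uint64_t gfn)
{
	uint32_t i, j, k;

	i = j = demo_slot(gfn);
	if (table[i] != gfn)
		return;

	while (1) {
		table[i] = DEMO_EMPTY;
		do {
			j = demo_next(j);
			if (table[j] == DEMO_EMPTY)
				return;
			/*
			 * An entry whose home slot k lies cyclically in
			 * ]i, j] must stay where it is; keep scanning.
			 */
			k = demo_hash(table[j]);
		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
		table[i] = table[j];
		i = j;
	}
}

int main(void)
{
	for (uint32_t n = 0; n < DEMO_SLOTS; n++)
		table[n] = DEMO_EMPTY;

	demo_add(10);
	demo_add(18);
	demo_add(26);	/* all three collide with this toy hash */
	demo_del(18);
	printf("26 still found: %d\n", table[demo_slot(26)] == 26);
	return 0;
}
#endif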
13334
13335
static inline int apf_put_user_notpresent(struct kvm_vcpu *vcpu)
13336
{
13337
u32 reason = KVM_PV_REASON_PAGE_NOT_PRESENT;
13338
13339
return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &reason,
13340
sizeof(reason));
13341
}
13342
13343
static inline int apf_put_user_ready(struct kvm_vcpu *vcpu, u32 token)
13344
{
13345
unsigned int offset = offsetof(struct kvm_vcpu_pv_apf_data, token);
13346
13347
return kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data,
13348
&token, offset, sizeof(token));
13349
}
13350
13351
static inline bool apf_pageready_slot_free(struct kvm_vcpu *vcpu)
13352
{
13353
unsigned int offset = offsetof(struct kvm_vcpu_pv_apf_data, token);
13354
u32 val;
13355
13356
if (kvm_read_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data,
13357
&val, offset, sizeof(val)))
13358
return false;
13359
13360
return !val;
13361
}
13362
13363
static bool kvm_can_deliver_async_pf(struct kvm_vcpu *vcpu)
13364
{
13365
13366
if (!kvm_pv_async_pf_enabled(vcpu))
13367
return false;
13368
13369
if (!vcpu->arch.apf.send_always &&
13370
(vcpu->arch.guest_state_protected || !kvm_x86_call(get_cpl)(vcpu)))
13371
return false;
13372
13373
if (is_guest_mode(vcpu)) {
13374
/*
13375
* L1 needs to opt into the special #PF vmexits that are
13376
* used to deliver async page faults.
13377
*/
13378
return vcpu->arch.apf.delivery_as_pf_vmexit;
13379
} else {
13380
/*
13381
* Play it safe in case the guest temporarily disables paging.
13382
* The real mode IDT in particular is unlikely to have a #PF
13383
* exception handler set up.
13384
*/
13385
return is_paging(vcpu);
13386
}
13387
}
13388
13389
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
13390
{
13391
if (unlikely(!lapic_in_kernel(vcpu) ||
13392
kvm_event_needs_reinjection(vcpu) ||
13393
kvm_is_exception_pending(vcpu)))
13394
return false;
13395
13396
if (kvm_hlt_in_guest(vcpu->kvm) && !kvm_can_deliver_async_pf(vcpu))
13397
return false;
13398
13399
/*
13400
* If interrupts are off we cannot even use an artificial
13401
* halt state.
13402
*/
13403
return kvm_arch_interrupt_allowed(vcpu);
13404
}
13405
13406
bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
13407
struct kvm_async_pf *work)
13408
{
13409
struct x86_exception fault;
13410
13411
trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa);
13412
kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
13413
13414
if (kvm_can_deliver_async_pf(vcpu) &&
13415
!apf_put_user_notpresent(vcpu)) {
13416
fault.vector = PF_VECTOR;
13417
fault.error_code_valid = true;
13418
fault.error_code = 0;
13419
fault.nested_page_fault = false;
13420
fault.address = work->arch.token;
13421
fault.async_page_fault = true;
13422
kvm_inject_page_fault(vcpu, &fault);
13423
return true;
13424
} else {
13425
/*
13426
* It is not possible to deliver a paravirtualized asynchronous
13427
* page fault, but putting the guest in an artificial halt state
13428
* can be beneficial nevertheless: if an interrupt arrives, we
13429
* can deliver it timely and perhaps the guest will schedule
13430
* another process. When the instruction that triggered a page
13431
* fault is retried, hopefully the page will be ready in the host.
13432
*/
13433
kvm_make_request(KVM_REQ_APF_HALT, vcpu);
13434
return false;
13435
}
13436
}
13437
13438
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
13439
struct kvm_async_pf *work)
13440
{
13441
struct kvm_lapic_irq irq = {
13442
.delivery_mode = APIC_DM_FIXED,
13443
.vector = vcpu->arch.apf.vec
13444
};
13445
13446
if (work->wakeup_all)
13447
work->arch.token = ~0; /* broadcast wakeup */
13448
else
13449
kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
13450
trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa);
13451
13452
if ((work->wakeup_all || work->notpresent_injected) &&
13453
kvm_pv_async_pf_enabled(vcpu) &&
13454
!apf_put_user_ready(vcpu, work->arch.token)) {
13455
vcpu->arch.apf.pageready_pending = true;
13456
kvm_apic_set_irq(vcpu, &irq, NULL);
13457
}
13458
13459
vcpu->arch.apf.halted = false;
13460
kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
13461
}
13462
13463
void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu)
13464
{
13465
kvm_make_request(KVM_REQ_APF_READY, vcpu);
13466
if (!vcpu->arch.apf.pageready_pending)
13467
kvm_vcpu_kick(vcpu);
13468
}
13469
13470
bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
13471
{
13472
if (!kvm_pv_async_pf_enabled(vcpu))
13473
return true;
13474
else
13475
return kvm_lapic_enabled(vcpu) && apf_pageready_slot_free(vcpu);
13476
}
13477
13478
static void kvm_noncoherent_dma_assignment_start_or_stop(struct kvm *kvm)
13479
{
13480
/*
13481
* Non-coherent DMA assignment and de-assignment may affect whether or
13482
* not KVM honors guest PAT, and thus may cause changes in EPT SPTEs
13483
* due to toggling the "ignore PAT" bit. Zap all SPTEs when the first
13484
* (or last) non-coherent device is (un)registered, so that new SPTEs
13485
* with the correct "ignore guest PAT" setting are created.
13486
*
13487
* If KVM always honors guest PAT, however, there is nothing to do.
13488
*/
13489
if (kvm_check_has_quirk(kvm, KVM_X86_QUIRK_IGNORE_GUEST_PAT))
13490
kvm_zap_gfn_range(kvm, gpa_to_gfn(0), gpa_to_gfn(~0ULL));
13491
}
13492
13493
void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
13494
{
13495
if (atomic_inc_return(&kvm->arch.noncoherent_dma_count) == 1)
13496
kvm_noncoherent_dma_assignment_start_or_stop(kvm);
13497
}
13498
EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma);
13499
13500
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
13501
{
13502
if (!atomic_dec_return(&kvm->arch.noncoherent_dma_count))
13503
kvm_noncoherent_dma_assignment_start_or_stop(kvm);
13504
}
13505
EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma);
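/*
 * Standalone sketch (not kernel code) of the first/last detection above,
 * using C11 atomics in place of atomic_inc_return()/atomic_dec_return():
 * only the 0 -> 1 transition on registration and the 1 -> 0 transition on
 * unregistration trigger the (expensive) zap.  demo_ names are
 * hypothetical.
 */
#if 0
#include <stdatomic.h>
#include <stdio.h>

static atomic_int noncoherent_count;

/* Returns 1 when this call registered the first device. */
static int demo_register(void)
{
	return atomic_fetch_add(&noncoherent_count, 1) + 1 == 1;
}

/* Returns 1 when this call unregistered the last device. */
static int demo_unregister(void)
{
	return atomic_fetch_sub(&noncoherent_count, 1) - 1 == 0;
}

int main(void)
{
	printf("first? %d\n", demo_register());		/* 1 */
	printf("first? %d\n", demo_register());		/* 0 */
	printf("last?  %d\n", demo_unregister());	/* 0 */
	printf("last?  %d\n", demo_unregister());	/* 1 */
	return 0;
}
#endif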
13506
13507
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
13508
{
13509
return atomic_read(&kvm->arch.noncoherent_dma_count);
13510
}
13511
EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);
13512
13513
bool kvm_vector_hashing_enabled(void)
13514
{
13515
return vector_hashing;
13516
}
13517
13518
bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
13519
{
13520
return (vcpu->arch.msr_kvm_poll_control & 1) == 0;
13521
}
13522
EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
13523
13524
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
13525
int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order)
13526
{
13527
return kvm_x86_call(gmem_prepare)(kvm, pfn, gfn, max_order);
13528
}
13529
#endif
13530
13531
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
13532
void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end)
13533
{
13534
kvm_x86_call(gmem_invalidate)(start, end);
13535
}
13536
#endif
13537
13538
int kvm_spec_ctrl_test_value(u64 value)
13539
{
13540
/*
13541
* test that setting IA32_SPEC_CTRL to given value
13542
* is allowed by the host processor
13543
*/
13544
13545
u64 saved_value;
13546
unsigned long flags;
13547
int ret = 0;
13548
13549
local_irq_save(flags);
13550
13551
if (rdmsrq_safe(MSR_IA32_SPEC_CTRL, &saved_value))
13552
ret = 1;
13553
else if (wrmsrq_safe(MSR_IA32_SPEC_CTRL, value))
13554
ret = 1;
13555
else
13556
wrmsrq(MSR_IA32_SPEC_CTRL, saved_value);
13557
13558
local_irq_restore(flags);
13559
13560
return ret;
13561
}
13562
EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value);
13563
13564
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code)
13565
{
13566
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
13567
struct x86_exception fault;
13568
u64 access = error_code &
13569
(PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK);
13570
13571
if (!(error_code & PFERR_PRESENT_MASK) ||
13572
mmu->gva_to_gpa(vcpu, mmu, gva, access, &fault) != INVALID_GPA) {
13573
/*
13574
* If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page
13575
* tables probably do not match the TLB. Just proceed
13576
* with the error code that the processor gave.
13577
*/
13578
fault.vector = PF_VECTOR;
13579
fault.error_code_valid = true;
13580
fault.error_code = error_code;
13581
fault.nested_page_fault = false;
13582
fault.address = gva;
13583
fault.async_page_fault = false;
13584
}
13585
vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault);
13586
}
13587
EXPORT_SYMBOL_GPL(kvm_fixup_and_inject_pf_error);
13588
13589
/*
13590
* Handles kvm_read/write_guest_virt*() result and either injects #PF or returns
13591
* KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM. Return value
13592
* indicates whether exit to userspace is needed.
13593
*/
13594
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
13595
struct x86_exception *e)
13596
{
13597
if (r == X86EMUL_PROPAGATE_FAULT) {
13598
if (KVM_BUG_ON(!e, vcpu->kvm))
13599
return -EIO;
13600
13601
kvm_inject_emulated_page_fault(vcpu, e);
13602
return 1;
13603
}
13604
13605
/*
13606
* In case kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED
13607
* while handling a VMX instruction KVM could've handled the request
13608
* correctly by exiting to userspace and performing I/O but there
13609
* doesn't seem to be a real use-case behind such requests, just return
13610
* KVM_EXIT_INTERNAL_ERROR for now.
13611
*/
13612
kvm_prepare_emulation_failure_exit(vcpu);
13613
13614
return 0;
13615
}
13616
EXPORT_SYMBOL_GPL(kvm_handle_memory_failure);
13617
13618
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
13619
{
13620
bool pcid_enabled;
13621
struct x86_exception e;
13622
struct {
13623
u64 pcid;
13624
u64 gla;
13625
} operand;
13626
int r;
13627
13628
r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
13629
if (r != X86EMUL_CONTINUE)
13630
return kvm_handle_memory_failure(vcpu, r, &e);
13631
13632
if (operand.pcid >> 12 != 0) {
13633
kvm_inject_gp(vcpu, 0);
13634
return 1;
13635
}
13636
13637
pcid_enabled = kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE);
13638
13639
switch (type) {
13640
case INVPCID_TYPE_INDIV_ADDR:
13641
/*
13642
* LAM doesn't apply to addresses that are inputs to TLB
13643
* invalidation.
13644
*/
13645
if ((!pcid_enabled && (operand.pcid != 0)) ||
13646
is_noncanonical_invlpg_address(operand.gla, vcpu)) {
13647
kvm_inject_gp(vcpu, 0);
13648
return 1;
13649
}
13650
kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid);
13651
return kvm_skip_emulated_instruction(vcpu);
13652
13653
case INVPCID_TYPE_SINGLE_CTXT:
13654
if (!pcid_enabled && (operand.pcid != 0)) {
13655
kvm_inject_gp(vcpu, 0);
13656
return 1;
13657
}
13658
13659
kvm_invalidate_pcid(vcpu, operand.pcid);
13660
return kvm_skip_emulated_instruction(vcpu);
13661
13662
case INVPCID_TYPE_ALL_NON_GLOBAL:
13663
/*
13664
* Currently, KVM doesn't mark global entries in the shadow
13665
* page tables, so a non-global flush just degenerates to a
13666
* global flush. If needed, we could optimize this later by
13667
* keeping track of global entries in shadow page tables.
13668
*/
13669
13670
fallthrough;
13671
case INVPCID_TYPE_ALL_INCL_GLOBAL:
13672
kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
13673
return kvm_skip_emulated_instruction(vcpu);
13674
13675
default:
13676
kvm_inject_gp(vcpu, 0);
13677
return 1;
13678
}
13679
}
13680
EXPORT_SYMBOL_GPL(kvm_handle_invpcid);
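/*
 * Illustrative, standalone sketch (not kernel code) of the 16-byte INVPCID
 * memory operand read above and of two of the checks that lead to #GP for
 * the single-address and single-context types: reserved PCID bits (anything
 * above bit 11) and a non-zero PCID while CR4.PCIDE is clear.  demo_ names
 * are hypothetical and the canonical-address check is omitted for brevity.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_invpcid_desc {	/* the 16-byte memory operand */
	uint64_t pcid;		/* only bits 0-11 may be used */
	uint64_t gla;		/* linear address, for the per-address type */
};

static bool demo_desc_valid(const struct demo_invpcid_desc *d, bool pcid_enabled)
{
	if (d->pcid >> 12)		/* reserved bits set -> #GP */
		return false;
	if (!pcid_enabled && d->pcid)	/* only PCID 0 while CR4.PCIDE = 0 */
		return false;
	return true;
}

int main(void)
{
	struct demo_invpcid_desc d = { .pcid = 0x001, .gla = 0x7fffdeadb000ULL };

	printf("%d %d\n", demo_desc_valid(&d, true), demo_desc_valid(&d, false));
	return 0;
}
#endif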
13681
13682
static int complete_sev_es_emulated_mmio(struct kvm_vcpu *vcpu)
13683
{
13684
struct kvm_run *run = vcpu->run;
13685
struct kvm_mmio_fragment *frag;
13686
unsigned int len;
13687
13688
BUG_ON(!vcpu->mmio_needed);
13689
13690
/* Complete previous fragment */
13691
frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
13692
len = min(8u, frag->len);
13693
if (!vcpu->mmio_is_write)
13694
memcpy(frag->data, run->mmio.data, len);
13695
13696
if (frag->len <= 8) {
13697
/* Switch to the next fragment. */
13698
frag++;
13699
vcpu->mmio_cur_fragment++;
13700
} else {
13701
/* Go forward to the next mmio piece. */
13702
frag->data += len;
13703
frag->gpa += len;
13704
frag->len -= len;
13705
}
13706
13707
if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
13708
vcpu->mmio_needed = 0;
13709
13710
/* VMG change: at this point we're always done. */
13711
/* RIP has already been advanced. */
13712
return 1;
13713
}
13714
13715
/* More MMIO is needed. */
13716
run->mmio.phys_addr = frag->gpa;
13717
run->mmio.len = min(8u, frag->len);
13718
run->mmio.is_write = vcpu->mmio_is_write;
13719
if (run->mmio.is_write)
13720
memcpy(run->mmio.data, frag->data, min(8u, frag->len));
13721
run->exit_reason = KVM_EXIT_MMIO;
13722
13723
vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;
13724
13725
return 0;
13726
}
13727
13728
int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
13729
void *data)
13730
{
13731
int handled;
13732
struct kvm_mmio_fragment *frag;
13733
13734
if (!data)
13735
return -EINVAL;
13736
13737
handled = write_emultor.read_write_mmio(vcpu, gpa, bytes, data);
13738
if (handled == bytes)
13739
return 1;
13740
13741
bytes -= handled;
13742
gpa += handled;
13743
data += handled;
13744
13745
/* TODO: Check if we need to increment the number of frags. */
13746
frag = vcpu->mmio_fragments;
13747
vcpu->mmio_nr_fragments = 1;
13748
frag->len = bytes;
13749
frag->gpa = gpa;
13750
frag->data = data;
13751
13752
vcpu->mmio_needed = 1;
13753
vcpu->mmio_cur_fragment = 0;
13754
13755
vcpu->run->mmio.phys_addr = gpa;
13756
vcpu->run->mmio.len = min(8u, frag->len);
13757
vcpu->run->mmio.is_write = 1;
13758
memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
13759
vcpu->run->exit_reason = KVM_EXIT_MMIO;
13760
13761
vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;
13762
13763
return 0;
13764
}
13765
EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_write);
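/*
 * Tiny standalone sketch (not kernel code) of the fragmentation arithmetic
 * above.  kvm_run's mmio buffer carries at most 8 bytes per exit, so a
 * larger SEV-ES MMIO access is reported to userspace as a series of <= 8
 * byte KVM_EXIT_MMIO round trips, with the fragment's gpa/len advanced
 * after each one.  demo_ names are hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static void demo_chunk_mmio(uint64_t gpa, unsigned int len)
{
	while (len) {
		unsigned int chunk = len < 8 ? len : 8;

		printf("KVM_EXIT_MMIO: gpa=0x%llx len=%u\n",
		       (unsigned long long)gpa, chunk);
		gpa += chunk;
		len -= chunk;
	}
}

int main(void)
{
	demo_chunk_mmio(0xfed00000, 20);	/* 8 + 8 + 4 byte exits */
	return 0;
}
#endif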
13766
13767
int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
13768
void *data)
13769
{
13770
int handled;
13771
struct kvm_mmio_fragment *frag;
13772
13773
if (!data)
13774
return -EINVAL;
13775
13776
handled = read_emultor.read_write_mmio(vcpu, gpa, bytes, data);
13777
if (handled == bytes)
13778
return 1;
13779
13780
bytes -= handled;
13781
gpa += handled;
13782
data += handled;
13783
13784
/* TODO: Check if we need to increment the number of frags. */
13785
frag = vcpu->mmio_fragments;
13786
vcpu->mmio_nr_fragments = 1;
13787
frag->len = bytes;
13788
frag->gpa = gpa;
13789
frag->data = data;
13790
13791
vcpu->mmio_needed = 1;
13792
vcpu->mmio_cur_fragment = 0;
13793
13794
vcpu->run->mmio.phys_addr = gpa;
13795
vcpu->run->mmio.len = min(8u, frag->len);
13796
vcpu->run->mmio.is_write = 0;
13797
vcpu->run->exit_reason = KVM_EXIT_MMIO;
13798
13799
vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;
13800
13801
return 0;
13802
}
13803
EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_read);
13804
13805
static void advance_sev_es_emulated_pio(struct kvm_vcpu *vcpu, unsigned count, int size)
13806
{
13807
vcpu->arch.sev_pio_count -= count;
13808
vcpu->arch.sev_pio_data += count * size;
13809
}
13810
13811
static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
13812
unsigned int port);
13813
13814
static int complete_sev_es_emulated_outs(struct kvm_vcpu *vcpu)
13815
{
13816
int size = vcpu->arch.pio.size;
13817
int port = vcpu->arch.pio.port;
13818
13819
vcpu->arch.pio.count = 0;
13820
if (vcpu->arch.sev_pio_count)
13821
return kvm_sev_es_outs(vcpu, size, port);
13822
return 1;
13823
}
13824
13825
static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
13826
unsigned int port)
13827
{
13828
for (;;) {
13829
unsigned int count =
13830
min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
13831
int ret = emulator_pio_out(vcpu, size, port, vcpu->arch.sev_pio_data, count);
13832
13833
/* memcpy done already by emulator_pio_out. */
13834
advance_sev_es_emulated_pio(vcpu, count, size);
13835
if (!ret)
13836
break;
13837
13838
/* Emulation done by the kernel. */
13839
if (!vcpu->arch.sev_pio_count)
13840
return 1;
13841
}
13842
13843
vcpu->arch.complete_userspace_io = complete_sev_es_emulated_outs;
13844
return 0;
13845
}
13846
13847
static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
13848
unsigned int port);
13849
13850
static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
13851
{
13852
unsigned count = vcpu->arch.pio.count;
13853
int size = vcpu->arch.pio.size;
13854
int port = vcpu->arch.pio.port;
13855
13856
complete_emulator_pio_in(vcpu, vcpu->arch.sev_pio_data);
13857
advance_sev_es_emulated_pio(vcpu, count, size);
13858
if (vcpu->arch.sev_pio_count)
13859
return kvm_sev_es_ins(vcpu, size, port);
13860
return 1;
13861
}
13862
13863
static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
13864
unsigned int port)
13865
{
13866
for (;;) {
13867
unsigned int count =
13868
min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
13869
if (!emulator_pio_in(vcpu, size, port, vcpu->arch.sev_pio_data, count))
13870
break;
13871
13872
/* Emulation done by the kernel. */
13873
advance_sev_es_emulated_pio(vcpu, count, size);
13874
if (!vcpu->arch.sev_pio_count)
13875
return 1;
13876
}
13877
13878
vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins;
13879
return 0;
13880
}
13881
13882
int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
13883
unsigned int port, void *data, unsigned int count,
13884
int in)
13885
{
13886
vcpu->arch.sev_pio_data = data;
13887
vcpu->arch.sev_pio_count = count;
13888
return in ? kvm_sev_es_ins(vcpu, size, port)
13889
: kvm_sev_es_outs(vcpu, size, port);
13890
}
13891
EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);
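/*
 * Small standalone sketch (not kernel code) of the batching used by
 * kvm_sev_es_outs() and kvm_sev_es_ins() above: each round handles at most
 * PAGE_SIZE / size elements, the remaining count is advanced by that many
 * elements (count * size bytes of data), and the loop repeats until all
 * elements are done or, in the kernel, until userspace I/O is required.
 * demo_ names and the 4096-byte page size are assumptions of this sketch.
 */
#if 0
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096u

static void demo_pio_rounds(unsigned int size, unsigned int count)
{
	unsigned int round = 0;

	while (count) {
		unsigned int n = DEMO_PAGE_SIZE / size;

		if (n > count)
			n = count;
		printf("round %u: %u elements of %u bytes\n", ++round, n, size);
		count -= n;
	}
}

int main(void)
{
	demo_pio_rounds(2, 5000);	/* 2048 + 2048 + 904 elements */
	return 0;
}
#endif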
13892
13893
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_entry);
13894
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
13895
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_mmio);
13896
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
13897
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
13898
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
13899
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
13900
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
13901
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmenter);
13902
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
13903
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
13904
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
13905
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmenter_failed);
13906
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
13907
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
13908
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
13909
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);
13910
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window_update);
13911
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pml_full);
13912
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access);
13913
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi);
13914
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_ga_log);
13915
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_kick_vcpu_slowpath);
13916
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_doorbell);
13917
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_accept_irq);
13918
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_enter);
13919
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_exit);
13920
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_enter);
13921
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_exit);
13922
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_rmp_fault);
13923
13924
static int __init kvm_x86_init(void)
13925
{
13926
kvm_init_xstate_sizes();
13927
13928
kvm_mmu_x86_module_init();
13929
mitigate_smt_rsb &= boot_cpu_has_bug(X86_BUG_SMT_RSB) && cpu_smt_possible();
13930
return 0;
13931
}
13932
module_init(kvm_x86_init);
13933
13934
static void __exit kvm_x86_exit(void)
13935
{
13936
WARN_ON_ONCE(static_branch_unlikely(&kvm_has_noapic_vcpu));
13937
}
13938
module_exit(kvm_x86_exit);
13939
13940