Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/kvm/hyperv.c
26424 views
1
// SPDX-License-Identifier: GPL-2.0-only
2
/*
3
* KVM Microsoft Hyper-V emulation
4
*
5
* derived from arch/x86/kvm/x86.c
6
*
7
* Copyright (C) 2006 Qumranet, Inc.
8
* Copyright (C) 2008 Qumranet, Inc.
9
* Copyright IBM Corporation, 2008
10
* Copyright 2010 Red Hat, Inc. and/or its affiliates.
11
* Copyright (C) 2015 Andrey Smetanin <[email protected]>
12
*
13
* Authors:
14
* Avi Kivity <[email protected]>
15
* Yaniv Kamay <[email protected]>
16
* Amit Shah <[email protected]>
17
* Ben-Ami Yassour <[email protected]>
18
* Andrey Smetanin <[email protected]>
19
*/
20
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22
#include "x86.h"
23
#include "lapic.h"
24
#include "ioapic.h"
25
#include "cpuid.h"
26
#include "hyperv.h"
27
#include "mmu.h"
28
#include "xen.h"
29
30
#include <linux/cpu.h>
31
#include <linux/kvm_host.h>
32
#include <linux/highmem.h>
33
#include <linux/sched/cputime.h>
34
#include <linux/spinlock.h>
35
#include <linux/eventfd.h>
36
37
#include <asm/apicdef.h>
38
#include <asm/mshyperv.h>
39
#include <trace/events/kvm.h>
40
41
#include "trace.h"
42
#include "irq.h"
43
#include "fpu.h"
44
45
#define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, HV_VCPUS_PER_SPARSE_BANK)
46
47
/*
48
* As per Hyper-V TLFS, extended hypercalls start from 0x8001
49
* (HvExtCallQueryCapabilities). Response of this hypercalls is a 64 bit value
50
* where each bit tells which extended hypercall is available besides
51
* HvExtCallQueryCapabilities.
52
*
53
* 0x8001 - First extended hypercall, HvExtCallQueryCapabilities, no bit
54
* assigned.
55
*
56
* 0x8002 - Bit 0
57
* 0x8003 - Bit 1
58
* ..
59
* 0x8041 - Bit 63
60
*
61
* Therefore, HV_EXT_CALL_MAX = 0x8001 + 64
62
*/
63
#define HV_EXT_CALL_MAX (HV_EXT_CALL_QUERY_CAPABILITIES + 64)
64
65
static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
66
bool vcpu_kick);
67
68
static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
69
{
70
return atomic64_read(&synic->sint[sint]);
71
}
72
73
static inline int synic_get_sint_vector(u64 sint_value)
74
{
75
if (sint_value & HV_SYNIC_SINT_MASKED)
76
return -1;
77
return sint_value & HV_SYNIC_SINT_VECTOR_MASK;
78
}
79
80
static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
81
int vector)
82
{
83
int i;
84
85
for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
86
if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
87
return true;
88
}
89
return false;
90
}
91
92
static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
93
int vector)
94
{
95
int i;
96
u64 sint_value;
97
98
for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
99
sint_value = synic_read_sint(synic, i);
100
if (synic_get_sint_vector(sint_value) == vector &&
101
sint_value & HV_SYNIC_SINT_AUTO_EOI)
102
return true;
103
}
104
return false;
105
}
106
107
static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
108
int vector)
109
{
110
struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
111
struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
112
bool auto_eoi_old, auto_eoi_new;
113
114
if (vector < HV_SYNIC_FIRST_VALID_VECTOR)
115
return;
116
117
if (synic_has_vector_connected(synic, vector))
118
__set_bit(vector, synic->vec_bitmap);
119
else
120
__clear_bit(vector, synic->vec_bitmap);
121
122
auto_eoi_old = !bitmap_empty(synic->auto_eoi_bitmap, 256);
123
124
if (synic_has_vector_auto_eoi(synic, vector))
125
__set_bit(vector, synic->auto_eoi_bitmap);
126
else
127
__clear_bit(vector, synic->auto_eoi_bitmap);
128
129
auto_eoi_new = !bitmap_empty(synic->auto_eoi_bitmap, 256);
130
131
if (auto_eoi_old == auto_eoi_new)
132
return;
133
134
if (!enable_apicv)
135
return;
136
137
down_write(&vcpu->kvm->arch.apicv_update_lock);
138
139
if (auto_eoi_new)
140
hv->synic_auto_eoi_used++;
141
else
142
hv->synic_auto_eoi_used--;
143
144
/*
145
* Inhibit APICv if any vCPU is using SynIC's AutoEOI, which relies on
146
* the hypervisor to manually inject IRQs.
147
*/
148
__kvm_set_or_clear_apicv_inhibit(vcpu->kvm,
149
APICV_INHIBIT_REASON_HYPERV,
150
!!hv->synic_auto_eoi_used);
151
152
up_write(&vcpu->kvm->arch.apicv_update_lock);
153
}
154
155
static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
156
u64 data, bool host)
157
{
158
int vector, old_vector;
159
bool masked;
160
161
vector = data & HV_SYNIC_SINT_VECTOR_MASK;
162
masked = data & HV_SYNIC_SINT_MASKED;
163
164
/*
165
* Valid vectors are 16-255, however, nested Hyper-V attempts to write
166
* default '0x10000' value on boot and this should not #GP. We need to
167
* allow zero-initing the register from host as well.
168
*/
169
if (vector < HV_SYNIC_FIRST_VALID_VECTOR && !host && !masked)
170
return 1;
171
/*
172
* Guest may configure multiple SINTs to use the same vector, so
173
* we maintain a bitmap of vectors handled by synic, and a
174
* bitmap of vectors with auto-eoi behavior. The bitmaps are
175
* updated here, and atomically queried on fast paths.
176
*/
177
old_vector = synic_read_sint(synic, sint) & HV_SYNIC_SINT_VECTOR_MASK;
178
179
atomic64_set(&synic->sint[sint], data);
180
181
synic_update_vector(synic, old_vector);
182
183
synic_update_vector(synic, vector);
184
185
/* Load SynIC vectors into EOI exit bitmap */
186
kvm_make_request(KVM_REQ_SCAN_IOAPIC, hv_synic_to_vcpu(synic));
187
return 0;
188
}
189
190
static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
191
{
192
struct kvm_vcpu *vcpu = NULL;
193
unsigned long i;
194
195
if (vpidx >= KVM_MAX_VCPUS)
196
return NULL;
197
198
vcpu = kvm_get_vcpu(kvm, vpidx);
199
if (vcpu && kvm_hv_get_vpindex(vcpu) == vpidx)
200
return vcpu;
201
kvm_for_each_vcpu(i, vcpu, kvm)
202
if (kvm_hv_get_vpindex(vcpu) == vpidx)
203
return vcpu;
204
return NULL;
205
}
206
207
static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
208
{
209
struct kvm_vcpu *vcpu;
210
struct kvm_vcpu_hv_synic *synic;
211
212
vcpu = get_vcpu_by_vpidx(kvm, vpidx);
213
if (!vcpu || !to_hv_vcpu(vcpu))
214
return NULL;
215
synic = to_hv_synic(vcpu);
216
return (synic->active) ? synic : NULL;
217
}
218
219
static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
220
{
221
struct kvm *kvm = vcpu->kvm;
222
struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
223
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
224
struct kvm_vcpu_hv_stimer *stimer;
225
int gsi, idx;
226
227
trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);
228
229
/* Try to deliver pending Hyper-V SynIC timers messages */
230
for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
231
stimer = &hv_vcpu->stimer[idx];
232
if (stimer->msg_pending && stimer->config.enable &&
233
!stimer->config.direct_mode &&
234
stimer->config.sintx == sint)
235
stimer_mark_pending(stimer, false);
236
}
237
238
idx = srcu_read_lock(&kvm->irq_srcu);
239
gsi = atomic_read(&synic->sint_to_gsi[sint]);
240
if (gsi != -1)
241
kvm_notify_acked_gsi(kvm, gsi);
242
srcu_read_unlock(&kvm->irq_srcu, idx);
243
}
244
245
static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
246
{
247
struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
248
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
249
250
hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
251
hv_vcpu->exit.u.synic.msr = msr;
252
hv_vcpu->exit.u.synic.control = synic->control;
253
hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
254
hv_vcpu->exit.u.synic.msg_page = synic->msg_page;
255
256
kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
257
}
258
259
static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
260
u32 msr, u64 data, bool host)
261
{
262
struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
263
int ret;
264
265
if (!synic->active && (!host || data))
266
return 1;
267
268
trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);
269
270
ret = 0;
271
switch (msr) {
272
case HV_X64_MSR_SCONTROL:
273
synic->control = data;
274
if (!host)
275
synic_exit(synic, msr);
276
break;
277
case HV_X64_MSR_SVERSION:
278
if (!host) {
279
ret = 1;
280
break;
281
}
282
synic->version = data;
283
break;
284
case HV_X64_MSR_SIEFP:
285
if ((data & HV_SYNIC_SIEFP_ENABLE) && !host &&
286
!synic->dont_zero_synic_pages)
287
if (kvm_clear_guest(vcpu->kvm,
288
data & PAGE_MASK, PAGE_SIZE)) {
289
ret = 1;
290
break;
291
}
292
synic->evt_page = data;
293
if (!host)
294
synic_exit(synic, msr);
295
break;
296
case HV_X64_MSR_SIMP:
297
if ((data & HV_SYNIC_SIMP_ENABLE) && !host &&
298
!synic->dont_zero_synic_pages)
299
if (kvm_clear_guest(vcpu->kvm,
300
data & PAGE_MASK, PAGE_SIZE)) {
301
ret = 1;
302
break;
303
}
304
synic->msg_page = data;
305
if (!host)
306
synic_exit(synic, msr);
307
break;
308
case HV_X64_MSR_EOM: {
309
int i;
310
311
if (!synic->active)
312
break;
313
314
for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
315
kvm_hv_notify_acked_sint(vcpu, i);
316
break;
317
}
318
case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
319
ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
320
break;
321
default:
322
ret = 1;
323
break;
324
}
325
return ret;
326
}
327
328
static bool kvm_hv_is_syndbg_enabled(struct kvm_vcpu *vcpu)
329
{
330
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
331
332
return hv_vcpu->cpuid_cache.syndbg_cap_eax &
333
HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
334
}
335
336
static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu)
337
{
338
struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
339
340
if (vcpu->run->hyperv.u.syndbg.msr == HV_X64_MSR_SYNDBG_CONTROL)
341
hv->hv_syndbg.control.status =
342
vcpu->run->hyperv.u.syndbg.status;
343
return 1;
344
}
345
346
static void syndbg_exit(struct kvm_vcpu *vcpu, u32 msr)
347
{
348
struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
349
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
350
351
hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNDBG;
352
hv_vcpu->exit.u.syndbg.msr = msr;
353
hv_vcpu->exit.u.syndbg.control = syndbg->control.control;
354
hv_vcpu->exit.u.syndbg.send_page = syndbg->control.send_page;
355
hv_vcpu->exit.u.syndbg.recv_page = syndbg->control.recv_page;
356
hv_vcpu->exit.u.syndbg.pending_page = syndbg->control.pending_page;
357
vcpu->arch.complete_userspace_io =
358
kvm_hv_syndbg_complete_userspace;
359
360
kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
361
}
362
363
static int syndbg_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
364
{
365
struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
366
367
if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
368
return 1;
369
370
trace_kvm_hv_syndbg_set_msr(vcpu->vcpu_id,
371
to_hv_vcpu(vcpu)->vp_index, msr, data);
372
switch (msr) {
373
case HV_X64_MSR_SYNDBG_CONTROL:
374
syndbg->control.control = data;
375
if (!host)
376
syndbg_exit(vcpu, msr);
377
break;
378
case HV_X64_MSR_SYNDBG_STATUS:
379
syndbg->control.status = data;
380
break;
381
case HV_X64_MSR_SYNDBG_SEND_BUFFER:
382
syndbg->control.send_page = data;
383
break;
384
case HV_X64_MSR_SYNDBG_RECV_BUFFER:
385
syndbg->control.recv_page = data;
386
break;
387
case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
388
syndbg->control.pending_page = data;
389
if (!host)
390
syndbg_exit(vcpu, msr);
391
break;
392
case HV_X64_MSR_SYNDBG_OPTIONS:
393
syndbg->options = data;
394
break;
395
default:
396
break;
397
}
398
399
return 0;
400
}
401
402
static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
403
{
404
struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
405
406
if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
407
return 1;
408
409
switch (msr) {
410
case HV_X64_MSR_SYNDBG_CONTROL:
411
*pdata = syndbg->control.control;
412
break;
413
case HV_X64_MSR_SYNDBG_STATUS:
414
*pdata = syndbg->control.status;
415
break;
416
case HV_X64_MSR_SYNDBG_SEND_BUFFER:
417
*pdata = syndbg->control.send_page;
418
break;
419
case HV_X64_MSR_SYNDBG_RECV_BUFFER:
420
*pdata = syndbg->control.recv_page;
421
break;
422
case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
423
*pdata = syndbg->control.pending_page;
424
break;
425
case HV_X64_MSR_SYNDBG_OPTIONS:
426
*pdata = syndbg->options;
427
break;
428
default:
429
break;
430
}
431
432
trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id, kvm_hv_get_vpindex(vcpu), msr, *pdata);
433
434
return 0;
435
}
436
437
static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
438
bool host)
439
{
440
int ret;
441
442
if (!synic->active && !host)
443
return 1;
444
445
ret = 0;
446
switch (msr) {
447
case HV_X64_MSR_SCONTROL:
448
*pdata = synic->control;
449
break;
450
case HV_X64_MSR_SVERSION:
451
*pdata = synic->version;
452
break;
453
case HV_X64_MSR_SIEFP:
454
*pdata = synic->evt_page;
455
break;
456
case HV_X64_MSR_SIMP:
457
*pdata = synic->msg_page;
458
break;
459
case HV_X64_MSR_EOM:
460
*pdata = 0;
461
break;
462
case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
463
*pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
464
break;
465
default:
466
ret = 1;
467
break;
468
}
469
return ret;
470
}
471
472
static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
473
{
474
struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
475
struct kvm_lapic_irq irq;
476
int ret, vector;
477
478
if (KVM_BUG_ON(!lapic_in_kernel(vcpu), vcpu->kvm))
479
return -EINVAL;
480
481
if (sint >= ARRAY_SIZE(synic->sint))
482
return -EINVAL;
483
484
vector = synic_get_sint_vector(synic_read_sint(synic, sint));
485
if (vector < 0)
486
return -ENOENT;
487
488
memset(&irq, 0, sizeof(irq));
489
irq.shorthand = APIC_DEST_SELF;
490
irq.dest_mode = APIC_DEST_PHYSICAL;
491
irq.delivery_mode = APIC_DM_FIXED;
492
irq.vector = vector;
493
irq.level = 1;
494
495
ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL);
496
trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);
497
return ret;
498
}
499
500
int kvm_hv_synic_set_irq(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
501
int irq_source_id, int level, bool line_status)
502
{
503
struct kvm_vcpu_hv_synic *synic;
504
505
if (!level)
506
return -1;
507
508
synic = synic_get(kvm, e->hv_sint.vcpu);
509
if (!synic)
510
return -EINVAL;
511
512
return synic_set_irq(synic, e->hv_sint.sint);
513
}
514
515
void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
516
{
517
struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
518
int i;
519
520
trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);
521
522
for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
523
if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
524
kvm_hv_notify_acked_sint(vcpu, i);
525
}
526
527
static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vpidx, u32 sint, int gsi)
528
{
529
struct kvm_vcpu_hv_synic *synic;
530
531
synic = synic_get(kvm, vpidx);
532
if (!synic)
533
return -EINVAL;
534
535
if (sint >= ARRAY_SIZE(synic->sint_to_gsi))
536
return -EINVAL;
537
538
atomic_set(&synic->sint_to_gsi[sint], gsi);
539
return 0;
540
}
541
542
void kvm_hv_irq_routing_update(struct kvm *kvm)
543
{
544
struct kvm_irq_routing_table *irq_rt;
545
struct kvm_kernel_irq_routing_entry *e;
546
u32 gsi;
547
548
irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
549
lockdep_is_held(&kvm->irq_lock));
550
551
for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) {
552
hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
553
if (e->type == KVM_IRQ_ROUTING_HV_SINT)
554
kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
555
e->hv_sint.sint, gsi);
556
}
557
}
558
}
559
560
static void synic_init(struct kvm_vcpu_hv_synic *synic)
561
{
562
int i;
563
564
memset(synic, 0, sizeof(*synic));
565
synic->version = HV_SYNIC_VERSION_1;
566
for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
567
atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
568
atomic_set(&synic->sint_to_gsi[i], -1);
569
}
570
}
571
572
static u64 get_time_ref_counter(struct kvm *kvm)
573
{
574
struct kvm_hv *hv = to_kvm_hv(kvm);
575
struct kvm_vcpu *vcpu;
576
u64 tsc;
577
578
/*
579
* Fall back to get_kvmclock_ns() when TSC page hasn't been set up,
580
* is broken, disabled or being updated.
581
*/
582
if (hv->hv_tsc_page_status != HV_TSC_PAGE_SET)
583
return div_u64(get_kvmclock_ns(kvm), 100);
584
585
vcpu = kvm_get_vcpu(kvm, 0);
586
tsc = kvm_read_l1_tsc(vcpu, rdtsc());
587
return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64)
588
+ hv->tsc_ref.tsc_offset;
589
}
590
591
static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
592
bool vcpu_kick)
593
{
594
struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
595
596
set_bit(stimer->index,
597
to_hv_vcpu(vcpu)->stimer_pending_bitmap);
598
kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
599
if (vcpu_kick)
600
kvm_vcpu_kick(vcpu);
601
}
602
603
static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
604
{
605
struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
606
607
trace_kvm_hv_stimer_cleanup(hv_stimer_to_vcpu(stimer)->vcpu_id,
608
stimer->index);
609
610
hrtimer_cancel(&stimer->timer);
611
clear_bit(stimer->index,
612
to_hv_vcpu(vcpu)->stimer_pending_bitmap);
613
stimer->msg_pending = false;
614
stimer->exp_time = 0;
615
}
616
617
static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
618
{
619
struct kvm_vcpu_hv_stimer *stimer;
620
621
stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
622
trace_kvm_hv_stimer_callback(hv_stimer_to_vcpu(stimer)->vcpu_id,
623
stimer->index);
624
stimer_mark_pending(stimer, true);
625
626
return HRTIMER_NORESTART;
627
}
628
629
/*
630
* stimer_start() assumptions:
631
* a) stimer->count is not equal to 0
632
* b) stimer->config has HV_STIMER_ENABLE flag
633
*/
634
static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
635
{
636
u64 time_now;
637
ktime_t ktime_now;
638
639
time_now = get_time_ref_counter(hv_stimer_to_vcpu(stimer)->kvm);
640
ktime_now = ktime_get();
641
642
if (stimer->config.periodic) {
643
if (stimer->exp_time) {
644
if (time_now >= stimer->exp_time) {
645
u64 remainder;
646
647
div64_u64_rem(time_now - stimer->exp_time,
648
stimer->count, &remainder);
649
stimer->exp_time =
650
time_now + (stimer->count - remainder);
651
}
652
} else
653
stimer->exp_time = time_now + stimer->count;
654
655
trace_kvm_hv_stimer_start_periodic(
656
hv_stimer_to_vcpu(stimer)->vcpu_id,
657
stimer->index,
658
time_now, stimer->exp_time);
659
660
hrtimer_start(&stimer->timer,
661
ktime_add_ns(ktime_now,
662
100 * (stimer->exp_time - time_now)),
663
HRTIMER_MODE_ABS);
664
return 0;
665
}
666
stimer->exp_time = stimer->count;
667
if (time_now >= stimer->count) {
668
/*
669
* Expire timer according to Hypervisor Top-Level Functional
670
* specification v4(15.3.1):
671
* "If a one shot is enabled and the specified count is in
672
* the past, it will expire immediately."
673
*/
674
stimer_mark_pending(stimer, false);
675
return 0;
676
}
677
678
trace_kvm_hv_stimer_start_one_shot(hv_stimer_to_vcpu(stimer)->vcpu_id,
679
stimer->index,
680
time_now, stimer->count);
681
682
hrtimer_start(&stimer->timer,
683
ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)),
684
HRTIMER_MODE_ABS);
685
return 0;
686
}
687
688
static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
689
bool host)
690
{
691
union hv_stimer_config new_config = {.as_uint64 = config},
692
old_config = {.as_uint64 = stimer->config.as_uint64};
693
struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
694
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
695
struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
696
697
if (!synic->active && (!host || config))
698
return 1;
699
700
if (unlikely(!host && hv_vcpu->enforce_cpuid && new_config.direct_mode &&
701
!(hv_vcpu->cpuid_cache.features_edx &
702
HV_STIMER_DIRECT_MODE_AVAILABLE)))
703
return 1;
704
705
trace_kvm_hv_stimer_set_config(hv_stimer_to_vcpu(stimer)->vcpu_id,
706
stimer->index, config, host);
707
708
stimer_cleanup(stimer);
709
if (old_config.enable &&
710
!new_config.direct_mode && new_config.sintx == 0)
711
new_config.enable = 0;
712
stimer->config.as_uint64 = new_config.as_uint64;
713
714
if (stimer->config.enable)
715
stimer_mark_pending(stimer, false);
716
717
return 0;
718
}
719
720
static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
721
bool host)
722
{
723
struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
724
struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
725
726
if (!synic->active && (!host || count))
727
return 1;
728
729
trace_kvm_hv_stimer_set_count(hv_stimer_to_vcpu(stimer)->vcpu_id,
730
stimer->index, count, host);
731
732
stimer_cleanup(stimer);
733
stimer->count = count;
734
if (!host) {
735
if (stimer->count == 0)
736
stimer->config.enable = 0;
737
else if (stimer->config.auto_enable)
738
stimer->config.enable = 1;
739
}
740
741
if (stimer->config.enable)
742
stimer_mark_pending(stimer, false);
743
744
return 0;
745
}
746
747
static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
748
{
749
*pconfig = stimer->config.as_uint64;
750
return 0;
751
}
752
753
static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
754
{
755
*pcount = stimer->count;
756
return 0;
757
}
758
759
static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
760
struct hv_message *src_msg, bool no_retry)
761
{
762
struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
763
int msg_off = offsetof(struct hv_message_page, sint_message[sint]);
764
gfn_t msg_page_gfn;
765
struct hv_message_header hv_hdr;
766
int r;
767
768
if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
769
return -ENOENT;
770
771
msg_page_gfn = synic->msg_page >> PAGE_SHIFT;
772
773
/*
774
* Strictly following the spec-mandated ordering would assume setting
775
* .msg_pending before checking .message_type. However, this function
776
* is only called in vcpu context so the entire update is atomic from
777
* guest POV and thus the exact order here doesn't matter.
778
*/
779
r = kvm_vcpu_read_guest_page(vcpu, msg_page_gfn, &hv_hdr.message_type,
780
msg_off + offsetof(struct hv_message,
781
header.message_type),
782
sizeof(hv_hdr.message_type));
783
if (r < 0)
784
return r;
785
786
if (hv_hdr.message_type != HVMSG_NONE) {
787
if (no_retry)
788
return 0;
789
790
hv_hdr.message_flags.msg_pending = 1;
791
r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn,
792
&hv_hdr.message_flags,
793
msg_off +
794
offsetof(struct hv_message,
795
header.message_flags),
796
sizeof(hv_hdr.message_flags));
797
if (r < 0)
798
return r;
799
return -EAGAIN;
800
}
801
802
r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn, src_msg, msg_off,
803
sizeof(src_msg->header) +
804
src_msg->header.payload_size);
805
if (r < 0)
806
return r;
807
808
r = synic_set_irq(synic, sint);
809
if (r < 0)
810
return r;
811
if (r == 0)
812
return -EFAULT;
813
return 0;
814
}
815
816
static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
817
{
818
struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
819
struct hv_message *msg = &stimer->msg;
820
struct hv_timer_message_payload *payload =
821
(struct hv_timer_message_payload *)&msg->u.payload;
822
823
/*
824
* To avoid piling up periodic ticks, don't retry message
825
* delivery for them (within "lazy" lost ticks policy).
826
*/
827
bool no_retry = stimer->config.periodic;
828
829
payload->expiration_time = stimer->exp_time;
830
payload->delivery_time = get_time_ref_counter(vcpu->kvm);
831
return synic_deliver_msg(to_hv_synic(vcpu),
832
stimer->config.sintx, msg,
833
no_retry);
834
}
835
836
static int stimer_notify_direct(struct kvm_vcpu_hv_stimer *stimer)
837
{
838
struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
839
struct kvm_lapic_irq irq = {
840
.delivery_mode = APIC_DM_FIXED,
841
.vector = stimer->config.apic_vector
842
};
843
844
if (lapic_in_kernel(vcpu))
845
return !kvm_apic_set_irq(vcpu, &irq, NULL);
846
return 0;
847
}
848
849
static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
850
{
851
int r, direct = stimer->config.direct_mode;
852
853
stimer->msg_pending = true;
854
if (!direct)
855
r = stimer_send_msg(stimer);
856
else
857
r = stimer_notify_direct(stimer);
858
trace_kvm_hv_stimer_expiration(hv_stimer_to_vcpu(stimer)->vcpu_id,
859
stimer->index, direct, r);
860
if (!r) {
861
stimer->msg_pending = false;
862
if (!(stimer->config.periodic))
863
stimer->config.enable = 0;
864
}
865
}
866
867
void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
868
{
869
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
870
struct kvm_vcpu_hv_stimer *stimer;
871
u64 time_now, exp_time;
872
int i;
873
874
if (!hv_vcpu)
875
return;
876
877
for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
878
if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
879
stimer = &hv_vcpu->stimer[i];
880
if (stimer->config.enable) {
881
exp_time = stimer->exp_time;
882
883
if (exp_time) {
884
time_now =
885
get_time_ref_counter(vcpu->kvm);
886
if (time_now >= exp_time)
887
stimer_expiration(stimer);
888
}
889
890
if ((stimer->config.enable) &&
891
stimer->count) {
892
if (!stimer->msg_pending)
893
stimer_start(stimer);
894
} else
895
stimer_cleanup(stimer);
896
}
897
}
898
}
899
900
void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
901
{
902
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
903
int i;
904
905
if (!hv_vcpu)
906
return;
907
908
for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
909
stimer_cleanup(&hv_vcpu->stimer[i]);
910
911
kfree(hv_vcpu);
912
vcpu->arch.hyperv = NULL;
913
}
914
915
bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
916
{
917
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
918
919
if (!hv_vcpu)
920
return false;
921
922
if (!(hv_vcpu->hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
923
return false;
924
return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
925
}
926
EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);
927
928
int kvm_hv_get_assist_page(struct kvm_vcpu *vcpu)
929
{
930
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
931
932
if (!hv_vcpu || !kvm_hv_assist_page_enabled(vcpu))
933
return -EFAULT;
934
935
return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
936
&hv_vcpu->vp_assist_page, sizeof(struct hv_vp_assist_page));
937
}
938
EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page);
939
940
static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
941
{
942
struct hv_message *msg = &stimer->msg;
943
struct hv_timer_message_payload *payload =
944
(struct hv_timer_message_payload *)&msg->u.payload;
945
946
memset(&msg->header, 0, sizeof(msg->header));
947
msg->header.message_type = HVMSG_TIMER_EXPIRED;
948
msg->header.payload_size = sizeof(*payload);
949
950
payload->timer_index = stimer->index;
951
payload->expiration_time = 0;
952
payload->delivery_time = 0;
953
}
954
955
static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
956
{
957
memset(stimer, 0, sizeof(*stimer));
958
stimer->index = timer_index;
959
hrtimer_setup(&stimer->timer, stimer_timer_callback, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
960
stimer_prepare_msg(stimer);
961
}
962
963
int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
964
{
965
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
966
int i;
967
968
if (hv_vcpu)
969
return 0;
970
971
hv_vcpu = kzalloc(sizeof(struct kvm_vcpu_hv), GFP_KERNEL_ACCOUNT);
972
if (!hv_vcpu)
973
return -ENOMEM;
974
975
vcpu->arch.hyperv = hv_vcpu;
976
hv_vcpu->vcpu = vcpu;
977
978
synic_init(&hv_vcpu->synic);
979
980
bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
981
for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
982
stimer_init(&hv_vcpu->stimer[i], i);
983
984
hv_vcpu->vp_index = vcpu->vcpu_idx;
985
986
for (i = 0; i < HV_NR_TLB_FLUSH_FIFOS; i++) {
987
INIT_KFIFO(hv_vcpu->tlb_flush_fifo[i].entries);
988
spin_lock_init(&hv_vcpu->tlb_flush_fifo[i].write_lock);
989
}
990
991
return 0;
992
}
993
994
int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
995
{
996
struct kvm_vcpu_hv_synic *synic;
997
int r;
998
999
r = kvm_hv_vcpu_init(vcpu);
1000
if (r)
1001
return r;
1002
1003
synic = to_hv_synic(vcpu);
1004
1005
synic->active = true;
1006
synic->dont_zero_synic_pages = dont_zero_synic_pages;
1007
synic->control = HV_SYNIC_CONTROL_ENABLE;
1008
return 0;
1009
}
1010
1011
static bool kvm_hv_msr_partition_wide(u32 msr)
1012
{
1013
bool r = false;
1014
1015
switch (msr) {
1016
case HV_X64_MSR_GUEST_OS_ID:
1017
case HV_X64_MSR_HYPERCALL:
1018
case HV_X64_MSR_REFERENCE_TSC:
1019
case HV_X64_MSR_TIME_REF_COUNT:
1020
case HV_X64_MSR_CRASH_CTL:
1021
case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
1022
case HV_X64_MSR_RESET:
1023
case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
1024
case HV_X64_MSR_TSC_EMULATION_CONTROL:
1025
case HV_X64_MSR_TSC_EMULATION_STATUS:
1026
case HV_X64_MSR_TSC_INVARIANT_CONTROL:
1027
case HV_X64_MSR_SYNDBG_OPTIONS:
1028
case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
1029
r = true;
1030
break;
1031
}
1032
1033
return r;
1034
}
1035
1036
static int kvm_hv_msr_get_crash_data(struct kvm *kvm, u32 index, u64 *pdata)
1037
{
1038
struct kvm_hv *hv = to_kvm_hv(kvm);
1039
size_t size = ARRAY_SIZE(hv->hv_crash_param);
1040
1041
if (WARN_ON_ONCE(index >= size))
1042
return -EINVAL;
1043
1044
*pdata = hv->hv_crash_param[array_index_nospec(index, size)];
1045
return 0;
1046
}
1047
1048
static int kvm_hv_msr_get_crash_ctl(struct kvm *kvm, u64 *pdata)
1049
{
1050
struct kvm_hv *hv = to_kvm_hv(kvm);
1051
1052
*pdata = hv->hv_crash_ctl;
1053
return 0;
1054
}
1055
1056
static int kvm_hv_msr_set_crash_ctl(struct kvm *kvm, u64 data)
1057
{
1058
struct kvm_hv *hv = to_kvm_hv(kvm);
1059
1060
hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY;
1061
1062
return 0;
1063
}
1064
1065
static int kvm_hv_msr_set_crash_data(struct kvm *kvm, u32 index, u64 data)
1066
{
1067
struct kvm_hv *hv = to_kvm_hv(kvm);
1068
size_t size = ARRAY_SIZE(hv->hv_crash_param);
1069
1070
if (WARN_ON_ONCE(index >= size))
1071
return -EINVAL;
1072
1073
hv->hv_crash_param[array_index_nospec(index, size)] = data;
1074
return 0;
1075
}
1076
1077
/*
1078
* The kvmclock and Hyper-V TSC page use similar formulas, and converting
1079
* between them is possible:
1080
*
1081
* kvmclock formula:
1082
* nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
1083
* + system_time
1084
*
1085
* Hyper-V formula:
1086
* nsec/100 = ticks * scale / 2^64 + offset
1087
*
1088
* When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula.
1089
* By dividing the kvmclock formula by 100 and equating what's left we get:
1090
* ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
1091
* scale / 2^64 = tsc_to_system_mul * 2^(tsc_shift-32) / 100
1092
* scale = tsc_to_system_mul * 2^(32+tsc_shift) / 100
1093
*
1094
* Now expand the kvmclock formula and divide by 100:
1095
* nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32)
1096
* - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32)
1097
* + system_time
1098
* nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
1099
* - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100
1100
* + system_time / 100
1101
*
1102
* Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64:
1103
* nsec/100 = ticks * scale / 2^64
1104
* - tsc_timestamp * scale / 2^64
1105
* + system_time / 100
1106
*
1107
* Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out:
1108
* offset = system_time / 100 - tsc_timestamp * scale / 2^64
1109
*
1110
* These two equivalencies are implemented in this function.
1111
*/
1112
static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
1113
struct ms_hyperv_tsc_page *tsc_ref)
1114
{
1115
u64 max_mul;
1116
1117
if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT))
1118
return false;
1119
1120
/*
1121
* check if scale would overflow, if so we use the time ref counter
1122
* tsc_to_system_mul * 2^(tsc_shift+32) / 100 >= 2^64
1123
* tsc_to_system_mul / 100 >= 2^(32-tsc_shift)
1124
* tsc_to_system_mul >= 100 * 2^(32-tsc_shift)
1125
*/
1126
max_mul = 100ull << (32 - hv_clock->tsc_shift);
1127
if (hv_clock->tsc_to_system_mul >= max_mul)
1128
return false;
1129
1130
/*
1131
* Otherwise compute the scale and offset according to the formulas
1132
* derived above.
1133
*/
1134
tsc_ref->tsc_scale =
1135
mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift),
1136
hv_clock->tsc_to_system_mul,
1137
100);
1138
1139
tsc_ref->tsc_offset = hv_clock->system_time;
1140
do_div(tsc_ref->tsc_offset, 100);
1141
tsc_ref->tsc_offset -=
1142
mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64);
1143
return true;
1144
}
1145
1146
/*
1147
* Don't touch TSC page values if the guest has opted for TSC emulation after
1148
* migration. KVM doesn't fully support reenlightenment notifications and TSC
1149
* access emulation and Hyper-V is known to expect the values in TSC page to
1150
* stay constant before TSC access emulation is disabled from guest side
1151
* (HV_X64_MSR_TSC_EMULATION_STATUS). KVM userspace is expected to preserve TSC
1152
* frequency and guest visible TSC value across migration (and prevent it when
1153
* TSC scaling is unsupported).
1154
*/
1155
static inline bool tsc_page_update_unsafe(struct kvm_hv *hv)
1156
{
1157
return (hv->hv_tsc_page_status != HV_TSC_PAGE_GUEST_CHANGED) &&
1158
hv->hv_tsc_emulation_control;
1159
}
1160
1161
void kvm_hv_setup_tsc_page(struct kvm *kvm,
1162
struct pvclock_vcpu_time_info *hv_clock)
1163
{
1164
struct kvm_hv *hv = to_kvm_hv(kvm);
1165
u32 tsc_seq;
1166
u64 gfn;
1167
1168
BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
1169
BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0);
1170
1171
mutex_lock(&hv->hv_lock);
1172
1173
if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
1174
hv->hv_tsc_page_status == HV_TSC_PAGE_SET ||
1175
hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
1176
goto out_unlock;
1177
1178
if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
1179
goto out_unlock;
1180
1181
gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
1182
/*
1183
* Because the TSC parameters only vary when there is a
1184
* change in the master clock, do not bother with caching.
1185
*/
1186
if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
1187
&tsc_seq, sizeof(tsc_seq))))
1188
goto out_err;
1189
1190
if (tsc_seq && tsc_page_update_unsafe(hv)) {
1191
if (kvm_read_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
1192
goto out_err;
1193
1194
hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
1195
goto out_unlock;
1196
}
1197
1198
/*
1199
* While we're computing and writing the parameters, force the
1200
* guest to use the time reference count MSR.
1201
*/
1202
hv->tsc_ref.tsc_sequence = 0;
1203
if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
1204
&hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
1205
goto out_err;
1206
1207
if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
1208
goto out_err;
1209
1210
/* Ensure sequence is zero before writing the rest of the struct. */
1211
smp_wmb();
1212
if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
1213
goto out_err;
1214
1215
/*
1216
* Now switch to the TSC page mechanism by writing the sequence.
1217
*/
1218
tsc_seq++;
1219
if (tsc_seq == 0xFFFFFFFF || tsc_seq == 0)
1220
tsc_seq = 1;
1221
1222
/* Write the struct entirely before the non-zero sequence. */
1223
smp_wmb();
1224
1225
hv->tsc_ref.tsc_sequence = tsc_seq;
1226
if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
1227
&hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
1228
goto out_err;
1229
1230
hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
1231
goto out_unlock;
1232
1233
out_err:
1234
hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
1235
out_unlock:
1236
mutex_unlock(&hv->hv_lock);
1237
}
1238
1239
void kvm_hv_request_tsc_page_update(struct kvm *kvm)
1240
{
1241
struct kvm_hv *hv = to_kvm_hv(kvm);
1242
1243
mutex_lock(&hv->hv_lock);
1244
1245
if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET &&
1246
!tsc_page_update_unsafe(hv))
1247
hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;
1248
1249
mutex_unlock(&hv->hv_lock);
1250
}
1251
1252
static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
1253
{
1254
if (!hv_vcpu->enforce_cpuid)
1255
return true;
1256
1257
switch (msr) {
1258
case HV_X64_MSR_GUEST_OS_ID:
1259
case HV_X64_MSR_HYPERCALL:
1260
return hv_vcpu->cpuid_cache.features_eax &
1261
HV_MSR_HYPERCALL_AVAILABLE;
1262
case HV_X64_MSR_VP_RUNTIME:
1263
return hv_vcpu->cpuid_cache.features_eax &
1264
HV_MSR_VP_RUNTIME_AVAILABLE;
1265
case HV_X64_MSR_TIME_REF_COUNT:
1266
return hv_vcpu->cpuid_cache.features_eax &
1267
HV_MSR_TIME_REF_COUNT_AVAILABLE;
1268
case HV_X64_MSR_VP_INDEX:
1269
return hv_vcpu->cpuid_cache.features_eax &
1270
HV_MSR_VP_INDEX_AVAILABLE;
1271
case HV_X64_MSR_RESET:
1272
return hv_vcpu->cpuid_cache.features_eax &
1273
HV_MSR_RESET_AVAILABLE;
1274
case HV_X64_MSR_REFERENCE_TSC:
1275
return hv_vcpu->cpuid_cache.features_eax &
1276
HV_MSR_REFERENCE_TSC_AVAILABLE;
1277
case HV_X64_MSR_SCONTROL:
1278
case HV_X64_MSR_SVERSION:
1279
case HV_X64_MSR_SIEFP:
1280
case HV_X64_MSR_SIMP:
1281
case HV_X64_MSR_EOM:
1282
case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
1283
return hv_vcpu->cpuid_cache.features_eax &
1284
HV_MSR_SYNIC_AVAILABLE;
1285
case HV_X64_MSR_STIMER0_CONFIG:
1286
case HV_X64_MSR_STIMER1_CONFIG:
1287
case HV_X64_MSR_STIMER2_CONFIG:
1288
case HV_X64_MSR_STIMER3_CONFIG:
1289
case HV_X64_MSR_STIMER0_COUNT:
1290
case HV_X64_MSR_STIMER1_COUNT:
1291
case HV_X64_MSR_STIMER2_COUNT:
1292
case HV_X64_MSR_STIMER3_COUNT:
1293
return hv_vcpu->cpuid_cache.features_eax &
1294
HV_MSR_SYNTIMER_AVAILABLE;
1295
case HV_X64_MSR_EOI:
1296
case HV_X64_MSR_ICR:
1297
case HV_X64_MSR_TPR:
1298
case HV_X64_MSR_VP_ASSIST_PAGE:
1299
return hv_vcpu->cpuid_cache.features_eax &
1300
HV_MSR_APIC_ACCESS_AVAILABLE;
1301
case HV_X64_MSR_TSC_FREQUENCY:
1302
case HV_X64_MSR_APIC_FREQUENCY:
1303
return hv_vcpu->cpuid_cache.features_eax &
1304
HV_ACCESS_FREQUENCY_MSRS;
1305
case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
1306
case HV_X64_MSR_TSC_EMULATION_CONTROL:
1307
case HV_X64_MSR_TSC_EMULATION_STATUS:
1308
return hv_vcpu->cpuid_cache.features_eax &
1309
HV_ACCESS_REENLIGHTENMENT;
1310
case HV_X64_MSR_TSC_INVARIANT_CONTROL:
1311
return hv_vcpu->cpuid_cache.features_eax &
1312
HV_ACCESS_TSC_INVARIANT;
1313
case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
1314
case HV_X64_MSR_CRASH_CTL:
1315
return hv_vcpu->cpuid_cache.features_edx &
1316
HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
1317
case HV_X64_MSR_SYNDBG_OPTIONS:
1318
case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
1319
return hv_vcpu->cpuid_cache.features_edx &
1320
HV_FEATURE_DEBUG_MSRS_AVAILABLE;
1321
default:
1322
break;
1323
}
1324
1325
return false;
1326
}
1327
1328
#define KVM_HV_WIN2016_GUEST_ID 0x1040a00003839
1329
#define KVM_HV_WIN2016_GUEST_ID_MASK (~GENMASK_ULL(23, 16)) /* mask out the service version */
1330
1331
/*
1332
* Hyper-V enabled Windows Server 2016 SMP VMs fail to boot in !XSAVES && XSAVEC
1333
* configuration.
1334
* Such configuration can result from, for example, AMD Erratum 1386 workaround.
1335
*
1336
* Print a notice so users aren't left wondering what's suddenly gone wrong.
1337
*/
1338
static void __kvm_hv_xsaves_xsavec_maybe_warn(struct kvm_vcpu *vcpu)
1339
{
1340
struct kvm *kvm = vcpu->kvm;
1341
struct kvm_hv *hv = to_kvm_hv(kvm);
1342
1343
/* Check again under the hv_lock. */
1344
if (hv->xsaves_xsavec_checked)
1345
return;
1346
1347
if ((hv->hv_guest_os_id & KVM_HV_WIN2016_GUEST_ID_MASK) !=
1348
KVM_HV_WIN2016_GUEST_ID)
1349
return;
1350
1351
hv->xsaves_xsavec_checked = true;
1352
1353
/* UP configurations aren't affected */
1354
if (atomic_read(&kvm->online_vcpus) < 2)
1355
return;
1356
1357
if (guest_cpuid_has(vcpu, X86_FEATURE_XSAVES) ||
1358
!guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVEC))
1359
return;
1360
1361
pr_notice_ratelimited("Booting SMP Windows KVM VM with !XSAVES && XSAVEC. "
1362
"If it fails to boot try disabling XSAVEC in the VM config.\n");
1363
}
1364
1365
void kvm_hv_xsaves_xsavec_maybe_warn(struct kvm_vcpu *vcpu)
1366
{
1367
struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
1368
1369
if (!vcpu->arch.hyperv_enabled ||
1370
hv->xsaves_xsavec_checked)
1371
return;
1372
1373
mutex_lock(&hv->hv_lock);
1374
__kvm_hv_xsaves_xsavec_maybe_warn(vcpu);
1375
mutex_unlock(&hv->hv_lock);
1376
}
1377
1378
static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
1379
bool host)
1380
{
1381
struct kvm *kvm = vcpu->kvm;
1382
struct kvm_hv *hv = to_kvm_hv(kvm);
1383
1384
if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr)))
1385
return 1;
1386
1387
switch (msr) {
1388
case HV_X64_MSR_GUEST_OS_ID:
1389
hv->hv_guest_os_id = data;
1390
/* setting guest os id to zero disables hypercall page */
1391
if (!hv->hv_guest_os_id)
1392
hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
1393
break;
1394
case HV_X64_MSR_HYPERCALL: {
1395
u8 instructions[9];
1396
int i = 0;
1397
u64 addr;
1398
1399
/* if guest os id is not set hypercall should remain disabled */
1400
if (!hv->hv_guest_os_id)
1401
break;
1402
if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
1403
hv->hv_hypercall = data;
1404
break;
1405
}
1406
1407
/*
1408
* If Xen and Hyper-V hypercalls are both enabled, disambiguate
1409
* the same way Xen itself does, by setting the bit 31 of EAX
1410
* which is RsvdZ in the 32-bit Hyper-V hypercall ABI and just
1411
* going to be clobbered on 64-bit.
1412
*/
1413
if (kvm_xen_hypercall_enabled(kvm)) {
1414
/* orl $0x80000000, %eax */
1415
instructions[i++] = 0x0d;
1416
instructions[i++] = 0x00;
1417
instructions[i++] = 0x00;
1418
instructions[i++] = 0x00;
1419
instructions[i++] = 0x80;
1420
}
1421
1422
/* vmcall/vmmcall */
1423
kvm_x86_call(patch_hypercall)(vcpu, instructions + i);
1424
i += 3;
1425
1426
/* ret */
1427
((unsigned char *)instructions)[i++] = 0xc3;
1428
1429
addr = data & HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK;
1430
if (kvm_vcpu_write_guest(vcpu, addr, instructions, i))
1431
return 1;
1432
hv->hv_hypercall = data;
1433
break;
1434
}
1435
case HV_X64_MSR_REFERENCE_TSC:
1436
hv->hv_tsc_page = data;
1437
if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE) {
1438
if (!host)
1439
hv->hv_tsc_page_status = HV_TSC_PAGE_GUEST_CHANGED;
1440
else
1441
hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;
1442
kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
1443
} else {
1444
hv->hv_tsc_page_status = HV_TSC_PAGE_UNSET;
1445
}
1446
break;
1447
case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
1448
return kvm_hv_msr_set_crash_data(kvm,
1449
msr - HV_X64_MSR_CRASH_P0,
1450
data);
1451
case HV_X64_MSR_CRASH_CTL:
1452
if (host)
1453
return kvm_hv_msr_set_crash_ctl(kvm, data);
1454
1455
if (data & HV_CRASH_CTL_CRASH_NOTIFY) {
1456
vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
1457
hv->hv_crash_param[0],
1458
hv->hv_crash_param[1],
1459
hv->hv_crash_param[2],
1460
hv->hv_crash_param[3],
1461
hv->hv_crash_param[4]);
1462
1463
/* Send notification about crash to user space */
1464
kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
1465
}
1466
break;
1467
case HV_X64_MSR_RESET:
1468
if (data == 1) {
1469
vcpu_debug(vcpu, "hyper-v reset requested\n");
1470
kvm_make_request(KVM_REQ_HV_RESET, vcpu);
1471
}
1472
break;
1473
case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
1474
hv->hv_reenlightenment_control = data;
1475
break;
1476
case HV_X64_MSR_TSC_EMULATION_CONTROL:
1477
hv->hv_tsc_emulation_control = data;
1478
break;
1479
case HV_X64_MSR_TSC_EMULATION_STATUS:
1480
if (data && !host)
1481
return 1;
1482
1483
hv->hv_tsc_emulation_status = data;
1484
break;
1485
case HV_X64_MSR_TIME_REF_COUNT:
1486
/* read-only, but still ignore it if host-initiated */
1487
if (!host)
1488
return 1;
1489
break;
1490
case HV_X64_MSR_TSC_INVARIANT_CONTROL:
1491
/* Only bit 0 is supported */
1492
if (data & ~HV_EXPOSE_INVARIANT_TSC)
1493
return 1;
1494
1495
/* The feature can't be disabled from the guest */
1496
if (!host && hv->hv_invtsc_control && !data)
1497
return 1;
1498
1499
hv->hv_invtsc_control = data;
1500
break;
1501
case HV_X64_MSR_SYNDBG_OPTIONS:
1502
case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
1503
return syndbg_set_msr(vcpu, msr, data, host);
1504
default:
1505
kvm_pr_unimpl_wrmsr(vcpu, msr, data);
1506
return 1;
1507
}
1508
return 0;
1509
}
1510
1511
/* Calculate cpu time spent by current task in 100ns units */
1512
static u64 current_task_runtime_100ns(void)
1513
{
1514
u64 utime, stime;
1515
1516
task_cputime_adjusted(current, &utime, &stime);
1517
1518
return div_u64(utime + stime, 100);
1519
}
1520
1521
static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
1522
{
1523
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
1524
1525
if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
1526
return 1;
1527
1528
switch (msr) {
1529
case HV_X64_MSR_VP_INDEX: {
1530
struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
1531
u32 new_vp_index = (u32)data;
1532
1533
if (!host || new_vp_index >= KVM_MAX_VCPUS)
1534
return 1;
1535
1536
if (new_vp_index == hv_vcpu->vp_index)
1537
return 0;
1538
1539
/*
1540
* The VP index is initialized to vcpu_index by
1541
* kvm_hv_vcpu_postcreate so they initially match. Now the
1542
* VP index is changing, adjust num_mismatched_vp_indexes if
1543
* it now matches or no longer matches vcpu_idx.
1544
*/
1545
if (hv_vcpu->vp_index == vcpu->vcpu_idx)
1546
atomic_inc(&hv->num_mismatched_vp_indexes);
1547
else if (new_vp_index == vcpu->vcpu_idx)
1548
atomic_dec(&hv->num_mismatched_vp_indexes);
1549
1550
hv_vcpu->vp_index = new_vp_index;
1551
break;
1552
}
1553
case HV_X64_MSR_VP_ASSIST_PAGE: {
1554
u64 gfn;
1555
unsigned long addr;
1556
1557
if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) {
1558
hv_vcpu->hv_vapic = data;
1559
if (kvm_lapic_set_pv_eoi(vcpu, 0, 0))
1560
return 1;
1561
break;
1562
}
1563
gfn = data >> HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT;
1564
addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
1565
if (kvm_is_error_hva(addr))
1566
return 1;
1567
1568
/*
1569
* Clear apic_assist portion of struct hv_vp_assist_page
1570
* only, there can be valuable data in the rest which needs
1571
* to be preserved e.g. on migration.
1572
*/
1573
if (__put_user(0, (u32 __user *)addr))
1574
return 1;
1575
hv_vcpu->hv_vapic = data;
1576
kvm_vcpu_mark_page_dirty(vcpu, gfn);
1577
if (kvm_lapic_set_pv_eoi(vcpu,
1578
gfn_to_gpa(gfn) | KVM_MSR_ENABLED,
1579
sizeof(struct hv_vp_assist_page)))
1580
return 1;
1581
break;
1582
}
1583
case HV_X64_MSR_EOI:
1584
return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
1585
case HV_X64_MSR_ICR:
1586
return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
1587
case HV_X64_MSR_TPR:
1588
return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
1589
case HV_X64_MSR_VP_RUNTIME:
1590
if (!host)
1591
return 1;
1592
hv_vcpu->runtime_offset = data - current_task_runtime_100ns();
1593
break;
1594
case HV_X64_MSR_SCONTROL:
1595
case HV_X64_MSR_SVERSION:
1596
case HV_X64_MSR_SIEFP:
1597
case HV_X64_MSR_SIMP:
1598
case HV_X64_MSR_EOM:
1599
case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
1600
return synic_set_msr(to_hv_synic(vcpu), msr, data, host);
1601
case HV_X64_MSR_STIMER0_CONFIG:
1602
case HV_X64_MSR_STIMER1_CONFIG:
1603
case HV_X64_MSR_STIMER2_CONFIG:
1604
case HV_X64_MSR_STIMER3_CONFIG: {
1605
int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;
1606
1607
return stimer_set_config(to_hv_stimer(vcpu, timer_index),
1608
data, host);
1609
}
1610
case HV_X64_MSR_STIMER0_COUNT:
1611
case HV_X64_MSR_STIMER1_COUNT:
1612
case HV_X64_MSR_STIMER2_COUNT:
1613
case HV_X64_MSR_STIMER3_COUNT: {
1614
int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;
1615
1616
return stimer_set_count(to_hv_stimer(vcpu, timer_index),
1617
data, host);
1618
}
1619
case HV_X64_MSR_TSC_FREQUENCY:
1620
case HV_X64_MSR_APIC_FREQUENCY:
1621
/* read-only, but still ignore it if host-initiated */
1622
if (!host)
1623
return 1;
1624
break;
1625
default:
1626
kvm_pr_unimpl_wrmsr(vcpu, msr, data);
1627
return 1;
1628
}
1629
1630
return 0;
1631
}
1632
1633
static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
1634
bool host)
1635
{
1636
u64 data = 0;
1637
struct kvm *kvm = vcpu->kvm;
1638
struct kvm_hv *hv = to_kvm_hv(kvm);
1639
1640
if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr)))
1641
return 1;
1642
1643
switch (msr) {
1644
case HV_X64_MSR_GUEST_OS_ID:
1645
data = hv->hv_guest_os_id;
1646
break;
1647
case HV_X64_MSR_HYPERCALL:
1648
data = hv->hv_hypercall;
1649
break;
1650
case HV_X64_MSR_TIME_REF_COUNT:
1651
data = get_time_ref_counter(kvm);
1652
break;
1653
case HV_X64_MSR_REFERENCE_TSC:
1654
data = hv->hv_tsc_page;
1655
break;
1656
case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
1657
return kvm_hv_msr_get_crash_data(kvm,
1658
msr - HV_X64_MSR_CRASH_P0,
1659
pdata);
1660
case HV_X64_MSR_CRASH_CTL:
1661
return kvm_hv_msr_get_crash_ctl(kvm, pdata);
1662
case HV_X64_MSR_RESET:
1663
data = 0;
1664
break;
1665
case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
1666
data = hv->hv_reenlightenment_control;
1667
break;
1668
case HV_X64_MSR_TSC_EMULATION_CONTROL:
1669
data = hv->hv_tsc_emulation_control;
1670
break;
1671
case HV_X64_MSR_TSC_EMULATION_STATUS:
1672
data = hv->hv_tsc_emulation_status;
1673
break;
1674
case HV_X64_MSR_TSC_INVARIANT_CONTROL:
1675
data = hv->hv_invtsc_control;
1676
break;
1677
case HV_X64_MSR_SYNDBG_OPTIONS:
1678
case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
1679
return syndbg_get_msr(vcpu, msr, pdata, host);
1680
default:
1681
kvm_pr_unimpl_rdmsr(vcpu, msr);
1682
return 1;
1683
}
1684
1685
*pdata = data;
1686
return 0;
1687
}
1688
1689
static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
1690
bool host)
1691
{
1692
u64 data = 0;
1693
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
1694
1695
if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
1696
return 1;
1697
1698
switch (msr) {
1699
case HV_X64_MSR_VP_INDEX:
1700
data = hv_vcpu->vp_index;
1701
break;
1702
case HV_X64_MSR_EOI:
1703
return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
1704
case HV_X64_MSR_ICR:
1705
return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
1706
case HV_X64_MSR_TPR:
1707
return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
1708
case HV_X64_MSR_VP_ASSIST_PAGE:
1709
data = hv_vcpu->hv_vapic;
1710
break;
1711
case HV_X64_MSR_VP_RUNTIME:
1712
data = current_task_runtime_100ns() + hv_vcpu->runtime_offset;
1713
break;
1714
case HV_X64_MSR_SCONTROL:
1715
case HV_X64_MSR_SVERSION:
1716
case HV_X64_MSR_SIEFP:
1717
case HV_X64_MSR_SIMP:
1718
case HV_X64_MSR_EOM:
1719
case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
1720
return synic_get_msr(to_hv_synic(vcpu), msr, pdata, host);
1721
case HV_X64_MSR_STIMER0_CONFIG:
1722
case HV_X64_MSR_STIMER1_CONFIG:
1723
case HV_X64_MSR_STIMER2_CONFIG:
1724
case HV_X64_MSR_STIMER3_CONFIG: {
1725
int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;
1726
1727
return stimer_get_config(to_hv_stimer(vcpu, timer_index),
1728
pdata);
1729
}
1730
case HV_X64_MSR_STIMER0_COUNT:
1731
case HV_X64_MSR_STIMER1_COUNT:
1732
case HV_X64_MSR_STIMER2_COUNT:
1733
case HV_X64_MSR_STIMER3_COUNT: {
1734
int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;
1735
1736
return stimer_get_count(to_hv_stimer(vcpu, timer_index),
1737
pdata);
1738
}
1739
case HV_X64_MSR_TSC_FREQUENCY:
1740
data = (u64)vcpu->arch.virtual_tsc_khz * 1000;
1741
break;
1742
case HV_X64_MSR_APIC_FREQUENCY:
1743
data = div64_u64(1000000000ULL,
1744
vcpu->kvm->arch.apic_bus_cycle_ns);
1745
break;
1746
default:
1747
kvm_pr_unimpl_rdmsr(vcpu, msr);
1748
return 1;
1749
}
1750
*pdata = data;
1751
return 0;
1752
}
1753
1754
int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
1755
{
1756
struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
1757
1758
if (!host && !vcpu->arch.hyperv_enabled)
1759
return 1;
1760
1761
if (kvm_hv_vcpu_init(vcpu))
1762
return 1;
1763
1764
if (kvm_hv_msr_partition_wide(msr)) {
1765
int r;
1766
1767
mutex_lock(&hv->hv_lock);
1768
r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
1769
mutex_unlock(&hv->hv_lock);
1770
return r;
1771
} else
1772
return kvm_hv_set_msr(vcpu, msr, data, host);
1773
}
1774
1775
int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
1776
{
1777
struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
1778
1779
if (!host && !vcpu->arch.hyperv_enabled)
1780
return 1;
1781
1782
if (kvm_hv_vcpu_init(vcpu))
1783
return 1;
1784
1785
if (kvm_hv_msr_partition_wide(msr)) {
1786
int r;
1787
1788
mutex_lock(&hv->hv_lock);
1789
r = kvm_hv_get_msr_pw(vcpu, msr, pdata, host);
1790
mutex_unlock(&hv->hv_lock);
1791
return r;
1792
} else
1793
return kvm_hv_get_msr(vcpu, msr, pdata, host);
1794
}
1795
1796
static void sparse_set_to_vcpu_mask(struct kvm *kvm, u64 *sparse_banks,
1797
u64 valid_bank_mask, unsigned long *vcpu_mask)
1798
{
1799
struct kvm_hv *hv = to_kvm_hv(kvm);
1800
bool has_mismatch = atomic_read(&hv->num_mismatched_vp_indexes);
1801
u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
1802
struct kvm_vcpu *vcpu;
1803
int bank, sbank = 0;
1804
unsigned long i;
1805
u64 *bitmap;
1806
1807
BUILD_BUG_ON(sizeof(vp_bitmap) >
1808
sizeof(*vcpu_mask) * BITS_TO_LONGS(KVM_MAX_VCPUS));
1809
1810
/*
1811
* If vp_index == vcpu_idx for all vCPUs, fill vcpu_mask directly, else
1812
* fill a temporary buffer and manually test each vCPU's VP index.
1813
*/
1814
if (likely(!has_mismatch))
1815
bitmap = (u64 *)vcpu_mask;
1816
else
1817
bitmap = vp_bitmap;
1818
1819
/*
1820
* Each set of 64 VPs is packed into sparse_banks, with valid_bank_mask
1821
* having a '1' for each bank that exists in sparse_banks. Sets must
1822
* be in ascending order, i.e. bank0..bankN.
1823
*/
1824
memset(bitmap, 0, sizeof(vp_bitmap));
1825
for_each_set_bit(bank, (unsigned long *)&valid_bank_mask,
1826
KVM_HV_MAX_SPARSE_VCPU_SET_BITS)
1827
bitmap[bank] = sparse_banks[sbank++];
1828
1829
if (likely(!has_mismatch))
1830
return;
1831
1832
bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);
1833
kvm_for_each_vcpu(i, vcpu, kvm) {
1834
if (test_bit(kvm_hv_get_vpindex(vcpu), (unsigned long *)vp_bitmap))
1835
__set_bit(i, vcpu_mask);
1836
}
1837
}
1838
1839
static bool hv_is_vp_in_sparse_set(u32 vp_id, u64 valid_bank_mask, u64 sparse_banks[])
1840
{
1841
int valid_bit_nr = vp_id / HV_VCPUS_PER_SPARSE_BANK;
1842
unsigned long sbank;
1843
1844
if (!test_bit(valid_bit_nr, (unsigned long *)&valid_bank_mask))
1845
return false;
1846
1847
/*
1848
* The index into the sparse bank is the number of preceding bits in
1849
* the valid mask. Optimize for VMs with <64 vCPUs by skipping the
1850
* fancy math if there can't possibly be preceding bits.
1851
*/
1852
if (valid_bit_nr)
1853
sbank = hweight64(valid_bank_mask & GENMASK_ULL(valid_bit_nr - 1, 0));
1854
else
1855
sbank = 0;
1856
1857
return test_bit(vp_id % HV_VCPUS_PER_SPARSE_BANK,
1858
(unsigned long *)&sparse_banks[sbank]);
1859
}
1860
1861
struct kvm_hv_hcall {
1862
/* Hypercall input data */
1863
u64 param;
1864
u64 ingpa;
1865
u64 outgpa;
1866
u16 code;
1867
u16 var_cnt;
1868
u16 rep_cnt;
1869
u16 rep_idx;
1870
bool fast;
1871
bool rep;
1872
sse128_t xmm[HV_HYPERCALL_MAX_XMM_REGISTERS];
1873
1874
/*
1875
* Current read offset when KVM reads hypercall input data gradually,
1876
* either offset in bytes from 'ingpa' for regular hypercalls or the
1877
* number of already consumed 'XMM halves' for 'fast' hypercalls.
1878
*/
1879
union {
1880
gpa_t data_offset;
1881
int consumed_xmm_halves;
1882
};
1883
};
1884
1885
1886
static int kvm_hv_get_hc_data(struct kvm *kvm, struct kvm_hv_hcall *hc,
1887
u16 orig_cnt, u16 cnt_cap, u64 *data)
1888
{
1889
/*
1890
* Preserve the original count when ignoring entries via a "cap", KVM
1891
* still needs to validate the guest input (though the non-XMM path
1892
* punts on the checks).
1893
*/
1894
u16 cnt = min(orig_cnt, cnt_cap);
1895
int i, j;
1896
1897
if (hc->fast) {
1898
/*
1899
* Each XMM holds two sparse banks, but do not count halves that
1900
* have already been consumed for hypercall parameters.
1901
*/
1902
if (orig_cnt > 2 * HV_HYPERCALL_MAX_XMM_REGISTERS - hc->consumed_xmm_halves)
1903
return HV_STATUS_INVALID_HYPERCALL_INPUT;
1904
1905
for (i = 0; i < cnt; i++) {
1906
j = i + hc->consumed_xmm_halves;
1907
if (j % 2)
1908
data[i] = sse128_hi(hc->xmm[j / 2]);
1909
else
1910
data[i] = sse128_lo(hc->xmm[j / 2]);
1911
}
1912
return 0;
1913
}
1914
1915
return kvm_read_guest(kvm, hc->ingpa + hc->data_offset, data,
1916
cnt * sizeof(*data));
1917
}
1918
1919
static u64 kvm_get_sparse_vp_set(struct kvm *kvm, struct kvm_hv_hcall *hc,
1920
u64 *sparse_banks)
1921
{
1922
if (hc->var_cnt > HV_MAX_SPARSE_VCPU_BANKS)
1923
return -EINVAL;
1924
1925
/* Cap var_cnt to ignore banks that cannot contain a legal VP index. */
1926
return kvm_hv_get_hc_data(kvm, hc, hc->var_cnt, KVM_HV_MAX_SPARSE_VCPU_SET_BITS,
1927
sparse_banks);
1928
}
1929
1930
static int kvm_hv_get_tlb_flush_entries(struct kvm *kvm, struct kvm_hv_hcall *hc, u64 entries[])
1931
{
1932
return kvm_hv_get_hc_data(kvm, hc, hc->rep_cnt, hc->rep_cnt, entries);
1933
}
1934
1935
static void hv_tlb_flush_enqueue(struct kvm_vcpu *vcpu,
1936
struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo,
1937
u64 *entries, int count)
1938
{
1939
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
1940
u64 flush_all_entry = KVM_HV_TLB_FLUSHALL_ENTRY;
1941
1942
if (!hv_vcpu)
1943
return;
1944
1945
spin_lock(&tlb_flush_fifo->write_lock);
1946
1947
/*
1948
* All entries should fit on the fifo, leaving one slot free for the
1949
* 'flush all' entry in case another request comes in. If there's not
1950
* enough space, just put the 'flush all' entry there instead.
1951
*/
1952
if (count && entries && count < kfifo_avail(&tlb_flush_fifo->entries)) {
1953
WARN_ON(kfifo_in(&tlb_flush_fifo->entries, entries, count) != count);
1954
goto out_unlock;
1955
}
1956
1957
/*
1958
* Note: full fifo always contains 'flush all' entry, no need to check the
1959
* return value.
1960
*/
1961
kfifo_in(&tlb_flush_fifo->entries, &flush_all_entry, 1);
1962
1963
out_unlock:
1964
spin_unlock(&tlb_flush_fifo->write_lock);
1965
}
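/*
 * Standalone sketch (not kernel code) of the enqueue policy above: a
 * bounded queue of flush requests where the producer only accepts a batch
 * if it leaves at least one slot free; otherwise the whole batch collapses
 * into a single 'flush everything' sentinel. Sizes and names are local to
 * the sketch and no locking is shown.
 */
#include <stdint.h>
#include <string.h>

#define SKETCH_FIFO_SIZE	16
#define SKETCH_FLUSH_ALL	(~0ULL)

struct sketch_flush_fifo {
	uint64_t entries[SKETCH_FIFO_SIZE];
	int count;
};

static void sketch_flush_enqueue(struct sketch_flush_fifo *fifo,
				 const uint64_t *entries, int count)
{
	int avail = SKETCH_FIFO_SIZE - fifo->count;

	/* Keep one slot spare so a later 'flush all' can always be recorded. */
	if (count && entries && count < avail) {
		memcpy(&fifo->entries[fifo->count], entries,
		       count * sizeof(*entries));
		fifo->count += count;
		return;
	}

	/* Not enough room (or nothing precise to queue): degrade to flush-all. */
	if (avail)
		fifo->entries[fifo->count++] = SKETCH_FLUSH_ALL;
}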
1966
1967
int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
1968
{
1969
struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;
1970
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
1971
u64 entries[KVM_HV_TLB_FLUSH_FIFO_SIZE];
1972
int i, j, count;
1973
gva_t gva;
1974
1975
if (!tdp_enabled || !hv_vcpu)
1976
return -EINVAL;
1977
1978
tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(vcpu, is_guest_mode(vcpu));
1979
1980
count = kfifo_out(&tlb_flush_fifo->entries, entries, KVM_HV_TLB_FLUSH_FIFO_SIZE);
1981
1982
for (i = 0; i < count; i++) {
1983
if (entries[i] == KVM_HV_TLB_FLUSHALL_ENTRY)
1984
goto out_flush_all;
1985
1986
if (is_noncanonical_invlpg_address(entries[i], vcpu))
1987
continue;
1988
1989
/*
1990
* Lower 12 bits of 'address' encode the number of additional
1991
* pages to flush.
1992
*/
1993
gva = entries[i] & PAGE_MASK;
1994
for (j = 0; j < (entries[i] & ~PAGE_MASK) + 1; j++)
1995
kvm_x86_call(flush_tlb_gva)(vcpu, gva + j * PAGE_SIZE);
1996
1997
++vcpu->stat.tlb_flush;
1998
}
1999
return 0;
2000
2001
out_flush_all:
2002
kfifo_reset_out(&tlb_flush_fifo->entries);
2003
2004
/* Fall back to full flush. */
2005
return -ENOSPC;
2006
}
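/*
 * Standalone userspace sketch (not kernel code) of the entry format
 * drained above: bits 63:12 carry the page-aligned GVA and bits 11:0 the
 * number of *additional* 4KiB pages to flush, so an entry always covers at
 * least one page. The callback is a stand-in for the per-GVA flush hook.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SHIFT	12
#define SKETCH_PAGE_SIZE	(1ULL << SKETCH_PAGE_SHIFT)
#define SKETCH_PAGE_MASK	(~(SKETCH_PAGE_SIZE - 1))

static void sketch_flush_entry(uint64_t entry,
			       void (*flush_gva)(uint64_t gva))
{
	uint64_t gva = entry & SKETCH_PAGE_MASK;
	uint64_t extra = entry & ~SKETCH_PAGE_MASK;	/* additional pages */
	uint64_t j;

	for (j = 0; j < extra + 1; j++)
		flush_gva(gva + j * SKETCH_PAGE_SIZE);
}

static void sketch_print_gva(uint64_t gva)
{
	printf("flush %#llx\n", (unsigned long long)gva);
}

/* Example: an entry for GVA 0xffff800000001000 plus one additional page. */
int main(void)
{
	sketch_flush_entry(0xffff800000001000ULL | 1, sketch_print_gva);
	return 0;
}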
2007
2008
static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
2009
{
2010
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
2011
unsigned long *vcpu_mask = hv_vcpu->vcpu_mask;
2012
u64 *sparse_banks = hv_vcpu->sparse_banks;
2013
struct kvm *kvm = vcpu->kvm;
2014
struct hv_tlb_flush_ex flush_ex;
2015
struct hv_tlb_flush flush;
2016
struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;
2017
/*
2018
* Normally, there can be no more than 'KVM_HV_TLB_FLUSH_FIFO_SIZE'
2019
* entries on the TLB flush fifo. The last entry, however, needs to be
2020
* always left free for 'flush all' entry which gets placed when
2021
* there is not enough space to put all the requested entries.
2022
*/
2023
u64 __tlb_flush_entries[KVM_HV_TLB_FLUSH_FIFO_SIZE - 1];
2024
u64 *tlb_flush_entries;
2025
u64 valid_bank_mask;
2026
struct kvm_vcpu *v;
2027
unsigned long i;
2028
bool all_cpus;
2029
2030
/*
2031
* The Hyper-V TLFS doesn't allow more than HV_MAX_SPARSE_VCPU_BANKS
2032
* sparse banks. Fail the build if KVM's max allowed number of
2033
* vCPUs (>4096) exceeds this limit.
2034
*/
2035
BUILD_BUG_ON(KVM_HV_MAX_SPARSE_VCPU_SET_BITS > HV_MAX_SPARSE_VCPU_BANKS);
2036
2037
/*
2038
* 'Slow' hypercall's first parameter is the address in guest's memory
2039
* where hypercall parameters are placed. This is either a GPA or a
2040
* nested GPA when KVM is handling the call from L2 ('direct' TLB
2041
* flush). Translate the address here so the memory can be uniformly
2042
* read with kvm_read_guest().
2043
*/
2044
if (!hc->fast && is_guest_mode(vcpu)) {
2045
hc->ingpa = translate_nested_gpa(vcpu, hc->ingpa, 0, NULL);
2046
if (unlikely(hc->ingpa == INVALID_GPA))
2047
return HV_STATUS_INVALID_HYPERCALL_INPUT;
2048
}
2049
2050
if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST ||
2051
hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE) {
2052
if (hc->fast) {
2053
flush.address_space = hc->ingpa;
2054
flush.flags = hc->outgpa;
2055
flush.processor_mask = sse128_lo(hc->xmm[0]);
2056
hc->consumed_xmm_halves = 1;
2057
} else {
2058
if (unlikely(kvm_read_guest(kvm, hc->ingpa,
2059
&flush, sizeof(flush))))
2060
return HV_STATUS_INVALID_HYPERCALL_INPUT;
2061
hc->data_offset = sizeof(flush);
2062
}
2063
2064
trace_kvm_hv_flush_tlb(flush.processor_mask,
2065
flush.address_space, flush.flags,
2066
is_guest_mode(vcpu));
2067
2068
valid_bank_mask = BIT_ULL(0);
2069
sparse_banks[0] = flush.processor_mask;
2070
2071
/*
2072
* Work around possible WS2012 bug: it sends hypercalls
2073
* with processor_mask = 0x0 and HV_FLUSH_ALL_PROCESSORS clear,
2074
* while also expecting us to flush something and crashing if
2075
* we don't. Let's treat processor_mask == 0 the same as
2076
* HV_FLUSH_ALL_PROCESSORS.
2077
*/
2078
all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) ||
2079
flush.processor_mask == 0;
2080
} else {
2081
if (hc->fast) {
2082
flush_ex.address_space = hc->ingpa;
2083
flush_ex.flags = hc->outgpa;
2084
memcpy(&flush_ex.hv_vp_set,
2085
&hc->xmm[0], sizeof(hc->xmm[0]));
2086
hc->consumed_xmm_halves = 2;
2087
} else {
2088
if (unlikely(kvm_read_guest(kvm, hc->ingpa, &flush_ex,
2089
sizeof(flush_ex))))
2090
return HV_STATUS_INVALID_HYPERCALL_INPUT;
2091
hc->data_offset = sizeof(flush_ex);
2092
}
2093
2094
trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask,
2095
flush_ex.hv_vp_set.format,
2096
flush_ex.address_space,
2097
flush_ex.flags, is_guest_mode(vcpu));
2098
2099
valid_bank_mask = flush_ex.hv_vp_set.valid_bank_mask;
2100
all_cpus = flush_ex.hv_vp_set.format !=
2101
HV_GENERIC_SET_SPARSE_4K;
2102
2103
if (hc->var_cnt != hweight64(valid_bank_mask))
2104
return HV_STATUS_INVALID_HYPERCALL_INPUT;
2105
2106
if (!all_cpus) {
2107
if (!hc->var_cnt)
2108
goto ret_success;
2109
2110
if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks))
2111
return HV_STATUS_INVALID_HYPERCALL_INPUT;
2112
}
2113
2114
/*
2115
* Hyper-V TLFS doesn't explicitly forbid non-empty sparse vCPU
2116
* banks (and, thus, non-zero 'var_cnt') for the 'all vCPUs'
2117
* case (HV_GENERIC_SET_ALL). Always adjust data_offset and
2118
* consumed_xmm_halves to make sure TLB flush entries are read
2119
* from the correct offset.
2120
*/
2121
if (hc->fast)
2122
hc->consumed_xmm_halves += hc->var_cnt;
2123
else
2124
hc->data_offset += hc->var_cnt * sizeof(sparse_banks[0]);
2125
}
2126
2127
if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE ||
2128
hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX ||
2129
hc->rep_cnt > ARRAY_SIZE(__tlb_flush_entries)) {
2130
tlb_flush_entries = NULL;
2131
} else {
2132
if (kvm_hv_get_tlb_flush_entries(kvm, hc, __tlb_flush_entries))
2133
return HV_STATUS_INVALID_HYPERCALL_INPUT;
2134
tlb_flush_entries = __tlb_flush_entries;
2135
}
2136
2137
/*
2138
* vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
2139
* analyze it here; flush the TLB regardless of the specified address space.
2140
*/
2141
if (all_cpus && !is_guest_mode(vcpu)) {
2142
kvm_for_each_vcpu(i, v, kvm) {
2143
tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, false);
2144
hv_tlb_flush_enqueue(v, tlb_flush_fifo,
2145
tlb_flush_entries, hc->rep_cnt);
2146
}
2147
2148
kvm_make_all_cpus_request(kvm, KVM_REQ_HV_TLB_FLUSH);
2149
} else if (!is_guest_mode(vcpu)) {
2150
sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, vcpu_mask);
2151
2152
for_each_set_bit(i, vcpu_mask, KVM_MAX_VCPUS) {
2153
v = kvm_get_vcpu(kvm, i);
2154
if (!v)
2155
continue;
2156
tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, false);
2157
hv_tlb_flush_enqueue(v, tlb_flush_fifo,
2158
tlb_flush_entries, hc->rep_cnt);
2159
}
2160
2161
kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH, vcpu_mask);
2162
} else {
2163
struct kvm_vcpu_hv *hv_v;
2164
2165
bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);
2166
2167
kvm_for_each_vcpu(i, v, kvm) {
2168
hv_v = to_hv_vcpu(v);
2169
2170
/*
2171
* The following check races with nested vCPUs entering/exiting
2172
* and/or migrating between L1's vCPUs, however the only case when
2173
* KVM *must* flush the TLB is when the target L2 vCPU keeps
2174
* running on the same L1 vCPU from the moment of the request until
2175
* kvm_hv_flush_tlb() returns. TLB is fully flushed in all other
2176
* cases, e.g. when the target L2 vCPU migrates to a different L1
2177
* vCPU or when the corresponding L1 vCPU temporarily switches to a
2178
* different L2 vCPU while the request is being processed.
2179
*/
2180
if (!hv_v || hv_v->nested.vm_id != hv_vcpu->nested.vm_id)
2181
continue;
2182
2183
if (!all_cpus &&
2184
!hv_is_vp_in_sparse_set(hv_v->nested.vp_id, valid_bank_mask,
2185
sparse_banks))
2186
continue;
2187
2188
__set_bit(i, vcpu_mask);
2189
tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, true);
2190
hv_tlb_flush_enqueue(v, tlb_flush_fifo,
2191
tlb_flush_entries, hc->rep_cnt);
2192
}
2193
2194
kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH, vcpu_mask);
2195
}
2196
2197
ret_success:
2198
/* We always do full TLB flush, set 'Reps completed' = 'Rep Count' */
2199
return (u64)HV_STATUS_SUCCESS |
2200
((u64)hc->rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
2201
}
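/*
 * Standalone sketch (not kernel code) of the return value built above: a
 * rep hypercall reports its status in the low 16 bits and the number of
 * completed reps starting at bit 32 (the TLFS result layout, restated here
 * rather than taken from this file, so treat the offset as an assumption).
 * Since the flush path always completes every rep, 'reps completed' is
 * simply the requested rep count.
 */
#include <stdint.h>

#define SKETCH_STATUS_SUCCESS		0
#define SKETCH_REP_COMP_OFFSET		32	/* assumed result-field offset */

static uint64_t sketch_rep_result(uint16_t status, uint16_t reps_done)
{
	return (uint64_t)status |
	       ((uint64_t)reps_done << SKETCH_REP_COMP_OFFSET);
}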
2202
2203
static void kvm_hv_send_ipi_to_many(struct kvm *kvm, u32 vector,
2204
u64 *sparse_banks, u64 valid_bank_mask)
2205
{
2206
struct kvm_lapic_irq irq = {
2207
.delivery_mode = APIC_DM_FIXED,
2208
.vector = vector
2209
};
2210
struct kvm_vcpu *vcpu;
2211
unsigned long i;
2212
2213
kvm_for_each_vcpu(i, vcpu, kvm) {
2214
if (sparse_banks &&
2215
!hv_is_vp_in_sparse_set(kvm_hv_get_vpindex(vcpu),
2216
valid_bank_mask, sparse_banks))
2217
continue;
2218
2219
/* We fail only when APIC is disabled */
2220
kvm_apic_set_irq(vcpu, &irq, NULL);
2221
}
2222
}
2223
2224
static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
2225
{
2226
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
2227
u64 *sparse_banks = hv_vcpu->sparse_banks;
2228
struct kvm *kvm = vcpu->kvm;
2229
struct hv_send_ipi_ex send_ipi_ex;
2230
struct hv_send_ipi send_ipi;
2231
u64 valid_bank_mask;
2232
u32 vector;
2233
bool all_cpus;
2234
2235
if (!lapic_in_kernel(vcpu))
2236
return HV_STATUS_INVALID_HYPERCALL_INPUT;
2237
2238
if (hc->code == HVCALL_SEND_IPI) {
2239
if (!hc->fast) {
2240
if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi,
2241
sizeof(send_ipi))))
2242
return HV_STATUS_INVALID_HYPERCALL_INPUT;
2243
sparse_banks[0] = send_ipi.cpu_mask;
2244
vector = send_ipi.vector;
2245
} else {
2246
/* 'reserved' part of hv_send_ipi should be 0 */
2247
if (unlikely(hc->ingpa >> 32 != 0))
2248
return HV_STATUS_INVALID_HYPERCALL_INPUT;
2249
sparse_banks[0] = hc->outgpa;
2250
vector = (u32)hc->ingpa;
2251
}
2252
all_cpus = false;
2253
valid_bank_mask = BIT_ULL(0);
2254
2255
trace_kvm_hv_send_ipi(vector, sparse_banks[0]);
2256
} else {
2257
if (!hc->fast) {
2258
if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi_ex,
2259
sizeof(send_ipi_ex))))
2260
return HV_STATUS_INVALID_HYPERCALL_INPUT;
2261
} else {
2262
send_ipi_ex.vector = (u32)hc->ingpa;
2263
send_ipi_ex.vp_set.format = hc->outgpa;
2264
send_ipi_ex.vp_set.valid_bank_mask = sse128_lo(hc->xmm[0]);
2265
}
2266
2267
trace_kvm_hv_send_ipi_ex(send_ipi_ex.vector,
2268
send_ipi_ex.vp_set.format,
2269
send_ipi_ex.vp_set.valid_bank_mask);
2270
2271
vector = send_ipi_ex.vector;
2272
valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask;
2273
all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;
2274
2275
if (hc->var_cnt != hweight64(valid_bank_mask))
2276
return HV_STATUS_INVALID_HYPERCALL_INPUT;
2277
2278
if (all_cpus)
2279
goto check_and_send_ipi;
2280
2281
if (!hc->var_cnt)
2282
goto ret_success;
2283
2284
if (!hc->fast)
2285
hc->data_offset = offsetof(struct hv_send_ipi_ex,
2286
vp_set.bank_contents);
2287
else
2288
hc->consumed_xmm_halves = 1;
2289
2290
if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks))
2291
return HV_STATUS_INVALID_HYPERCALL_INPUT;
2292
}
2293
2294
check_and_send_ipi:
2295
if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
2296
return HV_STATUS_INVALID_HYPERCALL_INPUT;
2297
2298
if (all_cpus)
2299
kvm_hv_send_ipi_to_many(kvm, vector, NULL, 0);
2300
else
2301
kvm_hv_send_ipi_to_many(kvm, vector, sparse_banks, valid_bank_mask);
2302
2303
ret_success:
2304
return HV_STATUS_SUCCESS;
2305
}
2306
2307
void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu, bool hyperv_enabled)
2308
{
2309
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
2310
struct kvm_cpuid_entry2 *entry;
2311
2312
vcpu->arch.hyperv_enabled = hyperv_enabled;
2313
2314
if (!hv_vcpu) {
2315
/*
2316
* KVM should have already allocated kvm_vcpu_hv if Hyper-V is
2317
* enabled in CPUID.
2318
*/
2319
WARN_ON_ONCE(vcpu->arch.hyperv_enabled);
2320
return;
2321
}
2322
2323
memset(&hv_vcpu->cpuid_cache, 0, sizeof(hv_vcpu->cpuid_cache));
2324
2325
if (!vcpu->arch.hyperv_enabled)
2326
return;
2327
2328
entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES);
2329
if (entry) {
2330
hv_vcpu->cpuid_cache.features_eax = entry->eax;
2331
hv_vcpu->cpuid_cache.features_ebx = entry->ebx;
2332
hv_vcpu->cpuid_cache.features_edx = entry->edx;
2333
}
2334
2335
entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO);
2336
if (entry) {
2337
hv_vcpu->cpuid_cache.enlightenments_eax = entry->eax;
2338
hv_vcpu->cpuid_cache.enlightenments_ebx = entry->ebx;
2339
}
2340
2341
entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);
2342
if (entry)
2343
hv_vcpu->cpuid_cache.syndbg_cap_eax = entry->eax;
2344
2345
entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_NESTED_FEATURES);
2346
if (entry) {
2347
hv_vcpu->cpuid_cache.nested_eax = entry->eax;
2348
hv_vcpu->cpuid_cache.nested_ebx = entry->ebx;
2349
}
2350
}
2351
2352
int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce)
2353
{
2354
struct kvm_vcpu_hv *hv_vcpu;
2355
int ret = 0;
2356
2357
if (!to_hv_vcpu(vcpu)) {
2358
if (enforce) {
2359
ret = kvm_hv_vcpu_init(vcpu);
2360
if (ret)
2361
return ret;
2362
} else {
2363
return 0;
2364
}
2365
}
2366
2367
hv_vcpu = to_hv_vcpu(vcpu);
2368
hv_vcpu->enforce_cpuid = enforce;
2369
2370
return ret;
2371
}
2372
2373
static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
2374
{
2375
bool longmode;
2376
2377
longmode = is_64_bit_hypercall(vcpu);
2378
if (longmode)
2379
kvm_rax_write(vcpu, result);
2380
else {
2381
kvm_rdx_write(vcpu, result >> 32);
2382
kvm_rax_write(vcpu, result & 0xffffffff);
2383
}
2384
}
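/*
 * Standalone sketch (not kernel code) of the result write-back above: a
 * 64-bit caller gets the whole value in RAX, while a 32-bit caller gets it
 * split across EDX (high half) and EAX (low half). The register struct is
 * a stand-in for the vCPU register accessors.
 */
#include <stdbool.h>
#include <stdint.h>

struct sketch_regs {
	uint64_t rax;
	uint64_t rdx;
};

static void sketch_set_hcall_result(struct sketch_regs *regs, bool longmode,
				    uint64_t result)
{
	if (longmode) {
		regs->rax = result;
	} else {
		regs->rdx = result >> 32;
		regs->rax = result & 0xffffffff;
	}
}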
2385
2386
static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
2387
{
2388
u32 tlb_lock_count = 0;
2389
int ret;
2390
2391
if (hv_result_success(result) && is_guest_mode(vcpu) &&
2392
kvm_hv_is_tlb_flush_hcall(vcpu) &&
2393
kvm_read_guest(vcpu->kvm, to_hv_vcpu(vcpu)->nested.pa_page_gpa,
2394
&tlb_lock_count, sizeof(tlb_lock_count)))
2395
result = HV_STATUS_INVALID_HYPERCALL_INPUT;
2396
2397
trace_kvm_hv_hypercall_done(result);
2398
kvm_hv_hypercall_set_result(vcpu, result);
2399
++vcpu->stat.hypercalls;
2400
2401
ret = kvm_skip_emulated_instruction(vcpu);
2402
2403
if (tlb_lock_count)
2404
kvm_x86_ops.nested_ops->hv_inject_synthetic_vmexit_post_tlb_flush(vcpu);
2405
2406
return ret;
2407
}
2408
2409
static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
2410
{
2411
return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result);
2412
}
2413
2414
static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
2415
{
2416
struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
2417
struct eventfd_ctx *eventfd;
2418
2419
if (unlikely(!hc->fast)) {
2420
int ret;
2421
gpa_t gpa = hc->ingpa;
2422
2423
if ((gpa & (__alignof__(hc->ingpa) - 1)) ||
2424
offset_in_page(gpa) + sizeof(hc->ingpa) > PAGE_SIZE)
2425
return HV_STATUS_INVALID_ALIGNMENT;
2426
2427
ret = kvm_vcpu_read_guest(vcpu, gpa,
2428
&hc->ingpa, sizeof(hc->ingpa));
2429
if (ret < 0)
2430
return HV_STATUS_INVALID_ALIGNMENT;
2431
}
2432
2433
/*
2434
* Per spec, bits 32-47 contain the extra "flag number". However, we
2435
* have no use for it, and in all known use cases it is zero, so just
2436
* report lookup failure if it isn't.
2437
*/
2438
if (hc->ingpa & 0xffff00000000ULL)
2439
return HV_STATUS_INVALID_PORT_ID;
2440
/* remaining bits are reserved-zero */
2441
if (hc->ingpa & ~KVM_HYPERV_CONN_ID_MASK)
2442
return HV_STATUS_INVALID_HYPERCALL_INPUT;
2443
2444
/* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */
2445
rcu_read_lock();
2446
eventfd = idr_find(&hv->conn_to_evt, hc->ingpa);
2447
rcu_read_unlock();
2448
if (!eventfd)
2449
return HV_STATUS_INVALID_PORT_ID;
2450
2451
eventfd_signal(eventfd);
2452
return HV_STATUS_SUCCESS;
2453
}
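/*
 * Standalone sketch (not kernel code) of the parameter check above for
 * HVCALL_SIGNAL_EVENT: bits 47:32 carry a "flag number" this code does not
 * support (so it must be zero), any other bit outside the connection-id
 * mask is reserved, and what remains is the connection id used to look up
 * the eventfd. The 24-bit connection-id width is an assumption of the
 * sketch, not taken from this file.
 */
#include <stdint.h>

#define SKETCH_CONN_ID_MASK	0x00ffffffULL		/* assumed width */
#define SKETCH_FLAG_NR_MASK	0x0000ffff00000000ULL

/* Returns the connection id, or a negative value for a malformed parameter. */
static int64_t sketch_parse_signal_event(uint64_t param)
{
	if (param & SKETCH_FLAG_NR_MASK)
		return -1;	/* unsupported flag number -> "invalid port" */
	if (param & ~SKETCH_CONN_ID_MASK)
		return -2;	/* reserved bits set -> "invalid input" */
	return (int64_t)(param & SKETCH_CONN_ID_MASK);
}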
2454
2455
static bool is_xmm_fast_hypercall(struct kvm_hv_hcall *hc)
2456
{
2457
switch (hc->code) {
2458
case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
2459
case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
2460
case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
2461
case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
2462
case HVCALL_SEND_IPI_EX:
2463
return true;
2464
}
2465
2466
return false;
2467
}
2468
2469
static void kvm_hv_hypercall_read_xmm(struct kvm_hv_hcall *hc)
2470
{
2471
int reg;
2472
2473
kvm_fpu_get();
2474
for (reg = 0; reg < HV_HYPERCALL_MAX_XMM_REGISTERS; reg++)
2475
_kvm_read_sse_reg(reg, &hc->xmm[reg]);
2476
kvm_fpu_put();
2477
}
2478
2479
static bool hv_check_hypercall_access(struct kvm_vcpu_hv *hv_vcpu, u16 code)
2480
{
2481
if (!hv_vcpu->enforce_cpuid)
2482
return true;
2483
2484
switch (code) {
2485
case HVCALL_NOTIFY_LONG_SPIN_WAIT:
2486
return hv_vcpu->cpuid_cache.enlightenments_ebx &&
2487
hv_vcpu->cpuid_cache.enlightenments_ebx != U32_MAX;
2488
case HVCALL_POST_MESSAGE:
2489
return hv_vcpu->cpuid_cache.features_ebx & HV_POST_MESSAGES;
2490
case HVCALL_SIGNAL_EVENT:
2491
return hv_vcpu->cpuid_cache.features_ebx & HV_SIGNAL_EVENTS;
2492
case HVCALL_POST_DEBUG_DATA:
2493
case HVCALL_RETRIEVE_DEBUG_DATA:
2494
case HVCALL_RESET_DEBUG_SESSION:
2495
/*
2496
* Return 'true' when SynDBG is disabled so the resulting code
2497
* will be HV_STATUS_INVALID_HYPERCALL_CODE.
2498
*/
2499
return !kvm_hv_is_syndbg_enabled(hv_vcpu->vcpu) ||
2500
hv_vcpu->cpuid_cache.features_ebx & HV_DEBUGGING;
2501
case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
2502
case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
2503
if (!(hv_vcpu->cpuid_cache.enlightenments_eax &
2504
HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
2505
return false;
2506
fallthrough;
2507
case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
2508
case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
2509
return hv_vcpu->cpuid_cache.enlightenments_eax &
2510
HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
2511
case HVCALL_SEND_IPI_EX:
2512
if (!(hv_vcpu->cpuid_cache.enlightenments_eax &
2513
HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
2514
return false;
2515
fallthrough;
2516
case HVCALL_SEND_IPI:
2517
return hv_vcpu->cpuid_cache.enlightenments_eax &
2518
HV_X64_CLUSTER_IPI_RECOMMENDED;
2519
case HV_EXT_CALL_QUERY_CAPABILITIES ... HV_EXT_CALL_MAX:
2520
return hv_vcpu->cpuid_cache.features_ebx &
2521
HV_ENABLE_EXTENDED_HYPERCALLS;
2522
default:
2523
break;
2524
}
2525
2526
return true;
2527
}
2528
2529
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
2530
{
2531
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
2532
struct kvm_hv_hcall hc;
2533
u64 ret = HV_STATUS_SUCCESS;
2534
2535
/*
2536
* a hypercall generates #UD from non-zero CPL or real mode,
2537
* per the Hyper-V spec
2538
*/
2539
if (kvm_x86_call(get_cpl)(vcpu) != 0 || !is_protmode(vcpu)) {
2540
kvm_queue_exception(vcpu, UD_VECTOR);
2541
return 1;
2542
}
2543
2544
#ifdef CONFIG_X86_64
2545
if (is_64_bit_hypercall(vcpu)) {
2546
hc.param = kvm_rcx_read(vcpu);
2547
hc.ingpa = kvm_rdx_read(vcpu);
2548
hc.outgpa = kvm_r8_read(vcpu);
2549
} else
2550
#endif
2551
{
2552
hc.param = ((u64)kvm_rdx_read(vcpu) << 32) |
2553
(kvm_rax_read(vcpu) & 0xffffffff);
2554
hc.ingpa = ((u64)kvm_rbx_read(vcpu) << 32) |
2555
(kvm_rcx_read(vcpu) & 0xffffffff);
2556
hc.outgpa = ((u64)kvm_rdi_read(vcpu) << 32) |
2557
(kvm_rsi_read(vcpu) & 0xffffffff);
2558
}
2559
2560
hc.code = hc.param & 0xffff;
2561
hc.var_cnt = (hc.param & HV_HYPERCALL_VARHEAD_MASK) >> HV_HYPERCALL_VARHEAD_OFFSET;
2562
hc.fast = !!(hc.param & HV_HYPERCALL_FAST_BIT);
2563
hc.rep_cnt = (hc.param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff;
2564
hc.rep_idx = (hc.param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;
2565
hc.rep = !!(hc.rep_cnt || hc.rep_idx);
2566
2567
trace_kvm_hv_hypercall(hc.code, hc.fast, hc.var_cnt, hc.rep_cnt,
2568
hc.rep_idx, hc.ingpa, hc.outgpa);
2569
2570
if (unlikely(!hv_check_hypercall_access(hv_vcpu, hc.code))) {
2571
ret = HV_STATUS_ACCESS_DENIED;
2572
goto hypercall_complete;
2573
}
2574
2575
if (unlikely(hc.param & HV_HYPERCALL_RSVD_MASK)) {
2576
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2577
goto hypercall_complete;
2578
}
2579
2580
if (hc.fast && is_xmm_fast_hypercall(&hc)) {
2581
if (unlikely(hv_vcpu->enforce_cpuid &&
2582
!(hv_vcpu->cpuid_cache.features_edx &
2583
HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE))) {
2584
kvm_queue_exception(vcpu, UD_VECTOR);
2585
return 1;
2586
}
2587
2588
kvm_hv_hypercall_read_xmm(&hc);
2589
}
2590
2591
switch (hc.code) {
2592
case HVCALL_NOTIFY_LONG_SPIN_WAIT:
2593
if (unlikely(hc.rep || hc.var_cnt)) {
2594
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2595
break;
2596
}
2597
kvm_vcpu_on_spin(vcpu, true);
2598
break;
2599
case HVCALL_SIGNAL_EVENT:
2600
if (unlikely(hc.rep || hc.var_cnt)) {
2601
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2602
break;
2603
}
2604
ret = kvm_hvcall_signal_event(vcpu, &hc);
2605
if (ret != HV_STATUS_INVALID_PORT_ID)
2606
break;
2607
fallthrough; /* maybe userspace knows this conn_id */
2608
case HVCALL_POST_MESSAGE:
2609
/* don't bother userspace if it has no way to handle it */
2610
if (unlikely(hc.rep || hc.var_cnt || !to_hv_synic(vcpu)->active)) {
2611
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2612
break;
2613
}
2614
goto hypercall_userspace_exit;
2615
case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
2616
if (unlikely(hc.var_cnt)) {
2617
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2618
break;
2619
}
2620
fallthrough;
2621
case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
2622
if (unlikely(!hc.rep_cnt || hc.rep_idx)) {
2623
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2624
break;
2625
}
2626
ret = kvm_hv_flush_tlb(vcpu, &hc);
2627
break;
2628
case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
2629
if (unlikely(hc.var_cnt)) {
2630
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2631
break;
2632
}
2633
fallthrough;
2634
case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
2635
if (unlikely(hc.rep)) {
2636
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2637
break;
2638
}
2639
ret = kvm_hv_flush_tlb(vcpu, &hc);
2640
break;
2641
case HVCALL_SEND_IPI:
2642
if (unlikely(hc.var_cnt)) {
2643
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2644
break;
2645
}
2646
fallthrough;
2647
case HVCALL_SEND_IPI_EX:
2648
if (unlikely(hc.rep)) {
2649
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2650
break;
2651
}
2652
ret = kvm_hv_send_ipi(vcpu, &hc);
2653
break;
2654
case HVCALL_POST_DEBUG_DATA:
2655
case HVCALL_RETRIEVE_DEBUG_DATA:
2656
if (unlikely(hc.fast)) {
2657
ret = HV_STATUS_INVALID_PARAMETER;
2658
break;
2659
}
2660
fallthrough;
2661
case HVCALL_RESET_DEBUG_SESSION: {
2662
struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
2663
2664
if (!kvm_hv_is_syndbg_enabled(vcpu)) {
2665
ret = HV_STATUS_INVALID_HYPERCALL_CODE;
2666
break;
2667
}
2668
2669
if (!(syndbg->options & HV_X64_SYNDBG_OPTION_USE_HCALLS)) {
2670
ret = HV_STATUS_OPERATION_DENIED;
2671
break;
2672
}
2673
goto hypercall_userspace_exit;
2674
}
2675
case HV_EXT_CALL_QUERY_CAPABILITIES ... HV_EXT_CALL_MAX:
2676
if (unlikely(hc.fast)) {
2677
ret = HV_STATUS_INVALID_PARAMETER;
2678
break;
2679
}
2680
goto hypercall_userspace_exit;
2681
default:
2682
ret = HV_STATUS_INVALID_HYPERCALL_CODE;
2683
break;
2684
}
2685
2686
hypercall_complete:
2687
return kvm_hv_hypercall_complete(vcpu, ret);
2688
2689
hypercall_userspace_exit:
2690
vcpu->run->exit_reason = KVM_EXIT_HYPERV;
2691
vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
2692
vcpu->run->hyperv.u.hcall.input = hc.param;
2693
vcpu->run->hyperv.u.hcall.params[0] = hc.ingpa;
2694
vcpu->run->hyperv.u.hcall.params[1] = hc.outgpa;
2695
vcpu->arch.complete_userspace_io = kvm_hv_hypercall_complete_userspace;
2696
return 0;
2697
}
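/*
 * Hedged userspace sketch (not kernel code) of the other side of the
 * 'hypercall_userspace_exit' path above: after KVM_RUN returns with
 * KVM_EXIT_HYPERV / KVM_EXIT_HYPERV_HCALL, the VMM inspects the input
 * value and in/out GPAs, fills in hyperv.u.hcall.result, and re-enters the
 * vCPU so kvm_hv_hypercall_complete_userspace() can finish the call. The
 * "unhandled" status value below is an assumption of the sketch.
 */
#include <linux/kvm.h>
#include <stdint.h>

#define SKETCH_HV_STATUS_INVALID_HYPERCALL_CODE	2	/* assumed TLFS value */

static void sketch_handle_hv_hcall_exit(struct kvm_run *run)
{
	uint64_t input = run->hyperv.u.hcall.input;
	uint64_t ingpa = run->hyperv.u.hcall.params[0];
	uint64_t outgpa = run->hyperv.u.hcall.params[1];

	(void)ingpa;
	(void)outgpa;

	switch (input & 0xffff) {	/* call code lives in the low 16 bits */
	/* ... VMM-implemented hypercalls (e.g. HvPostMessage) go here ... */
	default:
		run->hyperv.u.hcall.result = SKETCH_HV_STATUS_INVALID_HYPERCALL_CODE;
		break;
	}
	/* The caller then issues ioctl(vcpu_fd, KVM_RUN, 0) again. */
}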
2698
2699
void kvm_hv_init_vm(struct kvm *kvm)
2700
{
2701
struct kvm_hv *hv = to_kvm_hv(kvm);
2702
2703
mutex_init(&hv->hv_lock);
2704
idr_init(&hv->conn_to_evt);
2705
}
2706
2707
void kvm_hv_destroy_vm(struct kvm *kvm)
2708
{
2709
struct kvm_hv *hv = to_kvm_hv(kvm);
2710
struct eventfd_ctx *eventfd;
2711
int i;
2712
2713
idr_for_each_entry(&hv->conn_to_evt, eventfd, i)
2714
eventfd_ctx_put(eventfd);
2715
idr_destroy(&hv->conn_to_evt);
2716
}
2717
2718
static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd)
2719
{
2720
struct kvm_hv *hv = to_kvm_hv(kvm);
2721
struct eventfd_ctx *eventfd;
2722
int ret;
2723
2724
eventfd = eventfd_ctx_fdget(fd);
2725
if (IS_ERR(eventfd))
2726
return PTR_ERR(eventfd);
2727
2728
mutex_lock(&hv->hv_lock);
2729
ret = idr_alloc(&hv->conn_to_evt, eventfd, conn_id, conn_id + 1,
2730
GFP_KERNEL_ACCOUNT);
2731
mutex_unlock(&hv->hv_lock);
2732
2733
if (ret >= 0)
2734
return 0;
2735
2736
if (ret == -ENOSPC)
2737
ret = -EEXIST;
2738
eventfd_ctx_put(eventfd);
2739
return ret;
2740
}
2741
2742
static int kvm_hv_eventfd_deassign(struct kvm *kvm, u32 conn_id)
2743
{
2744
struct kvm_hv *hv = to_kvm_hv(kvm);
2745
struct eventfd_ctx *eventfd;
2746
2747
mutex_lock(&hv->hv_lock);
2748
eventfd = idr_remove(&hv->conn_to_evt, conn_id);
2749
mutex_unlock(&hv->hv_lock);
2750
2751
if (!eventfd)
2752
return -ENOENT;
2753
2754
synchronize_srcu(&kvm->srcu);
2755
eventfd_ctx_put(eventfd);
2756
return 0;
2757
}
2758
2759
int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args)
2760
{
2761
if ((args->flags & ~KVM_HYPERV_EVENTFD_DEASSIGN) ||
2762
(args->conn_id & ~KVM_HYPERV_CONN_ID_MASK))
2763
return -EINVAL;
2764
2765
if (args->flags == KVM_HYPERV_EVENTFD_DEASSIGN)
2766
return kvm_hv_eventfd_deassign(kvm, args->conn_id);
2767
return kvm_hv_eventfd_assign(kvm, args->conn_id, args->fd);
2768
}
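/*
 * Hedged userspace sketch (not kernel code) of driving the handler above:
 * a VMM creates an eventfd, binds it to a Hyper-V connection id with the
 * KVM_HYPERV_EVENTFD VM ioctl, and from then on a guest HvSignalEvent on
 * that connection id pokes the eventfd instead of exiting to userspace.
 * Error handling is omitted for brevity.
 */
#include <linux/kvm.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>

static int sketch_bind_hv_conn(int vm_fd, unsigned int conn_id)
{
	struct kvm_hyperv_eventfd hvevfd = {
		.conn_id = conn_id,
		.fd = eventfd(0, EFD_CLOEXEC),
		.flags = 0,	/* KVM_HYPERV_EVENTFD_DEASSIGN to unbind */
	};

	if (ioctl(vm_fd, KVM_HYPERV_EVENTFD, &hvevfd))
		return -1;
	return hvevfd.fd;	/* poll/read this fd to observe guest signals */
}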
2769
2770
int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
2771
struct kvm_cpuid_entry2 __user *entries)
2772
{
2773
uint16_t evmcs_ver = 0;
2774
struct kvm_cpuid_entry2 cpuid_entries[] = {
2775
{ .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS },
2776
{ .function = HYPERV_CPUID_INTERFACE },
2777
{ .function = HYPERV_CPUID_VERSION },
2778
{ .function = HYPERV_CPUID_FEATURES },
2779
{ .function = HYPERV_CPUID_ENLIGHTMENT_INFO },
2780
{ .function = HYPERV_CPUID_IMPLEMENT_LIMITS },
2781
{ .function = HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS },
2782
{ .function = HYPERV_CPUID_SYNDBG_INTERFACE },
2783
{ .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES },
2784
{ .function = HYPERV_CPUID_NESTED_FEATURES },
2785
};
2786
int i, nent = ARRAY_SIZE(cpuid_entries);
2787
2788
if (kvm_x86_ops.nested_ops->get_evmcs_version)
2789
evmcs_ver = kvm_x86_ops.nested_ops->get_evmcs_version(vcpu);
2790
2791
if (cpuid->nent < nent)
2792
return -E2BIG;
2793
2794
if (cpuid->nent > nent)
2795
cpuid->nent = nent;
2796
2797
for (i = 0; i < nent; i++) {
2798
struct kvm_cpuid_entry2 *ent = &cpuid_entries[i];
2799
u32 signature[3];
2800
2801
switch (ent->function) {
2802
case HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS:
2803
memcpy(signature, "Linux KVM Hv", 12);
2804
2805
ent->eax = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES;
2806
ent->ebx = signature[0];
2807
ent->ecx = signature[1];
2808
ent->edx = signature[2];
2809
break;
2810
2811
case HYPERV_CPUID_INTERFACE:
2812
ent->eax = HYPERV_CPUID_SIGNATURE_EAX;
2813
break;
2814
2815
case HYPERV_CPUID_VERSION:
2816
/*
2817
* We implement some Hyper-V 2016 functions so let's use
2818
* this version.
2819
*/
2820
ent->eax = 0x00003839;
2821
ent->ebx = 0x000A0000;
2822
break;
2823
2824
case HYPERV_CPUID_FEATURES:
2825
ent->eax |= HV_MSR_VP_RUNTIME_AVAILABLE;
2826
ent->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE;
2827
ent->eax |= HV_MSR_SYNIC_AVAILABLE;
2828
ent->eax |= HV_MSR_SYNTIMER_AVAILABLE;
2829
ent->eax |= HV_MSR_APIC_ACCESS_AVAILABLE;
2830
ent->eax |= HV_MSR_HYPERCALL_AVAILABLE;
2831
ent->eax |= HV_MSR_VP_INDEX_AVAILABLE;
2832
ent->eax |= HV_MSR_RESET_AVAILABLE;
2833
ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
2834
ent->eax |= HV_ACCESS_FREQUENCY_MSRS;
2835
ent->eax |= HV_ACCESS_REENLIGHTENMENT;
2836
ent->eax |= HV_ACCESS_TSC_INVARIANT;
2837
2838
ent->ebx |= HV_POST_MESSAGES;
2839
ent->ebx |= HV_SIGNAL_EVENTS;
2840
ent->ebx |= HV_ENABLE_EXTENDED_HYPERCALLS;
2841
2842
ent->edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE;
2843
ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE;
2844
ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
2845
2846
ent->ebx |= HV_DEBUGGING;
2847
ent->edx |= HV_X64_GUEST_DEBUGGING_AVAILABLE;
2848
ent->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;
2849
ent->edx |= HV_FEATURE_EXT_GVA_RANGES_FLUSH;
2850
2851
/*
2852
* Direct Synthetic timers only make sense with in-kernel
2853
* LAPIC
2854
*/
2855
if (!vcpu || lapic_in_kernel(vcpu))
2856
ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;
2857
2858
break;
2859
2860
case HYPERV_CPUID_ENLIGHTMENT_INFO:
2861
ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
2862
ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
2863
ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
2864
if (!vcpu || lapic_in_kernel(vcpu))
2865
ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
2866
ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
2867
if (evmcs_ver)
2868
ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
2869
if (!cpu_smt_possible())
2870
ent->eax |= HV_X64_NO_NONARCH_CORESHARING;
2871
2872
ent->eax |= HV_DEPRECATING_AEOI_RECOMMENDED;
2873
/*
2874
* Default number of spinlock retry attempts, matches
2875
* HyperV 2016.
2876
*/
2877
ent->ebx = 0x00000FFF;
2878
2879
break;
2880
2881
case HYPERV_CPUID_IMPLEMENT_LIMITS:
2882
/* Maximum number of virtual processors */
2883
ent->eax = KVM_MAX_VCPUS;
2884
/*
2885
* Maximum number of logical processors, matches
2886
* HyperV 2016.
2887
*/
2888
ent->ebx = 64;
2889
2890
break;
2891
2892
case HYPERV_CPUID_NESTED_FEATURES:
2893
ent->eax = evmcs_ver;
2894
ent->eax |= HV_X64_NESTED_DIRECT_FLUSH;
2895
ent->eax |= HV_X64_NESTED_MSR_BITMAP;
2896
ent->ebx |= HV_X64_NESTED_EVMCS1_PERF_GLOBAL_CTRL;
2897
break;
2898
2899
case HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS:
2900
memcpy(signature, "Linux KVM Hv", 12);
2901
2902
ent->eax = 0;
2903
ent->ebx = signature[0];
2904
ent->ecx = signature[1];
2905
ent->edx = signature[2];
2906
break;
2907
2908
case HYPERV_CPUID_SYNDBG_INTERFACE:
2909
memcpy(signature, "VS#1\0\0\0\0\0\0\0\0", 12);
2910
ent->eax = signature[0];
2911
break;
2912
2913
case HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES:
2914
ent->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
2915
break;
2916
2917
default:
2918
break;
2919
}
2920
}
2921
2922
if (copy_to_user(entries, cpuid_entries,
2923
nent * sizeof(struct kvm_cpuid_entry2)))
2924
return -EFAULT;
2925
2926
return 0;
2927
}
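/*
 * Hedged userspace sketch (not kernel code) of consuming the table built
 * above: the VMM asks KVM for the supported Hyper-V CPUID leaves with the
 * KVM_GET_SUPPORTED_HV_CPUID ioctl (issued on a vCPU fd here) and then
 * copies the leaves it wants into the guest's CPUID. The entry count of 16
 * is an arbitrary upper bound for the sketch; a failing ioctl can mean the
 * buffer was too small (-E2BIG).
 */
#include <linux/kvm.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static struct kvm_cpuid2 *sketch_get_hv_cpuid(int vcpu_fd)
{
	const unsigned int max_ent = 16;	/* arbitrary sketch bound */
	struct kvm_cpuid2 *cpuid;

	cpuid = calloc(1, sizeof(*cpuid) +
			  max_ent * sizeof(struct kvm_cpuid_entry2));
	if (!cpuid)
		return NULL;

	cpuid->nent = max_ent;
	if (ioctl(vcpu_fd, KVM_GET_SUPPORTED_HV_CPUID, cpuid)) {
		free(cpuid);
		return NULL;	/* e.g. E2BIG: raise max_ent and retry */
	}
	return cpuid;		/* cpuid->nent now holds the real count */
}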
2928
2929