GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/amd64/vmm/x86.c

/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/pcpu.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/clock.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/segments.h>
#include <machine/specialreg.h>
#include <machine/vmm.h>

#include <dev/vmm/vmm_ktr.h>
#include <dev/vmm/vmm_vm.h>

#include "vmm_host.h"
#include "vmm_util.h"
#include "x86.h"

SYSCTL_DECL(_hw_vmm);
static SYSCTL_NODE(_hw_vmm, OID_AUTO, topology, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    NULL);

#define	CPUID_VM_SIGNATURE	0x40000000
#define	CPUID_BHYVE_FEATURES	0x40000001
#define	CPUID_VM_HIGH		CPUID_BHYVE_FEATURES
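/*
 * 0x40000000 is the conventional base of the hypervisor-reserved CPUID
 * range (used likewise by KVM, Hyper-V and Xen), so leaves here never
 * collide with real hardware leaves.
 */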

/* Features advertised in CPUID_BHYVE_FEATURES %eax */
#define	CPUID_BHYVE_FEAT_EXT_DEST_ID	(1UL << 0)	/* MSI Extended Dest ID */

static const char bhyve_id[12] = "bhyve bhyve ";

static uint64_t bhyve_xcpuids;
SYSCTL_ULONG(_hw_vmm, OID_AUTO, bhyve_xcpuids, CTLFLAG_RW, &bhyve_xcpuids, 0,
    "Number of times an unknown cpuid leaf was accessed");

static int cpuid_leaf_b = 1;
SYSCTL_INT(_hw_vmm_topology, OID_AUTO, cpuid_leaf_b, CTLFLAG_RDTUN,
    &cpuid_leaf_b, 0, NULL);

/*
 * Compute ceil(log2(x)).  Returns -1 if x is zero.
 */
static __inline int
log2(u_int x)
{

	return (x == 0 ? -1 : order_base_2(x));
}

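/*
 * For example, log2(1) == 0, log2(2) == 1 and log2(3) == 2: the result
 * is rounded up, which is what the APIC ID field widths computed below
 * require.
 */
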
int
x86_emulate_cpuid(struct vcpu *vcpu, uint64_t *rax, uint64_t *rbx,
    uint64_t *rcx, uint64_t *rdx)
{
	struct vm *vm = vcpu_vm(vcpu);
	int vcpu_id = vcpu_vcpuid(vcpu);
	const struct xsave_limits *limits;
	uint64_t cr4;
	int error, enable_invpcid, enable_rdpid, enable_rdtscp, level,
	    width, x2apic_id;
	unsigned int func, regs[4], logical_cpus, param;
	enum x2apic_state x2apic_state;
	uint16_t cores, maxcpus, sockets, threads;

	/*
	 * The function of CPUID is controlled through the provided value of
	 * %eax (and secondarily %ecx, for certain leaf data).
	 */
	func = (uint32_t)*rax;
	param = (uint32_t)*rcx;

	VCPU_CTR2(vm, vcpu_id, "cpuid %#x,%#x", func, param);

	/*
	 * Requests for invalid CPUID levels should map to the highest
	 * available level instead.
	 */
	if (cpu_exthigh != 0 && func >= 0x80000000) {
		if (func > cpu_exthigh)
			func = cpu_exthigh;
	} else if (func >= CPUID_VM_SIGNATURE) {
		if (func > CPUID_VM_HIGH)
			func = CPUID_VM_HIGH;
	} else if (func > cpu_high) {
		func = cpu_high;
	}

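	/*
	 * E.g. a request for leaf 0x40001234 is clamped to
	 * CPUID_BHYVE_FEATURES (0x40000001), mirroring how real CPUs
	 * answer out-of-range basic leaves with their highest leaf.
	 */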
	/*
	 * In general the approach used for CPU topology is to
	 * advertise a flat topology where all CPUs are packages with
	 * no multi-core or SMT.
	 */
	switch (func) {
		/*
		 * Pass these through to the guest
		 */
	case CPUID_0000_0000:
	case CPUID_0000_0002:
	case CPUID_0000_0003:
	case CPUID_8000_0000:
	case CPUID_8000_0002:
	case CPUID_8000_0003:
	case CPUID_8000_0004:
	case CPUID_8000_0006:
		cpuid_count(func, param, regs);
		break;
	case CPUID_8000_0008:
		cpuid_count(func, param, regs);
		if (vmm_is_svm()) {
			/*
			 * As on Intel (0000_0007:0, EDX), mask out
			 * unsupported or unsafe AMD extended features
			 * (8000_0008 EBX).
			 */
			regs[1] &= (AMDFEID_CLZERO | AMDFEID_IRPERF |
			    AMDFEID_XSAVEERPTR);

			vm_get_topology(vm, &sockets, &cores, &threads,
			    &maxcpus);
			/*
			 * Here, width is ApicIdCoreIdSize, present on
			 * at least Family 15h and newer.  It
			 * represents the "number of bits in the
			 * initial apicid that indicate thread id
			 * within a package."
			 *
			 * Our topo_probe_amd() uses it for
			 * pkg_id_shift and other OSes may rely on it.
			 */
			width = MIN(0xF, log2(threads * cores));
			logical_cpus = MIN(0xFF, threads * cores - 1);
			regs[2] = (width << AMDID_COREID_SIZE_SHIFT) |
			    logical_cpus;
		}
		break;
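		/*
		 * Per the AMD APM, Fn8000_0008 %ecx packs
		 * ApicIdCoreIdSize into bits 15:12 (hence
		 * AMDID_COREID_SIZE_SHIFT) and NC, the number of
		 * logical processors minus one, into bits 7:0.
		 */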

	case CPUID_8000_0001:
		cpuid_count(func, param, regs);

		/*
		 * Hide SVM from guest.
		 */
		regs[2] &= ~AMDID2_SVM;

		/*
		 * Don't advertise extended performance counter MSRs
		 * to the guest.
		 */
		regs[2] &= ~AMDID2_PCXC;
		regs[2] &= ~AMDID2_PNXC;
		regs[2] &= ~AMDID2_PTSCEL2I;

		/*
		 * Don't advertise Instruction Based Sampling feature.
		 */
		regs[2] &= ~AMDID2_IBS;

		/* NodeID MSR not available */
		regs[2] &= ~AMDID2_NODE_ID;

		/* Don't advertise the OS visible workaround feature */
		regs[2] &= ~AMDID2_OSVW;

		/* Hide mwaitx/monitorx capability from the guest */
		regs[2] &= ~AMDID2_MWAITX;

		/* Advertise RDTSCP if it is enabled. */
		error = vm_get_capability(vcpu,
		    VM_CAP_RDTSCP, &enable_rdtscp);
		if (error == 0 && enable_rdtscp)
			regs[3] |= AMDID_RDTSCP;
		else
			regs[3] &= ~AMDID_RDTSCP;
		break;

	case CPUID_8000_0007:
		/*
		 * AMD uses this leaf to advertise the processor's
		 * power monitoring and RAS capabilities.  These
		 * features are hardware-specific and exposing
		 * them to a guest doesn't make a lot of sense.
		 *
		 * Intel uses this leaf only to advertise the
		 * "Invariant TSC" feature with all other bits
		 * being reserved (set to zero).
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;

		/*
		 * "Invariant TSC" can be advertised to the guest if:
		 * - host TSC frequency is invariant
		 * - host TSCs are synchronized across physical cpus
		 *
		 * XXX This still falls short because the vcpu
		 * can observe the TSC moving backwards as it
		 * migrates across physical cpus.  But at least
		 * it should discourage the guest from using the
		 * TSC to keep track of time.
		 */
		if (tsc_is_invariant && smp_tsc)
			regs[3] |= AMDPM_TSC_INVARIANT;
		break;

	case CPUID_8000_001D:
		/* AMD Cache topology, like 0000_0004 for Intel. */
		if (!vmm_is_svm())
			goto default_leaf;

		/*
		 * Similar to Intel, generate a fictitious cache
		 * topology for the guest with L3 shared by the
		 * package, and L1 and L2 local to a core.
		 */
		vm_get_topology(vm, &sockets, &cores, &threads,
		    &maxcpus);
		switch (param) {
		case 0:
			logical_cpus = threads;
			level = 1;
			func = 1;	/* data cache */
			break;
		case 1:
			logical_cpus = threads;
			level = 2;
			func = 3;	/* unified cache */
			break;
		case 2:
			logical_cpus = threads * cores;
			level = 3;
			func = 3;	/* unified cache */
			break;
		default:
			logical_cpus = sockets * threads * cores;
			level = 0;
			func = 0;
			break;
		}

		logical_cpus = MIN(0xfff, logical_cpus - 1);
		regs[0] = (logical_cpus << 14) | (1 << 8) |
		    (level << 5) | func;
		regs[1] = (func > 0) ? (CACHE_LINE_SIZE - 1) : 0;

		/*
		 * ecx: Number of cache ways for non-fully
		 * associative cache, minus 1.  Reported value
		 * of zero means there is one way.
		 */
		regs[2] = 0;

		regs[3] = 0;
		break;
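		/*
		 * The %eax packing above follows the leaf
		 * 4/0x8000001d layout: cache type in bits 4:0, cache
		 * level in bits 7:5, a self-initializing flag in bit
		 * 8, and the number of logical CPUs sharing the
		 * cache, minus one, in bits 25:14.
		 */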

	case CPUID_8000_001E:
		/*
		 * AMD Family 16h+ and Hygon Family 18h additional
		 * identifiers.
		 */
		if (!vmm_is_svm() || CPUID_TO_FAMILY(cpu_id) < 0x16)
			goto default_leaf;

		vm_get_topology(vm, &sockets, &cores, &threads,
		    &maxcpus);
		regs[0] = vcpu_id;
		threads = MIN(0xFF, threads - 1);
		regs[1] = (threads << 8) |
		    (vcpu_id >> log2(threads + 1));
		/*
		 * XXX Bhyve topology cannot yet represent >1 node per
		 * processor.
		 */
		regs[2] = 0;
		regs[3] = 0;
		break;
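		/*
		 * %ebx above follows the Fn8000_001E layout:
		 * ThreadsPerCore - 1 in bits 15:8 and a core ID
		 * derived from the APIC ID in bits 7:0.
		 */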

	case CPUID_0000_0001:
		do_cpuid(1, regs);

		error = vm_get_x2apic_state(vcpu, &x2apic_state);
		if (error) {
			panic("x86_emulate_cpuid: error %d "
			    "fetching x2apic state", error);
		}

		/*
		 * Override the APIC ID only in ebx
		 */
		regs[1] &= ~(CPUID_LOCAL_APIC_ID);
		regs[1] |= (vcpu_id << CPUID_0000_0001_APICID_SHIFT);

		/*
		 * Don't expose VMX, SpeedStep, TM2 or SMX capability.
		 * Advertise x2APIC capability and Hypervisor guest.
		 */
		regs[2] &= ~(CPUID2_VMX | CPUID2_EST | CPUID2_TM2);
		regs[2] &= ~(CPUID2_SMX);

		regs[2] |= CPUID2_HV;

		if (x2apic_state != X2APIC_DISABLED)
			regs[2] |= CPUID2_X2APIC;
		else
			regs[2] &= ~CPUID2_X2APIC;

		/*
		 * Only advertise CPUID2_XSAVE in the guest if
		 * the host is using XSAVE.
		 */
		if (!(regs[2] & CPUID2_OSXSAVE))
			regs[2] &= ~CPUID2_XSAVE;

		/*
		 * If CPUID2_XSAVE is being advertised and the
		 * guest has set CR4_XSAVE, set
		 * CPUID2_OSXSAVE.
		 */
		regs[2] &= ~CPUID2_OSXSAVE;
		if (regs[2] & CPUID2_XSAVE) {
			error = vm_get_register(vcpu,
			    VM_REG_GUEST_CR4, &cr4);
			if (error)
				panic("x86_emulate_cpuid: error %d "
				    "fetching %%cr4", error);
			if (cr4 & CR4_XSAVE)
				regs[2] |= CPUID2_OSXSAVE;
		}

		/*
		 * Hide monitor/mwait until we know how to deal with
		 * these instructions.
		 */
		regs[2] &= ~CPUID2_MON;

		/*
		 * Hide the performance and debug features.
		 */
		regs[2] &= ~CPUID2_PDCM;

		/*
		 * No TSC deadline support in the APIC yet
		 */
		regs[2] &= ~CPUID2_TSCDLT;

		/*
		 * Hide thermal monitoring
		 */
		regs[3] &= ~(CPUID_ACPI | CPUID_TM);

		/*
		 * Hide the debug store capability.
		 */
		regs[3] &= ~CPUID_DS;

		/*
		 * Advertise the Machine Check and MTRR capability.
		 *
		 * Some guest OSes (e.g. Windows) will not boot if
		 * these features are absent.
		 */
		regs[3] |= (CPUID_MCA | CPUID_MCE | CPUID_MTRR);

		vm_get_topology(vm, &sockets, &cores, &threads,
		    &maxcpus);
		logical_cpus = threads * cores;
		regs[1] &= ~CPUID_HTT_CORES;
		regs[1] |= (logical_cpus & 0xff) << 16;
		regs[3] |= CPUID_HTT;
		break;
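		/*
		 * %ebx bits 23:16 (the logical processor count used
		 * together with CPUID_HTT) are only 8 bits wide,
		 * hence the & 0xff; larger topologies are described
		 * by leaf 0xb instead.
		 */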

	case CPUID_0000_0004:
		cpuid_count(func, param, regs);

		if (regs[0] || regs[1] || regs[2] || regs[3]) {
			vm_get_topology(vm, &sockets, &cores, &threads,
			    &maxcpus);
			regs[0] &= 0x3ff;
			regs[0] |= (cores - 1) << 26;
			/*
			 * Cache topology:
			 * - L1 and L2 are shared only by the logical
			 *   processors in a single core.
			 * - L3 and above are shared by all logical
			 *   processors in the package.
			 */
			logical_cpus = threads;
			level = (regs[0] >> 5) & 0x7;
			if (level >= 3)
				logical_cpus *= cores;
			regs[0] |= (logical_cpus - 1) << 14;
		}
		break;
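		/*
		 * The 0x3ff mask keeps only the type/level/flag bits
		 * of %eax (bits 9:0); bits 31:26 then receive "cores
		 * per package - 1" and bits 25:14 the number of
		 * logical CPUs sharing the cache, minus one.
		 */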

	case CPUID_0000_0007:
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;

		/* leaf 0 */
		if (param == 0) {
			cpuid_count(func, param, regs);

			/* Only leaf 0 is supported */
			regs[0] = 0;

			/*
			 * Expose known-safe features.
			 */
			regs[1] &= CPUID_STDEXT_FSGSBASE |
			    CPUID_STDEXT_BMI1 | CPUID_STDEXT_HLE |
			    CPUID_STDEXT_AVX2 | CPUID_STDEXT_SMEP |
			    CPUID_STDEXT_BMI2 |
			    CPUID_STDEXT_ERMS | CPUID_STDEXT_RTM |
			    CPUID_STDEXT_AVX512F |
			    CPUID_STDEXT_AVX512DQ |
			    CPUID_STDEXT_RDSEED |
			    CPUID_STDEXT_SMAP |
			    CPUID_STDEXT_AVX512PF |
			    CPUID_STDEXT_AVX512ER |
			    CPUID_STDEXT_AVX512CD | CPUID_STDEXT_SHA |
			    CPUID_STDEXT_AVX512BW |
			    CPUID_STDEXT_AVX512VL;
			regs[2] &= CPUID_STDEXT2_VAES |
			    CPUID_STDEXT2_VPCLMULQDQ;
			regs[3] &= CPUID_STDEXT3_MD_CLEAR;

			/* Advertise RDPID if it is enabled. */
			error = vm_get_capability(vcpu, VM_CAP_RDPID,
			    &enable_rdpid);
			if (error == 0 && enable_rdpid)
				regs[2] |= CPUID_STDEXT2_RDPID;

			/* Advertise INVPCID if it is enabled. */
			error = vm_get_capability(vcpu,
			    VM_CAP_ENABLE_INVPCID, &enable_invpcid);
			if (error == 0 && enable_invpcid)
				regs[1] |= CPUID_STDEXT_INVPCID;
		}
		break;

	case CPUID_0000_0006:
		regs[0] = CPUTPM1_ARAT;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;
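		/*
		 * CPUTPM1_ARAT advertises an always-running APIC
		 * timer, so guests can keep using the LAPIC timer
		 * across deep C-states.
		 */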

	case CPUID_0000_000A:
		/*
		 * Handle the access, but report 0 for
		 * all options
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_0000_000B:
		/*
		 * Intel processor topology enumeration
		 */
		if (vmm_is_intel()) {
			vm_get_topology(vm, &sockets, &cores, &threads,
			    &maxcpus);
			if (param == 0) {
				logical_cpus = threads;
				width = log2(logical_cpus);
				level = CPUID_TYPE_SMT;
				x2apic_id = vcpu_id;
			}

			if (param == 1) {
				logical_cpus = threads * cores;
				width = log2(logical_cpus);
				level = CPUID_TYPE_CORE;
				x2apic_id = vcpu_id;
			}

			if (!cpuid_leaf_b || param >= 2) {
				width = 0;
				logical_cpus = 0;
				level = 0;
				x2apic_id = 0;
			}

			regs[0] = width & 0x1f;
			regs[1] = logical_cpus & 0xffff;
			regs[2] = (level << 8) | (param & 0xff);
			regs[3] = x2apic_id;
		} else {
			regs[0] = 0;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
		}
		break;
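		/*
		 * Per the Intel SDM, leaf 0xb returns in %eax the
		 * number of APIC ID bits to shift out to reach the
		 * next topology level, in %ebx the logical processor
		 * count at this level, in %ecx the level number and
		 * type, and in %edx the vCPU's x2APIC ID.
		 */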

	case CPUID_0000_000D:
		limits = vmm_get_xsave_limits();
		if (!limits->xsave_enabled) {
			regs[0] = 0;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
			break;
		}

		cpuid_count(func, param, regs);
		switch (param) {
		case 0:
			/*
			 * Only permit the guest to use bits
			 * that are active in the host in
			 * %xcr0.  Also, claim that the
			 * maximum save area size is
			 * equivalent to the host's current
			 * save area size.  Since this runs
			 * "inside" of vmrun(), it runs with
			 * the guest's xcr0, so the current
			 * save area size is correct as-is.
			 */
			regs[0] &= limits->xcr0_allowed;
			regs[2] = limits->xsave_max_size;
			regs[3] &= (limits->xcr0_allowed >> 32);
			break;
		case 1:
			/* Only permit XSAVEOPT. */
			regs[0] &= CPUID_EXTSTATE_XSAVEOPT;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
			break;
		default:
			/*
			 * If the leaf is for a permitted feature,
			 * pass through as-is, otherwise return
			 * all zeroes.
			 */
			if (!(limits->xcr0_allowed & (1ul << param))) {
				regs[0] = 0;
				regs[1] = 0;
				regs[2] = 0;
				regs[3] = 0;
			}
			break;
		}
		break;
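		/*
		 * xcr0_allowed is a 64-bit %xcr0 mask: the low half
		 * filters sub-leaf 0 %eax and the high half (>> 32)
		 * filters %edx, while each extended-state component's
		 * own sub-leaf is gated by the matching bit in the
		 * default case above.
		 */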

	case CPUID_0000_000F:
	case CPUID_0000_0010:
		/*
		 * Do not report any Resource Director Technology
		 * capabilities.  Exposing control of cache or memory
		 * controller resource partitioning to the guest is not
		 * at all sensible.
		 *
		 * This is already hidden at a high level by masking of
		 * leaf 0x7.  Even still, a guest may look here for
		 * detailed capability information.
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_0000_0015:
		/*
		 * Don't report CPU TSC/Crystal ratio and clock
		 * values since guests may use these to derive the
		 * local APIC frequency.
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_VM_SIGNATURE:
		regs[0] = CPUID_VM_HIGH;
		bcopy(bhyve_id, &regs[1], 4);
		bcopy(bhyve_id + 4, &regs[2], 4);
		bcopy(bhyve_id + 8, &regs[3], 4);
		break;
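		/*
		 * The 12-byte signature lands in %ebx:%ecx:%edx just
		 * like a hardware vendor string in leaf 0, so a guest
		 * issuing CPUID with %eax = 0x40000000 reads back
		 * "bhyve bhyve ".
		 */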

	case CPUID_BHYVE_FEATURES:
		regs[0] = CPUID_BHYVE_FEAT_EXT_DEST_ID;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	default:
default_leaf:
		/*
		 * The leaf value has already been clamped so
		 * simply pass this through, keeping count of
		 * how many unhandled leaf values have been seen.
		 */
		atomic_add_long(&bhyve_xcpuids, 1);
		cpuid_count(func, param, regs);
		break;
	}

	/*
	 * CPUID clears the upper 32-bits of the long-mode registers.
	 */
	*rax = regs[0];
	*rbx = regs[1];
	*rcx = regs[2];
	*rdx = regs[3];

	return (1);
}

bool
vm_cpuid_capability(struct vcpu *vcpu, enum vm_cpuid_capability cap)
{
	bool rv;

	KASSERT(cap > 0 && cap < VCC_LAST, ("%s: invalid vm_cpu_capability %d",
	    __func__, cap));

	/*
	 * Simply pass through the capabilities of the host cpu for now.
	 */
	rv = false;
	switch (cap) {
	case VCC_NO_EXECUTE:
		if (amd_feature & AMDID_NX)
			rv = true;
		break;
	case VCC_FFXSR:
		if (amd_feature & AMDID_FFXSR)
			rv = true;
		break;
	case VCC_TCE:
		if (amd_feature2 & AMDID2_TCE)
			rv = true;
		break;
	default:
		panic("%s: unknown vm_cpu_capability %d", __func__, cap);
	}
	return (rv);
}

int
vm_rdmtrr(struct vm_mtrr *mtrr, u_int num, uint64_t *val)
{
	switch (num) {
	case MSR_MTRRcap:
		*val = MTRR_CAP_WC | MTRR_CAP_FIXED | VMM_MTRR_VAR_MAX;
		break;
	case MSR_MTRRdefType:
		*val = mtrr->def_type;
		break;
	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7:
		*val = mtrr->fixed4k[num - MSR_MTRR4kBase];
		break;
	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
		*val = mtrr->fixed16k[num - MSR_MTRR16kBase];
		break;
	case MSR_MTRR64kBase:
		*val = mtrr->fixed64k;
		break;
	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1: {
		u_int offset = num - MSR_MTRRVarBase;
		if (offset % 2 == 0) {
			*val = mtrr->var[offset / 2].base;
		} else {
			*val = mtrr->var[offset / 2].mask;
		}
		break;
	}
	default:
		return (-1);
	}

	return (0);
}
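
/*
 * Variable-range MTRRs are exposed as interleaved MSR pairs starting at
 * MSR_MTRRVarBase: even offsets address PHYSBASEn and odd offsets
 * PHYSMASKn, which is why offset % 2 selects base vs. mask above and
 * below.
 */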

int
vm_wrmtrr(struct vm_mtrr *mtrr, u_int num, uint64_t val)
{
	switch (num) {
	case MSR_MTRRcap:
		/* MTRRCAP is read only */
		return (-1);
	case MSR_MTRRdefType:
		if (val & ~VMM_MTRR_DEF_MASK) {
			/* generate #GP on writes to reserved fields */
			return (-1);
		}
		mtrr->def_type = val;
		break;
	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7:
		mtrr->fixed4k[num - MSR_MTRR4kBase] = val;
		break;
	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
		mtrr->fixed16k[num - MSR_MTRR16kBase] = val;
		break;
	case MSR_MTRR64kBase:
		mtrr->fixed64k = val;
		break;
	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1: {
		u_int offset = num - MSR_MTRRVarBase;
		if (offset % 2 == 0) {
			if (val & ~VMM_MTRR_PHYSBASE_MASK) {
				/* generate #GP on writes to reserved fields */
				return (-1);
			}
			mtrr->var[offset / 2].base = val;
		} else {
			if (val & ~VMM_MTRR_PHYSMASK_MASK) {
				/* generate #GP on writes to reserved fields */
				return (-1);
			}
			mtrr->var[offset / 2].mask = val;
		}
		break;
	}
	default:
		return (-1);
	}

	return (0);
}