Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
google
GitHub Repository: google/crosvm
Path: blob/main/hypervisor/tests/hypervisor_virtualization.rs
5394 views
1
// Copyright 2024 The ChromiumOS Authors
2
// Use of this source code is governed by a BSD-style license that can be
3
// found in the LICENSE file.
4
5
#![cfg(target_arch = "x86_64")]
6
#![cfg(any(feature = "whpx", feature = "gvm", feature = "haxm", unix))]
7
8
use core::mem;
9
use std::arch::asm;
10
use std::cell::RefCell;
11
use std::ffi::c_void;
12
use std::sync::atomic::AtomicU8;
13
use std::sync::atomic::Ordering;
14
use std::sync::Arc;
15
16
use base::set_cpu_affinity;
17
use base::MappedRegion;
18
use base::MemoryMappingBuilder;
19
use base::SharedMemory;
20
#[cfg(feature = "gvm")]
21
use hypervisor::gvm::*;
22
#[cfg(all(windows, feature = "haxm"))]
23
use hypervisor::haxm::*;
24
#[cfg(any(target_os = "android", target_os = "linux"))]
25
use hypervisor::kvm::*;
26
#[cfg(all(windows, feature = "whpx"))]
27
use hypervisor::whpx::*;
28
#[cfg(any(target_os = "android", target_os = "linux"))]
29
use hypervisor::MemCacheType::CacheCoherent;
30
use hypervisor::*;
31
use hypervisor_test_macro::global_asm_data;
32
use sync::Mutex;
33
use vm_memory::GuestAddress;
34
use vm_memory::GuestMemory;
35
#[cfg(windows)]
36
use windows::Win32::System::Memory::VirtualLock;
37
#[cfg(windows)]
38
use windows::Win32::System::Memory::VirtualUnlock;
39
use zerocopy::FromBytes;
40
use zerocopy::Immutable;
41
use zerocopy::IntoBytes;
42
use zerocopy::KnownLayout;
43
44
// RFLAGS bit 9: the interrupt-enable (IF) flag.
const FLAGS_IF_BIT: u64 = 0x200;
45
46
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
/// Identifies which hypervisor backend a test run is exercising, so matchers
/// can special-case per-hypervisor behavior.
pub enum HypervisorType {
    Kvm,
    Whpx,
    Haxm,
    Gvm,
}
53
54
#[repr(C, packed)]
#[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
/// Define IDTR value used in real mode or 32bit protected mode.
struct Idtr32 {
    // The lower 2 bytes are limit.
    limit: u16,
    // The higher 4 bytes are base address.
    base_address: u32,
}
63
64
#[repr(C, packed)]
#[derive(Debug, Copy, Clone, FromBytes, Immutable, IntoBytes, KnownLayout)]
/// IDT entries for long mode.
///
/// The 64-bit handler address is split across `address_low`, `address_mid`
/// and `address_high` per the x86-64 gate descriptor layout.
struct IdtEntry64 {
    // Handler address bits 0..16.
    address_low: u16,
    // Code segment selector used when the gate fires.
    selector: u16,
    // Interrupt Stack Table index (0 = no stack switch).
    ist: u8,
    // Present/type/DPL bits.
    flags: u8,
    // Handler address bits 16..32.
    address_mid: u16,
    // Handler address bits 32..64.
    address_high: u32,
    reserved: u32,
}
76
77
impl IdtEntry64 {
78
pub fn new(handler_addr: u64) -> Self {
79
IdtEntry64 {
80
address_low: (handler_addr & 0xFFFF) as u16,
81
selector: 0x10, // Our long mode CS is the third entry (0x0, 0x8, 0x10).
82
ist: 0,
83
flags: 0x8E, // Present, interrupt gate, DPL 0
84
address_mid: ((handler_addr >> 16) & 0xFFFF) as u16,
85
address_high: (handler_addr >> 32) as u32,
86
reserved: 0,
87
}
88
}
89
}
90
91
impl std::fmt::Display for HypervisorType {
92
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
93
match self {
94
HypervisorType::Kvm => write!(f, "KVM"),
95
HypervisorType::Whpx => write!(f, "WHPX"),
96
HypervisorType::Haxm => write!(f, "HAXM"),
97
HypervisorType::Gvm => write!(f, "GVM"),
98
}
99
}
100
}
101
102
/// Per-backend factory used by the test driver: creates a hypervisor handle
/// and a VM bound to the given guest memory.
pub trait HypervisorTestSetup {
    type Hypervisor: Hypervisor;
    type Vm: VmX86_64;

    /// Creates the hypervisor and a VM that owns `guest_mem`.
    fn create_vm(guest_mem: GuestMemory) -> (Self::Hypervisor, Self::Vm);
}
108
109
#[cfg(any(target_os = "android", target_os = "linux"))]
impl HypervisorTestSetup for Kvm {
    type Hypervisor = Kvm;
    type Vm = KvmVm;

    // Creates a KVM instance and a KVM VM with default protection config.
    fn create_vm(guest_mem: GuestMemory) -> (Self::Hypervisor, Self::Vm) {
        let kvm = Kvm::new().expect("failed to create kvm");
        let vm = KvmVm::new(&kvm, guest_mem, Default::default()).expect("failed to create vm");
        (kvm, vm)
    }
}
120
121
#[cfg(all(windows, feature = "whpx"))]
impl HypervisorTestSetup for Whpx {
    type Hypervisor = Whpx;
    type Vm = WhpxVm;

    // Creates a WHPX VM with 1 vcpu and an empty CPUID set.
    fn create_vm(guest_mem: GuestMemory) -> (Self::Hypervisor, Self::Vm) {
        let whpx = Whpx::new().expect("failed to create whpx");
        let vm = WhpxVm::new(&whpx, 1, guest_mem, CpuId::new(0), false, None)
            .expect("failed to create vm");
        (whpx, vm)
    }
}
133
134
#[cfg(all(windows, feature = "haxm"))]
impl HypervisorTestSetup for Haxm {
    type Hypervisor = Haxm;
    type Vm = HaxmVm;

    // Creates a HAXM instance and VM over the provided guest memory.
    fn create_vm(guest_mem: GuestMemory) -> (Self::Hypervisor, Self::Vm) {
        let haxm = Haxm::new().expect("failed to create haxm");
        let vm = HaxmVm::new(&haxm, guest_mem).expect("failed to create vm");
        (haxm, vm)
    }
}
145
146
#[cfg(feature = "gvm")]
impl HypervisorTestSetup for Gvm {
    type Hypervisor = Gvm;
    type Vm = GvmVm;

    // Creates a GVM instance and VM over the provided guest memory.
    fn create_vm(guest_mem: GuestMemory) -> (Self::Hypervisor, Self::Vm) {
        let gvm = Gvm::new().expect("failed to create gvm");
        let vm = GvmVm::new(&gvm, guest_mem).expect("failed to create vm");
        (gvm, vm)
    }
}
157
158
/// Declarative description of one guest-code test case: the code to load,
/// where to load it, the initial register state, and how the run is expected
/// to end.
pub struct TestSetup {
    /// Raw machine code written to guest memory at `load_addr`.
    pub assembly: Vec<u8>,
    /// Guest-physical address where `assembly` is loaded.
    pub load_addr: GuestAddress,
    /// Size of the guest memory region starting at guest-physical 0.
    pub mem_size: u64,
    /// Register values applied to the vcpu before the run.
    pub initial_regs: Regs,
    /// Optional hook run after registers are set, for per-test VM/vcpu setup
    /// (e.g. extra memory regions, segment registers, mode switches).
    pub extra_vm_setup: Option<Box<dyn Fn(&mut dyn VcpuX86_64, &mut dyn Vm) + Send>>,
    /// Extra (address, bytes) pairs written to guest memory before the code.
    pub memory_initializations: Vec<(GuestAddress, Vec<u8>)>,
    /// Whether `vcpu.run()` itself is expected to return `Ok`.
    pub expect_run_success: bool,

    /// Whether the `exit_matcher` should receive [`VcpuExit::Intr`]. Default to `false`.
    ///
    /// Hypervisors may occasionally receive [`VcpuExit::Intr`] if external interrupt intercept is
    /// enabled. In such case, we should proceed to the next VCPU run to handle it. HAXM doesn't
    /// distinguish between [`VcpuExit::Intr`] and [`VcpuExit::IrqWindowOpen`], so it may be
    /// necessary to intercept [`VcpuExit::Intr`] for testing
    /// [`VcpuX86_64::set_interrupt_window_requested`].
    pub intercept_intr: bool,
}
176
177
impl Default for TestSetup {
178
fn default() -> Self {
179
TestSetup {
180
assembly: Vec::new(),
181
load_addr: GuestAddress(0),
182
mem_size: 0xF000, // Big enough default for long mode setup
183
initial_regs: Regs::default(),
184
extra_vm_setup: None,
185
memory_initializations: Vec::new(),
186
expect_run_success: true,
187
intercept_intr: false,
188
}
189
}
190
}
191
192
impl TestSetup {
193
pub fn new() -> Self {
194
Default::default()
195
}
196
197
pub fn add_memory_initialization(&mut self, addr: GuestAddress, data: Vec<u8>) {
198
self.memory_initializations.push((addr, data));
199
}
200
}
201
202
/// Drives one test case on hypervisor backend `H`.
///
/// Creates guest memory and a VM per `setup`, loads the test code and any
/// extra memory initializations, runs vcpu 0 until `exit_matcher` returns
/// `true` (or the run fails as expected), then validates the final register
/// state with `regs_matcher`.
pub fn run_configurable_test<H: HypervisorTestSetup>(
    hypervisor_type: HypervisorType,
    setup: &TestSetup,
    regs_matcher: impl Fn(HypervisorType, &Regs, &Sregs),
    mut exit_matcher: impl FnMut(HypervisorType, &VcpuExit, &mut dyn VcpuX86_64, &mut dyn Vm) -> bool,
) {
    println!("Running test on hypervisor: {hypervisor_type}");

    // One contiguous memory region starting at guest-physical 0.
    let guest_mem =
        GuestMemory::new(&[(GuestAddress(0), setup.mem_size)]).expect("failed to create guest mem");

    // Write per-test data blobs before the code itself.
    for (addr, data) in &setup.memory_initializations {
        guest_mem
            .write_at_addr(data, *addr)
            .expect("failed to write memory initialization");
    }

    guest_mem
        .write_at_addr(&setup.assembly, setup.load_addr)
        .expect("failed to write to guest memory");

    let (_, mut vm) = H::create_vm(guest_mem);

    let mut vcpu = vm.create_vcpu(0).expect("new vcpu failed");

    // Zero CS base/selector so the initial `rip` is used as a flat address.
    let mut sregs = vcpu.get_sregs().expect("get sregs failed");
    sregs.cs.base = 0;
    sregs.cs.selector = 0;
    vcpu.set_sregs(&sregs).expect("set sregs failed");
    vcpu.set_regs(&setup.initial_regs).expect("set regs failed");

    // Per-test hook runs after the baseline register state is applied.
    if let Some(ref setup_fn) = setup.extra_vm_setup {
        setup_fn(&mut *vcpu, &mut vm);
    }

    // Backends without early CPUID init need the CPUID table set explicitly.
    if !vm.check_capability(VmCap::EarlyInitCpuid) {
        let cpuid = vm
            .get_hypervisor()
            .get_supported_cpuid()
            .expect("get_supported_cpuid() failed");
        vcpu.set_cpuid(&cpuid).expect("set_cpuid() failed");
    }

    loop {
        match vcpu.run() {
            Ok(exit) => match exit {
                // Handle interrupts by continuing the loop
                VcpuExit::Intr if !setup.intercept_intr => continue,
                other_exit => {
                    if !setup.expect_run_success {
                        panic!("Expected vcpu.run() to fail, but it succeeded");
                    }
                    // The matcher decides when the test is done.
                    if exit_matcher(hypervisor_type, &other_exit, &mut *vcpu, &mut vm) {
                        break;
                    }
                }
            },
            Err(e) => {
                if setup.expect_run_success {
                    panic!("Expected vcpu.run() to succeed, but it failed with error: {e:?}");
                } else {
                    println!("Expected failure occurred: {e:?}");
                    break;
                }
            }
        }
    }

    let final_regs = vcpu.get_regs().expect("failed to get regs");
    let final_sregs = vcpu.get_sregs().expect("failed to get sregs");

    regs_matcher(hypervisor_type, &final_regs, &final_sregs);
}
275
276
// Runs one test case against every hypervisor backend that is compiled in
// for the current target OS and feature set.
macro_rules! run_tests {
    ($setup:expr, $regs_matcher:expr, $exit_matcher:expr) => {
        #[cfg(any(target_os = "android", target_os = "linux"))]
        run_configurable_test::<Kvm>(HypervisorType::Kvm, &$setup, $regs_matcher, $exit_matcher);

        #[cfg(all(windows, feature = "whpx"))]
        run_configurable_test::<Whpx>(HypervisorType::Whpx, &$setup, $regs_matcher, $exit_matcher);

        #[cfg(all(windows, feature = "haxm"))]
        run_configurable_test::<Haxm>(HypervisorType::Haxm, &$setup, $regs_matcher, $exit_matcher);

        #[cfg(feature = "gvm")]
        run_configurable_test::<Gvm>(HypervisorType::Gvm, &$setup, $regs_matcher, $exit_matcher);
    };
}
291
292
// Default guest-physical locations where ModeConfig places the GDT and IDT.
const DEFAULT_GDT_OFFSET: u64 = 0x1500;
const DEFAULT_IDT_OFFSET: u64 = 0x1528;

// Bits of the descriptor access byte's 4-bit "type" field.
const DESC_ACCESS_EXEC: u8 = 1 << 3;
const DESC_ACCESS_RW: u8 = 1 << 1;
const DESC_ACCESS_ACCESSED: u8 = 1 << 0;
298
299
#[derive(Debug, Clone, Copy)]
/// A long-mode page table entry, expanded into named fields; packed into its
/// hardware bit layout by the `From<LongModePageTableEntry> for u64` impl.
struct LongModePageTableEntry {
    // NX bit (bit 63).
    execute_disable: bool,
    // 4-bit protection key (bits 59..63).
    protection_key: u8,
    // 4K-aligned physical address (bits 12..52).
    address: u64,
    global: bool,
    page_attribute_table: bool,
    dirty: bool,
    accessed: bool,
    cache_disable: bool,
    write_through: bool,
    user_supervisor: bool,
    read_write: bool,
    present: bool,
}
314
315
impl LongModePageTableEntry {
316
fn from_address(address: u64) -> Self {
317
assert!(address < 1 << 52, "the address must fit in 52 bits");
318
assert!(address & 0xFFF == 0, "the address must be aligned to 4k");
319
Self {
320
execute_disable: false,
321
protection_key: 0,
322
address,
323
global: false,
324
page_attribute_table: false,
325
dirty: false,
326
accessed: false,
327
cache_disable: false,
328
write_through: false,
329
user_supervisor: false,
330
read_write: false,
331
present: false,
332
}
333
}
334
}
335
336
impl From<LongModePageTableEntry> for u64 {
    /// Packs the entry into the long-mode page-table-entry bit layout:
    /// flag bits 0..9, physical address bits 12..52, protection key bits
    /// 59..63, and the execute-disable bit 63.
    fn from(page_table_entry: LongModePageTableEntry) -> Self {
        assert!(page_table_entry.address < 1 << 52);
        assert!(page_table_entry.address & 0xFFF == 0);
        assert!(page_table_entry.protection_key < 1 << 4);

        // (flag, bit position) pairs; OR in the bit for each flag that is set.
        let flag_bits = [
            (page_table_entry.present, 0),
            (page_table_entry.read_write, 1),
            (page_table_entry.user_supervisor, 2),
            (page_table_entry.write_through, 3),
            (page_table_entry.cache_disable, 4),
            (page_table_entry.accessed, 5),
            (page_table_entry.dirty, 6),
            (page_table_entry.page_attribute_table, 7),
            (page_table_entry.global, 8),
            (page_table_entry.execute_disable, 63),
        ];
        let flags = flag_bits
            .iter()
            .filter(|(enabled, _)| *enabled)
            .fold(0u64, |acc, (_, bit)| acc | (1 << bit));

        flags | page_table_entry.address | (u64::from(page_table_entry.protection_key) << 59)
    }
}
377
378
#[derive(Debug, Clone)]
/// Everything needed to put a vcpu into protected or long mode: descriptor
/// tables, their guest-physical locations, and (for long mode) a PTE page.
struct ModeConfig {
    /// Raw IDT bytes written to `idt_base_addr`.
    idt: Vec<u8>,
    idt_base_addr: u64,
    /// GDT as structured segments; encoded to bytes on demand.
    gdt: Vec<Segment>,
    gdt_base_addr: u64,
    /// Index into `gdt` of the segment loaded into CS.
    code_segment_index: u16,
    /// Index into `gdt` of the segment loaded into TR, if any.
    task_segment_index: Option<u16>,
    /// One 4K page of PTEs mapping the first 2MiB; required for long mode.
    page_table: Option<Box<[u8; 0x1000]>>,
    /// Whether this config targets 64-bit long mode (affects descriptor and
    /// IDT encodings).
    long_mode: bool,
}
389
390
impl ModeConfig {
    // Byte sizes of a full 256-entry IDT: 16-byte gates in long mode,
    // 8-byte gates in 32-bit modes.
    const IDT64_SIZE: usize = std::mem::size_of::<IdtEntry64>() * 256;
    const IDT32_SIZE: usize = 8 * 256;

    /// Set the IDT for long mode.
    ///
    /// `idt` must yield exactly 256 entries; they are serialized in order
    /// into the raw `self.idt` byte buffer.
    fn set_idt_long_mode(&mut self, idt: impl IntoIterator<Item = IdtEntry64>) -> &mut Self {
        let entries = idt.into_iter().collect::<Vec<_>>();
        assert_eq!(entries.len(), 256, "IDT must contain 256 entries");
        self.idt = entries
            .into_iter()
            .flat_map(|entry| entry.as_bytes().to_owned())
            .collect();
        self
    }

    /// Set the guest-physical address where the IDT will be written.
    fn set_idt_base_addr(&mut self, idt_base_addr: u64) -> &mut Self {
        self.idt_base_addr = idt_base_addr;
        self
    }

    /// Flat 64-bit code segment: L=1, 4K granularity, exec/read, accessed.
    fn default_code_segment_long_mode() -> Segment {
        Segment {
            base: 0,
            limit_bytes: 0xffff_ffff,
            type_: DESC_ACCESS_EXEC | DESC_ACCESS_RW | DESC_ACCESS_ACCESSED,
            present: 1,
            dpl: 0,
            db: 0,
            s: 1,
            l: 1,
            g: 1,
            ..Default::default()
        }
    }

    /// Flat 32-bit code segment: D/B=1, 4K granularity, exec/read, accessed.
    fn default_code_segment_protected_mode() -> Segment {
        Segment {
            base: 0,
            limit_bytes: 0xffff_ffff,
            type_: DESC_ACCESS_EXEC | DESC_ACCESS_RW | DESC_ACCESS_ACCESSED,
            present: 1,
            dpl: 0,
            db: 1,
            s: 1,
            l: 0,
            g: 1,
            ..Default::default()
        }
    }

    /// Encodes `segment` into raw GDT descriptor bytes: 8 bytes normally,
    /// or 16 bytes for a 64-bit system segment (long mode with S=0).
    fn segment_to_bytes(segment: &Segment, long_mode: bool) -> Vec<u8> {
        if *segment == Segment::default() {
            // Special handle for null descriptor, so that it won't be recognized as a 64
            // bit system segment.
            return vec![0u8; 8];
        }
        let Segment {
            base,
            limit_bytes,
            type_,
            present,
            dpl,
            db,
            s,
            l,
            g,
            ..
        } = *segment;

        // Convert the byte limit to the granularity implied by the G bit.
        let limit = if g != 0 {
            // 4096-byte granularity
            limit_bytes / 4096
        } else {
            // 1-byte granularity
            limit_bytes
        };

        assert!(limit < (1 << 20)); // limit value must fit in 20 bits

        // Flags nibble (G/DB/L), shifted into the high 4 bits.
        let flags = {
            let mut flags = 0;
            if g != 0 {
                flags |= 1 << 3;
            }
            if db != 0 {
                flags |= 1 << 2;
            }
            if l != 0 {
                flags |= 1 << 1;
            }
            flags << 4
        };
        assert!(flags & 0x0F == 0x00); // flags must be in the high 4 bits only

        // Access byte: type (low 4 bits), S, DPL, and present.
        let access = {
            assert!(type_ < (1 << 4), "type must fit in 4 bits");
            let mut access = type_;
            if present != 0 {
                access |= 1 << 7;
            }
            assert!(dpl < (1 << 2), "DPL must fit in 2 bits");
            access |= dpl << 5;
            if s != 0 {
                access |= 1 << 4;
            }
            access
        };

        // Split limit/base into the scattered descriptor fields.
        let limit_lo = (limit & 0xffff).try_into().unwrap();
        let base_lo = (base & 0xffff).try_into().unwrap();
        let base_mid0 = ((base >> 16) & 0xff).try_into().unwrap();
        let limit_hi_and_flags = u8::try_from((limit >> 16) & 0xf).unwrap() | flags;
        let base_mid1 = ((base >> 24) & 0xff).try_into().unwrap();
        let base_hi = (base >> 32).try_into().unwrap();

        if long_mode && s == 0 {
            // 64 bit system segment descriptor.
            #[repr(C, packed)]
            #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
            struct Descriptor {
                limit_lo: u16,
                base_lo: u16,
                base_mid0: u8,
                access: u8,
                limit_hi_and_flags: u8,
                base_mid1: u8,
                base_hi: u32,
                _reserved: [u8; 4],
            }

            Descriptor {
                limit_lo,
                base_lo,
                base_mid0,
                access,
                limit_hi_and_flags,
                base_mid1,
                base_hi,
                _reserved: [0; 4],
            }
            .as_bytes()
            .to_owned()
        } else {
            // Standard 8-byte descriptor; note this struct's `base_hi` is the
            // descriptor's bits 24..32 of the base (our `base_mid1`).
            #[repr(C, packed)]
            #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
            struct Descriptor {
                limit_lo: u16,
                base_lo: u16,
                base_mid: u8,
                access: u8,
                limit_hi_and_flags: u8,
                base_hi: u8,
            }

            assert_eq!(base_hi, 0, "the base address must be within 32 bit range");
            Descriptor {
                limit_lo,
                base_lo,
                base_mid: base_mid0,
                access,
                limit_hi_and_flags,
                base_hi: base_mid1,
            }
            .as_bytes()
            .to_owned()
        }
    }

    /// Serializes every GDT segment into one contiguous byte buffer.
    fn get_gdt_bytes(&self) -> Vec<u8> {
        self.gdt
            .iter()
            .flat_map(|segment| Self::segment_to_bytes(segment, self.long_mode))
            .collect()
    }

    /// Writes the encoded GDT into guest memory at `gdt_base_addr`,
    /// asserting the target range is mapped.
    fn configure_gdt_memory(&self, guest_mem: &GuestMemory) {
        let gdt_bytes = self.get_gdt_bytes();
        let gdt_start_addr = GuestAddress(self.gdt_base_addr);
        let gdt_end_addr = gdt_start_addr
            .checked_add(
                gdt_bytes
                    .len()
                    .try_into()
                    .expect("the GDT size must be within usize"),
            )
            .expect("the end of GDT address shouldn't overflow");
        assert!(
            guest_mem.range_overlap(GuestAddress(self.gdt_base_addr), gdt_end_addr),
            "the address for GDT is not mapped"
        );
        guest_mem
            .write_at_addr(&gdt_bytes, GuestAddress(self.gdt_base_addr))
            .expect("Failed to write GDT entry to guest memory");
    }

    /// Writes the raw IDT bytes into guest memory at `idt_base_addr`,
    /// asserting the buffer has the exact size for the current mode and
    /// that the target range is mapped.
    fn configure_idt_memory(&self, guest_mem: &GuestMemory) {
        let expected_length = if self.long_mode {
            Self::IDT64_SIZE
        } else {
            Self::IDT32_SIZE
        };

        let idt_addr = GuestAddress(self.idt_base_addr);
        assert_eq!(self.idt.len(), expected_length);
        assert!(
            guest_mem.range_overlap(
                idt_addr,
                idt_addr
                    .checked_add(
                        self.idt
                            .len()
                            .try_into()
                            .expect("The IDT length must be within the u64 range.")
                    )
                    .expect("The end address of IDT should not overflow")
            ),
            "The IDT that starts at {:#x} isn't properly mapped as the guest memory.",
            self.idt_base_addr
        );
        guest_mem
            .write_at_addr(&self.idt, idt_addr)
            .expect("failed to write IDT entry to guest memory");
    }

    /// Builds the IDTR value (base + limit) describing this config's IDT.
    fn get_idtr_value(&self) -> DescriptorTable {
        DescriptorTable {
            base: self.idt_base_addr,
            limit: {
                let expected_length = if self.long_mode {
                    Self::IDT64_SIZE
                } else {
                    Self::IDT32_SIZE
                };
                assert_eq!(self.idt.len(), expected_length, "the IDT size should match",);
                // The IDT limit should be the number of bytes of IDT - 1.
                (self.idt.len() - 1)
                    .try_into()
                    .expect("the IDT limit should be within the range of u16")
            },
        }
    }

    /// Builds the GDTR value (base + limit) describing this config's GDT.
    fn get_gdtr_value(&self) -> DescriptorTable {
        DescriptorTable {
            base: self.gdt_base_addr,
            // GDT limit is also (size in bytes - 1).
            limit: (self.get_gdt_bytes().len() - 1)
                .try_into()
                .expect("the GDT limit should fit in 16 bits"),
        }
    }

    /// Returns the segment at `segment_index` with its `selector` set to the
    /// segment's byte offset within the encoded GDT.
    fn get_segment_register_value(&self, segment_index: u16) -> Segment {
        // Sum the encoded sizes of all preceding descriptors; 64-bit system
        // segments occupy 16 bytes, so offsets are not simply index * 8.
        let offset: usize = self
            .gdt
            .iter()
            .take(segment_index.into())
            .map(|segment| Self::segment_to_bytes(segment, self.long_mode).len())
            .sum();
        Segment {
            selector: offset
                .try_into()
                .expect("the offset should be within the range of u16"),
            ..self.gdt[usize::from(segment_index)]
        }
    }

    /// Writes the GDT, IDT, and 4-level page tables (identity-mapping via
    /// fixed pages at 0x9000..0xd000) into guest memory for long mode.
    pub fn configure_long_mode_memory(&self, vm: &mut dyn Vm) {
        let guest_mem = vm.get_memory();

        self.configure_gdt_memory(guest_mem);
        self.configure_idt_memory(guest_mem);

        // Setup paging
        let pml4_addr = GuestAddress(0x9000);
        let pdpte_addr = GuestAddress(0xa000);
        let pde_addr = GuestAddress(0xb000);
        let pte_addr = GuestAddress(0xc000);

        assert!(
            guest_mem.range_overlap(GuestAddress(0x9000), GuestAddress(0xd000)),
            "the memory range for page tables should be mapped."
        );

        // Pointing to PDPTE with present and RW flags
        guest_mem
            .write_at_addr(&(pdpte_addr.0 | 3).to_le_bytes(), pml4_addr)
            .expect("failed to write PML4 entry");

        // Pointing to PD with present and RW flags
        guest_mem
            .write_at_addr(&(pde_addr.0 | 3).to_le_bytes(), pdpte_addr)
            .expect("failed to write PDPTE entry");

        for i in 0..512 {
            // All pages are present and RW.
            let flags: u64 = if i == 0 {
                3
            } else {
                // The first 2MiB are 4K pages, the rest are 2M pages.
                0x83
            };
            let addr = if i == 0 { pte_addr.offset() } else { i << 21 };
            let pd_entry_bytes = (addr | flags).to_le_bytes();
            guest_mem
                .write_at_addr(
                    &pd_entry_bytes,
                    pde_addr.unchecked_add(i * mem::size_of::<u64>() as u64),
                )
                .expect("Failed to write PDE entry");
        }

        // Write the caller-provided PTE page covering the first 2MiB.
        guest_mem
            .write_at_addr(
                self.page_table
                    .as_ref()
                    .expect("page table must present for long mode")
                    .as_slice(),
                pte_addr,
            )
            .expect("Failed to write PTE entry");
    }

    /// Puts the vcpu directly into 64-bit long mode: writes the tables via
    /// `configure_long_mode_memory`, then sets segment registers, CR0/CR3/CR4
    /// and EFER accordingly.
    pub fn enter_long_mode(&self, vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm) {
        self.configure_long_mode_memory(vm);

        let mut sregs = vcpu.get_sregs().expect("failed to get sregs");

        sregs.gdt = self.get_gdtr_value();
        sregs.idt = self.get_idtr_value();
        sregs.cs = self.get_segment_register_value(self.code_segment_index);

        if let Some(task_segment_index) = self.task_segment_index {
            sregs.tr = self.get_segment_register_value(task_segment_index);
        }

        // Long mode
        let pml4_addr = GuestAddress(0x9000);
        sregs.cr0 |= 0x1 | 0x80000000; // PE & PG
        sregs.efer |= 0x100 | 0x400; // LME & LMA (Must be auto-enabled with CR0_PG)
        sregs.cr3 = pml4_addr.offset();
        sregs.cr4 |= 0x80 | 0x20; // PGE & PAE

        vcpu.set_sregs(&sregs).expect("failed to set sregs");
    }

    /// Writes the GDT and IDT for flat 32-bit protected mode (no paging).
    pub fn configure_flat_protected_mode_memory(&self, vm: &mut dyn Vm) {
        let guest_mem = vm.get_memory();

        self.configure_gdt_memory(guest_mem);
        self.configure_idt_memory(guest_mem);
    }

    /// Puts the vcpu into flat 32-bit protected mode with paging disabled.
    pub fn enter_protected_mode(&self, vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm) {
        self.configure_flat_protected_mode_memory(vm);

        let mut sregs = vcpu.get_sregs().expect("failed to get sregs");

        sregs.cs = self.get_segment_register_value(self.code_segment_index);
        sregs.gdt = self.get_gdtr_value();
        sregs.idt = self.get_idtr_value();

        assert!(
            self.task_segment_index.is_none(),
            "task segment not supported for protected mode yet."
        );

        assert!(
            self.page_table.is_none(),
            "setting page tables for protected mode is not supported yet"
        );
        // 32-bit protected mode, paging disabled
        sregs.cr0 |= 0x1; // PE
        sregs.cr0 &= !0x80000000; // ~PG

        vcpu.set_sregs(&sregs).expect("failed to set sregs");
    }

    /// Default long-mode config: zeroed IDT, a GDT of two null descriptors
    /// plus one flat 64-bit code segment, and a PTE page identity-mapping
    /// the first 2MiB with present+RW 4K pages.
    fn default_long_mode() -> Self {
        let page_table = (0u64..512)
            .flat_map(|page_frame_number| {
                let page_table_entry = LongModePageTableEntry {
                    present: true,
                    read_write: true,
                    ..LongModePageTableEntry::from_address(page_frame_number << 12)
                };
                u64::from(page_table_entry).as_bytes().to_owned()
            })
            .collect::<Box<[u8]>>()
            .try_into()
            .expect("the length of the slice must match");
        Self {
            idt_base_addr: DEFAULT_IDT_OFFSET,
            idt: vec![0; Self::IDT64_SIZE],
            gdt_base_addr: DEFAULT_GDT_OFFSET,
            gdt: vec![
                Segment::default(),
                Segment::default(),
                Self::default_code_segment_long_mode(),
            ],
            code_segment_index: 2,
            task_segment_index: None,
            page_table: Some(page_table),
            long_mode: true,
        }
    }

    /// Default protected-mode config: zeroed IDT, a GDT of two null
    /// descriptors plus one flat 32-bit code segment, and no page table.
    fn default_protected_mode() -> Self {
        Self {
            idt_base_addr: DEFAULT_IDT_OFFSET,
            idt: vec![0; Self::IDT32_SIZE],
            gdt_base_addr: DEFAULT_GDT_OFFSET,
            gdt: vec![
                Segment::default(),
                Segment::default(),
                Self::default_code_segment_protected_mode(),
            ],
            code_segment_index: 2,
            task_segment_index: None,
            page_table: None,
            long_mode: false,
        }
    }
}
811
812
// Guest code (16-bit real mode): AX = AX + BX, then HLT.
global_asm_data!(
    test_minimal_virtualization_code,
    ".code16",
    "add ax, bx",
    "hlt"
);
818
819
// This runs a minimal program under virtualization.
// It should require only the ability to execute instructions under virtualization, physical
// memory, the ability to get and set some guest VM registers, and intercepting HLT.
#[test]
fn test_minimal_virtualization() {
    let assembly = test_minimal_virtualization_code::data().to_vec();
    let setup = TestSetup {
        assembly: assembly.clone(),
        load_addr: GuestAddress(0x1000),
        initial_regs: Regs {
            rip: 0x1000,
            rax: 1,
            rbx: 2,
            rflags: 2, // Bit 1 of RFLAGS is always set.
            ..Default::default()
        },
        ..Default::default()
    };

    run_tests!(
        setup,
        |_, regs, _| {
            assert_eq!(regs.rax, 3); // 1 + 2

            // For VMEXIT caused by HLT, the hypervisor will automatically advance the rIP register.
            assert_eq!(regs.rip, 0x1000 + assembly.len() as u64);
        },
        // Stop the run loop on the first HLT exit.
        |_, exit: &_, _: &mut _, _: &mut _| -> bool { matches!(exit, VcpuExit::Hlt) }
    );
}
849
850
// Guest code (16-bit real mode): write AL to port 0x10, read AL from port
// 0x20, then AX = AX + BX and HLT.
global_asm_data!(
    test_io_exit_handler_code,
    ".code16",
    "out 0x10, al",
    "in al, 0x20",
    "add ax, bx",
    "hlt",
);
858
859
#[test]
fn test_io_exit_handler() {
    // Use the OUT/IN instructions, which cause an Io exit in order to
    // read/write data using a given port.
    let load_addr = GuestAddress(0x1000);
    let setup = TestSetup {
        assembly: test_io_exit_handler_code::data().to_vec(),
        load_addr,
        initial_regs: Regs {
            rip: load_addr.offset(),
            rax: 0x34, // Only AL (lower byte of RAX) is used
            rbx: 0x42,
            rflags: 2,
            ..Default::default()
        },
        ..Default::default()
    };

    let regs_matcher = |_, regs: &Regs, _: &_| {
        // The result in AX should be double the initial value of AX
        // plus the initial value of BX.
        assert_eq!(regs.rax, (0x34 * 2) + 0x42);
    };

    // Byte captured from the guest's OUT, echoed back (doubled) on IN.
    let cached_byte = AtomicU8::new(0);
    let exit_matcher =
        move |_, exit: &VcpuExit, vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {
            VcpuExit::Io => {
                vcpu.handle_io(&mut |IoParams { address, operation }| {
                    match operation {
                        IoOperation::Read(data) => {
                            assert_eq!(address, 0x20);
                            assert_eq!(data.len(), 1);
                            // The original number written below will be doubled and
                            // passed back.
                            data[0] = cached_byte.load(Ordering::SeqCst) * 2;
                        }
                        IoOperation::Write(data) => {
                            assert_eq!(address, 0x10);
                            assert_eq!(data.len(), 1);
                            assert_eq!(data[0], 0x34);
                            cached_byte.fetch_add(data[0], Ordering::SeqCst);
                        }
                    }
                })
                .expect("failed to set the data");
                false // Continue VM runloop
            }
            VcpuExit::Hlt => {
                true // Break VM runloop
            }
            r => panic!("unexpected exit reason: {r:?}"),
        };
    run_tests!(setup, regs_matcher, &exit_matcher);
}
914
915
// Guest code (16-bit real mode): REP INSB 5 bytes from port 0x80 into
// memory at 0x100, then REP OUTSB the same 5 bytes back to port 0x80.
global_asm_data!(
    test_io_rep_string_code,
    ".code16",
    "cld",
    "mov dx, 0x80", // read data from I/O port 80h
    "mov di, 0x100", // write data to memory address 0x100
    "mov cx, 5", // repeat 5 times
    "rep insb",
    "mov si, 0x100", // read data from memory address 0x100
    "mov dx, 0x80", // write data to I/O port 80h
    "mov cx, 5", // repeat 5 times
    "rep outsb",
    "mov cx, 0x5678",
    "hlt",
);
930
931
#[cfg(not(feature = "haxm"))]
#[test]
fn test_io_rep_string() {
    // Test the REP OUTS*/REP INS* string I/O instructions, which should call the IO handler
    // multiple times to handle the requested repeat count.
    let load_addr = GuestAddress(0x1000);
    let setup = TestSetup {
        assembly: test_io_rep_string_code::data().to_vec(),
        load_addr,
        initial_regs: Regs {
            rip: load_addr.offset(),
            rax: 0x1234,
            rflags: 2,
            ..Default::default()
        },
        ..Default::default()
    };

    let regs_matcher = |_, regs: &Regs, _: &_| {
        // The string I/O instructions should not modify AX.
        assert_eq!(regs.rax, 0x1234);
        assert_eq!(regs.rcx, 0x5678);
    };

    // Counters doubling as the data pattern: each read returns the read
    // count so far, and each write is expected to match the write count.
    let read_data = AtomicU8::new(0);
    let write_data = AtomicU8::new(0);
    let exit_matcher =
        move |_, exit: &VcpuExit, vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| match exit {
            VcpuExit::Io => {
                vcpu.handle_io(&mut |IoParams { address, operation }| {
                    match operation {
                        IoOperation::Read(data) => {
                            assert_eq!(address, 0x80);
                            assert_eq!(data.len(), 1);
                            // Return 0, 1, 2, 3, 4 for subsequent reads.
                            data[0] = read_data.fetch_add(1, Ordering::SeqCst);
                        }
                        IoOperation::Write(data) => {
                            assert_eq!(address, 0x80);
                            assert_eq!(data.len(), 1);
                            // Expect 0, 1, 2, 3, 4 to be written.
                            let expected_write = write_data.fetch_add(1, Ordering::SeqCst);
                            assert_eq!(data[0], expected_write);
                        }
                    }
                })
                .expect("failed to set the data");
                false // Continue VM runloop
            }
            VcpuExit::Hlt => {
                // Verify 5 reads and writes occurred.
                assert_eq!(read_data.load(Ordering::SeqCst), 5);
                assert_eq!(write_data.load(Ordering::SeqCst), 5);

                // Verify the data that should have been written to memory by REP INSB.
                let mem = vm.get_memory();
                let mut data = [0u8; 5];
                mem.read_exact_at_addr(&mut data, GuestAddress(0x100))
                    .unwrap();
                assert_eq!(data, [0, 1, 2, 3, 4]);

                true // Break VM runloop
            }
            r => panic!("unexpected exit reason: {r:?}"),
        };
    run_tests!(setup, regs_matcher, &exit_matcher);
}
998
999
// Guest code (16-bit real mode): store AL to [EBX], load AL from [ECX],
// then HLT. Both addresses are chosen by the test to be outside mapped
// guest memory so each access triggers an MMIO exit.
global_asm_data!(
    test_mmio_exit_cross_page_code,
    ".code16",
    "mov byte ptr [ebx], al",
    "mov al, byte ptr [ecx]",
    "hlt",
);
1006
1007
// This test is similar to mmio_fetch_memory.rs (remove eventually)
// but applies to all hypervisors.
#[test]
fn test_mmio_exit_cross_page() {
    let page_size = 4096u64;
    let load_addr = GuestAddress(page_size - 1); // Last byte of the first page

    let setup = TestSetup {
        assembly: test_mmio_exit_cross_page_code::data().to_vec(),
        load_addr,
        mem_size: 0x2000,
        initial_regs: Regs {
            rip: load_addr.offset(),
            rax: 0x33,
            rbx: 0x3000, // Unmapped: store target (MMIO write).
            rcx: 0x3010, // Unmapped: load source (MMIO read).
            rflags: 2,
            ..Default::default()
        },
        ..Default::default()
    };

    let regs_matcher = |_, regs: &Regs, _: &_| {
        assert_eq!(regs.rax, 0x66, "Should match the MMIO read bytes below");
    };

    let exit_matcher = |_, exit: &VcpuExit, vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {
        VcpuExit::Mmio => {
            vcpu.handle_mmio(&mut |IoParams { address, operation }| {
                match operation {
                    IoOperation::Read(data) => {
                        match (address, data.len()) {
                            // First MMIO read asks to load the first 8 bytes
                            // of a new execution page, when an instruction
                            // crosses page boundary.
                            // Return the rest of instructions that are
                            // supposed to be on the second page.
                            (0x1000, 8) => {
                                // Ensure this instruction is the first read
                                // in the sequence.
                                data.copy_from_slice(&[0x88, 0x03, 0x67, 0x8a, 0x01, 0xf4, 0, 0]);
                                Ok(())
                            }
                            // Second MMIO read is a regular read from an
                            // unmapped memory (pointed to by initial ECX).
                            (0x3010, 1) => {
                                data.copy_from_slice(&[0x66]);
                                Ok(())
                            }
                            _ => {
                                panic!("invalid address({:#x})/size({})", address, data.len())
                            }
                        }
                    }
                    IoOperation::Write(data) => {
                        assert_eq!(address, 0x3000);
                        assert_eq!(data[0], 0x33);
                        assert_eq!(data.len(), 1);
                        Ok(())
                    }
                }
            })
            .expect("failed to set the data");
            false // Continue VM runloop
        }
        VcpuExit::Hlt => {
            true // Break VM runloop
        }
        r => panic!("unexpected exit reason: {r:?}"),
    };

    run_tests!(setup, regs_matcher, exit_matcher);
}
1080
1081
// Guest code (16-bit real mode): read a byte from ES:[BX], increment it,
// write it back to ES:[BX], then HLT.
global_asm_data!(
    test_mmio_exit_readonly_memory_code,
    ".code16",
    "mov al,BYTE PTR es:[bx]",
    "add al, 0x1",
    "mov BYTE PTR es:[bx], al",
    "hlt",
);
1089
1090
#[test]
#[cfg(any(target_os = "android", target_os = "linux"))] // Not working for WHXP yet.
fn test_mmio_exit_readonly_memory() {
    // Read from read-only memory and then write back to it,
    // which should trigger an MMIO exit.
    let setup = TestSetup {
        assembly: test_mmio_exit_readonly_memory_code::data().to_vec(),
        load_addr: GuestAddress(0x1000),
        mem_size: 0x2000,
        initial_regs: Regs {
            rip: 0x1000,
            rax: 1,
            rbx: 0,
            rflags: 2,
            ..Default::default()
        },
        extra_vm_setup: Some(Box::new(|vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {
            // Add a read-only region of memory to the VM, at address 0x5000.
            let prot_mem_size = 0x1000;
            let prot_mem =
                SharedMemory::new("test", prot_mem_size).expect("failed to create shared memory");
            let mmap_ro = MemoryMappingBuilder::new(prot_mem_size as usize)
                .from_shared_memory(&prot_mem)
                .build()
                .expect("failed to create memory mapping");
            // Seed the region with 0x66 via a separate writable mapping of
            // the same shared memory.
            mmap_ro
                .write_obj(0x66, 0)
                .expect("failed writing data to ro memory");
            // Register the region read-only (read_only = true).
            vm.add_memory_region(
                GuestAddress(0x5000),
                Box::new(
                    MemoryMappingBuilder::new(prot_mem_size as usize)
                        .from_shared_memory(&prot_mem)
                        .build()
                        .expect("failed to create memory mapping"),
                ),
                true,
                false,
                CacheCoherent,
            )
            .expect("failed to register memory");

            // Set up segments needed by the assembly addressing above.
            let mut sregs = vcpu.get_sregs().expect("get sregs failed");
            sregs.cs.s = 1;
            sregs.cs.type_ = 0b1011;
            // ES points at the read-only region so ES:[BX] hits 0x5000.
            sregs.es.base = 0x5000;
            sregs.es.selector = 0;
            sregs.es.s = 1;
            sregs.es.type_ = 0b1011;

            vcpu.set_sregs(&sregs).expect("set sregs failed");
        })),
        ..Default::default()
    };

    let exit_matcher = |_, exit: &VcpuExit, vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {
        VcpuExit::Mmio => {
            vcpu.handle_mmio(&mut |IoParams { address, operation }| match operation {
                IoOperation::Read(_) => {
                    panic!("unexpected mmio read call");
                }
                IoOperation::Write(data) => {
                    // The guest read 0x66 and incremented it before writing.
                    assert_eq!(data.len(), 1);
                    assert_eq!(address, 0x5000);
                    assert_eq!(data[0], 0x67);
                    Ok(())
                }
            })
            .expect("failed to set the data");
            false // Continue VM runloop
        }
        VcpuExit::Hlt => {
            true // Break VM runloop
        }
        r => panic!("unexpected exit reason: {r:?}"),
    };

    run_tests!(
        setup,
        |_, regs, _| {
            assert_eq!(regs.rax, 0x67);
        },
        exit_matcher
    );
}
1176
1177
#[rustfmt::skip::macros(global_asm_data)]
global_asm_data!(
    test_cpuid_exit_handler_code,
    ".code16",
    "cpuid",
    "hlt",
);

// Verifies that executing CPUID in the guest either completes in-guest (most
// hypervisors) or surfaces a Cpuid exit (WHPX), and that HAXM reports the
// hypervisor-present bit (ECX bit 31) for leaf EAX=1.
#[test]
fn test_cpuid_exit_handler() {
    let setup = TestSetup {
        assembly: test_cpuid_exit_handler_code::data().to_vec(),
        load_addr: GuestAddress(0x1000),
        initial_regs: Regs {
            rip: 0x1000,
            rax: 1, // CPUID input EAX=1 to get virtualization bits.
            rflags: 2,
            ..Default::default()
        },
        ..Default::default()
    };

    let regs_matcher = move |hypervisor_type: HypervisorType, regs: &Regs, _: &_| {
        // Only HAXM's final register state is checked; other hypervisors are
        // covered by the exit matcher below.
        if hypervisor_type == HypervisorType::Haxm {
            let hypervisor_bit = regs.rcx & (1 << 31) != 0;
            assert!(hypervisor_bit, "Hypervisor bit in CPUID should be set!");
            // cpuid is 2 bytes + 1-byte hlt => RIP should be 0x1003.
            assert_eq!(regs.rip, 0x1003, "CPUID did not execute correctly.");
        }
    };

    let exit_matcher =
        |hypervisor_type, exit: &VcpuExit, _vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| {
            match hypervisor_type {
                // WHPX exits to the VMM with a dedicated Cpuid exit.
                HypervisorType::Whpx => match exit {
                    VcpuExit::Cpuid { entry } => {
                        println!("Got Cpuid {entry:?}");
                        true // Break runloop
                    }
                    r => panic!("unexpected exit reason: {r:?}"),
                },
                // Everyone else handles cpuid in-kernel and runs to the hlt.
                _ => match exit {
                    VcpuExit::Hlt => {
                        true // Break VM runloop
                    }
                    r => panic!("unexpected exit reason: {r:?}"),
                },
            }
        };

    run_tests!(setup, regs_matcher, exit_matcher);
}
global_asm_data!(
    test_control_register_access_invalid_code,
    ".code16",
    // Test setting an unused bit in addition to the Protected Mode Enable and Monitor co-processor
    // bits, which causes a triple fault and hence the invalid bit should never make it to RCX.
    "mov cr0, eax",
    "mov ecx, cr0",
    "hlt",
);

// Writes an invalid CR0 value (reserved bit 31 set via rax = 0x80000011) and
// verifies the guest triple-faults before the follow-up read of CR0 into ECX
// can execute.
#[test]
fn test_control_register_access_invalid() {
    let setup = TestSetup {
        assembly: test_control_register_access_invalid_code::data().to_vec(),
        load_addr: GuestAddress(0x1000),
        initial_regs: Regs {
            rip: 0x1000,
            // PE | MP plus reserved bit 31 — an illegal CR0 combination.
            rax: 0x80000011,
            rcx: 0,
            rflags: 2,
            ..Default::default()
        },
        ..Default::default()
    };

    // Matcher to check that the RAX value never made it to RCX.
    let regs_matcher = move |_: HypervisorType, regs: &Regs, _: &_| {
        assert_eq!(
            regs.rcx, 0,
            "RCX value mismatch: expected 0, found {:X}",
            regs.rcx
        )
    };

    let exit_matcher =
        move |hypervisor_type, exit: &VcpuExit, _vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| {
            // The triple fault is reported differently per hypervisor:
            // KVM/HAXM report Shutdown, others UnrecoverableException.
            match hypervisor_type {
                HypervisorType::Kvm | HypervisorType::Haxm => {
                    match exit {
                        VcpuExit::Shutdown(_) => {
                            true // Break VM runloop
                        }
                        r => panic!("unexpected exit reason: {r:?}"),
                    }
                }
                _ => {
                    match exit {
                        VcpuExit::UnrecoverableException => {
                            true // Break VM runloop
                        }
                        r => panic!("unexpected exit reason: {r:?}"),
                    }
                }
            }
        };
    run_tests!(setup, regs_matcher, exit_matcher);
}
global_asm_data!(
1288
test_control_register_access_valid_code,
1289
// Set the 0th bit (Protected Mode Enable) of CR0, which should succeed.
1290
".code16",
1291
"mov cr0, eax",
1292
"mov eax, cr0",
1293
"hlt",
1294
);
1295
1296
#[test]
1297
fn test_control_register_access_valid() {
1298
let setup = TestSetup {
1299
assembly: test_control_register_access_invalid_code::data().to_vec(),
1300
load_addr: GuestAddress(0x1000),
1301
initial_regs: Regs {
1302
rip: 0x1000,
1303
rax: 0x1,
1304
rflags: 2,
1305
..Default::default()
1306
},
1307
..Default::default()
1308
};
1309
1310
// Matcher to check the final state of EAX after reading from CR0
1311
let regs_matcher = |_: HypervisorType, regs: &Regs, _: &_| {
1312
assert!(
1313
(regs.rax & 0x1) != 0,
1314
"CR0 value mismatch: expected the 0th bit to be set, found {:X}",
1315
regs.rax
1316
);
1317
};
1318
1319
let exit_matcher =
1320
move |_, exit: &VcpuExit, _vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {
1321
VcpuExit::Hlt => {
1322
true // Break VM runloop
1323
}
1324
r => panic!("unexpected exit reason: {r:?}"),
1325
};
1326
run_tests!(setup, regs_matcher, exit_matcher);
1327
}
1328
1329
global_asm_data!(
1330
test_debug_register_access_code,
1331
".code16",
1332
"mov dr2, eax",
1333
"mov ebx, dr2",
1334
"hlt",
1335
);
1336
1337
#[test]
1338
fn test_debug_register_access() {
1339
let setup = TestSetup {
1340
assembly: test_debug_register_access_code::data().to_vec(),
1341
load_addr: GuestAddress(0x1000),
1342
initial_regs: Regs {
1343
rip: 0x1000,
1344
rax: 0x1234,
1345
rflags: 2,
1346
..Default::default()
1347
},
1348
..Default::default()
1349
};
1350
1351
let regs_matcher = |_: HypervisorType, regs: &Regs, _: &_| {
1352
assert_eq!(
1353
regs.rbx, 0x1234,
1354
"DR2 value mismatch: expected 0x1234, found {:X}",
1355
regs.rbx
1356
);
1357
};
1358
1359
let exit_matcher = |_, exit: &VcpuExit, _vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {
1360
VcpuExit::Hlt => {
1361
true // Break VM runloop
1362
}
1363
r => panic!("unexpected exit reason: {r:?}"),
1364
};
1365
1366
run_tests!(setup, regs_matcher, exit_matcher);
1367
}
1368
1369
// This test only succeeds (by failing Vcpu::Run) on haxm.
#[cfg(all(windows, feature = "haxm"))]
#[test]
fn test_msr_access_invalid() {
    let msr_index = 0xC0000080; // EFER MSR

    let setup = TestSetup {
        /*
        0:  0f 32        rdmsr
        2:  83 c8 02     or ax,0x2 (1st bit is reserved)
        5:  0f 30        wrmsr
        7:  f4           hlt
        */
        assembly: vec![0x0F, 0x32, 0x83, 0xC8, 0x02, 0x0F, 0x30, 0xF4],
        mem_size: 0x5000,
        load_addr: GuestAddress(0x1000),
        initial_regs: Regs {
            rip: 0x1000,
            rcx: msr_index, // MSR index to read/write
            rflags: 2,
            ..Default::default()
        },
        ..Default::default()
    };

    // Writing a reserved EFER bit is expected to kill the guest rather than
    // complete the wrmsr.
    let exit_matcher = |_, exit: &VcpuExit, _vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {
        VcpuExit::Shutdown(..) => {
            true // Break VM runloop
        }
        r => panic!("unexpected exit reason: {r:?}"),
    };

    run_tests!(
        setup,
        |_, regs, _| {
            assert_eq!(regs.rip, 0x1005); // Should stop at the wrmsr
        },
        exit_matcher
    );
}
global_asm_data!(
    test_msr_access_valid_code,
    ".code16",
    "rdmsr",
    "add ax, 1",
    "wrmsr",
    "hlt",
);

// Reads the TSC MSR, increments the low word, and writes it back — a legal
// MSR round trip that should run through to the hlt.
#[test]
fn test_msr_access_valid() {
    let msr_index = 0x10; // TSC MSR index

    let setup = TestSetup {
        assembly: test_msr_access_valid_code::data().to_vec(),
        load_addr: GuestAddress(0x1000),
        initial_regs: Regs {
            rip: 0x1000,
            rcx: msr_index, // MSR index for TSC
            rflags: 0x2,
            ..Default::default()
        },
        ..Default::default()
    };

    let regs_matcher = move |_: HypervisorType, regs: &Regs, _: &_| {
        // rdmsr loads EDX:EAX; a running TSC low word being exactly zero
        // after the +1 would be extraordinary.
        assert!(regs.rax > 0x0, "TSC value should be >0");
        assert_eq!(regs.rip, 0x1008, "Should stop after the hlt instruction");
    };

    let exit_matcher = |_, exit: &VcpuExit, _vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {
        VcpuExit::Hlt => {
            true // Break VM runloop
        }
        r => panic!("unexpected exit reason: {r:?}"),
    };
    run_tests!(setup, regs_matcher, exit_matcher);
}
#[rustfmt::skip::macros(global_asm_data)]
1450
global_asm_data!(
1451
test_getsec_instruction_code,
1452
".code16",
1453
"getsec",
1454
"hlt",
1455
);
1456
1457
#[cfg(not(unix))]
1458
#[test]
1459
fn test_getsec_instruction() {
1460
let setup = TestSetup {
1461
assembly: test_getsec_instruction_code::data().to_vec(),
1462
load_addr: GuestAddress(0x1000),
1463
initial_regs: Regs {
1464
rip: 0x1000,
1465
rflags: 2,
1466
..Default::default()
1467
},
1468
extra_vm_setup: Some(Box::new(|vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {
1469
ModeConfig::default_long_mode().enter_long_mode(vcpu, vm);
1470
})),
1471
..Default::default()
1472
};
1473
1474
let regs_matcher =
1475
move |hypervisor_type: HypervisorType, regs: &Regs, _: &_| match hypervisor_type {
1476
HypervisorType::Whpx => {}
1477
HypervisorType::Haxm => {}
1478
_ => {
1479
assert_eq!(regs.rip, 0x1000, "GETSEC; expected RIP at 0x1002");
1480
}
1481
};
1482
1483
let exit_matcher =
1484
move |hypervisor_type, exit: &VcpuExit, _vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| {
1485
match hypervisor_type {
1486
HypervisorType::Whpx => {
1487
match exit {
1488
VcpuExit::UnrecoverableException => {
1489
true // Break VM runloop
1490
}
1491
r => panic!("unexpected exit reason: {r:?}"),
1492
}
1493
}
1494
_ => {
1495
match exit {
1496
VcpuExit::Shutdown(_) => {
1497
true // Break VM runloop
1498
}
1499
r => panic!("unexpected exit reason: {r:?}"),
1500
}
1501
}
1502
}
1503
};
1504
1505
run_tests!(setup, regs_matcher, exit_matcher);
1506
}
1507
1508
#[rustfmt::skip::macros(global_asm_data)]
global_asm_data!(
    test_invd_instruction_code,
    ".code16",
    "invd",
    "hlt",
);

// Executes INVD (invalidate caches without writeback). Most hypervisors let
// the guest run to the hlt; HAXM shuts the guest down instead.
#[test]
fn test_invd_instruction() {
    let setup = TestSetup {
        assembly: test_invd_instruction_code::data().to_vec(),
        load_addr: GuestAddress(0x1000),
        initial_regs: Regs {
            rip: 0x1000,
            rflags: 2,
            ..Default::default()
        },
        ..Default::default()
    };

    let regs_matcher =
        move |hypervisor_type: HypervisorType, regs: &Regs, _: &_| match hypervisor_type {
            HypervisorType::Haxm => {}
            _ => {
                // invd (2 bytes) + hlt (1 byte) => RIP 0x1003 after the hlt.
                assert_eq!(regs.rip, 0x1003, "INVD; expected RIP at 0x1003");
            }
        };
    let exit_matcher =
        move |hypervisor_type, exit: &VcpuExit, _vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| {
            match hypervisor_type {
                HypervisorType::Haxm => {
                    match exit {
                        VcpuExit::Shutdown(_) => {
                            true // Break VM runloop
                        }
                        r => panic!("unexpected exit reason: {r:?}"),
                    }
                }
                _ => {
                    match exit {
                        VcpuExit::Hlt => {
                            true // Break VM runloop
                        }
                        r => panic!("unexpected exit reason: {r:?}"),
                    }
                }
            }
        };

    run_tests!(setup, regs_matcher, exit_matcher);
}
global_asm_data!(
    test_xsetbv_instruction_code,
    ".code16",
    "mov eax, cr4",
    // Set the OSXSAVE bit in CR4 (bit 9)
    "or ax, 0x200",
    "mov cr4, eax",
    "xgetbv",
    "xsetbv",
    "hlt",
);

// Enables OSXSAVE in CR4 and then executes xgetbv/xsetbv on XCR0.
#[test]
fn test_xsetbv_instruction() {
    let setup = TestSetup {
        assembly: test_xsetbv_instruction_code::data().to_vec(),
        load_addr: GuestAddress(0x1000),
        initial_regs: Regs {
            rip: 0x1000,
            rax: 1, // Set bit 0 in EAX
            rdx: 0, // XSETBV also uses EDX:EAX, must be initialized
            rcx: 0, // XCR0
            rflags: 2,
            ..Default::default()
        },
        extra_vm_setup: Some(Box::new(|vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {
            ModeConfig::default_long_mode().enter_long_mode(vcpu, vm);
        })),
        ..Default::default()
    };

    let regs_matcher =
        move |hypervisor_type: HypervisorType, regs: &Regs, _: &_| match hypervisor_type {
            // Register state is not checked on these hypervisors; they are
            // covered by the exit matcher instead.
            HypervisorType::Whpx => {}
            HypervisorType::Haxm => {}
            HypervisorType::Kvm => {}
            _ => {
                assert_eq!(regs.rip, 0x100D, "XSETBV; expected RIP at 0x100D");
            }
        };

    let exit_matcher = |_, exit: &VcpuExit, _vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| {
        match exit {
            VcpuExit::Mmio => {
                true // Break VM runloop
            }
            r => panic!("unexpected exit reason: {r:?}"),
        }
    };

    run_tests!(setup, regs_matcher, exit_matcher);
}
global_asm_data!(
    test_invept_instruction_code,
    ".code16",
    "invept eax, [eax]",
    "hlt",
);

// Executes INVEPT (a VMX instruction) from the guest, which is not permitted
// and should terminate the guest rather than complete.
#[test]
fn test_invept_instruction() {
    let setup = TestSetup {
        assembly: test_invept_instruction_code::data().to_vec(),
        load_addr: GuestAddress(0x1000),
        initial_regs: Regs {
            rax: 0x2000,
            rip: 0x1000,
            rflags: 2,
            ..Default::default()
        },
        extra_vm_setup: Some(Box::new(|vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {
            ModeConfig::default_long_mode().enter_long_mode(vcpu, vm);
        })),
        ..Default::default()
    };

    let regs_matcher =
        move |hypervisor_type: HypervisorType, regs: &Regs, _: &_| match hypervisor_type {
            HypervisorType::Whpx => {}
            HypervisorType::Haxm => {}
            HypervisorType::Kvm => {}
            _ => {
                assert_eq!(regs.rip, 0x1005, "invept; expected RIP at 0x1005");
            }
        };

    let exit_matcher =
        move |hypervisor_type, exit: &VcpuExit, _vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| {
            // WHPX reports the failure as UnrecoverableException; the rest
            // report Shutdown.
            match hypervisor_type {
                HypervisorType::Whpx => {
                    match exit {
                        VcpuExit::UnrecoverableException => {
                            true // Break VM runloop
                        }
                        r => panic!("unexpected exit reason: {r:?}"),
                    }
                }
                _ => {
                    match exit {
                        VcpuExit::Shutdown(_) => {
                            true // Break VM runloop
                        }
                        r => panic!("unexpected exit reason: {r:?}"),
                    }
                }
            }
        };

    run_tests!(setup, regs_matcher, exit_matcher);
}
global_asm_data!(
    test_invvpid_instruction_code,
    ".code16",
    "invvpid eax, [eax]",
    "hlt",
);

// TODO(b/342183625): invvpid instruction is not valid in real mode. Reconsider how we should write
// this test.
#[cfg(not(unix))]
#[test]
fn test_invvpid_instruction() {
    let setup = TestSetup {
        assembly: test_invvpid_instruction_code::data().to_vec(),
        load_addr: GuestAddress(0x1000),
        initial_regs: Regs {
            rip: 0x1000,
            rax: 0x1500,
            rflags: 2,
            ..Default::default()
        },
        extra_vm_setup: Some(Box::new(|vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {
            ModeConfig::default_long_mode().enter_long_mode(vcpu, vm);
        })),
        ..Default::default()
    };

    // INVVPID must fault without advancing, so RIP stays at the entry point.
    let regs_matcher = move |_, regs: &Regs, _: &_| {
        assert_eq!(regs.rip, 0x1000, "INVVPID; expected RIP at 0x1000");
    };

    let exit_matcher =
        move |_, exit: &VcpuExit, _vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {
            // Accept any of the failure exits the various hypervisors report.
            VcpuExit::Mmio | VcpuExit::Shutdown(_) | VcpuExit::InternalError => {
                true // Break VM runloop
            }
            r => panic!("unexpected exit reason: {r:?}"),
        };

    run_tests!(setup, regs_matcher, exit_matcher);
}
// Executes each VMX lifecycle instruction from the guest. None of them is
// allowed from a nested guest here, so each should end the run loop with a
// hypervisor-specific failure exit.
#[test]
fn test_vm_instruction_set() {
    let instructions = vec![
        // NOTE(review): VMCALL's expected RIP (0x1000, i.e. unadvanced) differs
        // from the other 3-byte encodings (0x1003) — confirm this asymmetry is
        // intentional.
        (vec![0x0F, 0x01, 0xC1], 0x1000, "VMCALL"), // VMCALL
        (vec![0x66, 0x0F, 0xC7, 0x30], 0x1004, "VMCLEAR"), // VMCLEAR
        (vec![0x0F, 0x01, 0xC2], 0x1003, "VMLAUNCH"), // VMLAUNCH
        (vec![0x0F, 0xC7, 0x30], 0x1003, "VMPTRLD"), // VMPTRLD
        (vec![0x0F, 0xC7, 0x31], 0x1003, "VMPTRST"), // VMPTRST
        (vec![0x0F, 0x01, 0xC3], 0x1003, "VMRESUME"), // VMRESUME
        (vec![0x0F, 0x01, 0xC4], 0x1003, "VMXOFF"), // VMXOFF
        // NOTE(review): this row reuses the VMXOFF encoding (0F 01 C4); the
        // actual VMXON encoding is F3 0F C7 /6 — verify whether VMXON was
        // meant to be encoded here.
        (vec![0x0F, 0x01, 0xC4], 0x1003, "VMXON"), // VMXON
    ];

    for (bytes, expected_rip, name) in instructions {
        let mut assembly = bytes;
        assembly.push(0xF4); // Append HLT to each instruction set

        let setup = TestSetup {
            assembly,
            load_addr: GuestAddress(0x1000),
            initial_regs: Regs {
                rip: 0x1000,
                rflags: 2,
                ..Default::default()
            },
            ..Default::default()
        };

        let regs_matcher =
            move |hypervisor_type: HypervisorType, regs: &Regs, _: &_| match hypervisor_type {
                HypervisorType::Whpx => {}
                HypervisorType::Kvm => {}
                HypervisorType::Haxm => {}
                _ => {
                    assert_eq!(
                        regs.rip, expected_rip,
                        "{name}; expected RIP at {expected_rip}"
                    );
                }
            };

        let exit_matcher =
            |hypervisor_type, exit: &VcpuExit, _vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| {
                match hypervisor_type {
                    HypervisorType::Whpx => {
                        match exit {
                            VcpuExit::Mmio => {
                                true // Break VM runloop
                            }
                            r => panic!("unexpected exit reason: {r:?}"),
                        }
                    }
                    // KVM accepts any exit here.
                    HypervisorType::Kvm => {
                        true // Break VM runloop
                    }
                    _ => {
                        match exit {
                            VcpuExit::Shutdown(_) => {
                                true // Break VM runloop
                            }
                            r => panic!("unexpected exit reason: {r:?}"),
                        }
                    }
                }
            };

        run_tests!(setup, regs_matcher, exit_matcher);
    }
}
#[rustfmt::skip::macros(global_asm_data)]
global_asm_data!(
    test_software_interrupt_code,
    "int 0x80",
    "hlt",
);

// Executes `int 0x80` with no IDT installed; with no valid handler the
// hypervisors end the run loop with an Mmio or Shutdown exit.
#[test]
fn test_software_interrupt() {
    let start_addr = 0x1000;
    let setup = TestSetup {
        assembly: test_software_interrupt_code::data().to_vec(),
        load_addr: GuestAddress(0x1000),
        initial_regs: Regs {
            rip: start_addr,
            rflags: 2,
            ..Default::default()
        },
        ..Default::default()
    };

    let regs_matcher =
        move |hypervisor_type: HypervisorType, regs: &Regs, _: &_| match hypervisor_type {
            HypervisorType::Whpx => {}
            HypervisorType::Haxm => {}
            HypervisorType::Kvm => {}
            _ => {
                // Expect RIP just past the end of the loaded code.
                let expect_rip_addr = start_addr
                    + u64::try_from(test_software_interrupt_code::data().len())
                        .expect("the code length should within the range of u64");
                assert_eq!(
                    regs.rip, expect_rip_addr,
                    "Expected RIP at {expect_rip_addr:#x}"
                );
            }
        };

    let exit_matcher =
        |hypervisor_type, exit: &VcpuExit, _vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| {
            match hypervisor_type {
                HypervisorType::Kvm | HypervisorType::Whpx => {
                    match exit {
                        VcpuExit::Mmio => {
                            true // Break VM runloop
                        }
                        r => panic!("unexpected exit reason: {r:?}"),
                    }
                }
                _ => {
                    match exit {
                        VcpuExit::Shutdown(_) => {
                            true // Break VM runloop
                        }
                        r => panic!("unexpected exit reason: {r:?}"),
                    }
                }
            }
        };

    run_tests!(setup, regs_matcher, exit_matcher);
}
#[rustfmt::skip::macros(global_asm_data)]
1848
global_asm_data!(
1849
test_rdtsc_instruction_code,
1850
".code16",
1851
"rdtsc",
1852
"hlt",
1853
);
1854
1855
#[test]
1856
fn test_rdtsc_instruction() {
1857
let setup = TestSetup {
1858
assembly: test_rdtsc_instruction_code::data().to_vec(),
1859
load_addr: GuestAddress(0x1000),
1860
initial_regs: Regs {
1861
rip: 0x1000,
1862
rflags: 2,
1863
..Default::default()
1864
},
1865
..Default::default()
1866
};
1867
1868
// This matcher checks that the timestamp counter has been incremented and read into EAX and EDX
1869
let regs_matcher = |_: HypervisorType, regs: &Regs, _: &_| {
1870
assert!(
1871
regs.rax != 0 || regs.rdx != 0,
1872
"RDTSC returned a zero value, which is unlikely."
1873
);
1874
};
1875
1876
let exit_matcher = |_: HypervisorType,
1877
exit: &VcpuExit,
1878
_: &mut dyn VcpuX86_64,
1879
_: &mut dyn Vm| { matches!(exit, VcpuExit::Hlt) };
1880
1881
run_tests!(setup, regs_matcher, exit_matcher);
1882
}
1883
1884
global_asm_data!(
    test_register_access_code,
    ".code16",
    "xchg ax, bx",
    "xchg cx, dx",
    "xchg sp, bp",
    "xchg si, di",
    "hlt",
);

// This tests that we can write and read GPRs to/from the VM.
//
// The guest swaps each register pair, so the final values must be the initial
// values with each pair exchanged.
#[test]
fn test_register_access() {
    let start_addr = 0x1000;
    let setup = TestSetup {
        assembly: test_register_access_code::data().to_vec(),
        load_addr: GuestAddress(start_addr),
        initial_regs: Regs {
            rip: start_addr,
            rax: 2,
            rbx: 1,
            rcx: 4,
            rdx: 3,
            rsp: 6,
            rbp: 5,
            rsi: 8,
            rdi: 7,
            rflags: 2,
            ..Default::default()
        },
        ..Default::default()
    };

    run_tests!(
        setup,
        |_, regs, _| {
            // Each pair (ax/bx, cx/dx, sp/bp, si/di) must have been exchanged.
            assert_eq!(regs.rax, 1);
            assert_eq!(regs.rbx, 2);
            assert_eq!(regs.rcx, 3);
            assert_eq!(regs.rdx, 4);
            assert_eq!(regs.rsp, 5);
            assert_eq!(regs.rbp, 6);
            assert_eq!(regs.rsi, 7);
            assert_eq!(regs.rdi, 8);
            // RIP should sit just past the hlt at the end of the code.
            assert_eq!(
                regs.rip,
                start_addr + test_register_access_code::data().len() as u64
            );
        },
        |_, exit, _, _: &mut dyn Vm| matches!(exit, VcpuExit::Hlt)
    );
}
global_asm_data!(
    test_flags_register_code,
    ".code16",
    "jnz fin",
    "test ax, ax",
    "fin:",
    "hlt",
);

// This tests that we can get/set the flags register from the VMM.
//
// With ZF preset by the VMM the jnz is NOT taken, so the guest executes
// `test ax, ax` with ax = 0xffffffff, clearing ZF and setting SF.
#[test]
fn test_flags_register() {
    let start_addr = 0x1000;
    let setup = TestSetup {
        assembly: test_flags_register_code::data().to_vec(),
        load_addr: GuestAddress(start_addr),
        initial_regs: Regs {
            rip: start_addr,
            rax: 0xffffffff,
            rflags: 0x42, // zero flag set, sign flag clear
            ..Default::default()
        },
        ..Default::default()
    };

    run_tests!(
        setup,
        |_, regs, _| {
            assert_eq!(regs.rflags & 0x40, 0); // zero flag is clear
            assert_ne!(regs.rflags & 0x80, 0); // sign flag is set
            assert_eq!(
                regs.rip,
                start_addr + test_flags_register_code::data().len() as u64
            );
        },
        |_, exit, _, _: &mut dyn Vm| matches!(exit, VcpuExit::Hlt)
    );
}
global_asm_data!(
    test_vmm_set_segs_code,
    ".code16",
    "mov ax, ds:0",
    "mov bx, es:0",
    "mov cx, fs:0",
    "mov dx, gs:0",
    "mov sp, ss:0",
    "hlt",
);

// This tests that the VMM can set segment registers and have them used by the VM.
//
// Each segment base is pointed 4 bytes further into a known memory pattern, so
// each 16-bit load picks up a distinct, predictable pair of bytes.
#[test]
fn test_vmm_set_segs() {
    let start_addr = 0x1000;
    let data_addr = 0x2000;
    let setup = TestSetup {
        assembly: test_vmm_set_segs_code::data().to_vec(),
        load_addr: GuestAddress(start_addr),
        mem_size: 0x4000,
        initial_regs: Regs {
            rip: start_addr,
            rflags: 0x42,
            ..Default::default()
        },
        // simple memory pattern where the value of a byte is (addr - data_addr + 1)
        memory_initializations: vec![(GuestAddress(data_addr), (1..=32).collect())],
        extra_vm_setup: Some(Box::new(move |vcpu: &mut dyn VcpuX86_64, _| {
            let mut sregs = vcpu.get_sregs().expect("failed to get sregs");
            sregs.ds.base = data_addr;
            sregs.ds.selector = 0;
            sregs.es.base = data_addr + 4;
            sregs.es.selector = 0;
            sregs.fs.base = data_addr + 8;
            sregs.fs.selector = 0;
            sregs.gs.base = data_addr + 12;
            sregs.gs.selector = 0;
            sregs.ss.base = data_addr + 16;
            sregs.ss.selector = 0;
            vcpu.set_sregs(&sregs).expect("failed to set sregs");
        })),
        ..Default::default()
    };

    run_tests!(
        setup,
        |_, regs, sregs| {
            // The segment bases we set must survive the round trip.
            assert_eq!(sregs.ds.base, data_addr);
            assert_eq!(sregs.es.base, data_addr + 4);
            assert_eq!(sregs.fs.base, data_addr + 8);
            assert_eq!(sregs.gs.base, data_addr + 12);
            assert_eq!(sregs.ss.base, data_addr + 16);

            // ax was loaded from ds:0, which has offset 0, so is [1, 2]
            assert_eq!(regs.rax, 0x0201);
            // bx was loaded from es:0, which has offset 4, so is [5, 6]
            assert_eq!(regs.rbx, 0x0605);
            // cx was loaded from fs:0, which has offset 8, so is [9, 10]
            assert_eq!(regs.rcx, 0x0a09);
            // dx was loaded from gs:0, which has offset 12, so is [13, 14]
            assert_eq!(regs.rdx, 0x0e0d);
            // sp was loaded from ss:0, which has offset 16, so is [17, 18]
            assert_eq!(regs.rsp, 0x1211);

            let expect_rip_addr = start_addr
                + u64::try_from(test_vmm_set_segs_code::data().len())
                    .expect("the code length should within the range of u64");
            assert_eq!(
                regs.rip, expect_rip_addr,
                "Expected RIP at {expect_rip_addr:#x}"
            );
        },
        |_, exit, _, _: &mut dyn Vm| matches!(exit, VcpuExit::Hlt)
    );
}
global_asm_data!(
    test_set_cr_vmm_code,
    ".code16",
    "mov eax, cr0",
    "mov ebx, cr3",
    "mov ecx, cr4",
    "hlt",
);

// Tests that the VMM can read and write CRs and they become visible in the guest.
#[test]
fn test_set_cr_vmm() {
    let asm_addr = 0x1000;
    let setup = TestSetup {
        assembly: test_set_cr_vmm_code::data().to_vec(),
        load_addr: GuestAddress(asm_addr),
        initial_regs: Regs {
            rip: asm_addr,
            rflags: 2,
            ..Default::default()
        },
        extra_vm_setup: Some(Box::new(|vcpu: &mut dyn VcpuX86_64, _| {
            // Pick CR bits that are harmless in this configuration so the
            // guest still runs to the hlt.
            let mut sregs = vcpu.get_sregs().expect("failed to get sregs");
            sregs.cr0 |= 1 << 18; // Alignment Mask; does nothing without other config bits
            sregs.cr3 = 0xfeedface; // arbitrary value; CR3 is not used in this configuration
            sregs.cr4 |= 1 << 2; // Time Stamp Disable; not relevant here
            vcpu.set_sregs(&sregs).expect("failed to set sregs");
        })),
        ..Default::default()
    };

    run_tests!(
        setup,
        |_, regs, sregs| {
            // The guest's view of each CR must match the VMM's view.
            assert_eq!(regs.rax, sregs.cr0);
            assert_eq!(regs.rbx, sregs.cr3);
            assert_eq!(regs.rcx, sregs.cr4);
            assert_eq!(sregs.cr3, 0xfeedface);
            assert_ne!(sregs.cr0 & (1 << 18), 0);
            assert_ne!(sregs.cr4 & (1 << 2), 0);
            assert_eq!(regs.rip, asm_addr + setup.assembly.len() as u64); // after hlt
        },
        |_, exit, _, _: &mut dyn Vm| matches!(exit, VcpuExit::Hlt)
    );
}
global_asm_data!(
    test_set_cr_guest_code,
    ".code16",
    "mov eax, cr0",
    "or eax, (1 << 18)",
    "mov cr0, eax",
    "mov ebx, 0xfeedface",
    "mov cr3, ebx",
    "mov ecx, cr4",
    "or ecx, (1 << 2)",
    "mov cr4, ecx",
    "hlt",
);

// Tests that the guest can read and write CRs and they become visible to the VMM.
// (Mirror image of test_set_cr_vmm: same bits, set from inside the guest.)
#[test]
fn test_set_cr_guest() {
    let asm_addr = 0x1000;
    let setup = TestSetup {
        assembly: test_set_cr_guest_code::data().to_vec(),
        load_addr: GuestAddress(asm_addr),
        initial_regs: Regs {
            rip: asm_addr,
            rflags: 2,
            ..Default::default()
        },
        ..Default::default()
    };

    run_tests!(
        setup,
        |_, regs, sregs| {
            // The VMM's view of each CR must match what the guest wrote.
            assert_eq!(regs.rax, sregs.cr0);
            assert_eq!(regs.rbx, sregs.cr3);
            assert_eq!(regs.rcx, sregs.cr4);
            assert_eq!(sregs.cr3, 0xfeedface);
            assert_ne!(sregs.cr0 & (1 << 18), 0);
            assert_ne!(sregs.cr4 & (1 << 2), 0);
            assert_eq!(regs.rip, asm_addr + setup.assembly.len() as u64); // after hlt
        },
        |_, exit, _, _: &mut dyn Vm| matches!(exit, VcpuExit::Hlt)
    );
}
mod test_minimal_interrupt_injection_code {
    use super::*;

    global_asm_data!(
        pub init,
        ".code16",
        // Set the IDT
        "lidt [0x200]",
        // Set up the stack, which will be used when CPU transfers the control to the ISR on
        // interrupt.
        "mov sp, 0x900",
        "mov eax, 902",
        // We inject our exception on this hlt command.
        "hlt",
        "mov ebx, 990",
        "hlt"
    );

    global_asm_data!(
        pub isr,
        ".code16",
        "mov eax, 888",
        "iret"
    );
}

// Injects one external interrupt while the guest is halted and verifies the
// ISR ran (rax becomes 888) and execution resumed past the first hlt
// (rbx becomes 990).
//
// Guest memory layout, built up sequentially from start_addr:
//   IDTR (6 bytes) | IDT (256 * 4 bytes) | ISR code | init code
#[test]
fn test_minimal_interrupt_injection() {
    let start_addr: u32 = 0x200;
    // Allocate exceed 0x900, where we set up our stack.
    let mem_size: u32 = 0x1000;

    let mut setup = TestSetup {
        load_addr: GuestAddress(start_addr.into()),
        initial_regs: Regs {
            rax: 0,
            rbx: 0,
            // Set RFLAGS.IF to enable interrupt.
            rflags: 2 | FLAGS_IF_BIT,
            ..Default::default()
        },
        mem_size: mem_size.into(),
        ..Default::default()
    };

    let mut cur_addr = start_addr;

    let idtr_size: u32 = 6;
    // Sanity-check that our hand-written size matches the real struct size.
    assert_eq!(
        Ok(std::mem::size_of::<Idtr32>()),
        usize::try_from(idtr_size)
    );
    // The limit is calculated from 256 entries timed by 4 bytes per entry.
    let idt_size = 256u16 * 4u16;
    let idtr = Idtr32 {
        limit: idt_size - 1,
        // The IDT right follows the IDTR.
        base_address: start_addr + idtr_size,
    };
    setup.add_memory_initialization(GuestAddress(cur_addr.into()), idtr.as_bytes().to_vec());
    cur_addr += idtr_size;

    let idt_entry = (start_addr + idtr_size + u32::from(idt_size)).to_ne_bytes();
    // IDT entries are far pointers(CS:IP pair) to the only ISR, which locates right after the IDT.
    // We set all entries to the same ISR.
    let idt = (0..256).flat_map(|_| idt_entry).collect::<Vec<_>>();
    setup.add_memory_initialization(GuestAddress(cur_addr.into()), idt.clone());
    cur_addr += u32::try_from(idt.len()).expect("IDT size should be within u32");

    let isr_assembly = test_minimal_interrupt_injection_code::isr::data().to_vec();
    setup.add_memory_initialization(GuestAddress(cur_addr.into()), isr_assembly.clone());
    cur_addr += u32::try_from(isr_assembly.len()).expect("ISR size should be within u32");

    let init_assembly = test_minimal_interrupt_injection_code::init::data().to_vec();
    // Execution starts at the init code, which follows the ISR.
    setup.initial_regs.rip = cur_addr.into();
    setup.add_memory_initialization(GuestAddress(cur_addr.into()), init_assembly.clone());
    cur_addr += u32::try_from(init_assembly.len()).expect("init size should be within u32");
    let init_end_addr = cur_addr;

    // Everything we laid out must fit in guest memory.
    assert!(mem_size > cur_addr);

    let mut counter = 0;
    run_tests!(
        setup,
        |_, regs, _| {
            assert_eq!(regs.rip, u64::from(init_end_addr));
            assert_eq!(regs.rax, 888);
            assert_eq!(regs.rbx, 990);
        },
        |_, exit, vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| {
            match exit {
                VcpuExit::Hlt => {
                    let regs = vcpu
                        .get_regs()
                        .expect("should retrieve registers successfully");
                    counter += 1;
                    // Second hlt: the test sequence is done.
                    if counter > 1 {
                        return true;
                    }
                    // First hlt: verify pre-interrupt state, then inject.
                    assert!(vcpu.ready_for_interrupt());
                    assert_eq!(regs.rax, 902);
                    assert_eq!(regs.rbx, 0);
                    // Inject an external custom interrupt.
                    vcpu.interrupt(32)
                        .expect("should be able to inject an interrupt");
                    false
                }
                r => panic!("unexpected VMEXIT reason: {r:?}"),
            }
        }
    );
}
// Guest code for test_multiple_interrupt_injection: an init routine that halts
// with marker values in registers, plus one dedicated ISR each for interrupt
// vectors 32 and 33 and a default ISR for every other vector. Each ISR writes
// a distinct marker into a distinct register so the test can tell which
// handlers actually ran.
mod test_multiple_interrupt_injection_code {
    use super::*;

    global_asm_data!(
        pub init,
        ".code16",
        // Set the IDT
        "lidt [0x200]",
        // Set up the stack, which will be used when CPU transfers the control to the ISR on
        // interrupt.
        "mov esp, 0x900",
        "mov eax, 1",
        "mov ebx, 2",
        "mov ecx, 3",
        "mov edx, 4",
        // We inject our interrupts on this hlt command.
        "hlt",
        "mov edx, 281",
        "hlt",
    );

    global_asm_data!(
        pub isr_intr_32,
        ".code16",
        "mov eax, 32",
        "iret",
    );

    global_asm_data!(
        pub isr_intr_33,
        ".code16",
        "mov ebx, 33",
        "iret",
    );

    global_asm_data!(
        pub isr_default,
        ".code16",
        "mov ecx, 761",
        "iret",
    );
}
#[test]
fn test_multiple_interrupt_injection() {
    // Verifies hypervisor behavior when two external interrupts (vectors 32 and 33) are injected
    // back-to-back on the same hlt exit. The guest memory layout built below is, in order:
    // IDTR at 0x200, then the 256-entry IDT, then the three ISRs, then the init code.
    let start_addr: u32 = 0x200;
    // Allocate past 0x900, where we set up our stack.
    let mem_size: u32 = 0x1000;

    let mut setup = TestSetup {
        load_addr: GuestAddress(start_addr.into()),
        initial_regs: Regs {
            rax: 0,
            rbx: 0,
            rcx: 0,
            rdx: 0,
            // Set RFLAGS.IF to enable interrupt.
            rflags: 2 | FLAGS_IF_BIT,
            ..Default::default()
        },
        mem_size: mem_size.into(),
        ..Default::default()
    };

    // Cursor tracking where the next piece of guest state is placed.
    let mut cur_addr = start_addr;

    let idtr_size: u32 = 6;
    // Sanity-check that the declared IDTR size matches the Idtr32 struct layout.
    assert_eq!(
        Ok(std::mem::size_of::<Idtr32>()),
        usize::try_from(idtr_size)
    );
    // The limit is calculated from 256 entries multiplied by 4 bytes per entry.
    let idt_size = 256u16 * 4u16;
    let idtr = Idtr32 {
        limit: idt_size - 1,
        // The IDT right follows the IDTR.
        base_address: start_addr + idtr_size,
    };
    setup.add_memory_initialization(GuestAddress(cur_addr.into()), idtr.as_bytes().to_vec());
    cur_addr += idtr_size;

    let isr_intr_32_assembly = test_multiple_interrupt_injection_code::isr_intr_32::data().to_vec();
    let isr_intr_33_assembly = test_multiple_interrupt_injection_code::isr_intr_33::data().to_vec();
    let isr_default_assembly = test_multiple_interrupt_injection_code::isr_default::data().to_vec();
    // The ISR for intr 32 right follows the IDT.
    let isr_intr_32_addr = cur_addr + u32::from(idt_size);
    // The ISR for intr 33 right follows the ISR for intr 32.
    let isr_intr_33_addr = isr_intr_32_addr
        + u32::try_from(isr_intr_32_assembly.len())
            .expect("the size of the ISR for intr 32 should be within the u32 range");
    // The ISR for other interrupts right follows the ISR for intr 33.
    let isr_default_addr = isr_intr_33_addr
        + u32::try_from(isr_intr_33_assembly.len())
            .expect("the size of the ISR for intr 33 should be within the u32 range");

    // IDT entries are far pointers (CS:IP pair) to the correspondent ISR.
    let idt = (0..256)
        .map(|intr_vec| match intr_vec {
            32 => isr_intr_32_addr,
            33 => isr_intr_33_addr,
            _ => isr_default_addr,
        })
        .flat_map(u32::to_ne_bytes)
        .collect::<Vec<_>>();
    setup.add_memory_initialization(GuestAddress(cur_addr.into()), idt.clone());
    assert_eq!(idt.len(), usize::from(idt_size));
    cur_addr += u32::try_from(idt.len()).expect("IDT size should be within u32");

    assert_eq!(cur_addr, isr_intr_32_addr);
    setup.add_memory_initialization(GuestAddress(cur_addr.into()), isr_intr_32_assembly.clone());
    cur_addr += u32::try_from(isr_intr_32_assembly.len()).expect("ISR size should be within u32");

    assert_eq!(cur_addr, isr_intr_33_addr);
    setup.add_memory_initialization(GuestAddress(cur_addr.into()), isr_intr_33_assembly.clone());
    cur_addr += u32::try_from(isr_intr_33_assembly.len()).expect("ISR size should be within u32");

    assert_eq!(cur_addr, isr_default_addr);
    setup.add_memory_initialization(GuestAddress(cur_addr.into()), isr_default_assembly.clone());
    cur_addr += u32::try_from(isr_default_assembly.len()).expect("ISR size should be within u32");

    let init_assembly = test_multiple_interrupt_injection_code::init::data().to_vec();
    setup.initial_regs.rip = cur_addr.into();
    setup.add_memory_initialization(GuestAddress(cur_addr.into()), init_assembly.clone());
    cur_addr += u32::try_from(init_assembly.len()).expect("init size should be within u32");
    let init_end_addr = cur_addr;

    assert!(mem_size > cur_addr);

    // Counts hlt exits: interrupts are injected on the first, the VM stops on the second.
    let mut counter = 0;
    run_tests!(
        setup,
        |hypervisor_type, regs, _| {
            // Different hypervisors behave differently on how the first injected exception should
            // be handled: for WHPX and KVM, the later injected interrupt overrides the earlier
            // injected interrupt, while for HAXM, both interrupts are marked as pending.
            match hypervisor_type {
                HypervisorType::Haxm => assert_eq!(regs.rax, 32),
                _ => assert_eq!(regs.rax, 1),
            }

            assert_eq!(regs.rip, u64::from(init_end_addr));
            assert_eq!(regs.rbx, 33);
            assert_eq!(regs.rcx, 3);
            assert_eq!(regs.rdx, 281);
        },
        |_, exit, vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| {
            match exit {
                VcpuExit::Hlt => {
                    let regs = vcpu
                        .get_regs()
                        .expect("should retrieve registers successfully");
                    counter += 1;
                    if counter > 1 {
                        return true;
                    }
                    // First hlt: registers must still hold init's pre-interrupt values.
                    assert_eq!(regs.rax, 1);
                    assert_eq!(regs.rbx, 2);
                    assert_eq!(regs.rcx, 3);
                    assert_eq!(regs.rdx, 4);
                    // Inject external custom interrupts.
                    assert!(vcpu.ready_for_interrupt());
                    vcpu.interrupt(32)
                        .expect("should be able to inject an interrupt");
                    assert!(vcpu.ready_for_interrupt());
                    vcpu.interrupt(33)
                        .expect("should be able to inject an interrupt");
                    false
                }
                r => panic!("unexpected VMEXIT reason: {r:?}"),
            }
        }
    );
}
2428
2429
/// Guest code and instrumentation markers for
/// `test_interrupt_ready_when_normally_not_interruptible`. The guest performs an `out` to a
/// distinct port at each probe point; the port number maps 1:1 to an `Instrumentation` variant.
mod test_interrupt_ready_when_not_interruptible_code {
    use super::*;

    /// One probe point in the guest program, identified by the IO port it writes to.
    #[derive(Debug, PartialEq, Eq, Clone, Copy)]
    pub enum Instrumentation {
        BeforeMovSs,
        AfterMovSs,
        AfterAfterMovSs,
        BeforeSti,
        AfterSti,
        AfterAfterSti,
        InIsr,
    }

    impl From<u64> for Instrumentation {
        // Maps an IO port number (as seen in the Io exit) back to its probe point.
        fn from(value: u64) -> Self {
            match value {
                0x10 => Instrumentation::BeforeMovSs,
                0x20 => Instrumentation::AfterMovSs,
                0x30 => Instrumentation::AfterAfterMovSs,
                0x40 => Instrumentation::BeforeSti,
                0x50 => Instrumentation::AfterSti,
                0x60 => Instrumentation::AfterAfterSti,
                0xf0 => Instrumentation::InIsr,
                _ => panic!("Unknown instrumentation IO port: {value}"),
            }
        }
    }

    // We use port IO to trigger the VMEXIT instead of MMIO, because access to out of bound memory
    // doesn't trigger MMIO VMEXIT on WHPX under simple real-mode set up.
    global_asm_data!(
        pub init,
        ".code16",
        // Set up the stack, which will be used when CPU transfers the control to the ISR on
        // interrupt.
        "mov sp, 0x1900",
        // Set the IDT.
        "lidt [0x200]",
        // Load the ss register, so that the later mov ss instruction is actually a no-op.
        "mov ax, ss",
        "out 0x10, ax",
        // Hypervisors shouldn't allow interrupt injection right after the mov ss instruction.
        "mov ss, ax",
        "out 0x20, ax",
        // On WHPX we need some other instructions to bring the interruptibility back to normal.
        // While this is not needed for other hypervisors, we add this instruction unconditionally.
        "nop",
        "out 0x30, ax",
        "out 0x40, ax",
        // Test hypervisors' interruptibilities right after sti instruction when FLAGS.IF is
        // cleared.
        "cli",
        "sti",
        "out 0x50, ax",
        // On WHPX we need some other instructions to bring the interruptibility back to normal.
        // While this is not needed for other hypervisors, we add this instruction unconditionally.
        "nop",
        "out 0x60, ax",
        "hlt",
    );

    // The single ISR shared by all vectors: report entry via port 0xf0, then return.
    global_asm_data!(
        pub isr,
        ".code16",
        "out 0xf0, ax",
        "iret",
    );
}
2498
2499
// Physical x86 processor won't allow interrupt to be injected after mov ss or sti, while VM can.
//
// This test records `ready_for_interrupt()` at every probe point in the guest (before/after
// `mov ss`, before/after `sti`) and asserts the expected interrupt-shadow behavior, then checks
// that an interrupt injected at a "ready" probe point is delivered on the very next vcpu run.
#[test]
fn test_interrupt_ready_when_normally_not_interruptible() {
    use test_interrupt_ready_when_not_interruptible_code::Instrumentation;

    let start_addr: u32 = 0x200;
    // Allocate past 0x1900, where we set up our stack.
    let mem_size: u32 = 0x2000;

    let mut setup = TestSetup {
        load_addr: GuestAddress(start_addr.into()),
        initial_regs: Regs {
            rax: 0,
            rbx: 0,
            // Set RFLAGS.IF to enable interrupt. (Bit 1 is the always-set reserved bit; use the
            // named constant for IF, consistent with the other interrupt tests in this file.)
            rflags: 2 | FLAGS_IF_BIT,
            ..Default::default()
        },
        mem_size: mem_size.into(),
        ..Default::default()
    };

    // Cursor tracking where the next piece of guest state is placed.
    let mut cur_addr = start_addr;

    let idtr_size: u32 = 6;
    // Sanity-check that the declared IDTR size matches the Idtr32 struct layout.
    assert_eq!(
        Ok(std::mem::size_of::<Idtr32>()),
        usize::try_from(idtr_size)
    );
    // The limit is calculated from 256 entries multiplied by 4 bytes per entry.
    let idt_size = 256u16 * 4u16;
    let idtr = Idtr32 {
        limit: idt_size - 1,
        // The IDT right follows the IDTR.
        base_address: start_addr + idtr_size,
    };
    setup.add_memory_initialization(GuestAddress(cur_addr.into()), idtr.as_bytes().to_vec());
    cur_addr += idtr_size;

    let idt_entry = (start_addr + idtr_size + u32::from(idt_size)).to_ne_bytes();
    // IDT entries are far pointers (CS:IP pair) to the only ISR, which locates right after the
    // IDT. We set all entries to the same ISR.
    let idt = (0..256).flat_map(|_| idt_entry).collect::<Vec<_>>();
    setup.add_memory_initialization(GuestAddress(cur_addr.into()), idt.clone());
    cur_addr += u32::try_from(idt.len()).expect("IDT size should be within u32");

    let isr_assembly = test_interrupt_ready_when_not_interruptible_code::isr::data().to_vec();
    setup.add_memory_initialization(GuestAddress(cur_addr.into()), isr_assembly.clone());
    cur_addr += u32::try_from(isr_assembly.len()).expect("ISR size should be within u32");

    let init_assembly = test_interrupt_ready_when_not_interruptible_code::init::data().to_vec();
    setup.initial_regs.rip = cur_addr.into();
    setup.add_memory_initialization(GuestAddress(cur_addr.into()), init_assembly.clone());
    cur_addr += u32::try_from(init_assembly.len()).expect("init size should be within u32");

    assert!(mem_size > cur_addr);

    // This helps us check the interruptibility under different situations.
    let interruptibility_traces = RefCell::<Vec<_>>::default();
    // This helps us check when the interrupt actually delivers.
    let instrumentation_traces = RefCell::<Vec<_>>::default();

    run_tests!(
        setup,
        |_, regs, _| {
            use Instrumentation::*;
            assert_eq!(
                *interruptibility_traces.borrow(),
                [
                    (BeforeMovSs, true),
                    // Hypervisors don't allow interrupt injection right after mov ss.
                    (AfterMovSs, false),
                    (AfterAfterMovSs, true),
                    (BeforeSti, true),
                    // Hypervisors don't allow interrupt injection right after sti when FLAGS.IF is
                    // not set.
                    (AfterSti, false),
                    (AfterAfterSti, true)
                ]
            );
            // Hypervisors always deliver the interrupt right after we inject it in the next VCPU
            // run.
            assert_eq!(
                *instrumentation_traces.borrow(),
                [
                    BeforeMovSs,
                    InIsr,
                    AfterMovSs,
                    AfterAfterMovSs,
                    InIsr,
                    BeforeSti,
                    InIsr,
                    AfterSti,
                    AfterAfterSti,
                    InIsr,
                ]
            );
            assert_eq!(regs.rip, u64::from(cur_addr));
        },
        |_, exit, vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| {
            match exit {
                VcpuExit::Io => {
                    // Snapshot the interruptibility before handling the IO, so it reflects the
                    // state at the probe point itself.
                    let ready_for_interrupt = vcpu.ready_for_interrupt();
                    let mut should_inject_interrupt = ready_for_interrupt;
                    vcpu.handle_io(&mut |io_params| {
                        let instrumentation = Instrumentation::from(io_params.address);
                        match instrumentation {
                            Instrumentation::InIsr => {
                                // Only inject interrupt outside ISR.
                                should_inject_interrupt = false;
                            }
                            _ => {
                                // Only the interruptibility outside the ISR is important for this
                                // test.
                                interruptibility_traces
                                    .borrow_mut()
                                    .push((instrumentation, ready_for_interrupt));
                            }
                        }
                        instrumentation_traces.borrow_mut().push(instrumentation);
                        // We are always handling out IO port, so no data to return.
                    })
                    .expect("should handle IO successfully");
                    if should_inject_interrupt {
                        vcpu.interrupt(32)
                            .expect("interrupt injection should succeed when ready for interrupt");
                    }
                    false
                }
                VcpuExit::Hlt => true,
                r => panic!("unexpected VMEXIT reason: {r:?}"),
            }
        }
    );
}
2634
2635
// Guest code for `test_interrupt_ready_when_interrupt_enable_flag_not_set`: probes the host's
// `ready_for_interrupt()` via port IO once with IF cleared (port 0x10) and once with IF set
// (port 0x20).
global_asm_data!(
    test_interrupt_ready_when_interrupt_enable_flag_not_set_code,
    ".code16",
    "cli",
    // We can't use hlt for VMEXIT, because HAXM unconditionally allows interrupt injection for
    // hlt.
    "out 0x10, ax",
    "sti",
    // nop is necessary to avoid the one instruction interrupt disable window for sti when
    // FLAGS.IF is not set.
    "nop",
    "out 0x20, ax",
    "hlt",
);
2649
2650
#[test]
fn test_interrupt_ready_when_interrupt_enable_flag_not_set() {
    // Verifies that `ready_for_interrupt()` follows RFLAGS.IF: false while the guest has run
    // `cli` (probe at port 0x10), true once it has run `sti` + `nop` (probe at port 0x20).
    let assembly = test_interrupt_ready_when_interrupt_enable_flag_not_set_code::data().to_vec();
    let setup = TestSetup {
        assembly: assembly.clone(),
        load_addr: GuestAddress(0x1000),
        initial_regs: Regs {
            rip: 0x1000,
            // Only the reserved always-set bit: IF starts cleared.
            rflags: 2,
            ..Default::default()
        },
        ..Default::default()
    };

    run_tests!(
        setup,
        |_, regs, _| {
            // For VMEXIT caused by HLT, the hypervisor will automatically advance the rIP register.
            assert_eq!(regs.rip, 0x1000 + assembly.len() as u64);
        },
        |_, exit, vcpu, _: &mut dyn Vm| {
            match exit {
                VcpuExit::Io => {
                    // Capture which probe port was written so we know which phase we are in.
                    let mut addr = 0;
                    vcpu.handle_io(&mut |io_params| {
                        addr = io_params.address;
                        // We are always handling out IO port, so no data to return.
                    })
                    .expect("should handle IO successfully");
                    let regs = vcpu
                        .get_regs()
                        .expect("should retrieve the registers successfully");
                    match addr {
                        0x10 => {
                            // After cli: IF must be clear and injection must be refused.
                            assert_eq!(regs.rflags & FLAGS_IF_BIT, 0);
                            assert!(!vcpu.ready_for_interrupt());
                        }
                        0x20 => {
                            // After sti + nop: IF must be set and injection allowed.
                            assert_eq!(regs.rflags & FLAGS_IF_BIT, FLAGS_IF_BIT);
                            assert!(vcpu.ready_for_interrupt());
                        }
                        _ => panic!("unexpected addr: {addr}"),
                    }
                    false
                }
                VcpuExit::Hlt => true,
                r => panic!("unexpected VMEXIT reason: {r:?}"),
            }
        }
    );
}
2701
2702
#[test]
fn test_enter_long_mode_direct() {
    // Enters long mode purely via host-side register/paging setup (`enter_long_mode`), then runs
    // 64-bit guest code that copies rax into rdx and reads a paged memory location into rbx.
    global_asm_data!(
        pub long_mode_asm,
        ".code64",
        "mov rdx, rax",
        "mov rbx, [0x10000]",
        "hlt"
    );

    // Both sentinels exceed 32 bits, so they can only survive in true 64-bit registers/loads.
    let bigly_mem_value: u64 = 0x1_0000_0000;
    let biglier_mem_value: u64 = 0x1_0000_0001;
    let mut setup = TestSetup {
        assembly: long_mode_asm::data().to_vec(),
        mem_size: 0x11000,
        load_addr: GuestAddress(0x1000),
        initial_regs: Regs {
            rax: bigly_mem_value,
            rip: 0x1000,
            rflags: 0x2,
            ..Default::default()
        },
        extra_vm_setup: Some(Box::new(|vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {
            ModeConfig::default_long_mode().enter_long_mode(vcpu, vm);
        })),

        ..Default::default()
    };

    // Sentinel the guest reads through its page tables at 0x10000.
    setup.add_memory_initialization(
        GuestAddress(0x10000),
        biglier_mem_value.to_le_bytes().to_vec(),
    );
    let regs_matcher = move |_: HypervisorType, regs: &Regs, sregs: &Sregs| {
        // EFER.LMA (bit 10) confirms long mode is active, not merely enabled.
        assert!((sregs.efer & 0x400) != 0, "Long-Mode Active bit not set");
        assert_eq!(
            regs.rdx, bigly_mem_value,
            "Did not execute instructions correctly in long mode."
        );
        assert_eq!(
            regs.rbx, biglier_mem_value,
            "Was not able to access translated memory in long mode."
        );
        assert_eq!((sregs.cs.l), 1, "Long-mode bit not set in CS");
    };

    let exit_matcher = |_, exit: &VcpuExit, _: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {
        VcpuExit::Hlt => {
            true // Break VM runloop
        }
        r => panic!("unexpected exit reason: {r:?}"),
    };

    run_tests!(setup, regs_matcher, exit_matcher);
}
2757
2758
#[test]
fn test_enter_long_mode_asm() {
    // Unlike `test_enter_long_mode_direct`, this test starts the vcpu in real mode and has the
    // GUEST itself perform the long-mode transition (CR4.PAE/PGE, CR3, EFER.LME, CR0.PG/PE,
    // lgdt, far jump) before running the same 64-bit payload.
    global_asm_data!(
        pub enter_long_mode_asm,
        ".code16",
        "lidt [0xd100]", // Address of the IDT limit + base
        "mov eax, cr4",
        "or ax, 1 << 7 | 1 << 5", // Set the PAE-bit (bit 5) and PGE (bit 7).
        "mov cr4, eax",

        "mov bx, 0x9000", // Address of the page table.
        "mov cr3, ebx",

        "mov ecx, 0xC0000080", // Set ECX to EFER MSR (0xC0000080)
        "rdmsr", // Read from the MSR
        "or ax, 1 << 8", // Set the LM-bit (bit 8).
        "wrmsr", // Write to the MSR

        "mov eax, cr0",
        "or eax, 1 << 31 | 1 << 0", // Set PG (bit 31) & PE (bit 0).
        "mov cr0, eax",

        "lgdt [0xd000]", // Address of the GDT limit + base
        "ljmp 16, 0xe000" // Address of long_mode_asm
    );

    global_asm_data!(
        pub long_mode_asm,
        ".code64",
        "mov rdx, r8",
        "mov rbx, [0x10000]",
        "hlt"
    );

    // Both sentinels exceed 32 bits, so they can only survive in true 64-bit registers/loads.
    let bigly_mem_value: u64 = 0x1_0000_0000;
    let biglier_mem_value: u64 = 0x1_0000_0001;
    let mut setup = TestSetup {
        assembly: enter_long_mode_asm::data().to_vec(),
        mem_size: 0x13000,
        load_addr: GuestAddress(0x1000),
        initial_regs: Regs {
            // r8 is used (instead of rax) because the real-mode prologue clobbers eax.
            r8: bigly_mem_value,
            rip: 0x1000,
            rflags: 0x2,
            ..Default::default()
        },
        extra_vm_setup: Some(Box::new(|_: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {
            // TODO(b/354901961): configure_long_mode_memory loads GDT and IDT for 64 bit usage, and
            // the ABI doesn't match real mode and protected mode, but in this test, we first launch
            // in real mode.

            ModeConfig::default_long_mode().configure_long_mode_memory(vm);
        })),

        ..Default::default()
    };

    // Sentinel the guest reads through its page tables at 0x10000.
    setup.add_memory_initialization(
        GuestAddress(0x10000),
        biglier_mem_value.to_le_bytes().to_vec(),
    );
    // 64-bit payload placed at the far-jump target 0xe000.
    setup.add_memory_initialization(GuestAddress(0xe000), long_mode_asm::data().to_vec());

    // GDT limit + base, to be loaded by the lgdt instruction.
    // Must be within 0xFFFF as it's executed in real-mode.
    setup.add_memory_initialization(GuestAddress(0xd000), 0xFFFF_u32.to_le_bytes().to_vec());
    setup.add_memory_initialization(
        GuestAddress(0xd000 + 2),
        (DEFAULT_GDT_OFFSET as u32).to_le_bytes().to_vec(),
    );

    // IDT limit + base, to be loaded by the lidt instruction.
    // Must be within 0xFFFF as it's executed in real-mode.
    setup.add_memory_initialization(GuestAddress(0xd100), 0xFFFF_u32.to_le_bytes().to_vec());
    setup.add_memory_initialization(
        GuestAddress(0xd100 + 2),
        (DEFAULT_IDT_OFFSET as u32).to_le_bytes().to_vec(),
    );

    let regs_matcher = move |_: HypervisorType, regs: &Regs, sregs: &Sregs| {
        // EFER.LMA (bit 10) confirms long mode is active, not merely enabled.
        assert!((sregs.efer & 0x400) != 0, "Long-Mode Active bit not set");
        assert_eq!(
            regs.rdx, bigly_mem_value,
            "Did not execute instructions correctly in long mode."
        );
        assert_eq!(
            regs.rbx, biglier_mem_value,
            "Was not able to access translated memory in long mode."
        );
        assert_eq!((sregs.cs.l), 1, "Long-mode bit not set in CS");
    };

    let exit_matcher = |_, exit: &VcpuExit, _: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {
        VcpuExit::Hlt => {
            true // Break VM runloop
        }
        r => panic!("unexpected exit reason: {r:?}"),
    };

    run_tests!(setup, regs_matcher, exit_matcher);
}
2859
2860
#[test]
fn test_request_interrupt_window() {
    // Verifies that a requested interrupt window only opens once the guest becomes
    // interruptible: the request is made while IF is clear (after cli), and the window exit must
    // not arrive until after sti's one-instruction shadow has passed.
    global_asm_data!(
        assembly,
        ".code16",
        // Disable the interrupt, and the interrupt window shouldn't cause a vcpu exit until the
        // interrupt is enabled again.
        "cli",
        // vcpu exit here to request an interrupt window when interrupt is not ready. We can't use
        // hlt for VMEXIT, because HAXM unconditionally allows interrupt injection for hlt.
        "out 0x10, ax",
        // Enable the interrupt.
        "sti",
        // Another instruction window for interrupt delivery after sti. We shouldn't receive the
        // interrupt window exit until we complete this instruction. We use another intercepted
        // instruction here to make sure the hypervisor doesn't shadow the not delivered interrupt
        // request window on an intercepted instruction.
        "out 0x10, ax",
        // WHPX requires another not intercepted instruction to restore from the not interruptible
        // state.
        "nop",
        // The interrupt window exit should happen either right before nop or right after nop.
        "hlt",
    );

    let assembly = assembly::data().to_vec();
    let setup = TestSetup {
        assembly: assembly.clone(),
        load_addr: GuestAddress(0x1000),
        initial_regs: Regs {
            rip: 0x1000,
            rflags: 2,
            ..Default::default()
        },
        intercept_intr: true,
        ..Default::default()
    };

    run_tests!(
        setup,
        |_, regs, _| assert_eq!(regs.rip, 0x1000 + assembly.len() as u64),
        {
            // State captured by the exit-matcher closure across vcpu exits.
            let mut io_counter = 0;
            let mut irq_window_received = false;
            move |hypervisor_type, exit, vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| {
                // HAXM reports the open window as an Intr exit rather than IrqWindowOpen, so it
                // is recognized by position (after both out instructions) instead of by kind.
                let is_irq_window = if hypervisor_type == HypervisorType::Haxm {
                    matches!(exit, VcpuExit::Intr) && io_counter == 2
                } else {
                    matches!(exit, VcpuExit::IrqWindowOpen)
                };
                if is_irq_window {
                    // The window must open only after both out instructions completed.
                    assert_eq!(io_counter, 2);
                    assert!(vcpu.ready_for_interrupt());
                    vcpu.set_interrupt_window_requested(false);

                    irq_window_received = true;
                    return false;
                }
                match exit {
                    VcpuExit::Intr => false,
                    VcpuExit::Io => {
                        // We are always handling out IO port, so no data to return.
                        vcpu.handle_io(&mut |_| {})
                            .expect("should handle IO successfully");

                        // Both out instructions sit in a not-interruptible region (after cli,
                        // or within sti's shadow).
                        assert!(!vcpu.ready_for_interrupt());

                        // Only set the interrupt window request on the first out instruction.
                        if io_counter == 0 {
                            vcpu.set_interrupt_window_requested(true);
                        }
                        io_counter += 1;
                        false
                    }
                    VcpuExit::Hlt => {
                        assert!(irq_window_received);
                        true
                    }
                    r => panic!("unexpected VMEXIT: {r:?}"),
                }
            }
        }
    );
}
2944
2945
#[test]
fn test_fsgsbase() {
    // Verifies the FSGSBASE instructions (wrfsbase/wrgsbase/rdfsbase/rdgsbase) work in a long
    // mode guest with CR4.FSGSBASE enabled, and that fs:/gs:-relative loads go through the
    // written bases.
    global_asm_data!(
        pub fsgsbase_asm,
        ".code64",
        "wrfsbase rax",
        "wrgsbase rbx",
        "rdfsbase rcx",
        "rdgsbase rdx",
        "mov rax, fs:0",
        "mov rbx, gs:0",
        "hlt"
    );

    let code_addr = 0x1000;
    // Base addresses written to FS/GS; guest memory there is pre-filled below.
    let fs = 0x10000;
    let gs = 0x10100;

    let setup = TestSetup {
        assembly: fsgsbase_asm::data().to_vec(),
        mem_size: 0x11000,
        load_addr: GuestAddress(code_addr),
        initial_regs: Regs {
            rax: fs,
            rbx: gs,
            rip: code_addr,
            rflags: 0x2,
            ..Default::default()
        },
        extra_vm_setup: Some(Box::new(|vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {
            ModeConfig::default_long_mode().enter_long_mode(vcpu, vm);

            let mut sregs = vcpu.get_sregs().expect("unable to get sregs");
            sregs.cr4 |= 1 << 16; // FSGSBASE (bit 16)
            vcpu.set_sregs(&sregs).expect("unable to set sregs");
        })),
        // Distinct fill bytes let the fs: and gs: loads be told apart.
        memory_initializations: vec![
            (GuestAddress(fs), [0xaa; 8].into()),
            (GuestAddress(gs), [0xbb; 8].into()),
        ],
        ..Default::default()
    };

    let regs_matcher = move |_: HypervisorType, regs: &Regs, sregs: &Sregs| {
        // rdfsbase/rdgsbase read back the values just written.
        assert_eq!(regs.rcx, fs);
        assert_eq!(regs.rdx, gs);
        // fs:0 / gs:0 loads observed the pre-initialized memory at the bases.
        assert_eq!(regs.rax, 0xaaaaaaaaaaaaaaaa);
        assert_eq!(regs.rbx, 0xbbbbbbbbbbbbbbbb);
        // The hypervisor's sregs view reflects the guest-written bases.
        assert_eq!(sregs.fs.base, fs);
        assert_eq!(sregs.gs.base, gs);
    };

    let exit_matcher = |_, exit: &VcpuExit, _vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {
        VcpuExit::Hlt => {
            true // Break VM runloop
        }
        r => panic!("unexpected exit reason: {r:?}"),
    };

    run_tests!(setup, regs_matcher, exit_matcher);
}
3006
3007
/// Tests whether MMX state is being preserved by the hypervisor correctly (e.g. the hypervisor is
/// properly using fxsave/fxrstor, or xsave/xrstor (or xsaves/xrstors)).
#[test]
fn test_mmx_state_is_preserved_by_hypervisor() {
    // This program stores a sentinel value into mm0 (the first MMX register) and verifies
    // that after a vmexit, that value is properly restored (we copy it to rbx so it can be checked
    // by the reg matcher when the VM hlts). In the vmexit handler function below, we make sure the
    // sentinel value is NOT in mm0. This way we know the mm0 value has changed, so we're guaranteed
    // the hypervisor has to restore the guest's sentinel value for the test to pass. (The read
    // from mm0 to rbx happens *after* the vmexit, so the hypervisor has to restore the guest's
    // mm0 otherwise there will be random garbage in there from the host. This would also be a
    // security issue.)
    //
    // Note: this program also verifies the guest has MMX support. If it does not, rdx will be 1 and
    // no MMX instructions will be attempted.
    let sentinel_mm0_value = 0x1337FFFFu64;
    global_asm_data!(
        pub mmx_ops_asm,
        ".code64",
        // CPUID leaf 1: EDX bit 23 is the MMX feature flag.
        "mov eax, 1",
        "cpuid",
        "bt edx, 23",
        "jc HasMMX",
        "mov rdx, 1",
        "hlt",
        "HasMMX:",
        "xor rdx, rdx",
        "mov rax, 0x1337FFFF",
        "mov rbx, 0x0",
        "movq mm0, rax",
        // Port IO forces a vmexit while the sentinel sits in mm0.
        "out 0x5, al",
        "movq rbx, mm0",
        "emms",
        "hlt",
    );

    let code_addr = 0x1000;
    let setup = TestSetup {
        assembly: mmx_ops_asm::data().to_vec(),
        mem_size: 0x12000,
        load_addr: GuestAddress(code_addr),
        initial_regs: Regs {
            rip: code_addr,
            rflags: 0x2,
            ..Default::default()
        },
        extra_vm_setup: Some(Box::new(|vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {
            ModeConfig::default_long_mode().enter_long_mode(vcpu, vm);
        })),
        memory_initializations: vec![],
        ..Default::default()
    };

    let regs_matcher = move |_: HypervisorType, regs: &Regs, _: &_| {
        assert_ne!(regs.rdx, 1, "guest has no MMX support");
        assert_eq!(
            regs.rbx, sentinel_mm0_value,
            "guest MMX register not restored by hypervisor"
        );
    };

    let exit_matcher = |_, exit: &VcpuExit, vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {
        VcpuExit::Hlt => {
            true // Break VM runloop
        }
        VcpuExit::Cpuid { entry } => {
            vcpu.handle_cpuid(entry)
                .expect("should handle cpuid successfully");
            false
        }
        VcpuExit::Io => {
            vcpu.handle_io(&mut |_| {})
                .expect("should handle IO successfully");

            // kaiyili@ pointed out we should check the XSAVE state exposed by the hypervisor via
            // its API (e.g. vm.get_xsave_state). This is used in snapshotting, so if it's wrong,
            // that would break things. It's also a good cross-check that the hypervisor is properly
            // handling xsave state.
            //
            // There are a couple of things blocking us from doing that today:
            // 1. gHAXM, our hypervisor of interest, doesn't expose its xsave area state for
            // the guest.
            // 2. We don't have an xsave area parser (yet).

            // mm0 MUST NOT have the guest's sentinel value. If it somehow does, the hypervisor
            // didn't save the guest's FPU/MMX state / restore the host's state before exiting to
            // CrosVM.
            //
            // Note: MMX is ubiquitous on x86_64, so we don't check for support on the host (the
            // guest checks, so unless the guest's support is software implemented, it's highly
            // likely the host has MMX support).
            let mut mm0_value: u64;
            // SAFETY: we do not clobber any undeclared registers. Technically emms changes some
            // x87 state, so there's some UB risk here, but it is not explicitly called out by
            // the Rust docs as a bad idea.
            unsafe {
                asm!(
                    "movq rax, mm0",
                    "emms",
                    out("rax") mm0_value);
            }
            assert_ne!(
                mm0_value, sentinel_mm0_value,
                "host mm0 value is the same as the guest sentinel value"
            );
            false
        }
        r => panic!("unexpected exit reason: {r:?}"),
    };

    run_tests!(setup, regs_matcher, exit_matcher);
}
3119
3120
/// Tests whether AVX state is being preserved by the hypervisor correctly (e.g. the hypervisor is
/// properly using xsave/xrstor (or xsaves/xrstors)). This is very similar to the MMX test, but
/// AVX state is *not* captured by fxsave, so that's how we guarantee xsave state of some kind is
/// being handled properly.
#[test]
fn test_avx_state_is_preserved_by_hypervisor() {
    // The host-side inline asm below executes AVX instructions, so bail out early when the host
    // lacks AVX rather than faulting mid-test.
    if !is_x86_feature_detected!("avx") {
        panic!("this test requires host AVX support and it was not detected");
    }

    let sentinel_value = 0x1337FFFFu64;
    global_asm_data!(
        pub avx_ops_asm,
        ".code64",
        // CPUID leaf 1: ECX bit 28 is the AVX feature flag.
        "mov eax, 1",
        "cpuid",
        "bt ecx, 28",
        "jc HasAVX",
        "mov rdx, 1",
        "hlt",
        "HasAVX:",

        // Turn on OSXSAVE (we can't touch XCR0 without it).
        "mov rax, cr4",
        "or eax, 1 << 18",
        "mov cr4, rax",

        // AVX won't work unless we enable it.
        //
        // Set the relevant XCR0 bits:
        // 0: X87
        // 1: SSE
        // 2: AVX
        "xor rcx, rcx",
        "xgetbv",
        // (7 = 111b)
        "or eax, 7",
        "xsetbv",

        // Now that AVX is ready to use, let's start with a clean slate (and signify we have AVX
        // support to the test assert below by zeroing rdx).
        "xor rdx, rdx",
        "xor rax, rax",
        "xor rbx, rbx",
        "vzeroall",

        // Here's the actual test (finally). Since AVX is a little tricky to follow, here's what
        // the test does:
        // 1. We load 0x1337FFFF into ymm1 via xmm0.
        // 2. We perform port IO to exit out to CrosVM (our vmexit handler below).
        // 3. The vmexit handler makes sure ymm1 does NOT contain 0x1337FFFF.
        // 4. We return to this program. Then we dump the value of ymm1 into ebx. The exit
        // register matcher verifies that 0x1337FFFF is in ebx. This means the hypervisor
        // properly restored ymm1 for the guest on vmenter.
        "mov eax, 0x1337FFFF",
        "vpinsrd xmm0, xmm1, eax, 3",
        "vinserti128 ymm1, ymm2, xmm0, 1",
        "out 0x5, al",
        "vextracti128 xmm3, ymm1, 1",
        "vpextrd ebx, xmm3, 3",
        "hlt",
    );

    let code_addr = 0x1000;
    let setup = TestSetup {
        assembly: avx_ops_asm::data().to_vec(),
        mem_size: 0x12000,
        load_addr: GuestAddress(code_addr),
        initial_regs: Regs {
            rip: code_addr,
            rflags: 0x2,
            ..Default::default()
        },
        extra_vm_setup: Some(Box::new(|vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {
            ModeConfig::default_long_mode().enter_long_mode(vcpu, vm);
        })),
        memory_initializations: vec![],
        ..Default::default()
    };

    let regs_matcher = move |_: HypervisorType, regs: &Regs, _: &_| {
        assert_ne!(regs.rdx, 1, "guest has no AVX support");
        assert_eq!(
            regs.rbx, sentinel_value,
            "guest AVX register not restored by hypervisor"
        );
    };

    let exit_matcher = |_, exit: &VcpuExit, vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {
        VcpuExit::Hlt => {
            true // Break VM runloop
        }
        VcpuExit::Cpuid { entry } => {
            vcpu.handle_cpuid(entry)
                .expect("should handle cpuid successfully");
            false
        }
        VcpuExit::Io => {
            vcpu.handle_io(&mut |_| {})
                .expect("should handle IO successfully");

            // kaiyili@ pointed out we should check the XSAVE state exposed by the hypervisor via
            // its API (e.g. vm.get_xsave_state). This is used in snapshotting, so if it's wrong,
            // that would break things. It's also a good cross-check that the hypervisor is properly
            // handling xsave state.
            //
            // There are a couple of things blocking us from doing that today:
            // 1. gHAXM, our hypervisor of interest, doesn't expose its xsave area state for
            // the guest.
            // 2. We don't have a xsave area parser (yet).

            // ymm1 MUST NOT have the guest's sentinel value. If it somehow does, the hypervisor
            // didn't save the guest's AVX state / restore the host's state before exiting to
            // CrosVM.
            //
            // Note: AVX is ubiquitous on x86_64, so we don't check for support on the host (the
            // guest checks, so unless the guest's support is software implemented, it's highly
            // likely the host has AVX support).
            let mut ymm1_sub_value: u64;
            // SAFETY: we don't clobber any undeclared registers.
            unsafe {
                asm!(
                    "vextracti128 xmm4, ymm1, 1",
                    "vpextrd eax, xmm4, 3",
                    out("rax") ymm1_sub_value,
                    out("xmm4") _);
            }
            assert_ne!(
                ymm1_sub_value, sentinel_value,
                "host ymm1 value is the same as the guest sentinel value. Hypervisor likely didn't \
                save guest's state."
            );
            false
        }
        r => panic!("unexpected exit reason: {r:?}"),
    };

    run_tests!(setup, regs_matcher, exit_matcher);
}
3259
3260
/// Tests whether XSAVE works inside a guest.
///
/// The guest enables OSXSAVE, plants a sentinel in xmm0, round-trips the state
/// through XSAVE/XRSTOR, and reports the recovered lane in rbx for the host to
/// verify.
#[test]
fn test_xsave() {
    let sentinel_xmm0_value = 0x1337FFFFu64;
    global_asm_data!(
        pub xsave_ops_asm,
        ".code64",

        // Make sure XSAVE is supported (CPUID.1:ECX bit 26). If not, report
        // rdx = 1 and stop.
        "mov eax, 1",
        "mov ecx, 0",
        "cpuid",
        "bt ecx, 26",
        "jc HasXSAVE",
        "mov rdx, 1",
        "hlt",
        "HasXSAVE:",
        "xor rdx, rdx",

        // Turn on OSXSAVE.
        "mov rax, cr4",
        "or eax, 1 << 18",
        "mov cr4, rax",

        // Enable X87, SSE, and AVX.
        //
        // Set the relevant XCR0 bits:
        // 0: X87
        // 1: SSE
        // 3: AVX
        "xor rcx, rcx",
        "xgetbv",
        // (7 = 111b)
        "or eax, 7",
        "xsetbv",

        // Put the sentinel value in xmm0, and save it off.
        "mov eax, 0x1337FFFF",
        "vzeroall",
        "vpinsrd xmm0, xmm1, eax, 3",
        "xor edx, edx",
        "mov eax, 7",
        "xsave dword ptr [0x10000]",

        // Clear xmm0.
        "vpxor xmm0, xmm0, xmm0",

        // Restoring should put the sentinel value back.
        "xor edx, edx",
        "mov eax, 7",
        "xrstor dword ptr [0x10000]",

        "xor rbx, rbx",
        "vpextrd ebx, xmm0, 3",
        "hlt",
    );

    let code_addr = 0x1000;
    let setup = TestSetup {
        assembly: xsave_ops_asm::data().to_vec(),
        mem_size: 0x12000,
        load_addr: GuestAddress(code_addr),
        initial_regs: Regs {
            rip: code_addr,
            rflags: 0x2,
            ..Default::default()
        },
        extra_vm_setup: Some(Box::new(|vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {
            ModeConfig::default_long_mode().enter_long_mode(vcpu, vm);
        })),
        // Zeroed scratch page at 0x10000 serves as the guest's XSAVE area.
        memory_initializations: vec![(GuestAddress(0x10000), vec![0; 0x1000])],
        ..Default::default()
    };

    let regs_matcher = move |_: HypervisorType, regs: &Regs, _: &_| {
        assert_ne!(regs.rdx, 1, "guest has no XSAVE support");
        assert_eq!(
            regs.rbx, sentinel_xmm0_value,
            "guest SSE register not restored by XRSTOR",
        );
    };

    let exit_matcher = |_, exit: &VcpuExit, vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {
        VcpuExit::Hlt => {
            true // Break VM runloop
        }
        VcpuExit::Cpuid { entry } => {
            vcpu.handle_cpuid(entry)
                .expect("should handle cpuid successfully");
            false
        }
        VcpuExit::MsrAccess => false, // MsrAccess handled by hypervisor impl
        r => panic!("unexpected exit reason: {r:?}"),
    };

    run_tests!(setup, regs_matcher, exit_matcher);
}
/// Tests whether XSAVES works inside a guest.
///
/// Ignored because CET is not available in some nested virtualization
/// environments (such as CI). (CET is the feature we use to test XSAVES.)
#[ignore]
#[cfg(feature = "whpx")]
#[test]
fn test_xsaves() {
    global_asm_data!(
        pub xsaves_ops_asm,
        ".code64",

        // Make sure XSAVES is supported (CPUID.(EAX=0xD, ECX=1):EAX bit 3).
        // Error code rdx = 1 if missing.
        "mov eax, 0xd",
        "mov ecx, 1",
        "cpuid",
        "bt eax, 3",
        "jc HasXSAVES",
        "mov rdx, 1",
        "hlt",
        "HasXSAVES:",

        // Make sure CET is supported (CPUID.(EAX=7, ECX=0):ECX bit 7).
        // Error code rdx = 2 if missing.
        "mov eax, 7",
        "mov ecx, 0",
        "cpuid",
        "bt ecx, 7",
        "jc HasCET",
        "mov rdx, 2",
        "hlt",
        "HasCET:",

        // Turn on write protection for ring 0 (required by CET).
        "mov rax, cr0",
        "or eax, 1 << 16",
        "mov cr0, rax",

        // Turn on OSXSAVE (18) and CET (23).
        "mov rax, cr4",
        "or eax, 1 << 18",
        "or eax, 1 << 23",
        "mov cr4, rax",

        // Set up XSAVES to manage CET state.
        // IA32_XSS = 0x0DA0
        "mov ecx, 0x0DA0",
        "rdmsr",
        "or eax, 1 << 12",
        "wrmsr",

        // Enable CET.
        "mov ecx, 0x6A2",
        "rdmsr",
        "or eax, 1",
        "wrmsr",

        // Now CET is usable and managed by XSAVES. Let's set a sentinel value and make sure xsaves
        // restores it as expected. Note that PL0_SSP's linear address must be 8 byte aligned.
        // PL0_SSP = 0x06A4
        "mov ecx, 0x06A4",
        "xor edx, edx",
        "xor eax, eax",
        "mov eax, 0x13370000",
        "wrmsr",

        // Set the RFBM / feature mask to include CET.
        "xor edx, edx",
        "mov eax, 1 << 12",
        "xsaves dword ptr [0x10000]",

        // Clear PL0_SSP
        "xor edx, edx",
        "xor eax, eax",
        "mov ecx, 0x06A4",
        "wrmsr",

        // Set the RFBM / feature mask to include CET.
        "xor edx, edx",
        "mov eax, 1 << 12",
        "xrstors dword ptr [0x10000]",

        // Check to see if PL0_SSP was restored. Error code rdx = 3 if not.
        "mov ecx, 0x06A4",
        "rdmsr",
        "cmp eax, 0x13370000",
        "jz TestPasses",
        "mov rdx, 3",
        "hlt",
        "TestPasses:",
        "xor rdx, rdx",
        "hlt",
    );

    let code_addr = 0x1000;
    let setup = TestSetup {
        assembly: xsaves_ops_asm::data().to_vec(),
        mem_size: 0x12000,
        load_addr: GuestAddress(code_addr),
        initial_regs: Regs {
            rip: code_addr,
            rdx: 0x4,
            rflags: 0x2,
            ..Default::default()
        },
        extra_vm_setup: Some(Box::new(|vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {
            ModeConfig::default_long_mode().enter_long_mode(vcpu, vm);
        })),
        // Zeroed scratch page at 0x10000 serves as the guest's XSAVES area.
        memory_initializations: vec![(GuestAddress(0x10000), vec![0; 0x1000])],
        ..Default::default()
    };

    let regs_matcher = move |_: HypervisorType, regs: &Regs, _: &_| {
        assert_ne!(regs.rdx, 1, "guest has no XSAVES support");
        assert_ne!(regs.rdx, 2, "guest has no CET support");
        assert_ne!(regs.rdx, 3, "guest didn't restore PL0_SSP as expected");
        assert_eq!(regs.rdx, 0, "test failed unexpectedly");
    };

    let exit_matcher = |_, exit: &VcpuExit, vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {
        VcpuExit::Hlt => {
            true // Break VM runloop
        }
        VcpuExit::Cpuid { entry } => {
            vcpu.handle_cpuid(entry)
                .expect("should handle cpuid successfully");
            false
        }
        VcpuExit::MsrAccess => false, // MsrAccess handled by hypervisor impl
        r => panic!("unexpected exit reason: {r:?}"),
    };

    run_tests!(setup, regs_matcher, exit_matcher);
}
/// Tests that XSAVES is disabled in gHAXM (it's unsupported).
///
/// Note: this test passing in CI is not necessarily a signal that gHAXM is working correctly
/// because XSAVES is disabled in some nested virtualization environments (e.g. CI).
#[cfg(feature = "haxm")]
#[test]
fn test_xsaves_is_disabled_on_haxm() {
    global_asm_data!(
        pub no_xsaves_asm,
        ".code64",

        // CPUID.(EAX=0xD, ECX=1):EAX bit 3 reports XSAVES. rdx = 1 means the
        // guest (incorrectly) sees XSAVES support; rdx = 0 means it doesn't.
        "mov eax, 0xd",
        "mov ecx, 1",
        "cpuid",
        "bt eax, 3",
        "jnc NoXSAVES",
        "mov rdx, 1",
        "hlt",
        "NoXSAVES:",
        "mov rdx, 0",
        "hlt",
    );

    let code_addr = 0x1000;
    let setup = TestSetup {
        assembly: no_xsaves_asm::data().to_vec(),
        mem_size: 0x12000,
        load_addr: GuestAddress(code_addr),
        initial_regs: Regs {
            rip: code_addr,
            rdx: 0x2,
            rflags: 0x2,
            ..Default::default()
        },
        extra_vm_setup: Some(Box::new(|vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {
            ModeConfig::default_long_mode().enter_long_mode(vcpu, vm);
        })),
        memory_initializations: vec![],
        ..Default::default()
    };

    let regs_matcher = move |_: HypervisorType, regs: &Regs, _: &_| {
        assert_ne!(regs.rdx, 1, "guest has XSAVES support and shouldn't");
        assert_eq!(regs.rdx, 0, "test failed unexpectedly");
    };

    let exit_matcher = |_, exit: &VcpuExit, vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {
        VcpuExit::Hlt => {
            true // Break VM runloop
        }
        VcpuExit::Cpuid { entry } => {
            vcpu.handle_cpuid(entry)
                .expect("should handle cpuid successfully");
            false
        }
        VcpuExit::MsrAccess => false, // MsrAccess handled by hypervisor impl
        r => panic!("unexpected exit reason: {r:?}"),
    };

    run_tests!(setup, regs_matcher, exit_matcher);
}
/// Tests whether SLAT is updated properly when a region is removed from the guest. A correctly
/// implemented hypervisor will flush the TLB such that this immediately hits a SLAT fault and comes
/// to us as MMIO. If we don't see that, and the guest actually reads from the removed region, the
/// test will fail. In the real world, this would be a guest read from a random pfn, which is
/// UB (and a major security problem).
///
/// Flakes should be treated as real failures (this test can show a false negative, but never a
/// false positive).
#[test]
fn test_slat_on_region_removal_is_mmio() {
    global_asm_data!(
        pub test_asm,
        ".code64",

        // Load the TLB with a mapping for the test region.
        "mov al, byte ptr [0x20000]",

        // Signal to the host that VM is running. On this vmexit, the host will unmap the test
        // region.
        "out 0x5, al",

        // This read should result in MMIO, and if it does, the test passes. If we hit the hlt, then
        // the test fails (since it means we were able to satisfy this read without exiting).
        "mov al, byte ptr [0x20000]",
        "hlt"
    );

    const TEST_MEM_REGION_SIZE: usize = 0x1000;
    // Slot of the removable test region; filled in by extra_vm_setup, consumed
    // by the exit matcher when it tears the region down.
    let memslot: Arc<Mutex<Option<MemSlot>>> = Arc::new(Mutex::new(None));
    let memslot_for_func = memslot.clone();

    let code_addr = 0x1000;
    let setup = TestSetup {
        assembly: test_asm::data().to_vec(),
        mem_size: 0x12000,
        load_addr: GuestAddress(code_addr),
        initial_regs: Regs {
            rip: code_addr,
            rflags: 0x2,
            ..Default::default()
        },
        extra_vm_setup: Some(Box::new(
            move |vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {
                ModeConfig::default_long_mode().enter_long_mode(vcpu, vm);

                // Create a test pinned memory region that is all 0xFF.
                let shm = SharedMemory::new("test", TEST_MEM_REGION_SIZE as u64).unwrap();
                let test_region = Box::new(
                    MemoryMappingBuilder::new(TEST_MEM_REGION_SIZE)
                        .from_shared_memory(&shm)
                        .build()
                        .unwrap(),
                );
                let ff_init = [0xFFu8; TEST_MEM_REGION_SIZE];
                test_region.write_slice(&ff_init, 0).unwrap();
                let test_region = Box::new(
                    PinnedMemoryRegion::new(test_region).expect("failed to pin test region"),
                );
                *memslot_for_func.lock() = Some(
                    vm.add_memory_region(
                        GuestAddress(0x20000),
                        test_region,
                        false,
                        false,
                        MemCacheType::CacheCoherent,
                    )
                    .unwrap(),
                );
            },
        )),
        memory_initializations: vec![],
        ..Default::default()
    };

    // Holds the test memory region after it's unmapped and the VM is still running. Without this,
    // incorrect access to the region by the VM would be unsafe / UB.
    let test_region_arc: Arc<Mutex<Option<Box<dyn MappedRegion>>>> = Arc::new(Mutex::new(None));
    let test_region_arc_for_exit = test_region_arc.clone();

    let exit_matcher =
        move |_, exit: &VcpuExit, vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| match exit {
            VcpuExit::Io => {
                // WHPX insists on data being returned here or it throws MemoryCallbackFailed.
                //
                // We strictly don't care what this data is, since the VM exits before running any
                // further instructions.
                vcpu.handle_io(&mut |_| {})
                    .expect("should handle IO successfully");

                // Remove the test memory region to cause a SLAT fault (in the passing case).
                //
                // This also ensures the memory region remains pinned in host physical memory so any
                // incorrect accesses to it by the VM will remain safe.
                *test_region_arc_for_exit.lock() =
                    Some(vm.remove_memory_region(memslot.lock().unwrap()).unwrap());
                false
            }
            VcpuExit::Mmio => {
                vcpu.handle_mmio(&mut |IoParams { address, operation }| {
                    assert_eq!(address, 0x20000, "MMIO for wrong address");
                    match operation {
                        IoOperation::Read(data) => {
                            assert_eq!(data.len(), 1);
                            data[0] = 0;
                            Ok(())
                        }
                        IoOperation::Write(_) => {
                            panic!("got unexpected IO operation {operation:?}");
                        }
                    }
                })
                .unwrap();
                true
            }
            VcpuExit::Hlt => {
                panic!("VM should not reach the hlt instruction (MMIO should've ended the VM)");
            }
            r => panic!("unexpected exit reason: {r:?}"),
        };

    // We want to catch if the hypervisor doesn't clear the VM's TLB. If we hop between CPUs, then
    // we're likely to end up with a clean TLB on another CPU.
    set_cpu_affinity(vec![0]).unwrap();

    run_tests!(setup, move |_, _, _| {}, &exit_matcher);
}
struct PinnedMemoryRegion {
3682
mem_region: Box<dyn MappedRegion>,
3683
}
3684
3685
impl PinnedMemoryRegion {
3686
fn new(mem_region: Box<dyn MappedRegion>) -> base::Result<Self> {
3687
// SAFETY:
3688
// ptr is a valid pointer and points to a region of the supplied size.
3689
unsafe { pin_memory(mem_region.as_ptr() as *mut _, mem_region.size()) }?;
3690
Ok(Self { mem_region })
3691
}
3692
}
3693
3694
// SAFETY:
3695
// Safe because ptr & size a memory range owned by this MemoryMapping that won't be unmapped
3696
// until it's dropped.
3697
unsafe impl MappedRegion for PinnedMemoryRegion {
3698
fn as_ptr(&self) -> *mut u8 {
3699
self.mem_region.as_ptr()
3700
}
3701
3702
fn size(&self) -> usize {
3703
self.mem_region.size()
3704
}
3705
}
3706
3707
impl Drop for PinnedMemoryRegion {
3708
fn drop(&mut self) {
3709
// SAFETY:
3710
// memory region passed is a valid pointer and points to a region of the
3711
// supplied size. We also panic on failure.
3712
unsafe { unpin_memory(self.mem_region.as_ptr() as *mut _, self.mem_region.size()) }
3713
.expect("failed to unpin memory")
3714
}
3715
}
3716
3717
unsafe fn pin_memory(ptr: *mut c_void, len: usize) -> base::Result<()> {
3718
#[cfg(windows)]
3719
{
3720
VirtualLock(ptr, len).map_err(|e| base::Error::new(e.code().0))
3721
}
3722
#[cfg(unix)]
3723
{
3724
if libc::mlock(ptr, len) != 0 {
3725
Err(base::Error::last())
3726
} else {
3727
Ok(())
3728
}
3729
}
3730
}
3731
3732
unsafe fn unpin_memory(ptr: *mut c_void, len: usize) -> base::Result<()> {
3733
#[cfg(windows)]
3734
{
3735
VirtualUnlock(ptr, len).map_err(|e| base::Error::new(e.code().0))
3736
}
3737
#[cfg(unix)]
3738
{
3739
if libc::munlock(ptr, len) != 0 {
3740
Err(base::Error::last())
3741
} else {
3742
Ok(())
3743
}
3744
}
3745
}
3746
3747
#[test]
fn test_interrupt_injection_when_not_ready() {
    // This test ensures that if we inject an interrupt when it's not ready for interrupt, we
    // shouldn't end up with crash or hang. And if the interrupt is delivered, it shouldn't be
    // delivered before we re-enable the interrupt.
    mod assembly {
        use super::*;

        global_asm_data!(
            pub init,
            ".code16",
            // Set the IDT
            "lidt [0x200]",
            // Set up the stack, which will be used when CPU transfers the control to the ISR on
            // interrupt.
            "mov sp, 0x900",
            // Set ax to 0.
            "xor ax, ax",
            // Set the address 0x910 to 1 when we disable the interrupt, and restore it to 0 after
            // we re-enable the interrupt.
            "mov word ptr [0x910], 1",
            "cli",
            // We can't use hlt for VMEXIT, because HAXM unconditionally allows interrupt injection
            // for hlt. We will inject an interrupt here although all hypervisors should report not
            // ready for injection an interrupt. And we don't care if the injection succeeds or not.
            "out 0x10, ax",
            "sti",
            // Set the address 0x910 to 0 when we re-enable the interrupt.
            "mov word ptr [0x910], 0",
            // For hypervisor that injects the interrupt later when it's ready, the interrupt will
            // be delivered here.
            "nop",
            "hlt",
        );

        // We still need an ISR in case the hypervisor actually delivers an interrupt.
        global_asm_data!(
            pub isr,
            ".code16",
            // ax will be 0 if the interrupt is delivered after we re-enable the interrupt.
            // Otherwise, ax will be 1, and the test fails.
            "mov ax, word ptr [0x910]",
            "iret",
        );
    }

    let start_addr: u32 = 0x200;
    // Allocate exceed 0x900, where we set up our stack.
    let mem_size: u32 = 0x1000;

    let mut setup = TestSetup {
        load_addr: GuestAddress(start_addr.into()),
        initial_regs: Regs {
            rax: 0,
            // Set RFLAGS.IF to enable interrupt at the beginning.
            rflags: 2 | FLAGS_IF_BIT,
            ..Default::default()
        },
        mem_size: mem_size.into(),
        ..Default::default()
    };

    // Guest memory layout, laid out sequentially from start_addr:
    // IDTR, then IDT, then the ISR code, then the entry code.
    let mut cur_addr = start_addr;

    let idtr_size: u32 = 6;
    assert_eq!(
        Ok(std::mem::size_of::<Idtr32>()),
        usize::try_from(idtr_size)
    );
    // The limit is calculated from 256 entries times 4 bytes per entry.
    let idt_size = 256u16 * 4u16;
    let idtr = Idtr32 {
        limit: idt_size - 1,
        // The IDT right follows the IDTR.
        base_address: start_addr + idtr_size,
    };
    setup.add_memory_initialization(GuestAddress(cur_addr.into()), idtr.as_bytes().to_vec());
    cur_addr += idtr_size;

    let idt_entry = (start_addr + idtr_size + u32::from(idt_size)).to_ne_bytes();
    // IDT entries are far pointers(CS:IP pair) to the only ISR, which locates right after the IDT.
    // We set all entries to the same ISR.
    let idt = (0..256).flat_map(|_| idt_entry).collect::<Vec<_>>();
    setup.add_memory_initialization(GuestAddress(cur_addr.into()), idt.clone());
    cur_addr += u32::try_from(idt.len()).expect("IDT size should be within u32");

    let isr_assembly = assembly::isr::data().to_vec();
    setup.add_memory_initialization(GuestAddress(cur_addr.into()), isr_assembly.clone());
    cur_addr += u32::try_from(isr_assembly.len()).expect("ISR size should be within u32");

    let init_assembly = assembly::init::data().to_vec();
    setup.initial_regs.rip = cur_addr.into();
    setup.add_memory_initialization(GuestAddress(cur_addr.into()), init_assembly.clone());
    cur_addr += u32::try_from(init_assembly.len()).expect("init size should be within u32");

    assert!(mem_size > cur_addr);

    run_tests!(
        setup,
        |_, regs, _| {
            assert_eq!(
                regs.rax, 0,
                "the interrupt should be either not delivered(ax is kept as the initial value 0) \
                or is delivered after we reenable the interrupt(when the ax is set from 0x910, \
                0x910 is 0)"
            );
        },
        |_, exit, vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| {
            match exit {
                // We exit and pass the test either the VCPU run fails or we hit hlt.
                VcpuExit::FailEntry { .. } | VcpuExit::Shutdown(..) | VcpuExit::Hlt => true,
                VcpuExit::Io => {
                    // We are always handling out IO port, so no data to return.
                    vcpu.handle_io(&mut |_| {})
                        .expect("should handle IO successfully");
                    assert!(!vcpu.ready_for_interrupt());
                    // We don't care whether we inject the interrupt successfully or not.
                    let _ = vcpu.interrupt(32);
                    false
                }
                r => panic!("unexpected VMEXIT reason: {r:?}"),
            }
        }
    );
}
#[test]
fn test_ready_for_interrupt_for_intercepted_instructions() {
    global_asm_data!(
        assembly,
        // We will use out instruction to cause VMEXITs and test ready_for_interrupt then.
        ".code16",
        // Disable the interrupt.
        "cli",
        // ready_for_interrupt should be false here.
        "out 0x10, ax",
        "sti",
        // ready_for_interrupt should be false here, because of the one instruction
        // interruptibility window for sti. And this is also an intercepted instruction.
        "out 0x20, ax",
        // ready_for_interrupt should be true here except for WHPX.
        "out 0x30, ax",
        // Restore the interruptibility for WHPX.
        "nop",
        "mov ax, ss",
        "mov ss, ax",
        // ready_for_interrupt should be false here, because of the one instruction
        // interruptibility window for mov ss. And this is also an intercepted instruction.
        "out 0x40, ax",
        // ready_for_interrupt should be true here except for WHPX.
        "out 0x50, ax",
        "hlt"
    );

    let assembly = assembly::data().to_vec();
    let setup = TestSetup {
        assembly: assembly.clone(),
        load_addr: GuestAddress(0x1000),
        initial_regs: Regs {
            rip: 0x1000,
            rflags: 2,
            ..Default::default()
        },
        ..Default::default()
    };

    run_tests!(
        setup,
        |_, regs, _| {
            // For VMEXIT caused by HLT, the hypervisor will automatically advance the rIP register.
            assert_eq!(regs.rip, 0x1000 + assembly.len() as u64);
        },
        |hypervisor_type, exit, vcpu, _: &mut dyn Vm| {
            match exit {
                VcpuExit::Hlt => true,
                VcpuExit::Io => {
                    // Sample interruptibility before handling the exit; the port number tells us
                    // which instruction in the guest triggered this exit.
                    let ready_for_interrupt = vcpu.ready_for_interrupt();
                    let mut io_port = 0;
                    vcpu.handle_io(&mut |params| {
                        io_port = params.address;
                        // We are always handling out IO port, so no data to return.
                    })
                    .expect("should handle port IO successfully");
                    match io_port {
                        0x10 | 0x20 | 0x40 => assert!(!ready_for_interrupt),
                        0x30 | 0x50 => {
                            // WHPX needs a not intercepted instruction to recover to the proper
                            // interruptibility state.
                            if hypervisor_type != HypervisorType::Whpx {
                                assert!(ready_for_interrupt);
                            }
                        }
                        _ => panic!("unexpected port {io_port}"),
                    }
                    false
                }
                r => panic!("unexpected exit reason: {r:?}"),
            }
        }
    );
}
#[cfg(feature = "haxm")]
#[test]
fn test_cpuid_mwait_not_supported() {
    global_asm_data!(
        cpuid_code,
        ".code64",
        "mov eax, 1", // CPUID function 1
        "cpuid",
        "hlt"
    );

    let setup = TestSetup {
        assembly: cpuid_code::data().to_vec(),
        load_addr: GuestAddress(0x1000),
        initial_regs: Regs {
            rip: 0x1000,
            rflags: 2,
            ..Default::default()
        },
        ..Default::default()
    };

    let regs_matcher = |_: HypervisorType, regs: &Regs, _: &Sregs| {
        // Check if MWAIT is not supported (CPUID.1:ECX bit 3 must be clear).
        assert_eq!(
            regs.rcx & (1 << 3),
            0,
            "MWAIT is supported, but it should not be."
        );
    };

    let exit_matcher = |_, exit: &VcpuExit, _: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {
        VcpuExit::Hlt => {
            true // Break VM runloop
        }
        r => panic!("unexpected exit reason: {r:?}"),
    };

    run_tests!(setup, regs_matcher, exit_matcher);
}
#[test]
fn test_hardware_breakpoint_with_isr() {
    global_asm_data!(
        setup_debug_handler_code,
        ".code64",
        // Set up the stack
        "mov sp, 0x900",
        "mov rax, 0x1019", // Address of the instruction to trigger the breakpoint
        "mov dr0, rax",
        "mov rax, 0x00000001", // Enable the first breakpoint (local, exact) for execution
        "mov dr7, rax",
        "nop", // This should trigger the debug exception
        "nop",
        "hlt"
    );

    global_asm_data!(
        debug_isr_code,
        ".code64",
        "mov rbx, 0xf00dbabe", // Set a value to indicate the ISR was called
        "mov rax, 0",
        "mov dr7, rax", // Disable debugging again
        "mov rax, dr6",
        "iretq" // Return from interrupt
    );

    global_asm_data!(
        null_isr_code,
        ".code64",
        "mov rbx, 0xbaadf00d", // This ISR should never get called
        "hlt"
    );

    let debug_isr_offset = 0x800;
    let null_isr_offset = 0x700;
    let debug_idt_entry = IdtEntry64::new(debug_isr_offset);
    let null_idt_entry = IdtEntry64::new(null_isr_offset);

    let setup = TestSetup {
        assembly: setup_debug_handler_code::data().to_vec(),
        load_addr: GuestAddress(0x1000),
        mem_size: 0x20000,
        initial_regs: Regs {
            rip: 0x1000,
            rflags: 2 | FLAGS_IF_BIT,
            ..Default::default()
        },
        extra_vm_setup: Some(Box::new(
            move |vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {
                let guest_mem = vm.get_memory();

                guest_mem
                    .write_at_addr(
                        debug_isr_code::data().to_vec().as_bytes(),
                        GuestAddress(debug_isr_offset),
                    )
                    .expect("Failed to write debug ISR entry");

                guest_mem
                    .write_at_addr(
                        null_isr_code::data().to_vec().as_bytes(),
                        GuestAddress(null_isr_offset),
                    )
                    .expect("Failed to write null ISR entry");

                // Route the #DB vector (0x01) to the debug ISR, everything else
                // to the null ISR (which fails the test if reached).
                let mut long_mode_config = ModeConfig::default_long_mode();
                long_mode_config
                    .set_idt_long_mode((0..256).map(|i| {
                        if i == 0x01 {
                            debug_idt_entry
                        } else {
                            null_idt_entry
                        }
                    }))
                    .set_idt_base_addr(0x12_000);
                long_mode_config.enter_long_mode(vcpu, vm);
            },
        )),
        ..Default::default()
    };

    let regs_matcher = |_: HypervisorType, regs: &Regs, _: &Sregs| {
        // rax holds DR6 as read by the ISR; bit 0 means breakpoint #0 fired.
        assert_eq!(regs.rax & 1, 1, "Breakpoint #0 not hit");
        assert_eq!(
            regs.rip,
            0x1000 + (setup_debug_handler_code::data().len() as u64),
            "rIP not at the right HLT"
        );
        assert_eq!(regs.rbx, 0xf00dbabe, "Debug ISR was not called");
    };

    let exit_matcher = |_, exit: &VcpuExit, _: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {
        VcpuExit::Hlt => {
            true // Break VM runloop
        }
        r => panic!("unexpected exit reason: {r:?}"),
    };

    run_tests!(setup, regs_matcher, exit_matcher);
}
#[test]
fn test_debug_register_persistence() {
    global_asm_data!(
        test_debug_registers_code,
        ".code64",
        "mov dr0, rax",
        "inc rax",
        "mov dr1, rax",
        "inc rax",
        "mov dr2, rax",
        "inc rax",
        "mov dr3, rax",
        // Perform HLT to cause VMEXIT
        "hlt",
        // After the VMEXIT, read the debug registers back so the host can
        // verify they survived the exit/re-entry.
        "mov r8, dr0",
        "mov r9, dr1",
        "mov r10, dr2",
        "mov r11, dr3",
        "hlt"
    );

    let initial_dr_value: u64 = 0x12345678;

    let setup = TestSetup {
        assembly: test_debug_registers_code::data().to_vec(),
        mem_size: 0x11000,
        load_addr: GuestAddress(0x1000),
        initial_regs: Regs {
            rax: initial_dr_value,
            rip: 0x1000,
            rflags: 2,
            ..Default::default()
        },
        extra_vm_setup: Some(Box::new(|vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {
            ModeConfig::default_long_mode().enter_long_mode(vcpu, vm);
        })),
        ..Default::default()
    };

    let mut hlt_count = 0;

    run_tests!(
        setup,
        |_, regs, _| {
            assert_eq!(regs.r8, initial_dr_value, "DR0 value mismatch after VMEXIT");
            assert_eq!(
                regs.r9,
                initial_dr_value + 1,
                "DR1 value mismatch after VMEXIT"
            );
            assert_eq!(
                regs.r10,
                initial_dr_value + 2,
                "DR2 value mismatch after VMEXIT"
            );
            assert_eq!(
                regs.r11,
                initial_dr_value + 3,
                "DR3 value mismatch after VMEXIT"
            );
        },
        |_, exit, _, _: &mut dyn Vm| match exit {
            VcpuExit::Hlt => {
                hlt_count += 1;
                hlt_count > 1 // Halt execution after the second HLT
            }
            r => panic!("unexpected exit reason: {r:?}"),
        }
    );
}
#[test]
fn test_minimal_exception_injection() {
    // This test tries to write an invalid MSR, causing a General Protection exception to be
    // injected by the hypervisor (since MSR writes cause a VMEXIT). We run it in long mode since
    // real mode exception handling isn't always well supported (failed on Intel HAXM).
    mod assembly {
        use super::*;

        // An ISR that handles any generic interrupt.
        global_asm_data!(
            pub isr_generic,
            ".code64",
            // Set EBX to 888 to observe this is where we halted.
            "mov ebx, 888",
            "hlt"
        );

        // An ISR that handles the General Protection fault specifically.
        global_asm_data!(
            pub isr_gp,
            ".code64",
            // Set EBX to 999 to observe this is where we halted.
            "mov ebx, 999",
            "hlt"
        );

        // Our VM entry (in long mode).
        global_asm_data!(
            pub init,
            ".code64",
            // Set up the stack, which will be used when CPU transfers the control to the ISR. If
            // not set up, can cause faults (stack should be aligned).
            "mov esp, 0x900",
            // We will verify EBX, set it here first.
            "mov ebx, 777",
            // Should trigger GP fault when we try to write to MSR 0.
            "wrmsr",
            // We should never get here since we halt in the fault handlers.
            "hlt",
        );
    }

    let mem_size: u64 = 0x20000;

    let setup = TestSetup {
        initial_regs: Regs {
            // WRMSR will try to write to ECX, we set it to zero to point to an old read-only MSR
            // (IA32_P5_MC_ADDR).
            rcx: 0,
            // Intentionally not setting IF flag since exceptions don't check it.
            rflags: 2,
            ..Default::default()
        },
        mem_size,
        extra_vm_setup: Some(Box::new(|vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {
            // Guest memory layout, sequential from start_addr: generic ISR,
            // then GP ISR, then the entry code.
            let start_addr: u64 = 0x1000;
            let guest_mem = vm.get_memory();

            let isr_assembly = assembly::isr_generic::data().to_vec();
            let isr_assembly_len =
                u64::try_from(isr_assembly.len()).expect("ISR size should be within u64");

            let isr_gp_assembly = assembly::isr_gp::data().to_vec();
            let isr_gp_assembly_len =
                u64::try_from(isr_gp_assembly.len()).expect("GP ISR size should be within u64");

            let mut cur_addr = start_addr;

            guest_mem
                .write_at_addr(&isr_assembly, GuestAddress(cur_addr))
                .expect("Failed to write ISR to guest memory");
            cur_addr += isr_assembly_len;

            guest_mem
                .write_at_addr(&isr_gp_assembly, GuestAddress(cur_addr))
                .expect("Failed to write ISR to guest memory");
            cur_addr += isr_gp_assembly_len;

            let mut regs = vcpu.get_regs().expect("Failed to get regs");
            regs.rip = cur_addr;
            vcpu.set_regs(&regs).expect("Failed to set regs");

            let init_assembly = assembly::init::data().to_vec();
            guest_mem
                .write_at_addr(&init_assembly, GuestAddress(cur_addr))
                .expect("Failed to write init assembly to guest memory");

            let idt_entry_generic = IdtEntry64::new(start_addr);
            let idt_entry_gp = IdtEntry64::new(start_addr + isr_assembly_len);

            let mut long_mode_config = ModeConfig::default_long_mode();
            long_mode_config
                .set_idt_long_mode((0..256).map(|i| {
                    // GP handler is vector 13.
                    if i == 0x0D {
                        idt_entry_gp
                    } else {
                        idt_entry_generic
                    }
                }))
                .set_idt_base_addr(0x12_000);
            long_mode_config.enter_long_mode(vcpu, vm);
        })),
        ..Default::default()
    };

    run_tests!(
        setup,
        |_, regs, _| {
            // If EBX is 999 the GP handler ran.
            assert_eq!(regs.rbx, 999);
        },
        |_, exit, _, _: &mut dyn Vm| matches!(exit, VcpuExit::Hlt)
    );
}
#[test]
fn test_pmode_segment_limit() {
    // This test configures 32-bit protected mode and verifies that segment limits are converted
    // correctly. The test setup configures a segment with the 20-bit limit field set to 0xFFFFF and
    // the 4096-byte granularity bit set, which should result in a 4 GB limit (0xFFFFFFFF).
    mod assembly {
        use super::*;

        global_asm_data!(
            pub init,
            ".code32",
            // Load the CS segment limit into EAX.
            "mov cx, cs",
            "lsl eax, cx",
            "hlt",
        );
    }

    let mem_size: u64 = 0x20000;

    let setup = TestSetup {
        initial_regs: Regs {
            ..Default::default()
        },
        mem_size,
        extra_vm_setup: Some(Box::new(|vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {
            ModeConfig::default_protected_mode().enter_protected_mode(vcpu, vm);

            let guest_mem = vm.get_memory();

            // rax starts at a sentinel value so a no-op LSL is detectable.
            let mut regs = vcpu.get_regs().expect("Failed to get regs");
            regs.rax = 12345;
            regs.rip = 0x1000;
            vcpu.set_regs(&regs).expect("Failed to set regs");

            let init_assembly = assembly::init::data().to_vec();
            guest_mem
                .write_at_addr(&init_assembly, GuestAddress(0x1000))
                .expect("Failed to write init assembly to guest memory");
        })),
        ..Default::default()
    };

    run_tests!(
        setup,
        |_, regs, _| {
            // The output of the LSL instruction should be 4GB - 1.
            assert_eq!(regs.rax, 0xFFFFFFFF);
        },
        |_, exit, _, _: &mut dyn Vm| matches!(exit, VcpuExit::Hlt)
    );
}