// Source: hypervisor/tests/hypervisor_virtualization.rs (crosvm)
// Copyright 2024 The ChromiumOS Authors1// Use of this source code is governed by a BSD-style license that can be2// found in the LICENSE file.34#![cfg(target_arch = "x86_64")]5#![cfg(any(feature = "whpx", feature = "gvm", feature = "haxm", unix))]67use core::mem;8use std::arch::asm;9use std::cell::RefCell;10use std::ffi::c_void;11use std::sync::atomic::AtomicU8;12use std::sync::atomic::Ordering;13use std::sync::Arc;1415use base::set_cpu_affinity;16use base::MappedRegion;17use base::MemoryMappingBuilder;18use base::SharedMemory;19#[cfg(feature = "gvm")]20use hypervisor::gvm::*;21#[cfg(all(windows, feature = "haxm"))]22use hypervisor::haxm::*;23#[cfg(any(target_os = "android", target_os = "linux"))]24use hypervisor::kvm::*;25#[cfg(all(windows, feature = "whpx"))]26use hypervisor::whpx::*;27#[cfg(any(target_os = "android", target_os = "linux"))]28use hypervisor::MemCacheType::CacheCoherent;29use hypervisor::*;30use hypervisor_test_macro::global_asm_data;31use sync::Mutex;32use vm_memory::GuestAddress;33use vm_memory::GuestMemory;34#[cfg(windows)]35use windows::Win32::System::Memory::VirtualLock;36#[cfg(windows)]37use windows::Win32::System::Memory::VirtualUnlock;38use zerocopy::FromBytes;39use zerocopy::Immutable;40use zerocopy::IntoBytes;41use zerocopy::KnownLayout;4243const FLAGS_IF_BIT: u64 = 0x200;4445#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]46pub enum HypervisorType {47Kvm,48Whpx,49Haxm,50Gvm,51}5253#[repr(C, packed)]54#[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]55/// Define IDTR value used in real mode or 32bit protected mode.56struct Idtr32 {57// The lower 2 bytes are limit.58limit: u16,59// The higher 4 bytes are base address.60base_address: u32,61}6263#[repr(C, packed)]64#[derive(Debug, Copy, Clone, FromBytes, Immutable, IntoBytes, KnownLayout)]65/// IDT entries for long mode.66struct IdtEntry64 {67address_low: u16,68selector: u16,69ist: u8,70flags: u8,71address_mid: u16,72address_high: u32,73reserved: u32,74}7576impl IdtEntry64 {77pub 
fn new(handler_addr: u64) -> Self {78IdtEntry64 {79address_low: (handler_addr & 0xFFFF) as u16,80selector: 0x10, // Our long mode CS is the third entry (0x0, 0x8, 0x10).81ist: 0,82flags: 0x8E, // Present, interrupt gate, DPL 083address_mid: ((handler_addr >> 16) & 0xFFFF) as u16,84address_high: (handler_addr >> 32) as u32,85reserved: 0,86}87}88}8990impl std::fmt::Display for HypervisorType {91fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {92match self {93HypervisorType::Kvm => write!(f, "KVM"),94HypervisorType::Whpx => write!(f, "WHPX"),95HypervisorType::Haxm => write!(f, "HAXM"),96HypervisorType::Gvm => write!(f, "GVM"),97}98}99}100101pub trait HypervisorTestSetup {102type Hypervisor: Hypervisor;103type Vm: VmX86_64;104105fn create_vm(guest_mem: GuestMemory) -> (Self::Hypervisor, Self::Vm);106}107108#[cfg(any(target_os = "android", target_os = "linux"))]109impl HypervisorTestSetup for Kvm {110type Hypervisor = Kvm;111type Vm = KvmVm;112113fn create_vm(guest_mem: GuestMemory) -> (Self::Hypervisor, Self::Vm) {114let kvm = Kvm::new().expect("failed to create kvm");115let vm = KvmVm::new(&kvm, guest_mem, Default::default()).expect("failed to create vm");116(kvm, vm)117}118}119120#[cfg(all(windows, feature = "whpx"))]121impl HypervisorTestSetup for Whpx {122type Hypervisor = Whpx;123type Vm = WhpxVm;124125fn create_vm(guest_mem: GuestMemory) -> (Self::Hypervisor, Self::Vm) {126let whpx = Whpx::new().expect("failed to create whpx");127let vm = WhpxVm::new(&whpx, 1, guest_mem, CpuId::new(0), false, None)128.expect("failed to create vm");129(whpx, vm)130}131}132133#[cfg(all(windows, feature = "haxm"))]134impl HypervisorTestSetup for Haxm {135type Hypervisor = Haxm;136type Vm = HaxmVm;137138fn create_vm(guest_mem: GuestMemory) -> (Self::Hypervisor, Self::Vm) {139let haxm = Haxm::new().expect("failed to create haxm");140let vm = HaxmVm::new(&haxm, guest_mem).expect("failed to create vm");141(haxm, vm)142}143}144145#[cfg(feature = "gvm")]146impl 
HypervisorTestSetup for Gvm {147type Hypervisor = Gvm;148type Vm = GvmVm;149150fn create_vm(guest_mem: GuestMemory) -> (Self::Hypervisor, Self::Vm) {151let gvm = Gvm::new().expect("failed to create gvm");152let vm = GvmVm::new(&gvm, guest_mem).expect("failed to create vm");153(gvm, vm)154}155}156157pub struct TestSetup {158pub assembly: Vec<u8>,159pub load_addr: GuestAddress,160pub mem_size: u64,161pub initial_regs: Regs,162pub extra_vm_setup: Option<Box<dyn Fn(&mut dyn VcpuX86_64, &mut dyn Vm) + Send>>,163pub memory_initializations: Vec<(GuestAddress, Vec<u8>)>,164pub expect_run_success: bool,165166/// Whether the `exit_matcher` should recieve [`VcpuExit::Intr`]. Default to `false`.167///168/// Hypervisors may occasinally receive [`VcpuExit::Intr`] if external interrupt intercept is169/// enabled. In such case, we should proceed to the next VCPU run to handle it. HAXM doesn't170/// distinguish between [`VcpuExit::Intr`] and [`VcpuExit::IrqWindowOpen`], so it may be171/// necessary to intercept [`VcpuExit::Intr`] for testing172/// [`VcpuX86_64::set_interrupt_window_requested`].173pub intercept_intr: bool,174}175176impl Default for TestSetup {177fn default() -> Self {178TestSetup {179assembly: Vec::new(),180load_addr: GuestAddress(0),181mem_size: 0xF000, // Big enough default for long mode setup182initial_regs: Regs::default(),183extra_vm_setup: None,184memory_initializations: Vec::new(),185expect_run_success: true,186intercept_intr: false,187}188}189}190191impl TestSetup {192pub fn new() -> Self {193Default::default()194}195196pub fn add_memory_initialization(&mut self, addr: GuestAddress, data: Vec<u8>) {197self.memory_initializations.push((addr, data));198}199}200201pub fn run_configurable_test<H: HypervisorTestSetup>(202hypervisor_type: HypervisorType,203setup: &TestSetup,204regs_matcher: impl Fn(HypervisorType, &Regs, &Sregs),205mut exit_matcher: impl FnMut(HypervisorType, &VcpuExit, &mut dyn VcpuX86_64, &mut dyn Vm) -> bool,206) {207println!("Running test on 
hypervisor: {hypervisor_type}");208209let guest_mem =210GuestMemory::new(&[(GuestAddress(0), setup.mem_size)]).expect("failed to create guest mem");211212for (addr, data) in &setup.memory_initializations {213guest_mem214.write_at_addr(data, *addr)215.expect("failed to write memory initialization");216}217218guest_mem219.write_at_addr(&setup.assembly, setup.load_addr)220.expect("failed to write to guest memory");221222let (_, mut vm) = H::create_vm(guest_mem);223224let mut vcpu = vm.create_vcpu(0).expect("new vcpu failed");225226let mut sregs = vcpu.get_sregs().expect("get sregs failed");227sregs.cs.base = 0;228sregs.cs.selector = 0;229vcpu.set_sregs(&sregs).expect("set sregs failed");230vcpu.set_regs(&setup.initial_regs).expect("set regs failed");231232if let Some(ref setup_fn) = setup.extra_vm_setup {233setup_fn(&mut *vcpu, &mut vm);234}235236if !vm.check_capability(VmCap::EarlyInitCpuid) {237let cpuid = vm238.get_hypervisor()239.get_supported_cpuid()240.expect("get_supported_cpuid() failed");241vcpu.set_cpuid(&cpuid).expect("set_cpuid() failed");242}243244loop {245match vcpu.run() {246Ok(exit) => match exit {247// Handle interrupts by continuing the loop248VcpuExit::Intr if !setup.intercept_intr => continue,249other_exit => {250if !setup.expect_run_success {251panic!("Expected vcpu.run() to fail, but it succeeded");252}253if exit_matcher(hypervisor_type, &other_exit, &mut *vcpu, &mut vm) {254break;255}256}257},258Err(e) => {259if setup.expect_run_success {260panic!("Expected vcpu.run() to succeed, but it failed with error: {e:?}");261} else {262println!("Expected failure occurred: {e:?}");263break;264}265}266}267}268269let final_regs = vcpu.get_regs().expect("failed to get regs");270let final_sregs = vcpu.get_sregs().expect("failed to get sregs");271272regs_matcher(hypervisor_type, &final_regs, &final_sregs);273}274275macro_rules! 
run_tests {276($setup:expr, $regs_matcher:expr, $exit_matcher:expr) => {277#[cfg(any(target_os = "android", target_os = "linux"))]278run_configurable_test::<Kvm>(HypervisorType::Kvm, &$setup, $regs_matcher, $exit_matcher);279280#[cfg(all(windows, feature = "whpx"))]281run_configurable_test::<Whpx>(HypervisorType::Whpx, &$setup, $regs_matcher, $exit_matcher);282283#[cfg(all(windows, feature = "haxm"))]284run_configurable_test::<Haxm>(HypervisorType::Haxm, &$setup, $regs_matcher, $exit_matcher);285286#[cfg(feature = "gvm")]287run_configurable_test::<Gvm>(HypervisorType::Gvm, &$setup, $regs_matcher, $exit_matcher);288};289}290291const DEFAULT_GDT_OFFSET: u64 = 0x1500;292const DEFAULT_IDT_OFFSET: u64 = 0x1528;293294const DESC_ACCESS_EXEC: u8 = 1 << 3;295const DESC_ACCESS_RW: u8 = 1 << 1;296const DESC_ACCESS_ACCESSED: u8 = 1 << 0;297298#[derive(Debug, Clone, Copy)]299struct LongModePageTableEntry {300execute_disable: bool,301protection_key: u8,302address: u64,303global: bool,304page_attribute_table: bool,305dirty: bool,306accessed: bool,307cache_disable: bool,308write_through: bool,309user_supervisor: bool,310read_write: bool,311present: bool,312}313314impl LongModePageTableEntry {315fn from_address(address: u64) -> Self {316assert!(address < 1 << 52, "the address must fit in 52 bits");317assert!(address & 0xFFF == 0, "the address must be aligned to 4k");318Self {319execute_disable: false,320protection_key: 0,321address,322global: false,323page_attribute_table: false,324dirty: false,325accessed: false,326cache_disable: false,327write_through: false,328user_supervisor: false,329read_write: false,330present: false,331}332}333}334335impl From<LongModePageTableEntry> for u64 {336fn from(page_table_entry: LongModePageTableEntry) -> Self {337let mut res = 0;338if page_table_entry.present {339res |= 1;340}341if page_table_entry.read_write {342res |= 1 << 1;343}344if page_table_entry.user_supervisor {345res |= 1 << 2;346}347if page_table_entry.write_through {348res |= 1 << 
3;349}350if page_table_entry.cache_disable {351res |= 1 << 4;352}353if page_table_entry.accessed {354res |= 1 << 5;355}356if page_table_entry.dirty {357res |= 1 << 6;358}359if page_table_entry.page_attribute_table {360res |= 1 << 7;361}362if page_table_entry.global {363res |= 1 << 8;364}365assert!(page_table_entry.address < 1 << 52);366assert!(page_table_entry.address & 0xFFF == 0);367res |= page_table_entry.address;368assert!(page_table_entry.protection_key < 1 << 4);369res |= u64::from(page_table_entry.protection_key) << 59;370if page_table_entry.execute_disable {371res |= 1 << 63;372}373res374}375}376377#[derive(Debug, Clone)]378struct ModeConfig {379idt: Vec<u8>,380idt_base_addr: u64,381gdt: Vec<Segment>,382gdt_base_addr: u64,383code_segment_index: u16,384task_segment_index: Option<u16>,385page_table: Option<Box<[u8; 0x1000]>>,386long_mode: bool,387}388389impl ModeConfig {390const IDT64_SIZE: usize = std::mem::size_of::<IdtEntry64>() * 256;391const IDT32_SIZE: usize = 8 * 256;392393/// Set the IDT for long mode.394fn set_idt_long_mode(&mut self, idt: impl IntoIterator<Item = IdtEntry64>) -> &mut Self {395let entries = idt.into_iter().collect::<Vec<_>>();396assert_eq!(entries.len(), 256, "IDT must contain 256 entries");397self.idt = entries398.into_iter()399.flat_map(|entry| entry.as_bytes().to_owned())400.collect();401self402}403404fn set_idt_base_addr(&mut self, idt_base_addr: u64) -> &mut Self {405self.idt_base_addr = idt_base_addr;406self407}408409fn default_code_segment_long_mode() -> Segment {410Segment {411base: 0,412limit_bytes: 0xffff_ffff,413type_: DESC_ACCESS_EXEC | DESC_ACCESS_RW | DESC_ACCESS_ACCESSED,414present: 1,415dpl: 0,416db: 0,417s: 1,418l: 1,419g: 1,420..Default::default()421}422}423424fn default_code_segment_protected_mode() -> Segment {425Segment {426base: 0,427limit_bytes: 0xffff_ffff,428type_: DESC_ACCESS_EXEC | DESC_ACCESS_RW | DESC_ACCESS_ACCESSED,429present: 1,430dpl: 0,431db: 1,432s: 1,433l: 0,434g: 
1,435..Default::default()436}437}438439fn segment_to_bytes(segment: &Segment, long_mode: bool) -> Vec<u8> {440if *segment == Segment::default() {441// Special handle for null descriptor, so that it won't be recognized as a 64442// bit system segment.443return vec![0u8; 8];444}445let Segment {446base,447limit_bytes,448type_,449present,450dpl,451db,452s,453l,454g,455..456} = *segment;457458let limit = if g != 0 {459// 4096-byte granularity460limit_bytes / 4096461} else {462// 1-byte granularity463limit_bytes464};465466assert!(limit < (1 << 20)); // limit value must fit in 20 bits467let flags = {468let mut flags = 0;469if g != 0 {470flags |= 1 << 3;471}472if db != 0 {473flags |= 1 << 2;474}475if l != 0 {476flags |= 1 << 1;477}478flags << 4479};480assert!(flags & 0x0F == 0x00); // flags must be in the high 4 bits only481let access = {482assert!(type_ < (1 << 4), "type must fit in 4 bits");483let mut access = type_;484if present != 0 {485access |= 1 << 7;486}487assert!(dpl < (1 << 2), "DPL must fit in 2 bits");488access |= dpl << 5;489if s != 0 {490access |= 1 << 4;491}492access493};494495let limit_lo = (limit & 0xffff).try_into().unwrap();496let base_lo = (base & 0xffff).try_into().unwrap();497let base_mid0 = ((base >> 16) & 0xff).try_into().unwrap();498let limit_hi_and_flags = u8::try_from((limit >> 16) & 0xf).unwrap() | flags;499let base_mid1 = ((base >> 24) & 0xff).try_into().unwrap();500let base_hi = (base >> 32).try_into().unwrap();501502if long_mode && s == 0 {503// 64 bit system segment descriptor.504#[repr(C, packed)]505#[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]506struct Descriptor {507limit_lo: u16,508base_lo: u16,509base_mid0: u8,510access: u8,511limit_hi_and_flags: u8,512base_mid1: u8,513base_hi: u32,514_reserved: [u8; 4],515}516517Descriptor {518limit_lo,519base_lo,520base_mid0,521access,522limit_hi_and_flags,523base_mid1,524base_hi,525_reserved: [0; 4],526}527.as_bytes()528.to_owned()529} else {530#[repr(C, packed)]531#[derive(FromBytes, 
Immutable, IntoBytes, KnownLayout)]532struct Descriptor {533limit_lo: u16,534base_lo: u16,535base_mid: u8,536access: u8,537limit_hi_and_flags: u8,538base_hi: u8,539}540541assert_eq!(base_hi, 0, "the base address must be within 32 bit range");542Descriptor {543limit_lo,544base_lo,545base_mid: base_mid0,546access,547limit_hi_and_flags,548base_hi: base_mid1,549}550.as_bytes()551.to_owned()552}553}554555fn get_gdt_bytes(&self) -> Vec<u8> {556self.gdt557.iter()558.flat_map(|segment| Self::segment_to_bytes(segment, self.long_mode))559.collect()560}561562fn configure_gdt_memory(&self, guest_mem: &GuestMemory) {563let gdt_bytes = self.get_gdt_bytes();564let gdt_start_addr = GuestAddress(self.gdt_base_addr);565let gdt_end_addr = gdt_start_addr566.checked_add(567gdt_bytes568.len()569.try_into()570.expect("the GDT size must be within usize"),571)572.expect("the end of GDT address shouldn't overflow");573assert!(574guest_mem.range_overlap(GuestAddress(self.gdt_base_addr), gdt_end_addr),575"the address for GDT is not mapped"576);577guest_mem578.write_at_addr(&gdt_bytes, GuestAddress(self.gdt_base_addr))579.expect("Failed to write GDT entry to guest memory");580}581582fn configure_idt_memory(&self, guest_mem: &GuestMemory) {583let expected_length = if self.long_mode {584Self::IDT64_SIZE585} else {586Self::IDT32_SIZE587};588589let idt_addr = GuestAddress(self.idt_base_addr);590assert_eq!(self.idt.len(), expected_length);591assert!(592guest_mem.range_overlap(593idt_addr,594idt_addr595.checked_add(596self.idt597.len()598.try_into()599.expect("The IDT length must be within the u64 range.")600)601.expect("The end address of IDT should not overflow")602),603"The IDT that starts at {:#x} isn't properly mapped as the guest memory.",604self.idt_base_addr605);606guest_mem607.write_at_addr(&self.idt, idt_addr)608.expect("failed to write IDT entry to guest memory");609}610611fn get_idtr_value(&self) -> DescriptorTable {612DescriptorTable {613base: self.idt_base_addr,614limit: {615let 
expected_length = if self.long_mode {616Self::IDT64_SIZE617} else {618Self::IDT32_SIZE619};620assert_eq!(self.idt.len(), expected_length, "the IDT size should match",);621// The IDT limit should be the number of bytes of IDT - 1.622(self.idt.len() - 1)623.try_into()624.expect("the IDT limit should be within the range of u16")625},626}627}628629fn get_gdtr_value(&self) -> DescriptorTable {630DescriptorTable {631base: self.gdt_base_addr,632limit: (self.get_gdt_bytes().len() - 1)633.try_into()634.expect("the GDT limit should fit in 16 bits"),635}636}637638fn get_segment_register_value(&self, segment_index: u16) -> Segment {639let offset: usize = self640.gdt641.iter()642.take(segment_index.into())643.map(|segment| Self::segment_to_bytes(segment, self.long_mode).len())644.sum();645Segment {646selector: offset647.try_into()648.expect("the offset should be within the range of u16"),649..self.gdt[usize::from(segment_index)]650}651}652653pub fn configure_long_mode_memory(&self, vm: &mut dyn Vm) {654let guest_mem = vm.get_memory();655656self.configure_gdt_memory(guest_mem);657self.configure_idt_memory(guest_mem);658659// Setup paging660let pml4_addr = GuestAddress(0x9000);661let pdpte_addr = GuestAddress(0xa000);662let pde_addr = GuestAddress(0xb000);663let pte_addr = GuestAddress(0xc000);664665assert!(666guest_mem.range_overlap(GuestAddress(0x9000), GuestAddress(0xd000)),667"the memory range for page tables should be mapped."668);669670// Pointing to PDPTE with present and RW flags671guest_mem672.write_at_addr(&(pdpte_addr.0 | 3).to_le_bytes(), pml4_addr)673.expect("failed to write PML4 entry");674675// Pointing to PD with present and RW flags676guest_mem677.write_at_addr(&(pde_addr.0 | 3).to_le_bytes(), pdpte_addr)678.expect("failed to write PDPTE entry");679680for i in 0..512 {681// All pages are present and RW.682let flags: u64 = if i == 0 {6833684} else {685// The first 2MiB are 4K pages, the rest are 2M pages.6860x83687};688let addr = if i == 0 { pte_addr.offset() } 
else { i << 21 };689let pd_entry_bytes = (addr | flags).to_le_bytes();690guest_mem691.write_at_addr(692&pd_entry_bytes,693pde_addr.unchecked_add(i * mem::size_of::<u64>() as u64),694)695.expect("Failed to write PDE entry");696}697698guest_mem699.write_at_addr(700self.page_table701.as_ref()702.expect("page table must present for long mode")703.as_slice(),704pte_addr,705)706.expect("Failed to write PTE entry");707}708709pub fn enter_long_mode(&self, vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm) {710self.configure_long_mode_memory(vm);711712let mut sregs = vcpu.get_sregs().expect("failed to get sregs");713714sregs.gdt = self.get_gdtr_value();715sregs.idt = self.get_idtr_value();716sregs.cs = self.get_segment_register_value(self.code_segment_index);717718if let Some(task_segment_index) = self.task_segment_index {719sregs.tr = self.get_segment_register_value(task_segment_index);720}721722// Long mode723let pml4_addr = GuestAddress(0x9000);724sregs.cr0 |= 0x1 | 0x80000000; // PE & PG725sregs.efer |= 0x100 | 0x400; // LME & LMA (Must be auto-enabled with CR0_PG)726sregs.cr3 = pml4_addr.offset();727sregs.cr4 |= 0x80 | 0x20; // PGE & PAE728729vcpu.set_sregs(&sregs).expect("failed to set sregs");730}731732pub fn configure_flat_protected_mode_memory(&self, vm: &mut dyn Vm) {733let guest_mem = vm.get_memory();734735self.configure_gdt_memory(guest_mem);736self.configure_idt_memory(guest_mem);737}738739pub fn enter_protected_mode(&self, vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm) {740self.configure_flat_protected_mode_memory(vm);741742let mut sregs = vcpu.get_sregs().expect("failed to get sregs");743744sregs.cs = self.get_segment_register_value(self.code_segment_index);745sregs.gdt = self.get_gdtr_value();746sregs.idt = self.get_idtr_value();747748assert!(749self.task_segment_index.is_none(),750"task segment not supported for protected mode yet."751);752753assert!(754self.page_table.is_none(),755"setting page tables for protected mode is not supported yet"756);757// 32-bit 
protected mode, paging disabled758sregs.cr0 |= 0x1; // PE759sregs.cr0 &= !0x80000000; // ~PG760761vcpu.set_sregs(&sregs).expect("failed to set sregs");762}763764fn default_long_mode() -> Self {765let page_table = (0u64..512)766.flat_map(|page_frame_number| {767let page_table_entry = LongModePageTableEntry {768present: true,769read_write: true,770..LongModePageTableEntry::from_address(page_frame_number << 12)771};772u64::from(page_table_entry).as_bytes().to_owned()773})774.collect::<Box<[u8]>>()775.try_into()776.expect("the length of the slice must match");777Self {778idt_base_addr: DEFAULT_IDT_OFFSET,779idt: vec![0; Self::IDT64_SIZE],780gdt_base_addr: DEFAULT_GDT_OFFSET,781gdt: vec![782Segment::default(),783Segment::default(),784Self::default_code_segment_long_mode(),785],786code_segment_index: 2,787task_segment_index: None,788page_table: Some(page_table),789long_mode: true,790}791}792793fn default_protected_mode() -> Self {794Self {795idt_base_addr: DEFAULT_IDT_OFFSET,796idt: vec![0; Self::IDT32_SIZE],797gdt_base_addr: DEFAULT_GDT_OFFSET,798gdt: vec![799Segment::default(),800Segment::default(),801Self::default_code_segment_protected_mode(),802],803code_segment_index: 2,804task_segment_index: None,805page_table: None,806long_mode: false,807}808}809}810811global_asm_data!(812test_minimal_virtualization_code,813".code16",814"add ax, bx",815"hlt"816);817818// This runs a minimal program under virtualization.819// It should require only the ability to execute instructions under virtualization, physical820// memory, the ability to get and set some guest VM registers, and intercepting HLT.821#[test]822fn test_minimal_virtualization() {823let assembly = test_minimal_virtualization_code::data().to_vec();824let setup = TestSetup {825assembly: assembly.clone(),826load_addr: GuestAddress(0x1000),827initial_regs: Regs {828rip: 0x1000,829rax: 1,830rbx: 2,831rflags: 2,832..Default::default()833},834..Default::default()835};836837run_tests!(838setup,839|_, regs, _| 
{840assert_eq!(regs.rax, 3); // 1 + 2841842// For VMEXIT caused by HLT, the hypervisor will automatically advance the rIP register.843assert_eq!(regs.rip, 0x1000 + assembly.len() as u64);844},845|_, exit: &_, _: &mut _, _: &mut _| -> bool { matches!(exit, VcpuExit::Hlt) }846);847}848849global_asm_data!(850test_io_exit_handler_code,851".code16",852"out 0x10, al",853"in al, 0x20",854"add ax, bx",855"hlt",856);857858#[test]859fn test_io_exit_handler() {860// Use the OUT/IN instructions, which cause an Io exit in order to861// read/write data using a given port.862let load_addr = GuestAddress(0x1000);863let setup = TestSetup {864assembly: test_io_exit_handler_code::data().to_vec(),865load_addr,866initial_regs: Regs {867rip: load_addr.offset(),868rax: 0x34, // Only AL (lower byte of RAX) is used869rbx: 0x42,870rflags: 2,871..Default::default()872},873..Default::default()874};875876let regs_matcher = |_, regs: &Regs, _: &_| {877// The result in AX should be double the initial value of AX878// plus the initial value of BX.879assert_eq!(regs.rax, (0x34 * 2) + 0x42);880};881882let cached_byte = AtomicU8::new(0);883let exit_matcher =884move |_, exit: &VcpuExit, vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {885VcpuExit::Io => {886vcpu.handle_io(&mut |IoParams { address, operation }| {887match operation {888IoOperation::Read(data) => {889assert_eq!(address, 0x20);890assert_eq!(data.len(), 1);891// The original number written below will be doubled and892// passed back.893data[0] = cached_byte.load(Ordering::SeqCst) * 2;894}895IoOperation::Write(data) => {896assert_eq!(address, 0x10);897assert_eq!(data.len(), 1);898assert_eq!(data[0], 0x34);899cached_byte.fetch_add(data[0], Ordering::SeqCst);900}901}902})903.expect("failed to set the data");904false // Continue VM runloop905}906VcpuExit::Hlt => {907true // Break VM runloop908}909r => panic!("unexpected exit reason: {r:?}"),910};911run_tests!(setup, regs_matcher, 
&exit_matcher);912}913914global_asm_data!(915test_io_rep_string_code,916".code16",917"cld",918"mov dx, 0x80", // read data from I/O port 80h919"mov di, 0x100", // write data to memory address 0x100920"mov cx, 5", // repeat 5 times921"rep insb",922"mov si, 0x100", // read data from memory address 0x100923"mov dx, 0x80", // write data to I/O port 80h924"mov cx, 5", // repeat 5 times925"rep outsb",926"mov cx, 0x5678",927"hlt",928);929930#[cfg(not(feature = "haxm"))]931#[test]932fn test_io_rep_string() {933// Test the REP OUTS*/REP INS* string I/O instructions, which should call the IO handler934// multiple times to handle the requested repeat count.935let load_addr = GuestAddress(0x1000);936let setup = TestSetup {937assembly: test_io_rep_string_code::data().to_vec(),938load_addr,939initial_regs: Regs {940rip: load_addr.offset(),941rax: 0x1234,942rflags: 2,943..Default::default()944},945..Default::default()946};947948let regs_matcher = |_, regs: &Regs, _: &_| {949// The string I/O instructions should not modify AX.950assert_eq!(regs.rax, 0x1234);951assert_eq!(regs.rcx, 0x5678);952};953954let read_data = AtomicU8::new(0);955let write_data = AtomicU8::new(0);956let exit_matcher =957move |_, exit: &VcpuExit, vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| match exit {958VcpuExit::Io => {959vcpu.handle_io(&mut |IoParams { address, operation }| {960match operation {961IoOperation::Read(data) => {962assert_eq!(address, 0x80);963assert_eq!(data.len(), 1);964// Return 0, 1, 2, 3, 4 for subsequent reads.965data[0] = read_data.fetch_add(1, Ordering::SeqCst);966}967IoOperation::Write(data) => {968assert_eq!(address, 0x80);969assert_eq!(data.len(), 1);970// Expect 0, 1, 2, 3, 4 to be written.971let expected_write = write_data.fetch_add(1, Ordering::SeqCst);972assert_eq!(data[0], expected_write);973}974}975})976.expect("failed to set the data");977false // Continue VM runloop978}979VcpuExit::Hlt => {980// Verify 5 reads and writes 
occurred.981assert_eq!(read_data.load(Ordering::SeqCst), 5);982assert_eq!(write_data.load(Ordering::SeqCst), 5);983984// Verify the data that should have been written to memory by REP INSB.985let mem = vm.get_memory();986let mut data = [0u8; 5];987mem.read_exact_at_addr(&mut data, GuestAddress(0x100))988.unwrap();989assert_eq!(data, [0, 1, 2, 3, 4]);990991true // Break VM runloop992}993r => panic!("unexpected exit reason: {r:?}"),994};995run_tests!(setup, regs_matcher, &exit_matcher);996}997998global_asm_data!(999test_mmio_exit_cross_page_code,1000".code16",1001"mov byte ptr [ebx], al",1002"mov al, byte ptr [ecx]",1003"hlt",1004);10051006// This test is similar to mmio_fetch_memory.rs (remove eventually)1007// but applies to all hypervisors.1008#[test]1009fn test_mmio_exit_cross_page() {1010let page_size = 4096u64;1011let load_addr = GuestAddress(page_size - 1); // Last byte of the first page10121013let setup = TestSetup {1014assembly: test_mmio_exit_cross_page_code::data().to_vec(),1015load_addr,1016mem_size: 0x2000,1017initial_regs: Regs {1018rip: load_addr.offset(),1019rax: 0x33,1020rbx: 0x3000,1021rcx: 0x3010,1022rflags: 2,1023..Default::default()1024},1025..Default::default()1026};10271028let regs_matcher = |_, regs: &Regs, _: &_| {1029assert_eq!(regs.rax, 0x66, "Should match the MMIO read bytes below");1030};10311032let exit_matcher = |_, exit: &VcpuExit, vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {1033VcpuExit::Mmio => {1034vcpu.handle_mmio(&mut |IoParams { address, operation }| {1035match operation {1036IoOperation::Read(data) => {1037match (address, data.len()) {1038// First MMIO read asks to load the first 8 bytes1039// of a new execution page, when an instruction1040// crosses page boundary.1041// Return the rest of instructions that are1042// supposed to be on the second page.1043(0x1000, 8) => {1044// Ensure this instruction is the first read1045// in the sequence.1046data.copy_from_slice(&[0x88, 0x03, 0x67, 0x8a, 0x01, 0xf4, 0, 
0]);1047Ok(())1048}1049// Second MMIO read is a regular read from an1050// unmapped memory (pointed to by initial EAX).1051(0x3010, 1) => {1052data.copy_from_slice(&[0x66]);1053Ok(())1054}1055_ => {1056panic!("invalid address({:#x})/size({})", address, data.len())1057}1058}1059}1060IoOperation::Write(data) => {1061assert_eq!(address, 0x3000);1062assert_eq!(data[0], 0x33);1063assert_eq!(data.len(), 1);1064Ok(())1065}1066}1067})1068.expect("failed to set the data");1069false // Continue VM runloop1070}1071VcpuExit::Hlt => {1072true // Break VM runloop1073}1074r => panic!("unexpected exit reason: {r:?}"),1075};10761077run_tests!(setup, regs_matcher, exit_matcher);1078}10791080global_asm_data!(1081test_mmio_exit_readonly_memory_code,1082".code16",1083"mov al,BYTE PTR es:[bx]",1084"add al, 0x1",1085"mov BYTE PTR es:[bx], al",1086"hlt",1087);10881089#[test]1090#[cfg(any(target_os = "android", target_os = "linux"))] // Not working for WHXP yet.1091fn test_mmio_exit_readonly_memory() {1092// Read from read-only memory and then write back to it,1093// which should trigger an MMIO exit.1094let setup = TestSetup {1095assembly: test_mmio_exit_readonly_memory_code::data().to_vec(),1096load_addr: GuestAddress(0x1000),1097mem_size: 0x2000,1098initial_regs: Regs {1099rip: 0x1000,1100rax: 1,1101rbx: 0,1102rflags: 2,1103..Default::default()1104},1105extra_vm_setup: Some(Box::new(|vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {1106// Add a read-only region of memory to the VM, at address 0x5000.1107let prot_mem_size = 0x1000;1108let prot_mem =1109SharedMemory::new("test", prot_mem_size).expect("failed to create shared memory");1110let mmap_ro = MemoryMappingBuilder::new(prot_mem_size as usize)1111.from_shared_memory(&prot_mem)1112.build()1113.expect("failed to create memory mapping");1114mmap_ro1115.write_obj(0x66, 0)1116.expect("failed writing data to ro memory");1117vm.add_memory_region(1118GuestAddress(0x5000),1119Box::new(1120MemoryMappingBuilder::new(prot_mem_size as 
usize)1121.from_shared_memory(&prot_mem)1122.build()1123.expect("failed to create memory mapping"),1124),1125true,1126false,1127CacheCoherent,1128)1129.expect("failed to register memory");11301131// Set up segments needed by the assembly addressing above.1132let mut sregs = vcpu.get_sregs().expect("get sregs failed");1133sregs.cs.s = 1;1134sregs.cs.type_ = 0b1011;1135sregs.es.base = 0x5000;1136sregs.es.selector = 0;1137sregs.es.s = 1;1138sregs.es.type_ = 0b1011;11391140vcpu.set_sregs(&sregs).expect("set sregs failed");1141})),1142..Default::default()1143};11441145let exit_matcher = |_, exit: &VcpuExit, vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {1146VcpuExit::Mmio => {1147vcpu.handle_mmio(&mut |IoParams { address, operation }| match operation {1148IoOperation::Read(_) => {1149panic!("unexpected mmio read call");1150}1151IoOperation::Write(data) => {1152assert_eq!(data.len(), 1);1153assert_eq!(address, 0x5000);1154assert_eq!(data[0], 0x67);1155Ok(())1156}1157})1158.expect("failed to set the data");1159false // Continue VM runloop1160}1161VcpuExit::Hlt => {1162true // Break VM runloop1163}1164r => panic!("unexpected exit reason: {r:?}"),1165};11661167run_tests!(1168setup,1169|_, regs, _| {1170assert_eq!(regs.rax, 0x67);1171},1172exit_matcher1173);1174}11751176#[rustfmt::skip::macros(global_asm_data)]1177global_asm_data!(1178test_cpuid_exit_handler_code,1179".code16",1180"cpuid",1181"hlt",1182);11831184#[test]1185fn test_cpuid_exit_handler() {1186let setup = TestSetup {1187assembly: test_cpuid_exit_handler_code::data().to_vec(),1188load_addr: GuestAddress(0x1000),1189initial_regs: Regs {1190rip: 0x1000,1191rax: 1, // CPUID input EAX=1 to get virtualization bits.1192rflags: 2,1193..Default::default()1194},1195..Default::default()1196};11971198let regs_matcher = move |hypervisor_type: HypervisorType, regs: &Regs, _: &_| {1199if hypervisor_type == HypervisorType::Haxm {1200let hypervisor_bit = regs.rcx & (1 << 31) != 0;1201assert!(hypervisor_bit, "Hypervisor 
bit in CPUID should be set!");1202assert_eq!(regs.rip, 0x1003, "CPUID did not execute correctly.");1203}1204};12051206let exit_matcher =1207|hypervisor_type, exit: &VcpuExit, _vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| {1208match hypervisor_type {1209HypervisorType::Whpx => match exit {1210VcpuExit::Cpuid { entry } => {1211println!("Got Cpuid {entry:?}");1212true // Break runloop1213}1214r => panic!("unexpected exit reason: {r:?}"),1215},1216_ => match exit {1217VcpuExit::Hlt => {1218true // Break VM runloop1219}1220r => panic!("unexpected exit reason: {r:?}"),1221},1222}1223};12241225run_tests!(setup, regs_matcher, exit_matcher);1226}12271228global_asm_data!(1229test_control_register_access_invalid_code,1230".code16",1231// Test setting an unused bit in addition to the Protected Mode Enable and Monitor co-processor1232// bits, which causes a triple fault and hence the invalid bit should never make it to RCX.1233"mov cr0, eax",1234"mov ecx, cr0",1235"hlt",1236);12371238#[test]1239fn test_control_register_access_invalid() {1240let setup = TestSetup {1241assembly: test_control_register_access_invalid_code::data().to_vec(),1242load_addr: GuestAddress(0x1000),1243initial_regs: Regs {1244rip: 0x1000,1245rax: 0x80000011,1246rcx: 0,1247rflags: 2,1248..Default::default()1249},1250..Default::default()1251};12521253// Matcher to check that the RAX value never made it to RCX.1254let regs_matcher = move |_: HypervisorType, regs: &Regs, _: &_| {1255assert_eq!(1256regs.rcx, 0,1257"RCX value mismatch: expected 0, found {:X}",1258regs.rcx1259)1260};12611262let exit_matcher =1263move |hypervisor_type, exit: &VcpuExit, _vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| {1264match hypervisor_type {1265HypervisorType::Kvm | HypervisorType::Haxm => {1266match exit {1267VcpuExit::Shutdown(_) => {1268true // Break VM runloop1269}1270r => panic!("unexpected exit reason: {r:?}"),1271}1272}1273_ => {1274match exit {1275VcpuExit::UnrecoverableException => {1276true // Break VM runloop1277}1278r => 
panic!("unexpected exit reason: {r:?}"),
                    }
                }
            }
        };
    run_tests!(setup, regs_matcher, exit_matcher);
}

global_asm_data!(
    test_control_register_access_valid_code,
    // Set the 0th bit (Protected Mode Enable) of CR0, which should succeed.
    ".code16",
    "mov cr0, eax",
    "mov eax, cr0",
    "hlt",
);

// Writes a legal value (PE bit) to CR0 from the guest and reads it back,
// verifying that the round trip through CR0 is visible in EAX.
#[test]
fn test_control_register_access_valid() {
    let setup = TestSetup {
        // Use the *valid* code module, which reads CR0 back into EAX after the
        // write. The original mistakenly loaded
        // test_control_register_access_invalid_code here, so the CR0 readback
        // being asserted below never actually executed (that code reads CR0
        // into ECX, leaving the initial RAX untouched) and the valid code
        // module was left unused.
        assembly: test_control_register_access_valid_code::data().to_vec(),
        load_addr: GuestAddress(0x1000),
        initial_regs: Regs {
            rip: 0x1000,
            rax: 0x1,
            rflags: 2,
            ..Default::default()
        },
        ..Default::default()
    };

    // Matcher to check the final state of EAX after reading from CR0
    let regs_matcher = |_: HypervisorType, regs: &Regs, _: &_| {
        assert!(
            (regs.rax & 0x1) != 0,
            "CR0 value mismatch: expected the 0th bit to be set, found {:X}",
            regs.rax
        );
    };

    let exit_matcher =
        move |_, exit: &VcpuExit, _vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {
            VcpuExit::Hlt => {
                true // Break VM runloop
            }
            r => panic!("unexpected exit reason: {r:?}"),
        };
    run_tests!(setup, regs_matcher, exit_matcher);
}

global_asm_data!(
    test_debug_register_access_code,
    ".code16",
    "mov dr2, eax",
    "mov ebx, dr2",
    "hlt",
);

// Writes DR2 from the guest and reads it back into EBX to verify debug
// register accesses are emulated.
#[test]
fn test_debug_register_access() {
    let setup = TestSetup {
        assembly: test_debug_register_access_code::data().to_vec(),
        load_addr: GuestAddress(0x1000),
        initial_regs: Regs {
            rip: 0x1000,
            rax: 0x1234,
            rflags: 2,
            ..Default::default()
        },
        ..Default::default()
    };

    let regs_matcher = |_: HypervisorType, regs: &Regs, _: &_| {
        assert_eq!(
            regs.rbx, 0x1234,
            "DR2 value mismatch: expected 0x1234, found {:X}",
            regs.rbx
        );
    };

    let exit_matcher = |_, exit: &VcpuExit, _vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {
        VcpuExit::Hlt => {
            true // Break VM runloop
        }
        r =>
panic!("unexpected exit reason: {r:?}"),
    };

    run_tests!(setup, regs_matcher, exit_matcher);
}

// This test only succeeds (by failing Vcpu::Run) on haxm.
#[cfg(all(windows, feature = "haxm"))]
#[test]
fn test_msr_access_invalid() {
    let msr_index = 0xC0000080; // EFER MSR

    let setup = TestSetup {
        /*
        0: 0f 32    rdmsr
        2: 83 c8 02 or ax,0x2 (1st bit is reserved)
        5: 0f 30    wrmsr
        7: f4       hlt
        */
        assembly: vec![0x0F, 0x32, 0x83, 0xC8, 0x02, 0x0F, 0x30, 0xF4],
        mem_size: 0x5000,
        load_addr: GuestAddress(0x1000),
        initial_regs: Regs {
            rip: 0x1000,
            rcx: msr_index, // MSR index to read/write
            rflags: 2,
            ..Default::default()
        },
        ..Default::default()
    };

    let exit_matcher = |_, exit: &VcpuExit, _vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {
        VcpuExit::Shutdown(..) => {
            true // Break VM runloop
        }
        r => panic!("unexpected exit reason: {r:?}"),
    };

    run_tests!(
        setup,
        |_, regs, _| {
            assert_eq!(regs.rip, 0x1005); // Should stop at the wrmsr
        },
        exit_matcher
    );
}

global_asm_data!(
    test_msr_access_valid_code,
    ".code16",
    "rdmsr",
    "add ax, 1",
    "wrmsr",
    "hlt",
);

// Reads the TSC MSR, increments it, and writes it back; verifies RDMSR
// returned a nonzero value and execution reached the hlt.
#[test]
fn test_msr_access_valid() {
    let msr_index = 0x10; // TSC MSR index

    let setup = TestSetup {
        assembly: test_msr_access_valid_code::data().to_vec(),
        load_addr: GuestAddress(0x1000),
        initial_regs: Regs {
            rip: 0x1000,
            rcx: msr_index, // MSR index for TSC
            rflags: 0x2,
            ..Default::default()
        },
        ..Default::default()
    };

    let regs_matcher = move |_: HypervisorType, regs: &Regs, _: &_| {
        assert!(regs.rax > 0x0, "TSC value should be >0");
        assert_eq!(regs.rip, 0x1008, "Should stop after the hlt instruction");
    };

    let exit_matcher = |_, exit: &VcpuExit, _vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {
        VcpuExit::Hlt => {
            true // Break VM runloop
        }
        r => panic!("unexpected exit reason: {r:?}"),
    };
    run_tests!(setup, regs_matcher, exit_matcher);
}

#[rustfmt::skip::macros(global_asm_data)]
global_asm_data!(
    test_getsec_instruction_code,
    ".code16",
    "getsec",
    "hlt",
);

#[cfg(not(unix))]
#[test]
fn test_getsec_instruction() {
    let setup = TestSetup {
        assembly: test_getsec_instruction_code::data().to_vec(),
        load_addr: GuestAddress(0x1000),
        initial_regs: Regs {
            rip: 0x1000,
            rflags: 2,
            ..Default::default()
        },
        extra_vm_setup: Some(Box::new(|vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {
            ModeConfig::default_long_mode().enter_long_mode(vcpu, vm);
        })),
        ..Default::default()
    };

    let regs_matcher =
        move |hypervisor_type: HypervisorType, regs: &Regs, _: &_| match hypervisor_type {
            HypervisorType::Whpx => {}
            HypervisorType::Haxm => {}
            _ => {
                // The assertion expects RIP still at the GETSEC instruction
                // itself (0x1000); the failure message previously claimed
                // 0x1002, contradicting the asserted value. Message fixed to
                // match the assertion.
                assert_eq!(regs.rip, 0x1000, "GETSEC; expected RIP at 0x1000");
            }
        };

    let exit_matcher =
        move |hypervisor_type, exit: &VcpuExit, _vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| {
            match hypervisor_type {
                HypervisorType::Whpx => {
                    match exit {
                        VcpuExit::UnrecoverableException => {
                            true // Break VM runloop
                        }
                        r => panic!("unexpected exit reason: {r:?}"),
                    }
                }
                _ => {
                    match exit {
                        VcpuExit::Shutdown(_) => {
                            true // Break VM runloop
                        }
                        r => panic!("unexpected exit reason: {r:?}"),
                    }
                }
            }
        };

    run_tests!(setup, regs_matcher, exit_matcher);
}

#[rustfmt::skip::macros(global_asm_data)]
global_asm_data!(
    test_invd_instruction_code,
    ".code16",
    "invd",
    "hlt",
);

#[test]
fn test_invd_instruction() {
    let setup = TestSetup {
        assembly: test_invd_instruction_code::data().to_vec(),
        load_addr: GuestAddress(0x1000),
        initial_regs: Regs {
            rip: 0x1000,
            rflags: 2,
            ..Default::default()
        },
        ..Default::default()
    };

    let regs_matcher =
        move |hypervisor_type: HypervisorType, regs: &Regs, _: &_| match
hypervisor_type {
            HypervisorType::Haxm => {}
            _ => {
                assert_eq!(regs.rip, 0x1003, "INVD; expected RIP at 0x1003");
            }
        };
    let exit_matcher =
        move |hypervisor_type, exit: &VcpuExit, _vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| {
            match hypervisor_type {
                // HAXM surfaces INVD as a Shutdown exit; every other
                // hypervisor is expected to complete it and reach the hlt.
                HypervisorType::Haxm => {
                    match exit {
                        VcpuExit::Shutdown(_) => {
                            true // Break VM runloop
                        }
                        r => panic!("unexpected exit reason: {r:?}"),
                    }
                }
                _ => {
                    match exit {
                        VcpuExit::Hlt => {
                            true // Break VM runloop
                        }
                        r => panic!("unexpected exit reason: {r:?}"),
                    }
                }
            }
        };

    run_tests!(setup, regs_matcher, exit_matcher);
}

global_asm_data!(
    test_xsetbv_instruction_code,
    ".code16",
    "mov eax, cr4",
    // Set the OSXSAVE bit in CR4 (bit 9)
    "or ax, 0x200",
    "mov cr4, eax",
    "xgetbv",
    "xsetbv",
    "hlt",
);

// Enables CR4.OSXSAVE from the guest, then executes XGETBV/XSETBV on XCR0.
// The RIP assertion only applies to hypervisors other than WHPX/HAXM/KVM,
// and the runloop is terminated on an Mmio exit rather than Hlt.
#[test]
fn test_xsetbv_instruction() {
    let setup = TestSetup {
        assembly: test_xsetbv_instruction_code::data().to_vec(),
        load_addr: GuestAddress(0x1000),
        initial_regs: Regs {
            rip: 0x1000,
            rax: 1, // Set bit 0 in EAX
            rdx: 0, // XSETBV also uses EDX:EAX, must be initialized
            rcx: 0, // XCR0
            rflags: 2,
            ..Default::default()
        },
        extra_vm_setup: Some(Box::new(|vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {
            ModeConfig::default_long_mode().enter_long_mode(vcpu, vm);
        })),
        ..Default::default()
    };

    let regs_matcher =
        move |hypervisor_type: HypervisorType, regs: &Regs, _: &_| match hypervisor_type {
            HypervisorType::Whpx => {}
            HypervisorType::Haxm => {}
            HypervisorType::Kvm => {}
            _ => {
                assert_eq!(regs.rip, 0x100D, "XSETBV; expected RIP at 0x100D");
            }
        };

    let exit_matcher = |_, exit: &VcpuExit, _vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| {
        match exit {
            VcpuExit::Mmio => {
                true // Break VM runloop
            }
            r => panic!("unexpected exit reason: {r:?}"),
        }
    };

    run_tests!(setup, regs_matcher,
exit_matcher);
}

global_asm_data!(
    test_invept_instruction_code,
    ".code16",
    "invept eax, [eax]",
    "hlt",
);

// Executes INVEPT from the guest (RAX points at 0x2000 as the descriptor).
// Expected to fault; WHPX reports UnrecoverableException, others Shutdown.
#[test]
fn test_invept_instruction() {
    let setup = TestSetup {
        assembly: test_invept_instruction_code::data().to_vec(),
        load_addr: GuestAddress(0x1000),
        initial_regs: Regs {
            rax: 0x2000,
            rip: 0x1000,
            rflags: 2,
            ..Default::default()
        },
        extra_vm_setup: Some(Box::new(|vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {
            ModeConfig::default_long_mode().enter_long_mode(vcpu, vm);
        })),
        ..Default::default()
    };

    let regs_matcher =
        move |hypervisor_type: HypervisorType, regs: &Regs, _: &_| match hypervisor_type {
            HypervisorType::Whpx => {}
            HypervisorType::Haxm => {}
            HypervisorType::Kvm => {}
            _ => {
                assert_eq!(regs.rip, 0x1005, "invept; expected RIP at 0x1005");
            }
        };

    let exit_matcher =
        move |hypervisor_type, exit: &VcpuExit, _vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| {
            match hypervisor_type {
                HypervisorType::Whpx => {
                    match exit {
                        VcpuExit::UnrecoverableException => {
                            true // Break VM runloop
                        }
                        r => panic!("unexpected exit reason: {r:?}"),
                    }
                }
                _ => {
                    match exit {
                        VcpuExit::Shutdown(_) => {
                            true // Break VM runloop
                        }
                        r => panic!("unexpected exit reason: {r:?}"),
                    }
                }
            }
        };

    run_tests!(setup, regs_matcher, exit_matcher);
}

global_asm_data!(
    test_invvpid_instruction_code,
    ".code16",
    "invvpid eax, [eax]",
    "hlt",
);

// TODO(b/342183625): invvpid instruction is not valid in real mode. Reconsider how we should write
// this test.
#[cfg(not(unix))]
#[test]
fn test_invvpid_instruction() {
    let setup = TestSetup {
        assembly: test_invvpid_instruction_code::data().to_vec(),
        load_addr: GuestAddress(0x1000),
        initial_regs: Regs {
            rip: 0x1000,
            rax: 0x1500,
            rflags: 2,
            ..Default::default()
        },
        extra_vm_setup: Some(Box::new(|vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {
            ModeConfig::default_long_mode().enter_long_mode(vcpu, vm);
        })),
        ..Default::default()
    };

    let regs_matcher = move |_, regs: &Regs, _: &_| {
        assert_eq!(regs.rip, 0x1000, "INVVPID; expected RIP at 0x1000");
    };

    let exit_matcher =
        move |_, exit: &VcpuExit, _vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {
            VcpuExit::Mmio | VcpuExit::Shutdown(_) | VcpuExit::InternalError => {
                true // Break VM runloop
            }
            r => panic!("unexpected exit reason: {r:?}"),
        };

    run_tests!(setup, regs_matcher, exit_matcher);
}

// Runs a series of VMX instructions (each followed by an appended hlt) and
// checks where RIP lands when the instruction faults or is swallowed.
#[test]
fn test_vm_instruction_set() {
    // (opcode bytes, expected RIP after the run, mnemonic for messages)
    // NOTE(review): the "VMXON" entry reuses the VMXOFF opcode bytes
    // (0F 01 C4); real VMXON is encoded F3 0F C7 /6 — confirm intentional.
    let instructions = vec![
        (vec![0x0F, 0x01, 0xC1], 0x1000, "VMCALL"),        // VMCALL
        (vec![0x66, 0x0F, 0xC7, 0x30], 0x1004, "VMCLEAR"), // VMCLEAR
        (vec![0x0F, 0x01, 0xC2], 0x1003, "VMLAUNCH"),      // VMLAUNCH
        (vec![0x0F, 0xC7, 0x30], 0x1003, "VMPTRLD"),       // VMPTRLD
        (vec![0x0F, 0xC7, 0x31], 0x1003, "VMPTRST"),       // VMPTRST
        (vec![0x0F, 0x01, 0xC3], 0x1003, "VMRESUME"),      // VMRESUME
        (vec![0x0F, 0x01, 0xC4], 0x1003, "VMXOFF"),        // VMXOFF
        (vec![0x0F, 0x01, 0xC4], 0x1003, "VMXON"),         // VMXON
    ];

    for (bytes, expected_rip, name) in instructions {
        let mut assembly = bytes;
        assembly.push(0xF4); // Append HLT to each instruction set

        let setup = TestSetup {
            assembly,
            load_addr: GuestAddress(0x1000),
            initial_regs: Regs {
                rip: 0x1000,
                rflags: 2,
                ..Default::default()
            },
            ..Default::default()
        };

        let regs_matcher =
            move |hypervisor_type: HypervisorType, regs: &Regs, _: &_| match hypervisor_type
{
                HypervisorType::Whpx => {}
                HypervisorType::Kvm => {}
                HypervisorType::Haxm => {}
                _ => {
                    assert_eq!(
                        regs.rip, expected_rip,
                        "{name}; expected RIP at {expected_rip}"
                    );
                }
            };

        let exit_matcher =
            |hypervisor_type, exit: &VcpuExit, _vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| {
                match hypervisor_type {
                    HypervisorType::Whpx => {
                        match exit {
                            VcpuExit::Mmio => {
                                true // Break VM runloop
                            }
                            r => panic!("unexpected exit reason: {r:?}"),
                        }
                    }
                    // KVM: any exit reason terminates the runloop.
                    HypervisorType::Kvm => {
                        true // Break VM runloop
                    }
                    _ => {
                        match exit {
                            VcpuExit::Shutdown(_) => {
                                true // Break VM runloop
                            }
                            r => panic!("unexpected exit reason: {r:?}"),
                        }
                    }
                }
            };

        run_tests!(setup, regs_matcher, exit_matcher);
    }
}

#[rustfmt::skip::macros(global_asm_data)]
global_asm_data!(
    test_software_interrupt_code,
    "int 0x80",
    "hlt",
);

// Fires a software interrupt (int 0x80) with no IDT configured; each
// hypervisor family reports a different terminal exit for this.
#[test]
fn test_software_interrupt() {
    let start_addr = 0x1000;
    let setup = TestSetup {
        assembly: test_software_interrupt_code::data().to_vec(),
        load_addr: GuestAddress(0x1000),
        initial_regs: Regs {
            rip: start_addr,
            rflags: 2,
            ..Default::default()
        },
        ..Default::default()
    };

    let regs_matcher =
        move |hypervisor_type: HypervisorType, regs: &Regs, _: &_| match hypervisor_type {
            HypervisorType::Whpx => {}
            HypervisorType::Haxm => {}
            HypervisorType::Kvm => {}
            _ => {
                let expect_rip_addr = start_addr
                    + u64::try_from(test_software_interrupt_code::data().len())
                        .expect("the code length should within the range of u64");
                assert_eq!(
                    regs.rip, expect_rip_addr,
                    "Expected RIP at {expect_rip_addr:#x}"
                );
            }
        };

    let exit_matcher =
        |hypervisor_type, exit: &VcpuExit, _vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| {
            match hypervisor_type {
                HypervisorType::Kvm | HypervisorType::Whpx => {
                    match exit {
                        VcpuExit::Mmio => {
                            true // Break VM runloop
                        }
                        r => panic!("unexpected exit reason: {r:?}"),
                    }
                }
                _ => {
                    match exit {
                        VcpuExit::Shutdown(_) => {
                            true // Break VM runloop
                        }
                        r => panic!("unexpected exit reason: {r:?}"),
                    }
                }
            }
        };

    run_tests!(setup, regs_matcher, exit_matcher);
}

#[rustfmt::skip::macros(global_asm_data)]
global_asm_data!(
    test_rdtsc_instruction_code,
    ".code16",
    "rdtsc",
    "hlt",
);

// Executes RDTSC and checks that a nonzero counter landed in EDX:EAX.
#[test]
fn test_rdtsc_instruction() {
    let setup = TestSetup {
        assembly: test_rdtsc_instruction_code::data().to_vec(),
        load_addr: GuestAddress(0x1000),
        initial_regs: Regs {
            rip: 0x1000,
            rflags: 2,
            ..Default::default()
        },
        ..Default::default()
    };

    // This matcher checks that the timestamp counter has been incremented and read into EAX and EDX
    let regs_matcher = |_: HypervisorType, regs: &Regs, _: &_| {
        assert!(
            regs.rax != 0 || regs.rdx != 0,
            "RDTSC returned a zero value, which is unlikely."
        );
    };

    let exit_matcher = |_: HypervisorType,
                        exit: &VcpuExit,
                        _: &mut dyn VcpuX86_64,
                        _: &mut dyn Vm| { matches!(exit, VcpuExit::Hlt) };

    run_tests!(setup, regs_matcher, exit_matcher);
}

global_asm_data!(
    test_register_access_code,
    ".code16",
    "xchg ax, bx",
    "xchg cx, dx",
    "xchg sp, bp",
    "xchg si, di",
    "hlt",
);

// This tests that we can write and read GPRs to/from the VM.
#[test]
fn test_register_access() {
    let start_addr = 0x1000;
    let setup = TestSetup {
        assembly: test_register_access_code::data().to_vec(),
        load_addr: GuestAddress(start_addr),
        initial_regs: Regs {
            rip: start_addr,
            rax: 2,
            rbx: 1,
            rcx: 4,
            rdx: 3,
            rsp: 6,
            rbp: 5,
            rsi: 8,
            rdi: 7,
            rflags: 2,
            ..Default::default()
        },
        ..Default::default()
    };

    run_tests!(
        setup,
        |_, regs, _| {
            // Each pair was swapped by the guest's xchg instructions.
            assert_eq!(regs.rax, 1);
            assert_eq!(regs.rbx, 2);
            assert_eq!(regs.rcx, 3);
            assert_eq!(regs.rdx, 4);
            assert_eq!(regs.rsp,
5);
            assert_eq!(regs.rbp, 6);
            assert_eq!(regs.rsi, 7);
            assert_eq!(regs.rdi, 8);
            assert_eq!(
                regs.rip,
                start_addr + test_register_access_code::data().len() as u64
            );
        },
        |_, exit, _, _: &mut dyn Vm| matches!(exit, VcpuExit::Hlt)
    );
}

global_asm_data!(
    test_flags_register_code,
    ".code16",
    // ZF is set on entry, so this jump is taken and `test` is skipped; the
    // `test` would only run (setting SF/clearing ZF) if ZF were clear.
    "jnz fin",
    "test ax, ax",
    "fin:",
    "hlt",
);

// This tests that we can get/set the flags register from the VMM.
#[test]
fn test_flags_register() {
    let start_addr = 0x1000;
    let setup = TestSetup {
        assembly: test_flags_register_code::data().to_vec(),
        load_addr: GuestAddress(start_addr),
        initial_regs: Regs {
            rip: start_addr,
            rax: 0xffffffff,
            rflags: 0x42, // zero flag set, sign flag clear
            ..Default::default()
        },
        ..Default::default()
    };

    run_tests!(
        setup,
        |_, regs, _| {
            assert_eq!(regs.rflags & 0x40, 0); // zero flag is clear
            assert_ne!(regs.rflags & 0x80, 0); // sign flag is set
            assert_eq!(
                regs.rip,
                start_addr + test_flags_register_code::data().len() as u64
            );
        },
        |_, exit, _, _: &mut dyn Vm| matches!(exit, VcpuExit::Hlt)
    );
}

global_asm_data!(
    test_vmm_set_segs_code,
    ".code16",
    "mov ax, ds:0",
    "mov bx, es:0",
    "mov cx, fs:0",
    "mov dx, gs:0",
    "mov sp, ss:0",
    "hlt",
);

// This tests that the VMM can set segment registers and have them used by the VM.
#[test]
fn test_vmm_set_segs() {
    let start_addr = 0x1000;
    let data_addr = 0x2000;
    let setup = TestSetup {
        assembly: test_vmm_set_segs_code::data().to_vec(),
        load_addr: GuestAddress(start_addr),
        mem_size: 0x4000,
        initial_regs: Regs {
            rip: start_addr,
            rflags: 0x42,
            ..Default::default()
        },
        // simple memory pattern where the value of a byte is (addr - data_addr + 1)
        memory_initializations: vec![(GuestAddress(data_addr), (1..=32).collect())],
        extra_vm_setup: Some(Box::new(move |vcpu: &mut dyn VcpuX86_64, _|
{
            // Point each segment base at a different offset into the test
            // pattern so each 16-bit load below reads distinct bytes.
            let mut sregs = vcpu.get_sregs().expect("failed to get sregs");
            sregs.ds.base = data_addr;
            sregs.ds.selector = 0;
            sregs.es.base = data_addr + 4;
            sregs.es.selector = 0;
            sregs.fs.base = data_addr + 8;
            sregs.fs.selector = 0;
            sregs.gs.base = data_addr + 12;
            sregs.gs.selector = 0;
            sregs.ss.base = data_addr + 16;
            sregs.ss.selector = 0;
            vcpu.set_sregs(&sregs).expect("failed to set sregs");
        })),
        ..Default::default()
    };

    run_tests!(
        setup,
        |_, regs, sregs| {
            assert_eq!(sregs.ds.base, data_addr);
            assert_eq!(sregs.es.base, data_addr + 4);
            assert_eq!(sregs.fs.base, data_addr + 8);
            assert_eq!(sregs.gs.base, data_addr + 12);
            assert_eq!(sregs.ss.base, data_addr + 16);

            // ax was loaded from ds:0, which has offset 0, so is [1, 2]
            assert_eq!(regs.rax, 0x0201);
            // bx was loaded from es:0, which has offset 4, so is [5, 6]
            assert_eq!(regs.rbx, 0x0605);
            // cx was loaded from fs:0, which has offset 8, so is [9, 10]
            assert_eq!(regs.rcx, 0x0a09);
            // dx was loaded from gs:0, which has offset 12, so is [13, 14]
            assert_eq!(regs.rdx, 0x0e0d);
            // sp was loaded from ss:0, which has offset 16, so is [17, 18]
            assert_eq!(regs.rsp, 0x1211);

            let expect_rip_addr = start_addr
                + u64::try_from(test_vmm_set_segs_code::data().len())
                    .expect("the code length should within the range of u64");
            assert_eq!(
                regs.rip, expect_rip_addr,
                "Expected RIP at {expect_rip_addr:#x}"
            );
        },
        |_, exit, _, _: &mut dyn Vm| matches!(exit, VcpuExit::Hlt)
    );
}

global_asm_data!(
    test_set_cr_vmm_code,
    ".code16",
    "mov eax, cr0",
    "mov ebx, cr3",
    "mov ecx, cr4",
    "hlt",
);

// Tests that the VMM can read and write CRs and they become visible in the guest.
#[test]
fn test_set_cr_vmm() {
    let asm_addr = 0x1000;
    let setup = TestSetup {
        assembly: test_set_cr_vmm_code::data().to_vec(),
        load_addr: GuestAddress(asm_addr),
        initial_regs: Regs
{
            rip: asm_addr,
            rflags: 2,
            ..Default::default()
        },
        extra_vm_setup: Some(Box::new(|vcpu: &mut dyn VcpuX86_64, _| {
            let mut sregs = vcpu.get_sregs().expect("failed to get sregs");
            sregs.cr0 |= 1 << 18; // Alignment Mask; does nothing without other config bits
            sregs.cr3 = 0xfeedface; // arbitrary value; CR3 is not used in this configuration
            sregs.cr4 |= 1 << 2; // Time Stamp Disable; not relevant here
            vcpu.set_sregs(&sregs).expect("failed to set sregs");
        })),
        ..Default::default()
    };

    run_tests!(
        setup,
        |_, regs, sregs| {
            // The guest copied CR0/CR3/CR4 into EAX/EBX/ECX; both the copies
            // and the sregs reported back must show the VMM-written values.
            assert_eq!(regs.rax, sregs.cr0);
            assert_eq!(regs.rbx, sregs.cr3);
            assert_eq!(regs.rcx, sregs.cr4);
            assert_eq!(sregs.cr3, 0xfeedface);
            assert_ne!(sregs.cr0 & (1 << 18), 0);
            assert_ne!(sregs.cr4 & (1 << 2), 0);
            assert_eq!(regs.rip, asm_addr + setup.assembly.len() as u64); // after hlt
        },
        |_, exit, _, _: &mut dyn Vm| matches!(exit, VcpuExit::Hlt)
    );
}

global_asm_data!(
    test_set_cr_guest_code,
    ".code16",
    "mov eax, cr0",
    "or eax, (1 << 18)",
    "mov cr0, eax",
    "mov ebx, 0xfeedface",
    "mov cr3, ebx",
    "mov ecx, cr4",
    "or ecx, (1 << 2)",
    "mov cr4, ecx",
    "hlt",
);

// Tests that the guest can read and write CRs and they become visible to the VMM.
#[test]
fn test_set_cr_guest() {
    let asm_addr = 0x1000;
    let setup = TestSetup {
        assembly: test_set_cr_guest_code::data().to_vec(),
        load_addr: GuestAddress(asm_addr),
        initial_regs: Regs {
            rip: asm_addr,
            rflags: 2,
            ..Default::default()
        },
        ..Default::default()
    };

    run_tests!(
        setup,
        |_, regs, sregs| {
            assert_eq!(regs.rax, sregs.cr0);
            assert_eq!(regs.rbx, sregs.cr3);
            assert_eq!(regs.rcx, sregs.cr4);
            assert_eq!(sregs.cr3, 0xfeedface);
            assert_ne!(sregs.cr0 & (1 << 18), 0);
            assert_ne!(sregs.cr4 & (1 << 2), 0);
            assert_eq!(regs.rip, asm_addr + setup.assembly.len() as u64); // after hlt
        },
        |_, exit, _, _: &mut dyn Vm|
matches!(exit, VcpuExit::Hlt)
    );
}

mod test_minimal_interrupt_injection_code {
    use super::*;

    global_asm_data!(
        pub init,
        ".code16",
        // Set the IDT
        "lidt [0x200]",
        // Set up the stack, which will be used when CPU transfers the control to the ISR on
        // interrupt.
        "mov sp, 0x900",
        "mov eax, 902",
        // We inject our exception on this hlt command.
        "hlt",
        "mov ebx, 990",
        "hlt"
    );

    global_asm_data!(
        pub isr,
        ".code16",
        "mov eax, 888",
        "iret"
    );
}

// Injects a single external interrupt while the guest is halted and verifies
// the ISR ran (RAX becomes 888) before execution resumes past the hlt.
//
// Guest memory layout built below, starting at 0x200:
// IDTR (6 bytes) -> IDT (256 * 4-byte real-mode entries) -> ISR -> init code.
#[test]
fn test_minimal_interrupt_injection() {
    let start_addr: u32 = 0x200;
    // Allocate exceed 0x900, where we set up our stack.
    let mem_size: u32 = 0x1000;

    let mut setup = TestSetup {
        load_addr: GuestAddress(start_addr.into()),
        initial_regs: Regs {
            rax: 0,
            rbx: 0,
            // Set RFLAGS.IF to enable interrupt.
            rflags: 2 | FLAGS_IF_BIT,
            ..Default::default()
        },
        mem_size: mem_size.into(),
        ..Default::default()
    };

    let mut cur_addr = start_addr;

    let idtr_size: u32 = 6;
    assert_eq!(
        Ok(std::mem::size_of::<Idtr32>()),
        usize::try_from(idtr_size)
    );
    // The limit is calculated from 256 entries timed by 4 bytes per entry.
    let idt_size = 256u16 * 4u16;
    let idtr = Idtr32 {
        limit: idt_size - 1,
        // The IDT right follows the IDTR.
        base_address: start_addr + idtr_size,
    };
    setup.add_memory_initialization(GuestAddress(cur_addr.into()), idtr.as_bytes().to_vec());
    cur_addr += idtr_size;

    let idt_entry = (start_addr + idtr_size + u32::from(idt_size)).to_ne_bytes();
    // IDT entries are far pointers(CS:IP pair) to the only ISR, which locates right after the IDT.
    // We set all entries to the same ISR.
    let idt = (0..256).flat_map(|_| idt_entry).collect::<Vec<_>>();
    setup.add_memory_initialization(GuestAddress(cur_addr.into()), idt.clone());
    cur_addr += u32::try_from(idt.len()).expect("IDT size should be within u32");

    let isr_assembly =
test_minimal_interrupt_injection_code::isr::data().to_vec();
    setup.add_memory_initialization(GuestAddress(cur_addr.into()), isr_assembly.clone());
    cur_addr += u32::try_from(isr_assembly.len()).expect("ISR size should be within u32");

    let init_assembly = test_minimal_interrupt_injection_code::init::data().to_vec();
    setup.initial_regs.rip = cur_addr.into();
    setup.add_memory_initialization(GuestAddress(cur_addr.into()), init_assembly.clone());
    cur_addr += u32::try_from(init_assembly.len()).expect("init size should be within u32");
    let init_end_addr = cur_addr;

    assert!(mem_size > cur_addr);

    let mut counter = 0;
    run_tests!(
        setup,
        |_, regs, _| {
            assert_eq!(regs.rip, u64::from(init_end_addr));
            assert_eq!(regs.rax, 888);
            assert_eq!(regs.rbx, 990);
        },
        |_, exit, vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| {
            match exit {
                // First hlt: inject the interrupt and keep running.
                // Second hlt (counter > 1): end the runloop.
                VcpuExit::Hlt => {
                    let regs = vcpu
                        .get_regs()
                        .expect("should retrieve registers successfully");
                    counter += 1;
                    if counter > 1 {
                        return true;
                    }
                    assert!(vcpu.ready_for_interrupt());
                    assert_eq!(regs.rax, 902);
                    assert_eq!(regs.rbx, 0);
                    // Inject an external custom interrupt.
                    vcpu.interrupt(32)
                        .expect("should be able to inject an interrupt");
                    false
                }
                r => panic!("unexpected VMEXIT reason: {r:?}"),
            }
        }
    );
}

mod test_multiple_interrupt_injection_code {
    use super::*;

    global_asm_data!(
        pub init,
        ".code16",
        // Set the IDT
        "lidt [0x200]",
        // Set up the stack, which will be used when CPU transfers the control to the ISR on
        // interrupt.
        "mov esp, 0x900",
        "mov eax, 1",
        "mov ebx, 2",
        "mov ecx, 3",
        "mov edx, 4",
        // We inject our interrupts on this hlt command.
        "hlt",
        "mov edx, 281",
        "hlt",
    );

    global_asm_data!(
        pub isr_intr_32,
        ".code16",
        "mov eax, 32",
        "iret",
    );

    global_asm_data!(
        pub isr_intr_33,
        ".code16",
        "mov ebx, 33",
        "iret",
    );

    global_asm_data!(
        pub isr_default,
        ".code16",
        "mov ecx, 761",
        "iret",
    );
}

// Injects two interrupts (32 then 33) back-to-back on the same hlt and checks
// which ISRs actually ran, which differs per hypervisor (see matcher below).
#[test]
fn test_multiple_interrupt_injection() {
    let start_addr: u32 = 0x200;
    // Allocate exceed 0x900, where we set up our stack.
    let mem_size: u32 = 0x1000;

    let mut setup = TestSetup {
        load_addr: GuestAddress(start_addr.into()),
        initial_regs: Regs {
            rax: 0,
            rbx: 0,
            rcx: 0,
            rdx: 0,
            // Set RFLAGS.IF to enable interrupt.
            rflags: 2 | FLAGS_IF_BIT,
            ..Default::default()
        },
        mem_size: mem_size.into(),
        ..Default::default()
    };

    let mut cur_addr = start_addr;

    let idtr_size: u32 = 6;
    assert_eq!(
        Ok(std::mem::size_of::<Idtr32>()),
        usize::try_from(idtr_size)
    );
    // The limit is calculated from 256 entries timed by 4 bytes per entry.
    let idt_size = 256u16 * 4u16;
    let idtr = Idtr32 {
        limit: idt_size - 1,
        // The IDT right follows the IDTR.
        base_address: start_addr + idtr_size,
    };
    setup.add_memory_initialization(GuestAddress(cur_addr.into()), idtr.as_bytes().to_vec());
    cur_addr += idtr_size;

    let isr_intr_32_assembly = test_multiple_interrupt_injection_code::isr_intr_32::data().to_vec();
    let isr_intr_33_assembly = test_multiple_interrupt_injection_code::isr_intr_33::data().to_vec();
    let isr_default_assembly = test_multiple_interrupt_injection_code::isr_default::data().to_vec();
    // The ISR for intr 32 right follows the IDT.
    let isr_intr_32_addr = cur_addr + u32::from(idt_size);
    // The ISR for intr 33 right follows the ISR for intr 32.
    let isr_intr_33_addr = isr_intr_32_addr
        + u32::try_from(isr_intr_32_assembly.len())
            .expect("the size of the ISR for intr 32 should be within the u32 range");
    // The ISR for other interrupts right follows the ISR for intr 33.
    let isr_default_addr = isr_intr_33_addr
        + u32::try_from(isr_intr_33_assembly.len())
            .expect("the size of the ISR for intr 33 should be within the u32 range");

    // IDT entries are far pointers(CS:IP pair) to the correspondent ISR.
    let idt = (0..256)
        .map(|intr_vec| match intr_vec {
            32 => isr_intr_32_addr,
            33 => isr_intr_33_addr,
            _ => isr_default_addr,
        })
        .flat_map(u32::to_ne_bytes)
        .collect::<Vec<_>>();
    setup.add_memory_initialization(GuestAddress(cur_addr.into()), idt.clone());
    assert_eq!(idt.len(), usize::from(idt_size));
    cur_addr += u32::try_from(idt.len()).expect("IDT size should be within u32");

    assert_eq!(cur_addr, isr_intr_32_addr);
    setup.add_memory_initialization(GuestAddress(cur_addr.into()), isr_intr_32_assembly.clone());
    cur_addr += u32::try_from(isr_intr_32_assembly.len()).expect("ISR size should be within u32");

    assert_eq!(cur_addr, isr_intr_33_addr);
    setup.add_memory_initialization(GuestAddress(cur_addr.into()), isr_intr_33_assembly.clone());
    cur_addr += u32::try_from(isr_intr_33_assembly.len()).expect("ISR size should be within u32");

    assert_eq!(cur_addr, isr_default_addr);
    setup.add_memory_initialization(GuestAddress(cur_addr.into()), isr_default_assembly.clone());
    cur_addr += u32::try_from(isr_default_assembly.len()).expect("ISR size should be within u32");

    let init_assembly = test_multiple_interrupt_injection_code::init::data().to_vec();
    setup.initial_regs.rip = cur_addr.into();
    setup.add_memory_initialization(GuestAddress(cur_addr.into()), init_assembly.clone());
    cur_addr += u32::try_from(init_assembly.len()).expect("init size should be within u32");
    let init_end_addr = cur_addr;

    assert!(mem_size > cur_addr);

    let mut counter = 0;
    run_tests!(
        setup,
        |hypervisor_type, regs, _| {
            // Different hypervisors behave differently on how the first injected exception should
            // handled: for WHPX and KVM, the later injected interrupt overrides the earlier
            // injected interrupt, while for HAXM, both interrupts are marked as pending.
            match hypervisor_type {
                HypervisorType::Haxm
=> assert_eq!(regs.rax, 32),2391_ => assert_eq!(regs.rax, 1),2392}23932394assert_eq!(regs.rip, u64::from(init_end_addr));2395assert_eq!(regs.rbx, 33);2396assert_eq!(regs.rcx, 3);2397assert_eq!(regs.rdx, 281);2398},2399|_, exit, vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| {2400match exit {2401VcpuExit::Hlt => {2402let regs = vcpu2403.get_regs()2404.expect("should retrieve registers successfully");2405counter += 1;2406if counter > 1 {2407return true;2408}2409assert_eq!(regs.rax, 1);2410assert_eq!(regs.rbx, 2);2411assert_eq!(regs.rcx, 3);2412assert_eq!(regs.rdx, 4);2413// Inject external custom interrupts.2414assert!(vcpu.ready_for_interrupt());2415vcpu.interrupt(32)2416.expect("should be able to inject an interrupt");2417assert!(vcpu.ready_for_interrupt());2418vcpu.interrupt(33)2419.expect("should be able to inject an interrupt");2420false2421}2422r => panic!("unexpected VMEXIT reason: {r:?}"),2423}2424}2425);2426}24272428mod test_interrupt_ready_when_not_interruptible_code {2429use super::*;24302431#[derive(Debug, PartialEq, Eq, Clone, Copy)]2432pub enum Instrumentation {2433BeforeMovSs,2434AfterMovSs,2435AfterAfterMovSs,2436BeforeSti,2437AfterSti,2438AfterAfterSti,2439InIsr,2440}24412442impl From<u64> for Instrumentation {2443fn from(value: u64) -> Self {2444match value {24450x10 => Instrumentation::BeforeMovSs,24460x20 => Instrumentation::AfterMovSs,24470x30 => Instrumentation::AfterAfterMovSs,24480x40 => Instrumentation::BeforeSti,24490x50 => Instrumentation::AfterSti,24500x60 => Instrumentation::AfterAfterSti,24510xf0 => Instrumentation::InIsr,2452_ => panic!("Unknown instrumentation IO port: {value}"),2453}2454}2455}24562457// We use port IO to trigger the VMEXIT instead of MMIO, because access to out of bound memory2458// doesn't trigger MMIO VMEXIT on WHPX under simple real-mode set up.2459global_asm_data!(2460pub init,2461".code16",2462// Set up the stack, which will be used when CPU transfers the control to the ISR on2463// interrupt.2464"mov sp, 
0x1900",2465// Set the IDT.2466"lidt [0x200]",2467// Load the ss register, so that the later mov ss instruction is actually a no-op.2468"mov ax, ss",2469"out 0x10, ax",2470// Hypervisors shouldn't allow interrupt injection right after the mov ss instruction.2471"mov ss, ax",2472"out 0x20, ax",2473// On WHPX we need some other instructions to bring the interuptibility back to normal.2474// While this is not needed for other hypervisors, we add this instruction unconditionally.2475"nop",2476"out 0x30, ax",2477"out 0x40, ax",2478// Test hypervisors' interruptibilities right after sti instruction when FLAGS.IF is2479// cleared.2480"cli",2481"sti",2482"out 0x50, ax",2483// On WHPX we need some other instructions to bring the interuptibility back to normal.2484// While this is not needed for other hypervisors, we add this instruction unconditionally.2485"nop",2486"out 0x60, ax",2487"hlt",2488);24892490global_asm_data!(2491pub isr,2492".code16",2493"out 0xf0, ax",2494"iret",2495);2496}24972498// Physical x86 processor won't allow interrupt to be injected after mov ss or sti, while VM can.2499#[test]2500fn test_interrupt_ready_when_normally_not_interruptible() {2501use test_interrupt_ready_when_not_interruptible_code::Instrumentation;25022503let start_addr: u32 = 0x200;2504// Allocate exceed 0x1900, where we set up our stack.2505let mem_size: u32 = 0x2000;25062507let mut setup = TestSetup {2508load_addr: GuestAddress(start_addr.into()),2509initial_regs: Regs {2510rax: 0,2511rbx: 0,2512// Set RFLAGS.IF to enable interrupt.2513rflags: 2 | 0x202,2514..Default::default()2515},2516mem_size: mem_size.into(),2517..Default::default()2518};25192520let mut cur_addr = start_addr;25212522let idtr_size: u32 = 6;2523assert_eq!(2524Ok(std::mem::size_of::<Idtr32>()),2525usize::try_from(idtr_size)2526);2527// The limit is calculated from 256 entries timed by 4 bytes per entry.2528let idt_size = 256u16 * 4u16;2529let idtr = Idtr32 {2530limit: idt_size - 1,2531// The IDT right follows the 
IDTR.2532base_address: start_addr + idtr_size,2533};2534setup.add_memory_initialization(GuestAddress(cur_addr.into()), idtr.as_bytes().to_vec());2535cur_addr += idtr_size;25362537let idt_entry = (start_addr + idtr_size + u32::from(idt_size)).to_ne_bytes();2538// IDT entries are far pointers(CS:IP pair) to the only ISR, which locates right after the IDT.2539// We set all entries to the same ISR.2540let idt = (0..256).flat_map(|_| idt_entry).collect::<Vec<_>>();2541setup.add_memory_initialization(GuestAddress(cur_addr.into()), idt.clone());2542cur_addr += u32::try_from(idt.len()).expect("IDT size should be within u32");25432544let isr_assembly = test_interrupt_ready_when_not_interruptible_code::isr::data().to_vec();2545setup.add_memory_initialization(GuestAddress(cur_addr.into()), isr_assembly.clone());2546cur_addr += u32::try_from(isr_assembly.len()).expect("ISR size should be within u32");25472548let init_assembly = test_interrupt_ready_when_not_interruptible_code::init::data().to_vec();2549setup.initial_regs.rip = cur_addr.into();2550setup.add_memory_initialization(GuestAddress(cur_addr.into()), init_assembly.clone());2551cur_addr += u32::try_from(init_assembly.len()).expect("init size should be within u32");25522553assert!(mem_size > cur_addr);25542555// This helps us check the interruptibility under different situations.2556let interruptibility_traces = RefCell::<Vec<_>>::default();2557// This helps us check when the interrupt actually delivers.2558let instrumentation_traces = RefCell::<Vec<_>>::default();25592560run_tests!(2561setup,2562|_, regs, _| {2563use Instrumentation::*;2564assert_eq!(2565*interruptibility_traces.borrow(),2566[2567(BeforeMovSs, true),2568// Hypervisors don't allow interrupt injection right after mov ss.2569(AfterMovSs, false),2570(AfterAfterMovSs, true),2571(BeforeSti, true),2572// Hypervisors don't allow interrupt injection right after sti when FLAGS.IF is2573// not set.2574(AfterSti, false),2575(AfterAfterSti, true)2576]2577);2578// 
Hypervisors always deliver the interrupt right after we inject it in the next VCPU2579// run.2580assert_eq!(2581*instrumentation_traces.borrow(),2582[2583BeforeMovSs,2584InIsr,2585AfterMovSs,2586AfterAfterMovSs,2587InIsr,2588BeforeSti,2589InIsr,2590AfterSti,2591AfterAfterSti,2592InIsr,2593]2594);2595assert_eq!(regs.rip, u64::from(cur_addr));2596},2597|_, exit, vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| {2598match exit {2599VcpuExit::Io => {2600let ready_for_interrupt = vcpu.ready_for_interrupt();2601let mut should_inject_interrupt = ready_for_interrupt;2602vcpu.handle_io(&mut |io_params| {2603let instrumentation = Instrumentation::from(io_params.address);2604match instrumentation {2605Instrumentation::InIsr => {2606// Only inject interrupt outside ISR.2607should_inject_interrupt = false;2608}2609_ => {2610// Only the interuptibility outside the ISR is important for this2611// test.2612interruptibility_traces2613.borrow_mut()2614.push((instrumentation, ready_for_interrupt));2615}2616}2617instrumentation_traces.borrow_mut().push(instrumentation);2618// We are always handling out IO port, so no data to return.2619})2620.expect("should handle IO successfully");2621if should_inject_interrupt {2622vcpu.interrupt(32)2623.expect("interrupt injection should succeed when ready for interrupt");2624}2625false2626}2627VcpuExit::Hlt => true,2628r => panic!("unexpected VMEXIT reason: {r:?}"),2629}2630}2631);2632}26332634global_asm_data!(2635test_interrupt_ready_when_interrupt_enable_flag_not_set_code,2636".code16",2637"cli",2638// We can't use hlt for VMEXIT, because HAXM unconditionally allows interrupt injection for2639// hlt.2640"out 0x10, ax",2641"sti",2642// nop is necessary to avoid the one instruction ineterrupt disable window for sti when2643// FLAGS.IF is not set.2644"nop",2645"out 0x20, ax",2646"hlt",2647);26482649#[test]2650fn test_interrupt_ready_when_interrupt_enable_flag_not_set() {2651let assembly = 
test_interrupt_ready_when_interrupt_enable_flag_not_set_code::data().to_vec();2652let setup = TestSetup {2653assembly: assembly.clone(),2654load_addr: GuestAddress(0x1000),2655initial_regs: Regs {2656rip: 0x1000,2657rflags: 2,2658..Default::default()2659},2660..Default::default()2661};26622663run_tests!(2664setup,2665|_, regs, _| {2666// For VMEXIT caused by HLT, the hypervisor will automatically advance the rIP register.2667assert_eq!(regs.rip, 0x1000 + assembly.len() as u64);2668},2669|_, exit, vcpu, _: &mut dyn Vm| {2670match exit {2671VcpuExit::Io => {2672let mut addr = 0;2673vcpu.handle_io(&mut |io_params| {2674addr = io_params.address;2675// We are always handling out IO port, so no data to return.2676})2677.expect("should handle IO successfully");2678let regs = vcpu2679.get_regs()2680.expect("should retrieve the registers successfully");2681match addr {26820x10 => {2683assert_eq!(regs.rflags & FLAGS_IF_BIT, 0);2684assert!(!vcpu.ready_for_interrupt());2685}26860x20 => {2687assert_eq!(regs.rflags & FLAGS_IF_BIT, FLAGS_IF_BIT);2688assert!(vcpu.ready_for_interrupt());2689}2690_ => panic!("unexpected addr: {addr}"),2691}2692false2693}2694VcpuExit::Hlt => true,2695r => panic!("unexpected VMEXIT reason: {r:?}"),2696}2697}2698);2699}27002701#[test]2702fn test_enter_long_mode_direct() {2703global_asm_data!(2704pub long_mode_asm,2705".code64",2706"mov rdx, rax",2707"mov rbx, [0x10000]",2708"hlt"2709);27102711let bigly_mem_value: u64 = 0x1_0000_0000;2712let biglier_mem_value: u64 = 0x1_0000_0001;2713let mut setup = TestSetup {2714assembly: long_mode_asm::data().to_vec(),2715mem_size: 0x11000,2716load_addr: GuestAddress(0x1000),2717initial_regs: Regs {2718rax: bigly_mem_value,2719rip: 0x1000,2720rflags: 0x2,2721..Default::default()2722},2723extra_vm_setup: Some(Box::new(|vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {2724ModeConfig::default_long_mode().enter_long_mode(vcpu, 
vm);2725})),27262727..Default::default()2728};27292730setup.add_memory_initialization(2731GuestAddress(0x10000),2732biglier_mem_value.to_le_bytes().to_vec(),2733);2734let regs_matcher = move |_: HypervisorType, regs: &Regs, sregs: &Sregs| {2735assert!((sregs.efer & 0x400) != 0, "Long-Mode Active bit not set");2736assert_eq!(2737regs.rdx, bigly_mem_value,2738"Did not execute instructions correctly in long mode."2739);2740assert_eq!(2741regs.rbx, biglier_mem_value,2742"Was not able to access translated memory in long mode."2743);2744assert_eq!((sregs.cs.l), 1, "Long-mode bit not set in CS");2745};27462747let exit_matcher = |_, exit: &VcpuExit, _: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {2748VcpuExit::Hlt => {2749true // Break VM runloop2750}2751r => panic!("unexpected exit reason: {r:?}"),2752};27532754run_tests!(setup, regs_matcher, exit_matcher);2755}27562757#[test]2758fn test_enter_long_mode_asm() {2759global_asm_data!(2760pub enter_long_mode_asm,2761".code16",2762"lidt [0xd100]", // Address of the IDT limit + base2763"mov eax, cr4",2764"or ax, 1 << 7 | 1 << 5", // Set the PAE-bit (bit 5) and PGE (bit 7).2765"mov cr4, eax",27662767"mov bx, 0x9000", // Address of the page table.2768"mov cr3, ebx",27692770"mov ecx, 0xC0000080", // Set ECX to EFER MSR (0xC0000080)2771"rdmsr", // Read from the MSR2772"or ax, 1 << 8", // Set the LM-bit (bit 8).2773"wrmsr", // Write to the MSR27742775"mov eax, cr0",2776"or eax, 1 << 31 | 1 << 0", // Set PG (31nd bit) & PM (0th bit).2777"mov cr0, eax",27782779"lgdt [0xd000]", // Address of the GDT limit + base2780"ljmp 16, 0xe000" // Address of long_mode_asm2781);27822783global_asm_data!(2784pub long_mode_asm,2785".code64",2786"mov rdx, r8",2787"mov rbx, [0x10000]",2788"hlt"2789);27902791let bigly_mem_value: u64 = 0x1_0000_0000;2792let biglier_mem_value: u64 = 0x1_0000_0001;2793let mut setup = TestSetup {2794assembly: enter_long_mode_asm::data().to_vec(),2795mem_size: 0x13000,2796load_addr: GuestAddress(0x1000),2797initial_regs: 
Regs {2798r8: bigly_mem_value,2799rip: 0x1000,2800rflags: 0x2,2801..Default::default()2802},2803extra_vm_setup: Some(Box::new(|_: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {2804// TODO(b/354901961): configure_long_mode_memory loads GDT and IDT for 64 bit usage, and2805// the ABI doesn't match real mode and protected mode, but in this test, we first launch2806// in real mode.28072808ModeConfig::default_long_mode().configure_long_mode_memory(vm);2809})),28102811..Default::default()2812};28132814setup.add_memory_initialization(2815GuestAddress(0x10000),2816biglier_mem_value.to_le_bytes().to_vec(),2817);2818setup.add_memory_initialization(GuestAddress(0xe000), long_mode_asm::data().to_vec());28192820// GDT limit + base, to be loaded by the lgdt instruction.2821// Must be within 0xFFFF as it's executed in real-mode.2822setup.add_memory_initialization(GuestAddress(0xd000), 0xFFFF_u32.to_le_bytes().to_vec());2823setup.add_memory_initialization(2824GuestAddress(0xd000 + 2),2825(DEFAULT_GDT_OFFSET as u32).to_le_bytes().to_vec(),2826);28272828// IDT limit + base, to be loaded by the lidt instruction.2829// Must be within 0xFFFF as it's executed in real-mode.2830setup.add_memory_initialization(GuestAddress(0xd100), 0xFFFF_u32.to_le_bytes().to_vec());2831setup.add_memory_initialization(2832GuestAddress(0xd100 + 2),2833(DEFAULT_IDT_OFFSET as u32).to_le_bytes().to_vec(),2834);28352836let regs_matcher = move |_: HypervisorType, regs: &Regs, sregs: &Sregs| {2837assert!((sregs.efer & 0x400) != 0, "Long-Mode Active bit not set");2838assert_eq!(2839regs.rdx, bigly_mem_value,2840"Did not execute instructions correctly in long mode."2841);2842assert_eq!(2843regs.rbx, biglier_mem_value,2844"Was not able to access translated memory in long mode."2845);2846assert_eq!((sregs.cs.l), 1, "Long-mode bit not set in CS");2847};28482849let exit_matcher = |_, exit: &VcpuExit, _: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {2850VcpuExit::Hlt => {2851true // Break VM runloop2852}2853r => 
/// Verifies interrupt-window requests: after `set_interrupt_window_requested(true)` while the
/// guest is not interruptible, the hypervisor must report an open interrupt window only once the
/// guest becomes interruptible again (after `sti` plus one more instruction).
#[test]
fn test_request_interrupt_window() {
    global_asm_data!(
        assembly,
        ".code16",
        // Disable the interrupt, and the interrupt window shouldn't cause a vcpu exit until the
        // interrupt is enabled again.
        "cli",
        // vcpu exit here to request an interrupt window when interrupt is not ready. We can't use
        // hlt for VMEXIT, because HAXM unconditionally allows interrupt injection for hlt.
        "out 0x10, ax",
        // Enable the interrupt.
        "sti",
        // Another instruction window for interrupt delivery after sti. We shouldn't receive the
        // interrupt window exit until we complete this instruction. We use another intercepted
        // instruction here to make sure the hypervisor doesn't shadow the not delivered interrupt
        // request window on an intercepted instruction.
        "out 0x10, ax",
        // WHPX requires another not intercepted instruction to restore from the not interruptible
        // state.
        "nop",
        // The interrupt window exit should happen either right before nop or right after nop.
        "hlt",
    );

    let assembly = assembly::data().to_vec();
    let setup = TestSetup {
        assembly: assembly.clone(),
        load_addr: GuestAddress(0x1000),
        initial_regs: Regs {
            rip: 0x1000,
            rflags: 2,
            ..Default::default()
        },
        // Needed for the HAXM path below, which observes VcpuExit::Intr instead of a dedicated
        // interrupt-window exit.
        intercept_intr: true,
        ..Default::default()
    };

    run_tests!(
        setup,
        |_, regs, _| assert_eq!(regs.rip, 0x1000 + assembly.len() as u64),
        {
            // Counts handled out instructions; the window must open only after the second one.
            let mut io_counter = 0;
            let mut irq_window_received = false;
            move |hypervisor_type, exit, vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| {
                // HAXM reports VcpuExit::Intr rather than IrqWindowOpen, so treat an Intr exit
                // after both outs as the window.
                let is_irq_window = if hypervisor_type == HypervisorType::Haxm {
                    matches!(exit, VcpuExit::Intr) && io_counter == 2
                } else {
                    matches!(exit, VcpuExit::IrqWindowOpen)
                };
                if is_irq_window {
                    assert_eq!(io_counter, 2);
                    assert!(vcpu.ready_for_interrupt());
                    vcpu.set_interrupt_window_requested(false);

                    irq_window_received = true;
                    return false;
                }
                match exit {
                    VcpuExit::Intr => false,
                    VcpuExit::Io => {
                        // We are always handling out IO port, so no data to return.
                        vcpu.handle_io(&mut |_| {})
                            .expect("should handle IO successfully");

                        // Both out instructions execute while the guest is not interruptible
                        // (after cli, and in the sti shadow respectively).
                        assert!(!vcpu.ready_for_interrupt());

                        // Only set the interrupt window request on the first out instruction.
                        if io_counter == 0 {
                            vcpu.set_interrupt_window_requested(true);
                        }
                        io_counter += 1;
                        false
                    }
                    VcpuExit::Hlt => {
                        assert!(irq_window_received);
                        true
                    }
                    r => panic!("unexpected VMEXIT: {r:?}"),
                }
            }
        }
    );
}
0xbbbbbbbbbbbbbbbb);2992assert_eq!(sregs.fs.base, fs);2993assert_eq!(sregs.gs.base, gs);2994};29952996let exit_matcher = |_, exit: &VcpuExit, _vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {2997VcpuExit::Hlt => {2998true // Break VM runloop2999}3000r => panic!("unexpected exit reason: {r:?}"),3001};30023003run_tests!(setup, regs_matcher, exit_matcher);3004}30053006/// Tests whether MMX state is being preserved by the hypervisor correctly (e.g. the hypervisor is3007/// properly using fxsave/fxrstor, or xsave/xrstor (or xsaves/xrstors)).3008#[test]3009fn test_mmx_state_is_preserved_by_hypervisor() {3010// This program stores a sentinel value into mm0 (the first MMX register) and verifies3011// that after a vmexit, that value is properly restored (we copy it to rbx so it can be checked3012// by the reg matcher when the VM hlts). In the vmexit handler function below, we make sure the3013// sentinel value is NOT in mm0. This way we know the mm0 value has changed, so we're guaranteed3014// the hypervisor has to restore the guest's sentinel value for the test to pass. (The read3015// from mm0 to rbx happens *after* the vmexit, so the hypervisor has to restore the guest's3016// mm0 otherwise there will be random garbage in there from the host. This would also be a3017// security issue.)3018//3019// Note: this program also verifies the guest has MMX support. 
/// Tests whether MMX state is being preserved by the hypervisor correctly (e.g. the hypervisor is
/// properly using fxsave/fxrstor, or xsave/xrstor (or xsaves/xrstors)).
#[test]
fn test_mmx_state_is_preserved_by_hypervisor() {
    // This program stores a sentinel value into mm0 (the first MMX register) and verifies
    // that after a vmexit, that value is properly restored (we copy it to rbx so it can be checked
    // by the reg matcher when the VM hlts). In the vmexit handler function below, we make sure the
    // sentinel value is NOT in mm0. This way we know the mm0 value has changed, so we're guaranteed
    // the hypervisor has to restore the guest's sentinel value for the test to pass. (The read
    // from mm0 to rbx happens *after* the vmexit, so the hypervisor has to restore the guest's
    // mm0 otherwise there will be random garbage in there from the host. This would also be a
    // security issue.)
    //
    // Note: this program also verifies the guest has MMX support. If it does not, rdx will be 1 and
    // no MMX instructions will be attempted.
    let sentinel_mm0_value = 0x1337FFFFu64;
    global_asm_data!(
        pub mmx_ops_asm,
        ".code64",
        // CPUID.1:EDX bit 23 is the MMX feature flag.
        "mov eax, 1",
        "cpuid",
        "bt edx, 23",
        "jc HasMMX",
        "mov rdx, 1",
        "hlt",
        "HasMMX:",
        "xor rdx, rdx",
        "mov rax, 0x1337FFFF",
        "mov rbx, 0x0",
        // Place the sentinel in mm0, then force a vmexit via port IO.
        "movq mm0, rax",
        "out 0x5, al",
        // After the vmexit, mm0 must still hold the sentinel for rbx to match below.
        "movq rbx, mm0",
        "emms",
        "hlt",
    );

    let code_addr = 0x1000;
    let setup = TestSetup {
        assembly: mmx_ops_asm::data().to_vec(),
        mem_size: 0x12000,
        load_addr: GuestAddress(code_addr),
        initial_regs: Regs {
            rip: code_addr,
            rflags: 0x2,
            ..Default::default()
        },
        extra_vm_setup: Some(Box::new(|vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {
            ModeConfig::default_long_mode().enter_long_mode(vcpu, vm);
        })),
        memory_initializations: vec![],
        ..Default::default()
    };

    let regs_matcher = move |_: HypervisorType, regs: &Regs, _: &_| {
        assert_ne!(regs.rdx, 1, "guest has no MMX support");
        assert_eq!(
            regs.rbx, sentinel_mm0_value,
            "guest MMX register not restored by hypervisor"
        );
    };

    let exit_matcher = |_, exit: &VcpuExit, vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {
        VcpuExit::Hlt => {
            true // Break VM runloop
        }
        VcpuExit::Cpuid { entry } => {
            vcpu.handle_cpuid(entry)
                .expect("should handle cpuid successfully");
            false
        }
        VcpuExit::Io => {
            vcpu.handle_io(&mut |_| {})
                .expect("should handle IO successfully");

            // kaiyili@ pointed out we should check the XSAVE state exposed by the hypervisor via
            // its API (e.g. vm.get_xsave_state). This is used in snapshotting, so if it's wrong,
            // that would break things. It's also a good cross-check that the hypervisor is properly
            // handling xsave state.
            //
            // There are a couple of things blocking us from doing that today:
            // 1. gHAXM, our hypervisor of interest, doesn't expose its xsave area state for
            //    the guest.
            // 2. We don't have an xsave area parser (yet).

            // mm0 MUST NOT have the guest's sentinel value. If it somehow does, the hypervisor
            // didn't save the guest's FPU/MMX state / restore the host's state before exiting to
            // CrosVM.
            //
            // Note: MMX is ubiquitous on x86_64, so we don't check for support on the host (the
            // guest checks, so unless the guest's support is software implemented, it's highly
            // likely the host has MMX support).
            let mut mm0_value: u64;
            // SAFETY: we do not clobber any undeclared registers. Technically emms changes some
            // x87 state, so there's some UB risk here, but it is not explicitly called out by
            // the Rust docs as a bad idea.
            unsafe {
                asm!(
                    "movq rax, mm0",
                    "emms",
                    out("rax") mm0_value);
            }
            assert_ne!(
                mm0_value, sentinel_mm0_value,
                "host mm0 value is the same as the guest sentinel value"
            );
            false
        }
        r => panic!("unexpected exit reason: {r:?}"),
    };

    run_tests!(setup, regs_matcher, exit_matcher);
}
/// Tests whether AVX state is being preserved by the hypervisor correctly (e.g. the hypervisor is
/// properly using xsave/xrstor (or xsaves/xrstors)). This is very similar to the MMX test, but
/// AVX state is *not* captured by fxsave, so that's how we guarantee xsave state of some kind is
/// being handled properly.
#[test]
fn test_avx_state_is_preserved_by_hypervisor() {
    if !is_x86_feature_detected!("avx") {
        panic!("this test requires host AVX support and it was not detected");
    }

    let sentinel_value = 0x1337FFFFu64;
    global_asm_data!(
        pub avx_ops_asm,
        ".code64",
        // CPUID.1:ECX bit 28 is the AVX feature flag.
        "mov eax, 1",
        "cpuid",
        "bt ecx, 28",
        "jc HasAVX",
        "mov rdx, 1",
        "hlt",
        "HasAVX:",

        // Turn on OSXSAVE (we can't touch XCR0 without it).
        "mov rax, cr4",
        "or eax, 1 << 18",
        "mov cr4, rax",

        // AVX won't work unless we enable it.
        //
        // Set the relevant XCR0 bits:
        // 0: X87
        // 1: SSE
        // 2: AVX
        "xor rcx, rcx",
        "xgetbv",
        // (7 = 111b)
        "or eax, 7",
        "xsetbv",

        // Now that AVX is ready to use, let's start with a clean slate (and signify we have AVX
        // support to the test assert below by zeroing rdx).
        "xor rdx, rdx",
        "xor rax, rax",
        "xor rbx, rbx",
        "vzeroall",

        // Here's the actual test (finally). Since AVX is a little tricky to follow, here's what
        // the test does:
        // 1. We load 0x1337FFFF into ymm1 via xmm0.
        // 2. We perform port IO to exit out to CrosVM (our vmexit handler below).
        // 3. The vmexit handler makes sure ymm1 does NOT contain 0x1337FFFF.
        // 4. We return to this program. Then we dump the value of ymm1 into ebx. The exit
        //    register matcher verifies that 0x1337FFFF is in ebx. This means the hypervisor
        //    properly restored ymm1 for the guest on vmenter.
        "mov eax, 0x1337FFFF",
        "vpinsrd xmm0, xmm1, eax, 3",
        "vinserti128 ymm1, ymm2, xmm0, 1",
        "out 0x5, al",
        "vextracti128 xmm3, ymm1, 1",
        "vpextrd ebx, xmm3, 3",
        "hlt",
    );

    let code_addr = 0x1000;
    let setup = TestSetup {
        assembly: avx_ops_asm::data().to_vec(),
        mem_size: 0x12000,
        load_addr: GuestAddress(code_addr),
        initial_regs: Regs {
            rip: code_addr,
            rflags: 0x2,
            ..Default::default()
        },
        extra_vm_setup: Some(Box::new(|vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {
            ModeConfig::default_long_mode().enter_long_mode(vcpu, vm);
        })),
        memory_initializations: vec![],
        ..Default::default()
    };

    let regs_matcher = move |_: HypervisorType, regs: &Regs, _: &_| {
        assert_ne!(regs.rdx, 1, "guest has no AVX support");
        assert_eq!(
            regs.rbx, sentinel_value,
            "guest AVX register not restored by hypervisor"
        );
    };

    let exit_matcher = |_, exit: &VcpuExit, vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {
        VcpuExit::Hlt => {
            true // Break VM runloop
        }
        VcpuExit::Cpuid { entry } => {
            vcpu.handle_cpuid(entry)
                .expect("should handle cpuid successfully");
            false
        }
        VcpuExit::Io => {
            vcpu.handle_io(&mut |_| {})
                .expect("should handle IO successfully");

            // kaiyili@ pointed out we should check the XSAVE state exposed by the hypervisor via
            // its API (e.g. vm.get_xsave_state). This is used in snapshotting, so if it's wrong,
            // that would break things. It's also a good cross-check that the hypervisor is properly
            // handling xsave state.
            //
            // There are a couple of things blocking us from doing that today:
            // 1. gHAXM, our hypervisor of interest, doesn't expose its xsave area state for
            //    the guest.
            // 2. We don't have a xsave area parser (yet).

            // ymm1 MUST NOT have the guest's sentinel value. If it somehow does, the hypervisor
            // didn't save the guest's AVX state / restore the host's state before exiting to
            // CrosVM.
            //
            // Note: AVX is ubiquitous on x86_64, so we don't check for support on the host (the
            // guest checks, so unless the guest's support is software implemented, it's highly
            // likely the host has AVX support).
            let mut ymm1_sub_value: u64;
            // SAFETY: we don't clobber any undeclared registers.
            unsafe {
                asm!(
                    "vextracti128 xmm4, ymm1, 1",
                    "vpextrd eax, xmm4, 3",
                    out("rax") ymm1_sub_value,
                    out("xmm4") _);
            }
            assert_ne!(
                ymm1_sub_value, sentinel_value,
                "host ymm1 value is the same as the guest sentinel value. Hypervisor likely didn't \
                save guest's state."
            );
            false
        }
        r => panic!("unexpected exit reason: {r:?}"),
    };

    run_tests!(setup, regs_matcher, exit_matcher);
}
/// Tests whether XSAVE works inside a guest: a sentinel placed in xmm0 must survive an
/// xsave / clear / xrstor round trip through a guest-memory save area.
#[test]
fn test_xsave() {
    let sentinel_xmm0_value = 0x1337FFFFu64;
    global_asm_data!(
        pub xsave_ops_asm,
        ".code64",

        // Make sure XSAVE is supported (CPUID.1:ECX bit 26).
        "mov eax, 1",
        "mov ecx, 0",
        "cpuid",
        "bt ecx, 26",
        "jc HasXSAVE",
        "mov rdx, 1",
        "hlt",
        "HasXSAVE:",
        "xor rdx, rdx",

        // Turn on OSXSAVE.
        "mov rax, cr4",
        "or eax, 1 << 18",
        "mov cr4, rax",

        // Enable X87, SSE, and AVX.
        //
        // Set the relevant XCR0 bits:
        // 0: X87
        // 1: SSE
        // 2: AVX
        "xor rcx, rcx",
        "xgetbv",
        // (7 = 111b)
        "or eax, 7",
        "xsetbv",

        // Put the sentinel value in xmm0, and save it off.
        "mov eax, 0x1337FFFF",
        "vzeroall",
        "vpinsrd xmm0, xmm1, eax, 3",
        // edx:eax form the XSAVE feature mask (RFBM); 7 matches the XCR0 bits enabled above.
        "xor edx, edx",
        "mov eax, 7",
        "xsave dword ptr [0x10000]",

        // Clear xmm0.
        "vpxor xmm0, xmm0, xmm0",

        // Restoring should put the sentinel value back.
        "xor edx, edx",
        "mov eax, 7",
        "xrstor dword ptr [0x10000]",

        "xor rbx, rbx",
        "vpextrd ebx, xmm0, 3",
        "hlt",
    );

    let code_addr = 0x1000;
    let setup = TestSetup {
        assembly: xsave_ops_asm::data().to_vec(),
        mem_size: 0x12000,
        load_addr: GuestAddress(code_addr),
        initial_regs: Regs {
            rip: code_addr,
            rflags: 0x2,
            ..Default::default()
        },
        extra_vm_setup: Some(Box::new(|vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {
            ModeConfig::default_long_mode().enter_long_mode(vcpu, vm);
        })),
        // Zeroed scratch page at 0x10000 acts as the XSAVE area.
        memory_initializations: vec![(GuestAddress(0x10000), vec![0; 0x1000])],
        ..Default::default()
    };

    let regs_matcher = move |_: HypervisorType, regs: &Regs, _: &_| {
        assert_ne!(regs.rdx, 1, "guest has no XSAVE support");
        assert_eq!(
            regs.rbx, sentinel_xmm0_value,
            "guest SSE register not restored by XRSTOR",
        );
    };

    let exit_matcher = |_, exit: &VcpuExit, vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {
        VcpuExit::Hlt => {
            true // Break VM runloop
        }
        VcpuExit::Cpuid { entry } => {
            vcpu.handle_cpuid(entry)
                .expect("should handle cpuid successfully");
            false
        }
        VcpuExit::MsrAccess => false, // MsrAccess handled by hypervisor impl
        r => panic!("unexpected exit reason: {r:?}"),
    };

    run_tests!(setup, regs_matcher, exit_matcher);
}
/// Tests whether XSAVES works inside a guest.
///
/// Ignored because CET is not available in some nested virtualization
/// environments (such as CI). (CET is the feature we use to test XSAVES.)
#[ignore]
#[cfg(feature = "whpx")]
#[test]
fn test_xsaves() {
    global_asm_data!(
        pub xsaves_ops_asm,
        ".code64",

        // Make sure XSAVES is supported (CPUID.(EAX=0xD,ECX=1):EAX bit 3).
        "mov eax, 0xd",
        "mov ecx, 1",
        "cpuid",
        "bt eax, 3",
        "jc HasXSAVES",
        "mov rdx, 1",
        "hlt",
        "HasXSAVES:",

        // Make sure CET is supported (CPUID.(EAX=7,ECX=0):ECX bit 7).
        "mov eax, 7",
        "mov ecx, 0",
        "cpuid",
        "bt ecx, 7",
        "jc HasCET",
        "mov rdx, 2",
        "hlt",
        "HasCET:",

        // Turn on write protection for ring 0 (required by CET).
        "mov rax, cr0",
        "or eax, 1 << 16",
        "mov cr0, rax",

        // Turn on OSXSAVE (18) and CET (23).
        "mov rax, cr4",
        "or eax, 1 << 18",
        "or eax, 1 << 23",
        "mov cr4, rax",

        // Set up XSAVES to manage CET state.
        // IA32_XSS = 0x0DA0
        "mov ecx, 0x0DA0",
        "rdmsr",
        "or eax, 1 << 12",
        "wrmsr",

        // Enable CET.
        "mov ecx, 0x6A2",
        "rdmsr",
        "or eax, 1",
        "wrmsr",

        // Now CET is usable and managed by XSAVES. Let's set a sentinel value and make sure xsaves
        // restores it as expected. Note that PL0_SSP's linear address must be 8 byte aligned.
        // PL0_SSP = 0x06A4
        "mov ecx, 0x06A4",
        "xor edx, edx",
        "xor eax, eax",
        "mov eax, 0x13370000",
        "wrmsr",

        // Set the RFBM / feature mask to include CET.
        "xor edx, edx",
        "mov eax, 1 << 12",
        "xsaves dword ptr [0x10000]",

        // Clear PL0_SSP
        "xor edx, edx",
        "xor eax, eax",
        "mov ecx, 0x06A4",
        "wrmsr",

        // Set the RFBM / feature mask to include CET.
        "xor edx, edx",
        "mov eax, 1 << 12",
        "xrstors dword ptr [0x10000]",

        // Check to see if PL0_SSP was restored.
        "mov ecx, 0x06A4",
        "rdmsr",
        "cmp eax, 0x13370000",
        "jz TestPasses",
        "mov rdx, 3",
        "hlt",
        "TestPasses:",
        "xor rdx, rdx",
        "hlt",
    );

    let code_addr = 0x1000;
    let setup = TestSetup {
        assembly: xsaves_ops_asm::data().to_vec(),
        mem_size: 0x12000,
        load_addr: GuestAddress(code_addr),
        initial_regs: Regs {
            rip: code_addr,
            // rdx = 4 means the guest never ran far enough to set any result code.
            rdx: 0x4,
            rflags: 0x2,
            ..Default::default()
        },
        extra_vm_setup: Some(Box::new(|vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {
            ModeConfig::default_long_mode().enter_long_mode(vcpu, vm);
        })),
        // Zeroed scratch page at 0x10000 acts as the XSAVES area.
        memory_initializations: vec![(GuestAddress(0x10000), vec![0; 0x1000])],
        ..Default::default()
    };

    // rdx carries the guest's result code: 0 = pass, nonzero identifies which step failed.
    let regs_matcher = move |_: HypervisorType, regs: &Regs, _: &_| {
        assert_ne!(regs.rdx, 1, "guest has no XSAVES support");
        assert_ne!(regs.rdx, 2, "guest has no CET support");
        assert_ne!(regs.rdx, 3, "guest didn't restore PL0_SSP as expected");
        assert_eq!(regs.rdx, 0, "test failed unexpectedly");
    };

    let exit_matcher = |_, exit: &VcpuExit, vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {
        VcpuExit::Hlt => {
            true // Break VM runloop
        }
        VcpuExit::Cpuid { entry } => {
            vcpu.handle_cpuid(entry)
                .expect("should handle cpuid successfully");
            false
        }
        VcpuExit::MsrAccess => false, // MsrAccess handled by hypervisor impl
        r => panic!("unexpected exit reason: {r:?}"),
    };

    run_tests!(setup, regs_matcher, exit_matcher);
}
r),3486};34873488run_tests!(setup, regs_matcher, exit_matcher);3489}34903491/// Tests that XSAVES is disabled in gHAXM (it's unsupported).3492///3493/// Note: this test passing in CI is not necessarily a signal that gHAXM is working correctly3494/// because XSAVES is disabled in some nested virtualization environments (e.g. CI).3495#[cfg(feature = "haxm")]3496#[test]3497fn test_xsaves_is_disabled_on_haxm() {3498global_asm_data!(3499pub no_xsaves_asm,3500".code64",35013502"mov eax, 0xd",3503"mov ecx, 1",3504"cpuid",3505"bt eax, 3",3506"jnc NoXSAVES",3507"mov rdx, 1",3508"hlt",3509"NoXSAVES:",3510"mov rdx, 0",3511"hlt",3512);35133514let code_addr = 0x1000;3515let setup = TestSetup {3516assembly: no_xsaves_asm::data().to_vec(),3517mem_size: 0x12000,3518load_addr: GuestAddress(code_addr),3519initial_regs: Regs {3520rip: code_addr,3521rdx: 0x2,3522rflags: 0x2,3523..Default::default()3524},3525extra_vm_setup: Some(Box::new(|vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {3526ModeConfig::default_long_mode().enter_long_mode(vcpu, vm);3527})),3528memory_initializations: vec![],3529..Default::default()3530};35313532let regs_matcher = move |_: HypervisorType, regs: &Regs, _: &_| {3533assert_ne!(regs.rdx, 1, "guest has XSAVES support and shouldn't");3534assert_eq!(regs.rdx, 0, "test failed unexpectedly");3535};35363537let exit_matcher = |_, exit: &VcpuExit, vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {3538VcpuExit::Hlt => {3539true // Break VM runloop3540}3541VcpuExit::Cpuid { entry } => {3542vcpu.handle_cpuid(entry)3543.expect("should handle cpuid successfully");3544false3545}3546VcpuExit::MsrAccess => false, // MsrAccess handled by hypervisor impl3547r => panic!("unexpected exit reason: {r:?}"),3548};35493550run_tests!(setup, regs_matcher, exit_matcher);3551}35523553/// Tests whether SLAT is updated properly when a region is removed from the guest. 
A correctly3554/// implemented hypervisor will flush the TLB such that this immediately hits a SLAT fault and comes3555/// to us as MMIO. If we don't see that, and the guest actually reads from the removed region, the3556/// test will fail. In the real world, this would be a guest read from a random pfn, which is3557/// UB (and a major security problem).3558///3559/// Flakes should be treated as real failures (this test can show a false negative, but never a3560/// false positive).3561#[test]3562fn test_slat_on_region_removal_is_mmio() {3563global_asm_data!(3564pub test_asm,3565".code64",35663567// Load the TLB with a mapping for the test region.3568"mov al, byte ptr [0x20000]",35693570// Signal to the host that VM is running. On this vmexit, the host will unmap the test3571// region.3572"out 0x5, al",35733574// This read should result in MMIO, and if it does, the test passes. If we hit the hlt, then3575// the test fails (since it means we were able to satisfy this read without exiting).3576"mov al, byte ptr [0x20000]",3577"hlt"3578);35793580const TEST_MEM_REGION_SIZE: usize = 0x1000;3581let memslot: Arc<Mutex<Option<MemSlot>>> = Arc::new(Mutex::new(None));3582let memslot_for_func = memslot.clone();35833584let code_addr = 0x1000;3585let setup = TestSetup {3586assembly: test_asm::data().to_vec(),3587mem_size: 0x12000,3588load_addr: GuestAddress(code_addr),3589initial_regs: Regs {3590rip: code_addr,3591rflags: 0x2,3592..Default::default()3593},3594extra_vm_setup: Some(Box::new(3595move |vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {3596ModeConfig::default_long_mode().enter_long_mode(vcpu, vm);35973598// Create a test pinned memory region that is all 0xFF.3599let shm = SharedMemory::new("test", TEST_MEM_REGION_SIZE as u64).unwrap();3600let test_region = Box::new(3601MemoryMappingBuilder::new(TEST_MEM_REGION_SIZE)3602.from_shared_memory(&shm)3603.build()3604.unwrap(),3605);3606let ff_init = [0xFFu8; TEST_MEM_REGION_SIZE];3607test_region.write_slice(&ff_init, 
0).unwrap();3608let test_region = Box::new(3609PinnedMemoryRegion::new(test_region).expect("failed to pin test region"),3610);3611*memslot_for_func.lock() = Some(3612vm.add_memory_region(3613GuestAddress(0x20000),3614test_region,3615false,3616false,3617MemCacheType::CacheCoherent,3618)3619.unwrap(),3620);3621},3622)),3623memory_initializations: vec![],3624..Default::default()3625};36263627// Holds the test memory region after it's unmapped and the VM is still running. Without this,3628// incorrect access to the region by the VM would be unsafe / UB.3629let test_region_arc: Arc<Mutex<Option<Box<dyn MappedRegion>>>> = Arc::new(Mutex::new(None));3630let test_region_arc_for_exit = test_region_arc.clone();36313632let exit_matcher =3633move |_, exit: &VcpuExit, vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| match exit {3634VcpuExit::Io => {3635// WHPX insists on data being returned here or it throws MemoryCallbackFailed.3636//3637// We strictly don't care what this data is, since the VM exits before running any3638// further instructions.3639vcpu.handle_io(&mut |_| {})3640.expect("should handle IO successfully");36413642// Remove the test memory region to cause a SLAT fault (in the passing case).3643//3644// This also ensures the memory region remains pinned in host physical memory so any3645// incorrect accesses to it by the VM will remain safe.3646*test_region_arc_for_exit.lock() =3647Some(vm.remove_memory_region(memslot.lock().unwrap()).unwrap());3648false3649}3650VcpuExit::Mmio => {3651vcpu.handle_mmio(&mut |IoParams { address, operation }| {3652assert_eq!(address, 0x20000, "MMIO for wrong address");3653match operation {3654IoOperation::Read(data) => {3655assert_eq!(data.len(), 1);3656data[0] = 0;3657Ok(())3658}3659IoOperation::Write(_) => {3660panic!("got unexpected IO operation {operation:?}");3661}3662}3663})3664.unwrap();3665true3666}3667VcpuExit::Hlt => {3668panic!("VM should not reach the hlt instruction (MMIO should've ended the VM)");3669}3670r => 
panic!("unexpected exit reason: {r:?}"),3671};36723673// We want to catch if the hypervisor doesn't clear the VM's TLB. If we hop between CPUs, then3674// we're likely to end up with a clean TLB on another CPU.3675set_cpu_affinity(vec![0]).unwrap();36763677run_tests!(setup, move |_, _, _| {}, &exit_matcher);3678}36793680struct PinnedMemoryRegion {3681mem_region: Box<dyn MappedRegion>,3682}36833684impl PinnedMemoryRegion {3685fn new(mem_region: Box<dyn MappedRegion>) -> base::Result<Self> {3686// SAFETY:3687// ptr is a valid pointer and points to a region of the supplied size.3688unsafe { pin_memory(mem_region.as_ptr() as *mut _, mem_region.size()) }?;3689Ok(Self { mem_region })3690}3691}36923693// SAFETY:3694// Safe because ptr & size a memory range owned by this MemoryMapping that won't be unmapped3695// until it's dropped.3696unsafe impl MappedRegion for PinnedMemoryRegion {3697fn as_ptr(&self) -> *mut u8 {3698self.mem_region.as_ptr()3699}37003701fn size(&self) -> usize {3702self.mem_region.size()3703}3704}37053706impl Drop for PinnedMemoryRegion {3707fn drop(&mut self) {3708// SAFETY:3709// memory region passed is a valid pointer and points to a region of the3710// supplied size. 
We also panic on failure.3711unsafe { unpin_memory(self.mem_region.as_ptr() as *mut _, self.mem_region.size()) }3712.expect("failed to unpin memory")3713}3714}37153716unsafe fn pin_memory(ptr: *mut c_void, len: usize) -> base::Result<()> {3717#[cfg(windows)]3718{3719VirtualLock(ptr, len).map_err(|e| base::Error::new(e.code().0))3720}3721#[cfg(unix)]3722{3723if libc::mlock(ptr, len) != 0 {3724Err(base::Error::last())3725} else {3726Ok(())3727}3728}3729}37303731unsafe fn unpin_memory(ptr: *mut c_void, len: usize) -> base::Result<()> {3732#[cfg(windows)]3733{3734VirtualUnlock(ptr, len).map_err(|e| base::Error::new(e.code().0))3735}3736#[cfg(unix)]3737{3738if libc::munlock(ptr, len) != 0 {3739Err(base::Error::last())3740} else {3741Ok(())3742}3743}3744}37453746#[test]3747fn test_interrupt_injection_when_not_ready() {3748// This test ensures that if we inject an interrupt when it's not ready for interrupt, we3749// shouldn't end up with crash or hang. And if the interrupt is delivered, it shouldn't be3750// delivered before we reenable the interrupt.3751mod assembly {3752use super::*;37533754global_asm_data!(3755pub init,3756".code16",3757// Set the IDT3758"lidt [0x200]",3759// Set up the stack, which will be used when CPU transfers the control to the ISR on3760// interrupt.3761"mov sp, 0x900",3762// Set ax to 0.3763"xor ax, ax",3764// Set the address 0x910 to 1 when we disable the interrupt, and restore it to 0 after3765// we renable the interrupt.3766"mov word ptr [0x910], 1",3767"cli",3768// We can't use hlt for VMEXIT, because HAXM unconditionally allows interrupt injection3769// for hlt. We will inject an interrupt here although all hypervisors should report not3770// ready for injection an interrupt. 
And we don't care if the injection succeeds or not.3771"out 0x10, ax",3772"sti",3773// Set the address 0x910 to 0 when we renable the interrupt.3774"mov word ptr [0x910], 0",3775// For hypervisor that injects the interrupt later when it's ready, the interrupt will3776// be delivered here.3777"nop",3778"hlt",3779);37803781// We still need an ISR in case the hypervisor actually delivers an interrupt.3782global_asm_data!(3783pub isr,3784".code16",3785// ax will be 0 if the interrupt is delivered after we reenable the interrupt.3786// Otherwise, ax will be 1, and the test fails.3787"mov ax, word ptr [0x910]",3788"iret",3789);3790}37913792let start_addr: u32 = 0x200;3793// Allocate exceed 0x900, where we set up our stack.3794let mem_size: u32 = 0x1000;37953796let mut setup = TestSetup {3797load_addr: GuestAddress(start_addr.into()),3798initial_regs: Regs {3799rax: 0,3800// Set RFLAGS.IF to enable interrupt at the beginning.3801rflags: 2 | FLAGS_IF_BIT,3802..Default::default()3803},3804mem_size: mem_size.into(),3805..Default::default()3806};38073808let mut cur_addr = start_addr;38093810let idtr_size: u32 = 6;3811assert_eq!(3812Ok(std::mem::size_of::<Idtr32>()),3813usize::try_from(idtr_size)3814);3815// The limit is calculated from 256 entries timed by 4 bytes per entry.3816let idt_size = 256u16 * 4u16;3817let idtr = Idtr32 {3818limit: idt_size - 1,3819// The IDT right follows the IDTR.3820base_address: start_addr + idtr_size,3821};3822setup.add_memory_initialization(GuestAddress(cur_addr.into()), idtr.as_bytes().to_vec());3823cur_addr += idtr_size;38243825let idt_entry = (start_addr + idtr_size + u32::from(idt_size)).to_ne_bytes();3826// IDT entries are far pointers(CS:IP pair) to the only ISR, which locates right after the IDT.3827// We set all entries to the same ISR.3828let idt = (0..256).flat_map(|_| idt_entry).collect::<Vec<_>>();3829setup.add_memory_initialization(GuestAddress(cur_addr.into()), idt.clone());3830cur_addr += u32::try_from(idt.len()).expect("IDT size 
should be within u32");38313832let isr_assembly = assembly::isr::data().to_vec();3833setup.add_memory_initialization(GuestAddress(cur_addr.into()), isr_assembly.clone());3834cur_addr += u32::try_from(isr_assembly.len()).expect("ISR size should be within u32");38353836let init_assembly = assembly::init::data().to_vec();3837setup.initial_regs.rip = cur_addr.into();3838setup.add_memory_initialization(GuestAddress(cur_addr.into()), init_assembly.clone());3839cur_addr += u32::try_from(init_assembly.len()).expect("init size should be within u32");38403841assert!(mem_size > cur_addr);38423843run_tests!(3844setup,3845|_, regs, _| {3846assert_eq!(3847regs.rax, 0,3848"the interrupt should be either not delivered(ax is kept as the initial value 0) \3849or is delivered after we reenable the interrupt(when the ax is set from 0x910, \38500x910 is 0)"3851);3852},3853|_, exit, vcpu: &mut dyn VcpuX86_64, _: &mut dyn Vm| {3854match exit {3855// We exit and pass the test either the VCPU run fails or we hit hlt.3856VcpuExit::FailEntry { .. } | VcpuExit::Shutdown(..) | VcpuExit::Hlt => true,3857VcpuExit::Io => {3858// We are always handling out IO port, so no data to return.3859vcpu.handle_io(&mut |_| {})3860.expect("should handle IO successfully");3861assert!(!vcpu.ready_for_interrupt());3862// We don't care whether we inject the interrupt successfully or not.3863let _ = vcpu.interrupt(32);3864false3865}3866r => panic!("unexpected VMEXIT reason: {r:?}"),3867}3868}3869);3870}38713872#[test]3873fn test_ready_for_interrupt_for_intercepted_instructions() {3874global_asm_data!(3875assembly,3876// We will use out instruction to cause VMEXITs and test ready_for_interrupt then.3877".code16",3878// Disable the interrupt.3879"cli",3880// ready_for_interrupt should be false here.3881"out 0x10, ax",3882"sti",3883// ready_for_interrupt should be false here, because of the one instruction3884// interruptibility window for sti. 
And this is also an intercepted instruction.3885"out 0x20, ax",3886// ready_for_interrupt should be true here except for WHPX.3887"out 0x30, ax",3888// Restore the interruptibility for WHPX.3889"nop",3890"mov ax, ss",3891"mov ss, ax",3892// ready_for_interrupt should be false here, because of the one instruction3893// interruptibility window for mov ss. And this is also an intercepted instruction.3894"out 0x40, ax",3895// ready_for_interrupt should be true here except for WHPX.3896"out 0x50, ax",3897"hlt"3898);38993900let assembly = assembly::data().to_vec();3901let setup = TestSetup {3902assembly: assembly.clone(),3903load_addr: GuestAddress(0x1000),3904initial_regs: Regs {3905rip: 0x1000,3906rflags: 2,3907..Default::default()3908},3909..Default::default()3910};39113912run_tests!(3913setup,3914|_, regs, _| {3915// For VMEXIT caused by HLT, the hypervisor will automatically advance the rIP register.3916assert_eq!(regs.rip, 0x1000 + assembly.len() as u64);3917},3918|hypervisor_type, exit, vcpu, _: &mut dyn Vm| {3919match exit {3920VcpuExit::Hlt => true,3921VcpuExit::Io => {3922let ready_for_interrupt = vcpu.ready_for_interrupt();3923let mut io_port = 0;3924vcpu.handle_io(&mut |params| {3925io_port = params.address;3926// We are always handling out IO port, so no data to return.3927})3928.expect("should handle port IO successfully");3929match io_port {39300x10 | 0x20 | 0x40 => assert!(!ready_for_interrupt),39310x30 | 0x50 => {3932// WHPX needs a not intercepted instruction to recover to the proper3933// interruptibility state.3934if hypervisor_type != HypervisorType::Whpx {3935assert!(ready_for_interrupt);3936}3937}3938_ => panic!("unexpected port {io_port}"),3939}3940false3941}3942r => panic!("unexpected exit reason: {r:?}"),3943}3944}3945);3946}39473948#[cfg(feature = "haxm")]3949#[test]3950fn test_cpuid_mwait_not_supported() {3951global_asm_data!(3952cpuid_code,3953".code64",3954"mov eax, 1", // CPUID function 13955"cpuid",3956"hlt"3957);39583959let setup = 
TestSetup {3960assembly: cpuid_code::data().to_vec(),3961load_addr: GuestAddress(0x1000),3962initial_regs: Regs {3963rip: 0x1000,3964rflags: 2,3965..Default::default()3966},3967..Default::default()3968};39693970let regs_matcher = |_: HypervisorType, regs: &Regs, _: &Sregs| {3971// Check if MWAIT is not supported3972assert_eq!(3973regs.rcx & (1 << 3),39740,3975"MWAIT is supported, but it should not be."3976);3977};39783979let exit_matcher = |_, exit: &VcpuExit, _: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {3980VcpuExit::Hlt => {3981true // Break VM runloop3982}3983r => panic!("unexpected exit reason: {r:?}"),3984};39853986run_tests!(setup, regs_matcher, exit_matcher);3987}39883989#[test]3990fn test_hardware_breakpoint_with_isr() {3991global_asm_data!(3992setup_debug_handler_code,3993".code64",3994// Set up the stack3995"mov sp, 0x900",3996"mov rax, 0x1019", // Address of the instruction to trigger the breakpoint3997"mov dr0, rax",3998"mov rax, 0x00000001", // Enable the first breakpoint (local, exact) for execution3999"mov dr7, rax",4000"nop", // This should trigger the debug exception4001"nop",4002"hlt"4003);40044005global_asm_data!(4006debug_isr_code,4007".code64",4008"mov rbx, 0xf00dbabe", // Set a value to indicate the ISR was called4009"mov rax, 0",4010"mov dr7, rax", // Disable debugging again4011"mov rax, dr6",4012"iretq" // Return from interrupt4013);40144015global_asm_data!(4016null_isr_code,4017".code64",4018"mov rbx, 0xbaadf00d", // This ISR should never get called4019"hlt"4020);40214022let debug_isr_offset = 0x800;4023let null_isr_offset = 0x700;4024let debug_idt_entry = IdtEntry64::new(debug_isr_offset);4025let null_idt_entry = IdtEntry64::new(null_isr_offset);40264027let setup = TestSetup {4028assembly: setup_debug_handler_code::data().to_vec(),4029load_addr: GuestAddress(0x1000),4030mem_size: 0x20000,4031initial_regs: Regs {4032rip: 0x1000,4033rflags: 2 | FLAGS_IF_BIT,4034..Default::default()4035},4036extra_vm_setup: Some(Box::new(4037move 
|vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {4038let guest_mem = vm.get_memory();40394040guest_mem4041.write_at_addr(4042debug_isr_code::data().to_vec().as_bytes(),4043GuestAddress(debug_isr_offset),4044)4045.expect("Failed to write debug ISR entry");40464047guest_mem4048.write_at_addr(4049null_isr_code::data().to_vec().as_bytes(),4050GuestAddress(null_isr_offset),4051)4052.expect("Failed to write null ISR entry");40534054let mut long_mode_config = ModeConfig::default_long_mode();4055long_mode_config4056.set_idt_long_mode((0..256).map(|i| {4057if i == 0x01 {4058debug_idt_entry4059} else {4060null_idt_entry4061}4062}))4063.set_idt_base_addr(0x12_000);4064long_mode_config.enter_long_mode(vcpu, vm);4065},4066)),4067..Default::default()4068};40694070let regs_matcher = |_: HypervisorType, regs: &Regs, _: &Sregs| {4071assert_eq!(regs.rax & 1, 1, "Breakpoint #0 not hit");4072assert_eq!(4073regs.rip,40740x1000 + (setup_debug_handler_code::data().len() as u64),4075"rIP not at the right HLT"4076);4077assert_eq!(regs.rbx, 0xf00dbabe, "Debug ISR was not called");4078};40794080let exit_matcher = |_, exit: &VcpuExit, _: &mut dyn VcpuX86_64, _: &mut dyn Vm| match exit {4081VcpuExit::Hlt => {4082true // Break VM runloop4083}4084r => panic!("unexpected exit reason: {r:?}"),4085};40864087run_tests!(setup, regs_matcher, exit_matcher);4088}40894090#[test]4091fn test_debug_register_persistence() {4092global_asm_data!(4093test_debug_registers_code,4094".code64",4095"mov dr0, rax",4096"inc rax",4097"mov dr1, rax",4098"inc rax",4099"mov dr2, rax",4100"inc rax",4101"mov dr3, rax",4102// Perform HLT to cause VMEXIT4103"hlt",4104"mov r8, dr0",4105"mov r9, dr1",4106"mov r10, dr2",4107"mov r11, dr3",4108"hlt"4109);41104111let initial_dr_value: u64 = 0x12345678;41124113let setup = TestSetup {4114assembly: test_debug_registers_code::data().to_vec(),4115mem_size: 0x11000,4116load_addr: GuestAddress(0x1000),4117initial_regs: Regs {4118rax: initial_dr_value,4119rip: 0x1000,4120rflags: 
2,4121..Default::default()4122},4123extra_vm_setup: Some(Box::new(|vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {4124ModeConfig::default_long_mode().enter_long_mode(vcpu, vm);4125})),4126..Default::default()4127};41284129let mut hlt_count = 0;41304131run_tests!(4132setup,4133|_, regs, _| {4134assert_eq!(regs.r8, initial_dr_value, "DR0 value mismatch after VMEXIT");4135assert_eq!(4136regs.r9,4137initial_dr_value + 1,4138"DR1 value mismatch after VMEXIT"4139);4140assert_eq!(4141regs.r10,4142initial_dr_value + 2,4143"DR2 value mismatch after VMEXIT"4144);4145assert_eq!(4146regs.r11,4147initial_dr_value + 3,4148"DR3 value mismatch after VMEXIT"4149);4150},4151|_, exit, _, _: &mut dyn Vm| match exit {4152VcpuExit::Hlt => {4153hlt_count += 1;4154hlt_count > 1 // Halt execution after the second HLT4155}4156r => panic!("unexpected exit reason: {r:?}"),4157}4158);4159}41604161#[test]4162fn test_minimal_exception_injection() {4163// This test tries to write an invalid MSR, causing a General Protection exception to be4164// injected by the hypervisor (since MSR writes cause a VMEXIT). We run it in long mode since4165// real mode exception handling isn't always well supported (failed on Intel HAXM).4166mod assembly {4167use super::*;41684169// An ISR that handles any generic interrupt.4170global_asm_data!(4171pub isr_generic,4172".code64",4173// Set EBX to 888 to observe this is where we halted.4174"mov ebx, 888",4175"hlt"4176);41774178// An ISR that handles the General Protection fault specifically.4179global_asm_data!(4180pub isr_gp,4181".code64",4182// Set EBX to 999 to observe this is where we halted.4183"mov ebx, 999",4184"hlt"4185);41864187// Our VM entry (in long mode).4188global_asm_data!(4189pub init,4190".code64",4191// Set up the stack, which will be used when CPU transfers the control to the ISR. 
If4192// not set up, can cause faults (stack should be aligned).4193"mov esp, 0x900",4194// We will verify EBX, set it here first.4195"mov ebx, 777",4196// Should trigger GP fault when we try to write to MSR 0.4197"wrmsr",4198// We should never get here since we halt in the fault handlers.4199"hlt",4200);4201}42024203let mem_size: u64 = 0x20000;42044205let setup = TestSetup {4206initial_regs: Regs {4207// WRMSR will try to write to ECX, we set it to zero to point to an old read-only MSR4208// (IA32_P5_MC_ADDR).4209rcx: 0,4210// Intentionally not setting IF flag since exceptions don't check it.4211rflags: 2,4212..Default::default()4213},4214mem_size,4215extra_vm_setup: Some(Box::new(|vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {4216let start_addr: u64 = 0x1000;4217let guest_mem = vm.get_memory();42184219let isr_assembly = assembly::isr_generic::data().to_vec();4220let isr_assembly_len =4221u64::try_from(isr_assembly.len()).expect("ISR size should be within u64");42224223let isr_gp_assembly = assembly::isr_gp::data().to_vec();4224let isr_gp_assembly_len =4225u64::try_from(isr_gp_assembly.len()).expect("GP ISR size should be within u64");42264227let mut cur_addr = start_addr;42284229guest_mem4230.write_at_addr(&isr_assembly, GuestAddress(cur_addr))4231.expect("Failed to write ISR to guest memory");4232cur_addr += isr_assembly_len;42334234guest_mem4235.write_at_addr(&isr_gp_assembly, GuestAddress(cur_addr))4236.expect("Failed to write ISR to guest memory");4237cur_addr += isr_gp_assembly_len;42384239let mut regs = vcpu.get_regs().expect("Failed to get regs");4240regs.rip = cur_addr;4241vcpu.set_regs(®s).expect("Failed to set regs");42424243let init_assembly = assembly::init::data().to_vec();4244guest_mem4245.write_at_addr(&init_assembly, GuestAddress(cur_addr))4246.expect("Failed to write init assembly to guest memory");42474248let idt_entry_generic = IdtEntry64::new(start_addr);4249let idt_entry_gp = IdtEntry64::new(start_addr + isr_assembly_len);42504251let mut 
long_mode_config = ModeConfig::default_long_mode();4252long_mode_config4253.set_idt_long_mode((0..256).map(|i| {4254// GP handler is vector 13.4255if i == 0x0D {4256idt_entry_gp4257} else {4258idt_entry_generic4259}4260}))4261.set_idt_base_addr(0x12_000);4262long_mode_config.enter_long_mode(vcpu, vm);4263})),4264..Default::default()4265};42664267run_tests!(4268setup,4269|_, regs, _| {4270// If EBX is 999 the GP handler ran.4271assert_eq!(regs.rbx, 999);4272},4273|_, exit, _, _: &mut dyn Vm| matches!(exit, VcpuExit::Hlt)4274);4275}42764277#[test]4278fn test_pmode_segment_limit() {4279// This test configures 32-bit protected mode and verifies that segment limits are converted4280// correctly. The test setup configures a segment with the 20-bit limit field set to 0xFFFFF and4281// the 4096-byte granularity bit set, which should result in a 4 GB limit (0xFFFFFFFF).4282mod assembly {4283use super::*;42844285global_asm_data!(4286pub init,4287".code32",4288// Load the CS segment limit into EAX.4289"mov cx, cs",4290"lsl eax, cx",4291"hlt",4292);4293}42944295let mem_size: u64 = 0x20000;42964297let setup = TestSetup {4298initial_regs: Regs {4299..Default::default()4300},4301mem_size,4302extra_vm_setup: Some(Box::new(|vcpu: &mut dyn VcpuX86_64, vm: &mut dyn Vm| {4303ModeConfig::default_protected_mode().enter_protected_mode(vcpu, vm);43044305let guest_mem = vm.get_memory();43064307let mut regs = vcpu.get_regs().expect("Failed to get regs");4308regs.rax = 12345;4309regs.rip = 0x1000;4310vcpu.set_regs(®s).expect("Failed to set regs");43114312let init_assembly = assembly::init::data().to_vec();4313guest_mem4314.write_at_addr(&init_assembly, GuestAddress(0x1000))4315.expect("Failed to write init assembly to guest memory");4316})),4317..Default::default()4318};43194320run_tests!(4321setup,4322|_, regs, _| {4323// The output of the LSL instruction should be 4GB - 1.4324assert_eq!(regs.rax, 0xFFFFFFFF);4325},4326|_, exit, _, _: &mut dyn Vm| matches!(exit, 
VcpuExit::Hlt)4327);4328}432943304331