// Source: GitHub repository google/crosvm, path hypervisor/src/haxm/vm.rs
// (captured via a web share viewer; page navigation chrome removed)
1
// Copyright 2020 The ChromiumOS Authors
2
// Use of this source code is governed by a BSD-style license that can be
3
// found in the LICENSE file.
4
5
use core::ffi::c_void;
6
use std::cmp::Reverse;
7
use std::collections::BTreeMap;
8
use std::collections::BinaryHeap;
9
use std::sync::Arc;
10
11
use base::errno_result;
12
use base::error;
13
use base::ioctl_with_mut_ref;
14
use base::ioctl_with_ref;
15
use base::warn;
16
use base::AsRawDescriptor;
17
use base::Error;
18
use base::Event;
19
use base::MappedRegion;
20
use base::MmapError;
21
use base::Protection;
22
use base::RawDescriptor;
23
use base::Result;
24
use base::SafeDescriptor;
25
use fnv::FnvHashMap;
26
use libc::E2BIG;
27
use libc::EEXIST;
28
use libc::EFAULT;
29
use libc::EINVAL;
30
use libc::EIO;
31
use libc::ENOENT;
32
use libc::ENOSPC;
33
use libc::ENOTSUP;
34
use libc::EOVERFLOW;
35
use sync::Mutex;
36
use vm_memory::GuestAddress;
37
use vm_memory::GuestMemory;
38
#[cfg(windows)]
39
use win_util::win32_wide_string;
40
41
use super::*;
42
use crate::host_phys_addr_bits;
43
use crate::ClockState;
44
use crate::Datamatch;
45
use crate::DeviceKind;
46
use crate::Hypervisor;
47
use crate::HypervisorKind;
48
use crate::IoEventAddress;
49
use crate::MemCacheType;
50
use crate::MemSlot;
51
use crate::VcpuX86_64;
52
use crate::Vm;
53
use crate::VmCap;
54
use crate::VmX86_64;
55
56
/// A wrapper around creating and using a HAXM VM.
pub struct HaxmVm {
    /// Handle to the HAXM hypervisor device, used for hypervisor-scoped ioctls
    /// (e.g. capability queries) and cloned into vcpu-independent copies of this VM.
    haxm: Haxm,
    /// Numeric id assigned by HAXM when the VM was created; used to open the
    /// per-VM and per-vcpu device nodes.
    vm_id: u32,
    /// Descriptor of the per-VM HAXM device node; target of VM-scoped ioctls.
    descriptor: SafeDescriptor,
    /// The guest's main memory, registered with HAXM at construction time.
    guest_mem: GuestMemory,
    /// Memory regions added after construction, keyed by slot number.
    mem_regions: Arc<Mutex<BTreeMap<MemSlot, (GuestAddress, Box<dyn MappedRegion>)>>>,
    /// A min heap of MemSlot numbers that were used and then removed and can now be re-used
    mem_slot_gaps: Arc<Mutex<BinaryHeap<Reverse<MemSlot>>>>,
    // HAXM's implementation of ioevents makes several assumptions about how crosvm uses ioevents:
    // 1. All ioevents are registered during device setup, and thus can be cloned when the vm is
    //    cloned instead of locked in an Arc<Mutex<>>. This will make handling ioevents in each
    //    vcpu thread easier because no locks will need to be acquired.
    // 2. All ioevents use Datamatch::AnyLength. We don't bother checking the datamatch, which
    //    will make this faster.
    // 3. We only ever register one eventfd to each address. This simplifies our data structure.
    ioevents: FnvHashMap<IoEventAddress, Event>,
}
74
75
impl HaxmVm {
    /// Constructs a new `HaxmVm` using the given `Haxm` instance.
    ///
    /// Creates the VM via `HAX_IOCTL_CREATE_VM`, opens the per-VM device node for
    /// the returned id, and registers every region of `guest_mem` with HAXM.
    pub fn new(haxm: &Haxm, guest_mem: GuestMemory) -> Result<HaxmVm> {
        let mut vm_id: u32 = 0;
        // SAFETY:
        // Safe because we know descriptor is a real haxm descriptor as this module is the only
        // one that can make Haxm objects.
        let ret = unsafe { ioctl_with_mut_ref(haxm, HAX_IOCTL_CREATE_VM, &mut vm_id) };
        if ret != 0 {
            return errno_result();
        }

        // Haxm creates additional device paths when VMs are created
        let vm_descriptor = open_haxm_vm_device(USE_GHAXM.load(Ordering::Relaxed), vm_id)?;

        // Register each guest memory region with the new VM up front; later
        // regions go through `Vm::add_memory_region` instead.
        for region in guest_mem.regions() {
            // SAFETY:
            // Safe because the guest regions are guaranteed not to overlap.
            unsafe {
                set_user_memory_region(
                    &vm_descriptor,
                    false,
                    region.guest_addr.offset(),
                    region.size as u64,
                    MemoryRegionOp::Add(region.host_addr as *mut u8 as u64),
                )
            }?;
        }

        Ok(HaxmVm {
            vm_id,
            haxm: haxm.try_clone()?,
            descriptor: vm_descriptor,
            guest_mem,
            mem_regions: Arc::new(Mutex::new(BTreeMap::new())),
            mem_slot_gaps: Arc::new(Mutex::new(BinaryHeap::new())),
            ioevents: FnvHashMap::default(),
        })
    }

    /// Returns true if the raw capability bit(s) in `cap` are set in the `winfo`
    /// field reported by `HAX_IOCTL_CAPABILITY`.
    ///
    /// Returns false if the ioctl fails or if HAXM reports itself unusable
    /// (`wstatus == 0`).
    pub fn check_raw_capability(&self, cap: u32) -> bool {
        let mut capability_info = hax_capabilityinfo::default();
        let ret =
            // SAFETY:
            // Safe because we know that our file is a VM fd and we verify the return result.
            unsafe { ioctl_with_mut_ref(&self.haxm, HAX_IOCTL_CAPABILITY, &mut capability_info) };

        if ret != 0 {
            return false;
        }

        // If wstatus is zero, HAXM is not usable.
        // In this case, the winfo bits indicate why, rather than communicating capability
        // information.
        if capability_info.wstatus == 0 {
            return false;
        }

        (cap & capability_info.winfo as u32) != 0
    }

    /// Asks HAXM to write its log output for this VM to the file at `path`.
    ///
    /// Returns `E2BIG` if `path` does not fit in the fixed-size buffer of
    /// `hax_log_file`. On platforms/builds without the internal fork of HAXM,
    /// this is a no-op that returns `Ok(())`.
    pub fn register_log_file(&self, path: &str) -> Result<()> {
        // The IOCTL here is only available on internal fork of HAXM and only works on Windows.
        #[cfg(windows)]
        if get_use_ghaxm() {
            let mut log_file = hax_log_file::default();

            // Although it would be more efficient to do this check prior to allocating the log_file
            // struct, the code would be more complex and less maintainable. This is only ever
            // called once per-vm so the extra temporary memory and time shouldn't be a
            // problem.
            if path.len() >= log_file.path.len() {
                return Err(Error::new(E2BIG));
            }

            let wstring = &win32_wide_string(path);
            log_file.path[..wstring.len()].clone_from_slice(wstring);

            // SAFETY:
            // Safe because we know that our file is a VM fd and we verify the return result.
            let ret = unsafe { ioctl_with_ref(self, HAX_VM_IOCTL_REGISTER_LOG_FILE, &log_file) };

            if ret != 0 {
                return errno_result();
            }
        }
        Ok(())
    }
}
164
165
impl AsRawDescriptor for HaxmVm {
    /// Returns the raw descriptor of the per-VM HAXM device node.
    fn as_raw_descriptor(&self) -> RawDescriptor {
        self.descriptor.as_raw_descriptor()
    }
}
170
171
/// Operation to perform on a guest memory region via `set_user_memory_region`.
enum MemoryRegionOp {
    /// Map a memory region for the given host address.
    Add(u64),
    /// Remove the memory region.
    Remove,
}
177
178
unsafe fn set_user_memory_region(
179
descriptor: &SafeDescriptor,
180
read_only: bool,
181
guest_addr: u64,
182
size: u64,
183
op: MemoryRegionOp,
184
) -> Result<()> {
185
let (va, flags) = match op {
186
MemoryRegionOp::Add(va) => {
187
let mut flags = HAX_RAM_INFO_STANDALONE;
188
if read_only {
189
flags |= HAX_RAM_INFO_ROM
190
}
191
(va, flags)
192
}
193
MemoryRegionOp::Remove => (0, HAX_RAM_INFO_INVALID),
194
};
195
let ram_info = hax_set_ram_info2 {
196
pa_start: guest_addr,
197
size,
198
va,
199
flags,
200
..Default::default()
201
};
202
203
// SAFETY:
204
// Safe because we know that our file is a VM fd and we verify the return result.
205
let ret = ioctl_with_ref(descriptor, HAX_VM_IOCTL_SET_RAM2, &ram_info);
206
if ret != 0 {
207
return errno_result();
208
}
209
Ok(())
210
}
211
212
impl Vm for HaxmVm {
    /// Clones this VM handle. Shared state (`mem_regions`, `mem_slot_gaps`) is
    /// shared via `Arc`; descriptors and ioevents are duplicated.
    fn try_clone(&self) -> Result<Self> {
        let mut ioevents = FnvHashMap::default();
        for (addr, evt) in self.ioevents.iter() {
            ioevents.insert(*addr, evt.try_clone()?);
        }
        Ok(HaxmVm {
            vm_id: self.vm_id,
            haxm: self.haxm.try_clone()?,
            descriptor: self.descriptor.try_clone()?,
            guest_mem: self.guest_mem.clone(),
            mem_regions: self.mem_regions.clone(),
            mem_slot_gaps: self.mem_slot_gaps.clone(),
            ioevents,
        })
    }

    /// Not supported for HAXM.
    fn try_clone_descriptor(&self) -> Result<SafeDescriptor> {
        Err(Error::new(ENOTSUP))
    }

    fn hypervisor_kind(&self) -> HypervisorKind {
        HypervisorKind::Haxm
    }

    /// HAXM supports none of the optional `VmCap` capabilities.
    fn check_capability(&self, c: VmCap) -> bool {
        match c {
            VmCap::DirtyLog => false,
            VmCap::PvClock => false,
            VmCap::Protected => false,
            VmCap::EarlyInitCpuid => false,
            VmCap::BusLockDetect => false,
            VmCap::ReadOnlyMemoryRegion => false,
            VmCap::MemNoncoherentDma => false,
        }
    }

    fn get_memory(&self) -> &GuestMemory {
        &self.guest_mem
    }

    /// Maps `mem` into the guest at `guest_addr` and returns the slot number
    /// assigned to the region.
    ///
    /// Slot numbers are recycled from `mem_slot_gaps` when available; otherwise a
    /// fresh number past the initial guest-memory regions is used. Returns
    /// `EOVERFLOW` if the range wraps and `ENOSPC` if it overlaps guest memory.
    fn add_memory_region(
        &mut self,
        guest_addr: GuestAddress,
        mem: Box<dyn MappedRegion>,
        read_only: bool,
        _log_dirty_pages: bool,
        _cache: MemCacheType,
    ) -> Result<MemSlot> {
        let size = mem.size() as u64;
        let end_addr = guest_addr.checked_add(size).ok_or(Error::new(EOVERFLOW))?;
        if self.guest_mem.range_overlap(guest_addr, end_addr) {
            return Err(Error::new(ENOSPC));
        }
        let mut regions = self.mem_regions.lock();
        let mut gaps = self.mem_slot_gaps.lock();
        let slot = match gaps.pop() {
            Some(gap) => gap.0,
            None => (regions.len() + self.guest_mem.num_regions() as usize) as MemSlot,
        };

        // SAFETY:
        // Safe because we check that the given guest address is valid and has no overlaps. We also
        // know that the pointer and size are correct because the MemoryMapping interface ensures
        // this. We take ownership of the memory mapping so that it won't be unmapped until the slot
        // is removed.
        let res = unsafe {
            set_user_memory_region(
                &self.descriptor,
                read_only,
                guest_addr.offset(),
                size,
                MemoryRegionOp::Add(mem.as_ptr() as u64),
            )
        };

        if let Err(e) = res {
            // Return the unused slot number so it can be handed out again.
            gaps.push(Reverse(slot));
            return Err(e);
        }
        regions.insert(slot, (guest_addr, mem));
        Ok(slot)
    }

    /// Flushes `size` bytes at `offset` within the region in `slot` back to its
    /// backing store. Returns `ENOENT` if the slot is unknown.
    fn msync_memory_region(&mut self, slot: MemSlot, offset: usize, size: usize) -> Result<()> {
        let mut regions = self.mem_regions.lock();
        let (_, mem) = regions.get_mut(&slot).ok_or(Error::new(ENOENT))?;

        mem.msync(offset, size).map_err(|err| match err {
            MmapError::InvalidAddress => Error::new(EFAULT),
            MmapError::NotPageAligned => Error::new(EINVAL),
            MmapError::SystemCallFailed(e) => e,
            _ => Error::new(EIO),
        })
    }

    /// Unmaps the region in `slot` from the guest and returns its mapping to the
    /// caller. The freed slot number is pushed onto `mem_slot_gaps` for reuse.
    fn remove_memory_region(&mut self, slot: MemSlot) -> Result<Box<dyn MappedRegion>> {
        let mut regions = self.mem_regions.lock();

        if let Some((guest_addr, mem)) = regions.get(&slot) {
            // SAFETY:
            // Safe because the slot is checked against the list of memory slots.
            unsafe {
                set_user_memory_region(
                    &self.descriptor,
                    false,
                    guest_addr.offset(),
                    mem.size() as u64,
                    MemoryRegionOp::Remove,
                )?;
            }
            self.mem_slot_gaps.lock().push(Reverse(slot));
            Ok(regions.remove(&slot).unwrap().1)
        } else {
            Err(Error::new(ENOENT))
        }
    }

    fn create_device(&self, _kind: DeviceKind) -> Result<SafeDescriptor> {
        // Haxm does not support in-kernel devices
        Err(Error::new(libc::ENXIO))
    }

    fn get_dirty_log(&self, _slot: u32, _dirty_log: &mut [u8]) -> Result<()> {
        // Haxm does not support VmCap::DirtyLog
        Err(Error::new(libc::ENXIO))
    }

    /// Registers `evt` to be signaled when the guest accesses `addr`.
    ///
    /// Per the assumptions documented on `HaxmVm::ioevents`, only
    /// `Datamatch::AnyLength` is accepted (`ENOTSUP` otherwise) and at most one
    /// event may be registered per address (`EEXIST` otherwise).
    fn register_ioevent(
        &mut self,
        evt: &Event,
        addr: IoEventAddress,
        datamatch: Datamatch,
    ) -> Result<()> {
        if datamatch != Datamatch::AnyLength {
            error!("HAXM currently only supports Datamatch::AnyLength");
            return Err(Error::new(ENOTSUP));
        }

        if self.ioevents.contains_key(&addr) {
            error!("HAXM does not support multiple ioevents for the same address");
            return Err(Error::new(EEXIST));
        }

        self.ioevents.insert(addr, evt.try_clone()?);

        Ok(())
    }

    /// Removes the ioevent registered at `addr`, verifying that `evt` is the
    /// event that was registered there (`ENOENT` if not).
    fn unregister_ioevent(
        &mut self,
        evt: &Event,
        addr: IoEventAddress,
        datamatch: Datamatch,
    ) -> Result<()> {
        if datamatch != Datamatch::AnyLength {
            error!("HAXM only supports Datamatch::AnyLength");
            return Err(Error::new(ENOTSUP));
        }

        match self.ioevents.get(&addr) {
            Some(existing_evt) => {
                // evt should match the existing evt associated with addr
                if evt != existing_evt {
                    return Err(Error::new(ENOENT));
                }
                self.ioevents.remove(&addr);
            }

            None => {
                return Err(Error::new(ENOENT));
            }
        };
        Ok(())
    }

    /// Trigger any io events based on the memory mapped IO at `addr`. If the hypervisor does
    /// in-kernel IO event delivery, this is a no-op.
    fn handle_io_events(&self, addr: IoEventAddress, _data: &[u8]) -> Result<()> {
        if let Some(evt) = self.ioevents.get(&addr) {
            evt.signal()?;
        }
        Ok(())
    }

    /// Not supported for HAXM.
    fn enable_hypercalls(&mut self, _nr: u64, _count: usize) -> Result<()> {
        Err(Error::new(ENOTSUP))
    }

    fn get_pvclock(&self) -> Result<ClockState> {
        // Haxm does not support VmCap::PvClock
        Err(Error::new(libc::ENXIO))
    }

    fn set_pvclock(&self, _state: &ClockState) -> Result<()> {
        // Haxm does not support VmCap::PvClock
        Err(Error::new(libc::ENXIO))
    }

    /// Maps `fd` at `fd_offset` into the existing region in `slot` at `offset`.
    /// Returns `EINVAL` if the slot is unknown.
    fn add_fd_mapping(
        &mut self,
        slot: u32,
        offset: usize,
        size: usize,
        fd: &dyn AsRawDescriptor,
        fd_offset: u64,
        prot: Protection,
    ) -> Result<()> {
        let mut regions = self.mem_regions.lock();
        let (_, region) = regions.get_mut(&slot).ok_or(Error::new(EINVAL))?;

        match region.add_fd_mapping(offset, size, fd, fd_offset, prot) {
            Ok(()) => Ok(()),
            Err(MmapError::SystemCallFailed(e)) => Err(e),
            Err(_) => Err(Error::new(EIO)),
        }
    }

    /// Removes a sub-mapping previously added with `add_fd_mapping`.
    fn remove_mapping(&mut self, slot: u32, offset: usize, size: usize) -> Result<()> {
        let mut regions = self.mem_regions.lock();
        let (_, region) = regions.get_mut(&slot).ok_or(Error::new(EINVAL))?;

        match region.remove_mapping(offset, size) {
            Ok(()) => Ok(()),
            Err(MmapError::SystemCallFailed(e)) => Err(e),
            Err(_) => Err(Error::new(EIO)),
        }
    }

    fn handle_balloon_event(&mut self, _event: crate::BalloonEvent) -> Result<()> {
        // TODO(b/233773610): implement ballooning support in haxm
        warn!("Memory ballooning attempted but not supported on haxm hypervisor");
        // no-op
        Ok(())
    }

    fn get_guest_phys_addr_bits(&self) -> u8 {
        // Assume the guest physical address size is the same as the host.
        host_phys_addr_bits()
    }
}
453
454
impl VmX86_64 for HaxmVm {
    fn get_hypervisor(&self) -> &dyn HypervisorX86_64 {
        &self.haxm
    }

    /// Creates vcpu `id` via `HAX_VM_IOCTL_VCPU_CREATE`, opens its device node,
    /// and sets up the shared communication tunnel and io buffer used to
    /// exchange run state with the kernel driver.
    fn create_vcpu(&self, id: usize) -> Result<Box<dyn VcpuX86_64>> {
        // SAFETY:
        // Safe because we know that our file is a VM fd and we verify the return result.
        let fd = unsafe { ioctl_with_ref(self, HAX_VM_IOCTL_VCPU_CREATE, &(id as u32)) };
        if fd < 0 {
            return errno_result();
        }

        let descriptor =
            open_haxm_vcpu_device(USE_GHAXM.load(Ordering::Relaxed), self.vm_id, id as u32)?;

        let mut tunnel_info = hax_tunnel_info::default();

        // SAFETY:
        // Safe because we created tunnel_info and we check the return code for errors
        let ret = unsafe {
            ioctl_with_mut_ref(&descriptor, HAX_VCPU_IOCTL_SETUP_TUNNEL, &mut tunnel_info)
        };

        if ret != 0 {
            return errno_result();
        }

        Ok(Box::new(HaxmVcpu {
            descriptor,
            id,
            tunnel: tunnel_info.va as *mut hax_tunnel,
            io_buffer: tunnel_info.io_va as *mut c_void,
        }))
    }

    /// Sets the address of the three-page region in the VM's address space.
    /// This function is only necessary for 16 bit guests, which we do not support for HAXM.
    fn set_tss_addr(&self, _addr: GuestAddress) -> Result<()> {
        Ok(())
    }

    /// Sets the address of a one-page region in the VM's address space.
    /// This function is only necessary for 16 bit guests, which we do not support for HAXM.
    fn set_identity_map_addr(&self, _addr: GuestAddress) -> Result<()> {
        Ok(())
    }

    fn load_protected_vm_firmware(
        &mut self,
        _fw_addr: GuestAddress,
        _fw_max_size: u64,
    ) -> Result<()> {
        // Haxm does not support protected VMs
        Err(Error::new(libc::ENXIO))
    }
}
511
512
// TODO(b:241252288): Enable tests disabled with dummy feature flag - enable_haxm_tests.
// NOTE(review): these tests require a working HAXM installation on the host;
// they are gated behind the `enable_haxm_tests` feature for that reason.
#[cfg(test)]
#[cfg(feature = "enable_haxm_tests")]
mod tests {
    use std::time::Duration;

    use base::EventWaitResult;
    use base::MemoryMappingBuilder;
    use base::SharedMemory;

    use super::*;

    #[test]
    fn create_vm() {
        let haxm = Haxm::new().expect("failed to instantiate HAXM");
        let mem =
            GuestMemory::new(&[(GuestAddress(0), 0x1000)]).expect("failed to create guest memory");
        HaxmVm::new(&haxm, mem).expect("failed to create vm");
    }

    #[test]
    fn create_vcpu() {
        let haxm = Haxm::new().expect("failed to instantiate HAXM");
        let mem =
            GuestMemory::new(&[(GuestAddress(0), 0x1000)]).expect("failed to create guest memory");
        let vm = HaxmVm::new(&haxm, mem).expect("failed to create vm");
        vm.create_vcpu(0).expect("failed to create vcpu");
    }

    // Exercises the HAXM ioevent restrictions: AnyLength datamatch only, and at
    // most one event per address.
    #[test]
    fn register_ioevent() {
        let haxm = Haxm::new().expect("failed to create haxm");
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
        let mut vm = HaxmVm::new(&haxm, gm).expect("failed to create vm");
        let evt = Event::new().expect("failed to create event");
        let otherevt = Event::new().expect("failed to create event");
        vm.register_ioevent(&evt, IoEventAddress::Pio(0xf4), Datamatch::AnyLength)
            .unwrap();
        vm.register_ioevent(&evt, IoEventAddress::Mmio(0x1000), Datamatch::AnyLength)
            .unwrap();

        vm.register_ioevent(
            &otherevt,
            IoEventAddress::Mmio(0x1000),
            Datamatch::AnyLength,
        )
        .expect_err("HAXM should not allow you to register two events for the same address");

        vm.register_ioevent(
            &otherevt,
            IoEventAddress::Mmio(0x1000),
            Datamatch::U8(None),
        )
        .expect_err(
            "HAXM should not allow you to register ioevents with Datamatches other than AnyLength",
        );

        vm.register_ioevent(
            &otherevt,
            IoEventAddress::Mmio(0x1000),
            Datamatch::U32(Some(0xf6)),
        )
        .expect_err(
            "HAXM should not allow you to register ioevents with Datamatches other than AnyLength",
        );

        vm.unregister_ioevent(&otherevt, IoEventAddress::Pio(0xf4), Datamatch::AnyLength)
            .expect_err("unregistering an unknown event should fail");
        vm.unregister_ioevent(&evt, IoEventAddress::Pio(0xf5), Datamatch::AnyLength)
            .expect_err("unregistering an unknown PIO address should fail");
        vm.unregister_ioevent(&evt, IoEventAddress::Pio(0x1000), Datamatch::AnyLength)
            .expect_err("unregistering an unknown PIO address should fail");
        vm.unregister_ioevent(&evt, IoEventAddress::Mmio(0xf4), Datamatch::AnyLength)
            .expect_err("unregistering an unknown MMIO address should fail");
        vm.unregister_ioevent(&evt, IoEventAddress::Pio(0xf4), Datamatch::AnyLength)
            .unwrap();
        vm.unregister_ioevent(&evt, IoEventAddress::Mmio(0x1000), Datamatch::AnyLength)
            .unwrap();
    }

    // Verifies that handle_io_events signals exactly the event registered at the
    // accessed address and nothing else.
    #[test]
    fn handle_io_events() {
        let haxm = Haxm::new().expect("failed to create haxm");
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
        let mut vm = HaxmVm::new(&haxm, gm).expect("failed to create vm");
        let evt = Event::new().expect("failed to create event");
        let evt2 = Event::new().expect("failed to create event");
        vm.register_ioevent(&evt, IoEventAddress::Pio(0x1000), Datamatch::AnyLength)
            .unwrap();
        vm.register_ioevent(&evt2, IoEventAddress::Mmio(0x1000), Datamatch::AnyLength)
            .unwrap();

        // Check a pio address
        vm.handle_io_events(IoEventAddress::Pio(0x1000), &[])
            .expect("failed to handle_io_events");
        assert_ne!(
            evt.wait_timeout(Duration::from_millis(10))
                .expect("failed to read event"),
            EventWaitResult::TimedOut
        );
        assert_eq!(
            evt2.wait_timeout(Duration::from_millis(10))
                .expect("failed to read event"),
            EventWaitResult::TimedOut
        );
        // Check an mmio address
        vm.handle_io_events(IoEventAddress::Mmio(0x1000), &[])
            .expect("failed to handle_io_events");
        assert_eq!(
            evt.wait_timeout(Duration::from_millis(10))
                .expect("failed to read event"),
            EventWaitResult::TimedOut
        );
        assert_ne!(
            evt2.wait_timeout(Duration::from_millis(10))
                .expect("failed to read event"),
            EventWaitResult::TimedOut
        );

        // Check an address that does not match any registered ioevents
        vm.handle_io_events(IoEventAddress::Pio(0x1001), &[])
            .expect("failed to handle_io_events");
        assert_eq!(
            evt.wait_timeout(Duration::from_millis(10))
                .expect("failed to read event"),
            EventWaitResult::TimedOut
        );
        assert_eq!(
            evt2.wait_timeout(Duration::from_millis(10))
                .expect("failed to read event"),
            EventWaitResult::TimedOut
        );
    }

    // Round-trips a region through add_memory_region/remove_memory_region and
    // checks the same mapping comes back.
    #[test]
    fn remove_memory() {
        let haxm = Haxm::new().unwrap();
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
        let mut vm = HaxmVm::new(&haxm, gm).unwrap();
        let mem_size = 0x1000;
        let shm = SharedMemory::new("test", mem_size as u64).unwrap();
        let mem = MemoryMappingBuilder::new(mem_size)
            .from_shared_memory(&shm)
            .build()
            .unwrap();
        let mem_ptr = mem.as_ptr();
        let slot = vm
            .add_memory_region(
                GuestAddress(0x1000),
                Box::new(mem),
                false,
                false,
                MemCacheType::CacheCoherent,
            )
            .unwrap();
        let removed_mem = vm.remove_memory_region(slot).unwrap();
        assert_eq!(removed_mem.size(), mem_size);
        assert_eq!(removed_mem.as_ptr(), mem_ptr);
    }

    #[cfg(windows)]
    #[test]
    fn register_log_file() {
        let haxm = Haxm::new().unwrap();
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
        let vm = HaxmVm::new(&haxm, gm).unwrap();

        // Skip silently when the running HAXM build lacks log-file support.
        if !vm.check_raw_capability(HAX_CAP_VM_LOG) {
            return;
        }

        let dir = tempfile::TempDir::new().unwrap();
        let mut file_path = dir.path().to_owned();
        file_path.push("test");

        vm.register_log_file(file_path.to_str().unwrap())
            .expect("failed to register log file");

        let vcpu = vm.create_vcpu(0).expect("failed to create vcpu");

        // Setting cpuid will force some logs
        let cpuid = haxm.get_supported_cpuid().unwrap();
        vcpu.set_cpuid(&cpuid).expect("failed to set cpuid");

        assert!(file_path.exists());
    }
}
699
700