Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
google
GitHub Repository: google/crosvm
Path: blob/main/hypervisor/src/haxm/vcpu.rs
5394 views
1
// Copyright 2020 The ChromiumOS Authors
2
// Use of this source code is governed by a BSD-style license that can be
3
// found in the LICENSE file.
4
5
use core::ffi::c_void;
6
use std::arch::x86_64::CpuidResult;
7
use std::collections::BTreeMap;
8
use std::mem::size_of;
9
10
use base::errno_result;
11
use base::ioctl;
12
use base::ioctl_with_mut_ref;
13
use base::ioctl_with_ptr_sized;
14
use base::ioctl_with_ref;
15
use base::warn;
16
use base::AsRawDescriptor;
17
use base::Error;
18
use base::RawDescriptor;
19
use base::Result;
20
use base::SafeDescriptor;
21
use data_model::vec_with_array_field;
22
use libc::EINVAL;
23
use libc::ENOENT;
24
use libc::ENXIO;
25
use libc::EOPNOTSUPP;
26
use snapshot::AnySnapshot;
27
use vm_memory::GuestAddress;
28
29
use super::*;
30
use crate::CpuId;
31
use crate::CpuIdEntry;
32
use crate::DebugRegs;
33
use crate::DescriptorTable;
34
use crate::Fpu;
35
use crate::FpuReg;
36
use crate::IoOperation;
37
use crate::IoParams;
38
use crate::Regs;
39
use crate::Segment;
40
use crate::Sregs;
41
use crate::Vcpu;
42
use crate::VcpuExit;
43
use crate::VcpuShutdownError;
44
use crate::VcpuShutdownErrorKind;
45
use crate::VcpuX86_64;
46
use crate::Xsave;
47
48
// HAXM exit reasons, reported in hax_tunnel.exit_status after a RUN ioctl.

// IO port request
const HAX_EXIT_IO: u32 = 1;
// MMIO instruction emulation, should not happen anymore, replaced with
// HAX_EXIT_FAST_MMIO
#[allow(dead_code)]
const HAX_EXIT_MMIO: u32 = 2;
// Real mode emulation when unrestricted guest is disabled
#[allow(dead_code)]
const HAX_EXIT_REALMODE: u32 = 3;
// Interrupt window open, crosvm can inject an interrupt now.
// Also used when vcpu thread receives a signal
const HAX_EXIT_INTERRUPT: u32 = 4;
// Unknown vmexit, mostly trigger reboot
#[allow(dead_code)]
const HAX_EXIT_UNKNOWN: u32 = 5;
// HALT from guest
const HAX_EXIT_HLT: u32 = 6;
// VCPU panic, e.g. because of a triple fault in the guest
const HAX_EXIT_VCPU_PANIC: u32 = 7;
// Paused by crosvm setting exit_reason to HAX_EXIT_PAUSED before entry
pub(crate) const HAX_EXIT_PAUSED: u32 = 8;
// MMIO instruction emulation through io_buffer
const HAX_EXIT_FAST_MMIO: u32 = 9;
// Page fault that was not able to be handled by HAXM
const HAX_EXIT_PAGEFAULT: u32 = 10;
// A debug exception caused a vmexit
const HAX_EXIT_DEBUG: u32 = 11;

// HAXM exit directions. The PIO pair matches hax_tunnel's io.direction (u32);
// the MMIO pair matches hax_fastmmio's direction (u8).
const HAX_EXIT_DIRECTION_PIO_IN: u32 = 1;
const HAX_EXIT_DIRECTION_PIO_OUT: u32 = 0;
const HAX_EXIT_DIRECTION_MMIO_READ: u8 = 0;
const HAX_EXIT_DIRECTION_MMIO_WRITE: u8 = 1;
/// A single HAXM-backed vCPU, holding the vCPU device handle plus the raw
/// pointers to the kernel-shared communication regions mapped for it.
pub struct HaxmVcpu {
    // Handle to the HAXM vCPU device; ioctls are issued against it.
    pub(super) descriptor: SafeDescriptor,
    // Index of this vCPU within the VM.
    pub(super) id: usize,
    // Shared hax_tunnel struct: carries exit_status/exit_reason and the
    // per-exit parameters written by the kernel.
    pub(super) tunnel: *mut hax_tunnel,
    // Shared I/O buffer used to transfer PIO and fast-MMIO data.
    pub(super) io_buffer: *mut c_void,
}
// TODO(b/315998194): Add safety comment
// NOTE(review): presumably sound because `tunnel` and `io_buffer` point at
// kernel-established mappings that live as long as the vCPU, and access is
// mediated by the RUN ioctl — confirm before documenting as SAFETY.
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl Send for HaxmVcpu {}
// TODO(b/315998194): Add safety comment
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl Sync for HaxmVcpu {}
impl AsRawDescriptor for HaxmVcpu {
    /// Returns the raw descriptor of the underlying HAXM vCPU device.
    fn as_raw_descriptor(&self) -> RawDescriptor {
        self.descriptor.as_raw_descriptor()
    }
}
impl HaxmVcpu {
    /// Reads the full register state of this vCPU from HAXM.
    ///
    /// EFER is handled via the MSR interface rather than the regs ioctl (note
    /// the explicit `get_msr` below), and merged into the returned state.
    fn get_vcpu_state(&self) -> Result<VcpuState> {
        let mut state = vcpu_state_t::default();

        // SAFETY: trivially safe with return value checked.
        let ret = unsafe { ioctl_with_mut_ref(self, HAX_VCPU_GET_REGS, &mut state) };
        if ret != 0 {
            return errno_result();
        }

        // Also read efer MSR
        state.efer = self.get_msr(IA32_EFER)? as u32;

        Ok(VcpuState { state })
    }

    /// Writes the full register state of this vCPU back to HAXM.
    ///
    /// Mirrors `get_vcpu_state`: the bulk of the state goes through the regs
    /// ioctl, and EFER is applied separately through the MSR interface.
    fn set_vcpu_state(&self, state: &mut VcpuState) -> Result<()> {
        // SAFETY: trivially safe with return value checked.
        let ret = unsafe { ioctl_with_mut_ref(self, HAX_VCPU_SET_REGS, &mut state.state) };
        if ret != 0 {
            return errno_result();
        }

        // Also set efer MSR
        self.set_msr(IA32_EFER, state.state.efer as u64)
    }
}
impl Vcpu for HaxmVcpu {
    /// Makes a shallow clone of this `Vcpu`.
    ///
    /// The device descriptor is duplicated; the raw `tunnel` and `io_buffer`
    /// pointers are copied as-is, so both clones share the same mappings.
    fn try_clone(&self) -> Result<Self> {
        Ok(HaxmVcpu {
            descriptor: self.descriptor.try_clone()?,
            id: self.id,
            tunnel: self.tunnel,
            io_buffer: self.io_buffer,
        })
    }

    /// Upcasts to the hypervisor-neutral `Vcpu` trait object.
    fn as_vcpu(&self) -> &dyn Vcpu {
        self
    }

    /// Returns the vcpu id.
    fn id(&self) -> usize {
        self.id
    }

    /// Sets the bit that requests an immediate exit.
    fn set_immediate_exit(&self, exit: bool) {
        // SAFETY:
        // Safe because we know the tunnel is a pointer to a hax_tunnel and we know its size.
        // Crosvm's HAXM implementation does not use the exit_reason, so it's fine if we
        // overwrite it.
        unsafe {
            (*self.tunnel).exit_reason = if exit { HAX_EXIT_PAUSED } else { 0 };
        }
    }

    /// Signals to the hypervisor that this guest is being paused by userspace.
    /// No extra work is needed for HAXM, so this is a no-op.
    fn on_suspend(&self) -> Result<()> {
        Ok(())
    }

    /// Enables a hypervisor-specific extension on this Vcpu. `cap` is a constant defined by the
    /// hypervisor API. `args` are the arguments for enabling the feature, if any.
    unsafe fn enable_raw_capability(&self, _cap: u32, _args: &[u64; 4]) -> Result<()> {
        // Haxm does not support enable_capability
        Err(Error::new(libc::ENXIO))
    }

    /// This function should be called after `Vcpu::run` returns `VcpuExit::Mmio`.
    ///
    /// Once called, it will determine whether a mmio read or mmio write was the reason for the mmio
    /// exit, call `handle_fn` with the respective IoOperation to perform the mmio read or
    /// write, and set the return data in the vcpu so that the vcpu can resume running.
    fn handle_mmio(&self, handle_fn: &mut dyn FnMut(IoParams) -> Result<()>) -> Result<()> {
        // SAFETY:
        // Safe because we know we mapped enough memory to hold the hax_tunnel struct because the
        // kernel told us how large it was.
        // Verify that the handler is called for mmio context only.
        unsafe {
            assert!((*self.tunnel).exit_status == HAX_EXIT_FAST_MMIO);
        }
        // For fast MMIO the shared io_buffer holds a hax_fastmmio describing the access.
        let mmio = self.io_buffer as *mut hax_fastmmio;
        let (address, size, direction) =
            // SAFETY:
            // Safe because the exit_reason (which comes from the kernel) told us which
            // union field to use.
            unsafe { ((*mmio).gpa, (*mmio).size as usize, (*mmio).direction) };
        // SAFETY:
        // Safe because the exit_reason (which comes from the kernel) told us which
        // union field to use. We use `addr_of_mut!()` to get a potentially unaligned u64 pointer,
        // but it is then cast via a u8 pointer to a u8 slice, which has no alignment requirements.
        let data = unsafe {
            assert!(size <= size_of::<u64>());
            std::slice::from_raw_parts_mut(
                std::ptr::addr_of_mut!((*mmio).__bindgen_anon_1.value) as *mut u8,
                size,
            )
        };

        match direction {
            HAX_EXIT_DIRECTION_MMIO_READ => {
                handle_fn(IoParams {
                    address,
                    operation: IoOperation::Read(data),
                })
                // We have to unwrap/panic here because HAXM doesn't have a
                // facility to inject a GP fault here. Once HAXM can do that, we
                // should inject a GP fault & bubble the error.
                .unwrap();
                Ok(())
            }
            HAX_EXIT_DIRECTION_MMIO_WRITE => {
                handle_fn(IoParams {
                    address,
                    operation: IoOperation::Write(data),
                })
                // Similarly to the read direction, we MUST panic here.
                .unwrap();
                Ok(())
            }
            _ => Err(Error::new(EINVAL)),
        }
    }

    /// This function should be called after `Vcpu::run` returns `VcpuExit::Io`.
    ///
    /// Once called, it will determine whether an io in or io out was the reason for the io exit,
    /// call `handle_fn` with the respective IoOperation to perform the io in or io out,
    /// and set the return data in the vcpu so that the vcpu can resume running.
    #[allow(clippy::cast_ptr_alignment)]
    fn handle_io(&self, handle_fn: &mut dyn FnMut(IoParams)) -> Result<()> {
        // SAFETY:
        // Safe because we know we mapped enough memory to hold the hax_tunnel struct because the
        // kernel told us how large it was.
        // Verify that the handler is called for io context only.
        unsafe {
            assert!((*self.tunnel).exit_status == HAX_EXIT_IO);
        }
        // SAFETY:
        // Safe because the exit_reason (which comes from the kernel) told us which
        // union field to use.
        let io = unsafe { (*self.tunnel).__bindgen_anon_1.io };
        let address = io.port.into();
        let size = io.size as usize;
        let count = io.count as usize;
        // String/repeated PIO instructions transfer `count` elements of `size`
        // bytes each, packed back-to-back in the io_buffer.
        let data_len = count * size;
        // SAFETY:
        // Safe because the exit_reason (which comes from the kernel) told us that this is port io,
        // where the iobuf can be treated as a *u8
        let buffer: &mut [u8] =
            unsafe { std::slice::from_raw_parts_mut(self.io_buffer as *mut u8, data_len) };
        let data_chunks = buffer.chunks_mut(size);

        match io.direction as u32 {
            HAX_EXIT_DIRECTION_PIO_IN => {
                // Invoke the handler once per element so each `in` is a
                // separate device read.
                for data in data_chunks {
                    handle_fn(IoParams {
                        address,
                        operation: IoOperation::Read(data),
                    });
                }
                Ok(())
            }
            HAX_EXIT_DIRECTION_PIO_OUT => {
                for data in data_chunks {
                    handle_fn(IoParams {
                        address,
                        operation: IoOperation::Write(data),
                    });
                }
                Ok(())
            }
            _ => Err(Error::new(EINVAL)),
        }
    }

    #[allow(clippy::cast_ptr_alignment)]
    // The pointer is page aligned so casting to a different type is well defined, hence the clippy
    // allow attribute.
    fn run(&mut self) -> Result<VcpuExit> {
        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        let ret = unsafe { ioctl(self, HAX_VCPU_IOCTL_RUN) };
        if ret != 0 {
            return errno_result();
        }

        // SAFETY:
        // Safe because we know we mapped enough memory to hold the hax_tunnel struct because the
        // kernel told us how large it was.
        let exit_status = unsafe { (*self.tunnel).exit_status };

        // Translate the HAXM exit code into the hypervisor-neutral VcpuExit.
        match exit_status {
            HAX_EXIT_IO => Ok(VcpuExit::Io),
            HAX_EXIT_INTERRUPT => Ok(VcpuExit::Intr),
            HAX_EXIT_HLT => Ok(VcpuExit::Hlt),
            HAX_EXIT_VCPU_PANIC => {
                // SAFETY:
                // 1) we mapped enough memory to hold the hax_tunnel struct because the kernel told
                //    us how large it was. That memory is still alive here.
                let panic_reason = unsafe { (*self.tunnel).vcpu_panic_reason };
                Ok(VcpuExit::Shutdown(Err(VcpuShutdownError::new(
                    VcpuShutdownErrorKind::Other,
                    panic_reason as u64,
                ))))
            }
            HAX_EXIT_FAST_MMIO => Ok(VcpuExit::Mmio),
            HAX_EXIT_PAGEFAULT => Ok(VcpuExit::Exception),
            HAX_EXIT_DEBUG => Ok(VcpuExit::Debug),
            // NOTE(review): a pause requested via set_immediate_exit surfaces
            // as Exception here — confirm callers expect this mapping.
            HAX_EXIT_PAUSED => Ok(VcpuExit::Exception),
            r => panic!("unknown exit reason: {r}"),
        }
    }
}
impl VcpuX86_64 for HaxmVcpu {
    /// Sets or clears the flag that requests the VCPU to exit when it becomes possible to inject
    /// interrupts into the guest.
    fn set_interrupt_window_requested(&self, requested: bool) {
        // SAFETY:
        // Safe because we know we mapped enough memory to hold the hax_tunnel struct because the
        // kernel told us how large it was.
        unsafe {
            (*self.tunnel).request_interrupt_window = i32::from(requested);
        }
    }

    /// Checks if we can inject an interrupt into the VCPU.
    fn ready_for_interrupt(&self) -> bool {
        // SAFETY:
        // Safe because we know we mapped enough memory to hold the hax_tunnel struct because the
        // kernel told us how large it was.
        unsafe { (*self.tunnel).ready_for_interrupt_injection != 0 }
    }

    /// Injects interrupt vector `irq` into the VCPU.
    fn interrupt(&self, irq: u8) -> Result<()> {
        // The ioctl takes the vector widened to u32.
        let irq: u32 = irq.into();
        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        let ret = unsafe { ioctl_with_ref(self, HAX_VCPU_IOCTL_INTERRUPT, &irq) };
        if ret != 0 {
            return errno_result();
        }
        Ok(())
    }

    /// Injects a non-maskable interrupt into the VCPU.
    /// HAXM has no NMI injection facility, so this only logs a warning.
    fn inject_nmi(&self) -> Result<()> {
        warn!("HAXM does not support injecting NMIs");
        Ok(())
    }

    /// Gets the VCPU general purpose registers.
    fn get_regs(&self) -> Result<Regs> {
        Ok(self.get_vcpu_state()?.get_regs())
    }

    /// Sets the VCPU general purpose registers.
    ///
    /// Implemented as read-modify-write of the whole vcpu state, so the other
    /// register groups are preserved.
    fn set_regs(&self, regs: &Regs) -> Result<()> {
        let mut state = self.get_vcpu_state()?;
        state.set_regs(regs);
        self.set_vcpu_state(&mut state)?;
        Ok(())
    }

    /// Gets the VCPU special registers.
    fn get_sregs(&self) -> Result<Sregs> {
        Ok(self.get_vcpu_state()?.get_sregs())
    }

    /// Sets the VCPU special registers (read-modify-write, like `set_regs`).
    fn set_sregs(&self, sregs: &Sregs) -> Result<()> {
        let mut state = self.get_vcpu_state()?;
        state.set_sregs(sregs);
        self.set_vcpu_state(&mut state)?;
        Ok(())
    }

    /// Gets the VCPU FPU registers.
    fn get_fpu(&self) -> Result<Fpu> {
        let mut fpu = fx_layout::default();
        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        let ret = unsafe { ioctl_with_mut_ref(self, HAX_VCPU_IOCTL_GET_FPU, &mut fpu) };

        if ret != 0 {
            return errno_result();
        }

        Ok(Fpu::from(&fpu))
    }

    /// Sets the VCPU FPU registers.
    ///
    /// Reads the current FPU state first so the kernel-owned `mxcsr_mask`
    /// can be carried over into the new state.
    fn set_fpu(&self, fpu: &Fpu) -> Result<()> {
        let mut current_fpu = fx_layout::default();
        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        let ret = unsafe { ioctl_with_mut_ref(self, HAX_VCPU_IOCTL_GET_FPU, &mut current_fpu) };

        if ret != 0 {
            return errno_result();
        }

        let mut new_fpu = fx_layout::from(fpu);

        // the mxcsr mask is something that isn't part of the Fpu state, so we make the new
        // fpu state's mxcsr_mask match its current value
        new_fpu.mxcsr_mask = current_fpu.mxcsr_mask;

        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        let ret = unsafe { ioctl_with_ref(self, HAX_VCPU_IOCTL_SET_FPU, &new_fpu) };

        if ret != 0 {
            return errno_result();
        }

        Ok(())
    }

    /// HAXM does not expose XSAVE state; always unsupported.
    fn get_xsave(&self) -> Result<Xsave> {
        Err(Error::new(EOPNOTSUPP))
    }

    fn set_xsave(&self, _xsave: &Xsave) -> Result<()> {
        Err(Error::new(EOPNOTSUPP))
    }

    /// HAXM has no hypervisor-specific snapshot state; always unsupported.
    fn get_hypervisor_specific_state(&self) -> Result<AnySnapshot> {
        Err(Error::new(EOPNOTSUPP))
    }

    fn set_hypervisor_specific_state(&self, _data: AnySnapshot) -> Result<()> {
        Err(Error::new(EOPNOTSUPP))
    }

    /// Gets the VCPU debug registers.
    fn get_debugregs(&self) -> Result<DebugRegs> {
        Ok(self.get_vcpu_state()?.get_debugregs())
    }

    /// Sets the VCPU debug registers (read-modify-write, like `set_regs`).
    fn set_debugregs(&self, debugregs: &DebugRegs) -> Result<()> {
        let mut state = self.get_vcpu_state()?;
        state.set_debugregs(debugregs);
        self.set_vcpu_state(&mut state)?;
        Ok(())
    }

    /// Gets the VCPU extended control registers.
    fn get_xcrs(&self) -> Result<BTreeMap<u32, u64>> {
        // Haxm does not support getting XCRs
        Err(Error::new(libc::ENXIO))
    }

    /// Sets a VCPU extended control register.
    fn set_xcr(&self, _xcr_index: u32, _value: u64) -> Result<()> {
        // Haxm does not support setting XCRs
        Err(Error::new(libc::ENXIO))
    }

    /// Gets the value of one model-specific register.
    fn get_msr(&self, msr_index: u32) -> Result<u64> {
        // Build a single-entry MSR request; the kernel fills in the value.
        let mut msr_data = hax_msr_data {
            nr_msr: 1,
            ..Default::default()
        };
        msr_data.entries[0].entry = u64::from(msr_index);

        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        let ret = unsafe { ioctl_with_mut_ref(self, HAX_VCPU_IOCTL_GET_MSRS, &mut msr_data) };
        if ret != 0 {
            return errno_result();
        }

        Ok(msr_data.entries[0].value)
    }

    /// Reading all MSRs in bulk is not supported by this backend.
    fn get_all_msrs(&self) -> Result<BTreeMap<u32, u64>> {
        Err(Error::new(EOPNOTSUPP))
    }

    /// Sets the value of one model-specific register.
    fn set_msr(&self, msr_index: u32, value: u64) -> Result<()> {
        let mut msr_data = hax_msr_data {
            nr_msr: 1,
            ..Default::default()
        };
        msr_data.entries[0].entry = u64::from(msr_index);
        msr_data.entries[0].value = value;

        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        let ret = unsafe { ioctl_with_mut_ref(self, HAX_VCPU_IOCTL_SET_MSRS, &mut msr_data) };
        if ret != 0 {
            return errno_result();
        }

        Ok(())
    }

    /// Sets up the data returned by the CPUID instruction.
    ///
    /// hax_cpuid is a variable-length struct: a header followed by `total`
    /// hax_cpuid_entry items, so it is built in a raw byte-backed Vec and
    /// passed to the ioctl with an explicit size.
    fn set_cpuid(&self, cpuid: &CpuId) -> Result<()> {
        let total = cpuid.cpu_id_entries.len();
        let mut hax = vec_with_array_field::<hax_cpuid, hax_cpuid_entry>(total);
        hax[0].total = total as u32;
        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        let entries = unsafe { hax[0].entries.as_mut_slice(total) };
        for (i, e) in cpuid.cpu_id_entries.iter().enumerate() {
            entries[i] = hax_cpuid_entry::from(e);
        }

        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        let ret = unsafe {
            ioctl_with_ptr_sized(
                self,
                HAX_VCPU_IOCTL_SET_CPUID,
                hax.as_ptr(),
                size_of::<hax_cpuid>() + total * size_of::<hax_cpuid_entry>(),
            )
        };

        if ret != 0 {
            return errno_result();
        }
        Ok(())
    }

    /// This function should be called after `Vcpu::run` returns `VcpuExit::Cpuid`, and `entry`
    /// should represent the result of emulating the CPUID instruction. The `handle_cpuid` function
    /// will then set the appropriate registers on the vcpu.
    /// HAXM does not support the VcpuExit::Cpuid exit type.
    fn handle_cpuid(&mut self, _entry: &CpuIdEntry) -> Result<()> {
        Err(Error::new(ENXIO))
    }

    fn set_guest_debug(&self, _addrs: &[GuestAddress], _enable_singlestep: bool) -> Result<()> {
        // TODO(b/173807302): Implement this
        Err(Error::new(ENOENT))
    }

    fn restore_timekeeping(&self, _host_tsc_reference_moment: u64, tsc_offset: u64) -> Result<()> {
        // HAXM sets TSC_OFFSET based on what we set TSC to; however, it does
        // not yet handle syncing. This means it computes
        // TSC_OFFSET = new_tsc - rdtsc(), so if we want to target the same
        // offset value, we need new_tsc = rdtsc() + target_offset. This is what
        // Self::set_tsc_offset does.
        //
        // TODO(b/311793539): haxm doesn't yet support syncing TSCs across VCPUs
        // if the TSC value is non-zero. Once we have that support, we can
        // switch to calling Self::set_tsc_value here with the common host
        // reference moment. (Alternatively, we may just expose a way to set the
        // offset directly.)
        self.set_tsc_offset(tsc_offset)
    }
}
/// Thin wrapper around HAXM's `vcpu_state_t` that provides conversions
/// between the raw bindgen layout and crosvm's register types.
struct VcpuState {
    state: vcpu_state_t,
}
impl VcpuState {
    /// Extracts the general purpose registers from the raw state.
    ///
    /// Each GPR lives inside a distinct bindgen-generated anonymous union;
    /// the `__bindgen_anon_N` paths below select the 64-bit view of each one.
    fn get_regs(&self) -> Regs {
        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        unsafe {
            Regs {
                rax: self.state.__bindgen_anon_1.__bindgen_anon_1.__bindgen_anon_1.rax,
                rbx: self.state.__bindgen_anon_1.__bindgen_anon_1.__bindgen_anon_4.rbx,
                rcx: self.state.__bindgen_anon_1.__bindgen_anon_1.__bindgen_anon_2.rcx,
                rdx: self.state.__bindgen_anon_1.__bindgen_anon_1.__bindgen_anon_3.rdx,
                rsi: self.state.__bindgen_anon_1.__bindgen_anon_1.__bindgen_anon_7.rsi,
                rdi: self.state.__bindgen_anon_1.__bindgen_anon_1.__bindgen_anon_8.rdi,
                rsp: self.state.__bindgen_anon_1.__bindgen_anon_1.__bindgen_anon_5.rsp,
                rbp: self.state.__bindgen_anon_1.__bindgen_anon_1.__bindgen_anon_6.rbp,
                r8: self.state.__bindgen_anon_1.__bindgen_anon_1.r8,
                r9: self.state.__bindgen_anon_1.__bindgen_anon_1.r9,
                r10: self.state.__bindgen_anon_1.__bindgen_anon_1.r10,
                r11: self.state.__bindgen_anon_1.__bindgen_anon_1.r11,
                r12: self.state.__bindgen_anon_1.__bindgen_anon_1.r12,
                r13: self.state.__bindgen_anon_1.__bindgen_anon_1.r13,
                r14: self.state.__bindgen_anon_1.__bindgen_anon_1.r14,
                r15: self.state.__bindgen_anon_1.__bindgen_anon_1.r15,
                rip: self.state.__bindgen_anon_2.rip,
                rflags: self.state.__bindgen_anon_3.rflags,
            }
        }
    }

    /// Writes the general purpose registers into the raw state, leaving all
    /// other fields untouched.
    fn set_regs(&mut self, regs: &Regs) {
        self.state.__bindgen_anon_1.__bindgen_anon_1.__bindgen_anon_1.rax = regs.rax;
        self.state.__bindgen_anon_1.__bindgen_anon_1.__bindgen_anon_4.rbx = regs.rbx;
        self.state.__bindgen_anon_1.__bindgen_anon_1.__bindgen_anon_2.rcx = regs.rcx;
        self.state.__bindgen_anon_1.__bindgen_anon_1.__bindgen_anon_3.rdx = regs.rdx;
        self.state.__bindgen_anon_1.__bindgen_anon_1.__bindgen_anon_7.rsi = regs.rsi;
        self.state.__bindgen_anon_1.__bindgen_anon_1.__bindgen_anon_8.rdi = regs.rdi;
        self.state.__bindgen_anon_1.__bindgen_anon_1.__bindgen_anon_5.rsp = regs.rsp;
        self.state.__bindgen_anon_1.__bindgen_anon_1.__bindgen_anon_6.rbp = regs.rbp;
        self.state.__bindgen_anon_1.__bindgen_anon_1.r8 = regs.r8;
        self.state.__bindgen_anon_1.__bindgen_anon_1.r9 = regs.r9;
        self.state.__bindgen_anon_1.__bindgen_anon_1.r10 = regs.r10;
        self.state.__bindgen_anon_1.__bindgen_anon_1.r11 = regs.r11;
        self.state.__bindgen_anon_1.__bindgen_anon_1.r12 = regs.r12;
        self.state.__bindgen_anon_1.__bindgen_anon_1.r13 = regs.r13;
        self.state.__bindgen_anon_1.__bindgen_anon_1.r14 = regs.r14;
        self.state.__bindgen_anon_1.__bindgen_anon_1.r15 = regs.r15;
        self.state.__bindgen_anon_2.rip = regs.rip;
        self.state.__bindgen_anon_3.rflags = regs.rflags;
    }

    /// Extracts the special registers (segments, descriptor tables, control
    /// registers, EFER) from the raw state.
    fn get_sregs(&self) -> Sregs {
        Sregs {
            cs: Segment::from(&self.state.cs),
            ds: Segment::from(&self.state.ds),
            es: Segment::from(&self.state.es),
            fs: Segment::from(&self.state.fs),
            gs: Segment::from(&self.state.gs),
            ss: Segment::from(&self.state.ss),
            tr: Segment::from(&self.state.tr),
            ldt: Segment::from(&self.state.ldt),
            gdt: DescriptorTable::from(&self.state.gdt),
            idt: DescriptorTable::from(&self.state.idt),
            cr0: self.state.cr0,
            cr2: self.state.cr2,
            cr3: self.state.cr3,
            cr4: self.state.cr4,
            // HAXM does not expose cr8, so report 0
            cr8: 0,
            efer: self.state.efer as u64,
        }
    }

    /// Writes the special registers into the raw state. Note cr8 has no
    /// corresponding field in `vcpu_state_t` and is dropped.
    fn set_sregs(&mut self, sregs: &Sregs) {
        self.state.cs = segment_desc_t::from(&sregs.cs);
        self.state.ds = segment_desc_t::from(&sregs.ds);
        self.state.es = segment_desc_t::from(&sregs.es);
        self.state.fs = segment_desc_t::from(&sregs.fs);
        self.state.gs = segment_desc_t::from(&sregs.gs);
        self.state.ss = segment_desc_t::from(&sregs.ss);
        self.state.tr = segment_desc_t::from(&sregs.tr);
        self.state.ldt = segment_desc_t::from(&sregs.ldt);
        self.state.gdt = segment_desc_t::from(&sregs.gdt);
        self.state.idt = segment_desc_t::from(&sregs.idt);
        self.state.cr0 = sregs.cr0;
        self.state.cr2 = sregs.cr2;
        self.state.cr3 = sregs.cr3;
        self.state.cr4 = sregs.cr4;
        // vcpu_state_t stores efer as 32 bits; the upper half is discarded.
        self.state.efer = sregs.efer as u32;
    }

    /// Extracts the debug registers (DR0-DR3, DR6, DR7) from the raw state.
    fn get_debugregs(&self) -> DebugRegs {
        DebugRegs {
            db: [
                self.state.dr0,
                self.state.dr1,
                self.state.dr2,
                self.state.dr3,
            ],
            dr6: self.state.dr6,
            dr7: self.state.dr7,
        }
    }

    /// Writes the debug registers into the raw state.
    fn set_debugregs(&mut self, debugregs: &DebugRegs) {
        self.state.dr0 = debugregs.db[0];
        self.state.dr1 = debugregs.db[1];
        self.state.dr2 = debugregs.db[2];
        self.state.dr3 = debugregs.db[3];
        self.state.dr6 = debugregs.dr6;
        self.state.dr7 = debugregs.dr7;
    }
}
// HAXM's segment descriptor format matches exactly with the VMCS structure. The format
756
// of the AR bits is described in the Intel System Programming Guide Part 3, chapter 24.4.1,
757
// table 24-2. The main confusing thing is that the type_ field in haxm is 4 bits, meaning
758
// the 3 least significant bits represent the normal type field, and the most significant
759
// bit represents the "descriptor type" field.
760
761
impl From<&segment_desc_t> for Segment {
    /// Unpacks a HAXM/VMCS-format segment descriptor into crosvm's `Segment`,
    /// splitting the packed access-rights bitfields into individual fields.
    fn from(item: &segment_desc_t) -> Self {
        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        unsafe {
            Segment {
                base: item.base,
                limit_bytes: item.limit,
                selector: item.selector,
                type_: item.__bindgen_anon_1.__bindgen_anon_1.type_() as u8,
                present: item.__bindgen_anon_1.__bindgen_anon_1.present() as u8,
                dpl: item.__bindgen_anon_1.__bindgen_anon_1.dpl() as u8,
                db: item.__bindgen_anon_1.__bindgen_anon_1.operand_size() as u8,
                s: item.__bindgen_anon_1.__bindgen_anon_1.desc() as u8,
                l: item.__bindgen_anon_1.__bindgen_anon_1.long_mode() as u8,
                g: item.__bindgen_anon_1.__bindgen_anon_1.granularity() as u8,
                avl: item.__bindgen_anon_1.__bindgen_anon_1.available() as u8,
            }
        }
    }
}
impl From<&Segment> for segment_desc_t {
    /// Packs crosvm's `Segment` back into the HAXM/VMCS descriptor layout,
    /// writing each access-rights field through its bindgen bitfield setter.
    fn from(item: &Segment) -> Self {
        let mut segment = segment_desc_t {
            base: item.base,
            limit: item.limit_bytes,
            selector: item.selector,
            ..Default::default()
        };

        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        unsafe {
            segment
                .__bindgen_anon_1
                .__bindgen_anon_1
                .set_type(item.type_ as u32);
            segment
                .__bindgen_anon_1
                .__bindgen_anon_1
                .set_desc(item.s as u32);
            segment
                .__bindgen_anon_1
                .__bindgen_anon_1
                .set_present(item.present as u32);
            segment
                .__bindgen_anon_1
                .__bindgen_anon_1
                .set_dpl(item.dpl as u32);
            segment
                .__bindgen_anon_1
                .__bindgen_anon_1
                .set_operand_size(item.db as u32);
            segment
                .__bindgen_anon_1
                .__bindgen_anon_1
                .set_long_mode(item.l as u32);
            segment
                .__bindgen_anon_1
                .__bindgen_anon_1
                .set_granularity(item.g as u32);
            segment
                .__bindgen_anon_1
                .__bindgen_anon_1
                .set_available(item.avl as u32);
        }

        segment
    }
}
impl From<&segment_desc_t> for DescriptorTable {
834
fn from(item: &segment_desc_t) -> Self {
835
DescriptorTable {
836
base: item.base,
837
limit: item.limit as u16,
838
}
839
}
840
}
841
842
impl From<&DescriptorTable> for segment_desc_t {
843
fn from(item: &DescriptorTable) -> Self {
844
segment_desc_t {
845
base: item.base,
846
limit: item.limit as u32,
847
..Default::default()
848
}
849
}
850
}
851
852
impl From<&fx_layout> for Fpu {
    /// Converts HAXM's FXSAVE-style `fx_layout` into crosvm's `Fpu`.
    ///
    /// The XMM registers are split across two 8-entry arrays (`mmx_1`,
    /// `mmx_2`) in the raw layout and are stitched back together here.
    fn from(item: &fx_layout) -> Self {
        let mut fpu = Fpu {
            fpr: FpuReg::from_16byte_arrays(&item.st_mm),
            fcw: item.fcw,
            fsw: item.fsw,
            ftwx: item.ftw,
            last_opcode: item.fop,
            // SAFETY: trivially safe
            last_ip: unsafe { item.__bindgen_anon_1.fpu_ip },
            // SAFETY: trivially safe
            last_dp: unsafe { item.__bindgen_anon_2.fpu_dp },
            xmm: [[0; 16]; 16],
            mxcsr: item.mxcsr,
        };

        // Reassemble xmm0-15 from the two halves of the raw layout.
        fpu.xmm[..8].copy_from_slice(&item.mmx_1[..]);
        fpu.xmm[8..].copy_from_slice(&item.mmx_2[..]);

        fpu
    }
}
impl From<&Fpu> for fx_layout {
    /// Converts crosvm's `Fpu` into HAXM's FXSAVE-style `fx_layout`.
    ///
    /// `mxcsr_mask` is left 0 here; the caller (see `set_fpu`) patches it with
    /// the kernel's current value before the state is written back.
    fn from(item: &Fpu) -> Self {
        let mut fpu = fx_layout {
            fcw: item.fcw,
            fsw: item.fsw,
            ftw: item.ftwx,
            res1: 0,
            fop: item.last_opcode,
            __bindgen_anon_1: fx_layout__bindgen_ty_1 {
                fpu_ip: item.last_ip,
            },
            __bindgen_anon_2: fx_layout__bindgen_ty_2 {
                fpu_dp: item.last_dp,
            },
            mxcsr: item.mxcsr,
            mxcsr_mask: 0,
            st_mm: FpuReg::to_16byte_arrays(&item.fpr),
            mmx_1: [[0; 16]; 8],
            mmx_2: [[0; 16]; 8],
            pad: [0; 96],
        };

        // Split xmm0-15 into the two 8-entry halves of the raw layout.
        fpu.mmx_1.copy_from_slice(&item.xmm[..8]);
        fpu.mmx_2.copy_from_slice(&item.xmm[8..]);

        fpu
    }
}
impl From<&hax_cpuid_entry> for CpuIdEntry {
905
fn from(item: &hax_cpuid_entry) -> Self {
906
CpuIdEntry {
907
function: item.function,
908
index: item.index,
909
flags: item.flags,
910
cpuid: CpuidResult {
911
eax: item.eax,
912
ebx: item.ebx,
913
ecx: item.ecx,
914
edx: item.edx,
915
},
916
}
917
}
918
}
919
920
impl From<&CpuIdEntry> for hax_cpuid_entry {
921
fn from(item: &CpuIdEntry) -> Self {
922
hax_cpuid_entry {
923
function: item.function,
924
index: item.index,
925
flags: item.flags,
926
eax: item.cpuid.eax,
927
ebx: item.cpuid.ebx,
928
ecx: item.cpuid.ecx,
929
edx: item.cpuid.edx,
930
pad: Default::default(),
931
}
932
}
933
}
934
935
// TODO(b:241252288): Enable tests disabled with dummy feature flag - enable_haxm_tests.
#[cfg(test)]
#[cfg(feature = "enable_haxm_tests")]
mod tests {
    use vm_memory::GuestAddress;
    use vm_memory::GuestMemory;

    use super::*;
    use crate::VmX86_64;

    // EFER Bits
    const EFER_SCE: u64 = 0x00000001;
    const EFER_LME: u64 = 0x00000100;
    const EFER_LMA: u64 = 0x00000400;
    // SVME can be set by the platform, so the assertions below mask it out.
    const EFER_SVME: u64 = 1 << 12;

    // CR0 bits
    const CR0_PG: u64 = 1 << 31;

    // Smoke test: reading GPRs from a freshly created vcpu succeeds.
    #[test]
    fn get_regs() {
        let haxm = Haxm::new().expect("failed to instantiate HAXM");
        let mem =
            GuestMemory::new(&[(GuestAddress(0), 0x1000)]).expect("failed to create guest memory");
        let vm = HaxmVm::new(&haxm, mem).expect("failed to create vm");
        let vcpu = vm.create_vcpu(0).expect("failed to create vcpu");

        vcpu.get_regs().expect("failed to get regs");
    }

    // Smoke test: reading FPU state from a freshly created vcpu succeeds.
    #[test]
    fn get_fpu() {
        let haxm = Haxm::new().expect("failed to instantiate HAXM");
        let mem =
            GuestMemory::new(&[(GuestAddress(0), 0x1000)]).expect("failed to create guest memory");
        let vm = HaxmVm::new(&haxm, mem).expect("failed to create vm");
        let vcpu = vm.create_vcpu(0).expect("failed to create vcpu");

        vcpu.get_fpu().expect("failed to get fpu");
    }

    // Smoke test: writing a single MSR succeeds.
    #[test]
    fn set_msr() {
        let haxm = Haxm::new().expect("failed to instantiate HAXM");
        let mem =
            GuestMemory::new(&[(GuestAddress(0), 0x1000)]).expect("failed to create guest memory");
        let vm = HaxmVm::new(&haxm, mem).expect("failed to create vm");
        let vcpu = vm.create_vcpu(0).expect("failed to create vcpu");

        vcpu.set_msr(38, 0x300).expect("failed to set MSR");
    }

    // Smoke test: reading a single MSR succeeds.
    #[test]
    fn get_msr() {
        let haxm = Haxm::new().expect("failed to instantiate HAXM");
        let mem =
            GuestMemory::new(&[(GuestAddress(0), 0x1000)]).expect("failed to create guest memory");
        let vm = HaxmVm::new(&haxm, mem).expect("failed to create vm");
        let vcpu = vm.create_vcpu(0).expect("failed to create vcpu");

        let _value = vcpu.get_msr(38).expect("failed to get MSR");
    }

    // Checks that the supported cpuid set (minus XSAVE/OSXSAVE, which the
    // test environment may not allow) can be applied to a vcpu.
    #[test]
    fn set_cpuid() {
        let haxm = Haxm::new().expect("failed to instantiate HAXM");
        let mem =
            GuestMemory::new(&[(GuestAddress(0), 0x1000)]).expect("failed to create guest memory");
        let vm = HaxmVm::new(&haxm, mem).expect("failed to create vm");
        let vcpu = vm.create_vcpu(0).expect("failed to create vcpu");

        let mut cpuid = haxm
            .get_supported_cpuid()
            .expect("failed to get supported cpuids");
        for entry in &mut cpuid.cpu_id_entries {
            if entry.function == 1 {
                // Disable XSAVE and OSXSAVE
                entry.cpuid.ecx &= !(1 << 26);
                entry.cpuid.ecx &= !(1 << 27);
            }
        }

        vcpu.set_cpuid(&cpuid).expect("failed to set cpuid");
    }

    // Exercises the EFER round-trip through both the sregs path and the MSR
    // path, since HaxmVcpu routes EFER through the MSR interface internally.
    #[test]
    fn set_efer() {
        // HAXM efer setting requires some extra code, so we have this test specifically
        // checking that it's working.
        let haxm = Haxm::new().expect("failed to instantiate HAXM");
        let mem =
            GuestMemory::new(&[(GuestAddress(0), 0x1000)]).expect("failed to create guest memory");
        let vm = HaxmVm::new(&haxm, mem).expect("failed to create vm");
        let vcpu = vm.create_vcpu(0).expect("failed to create vcpu");

        let mut sregs = vcpu.get_sregs().expect("failed to get sregs");
        // Initial value should be 0
        assert_eq!(sregs.efer & !EFER_SVME, 0);

        // Enable and activate long mode
        sregs.efer = EFER_LMA | EFER_LME;
        // Need to enable paging or LMA will be turned off
        sregs.cr0 |= CR0_PG;
        vcpu.set_sregs(&sregs).expect("failed to set sregs");

        // Verify that setting stuck
        let sregs = vcpu.get_sregs().expect("failed to get sregs");
        assert_eq!(sregs.efer & !EFER_SVME, EFER_LMA | EFER_LME);

        // IA32_EFER register value should match
        let efer = vcpu.get_msr(IA32_EFER).expect("failed to get msr");
        assert_eq!(efer & !EFER_SVME, EFER_LMA | EFER_LME);

        // Enable SCE via set_msrs
        vcpu.set_msr(IA32_EFER, efer | EFER_SCE)
            .expect("failed to set msr");

        // Verify that setting stuck
        let sregs = vcpu.get_sregs().expect("failed to get sregs");
        assert_eq!(sregs.efer & !EFER_SVME, EFER_SCE | EFER_LME | EFER_LMA);
        let new_efer = vcpu.get_msr(IA32_EFER).expect("failed to get msrs");
        assert_eq!(new_efer & !EFER_SVME, EFER_SCE | EFER_LME | EFER_LMA);
    }
}