Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
google
GitHub Repository: google/crosvm
Path: blob/main/hypervisor/src/kvm/x86_64.rs
5394 views
1
// Copyright 2020 The ChromiumOS Authors
2
// Use of this source code is governed by a BSD-style license that can be
3
// found in the LICENSE file.
4
5
use std::arch::x86_64::CpuidResult;
6
use std::collections::BTreeMap;
7
8
use base::errno_result;
9
use base::error;
10
use base::ioctl;
11
use base::ioctl_with_mut_ptr;
12
use base::ioctl_with_mut_ref;
13
use base::ioctl_with_ptr;
14
use base::ioctl_with_ref;
15
use base::ioctl_with_val;
16
use base::AsRawDescriptor;
17
use base::Error;
18
use base::IoctlNr;
19
use base::MappedRegion;
20
use base::Result;
21
use kvm_sys::*;
22
use libc::E2BIG;
23
use libc::EAGAIN;
24
use libc::EINVAL;
25
use libc::EIO;
26
use libc::ENOMEM;
27
use libc::ENXIO;
28
use serde::Deserialize;
29
use serde::Serialize;
30
use snapshot::AnySnapshot;
31
use vm_memory::GuestAddress;
32
use zerocopy::FromZeros;
33
34
use super::Config;
35
use super::Kvm;
36
use super::KvmCap;
37
use super::KvmVcpu;
38
use super::KvmVm;
39
use crate::host_phys_addr_bits;
40
use crate::ClockState;
41
use crate::CpuId;
42
use crate::CpuIdEntry;
43
use crate::DebugRegs;
44
use crate::DescriptorTable;
45
use crate::DeviceKind;
46
use crate::Fpu;
47
use crate::FpuReg;
48
use crate::HypervisorX86_64;
49
use crate::IoapicRedirectionTableEntry;
50
use crate::IoapicState;
51
use crate::IrqSourceChip;
52
use crate::LapicState;
53
use crate::PicSelect;
54
use crate::PicState;
55
use crate::PitChannelState;
56
use crate::PitState;
57
use crate::ProtectionType;
58
use crate::Regs;
59
use crate::Segment;
60
use crate::Sregs;
61
use crate::VcpuExit;
62
use crate::VcpuX86_64;
63
use crate::VmCap;
64
use crate::VmX86_64;
65
use crate::Xsave;
66
use crate::NUM_IOAPIC_PINS;
67
68
// Size in bytes of the legacy KVM_GET_XSAVE area; KVM_GET_XSAVE2 may report a
// larger size via KVM_CAP_XSAVE2, so this acts as the minimum xsave buffer size
// (see `KvmVcpu::xsave_size`).
const KVM_XSAVE_MAX_SIZE: usize = 4096;
// MSR number of IA32_APICBASE, the architectural APIC base-address MSR.
const MSR_IA32_APICBASE: u32 = 0x0000001b;
70
71
/// Snapshot of a vCPU's pending event state, mirroring the kernel's
/// `kvm_vcpu_events` structure (converted via `From` impls elsewhere in the
/// crate — not visible in this file chunk).
///
/// NOTE: field order is part of the serde-serialized snapshot format; do not
/// reorder fields without a migration plan.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuEvents {
    pub exception: VcpuExceptionState,
    pub interrupt: VcpuInterruptState,
    pub nmi: VcpuNmiState,
    // Presumably `None` when the kernel does not report this field — TODO
    // confirm against the `kvm_vcpu_events` conversion code.
    pub sipi_vector: Option<u32>,
    pub smi: VcpuSmiState,
    pub triple_fault: VcpuTripleFaultState,
    pub exception_payload: Option<u64>,
}

/// Pending/injected exception state for a vCPU.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuExceptionState {
    pub injected: bool,
    // Exception vector number.
    pub nr: u8,
    pub has_error_code: bool,
    pub pending: Option<bool>,
    pub error_code: u32,
}

/// Pending/injected external interrupt state for a vCPU.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuInterruptState {
    pub injected: bool,
    // Interrupt vector number.
    pub nr: u8,
    pub soft: bool,
    pub shadow: Option<u8>,
}

/// Non-maskable interrupt state for a vCPU.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuNmiState {
    pub injected: bool,
    pub pending: Option<bool>,
    pub masked: bool,
}

/// System-management interrupt state for a vCPU.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuSmiState {
    pub smm: Option<bool>,
    pub pending: bool,
    pub smm_inside_nmi: bool,
    pub latched_init: u8,
}

/// Pending triple-fault state for a vCPU.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuTripleFaultState {
    pub pending: Option<bool>,
}
118
119
pub fn get_cpuid_with_initial_capacity<T: AsRawDescriptor>(
120
descriptor: &T,
121
kind: IoctlNr,
122
initial_capacity: usize,
123
) -> Result<CpuId> {
124
let mut entries: usize = initial_capacity;
125
126
loop {
127
let mut kvm_cpuid =
128
kvm_cpuid2::<[kvm_cpuid_entry2]>::new_box_zeroed_with_elems(entries).unwrap();
129
kvm_cpuid.nent = entries.try_into().unwrap();
130
131
let ret = {
132
// SAFETY:
133
// ioctl is unsafe. The kernel is trusted not to write beyond the bounds of the
134
// memory allocated for the struct. The limit is read from nent within kvm_cpuid2,
135
// which is set to the allocated size above.
136
unsafe { ioctl_with_mut_ref(descriptor, kind, &mut *kvm_cpuid) }
137
};
138
if ret < 0 {
139
let err = Error::last();
140
match err.errno() {
141
E2BIG => {
142
// double the available memory for cpuid entries for kvm.
143
if let Some(val) = entries.checked_mul(2) {
144
entries = val;
145
} else {
146
return Err(err);
147
}
148
}
149
_ => return Err(err),
150
}
151
} else {
152
return Ok(CpuId::from(&*kvm_cpuid));
153
}
154
}
155
}
156
157
impl Kvm {
158
pub fn get_cpuid(&self, kind: IoctlNr) -> Result<CpuId> {
159
const KVM_MAX_ENTRIES: usize = 256;
160
get_cpuid_with_initial_capacity(self, kind, KVM_MAX_ENTRIES)
161
}
162
163
pub fn get_vm_type(&self, protection_type: ProtectionType) -> Result<u32> {
164
if protection_type.isolates_memory() {
165
Ok(KVM_X86_PKVM_PROTECTED_VM)
166
} else {
167
Ok(KVM_X86_DEFAULT_VM)
168
}
169
}
170
171
/// Get the size of guest physical addresses in bits.
172
pub fn get_guest_phys_addr_bits(&self) -> u8 {
173
// Assume the guest physical address size is the same as the host.
174
host_phys_addr_bits()
175
}
176
}
177
178
impl HypervisorX86_64 for Kvm {
    /// Returns the CPUID features supported by KVM on this host.
    fn get_supported_cpuid(&self) -> Result<CpuId> {
        self.get_cpuid(KVM_GET_SUPPORTED_CPUID)
    }

    /// Returns the list of MSR indices supported by KVM, truncated to at most
    /// 256 entries (the fixed size of the buffer passed to the kernel).
    fn get_msr_index_list(&self) -> Result<Vec<u32>> {
        const MAX_KVM_MSR_ENTRIES: usize = 256;

        let mut msr_list = kvm_msr_list::<[u32; MAX_KVM_MSR_ENTRIES]>::new_zeroed();
        msr_list.nmsrs = MAX_KVM_MSR_ENTRIES as u32;

        let ret = {
            // SAFETY:
            // ioctl is unsafe. The kernel is trusted not to write beyond the bounds of the memory
            // allocated for the struct. The limit is read from nmsrs, which is set to the allocated
            // size (MAX_KVM_MSR_ENTRIES) above.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_MSR_INDEX_LIST, &mut msr_list) }
        };
        if ret < 0 {
            return errno_result();
        }

        // Clamp to the allocated buffer size in case the kernel reports more
        // MSRs than we provided room for.
        let mut nmsrs = msr_list.nmsrs;
        if nmsrs > MAX_KVM_MSR_ENTRIES as u32 {
            nmsrs = MAX_KVM_MSR_ENTRIES as u32;
        }

        Ok(msr_list.indices[..nmsrs as usize].to_vec())
    }
}
208
209
impl KvmVm {
    /// Does platform specific initialization for the KvmVm.
    pub fn init_arch(&self, _cfg: &Config) -> Result<()> {
        // No x86_64-specific initialization is currently required.
        Ok(())
    }

    /// Checks if a particular `VmCap` is available, or returns None if arch-independent
    /// Vm.check_capability() should handle the check.
    pub fn check_capability_arch(&self, c: VmCap) -> Option<bool> {
        match c {
            // The paravirtual clock is always reported as available on x86_64 KVM.
            VmCap::PvClock => Some(true),
            _ => None,
        }
    }

    /// Returns the params to pass to KVM_CREATE_DEVICE for a `kind` device on this arch, or None to
    /// let the arch-independent `KvmVm::create_device` handle it.
    pub fn get_device_params_arch(&self, _kind: DeviceKind) -> Option<kvm_create_device> {
        None
    }

    /// Arch-specific implementation of `Vm::get_pvclock`.
    pub fn get_pvclock_arch(&self) -> Result<ClockState> {
        let mut clock_data: kvm_clock_data = Default::default();
        let ret =
            // SAFETY:
            // Safe because we know that our file is a VM fd, we know the kernel will only write correct
            // amount of memory to our pointer, and we verify the return result.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_CLOCK, &mut clock_data) };
        if ret == 0 {
            Ok(ClockState::from(&clock_data))
        } else {
            errno_result()
        }
    }

    /// Arch-specific implementation of `Vm::set_pvclock`.
    pub fn set_pvclock_arch(&self, state: &ClockState) -> Result<()> {
        let clock_data = kvm_clock_data::from(state);
        // SAFETY:
        // Safe because we know that our file is a VM fd, we know the kernel will only read correct
        // amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_CLOCK, &clock_data) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Retrieves the state of given interrupt controller by issuing KVM_GET_IRQCHIP ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    pub fn get_pic_state(&self, id: PicSelect) -> Result<kvm_pic_state> {
        let mut irqchip_state = kvm_irqchip {
            chip_id: id as u32,
            ..Default::default()
        };
        let ret = {
            // SAFETY:
            // Safe because we know our file is a VM fd, we know the kernel will only write
            // correct amount of memory to our pointer, and we verify the return result.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_IRQCHIP, &mut irqchip_state) }
        };
        if ret == 0 {
            Ok(
                // SAFETY:
                // Safe as we know that we are retrieving data related to the
                // PIC (primary or secondary) and not IOAPIC.
                unsafe { irqchip_state.chip.pic },
            )
        } else {
            errno_result()
        }
    }

    /// Sets the state of given interrupt controller by issuing KVM_SET_IRQCHIP ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    pub fn set_pic_state(&self, id: PicSelect, state: &kvm_pic_state) -> Result<()> {
        let mut irqchip_state = kvm_irqchip {
            chip_id: id as u32,
            ..Default::default()
        };
        irqchip_state.chip.pic = *state;
        // SAFETY:
        // Safe because we know that our file is a VM fd, we know the kernel will only read
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_IRQCHIP, &irqchip_state) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Retrieves the number of pins for emulated IO-APIC.
    pub fn get_ioapic_num_pins(&self) -> Result<usize> {
        Ok(NUM_IOAPIC_PINS)
    }

    /// Retrieves the state of IOAPIC by issuing KVM_GET_IRQCHIP ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    pub fn get_ioapic_state(&self) -> Result<kvm_ioapic_state> {
        // chip_id 2 selects the IOAPIC in the kvm_irqchip union.
        let mut irqchip_state = kvm_irqchip {
            chip_id: 2,
            ..Default::default()
        };
        let ret = {
            // SAFETY:
            // Safe because we know our file is a VM fd, we know the kernel will only write
            // correct amount of memory to our pointer, and we verify the return result.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_IRQCHIP, &mut irqchip_state) }
        };
        if ret == 0 {
            Ok(
                // SAFETY:
                // Safe as we know that we are retrieving data related to the
                // IOAPIC and not PIC.
                unsafe { irqchip_state.chip.ioapic },
            )
        } else {
            errno_result()
        }
    }

    /// Sets the state of IOAPIC by issuing KVM_SET_IRQCHIP ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    pub fn set_ioapic_state(&self, state: &kvm_ioapic_state) -> Result<()> {
        // chip_id 2 selects the IOAPIC in the kvm_irqchip union.
        let mut irqchip_state = kvm_irqchip {
            chip_id: 2,
            ..Default::default()
        };
        irqchip_state.chip.ioapic = *state;
        // SAFETY:
        // Safe because we know that our file is a VM fd, we know the kernel will only read
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_IRQCHIP, &irqchip_state) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Creates a PIT as per the KVM_CREATE_PIT2 ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    pub fn create_pit(&self) -> Result<()> {
        let pit_config = kvm_pit_config::default();
        // SAFETY:
        // Safe because we know that our file is a VM fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_CREATE_PIT2, &pit_config) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Retrieves the state of PIT by issuing KVM_GET_PIT2 ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_pit`.
    pub fn get_pit_state(&self) -> Result<kvm_pit_state2> {
        let mut pit_state = Default::default();
        // SAFETY:
        // Safe because we know that our file is a VM fd, we know the kernel will only write
        // correct amount of memory to our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_PIT2, &mut pit_state) };
        if ret == 0 {
            Ok(pit_state)
        } else {
            errno_result()
        }
    }

    /// Sets the state of PIT by issuing KVM_SET_PIT2 ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_pit`.
    pub fn set_pit_state(&self, pit_state: &kvm_pit_state2) -> Result<()> {
        // SAFETY:
        // Safe because we know that our file is a VM fd, we know the kernel will only read
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_PIT2, pit_state) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Set MSR_PLATFORM_INFO read access.
    pub fn set_platform_info_read_access(&self, allow_read: bool) -> Result<()> {
        let mut cap = kvm_enable_cap {
            cap: KVM_CAP_MSR_PLATFORM_INFO,
            ..Default::default()
        };
        cap.args[0] = allow_read as u64;

        // SAFETY:
        // Safe because we know that our file is a VM fd, we know that the
        // kernel will only read correct amount of memory from our pointer, and
        // we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_ENABLE_CAP, &cap) };
        if ret < 0 {
            errno_result()
        } else {
            Ok(())
        }
    }

    /// Enable support for split-irqchip.
    pub fn enable_split_irqchip(&self, ioapic_pins: usize) -> Result<()> {
        let mut cap = kvm_enable_cap {
            cap: KVM_CAP_SPLIT_IRQCHIP,
            ..Default::default()
        };
        cap.args[0] = ioapic_pins as u64;
        // SAFETY:
        // Safe because we allocated the struct and we know the kernel will read
        // exactly the size of the struct.
        let ret = unsafe { ioctl_with_ref(self, KVM_ENABLE_CAP, &cap) };
        if ret < 0 {
            errno_result()
        } else {
            Ok(())
        }
    }

    /// Get pKVM hypervisor details, e.g. the firmware size.
    ///
    /// Returns `Err` if not running under pKVM.
    ///
    /// Uses `KVM_ENABLE_CAP` internally, but it is only a getter, there should be no side effects
    /// in KVM.
    fn get_protected_vm_info(&self) -> Result<KvmProtectedVmInfo> {
        let mut info = KvmProtectedVmInfo {
            firmware_size: 0,
            reserved: [0; 7],
        };
        // SAFETY:
        // Safe because we allocated the struct and we know the kernel won't write beyond the end of
        // the struct or keep a pointer to it.
        unsafe {
            self.enable_raw_capability(
                KvmCap::X86ProtectedVm,
                KVM_CAP_X86_PROTECTED_VM_FLAGS_INFO,
                // First arg is the userspace address the kernel fills with the info struct.
                &[&mut info as *mut KvmProtectedVmInfo as u64, 0, 0, 0],
            )
        }?;
        Ok(info)
    }

    /// Tells pKVM the guest physical address at which the protected firmware
    /// should be placed.
    fn set_protected_vm_firmware_gpa(&self, fw_addr: GuestAddress) -> Result<()> {
        // SAFETY:
        // Safe because none of the args are pointers.
        unsafe {
            self.enable_raw_capability(
                KvmCap::X86ProtectedVm,
                KVM_CAP_X86_PROTECTED_VM_FLAGS_SET_FW_GPA,
                &[fw_addr.0, 0, 0, 0],
            )
        }
    }
}
477
478
/// Layout of the info struct filled in by the pKVM
/// `KVM_CAP_X86_PROTECTED_VM_FLAGS_INFO` capability query
/// (see `KvmVm::get_protected_vm_info`).
#[repr(C)]
struct KvmProtectedVmInfo {
    // Size in bytes of the pKVM firmware; 0 means no firmware is available.
    firmware_size: u64,
    // Padding reserved by the kernel ABI; must be zero.
    reserved: [u64; 7],
}
483
484
impl VmX86_64 for KvmVm {
485
fn get_hypervisor(&self) -> &dyn HypervisorX86_64 {
486
&self.kvm
487
}
488
489
fn load_protected_vm_firmware(
490
&mut self,
491
fw_addr: GuestAddress,
492
fw_max_size: u64,
493
) -> Result<()> {
494
let info = self.get_protected_vm_info()?;
495
if info.firmware_size == 0 {
496
Err(Error::new(EINVAL))
497
} else {
498
if info.firmware_size > fw_max_size {
499
return Err(Error::new(ENOMEM));
500
}
501
self.set_protected_vm_firmware_gpa(fw_addr)
502
}
503
}
504
505
fn create_vcpu(&self, id: usize) -> Result<Box<dyn VcpuX86_64>> {
506
// create_vcpu is declared separately in VmAArch64 and VmX86, so it can return VcpuAArch64
507
// or VcpuX86. But both use the same implementation in KvmVm::create_vcpu.
508
Ok(Box::new(KvmVm::create_kvm_vcpu(self, id)?))
509
}
510
511
/// Sets the address of the three-page region in the VM's address space.
512
///
513
/// See the documentation on the KVM_SET_TSS_ADDR ioctl.
514
fn set_tss_addr(&self, addr: GuestAddress) -> Result<()> {
515
// SAFETY:
516
// Safe because we know that our file is a VM fd and we verify the return result.
517
let ret = unsafe { ioctl_with_val(self, KVM_SET_TSS_ADDR, addr.offset()) };
518
if ret == 0 {
519
Ok(())
520
} else {
521
errno_result()
522
}
523
}
524
525
/// Sets the address of a one-page region in the VM's address space.
526
///
527
/// See the documentation on the KVM_SET_IDENTITY_MAP_ADDR ioctl.
528
fn set_identity_map_addr(&self, addr: GuestAddress) -> Result<()> {
529
// SAFETY:
530
// Safe because we know that our file is a VM fd and we verify the return result.
531
let ret = unsafe { ioctl_with_ref(self, KVM_SET_IDENTITY_MAP_ADDR, &addr.offset()) };
532
if ret == 0 {
533
Ok(())
534
} else {
535
errno_result()
536
}
537
}
538
}
539
540
impl KvmVcpu {
    /// Handles a `KVM_EXIT_SYSTEM_EVENT` with event type `KVM_SYSTEM_EVENT_RESET` with the given
    /// event flags and returns the appropriate `VcpuExit` value for the run loop to handle.
    pub fn system_event_reset(&self, _event_flags: u64) -> Result<VcpuExit> {
        Ok(VcpuExit::SystemEventReset)
    }

    /// Gets the Xsave size by checking the extension KVM_CAP_XSAVE2.
    ///
    /// Size should always be >=0. If size is negative, an error occurred.
    /// If size <= 4096, XSAVE2 is not supported by the CPU or the kernel. KVM_XSAVE_MAX_SIZE is
    /// returned (4096).
    /// Otherwise, the size will be returned.
    fn xsave_size(&self) -> Result<usize> {
        let size = {
            // SAFETY:
            // Safe because we know that our file is a valid VM fd
            unsafe { ioctl_with_val(&self.vm, KVM_CHECK_EXTENSION, KVM_CAP_XSAVE2 as u64) }
        };
        if size < 0 {
            return errno_result();
        }
        // Safe to unwrap since we already tested for negative values
        let size: usize = size.try_into().unwrap();
        // Clamp up to the legacy 4096-byte area when XSAVE2 is unsupported or
        // reports a smaller size.
        Ok(size.max(KVM_XSAVE_MAX_SIZE))
    }

    /// Maps x86-specific KVM exit reasons to `VcpuExit` values, or returns
    /// `None` so the arch-independent exit handler can process the exit.
    #[inline]
    pub(crate) fn handle_vm_exit_arch(&self, run: &mut kvm_run) -> Option<VcpuExit> {
        match run.exit_reason {
            KVM_EXIT_IO => Some(VcpuExit::Io),
            KVM_EXIT_IOAPIC_EOI => {
                // SAFETY:
                // Safe because the exit_reason (which comes from the kernel) told us which
                // union field to use.
                let vector = unsafe { run.__bindgen_anon_1.eoi.vector };
                Some(VcpuExit::IoapicEoi { vector })
            }
            KVM_EXIT_HLT => Some(VcpuExit::Hlt),
            KVM_EXIT_SET_TPR => Some(VcpuExit::SetTpr),
            KVM_EXIT_TPR_ACCESS => Some(VcpuExit::TprAccess),
            KVM_EXIT_X86_BUS_LOCK => Some(VcpuExit::BusLock),
            _ => None,
        }
    }
}
586
587
/// Hypervisor-specific vCPU state captured for snapshot/restore
/// (see `get_hypervisor_specific_state` / `set_hypervisor_specific_state`).
#[derive(Debug, Serialize, Deserialize)]
struct HypervisorState {
    // Pending event state obtained via KVM_GET_VCPU_EVENTS.
    interrupts: VcpuEvents,
    // Raw `kvm_nested_state` blob from KVM_GET_NESTED_STATE; empty when
    // KVM_CAP_NESTED_STATE is unsupported.
    nested_state: Vec<u8>,
}
592
593
impl VcpuX86_64 for KvmVcpu {
594
#[allow(clippy::cast_ptr_alignment)]
fn set_interrupt_window_requested(&self, requested: bool) {
    // SAFETY:
    // Safe because we know we mapped enough memory to hold the kvm_run struct because the
    // kernel told us how large it was. The pointer is page aligned so casting to a different
    // type is well defined, hence the clippy allow attribute.
    let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut kvm_run) };
    // Ask KVM to exit to userspace as soon as the guest can accept an interrupt.
    run.request_interrupt_window = requested.into();
}
603
604
#[allow(clippy::cast_ptr_alignment)]
605
fn ready_for_interrupt(&self) -> bool {
606
// SAFETY:
607
// Safe because we know we mapped enough memory to hold the kvm_run struct because the
608
// kernel told us how large it was. The pointer is page aligned so casting to a different
609
// type is well defined, hence the clippy allow attribute.
610
let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut kvm_run) };
611
run.ready_for_interrupt_injection != 0 && run.if_flag != 0
612
}
613
614
/// Use the KVM_INTERRUPT ioctl to inject the specified interrupt vector.
615
///
616
/// While this ioctl exists on PPC and MIPS as well as x86, the semantics are different and
617
/// ChromeOS doesn't support PPC or MIPS.
618
fn interrupt(&self, irq: u8) -> Result<()> {
619
if !self.ready_for_interrupt() {
620
return Err(Error::new(EAGAIN));
621
}
622
623
let interrupt = kvm_interrupt { irq: irq.into() };
624
// SAFETY:
625
// safe becuase we allocated the struct and we know the kernel will read
626
// exactly the size of the struct
627
let ret = unsafe { ioctl_with_ref(self, KVM_INTERRUPT, &interrupt) };
628
if ret == 0 {
629
Ok(())
630
} else {
631
errno_result()
632
}
633
}
634
635
fn inject_nmi(&self) -> Result<()> {
636
// SAFETY:
637
// Safe because we know that our file is a VCPU fd.
638
let ret = unsafe { ioctl(self, KVM_NMI) };
639
if ret == 0 {
640
Ok(())
641
} else {
642
errno_result()
643
}
644
}
645
646
fn get_regs(&self) -> Result<Regs> {
647
let mut regs: kvm_regs = Default::default();
648
let ret = {
649
// SAFETY:
650
// Safe because we know that our file is a VCPU fd, we know the kernel will only read
651
// the correct amount of memory from our pointer, and we verify the return
652
// result.
653
unsafe { ioctl_with_mut_ref(self, KVM_GET_REGS, &mut regs) }
654
};
655
if ret == 0 {
656
Ok(Regs::from(&regs))
657
} else {
658
errno_result()
659
}
660
}
661
662
fn set_regs(&self, regs: &Regs) -> Result<()> {
663
let regs = kvm_regs::from(regs);
664
let ret = {
665
// SAFETY:
666
// Safe because we know that our file is a VCPU fd, we know the kernel will only read
667
// the correct amount of memory from our pointer, and we verify the return
668
// result.
669
unsafe { ioctl_with_ref(self, KVM_SET_REGS, &regs) }
670
};
671
if ret == 0 {
672
Ok(())
673
} else {
674
errno_result()
675
}
676
}
677
678
fn get_sregs(&self) -> Result<Sregs> {
679
let mut regs: kvm_sregs = Default::default();
680
let ret = {
681
// SAFETY:
682
// Safe because we know that our file is a VCPU fd, we know the kernel will only write
683
// the correct amount of memory to our pointer, and we verify the return
684
// result.
685
unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS, &mut regs) }
686
};
687
if ret == 0 {
688
Ok(Sregs::from(&regs))
689
} else {
690
errno_result()
691
}
692
}
693
694
/// Writes the special registers via a KVM_GET_SREGS / KVM_SET_SREGS
/// read-modify-write cycle, preserving kernel-owned fields.
fn set_sregs(&self, sregs: &Sregs) -> Result<()> {
    // Get the current `kvm_sregs` so we can use its `apic_base` and `interrupt_bitmap`, which
    // are not present in `Sregs`.
    let mut kvm_sregs: kvm_sregs = Default::default();
    // SAFETY:
    // Safe because we know that our file is a VCPU fd, we know the kernel will only write the
    // correct amount of memory to our pointer, and we verify the return result.
    let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS, &mut kvm_sregs) };
    if ret != 0 {
        return errno_result();
    }

    // Overwrite only the fields represented in `Sregs`; everything else keeps
    // the values just read from the kernel.
    kvm_sregs.cs = kvm_segment::from(&sregs.cs);
    kvm_sregs.ds = kvm_segment::from(&sregs.ds);
    kvm_sregs.es = kvm_segment::from(&sregs.es);
    kvm_sregs.fs = kvm_segment::from(&sregs.fs);
    kvm_sregs.gs = kvm_segment::from(&sregs.gs);
    kvm_sregs.ss = kvm_segment::from(&sregs.ss);
    kvm_sregs.tr = kvm_segment::from(&sregs.tr);
    kvm_sregs.ldt = kvm_segment::from(&sregs.ldt);
    kvm_sregs.gdt = kvm_dtable::from(&sregs.gdt);
    kvm_sregs.idt = kvm_dtable::from(&sregs.idt);
    kvm_sregs.cr0 = sregs.cr0;
    kvm_sregs.cr2 = sregs.cr2;
    kvm_sregs.cr3 = sregs.cr3;
    kvm_sregs.cr4 = sregs.cr4;
    kvm_sregs.cr8 = sregs.cr8;
    kvm_sregs.efer = sregs.efer;

    // SAFETY:
    // Safe because we know that our file is a VCPU fd, we know the kernel will only read the
    // correct amount of memory from our pointer, and we verify the return result.
    let ret = unsafe { ioctl_with_ref(self, KVM_SET_SREGS, &kvm_sregs) };
    if ret == 0 {
        Ok(())
    } else {
        errno_result()
    }
}
733
734
fn get_fpu(&self) -> Result<Fpu> {
735
let mut fpu: kvm_fpu = Default::default();
736
// SAFETY:
737
// Safe because we know that our file is a VCPU fd, we know the kernel will only write the
738
// correct amount of memory to our pointer, and we verify the return result.
739
let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_FPU, &mut fpu) };
740
if ret == 0 {
741
Ok(Fpu::from(&fpu))
742
} else {
743
errno_result()
744
}
745
}
746
747
fn set_fpu(&self, fpu: &Fpu) -> Result<()> {
748
let fpu = kvm_fpu::from(fpu);
749
let ret = {
750
// SAFETY:
751
// Here we trust the kernel not to read past the end of the kvm_fpu struct.
752
unsafe { ioctl_with_ref(self, KVM_SET_FPU, &fpu) }
753
};
754
if ret == 0 {
755
Ok(())
756
} else {
757
errno_result()
758
}
759
}
760
761
/// If the VM reports using XSave2, the function will call XSave2.
///
/// Uses `xsave_size` to pick between the legacy KVM_GET_XSAVE ioctl (4096-byte
/// area) and KVM_GET_XSAVE2 (variable-size area).
fn get_xsave(&self) -> Result<Xsave> {
    let size = self.xsave_size()?;
    // KVM_GET_XSAVE2 is only needed when the reported area exceeds the legacy
    // 4096-byte layout.
    let ioctl_nr = if size > KVM_XSAVE_MAX_SIZE {
        KVM_GET_XSAVE2
    } else {
        KVM_GET_XSAVE
    };
    let mut xsave = Xsave::new(size);

    // SAFETY:
    // Safe because we know that our file is a VCPU fd, we know the kernel will only write the
    // correct amount of memory to our pointer, and we verify the return result.
    let ret = unsafe { ioctl_with_mut_ptr(self, ioctl_nr, xsave.as_mut_ptr()) };
    if ret == 0 {
        Ok(xsave)
    } else {
        errno_result()
    }
}
781
782
/// Writes the xsave area via KVM_SET_XSAVE, after verifying the buffer matches
/// the size the kernel currently expects.
fn set_xsave(&self, xsave: &Xsave) -> Result<()> {
    let size = self.xsave_size()?;
    // Ensure xsave is the same size as used in get_xsave.
    // Return err if sizes don't match => not the same extensions are enabled for CPU.
    if xsave.len() != size {
        return Err(Error::new(EIO));
    }

    // SAFETY:
    // Safe because we know that our file is a VCPU fd, we know the kernel will only read the
    // correct amount of memory from our pointer, and we verify the return result.
    // Because of the len check above, and because the layout of `struct kvm_xsave` is
    // compatible with a slice of `u32`, we can pass the pointer to `xsave` directly.
    let ret = unsafe { ioctl_with_ptr(self, KVM_SET_XSAVE, xsave.as_ptr()) };
    if ret == 0 {
        Ok(())
    } else {
        errno_result()
    }
}
802
803
/// Captures KVM-specific vCPU state (pending events plus, when supported, the
/// raw nested-virtualization state) into a serializable snapshot.
fn get_hypervisor_specific_state(&self) -> Result<AnySnapshot> {
    let mut vcpu_evts: kvm_vcpu_events = Default::default();
    // SAFETY:
    // Safe because we know that our file is a VCPU fd, we know the kernel will only write
    // the correct amount of memory to our pointer, and we verify the return
    // result.
    let ret = { unsafe { ioctl_with_mut_ref(self, KVM_GET_VCPU_EVENTS, &mut vcpu_evts) } };
    if ret != 0 {
        return errno_result();
    }
    let interrupts = VcpuEvents::from(&vcpu_evts);
    // KVM_CHECK_EXTENSION(KVM_CAP_NESTED_STATE) returns the size in bytes of
    // the nested state buffer, or 0 when nested state is unsupported.
    let ret =
        // SAFETY:
        // Safe because we know that our file is a valid VM fd
        unsafe { ioctl_with_val(&self.vm, KVM_CHECK_EXTENSION, KVM_CAP_NESTED_STATE as u64) };
    if ret < 0 {
        return errno_result();
    }
    // 0 == unsupported
    let nested_state = if ret == 0 {
        Vec::new()
    } else {
        // Allocate a zeroed byte buffer of the kernel-reported size and set the
        // embedded `size` header field before handing it to the kernel.
        let mut nested_state: Vec<u8> = vec![0; ret as usize];
        let nested_state_ptr = nested_state.as_ptr() as *mut kvm_nested_state;
        assert!(nested_state_ptr.is_aligned());
        // SAFETY:
        // Casting this vector to kvm_nested_state meets all the requirements mentioned at
        // https://doc.rust-lang.org/std/ptr/index.html#pointer-to-reference-conversion
        // The pointer is validated to be aligned, the value is non-null, can be dereferenced,
        // the pointer points to kvm_nested_state, which holds zeroes and is valid.
        // No other references to this memory exist and no other operation happens while the
        // raw pointer is live; only the `size` field is written.
        unsafe {
            (*nested_state_ptr).size = ret as u32;
        }
        assert!(nested_state.as_ptr().is_aligned());
        // SAFETY:
        // Safe because we know out FD is a valid VCPU fd, and we got the size
        // of nested state from the KVM_CAP_NESTED_STATE call.
        let ret = unsafe {
            ioctl_with_mut_ptr(self, KVM_GET_NESTED_STATE, nested_state.as_mut_ptr())
        };
        if ret < 0 {
            return errno_result();
        }
        nested_state
    };
    AnySnapshot::to_any(HypervisorState {
        interrupts,
        nested_state,
    })
    .map_err(|e| {
        error!("failed to serialize hypervisor state: {:?}", e);
        Error::new(EIO)
    })
}
859
860
fn set_hypervisor_specific_state(&self, data: AnySnapshot) -> Result<()> {
861
let hypervisor_state = AnySnapshot::from_any::<HypervisorState>(data).map_err(|e| {
862
error!("failed to deserialize hypervisor_state: {:?}", e);
863
Error::new(EIO)
864
})?;
865
let vcpu_events = kvm_vcpu_events::from(&hypervisor_state.interrupts);
866
let ret = {
867
// SAFETY:
868
// Safe because we know that our file is a VCPU fd, we know the kernel will only read
869
// the correct amount of memory from our pointer, and we verify the return
870
// result.
871
unsafe { ioctl_with_ref(self, KVM_SET_VCPU_EVENTS, &vcpu_events) }
872
};
873
if ret != 0 {
874
return errno_result();
875
}
876
if hypervisor_state.nested_state.is_empty() {
877
return Ok(());
878
}
879
// SAFETY:
880
// Casting this vector to kvm_nested_state meets all the requirements mentioned at
881
// https://doc.rust-lang.org/std/ptr/index.html#pointer-to-reference-conversion
882
// The pointer is validated to be aligned, the value is non-null, can be dereferenced,
883
// the pointer points to Vec<u8>, which is initialized and a valid value.
884
// No other references to this point exist and no other operation happens. The memory
885
// is not modified by any operation. The pointer is dropped after validating that size is
886
// smaller than the vector length.
887
unsafe {
888
let vec_len = hypervisor_state.nested_state.len();
889
assert!(
890
(hypervisor_state.nested_state.as_ptr() as *const kvm_nested_state).is_aligned()
891
);
892
if (*(hypervisor_state.nested_state.as_ptr() as *const kvm_nested_state)).size
893
> vec_len as u32
894
{
895
error!("Invalued nested state data, size larger than vec allocated.");
896
return Err(Error::new(EINVAL));
897
}
898
}
899
// SAFETY:
900
// Safe because we know that our file is a VCPU fd, we know the kernel will only read
901
// the correct amount of memory from our pointer, and we verify the return
902
// result.
903
let ret = unsafe {
904
ioctl_with_ptr(
905
self,
906
KVM_SET_NESTED_STATE,
907
hypervisor_state.nested_state.as_ptr(),
908
)
909
};
910
if ret == 0 {
911
Ok(())
912
} else {
913
errno_result()
914
}
915
}
916
917
fn get_debugregs(&self) -> Result<DebugRegs> {
918
let mut regs: kvm_debugregs = Default::default();
919
// SAFETY:
920
// Safe because we know that our file is a VCPU fd, we know the kernel will only write the
921
// correct amount of memory to our pointer, and we verify the return result.
922
let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_DEBUGREGS, &mut regs) };
923
if ret == 0 {
924
Ok(DebugRegs::from(&regs))
925
} else {
926
errno_result()
927
}
928
}
929
930
fn set_debugregs(&self, dregs: &DebugRegs) -> Result<()> {
931
let dregs = kvm_debugregs::from(dregs);
932
let ret = {
933
// SAFETY:
934
// Here we trust the kernel not to read past the end of the kvm_debugregs struct.
935
unsafe { ioctl_with_ref(self, KVM_SET_DEBUGREGS, &dregs) }
936
};
937
if ret == 0 {
938
Ok(())
939
} else {
940
errno_result()
941
}
942
}
943
944
fn get_xcrs(&self) -> Result<BTreeMap<u32, u64>> {
945
let mut regs: kvm_xcrs = Default::default();
946
// SAFETY:
947
// Safe because we know that our file is a VCPU fd, we know the kernel will only write the
948
// correct amount of memory to our pointer, and we verify the return result.
949
let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_XCRS, &mut regs) };
950
if ret < 0 {
951
return errno_result();
952
}
953
954
Ok(regs
955
.xcrs
956
.iter()
957
.take(regs.nr_xcrs as usize)
958
.map(|kvm_xcr| (kvm_xcr.xcr, kvm_xcr.value))
959
.collect())
960
}
961
962
fn set_xcr(&self, xcr_index: u32, value: u64) -> Result<()> {
963
let mut kvm_xcr = kvm_xcrs {
964
nr_xcrs: 1,
965
..Default::default()
966
};
967
kvm_xcr.xcrs[0].xcr = xcr_index;
968
kvm_xcr.xcrs[0].value = value;
969
970
let ret = {
971
// SAFETY:
972
// Here we trust the kernel not to read past the end of the kvm_xcrs struct.
973
unsafe { ioctl_with_ref(self, KVM_SET_XCRS, &kvm_xcr) }
974
};
975
if ret == 0 {
976
Ok(())
977
} else {
978
errno_result()
979
}
980
}
981
982
fn get_msr(&self, msr_index: u32) -> Result<u64> {
983
let mut msrs = kvm_msrs::<[kvm_msr_entry; 1]>::new_zeroed();
984
msrs.nmsrs = 1;
985
msrs.entries[0].index = msr_index;
986
987
let ret = {
988
// SAFETY:
989
// Here we trust the kernel not to read or write past the end of the kvm_msrs struct.
990
unsafe { ioctl_with_mut_ref(self, KVM_GET_MSRS, &mut msrs) }
991
};
992
if ret < 0 {
993
return errno_result();
994
}
995
996
// KVM_GET_MSRS returns the number of msr entries written.
997
if ret != 1 {
998
return Err(base::Error::new(libc::ENOENT));
999
}
1000
1001
Ok(msrs.entries[0].data)
1002
}
1003
1004
    /// Reads every MSR the host KVM reports as gettable and returns an index -> value map.
    ///
    /// Queries KVM for its supported MSR index list, issues one bulk KVM_GET_MSRS for all
    /// of them, and fails with EPERM if the kernel returned fewer entries than requested.
    fn get_all_msrs(&self) -> Result<BTreeMap<u32, u64>> {
        let msr_index_list = self.kvm.get_msr_index_list()?;

        // Allocate a boxed DST kvm_msrs with one zeroed entry per requested index.
        let mut kvm_msrs =
            kvm_msrs::<[kvm_msr_entry]>::new_box_zeroed_with_elems(msr_index_list.len()).unwrap();
        kvm_msrs.nmsrs = msr_index_list.len() as u32;
        kvm_msrs
            .entries
            .iter_mut()
            .zip(msr_index_list.iter())
            .for_each(|(msr_entry, msr_index)| msr_entry.index = *msr_index);

        let ret = {
            // SAFETY:
            // Here we trust the kernel not to read or write past the end of the kvm_msrs struct.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_MSRS, &mut *kvm_msrs) }
        };
        if ret < 0 {
            return errno_result();
        }

        // KVM_GET_MSRS returns the number of msr entries written.
        let count = ret as usize;
        if count != msr_index_list.len() {
            // A partial read means some supported MSR was refused; treat as an error
            // rather than returning an incomplete map.
            error!(
                "failed to get all MSRs: requested {}, got {}",
                msr_index_list.len(),
                count,
            );
            return Err(base::Error::new(libc::EPERM));
        }

        let msrs = BTreeMap::from_iter(
            kvm_msrs
                .entries
                .iter()
                .map(|kvm_msr| (kvm_msr.index, kvm_msr.data)),
        );

        Ok(msrs)
    }
fn set_msr(&self, msr_index: u32, value: u64) -> Result<()> {
1047
let mut kvm_msrs = kvm_msrs::<[kvm_msr_entry; 1]>::new_zeroed();
1048
kvm_msrs.nmsrs = 1;
1049
kvm_msrs.entries[0].index = msr_index;
1050
kvm_msrs.entries[0].data = value;
1051
1052
let ret = {
1053
// SAFETY:
1054
// Here we trust the kernel not to read past the end of the kvm_msrs struct.
1055
unsafe { ioctl_with_ref(self, KVM_SET_MSRS, &kvm_msrs) }
1056
};
1057
if ret < 0 {
1058
return errno_result();
1059
}
1060
1061
// KVM_SET_MSRS returns the number of msr entries written.
1062
if ret != 1 {
1063
error!("failed to set MSR {:#x} to {:#x}", msr_index, value);
1064
return Err(base::Error::new(libc::EPERM));
1065
}
1066
1067
Ok(())
1068
}
1069
1070
fn set_cpuid(&self, cpuid: &CpuId) -> Result<()> {
1071
let cpuid = Box::<kvm_cpuid2<[kvm_cpuid_entry2]>>::from(cpuid);
1072
let ret = {
1073
// SAFETY:
1074
// Here we trust the kernel not to read past the end of the kvm_msrs struct.
1075
unsafe { ioctl_with_ref(self, KVM_SET_CPUID2, &*cpuid) }
1076
};
1077
if ret == 0 {
1078
Ok(())
1079
} else {
1080
errno_result()
1081
}
1082
}
1083
1084
fn set_guest_debug(&self, addrs: &[GuestAddress], enable_singlestep: bool) -> Result<()> {
1085
use kvm_sys::*;
1086
let mut dbg: kvm_guest_debug = Default::default();
1087
1088
if addrs.len() > 4 {
1089
error!(
1090
"Support 4 breakpoints at most but {} addresses are passed",
1091
addrs.len()
1092
);
1093
return Err(base::Error::new(libc::EINVAL));
1094
}
1095
1096
dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
1097
if enable_singlestep {
1098
dbg.control |= KVM_GUESTDBG_SINGLESTEP;
1099
}
1100
1101
// Set bits 9 and 10.
1102
// bit 9: GE (global exact breakpoint enable) flag.
1103
// bit 10: always 1.
1104
dbg.arch.debugreg[7] = 0x0600;
1105
1106
for (i, addr) in addrs.iter().enumerate() {
1107
dbg.arch.debugreg[i] = addr.0;
1108
// Set global breakpoint enable flag
1109
dbg.arch.debugreg[7] |= 2 << (i * 2);
1110
}
1111
1112
let ret = {
1113
// SAFETY:
1114
// Here we trust the kernel not to read past the end of the kvm_guest_debug struct.
1115
unsafe { ioctl_with_ref(self, KVM_SET_GUEST_DEBUG, &dbg) }
1116
};
1117
if ret == 0 {
1118
Ok(())
1119
} else {
1120
errno_result()
1121
}
1122
}
1123
1124
/// KVM does not support the VcpuExit::Cpuid exit type.
1125
fn handle_cpuid(&mut self, _entry: &CpuIdEntry) -> Result<()> {
1126
Err(Error::new(ENXIO))
1127
}
1128
1129
fn restore_timekeeping(&self, _host_tsc_reference_moment: u64, _tsc_offset: u64) -> Result<()> {
1130
// On KVM, the TSC MSR is restored as part of SET_MSRS, and no further action is required.
1131
Ok(())
1132
}
1133
}
1134
1135
impl KvmVcpu {
    /// X86 specific call to get the state of the "Local Advanced Programmable Interrupt
    /// Controller".
    ///
    /// See the documentation for KVM_GET_LAPIC.
    pub fn get_lapic(&self) -> Result<kvm_lapic_state> {
        let mut klapic: kvm_lapic_state = Default::default();

        let ret = {
            // SAFETY:
            // The ioctl is unsafe unless you trust the kernel not to write past the end of the
            // local_apic struct.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_LAPIC, &mut klapic) }
        };
        if ret < 0 {
            return errno_result();
        }
        Ok(klapic)
    }

    /// X86 specific call to set the state of the "Local Advanced Programmable Interrupt
    /// Controller".
    ///
    /// See the documentation for KVM_SET_LAPIC.
    pub fn set_lapic(&self, klapic: &kvm_lapic_state) -> Result<()> {
        let ret = {
            // SAFETY:
            // The ioctl is safe because the kernel will only read from the klapic struct.
            unsafe { ioctl_with_ref(self, KVM_SET_LAPIC, klapic) }
        };
        if ret < 0 {
            return errno_result();
        }
        Ok(())
    }

    /// X86 specific call to get the value of the APIC_BASE MSR.
    ///
    /// See the documentation for The kvm_run structure, and for KVM_GET_LAPIC.
    pub fn get_apic_base(&self) -> Result<u64> {
        // Thin wrapper over the generic MSR accessor.
        self.get_msr(MSR_IA32_APICBASE)
    }

    /// X86 specific call to set the value of the APIC_BASE MSR.
    ///
    /// See the documentation for The kvm_run structure, and for KVM_GET_LAPIC.
    pub fn set_apic_base(&self, apic_base: u64) -> Result<()> {
        // Thin wrapper over the generic MSR accessor.
        self.set_msr(MSR_IA32_APICBASE, apic_base)
    }

    /// Call to get pending interrupts acknowledged by the APIC but not yet injected into the CPU.
    ///
    /// See the documentation for KVM_GET_SREGS.
    pub fn get_interrupt_bitmap(&self) -> Result<[u64; 4usize]> {
        let mut regs: kvm_sregs = Default::default();
        // SAFETY:
        // Safe because we know that our file is a VCPU fd, we know the kernel will only write the
        // correct amount of memory to our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS, &mut regs) };
        if ret >= 0 {
            Ok(regs.interrupt_bitmap)
        } else {
            errno_result()
        }
    }

    /// Call to set pending interrupts acknowledged by the APIC but not yet injected into the CPU.
    ///
    /// Performs a read-modify-write of the full sregs: fetch, patch only the interrupt
    /// bitmap, then write everything back.
    ///
    /// See the documentation for KVM_GET_SREGS.
    pub fn set_interrupt_bitmap(&self, interrupt_bitmap: [u64; 4usize]) -> Result<()> {
        // Potentially racy code. Vcpu registers are set in a separate thread and this could result
        // in Sregs being modified from the Vcpu initialization thread and the Irq restoring
        // thread.
        let mut regs: kvm_sregs = Default::default();
        // SAFETY:
        // Safe because we know that our file is a VCPU fd, we know the kernel will only write the
        // correct amount of memory to our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS, &mut regs) };
        if ret >= 0 {
            regs.interrupt_bitmap = interrupt_bitmap;
            // SAFETY:
            // Safe because we know that our file is a VCPU fd, we know the kernel will only read
            // the correct amount of memory from our pointer, and we verify the return
            // result.
            let ret = unsafe { ioctl_with_ref(self, KVM_SET_SREGS, &regs) };
            if ret >= 0 {
                Ok(())
            } else {
                errno_result()
            }
        } else {
            errno_result()
        }
    }
}
impl<'a> From<&'a kvm_cpuid2<[kvm_cpuid_entry2]>> for CpuId {
1232
fn from(kvm_cpuid: &'a kvm_cpuid2<[kvm_cpuid_entry2]>) -> CpuId {
1233
let kvm_entries = &kvm_cpuid.entries[..kvm_cpuid.nent as usize];
1234
let mut cpu_id_entries = Vec::with_capacity(kvm_entries.len());
1235
1236
for entry in kvm_entries {
1237
let cpu_id_entry = CpuIdEntry {
1238
function: entry.function,
1239
index: entry.index,
1240
flags: entry.flags,
1241
cpuid: CpuidResult {
1242
eax: entry.eax,
1243
ebx: entry.ebx,
1244
ecx: entry.ecx,
1245
edx: entry.edx,
1246
},
1247
};
1248
cpu_id_entries.push(cpu_id_entry)
1249
}
1250
CpuId { cpu_id_entries }
1251
}
1252
}
1253
1254
impl From<&CpuId> for Box<kvm_cpuid2<[kvm_cpuid_entry2]>> {
1255
fn from(cpuid: &CpuId) -> Box<kvm_cpuid2<[kvm_cpuid_entry2]>> {
1256
let mut kvm =
1257
kvm_cpuid2::<[kvm_cpuid_entry2]>::new_box_zeroed_with_elems(cpuid.cpu_id_entries.len())
1258
.unwrap();
1259
kvm.nent = cpuid.cpu_id_entries.len().try_into().unwrap();
1260
for (i, &e) in cpuid.cpu_id_entries.iter().enumerate() {
1261
kvm.entries[i] = kvm_cpuid_entry2 {
1262
function: e.function,
1263
index: e.index,
1264
flags: e.flags,
1265
eax: e.cpuid.eax,
1266
ebx: e.cpuid.ebx,
1267
ecx: e.cpuid.ecx,
1268
edx: e.cpuid.edx,
1269
..Default::default()
1270
};
1271
}
1272
kvm
1273
}
1274
}
1275
1276
impl From<&ClockState> for kvm_clock_data {
1277
fn from(state: &ClockState) -> Self {
1278
kvm_clock_data {
1279
clock: state.clock,
1280
..Default::default()
1281
}
1282
}
1283
}
1284
1285
impl From<&kvm_clock_data> for ClockState {
1286
fn from(clock_data: &kvm_clock_data) -> Self {
1287
ClockState {
1288
clock: clock_data.clock,
1289
}
1290
}
1291
}
1292
1293
impl From<&kvm_pic_state> for PicState {
1294
fn from(item: &kvm_pic_state) -> Self {
1295
PicState {
1296
last_irr: item.last_irr,
1297
irr: item.irr,
1298
imr: item.imr,
1299
isr: item.isr,
1300
priority_add: item.priority_add,
1301
irq_base: item.irq_base,
1302
read_reg_select: item.read_reg_select != 0,
1303
poll: item.poll != 0,
1304
special_mask: item.special_mask != 0,
1305
init_state: item.init_state.into(),
1306
auto_eoi: item.auto_eoi != 0,
1307
rotate_on_auto_eoi: item.rotate_on_auto_eoi != 0,
1308
special_fully_nested_mode: item.special_fully_nested_mode != 0,
1309
use_4_byte_icw: item.init4 != 0,
1310
elcr: item.elcr,
1311
elcr_mask: item.elcr_mask,
1312
}
1313
}
1314
}
1315
1316
impl From<&PicState> for kvm_pic_state {
1317
fn from(item: &PicState) -> Self {
1318
kvm_pic_state {
1319
last_irr: item.last_irr,
1320
irr: item.irr,
1321
imr: item.imr,
1322
isr: item.isr,
1323
priority_add: item.priority_add,
1324
irq_base: item.irq_base,
1325
read_reg_select: item.read_reg_select as u8,
1326
poll: item.poll as u8,
1327
special_mask: item.special_mask as u8,
1328
init_state: item.init_state as u8,
1329
auto_eoi: item.auto_eoi as u8,
1330
rotate_on_auto_eoi: item.rotate_on_auto_eoi as u8,
1331
special_fully_nested_mode: item.special_fully_nested_mode as u8,
1332
init4: item.use_4_byte_icw as u8,
1333
elcr: item.elcr,
1334
elcr_mask: item.elcr_mask,
1335
}
1336
}
1337
}
1338
1339
impl From<&kvm_ioapic_state> for IoapicState {
1340
fn from(item: &kvm_ioapic_state) -> Self {
1341
let mut state = IoapicState {
1342
base_address: item.base_address,
1343
ioregsel: item.ioregsel as u8,
1344
ioapicid: item.id,
1345
current_interrupt_level_bitmap: item.irr,
1346
redirect_table: [IoapicRedirectionTableEntry::default(); NUM_IOAPIC_PINS],
1347
};
1348
for (in_state, out_state) in item.redirtbl.iter().zip(state.redirect_table.iter_mut()) {
1349
*out_state = in_state.into();
1350
}
1351
state
1352
}
1353
}
1354
1355
impl From<&IoapicRedirectionTableEntry> for kvm_ioapic_state__bindgen_ty_1 {
1356
fn from(item: &IoapicRedirectionTableEntry) -> Self {
1357
kvm_ioapic_state__bindgen_ty_1 {
1358
// IoapicRedirectionTableEntry layout matches the exact bit layout of a hardware
1359
// ioapic redirection table entry, so we can simply do a 64-bit copy
1360
bits: item.get(0, 64),
1361
}
1362
}
1363
}
1364
1365
// Reads the raw 64-bit union value from the kernel and reinterprets it as a typed
// redirection-table entry.
impl From<&kvm_ioapic_state__bindgen_ty_1> for IoapicRedirectionTableEntry {
    fn from(item: &kvm_ioapic_state__bindgen_ty_1) -> Self {
        let mut entry = IoapicRedirectionTableEntry::default();
        // SAFETY:
        // Safe because the 64-bit layout of the IoapicRedirectionTableEntry matches the kvm_sys
        // table entry layout, so reading the `bits` union field is always valid.
        entry.set(0, 64, unsafe { item.bits });
        entry
    }
}
impl From<&IoapicState> for kvm_ioapic_state {
1377
fn from(item: &IoapicState) -> Self {
1378
let mut state = kvm_ioapic_state {
1379
base_address: item.base_address,
1380
ioregsel: item.ioregsel as u32,
1381
id: item.ioapicid,
1382
irr: item.current_interrupt_level_bitmap,
1383
..Default::default()
1384
};
1385
for (in_state, out_state) in item.redirect_table.iter().zip(state.redirtbl.iter_mut()) {
1386
*out_state = in_state.into();
1387
}
1388
state
1389
}
1390
}
1391
1392
impl From<&LapicState> for kvm_lapic_state {
1393
fn from(item: &LapicState) -> Self {
1394
let mut state = kvm_lapic_state::default();
1395
// There are 64 lapic registers
1396
for (reg, value) in item.regs.iter().enumerate() {
1397
// Each lapic register is 16 bytes, but only the first 4 are used
1398
let reg_offset = 16 * reg;
1399
let regs_slice = &mut state.regs[reg_offset..reg_offset + 4];
1400
1401
// to_le_bytes() produces an array of u8, not i8(c_char), so we can't directly use
1402
// copy_from_slice().
1403
for (i, v) in value.to_le_bytes().iter().enumerate() {
1404
regs_slice[i] = *v as i8;
1405
}
1406
}
1407
state
1408
}
1409
}
1410
1411
impl From<&kvm_lapic_state> for LapicState {
1412
fn from(item: &kvm_lapic_state) -> Self {
1413
let mut state = LapicState { regs: [0; 64] };
1414
// There are 64 lapic registers
1415
for reg in 0..64 {
1416
// Each lapic register is 16 bytes, but only the first 4 are used
1417
let reg_offset = 16 * reg;
1418
1419
// from_le_bytes() only works on arrays of u8, not i8(c_char).
1420
let reg_slice = &item.regs[reg_offset..reg_offset + 4];
1421
let mut bytes = [0u8; 4];
1422
for i in 0..4 {
1423
bytes[i] = reg_slice[i] as u8;
1424
}
1425
state.regs[reg] = u32::from_le_bytes(bytes);
1426
}
1427
state
1428
}
1429
}
1430
1431
impl From<&PitState> for kvm_pit_state2 {
1432
fn from(item: &PitState) -> Self {
1433
kvm_pit_state2 {
1434
channels: [
1435
kvm_pit_channel_state::from(&item.channels[0]),
1436
kvm_pit_channel_state::from(&item.channels[1]),
1437
kvm_pit_channel_state::from(&item.channels[2]),
1438
],
1439
flags: item.flags,
1440
..Default::default()
1441
}
1442
}
1443
}
1444
1445
impl From<&kvm_pit_state2> for PitState {
1446
fn from(item: &kvm_pit_state2) -> Self {
1447
PitState {
1448
channels: [
1449
PitChannelState::from(&item.channels[0]),
1450
PitChannelState::from(&item.channels[1]),
1451
PitChannelState::from(&item.channels[2]),
1452
],
1453
flags: item.flags,
1454
}
1455
}
1456
}
1457
1458
impl From<&PitChannelState> for kvm_pit_channel_state {
1459
fn from(item: &PitChannelState) -> Self {
1460
kvm_pit_channel_state {
1461
count: item.count,
1462
latched_count: item.latched_count,
1463
count_latched: item.count_latched as u8,
1464
status_latched: item.status_latched as u8,
1465
status: item.status,
1466
read_state: item.read_state as u8,
1467
write_state: item.write_state as u8,
1468
// kvm's write_latch only stores the low byte of the reload value
1469
write_latch: item.reload_value as u8,
1470
rw_mode: item.rw_mode as u8,
1471
mode: item.mode,
1472
bcd: item.bcd as u8,
1473
gate: item.gate as u8,
1474
count_load_time: item.count_load_time as i64,
1475
}
1476
}
1477
}
1478
1479
impl From<&kvm_pit_channel_state> for PitChannelState {
1480
fn from(item: &kvm_pit_channel_state) -> Self {
1481
PitChannelState {
1482
count: item.count,
1483
latched_count: item.latched_count,
1484
count_latched: item.count_latched.into(),
1485
status_latched: item.status_latched != 0,
1486
status: item.status,
1487
read_state: item.read_state.into(),
1488
write_state: item.write_state.into(),
1489
// kvm's write_latch only stores the low byte of the reload value
1490
reload_value: item.write_latch as u16,
1491
rw_mode: item.rw_mode.into(),
1492
mode: item.mode,
1493
bcd: item.bcd != 0,
1494
gate: item.gate != 0,
1495
count_load_time: item.count_load_time as u64,
1496
}
1497
}
1498
}
1499
1500
// This function translates an IrqSrouceChip to the kvm u32 equivalent. It has a different
1501
// implementation between x86_64 and aarch64 because the irqchip KVM constants are not defined on
1502
// all architectures.
1503
pub(super) fn chip_to_kvm_chip(chip: IrqSourceChip) -> u32 {
1504
match chip {
1505
IrqSourceChip::PicPrimary => KVM_IRQCHIP_PIC_MASTER,
1506
IrqSourceChip::PicSecondary => KVM_IRQCHIP_PIC_SLAVE,
1507
IrqSourceChip::Ioapic => KVM_IRQCHIP_IOAPIC,
1508
_ => {
1509
error!("Invalid IrqChipSource for X86 {:?}", chip);
1510
0
1511
}
1512
}
1513
}
1514
1515
impl From<&kvm_regs> for Regs {
1516
fn from(r: &kvm_regs) -> Self {
1517
Regs {
1518
rax: r.rax,
1519
rbx: r.rbx,
1520
rcx: r.rcx,
1521
rdx: r.rdx,
1522
rsi: r.rsi,
1523
rdi: r.rdi,
1524
rsp: r.rsp,
1525
rbp: r.rbp,
1526
r8: r.r8,
1527
r9: r.r9,
1528
r10: r.r10,
1529
r11: r.r11,
1530
r12: r.r12,
1531
r13: r.r13,
1532
r14: r.r14,
1533
r15: r.r15,
1534
rip: r.rip,
1535
rflags: r.rflags,
1536
}
1537
}
1538
}
1539
1540
impl From<&Regs> for kvm_regs {
1541
fn from(r: &Regs) -> Self {
1542
kvm_regs {
1543
rax: r.rax,
1544
rbx: r.rbx,
1545
rcx: r.rcx,
1546
rdx: r.rdx,
1547
rsi: r.rsi,
1548
rdi: r.rdi,
1549
rsp: r.rsp,
1550
rbp: r.rbp,
1551
r8: r.r8,
1552
r9: r.r9,
1553
r10: r.r10,
1554
r11: r.r11,
1555
r12: r.r12,
1556
r13: r.r13,
1557
r14: r.r14,
1558
r15: r.r15,
1559
rip: r.rip,
1560
rflags: r.rflags,
1561
}
1562
}
1563
}
1564
1565
// Converts the hypervisor-agnostic `VcpuEvents` into KVM's event struct. Every optional
// field that is `Some` also sets the matching KVM_VCPUEVENT_VALID_* bit in `flags` so the
// kernel knows which fields to honor.
impl From<&VcpuEvents> for kvm_vcpu_events {
    fn from(ve: &VcpuEvents) -> Self {
        let mut kvm_ve: kvm_vcpu_events = Default::default();

        kvm_ve.exception.injected = ve.exception.injected as u8;
        kvm_ve.exception.nr = ve.exception.nr;
        kvm_ve.exception.has_error_code = ve.exception.has_error_code as u8;
        // `pending` and the exception payload travel together under VALID_PAYLOAD.
        if let Some(pending) = ve.exception.pending {
            kvm_ve.exception.pending = pending as u8;
            if ve.exception_payload.is_some() {
                kvm_ve.exception_has_payload = true as u8;
            }
            kvm_ve.exception_payload = ve.exception_payload.unwrap_or(0);
            kvm_ve.flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
        }
        kvm_ve.exception.error_code = ve.exception.error_code;

        kvm_ve.interrupt.injected = ve.interrupt.injected as u8;
        kvm_ve.interrupt.nr = ve.interrupt.nr;
        kvm_ve.interrupt.soft = ve.interrupt.soft as u8;
        if let Some(shadow) = ve.interrupt.shadow {
            kvm_ve.interrupt.shadow = shadow;
            kvm_ve.flags |= KVM_VCPUEVENT_VALID_SHADOW;
        }

        kvm_ve.nmi.injected = ve.nmi.injected as u8;
        if let Some(pending) = ve.nmi.pending {
            kvm_ve.nmi.pending = pending as u8;
            kvm_ve.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
        }
        kvm_ve.nmi.masked = ve.nmi.masked as u8;

        if let Some(sipi_vector) = ve.sipi_vector {
            kvm_ve.sipi_vector = sipi_vector;
            kvm_ve.flags |= KVM_VCPUEVENT_VALID_SIPI_VECTOR;
        }

        if let Some(smm) = ve.smi.smm {
            kvm_ve.smi.smm = smm as u8;
            kvm_ve.flags |= KVM_VCPUEVENT_VALID_SMM;
        }
        kvm_ve.smi.pending = ve.smi.pending as u8;
        kvm_ve.smi.smm_inside_nmi = ve.smi.smm_inside_nmi as u8;
        kvm_ve.smi.latched_init = ve.smi.latched_init;

        if let Some(pending) = ve.triple_fault.pending {
            kvm_ve.triple_fault.pending = pending as u8;
            kvm_ve.flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT;
        }
        kvm_ve
    }
}
impl From<&kvm_vcpu_events> for VcpuEvents {
1619
fn from(ve: &kvm_vcpu_events) -> Self {
1620
let exception = VcpuExceptionState {
1621
injected: ve.exception.injected != 0,
1622
nr: ve.exception.nr,
1623
has_error_code: ve.exception.has_error_code != 0,
1624
pending: if ve.flags & KVM_VCPUEVENT_VALID_PAYLOAD != 0 {
1625
Some(ve.exception.pending != 0)
1626
} else {
1627
None
1628
},
1629
error_code: ve.exception.error_code,
1630
};
1631
1632
let interrupt = VcpuInterruptState {
1633
injected: ve.interrupt.injected != 0,
1634
nr: ve.interrupt.nr,
1635
soft: ve.interrupt.soft != 0,
1636
shadow: if ve.flags & KVM_VCPUEVENT_VALID_SHADOW != 0 {
1637
Some(ve.interrupt.shadow)
1638
} else {
1639
None
1640
},
1641
};
1642
1643
let nmi = VcpuNmiState {
1644
injected: ve.interrupt.injected != 0,
1645
pending: if ve.flags & KVM_VCPUEVENT_VALID_NMI_PENDING != 0 {
1646
Some(ve.nmi.pending != 0)
1647
} else {
1648
None
1649
},
1650
masked: ve.nmi.masked != 0,
1651
};
1652
1653
let sipi_vector = if ve.flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR != 0 {
1654
Some(ve.sipi_vector)
1655
} else {
1656
None
1657
};
1658
1659
let smi = VcpuSmiState {
1660
smm: if ve.flags & KVM_VCPUEVENT_VALID_SMM != 0 {
1661
Some(ve.smi.smm != 0)
1662
} else {
1663
None
1664
},
1665
pending: ve.smi.pending != 0,
1666
smm_inside_nmi: ve.smi.smm_inside_nmi != 0,
1667
latched_init: ve.smi.latched_init,
1668
};
1669
1670
let triple_fault = VcpuTripleFaultState {
1671
pending: if ve.flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT != 0 {
1672
Some(ve.triple_fault.pending != 0)
1673
} else {
1674
None
1675
},
1676
};
1677
1678
let exception_payload = if ve.flags & KVM_VCPUEVENT_VALID_PAYLOAD != 0 {
1679
Some(ve.exception_payload)
1680
} else {
1681
None
1682
};
1683
1684
VcpuEvents {
1685
exception,
1686
interrupt,
1687
nmi,
1688
sipi_vector,
1689
smi,
1690
triple_fault,
1691
exception_payload,
1692
}
1693
}
1694
}
1695
1696
impl From<&kvm_segment> for Segment {
1697
fn from(s: &kvm_segment) -> Self {
1698
Segment {
1699
base: s.base,
1700
limit_bytes: s.limit,
1701
selector: s.selector,
1702
type_: s.type_,
1703
present: s.present,
1704
dpl: s.dpl,
1705
db: s.db,
1706
s: s.s,
1707
l: s.l,
1708
g: s.g,
1709
avl: s.avl,
1710
}
1711
}
1712
}
1713
1714
impl From<&Segment> for kvm_segment {
1715
fn from(s: &Segment) -> Self {
1716
kvm_segment {
1717
base: s.base,
1718
limit: s.limit_bytes,
1719
selector: s.selector,
1720
type_: s.type_,
1721
present: s.present,
1722
dpl: s.dpl,
1723
db: s.db,
1724
s: s.s,
1725
l: s.l,
1726
g: s.g,
1727
avl: s.avl,
1728
unusable: match s.present {
1729
0 => 1,
1730
_ => 0,
1731
},
1732
..Default::default()
1733
}
1734
}
1735
}
1736
1737
impl From<&kvm_dtable> for DescriptorTable {
1738
fn from(dt: &kvm_dtable) -> Self {
1739
DescriptorTable {
1740
base: dt.base,
1741
limit: dt.limit,
1742
}
1743
}
1744
}
1745
1746
impl From<&DescriptorTable> for kvm_dtable {
1747
fn from(dt: &DescriptorTable) -> Self {
1748
kvm_dtable {
1749
base: dt.base,
1750
limit: dt.limit,
1751
..Default::default()
1752
}
1753
}
1754
}
1755
1756
impl From<&kvm_sregs> for Sregs {
1757
fn from(r: &kvm_sregs) -> Self {
1758
Sregs {
1759
cs: Segment::from(&r.cs),
1760
ds: Segment::from(&r.ds),
1761
es: Segment::from(&r.es),
1762
fs: Segment::from(&r.fs),
1763
gs: Segment::from(&r.gs),
1764
ss: Segment::from(&r.ss),
1765
tr: Segment::from(&r.tr),
1766
ldt: Segment::from(&r.ldt),
1767
gdt: DescriptorTable::from(&r.gdt),
1768
idt: DescriptorTable::from(&r.idt),
1769
cr0: r.cr0,
1770
cr2: r.cr2,
1771
cr3: r.cr3,
1772
cr4: r.cr4,
1773
cr8: r.cr8,
1774
efer: r.efer,
1775
}
1776
}
1777
}
1778
1779
impl From<&kvm_fpu> for Fpu {
1780
fn from(r: &kvm_fpu) -> Self {
1781
Fpu {
1782
fpr: FpuReg::from_16byte_arrays(&r.fpr),
1783
fcw: r.fcw,
1784
fsw: r.fsw,
1785
ftwx: r.ftwx,
1786
last_opcode: r.last_opcode,
1787
last_ip: r.last_ip,
1788
last_dp: r.last_dp,
1789
xmm: r.xmm,
1790
mxcsr: r.mxcsr,
1791
}
1792
}
1793
}
1794
1795
impl From<&Fpu> for kvm_fpu {
1796
fn from(r: &Fpu) -> Self {
1797
kvm_fpu {
1798
fpr: FpuReg::to_16byte_arrays(&r.fpr),
1799
fcw: r.fcw,
1800
fsw: r.fsw,
1801
ftwx: r.ftwx,
1802
last_opcode: r.last_opcode,
1803
last_ip: r.last_ip,
1804
last_dp: r.last_dp,
1805
xmm: r.xmm,
1806
mxcsr: r.mxcsr,
1807
..Default::default()
1808
}
1809
}
1810
}
1811
1812
impl From<&kvm_debugregs> for DebugRegs {
1813
fn from(r: &kvm_debugregs) -> Self {
1814
DebugRegs {
1815
db: r.db,
1816
dr6: r.dr6,
1817
dr7: r.dr7,
1818
}
1819
}
1820
}
1821
1822
impl From<&DebugRegs> for kvm_debugregs {
1823
fn from(r: &DebugRegs) -> Self {
1824
kvm_debugregs {
1825
db: r.db,
1826
dr6: r.dr6,
1827
dr7: r.dr7,
1828
..Default::default()
1829
}
1830
}
1831
}
1832
1833
#[cfg(test)]
mod tests {
    use super::*;

    // Round-trips a fully-populated kvm_vcpu_events through VcpuEvents and back,
    // checking every field in both directions.
    #[test]
    fn vcpu_event_to_from() {
        // All data is random.
        let mut kvm_ve: kvm_vcpu_events = Default::default();
        kvm_ve.exception.injected = 1;
        kvm_ve.exception.nr = 65;
        kvm_ve.exception.has_error_code = 1;
        kvm_ve.exception.error_code = 110;
        kvm_ve.exception.pending = 1;

        kvm_ve.interrupt.injected = 1;
        kvm_ve.interrupt.nr = 100;
        kvm_ve.interrupt.soft = 1;
        kvm_ve.interrupt.shadow = 114;

        kvm_ve.nmi.injected = 1;
        kvm_ve.nmi.pending = 1;
        kvm_ve.nmi.masked = 0;

        kvm_ve.sipi_vector = 105;

        kvm_ve.smi.smm = 1;
        kvm_ve.smi.pending = 1;
        kvm_ve.smi.smm_inside_nmi = 1;
        kvm_ve.smi.latched_init = 100;

        kvm_ve.triple_fault.pending = 0;

        kvm_ve.exception_payload = 33;
        kvm_ve.exception_has_payload = 1;

        // VALID_TRIPLE_FAULT is deliberately left out so triple_fault maps to None.
        kvm_ve.flags = 0
            | KVM_VCPUEVENT_VALID_PAYLOAD
            | KVM_VCPUEVENT_VALID_SMM
            | KVM_VCPUEVENT_VALID_NMI_PENDING
            | KVM_VCPUEVENT_VALID_SIPI_VECTOR
            | KVM_VCPUEVENT_VALID_SHADOW;

        // kvm -> crosvm direction.
        let ve: VcpuEvents = VcpuEvents::from(&kvm_ve);
        assert_eq!(ve.exception.injected, true);
        assert_eq!(ve.exception.nr, 65);
        assert_eq!(ve.exception.has_error_code, true);
        assert_eq!(ve.exception.error_code, 110);
        assert_eq!(ve.exception.pending.unwrap(), true);

        assert_eq!(ve.interrupt.injected, true);
        assert_eq!(ve.interrupt.nr, 100);
        assert_eq!(ve.interrupt.soft, true);
        assert_eq!(ve.interrupt.shadow.unwrap(), 114);

        assert_eq!(ve.nmi.injected, true);
        assert_eq!(ve.nmi.pending.unwrap(), true);
        assert_eq!(ve.nmi.masked, false);

        assert_eq!(ve.sipi_vector.unwrap(), 105);

        assert_eq!(ve.smi.smm.unwrap(), true);
        assert_eq!(ve.smi.pending, true);
        assert_eq!(ve.smi.smm_inside_nmi, true);
        assert_eq!(ve.smi.latched_init, 100);

        assert_eq!(ve.triple_fault.pending, None);

        assert_eq!(ve.exception_payload.unwrap(), 33);

        // crosvm -> kvm direction: the restored struct must match the original fields.
        let kvm_ve_restored: kvm_vcpu_events = kvm_vcpu_events::from(&ve);
        assert_eq!(kvm_ve_restored.exception.injected, 1);
        assert_eq!(kvm_ve_restored.exception.nr, 65);
        assert_eq!(kvm_ve_restored.exception.has_error_code, 1);
        assert_eq!(kvm_ve_restored.exception.error_code, 110);
        assert_eq!(kvm_ve_restored.exception.pending, 1);

        assert_eq!(kvm_ve_restored.interrupt.injected, 1);
        assert_eq!(kvm_ve_restored.interrupt.nr, 100);
        assert_eq!(kvm_ve_restored.interrupt.soft, 1);
        assert_eq!(kvm_ve_restored.interrupt.shadow, 114);

        assert_eq!(kvm_ve_restored.nmi.injected, 1);
        assert_eq!(kvm_ve_restored.nmi.pending, 1);
        assert_eq!(kvm_ve_restored.nmi.masked, 0);

        assert_eq!(kvm_ve_restored.sipi_vector, 105);

        assert_eq!(kvm_ve_restored.smi.smm, 1);
        assert_eq!(kvm_ve_restored.smi.pending, 1);
        assert_eq!(kvm_ve_restored.smi.smm_inside_nmi, 1);
        assert_eq!(kvm_ve_restored.smi.latched_init, 100);

        assert_eq!(kvm_ve_restored.triple_fault.pending, 0);

        assert_eq!(kvm_ve_restored.exception_payload, 33);
        assert_eq!(kvm_ve_restored.exception_has_payload, 1);
    }
}