Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
google
GitHub Repository: google/crosvm
Path: blob/main/hypervisor/src/geniezone/mod.rs
5394 views
1
// Copyright 2023 The ChromiumOS Authors
2
// Use of this source code is governed by a BSD-style license that can be
3
// found in the LICENSE file.
4
5
pub mod geniezone_sys;
6
7
use std::cmp::Reverse;
8
use std::collections::BTreeMap;
9
use std::collections::BinaryHeap;
10
use std::convert::TryFrom;
11
use std::ffi::CString;
12
use std::mem::offset_of;
13
use std::os::raw::c_ulong;
14
use std::os::unix::prelude::OsStrExt;
15
use std::path::Path;
16
use std::path::PathBuf;
17
use std::sync::Arc;
18
19
use aarch64_sys_reg::AArch64SysRegId;
20
use anyhow::Context;
21
use base::errno_result;
22
use base::error;
23
use base::ioctl;
24
use base::ioctl_with_mut_ref;
25
use base::ioctl_with_ref;
26
use base::ioctl_with_val;
27
use base::pagesize;
28
use base::AsRawDescriptor;
29
use base::Error;
30
use base::Event;
31
use base::FromRawDescriptor;
32
use base::MappedRegion;
33
use base::MemoryMapping;
34
use base::MemoryMappingBuilder;
35
use base::MmapError;
36
use base::Protection;
37
use base::RawDescriptor;
38
use base::Result;
39
use base::SafeDescriptor;
40
use cros_fdt::Fdt;
41
pub use geniezone_sys::*;
42
use libc::open;
43
use libc::EFAULT;
44
use libc::EINVAL;
45
use libc::EIO;
46
use libc::ENOENT;
47
use libc::ENOMEM;
48
use libc::ENOSPC;
49
use libc::ENOTSUP;
50
use libc::EOVERFLOW;
51
use libc::O_CLOEXEC;
52
use libc::O_RDWR;
53
use snapshot::AnySnapshot;
54
use sync::Mutex;
55
use vm_memory::GuestAddress;
56
use vm_memory::GuestMemory;
57
use vm_memory::MemoryRegionPurpose;
58
59
use crate::BalloonEvent;
60
use crate::ClockState;
61
use crate::Config;
62
use crate::Datamatch;
63
use crate::DeviceKind;
64
use crate::Hypervisor;
65
use crate::HypervisorCap;
66
use crate::HypervisorKind;
67
use crate::IoEventAddress;
68
use crate::IoOperation;
69
use crate::IoParams;
70
use crate::MemCacheType;
71
use crate::MemSlot;
72
use crate::PsciVersion;
73
use crate::Vcpu;
74
use crate::VcpuAArch64;
75
use crate::VcpuExit;
76
use crate::VcpuFeature;
77
use crate::VcpuRegAArch64;
78
use crate::VcpuSignalHandle;
79
use crate::VcpuSignalHandleInner;
80
use crate::Vm;
81
use crate::VmAArch64;
82
use crate::VmCap;
83
use crate::PSCI_0_2;
84
85
impl Geniezone {
86
/// Get the size of guest physical addresses (IPA) in bits.
87
pub fn get_guest_phys_addr_bits(&self) -> u8 {
88
// SAFETY:
89
// Safe because we know self is a real geniezone fd
90
match unsafe { ioctl_with_val(self, GZVM_CHECK_EXTENSION, GZVM_CAP_ARM_VM_IPA_SIZE.into()) }
91
{
92
// Default physical address size is 40 bits if the extension is not supported.
93
ret if ret <= 0 => 40,
94
ipa => ipa as u8,
95
}
96
}
97
}
98
99
impl GeniezoneVm {
    /// Does platform specific initialization for the GeniezoneVm.
    ///
    /// Currently the only arch-specific step is enabling the MTE capability
    /// when `cfg.mte` is set.
    pub fn init_arch(&self, cfg: &Config) -> Result<()> {
        if cfg.mte {
            // SAFETY:
            // Safe because it does not take pointer arguments.
            unsafe {
                self.ctrl_geniezone_enable_capability(GeniezoneCap::ArmMte, &[0, 0, 0, 0, 0])
            }?;
        }
        Ok(())
    }

    /// Checks if a particular `VmCap` is available, or returns None if arch-independent
    /// Vm.check_capability() should handle the check.
    pub fn check_capability_arch(&self, _c: VmCap) -> Option<bool> {
        // No arch-specific overrides: defer every capability to the generic path.
        None
    }

    /// Arch-specific implementation of `Vm::get_pvclock`. Always returns an error on AArch64.
    pub fn get_pvclock_arch(&self) -> Result<ClockState> {
        // TODO: Geniezone not support pvclock currently
        error!("Geniezone: not support get_pvclock_arch");
        Err(Error::new(EINVAL))
    }

    /// Arch-specific implementation of `Vm::set_pvclock`. Always returns an error on AArch64.
    pub fn set_pvclock_arch(&self, _state: &ClockState) -> Result<()> {
        // TODO: Geniezone not support pvclock currently
        error!("Geniezone: not support set_pvclock_arch");
        Err(Error::new(EINVAL))
    }

    /// Queries the protected-VM capability and returns the value the hypervisor
    /// writes back in `args[1]` — used by `load_protected_vm_firmware` as the
    /// required pvmfw size.
    fn get_protected_vm_info(&self) -> Result<u64> {
        // SAFETY:
        // Safe because we allocated the struct and we know the kernel won't write beyond the end of
        // the struct or keep a pointer to it.
        let cap: gzvm_enable_cap = unsafe {
            self.ctrl_geniezone_enable_capability(
                GeniezoneCap::ArmProtectedVm,
                &[GZVM_CAP_ARM_PVM_GET_PVMFW_SIZE as u64, 0, 0, 0, 0],
            )
        }?;
        Ok(cap.args[1])
    }

    /// Tells the hypervisor the guest physical address (IPA) at which the
    /// protected-VM firmware has been placed.
    fn set_protected_vm_firmware_ipa(&self, fw_addr: GuestAddress) -> Result<()> {
        // SAFETY:
        // Safe because none of the args are pointers.
        unsafe {
            self.ctrl_geniezone_enable_capability(
                GeniezoneCap::ArmProtectedVm,
                &[GZVM_CAP_ARM_PVM_SET_PVMFW_IPA as u64, fw_addr.0, 0, 0, 0],
            )
        }?;
        Ok(())
    }
}
157
158
impl VmAArch64 for GeniezoneVm {
159
fn get_hypervisor(&self) -> &dyn Hypervisor {
160
&self.geniezone
161
}
162
163
fn load_protected_vm_firmware(
164
&mut self,
165
fw_addr: GuestAddress,
166
fw_max_size: u64,
167
) -> Result<()> {
168
let size: u64 = self.get_protected_vm_info()?;
169
if size == 0 {
170
Err(Error::new(EINVAL))
171
} else {
172
if size > fw_max_size {
173
return Err(Error::new(ENOMEM));
174
}
175
self.set_protected_vm_firmware_ipa(fw_addr)
176
}
177
}
178
179
fn create_vcpu(&self, id: usize) -> Result<Box<dyn VcpuAArch64>> {
180
Ok(Box::new(GeniezoneVm::create_vcpu(self, id)?))
181
}
182
183
fn create_fdt(&self, _fdt: &mut Fdt, _phandles: &BTreeMap<&str, u32>) -> cros_fdt::Result<()> {
184
Ok(())
185
}
186
187
fn init_arch(
188
&self,
189
_payload_entry_address: GuestAddress,
190
fdt_address: GuestAddress,
191
fdt_size: usize,
192
) -> anyhow::Result<()> {
193
let dtb_config = gzvm_dtb_config {
194
dtb_addr: fdt_address.offset(),
195
dtb_size: fdt_size.try_into().unwrap(),
196
};
197
// SAFETY:
198
// Safe because we allocated the struct and we know the kernel will modify exactly the size
199
// of the struct.
200
let ret = unsafe { ioctl_with_ref(self, GZVM_SET_DTB_CONFIG, &dtb_config) };
201
if ret == 0 {
202
Ok(())
203
} else {
204
errno_result().context("GZVM_SET_DTB_CONFIG failed")
205
}
206
}
207
}
208
209
impl GeniezoneVcpu {
    /// Writes a `u64` value into the vCPU register identified by `gzvm_reg_id`.
    fn set_one_geniezone_reg_u64(
        &self,
        gzvm_reg_id: GeniezoneVcpuRegister,
        data: u64,
    ) -> Result<()> {
        // Serialize in native endianness; the kernel reads raw register bytes.
        self.set_one_geniezone_reg(gzvm_reg_id, data.to_ne_bytes().as_slice())
    }

    /// Sets one register via the `GZVM_SET_ONE_REG` ioctl; `data` holds the raw
    /// register bytes and must be the size the register ID encodes.
    fn set_one_geniezone_reg(&self, gzvm_reg_id: GeniezoneVcpuRegister, data: &[u8]) -> Result<()> {
        // The kernel reads the register value from the user address in `addr`.
        let onereg = gzvm_one_reg {
            id: gzvm_reg_id.into(),
            addr: (data.as_ptr() as usize)
                .try_into()
                .expect("can't represent usize as u64"),
        };
        // SAFETY:
        // Safe because we allocated the struct and we know the kernel will read exactly the size of
        // the struct.
        let ret = unsafe { ioctl_with_ref(self, GZVM_SET_ONE_REG, &onereg) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Reads the vCPU register identified by `gzvm_reg_id` as a `u64`.
    fn get_one_geniezone_reg_u64(&self, gzvm_reg_id: GeniezoneVcpuRegister) -> Result<u64> {
        let mut bytes = 0u64.to_ne_bytes();
        self.get_one_geniezone_reg(gzvm_reg_id, bytes.as_mut_slice())?;
        Ok(u64::from_ne_bytes(bytes))
    }

    /// Gets one register via the `GZVM_GET_ONE_REG` ioctl; the kernel writes the
    /// raw register bytes into `data`.
    fn get_one_geniezone_reg(
        &self,
        gzvm_reg_id: GeniezoneVcpuRegister,
        data: &mut [u8],
    ) -> Result<()> {
        let onereg = gzvm_one_reg {
            id: gzvm_reg_id.into(),
            addr: (data.as_mut_ptr() as usize)
                .try_into()
                .expect("can't represent usize as u64"),
        };

        // SAFETY:
        // Safe because we allocated the struct and we know the kernel will read exactly the size of
        // the struct.
        let ret = unsafe { ioctl_with_ref(self, GZVM_GET_ONE_REG, &onereg) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }
}
265
266
// Not every variant is constructed in this file; keep the full register set anyway.
#[allow(dead_code)]
/// GZVM registers as used by the `GET_ONE_REG`/`SET_ONE_REG` ioctl API
pub enum GeniezoneVcpuRegister {
    /// General Purpose Registers X0-X30
    X(u8),
    /// Stack Pointer
    Sp,
    /// Program Counter
    Pc,
    /// Processor State
    Pstate,
    /// FP & SIMD Registers V0-V31
    V(u8),
    /// Geniezone Firmware Pseudo-Registers
    Firmware(u16),
    /// System Registers
    System(AArch64SysRegId),
    /// CCSIDR_EL1 Demultiplexed by CSSELR_EL1
    Ccsidr(u8),
}
286
287
/// Gives the `u64` register ID expected by the `GET_ONE_REG`/`SET_ONE_REG` ioctl API.
impl From<GeniezoneVcpuRegister> for u64 {
    fn from(register: GeniezoneVcpuRegister) -> Self {
        // A register ID is GZVM_REG_ARM64 OR-ed with a size field, a kind field, and
        // kind-specific low bits (an offset, an index, or a sysreg encoding).
        const fn reg(size: u64, kind: u64, fields: u64) -> u64 {
            GZVM_REG_ARM64 | size | kind | fields
        }

        // Core registers are addressed by their offset into `gzvm_regs`, expressed in
        // 32-bit (u32) units.
        const fn gzvm_regs_reg(size: u64, offset: usize) -> u64 {
            let offset = offset / std::mem::size_of::<u32>();

            reg(size, GZVM_REG_ARM_CORE as u64, offset as u64)
        }

        // 64-bit core register at the given byte offset into `gzvm_regs`.
        const fn gzvm_reg(offset: usize) -> u64 {
            gzvm_regs_reg(GZVM_REG_SIZE_U64, offset)
        }

        // Banked SPSR: index into the `spsr` array of `gzvm_regs` (u64 elements).
        fn spsr_reg(spsr_reg: u32) -> u64 {
            let n = std::mem::size_of::<u64>() * (spsr_reg as usize);
            gzvm_reg(offset_of!(gzvm_regs, spsr) + n)
        }

        // Register inside the embedded `regs` (user_pt_regs) field of `gzvm_regs`.
        fn user_pt_reg(offset: usize) -> u64 {
            gzvm_regs_reg(GZVM_REG_SIZE_U64, offset_of!(gzvm_regs, regs) + offset)
        }

        // Register inside the embedded `fp_regs` (user_fpsimd_state) field of `gzvm_regs`.
        fn user_fpsimd_state_reg(size: u64, offset: usize) -> u64 {
            gzvm_regs_reg(size, offset_of!(gzvm_regs, fp_regs) + offset)
        }

        const fn reg_u64(kind: u64, fields: u64) -> u64 {
            reg(GZVM_REG_SIZE_U64, kind, fields)
        }

        // Demultiplexed register (e.g. CCSIDR_EL1 selected via CSSELR_EL1): the ID
        // carries both a demux id and a demux value, each masked into its own field.
        const fn demux_reg(size: u64, index: u64, value: u64) -> u64 {
            let index =
                (index << GZVM_REG_ARM_DEMUX_ID_SHIFT) & (GZVM_REG_ARM_DEMUX_ID_MASK as u64);
            let value =
                (value << GZVM_REG_ARM_DEMUX_VAL_SHIFT) & (GZVM_REG_ARM_DEMUX_VAL_MASK as u64);

            reg(size, GZVM_REG_ARM_DEMUX as u64, index | value)
        }

        match register {
            GeniezoneVcpuRegister::X(n @ 0..=30) => {
                // Xn registers are consecutive u64 slots in user_pt_regs.regs.
                let n = std::mem::size_of::<u64>() * (n as usize);

                user_pt_reg(offset_of!(user_pt_regs, regs) + n)
            }
            GeniezoneVcpuRegister::X(n) => {
                unreachable!("invalid GeniezoneVcpuRegister Xn index: {n}")
            }
            GeniezoneVcpuRegister::Sp => user_pt_reg(offset_of!(user_pt_regs, sp)),
            GeniezoneVcpuRegister::Pc => user_pt_reg(offset_of!(user_pt_regs, pc)),
            GeniezoneVcpuRegister::Pstate => user_pt_reg(offset_of!(user_pt_regs, pstate)),
            GeniezoneVcpuRegister::V(n @ 0..=31) => {
                // Vn registers are consecutive u128 slots in user_fpsimd_state.vregs.
                let n = std::mem::size_of::<u128>() * (n as usize);
                user_fpsimd_state_reg(GZVM_REG_SIZE_U128, offset_of!(user_fpsimd_state, vregs) + n)
            }
            GeniezoneVcpuRegister::V(n) => {
                unreachable!("invalid GeniezoneVcpuRegister Vn index: {n}")
            }
            // FPSR/FPCR are 32-bit fields of the FP/SIMD state, not generic sysregs.
            GeniezoneVcpuRegister::System(aarch64_sys_reg::FPSR) => {
                user_fpsimd_state_reg(GZVM_REG_SIZE_U32, offset_of!(user_fpsimd_state, fpsr))
            }
            GeniezoneVcpuRegister::System(aarch64_sys_reg::FPCR) => {
                user_fpsimd_state_reg(GZVM_REG_SIZE_U32, offset_of!(user_fpsimd_state, fpcr))
            }
            // Banked SPSRs map onto the spsr[] array rather than the sysreg encoding.
            GeniezoneVcpuRegister::System(aarch64_sys_reg::SPSR_EL1) => spsr_reg(0),
            GeniezoneVcpuRegister::System(aarch64_sys_reg::SPSR_abt) => spsr_reg(1),
            GeniezoneVcpuRegister::System(aarch64_sys_reg::SPSR_und) => spsr_reg(2),
            GeniezoneVcpuRegister::System(aarch64_sys_reg::SPSR_irq) => spsr_reg(3),
            GeniezoneVcpuRegister::System(aarch64_sys_reg::SPSR_fiq) => spsr_reg(4),
            GeniezoneVcpuRegister::System(aarch64_sys_reg::SP_EL1) => {
                gzvm_reg(offset_of!(gzvm_regs, sp_el1))
            }
            GeniezoneVcpuRegister::System(aarch64_sys_reg::ELR_EL1) => {
                gzvm_reg(offset_of!(gzvm_regs, elr_el1))
            }
            // All other system registers use their architectural encoding directly.
            GeniezoneVcpuRegister::System(sysreg) => {
                reg_u64(GZVM_REG_ARM64_SYSREG.into(), sysreg.encoded().into())
            }
            GeniezoneVcpuRegister::Firmware(n) => reg_u64(GZVM_REG_ARM, n.into()),
            GeniezoneVcpuRegister::Ccsidr(n) => demux_reg(GZVM_REG_SIZE_U32, 0, n.into()),
        }
    }
}
374
375
impl From<VcpuRegAArch64> for GeniezoneVcpuRegister {
376
fn from(reg: VcpuRegAArch64) -> Self {
377
match reg {
378
VcpuRegAArch64::X(n @ 0..=30) => Self::X(n),
379
VcpuRegAArch64::X(n) => unreachable!("invalid VcpuRegAArch64 index: {n}"),
380
VcpuRegAArch64::Sp => Self::Sp,
381
VcpuRegAArch64::Pc => Self::Pc,
382
VcpuRegAArch64::Pstate => Self::Pstate,
383
VcpuRegAArch64::System(sysreg) => Self::System(sysreg),
384
}
385
}
386
}
387
388
impl VcpuAArch64 for GeniezoneVcpu {
    fn init(&self, _features: &[VcpuFeature]) -> Result<()> {
        // Geniezone init vcpu in creation
        // Return Ok since aarch64/src/lib.rs will use this
        Ok(())
    }

    fn init_pmu(&self, _irq: u64) -> Result<()> {
        // TODO: Geniezone not support pmu currently
        // temporary return ok since aarch64/src/lib.rs will use this
        Ok(())
    }

    fn has_pvtime_support(&self) -> bool {
        // TODO: Geniezone not support pvtime currently
        false
    }

    fn init_pvtime(&self, _pvtime_ipa: u64) -> Result<()> {
        // TODO: Geniezone not support pvtime currently
        error!("Geniezone: not support init_pvtime");
        Err(Error::new(EINVAL))
    }

    // Delegates to the GZVM one-reg helpers after translating the register ID.
    fn set_one_reg(&self, reg_id: VcpuRegAArch64, data: u64) -> Result<()> {
        self.set_one_geniezone_reg_u64(GeniezoneVcpuRegister::from(reg_id), data)
    }

    fn get_one_reg(&self, reg_id: VcpuRegAArch64) -> Result<u64> {
        self.get_one_geniezone_reg_u64(GeniezoneVcpuRegister::from(reg_id))
    }

    // 128-bit vector register access is not implemented for geniezone.
    fn set_vector_reg(&self, _reg_num: u8, _data: u128) -> Result<()> {
        unimplemented!()
    }

    fn get_vector_reg(&self, _reg_num: u8) -> Result<u128> {
        unimplemented!()
    }

    fn get_psci_version(&self) -> Result<PsciVersion> {
        // Reported unconditionally; the hypervisor is not queried here.
        Ok(PSCI_0_2)
    }

    fn get_max_hw_bps(&self) -> Result<usize> {
        // TODO: Geniezone not support gdb currently
        error!("Geniezone: not support get_max_hw_bps");
        Err(Error::new(EINVAL))
    }

    fn get_system_regs(&self) -> Result<BTreeMap<AArch64SysRegId, u64>> {
        error!("Geniezone: not support get_system_regs");
        Err(Error::new(EINVAL))
    }

    fn get_cache_info(&self) -> Result<BTreeMap<u8, u64>> {
        error!("Geniezone: not support get_cache_info");
        Err(Error::new(EINVAL))
    }

    fn set_cache_info(&self, _cache_info: BTreeMap<u8, u64>) -> Result<()> {
        error!("Geniezone: not support set_cache_info");
        Err(Error::new(EINVAL))
    }

    fn hypervisor_specific_snapshot(&self) -> anyhow::Result<AnySnapshot> {
        // TODO: Geniezone not support gdb currently
        Err(anyhow::anyhow!(
            "Geniezone: not support hypervisor_specific_snapshot"
        ))
    }

    fn hypervisor_specific_restore(&self, _data: AnySnapshot) -> anyhow::Result<()> {
        // TODO: Geniezone not support gdb currently
        Err(anyhow::anyhow!(
            "Geniezone: not support hypervisor_specific_restore"
        ))
    }

    fn set_guest_debug(&self, _addrs: &[GuestAddress], _enable_singlestep: bool) -> Result<()> {
        // TODO: Geniezone not support gdb currently
        error!("Geniezone: not support set_guest_debug");
        Err(Error::new(EINVAL))
    }
}
473
474
// Wrapper around GZVM_SET_USER_MEMORY_REGION ioctl, which creates, modifies, or deletes a mapping
// from guest physical to host user pages.
//
// `_read_only` and `_log_dirty_pages` are accepted for interface parity with other
// hypervisor backends but are not encoded into the GZVM region; only `flags` is passed
// through to the kernel.
//
// SAFETY:
// Safe when the guest regions are guaranteed not to overlap.
unsafe fn set_user_memory_region(
    descriptor: &SafeDescriptor,
    slot: MemSlot,
    _read_only: bool,
    _log_dirty_pages: bool,
    guest_addr: u64,
    memory_size: u64,
    userspace_addr: *mut u8,
    flags: u32,
) -> Result<()> {
    let region = gzvm_userspace_memory_region {
        slot,
        flags,
        guest_phys_addr: guest_addr,
        memory_size,
        userspace_addr: userspace_addr as u64,
    };

    let ret = ioctl_with_ref(descriptor, GZVM_SET_USER_MEMORY_REGION, &region);
    if ret == 0 {
        Ok(())
    } else {
        errno_result()
    }
}
504
505
/// Helper function to determine the size in bytes of a dirty log bitmap for the given memory region
506
/// size.
507
///
508
/// # Arguments
509
///
510
/// * `size` - Number of bytes in the memory region being queried.
511
pub fn dirty_log_bitmap_size(size: usize) -> usize {
512
let page_size = pagesize();
513
size.div_ceil(page_size).div_ceil(8)
514
}
515
516
/// Handle to the geniezone hypervisor device.
pub struct Geniezone {
    // Owned fd for the opened geniezone device (see `new_with_path`).
    geniezone: SafeDescriptor,
}
519
520
/// GZVM capability numbers, passed to GZVM_ENABLE_CAP / GZVM_CHECK_EXTENSION.
#[repr(u32)]
pub enum GeniezoneCap {
    // NOTE(review): no explicit discriminant, so this is 0 — confirm 0 matches the
    // kernel's capability number for ARM MTE.
    ArmMte,
    ArmProtectedVm = GZVM_CAP_ARM_PROTECTED_VM,
}
525
526
impl Geniezone {
527
pub fn new_with_path(device_path: &Path) -> Result<Geniezone> {
528
let c_path = CString::new(device_path.as_os_str().as_bytes()).unwrap();
529
// SAFETY:
530
// Open calls are safe because we give a nul-terminated string and verify the result.
531
let ret = unsafe { open(c_path.as_ptr(), O_RDWR | O_CLOEXEC) };
532
if ret < 0 {
533
return errno_result();
534
}
535
Ok(Geniezone {
536
// SAFETY:
537
// Safe because we verify that ret is valid and we own the fd.
538
geniezone: unsafe { SafeDescriptor::from_raw_descriptor(ret) },
539
})
540
}
541
542
/// Opens `/dev/gzvm/` and returns a gzvm object on success.
543
pub fn new() -> Result<Geniezone> {
544
Geniezone::new_with_path(&PathBuf::from("/dev/gzvm"))
545
}
546
547
/// Gets the size of the mmap required to use vcpu's `gzvm_vcpu_run` structure.
548
pub fn get_vcpu_mmap_size(&self) -> Result<usize> {
549
// We don't use mmap, return sizeof(gzvm_vcpu_run) directly
550
let res = std::mem::size_of::<gzvm_vcpu_run>();
551
Ok(res)
552
}
553
}
554
555
impl AsRawDescriptor for Geniezone {
    // Exposes the raw fd of the underlying geniezone device.
    fn as_raw_descriptor(&self) -> RawDescriptor {
        self.geniezone.as_raw_descriptor()
    }
}
560
561
impl Hypervisor for Geniezone {
562
fn try_clone(&self) -> Result<Self> {
563
Ok(Geniezone {
564
geniezone: self.geniezone.try_clone()?,
565
})
566
}
567
568
fn check_capability(&self, cap: HypervisorCap) -> bool {
569
match cap {
570
HypervisorCap::UserMemory => true,
571
HypervisorCap::ImmediateExit => true,
572
HypervisorCap::StaticSwiotlbAllocationRequired => true,
573
HypervisorCap::HypervisorInitializedBootContext => false,
574
}
575
}
576
}
577
578
/// A wrapper around creating and using a Geniezone VM.
pub struct GeniezoneVm {
    // Handle to the hypervisor device, cloned from the `Geniezone` used at creation.
    geniezone: Geniezone,
    // The VM fd returned by GZVM_CREATE_VM.
    vm: SafeDescriptor,
    // Guest memory registered with the VM at creation time.
    guest_mem: GuestMemory,
    // Additional regions added after creation, keyed by slot; shared between clones.
    mem_regions: Arc<Mutex<BTreeMap<MemSlot, Box<dyn MappedRegion>>>>,
    /// A min heap of MemSlot numbers that were used and then removed and can now be re-used
    mem_slot_gaps: Arc<Mutex<BinaryHeap<Reverse<MemSlot>>>>,
}
587
588
impl GeniezoneVm {
    /// Constructs a new `GeniezoneVm` using the given `Geniezone` instance.
    ///
    /// Creates the VM fd with GZVM_CREATE_VM, registers every region of `guest_mem`
    /// with the kernel, then runs arch-specific init (`init_arch`).
    pub fn new(geniezone: &Geniezone, guest_mem: GuestMemory, cfg: Config) -> Result<GeniezoneVm> {
        // SAFETY:
        // Safe because we know gzvm is a real gzvm fd as this module is the only one that can make
        // gzvm objects.
        let ret = unsafe { ioctl(geniezone, GZVM_CREATE_VM) };
        if ret < 0 {
            return errno_result();
        }
        // SAFETY:
        // Safe because we verify that ret is valid and we own the fd.
        let vm_descriptor = unsafe { SafeDescriptor::from_raw_descriptor(ret) };
        for region in guest_mem.regions() {
            // Translate each region's purpose into the GZVM user-memory-region flag.
            let flags = match region.options.purpose {
                MemoryRegionPurpose::Bios => GZVM_USER_MEM_REGION_GUEST_MEM,
                MemoryRegionPurpose::GuestMemoryRegion => GZVM_USER_MEM_REGION_GUEST_MEM,
                MemoryRegionPurpose::ProtectedFirmwareRegion => GZVM_USER_MEM_REGION_PROTECT_FW,
                MemoryRegionPurpose::ReservedMemory => GZVM_USER_MEM_REGION_GUEST_MEM,
                MemoryRegionPurpose::StaticSwiotlbRegion => GZVM_USER_MEM_REGION_STATIC_SWIOTLB,
            };
            // SAFETY:
            // Safe because the guest regions are guaranteed not to overlap.
            unsafe {
                set_user_memory_region(
                    &vm_descriptor,
                    region.index as MemSlot,
                    false,
                    false,
                    region.guest_addr.offset(),
                    region.size as u64,
                    region.host_addr as *mut u8,
                    flags,
                )
            }?;
        }

        let vm = GeniezoneVm {
            geniezone: geniezone.try_clone()?,
            vm: vm_descriptor,
            guest_mem,
            mem_regions: Arc::new(Mutex::new(BTreeMap::new())),
            mem_slot_gaps: Arc::new(Mutex::new(BinaryHeap::new())),
        };
        vm.init_arch(&cfg)?;
        Ok(vm)
    }

    /// Creates a vCPU with the given zero-based `id` on this VM.
    fn create_vcpu(&self, id: usize) -> Result<GeniezoneVcpu> {
        // `run` is a data structure shared with the kernel module and geniezone.
        let run_mmap_size = self.geniezone.get_vcpu_mmap_size()?;

        let fd =
            // SAFETY:
            // Safe because we know that our file is a VM fd and we verify the return result.
            unsafe { ioctl_with_val(self, GZVM_CREATE_VCPU, c_ulong::try_from(id).unwrap()) };

        if fd < 0 {
            return errno_result();
        }

        // SAFETY:
        // Wrap the vcpu now in case the following ? returns early. This is safe because we verified
        // the value of the fd and we own the fd.
        let vcpu = unsafe { SafeDescriptor::from_raw_descriptor(fd) };

        // Memory mapping --> Memory allocation: anonymous memory stands in for the
        // mmap'd run structure other backends use.
        let run_mmap = MemoryMappingBuilder::new(run_mmap_size)
            .build()
            .map_err(|_| Error::new(ENOSPC))?;

        Ok(GeniezoneVcpu {
            vm: self.vm.try_clone()?,
            vcpu,
            id,
            run_mmap: Arc::new(run_mmap),
        })
    }

    /// Creates an in kernel interrupt controller.
    ///
    /// See the documentation on the GZVM_CREATE_IRQCHIP ioctl.
    pub fn create_irq_chip(&self) -> Result<()> {
        // SAFETY:
        // Safe because we know that our file is a VM fd and we verify the return result.
        let ret = unsafe { ioctl(self, GZVM_CREATE_IRQCHIP) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Sets the level on the given irq to 1 if `active` is true, and 0 otherwise.
    pub fn set_irq_line(&self, irq: u32, active: bool) -> Result<()> {
        let mut irq_level = gzvm_irq_level::default();
        // Writing a union field is safe; the kernel interprets it as the irq number.
        irq_level.__bindgen_anon_1.irq = irq;
        irq_level.level = active as u32;

        // SAFETY:
        // Safe because we know that our file is a VM fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, GZVM_IRQ_LINE, &irq_level) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Registers an event that will, when signalled, trigger the `gsi` irq, and `resample_evt`
    /// ( when not None ) will be triggered when the irqchip is resampled.
    pub fn register_irqfd(
        &self,
        gsi: u32,
        evt: &Event,
        resample_evt: Option<&Event>,
    ) -> Result<()> {
        let mut irqfd = gzvm_irqfd {
            fd: evt.as_raw_descriptor() as u32,
            gsi,
            ..Default::default()
        };

        if let Some(r_evt) = resample_evt {
            irqfd.flags = GZVM_IRQFD_FLAG_RESAMPLE;
            irqfd.resamplefd = r_evt.as_raw_descriptor() as u32;
        }

        // SAFETY:
        // Safe because we know that our file is a VM fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, GZVM_IRQFD, &irqfd) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Unregisters an event that was previously registered with
    /// `register_irqfd`.
    ///
    /// The `evt` and `gsi` pair must be the same as the ones passed into
    /// `register_irqfd`.
    pub fn unregister_irqfd(&self, gsi: u32, evt: &Event) -> Result<()> {
        let irqfd = gzvm_irqfd {
            fd: evt.as_raw_descriptor() as u32,
            gsi,
            flags: GZVM_IRQFD_FLAG_DEASSIGN,
            ..Default::default()
        };
        // SAFETY:
        // Safe because we know that our file is a VM fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, GZVM_IRQFD, &irqfd) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Shared implementation of ioevent (de)registration: builds a `gzvm_ioeventfd`
    /// from the address/datamatch and issues GZVM_IOEVENTFD. `deassign` selects
    /// removal instead of registration.
    fn ioeventfd(
        &self,
        evt: &Event,
        addr: IoEventAddress,
        datamatch: Datamatch,
        deassign: bool,
    ) -> Result<()> {
        // Decompose the datamatch into (enabled?, value, access length in bytes).
        let (do_datamatch, datamatch_value, datamatch_len) = match datamatch {
            Datamatch::AnyLength => (false, 0, 0),
            Datamatch::U8(v) => match v {
                Some(u) => (true, u as u64, 1),
                None => (false, 0, 1),
            },
            Datamatch::U16(v) => match v {
                Some(u) => (true, u as u64, 2),
                None => (false, 0, 2),
            },
            Datamatch::U32(v) => match v {
                Some(u) => (true, u as u64, 4),
                None => (false, 0, 4),
            },
            Datamatch::U64(v) => match v {
                Some(u) => (true, u, 8),
                None => (false, 0, 8),
            },
        };
        let mut flags = 0;
        if deassign {
            flags |= 1 << gzvm_ioeventfd_flag_nr_deassign;
        }
        if do_datamatch {
            flags |= 1 << gzvm_ioeventfd_flag_nr_datamatch
        }
        if let IoEventAddress::Pio(_) = addr {
            flags |= 1 << gzvm_ioeventfd_flag_nr_pio;
        }
        let ioeventfd = gzvm_ioeventfd {
            datamatch: datamatch_value,
            len: datamatch_len,
            addr: match addr {
                IoEventAddress::Pio(p) => p,
                IoEventAddress::Mmio(m) => m,
            },
            fd: evt.as_raw_descriptor(),
            flags,
            ..Default::default()
        };
        // SAFETY:
        // Safe because we know that our file is a VM fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, GZVM_IOEVENTFD, &ioeventfd) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Checks whether a particular GZVM-specific capability is available for this VM.
    fn check_raw_capability(&self, capability: GeniezoneCap) -> bool {
        let mut cap: u64 = capability as u64;
        // SAFETY:
        // Safe because we know that our file is a GZVM fd, and if the cap is invalid GZVM assumes
        // it's an unavailable extension and returns 0.
        unsafe {
            ioctl_with_mut_ref(self, GZVM_CHECK_EXTENSION, &mut cap);
        }
        // NOTE(review): the ioctl return value is ignored; availability is read from the
        // value the kernel writes back into `cap` — confirm this matches the GZVM ABI.
        cap == 1
    }

    // Currently only used on aarch64, but works on any architecture.
    #[allow(dead_code)]
    /// Enables a GZVM-specific capability for this VM, with the given arguments.
    ///
    /// # Safety
    /// This function is marked as unsafe because `args` may be interpreted as pointers for some
    /// capabilities. The caller must ensure that any pointers passed in the `args` array are
    /// allocated as the kernel expects, and that mutable pointers are owned.
    unsafe fn ctrl_geniezone_enable_capability(
        &self,
        capability: GeniezoneCap,
        args: &[u64; 5],
    ) -> Result<gzvm_enable_cap> {
        let gzvm_cap = gzvm_enable_cap {
            cap: capability as u64,
            args: *args,
        };
        // SAFETY:
        // Safe because we allocated the struct and we know the kernel will read exactly the size of
        // the struct, and because we assume the caller has allocated the args appropriately.
        let ret = ioctl_with_ref(self, GZVM_ENABLE_CAP, &gzvm_cap);
        if ret == 0 {
            // Return the struct so callers can read values the kernel wrote into `args`.
            Ok(gzvm_cap)
        } else {
            errno_result()
        }
    }

    /// Asks the kernel to create a GZVM device described by `dev`.
    pub fn create_geniezone_device(&self, dev: gzvm_create_device) -> Result<()> {
        // SAFETY:
        // Safe because we allocated the struct and we know the kernel will modify exactly the size
        // of the struct and the return value is checked.
        let ret = unsafe { base::ioctl_with_ref(self, GZVM_CREATE_DEVICE, &dev) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Balloon inflate: releases the backing pages for the given guest range.
    fn handle_inflate(&mut self, guest_address: GuestAddress, size: u64) -> Result<()> {
        match self.guest_mem.remove_range(guest_address, size) {
            Ok(_) => Ok(()),
            // Preserve the underlying errno when the failure was a syscall.
            Err(vm_memory::Error::MemoryAccess(_, MmapError::SystemCallFailed(e))) => Err(e),
            Err(_) => Err(Error::new(EIO)),
        }
    }

    fn handle_deflate(&mut self, _guest_address: GuestAddress, _size: u64) -> Result<()> {
        // No-op, when the guest attempts to access the pages again, Linux/GZVM will provide them.
        Ok(())
    }
}
873
874
impl Vm for GeniezoneVm {
875
fn try_clone(&self) -> Result<Self> {
    // Descriptor fields get duplicated fds; `mem_regions`/`mem_slot_gaps` are Arcs,
    // so both handles share the same slot bookkeeping.
    Ok(GeniezoneVm {
        geniezone: self.geniezone.try_clone()?,
        vm: self.vm.try_clone()?,
        guest_mem: self.guest_mem.clone(),
        mem_regions: self.mem_regions.clone(),
        mem_slot_gaps: self.mem_slot_gaps.clone(),
    })
}
884
885
fn try_clone_descriptor(&self) -> Result<SafeDescriptor> {
    // Deliberately unsupported until it has been validated on geniezone.
    error!("try_clone_descriptor hasn't been tested on geniezone, returning -ENOTSUP");
    Err(Error::new(ENOTSUP))
}
889
890
fn hypervisor_kind(&self) -> HypervisorKind {
    // This backend is always geniezone.
    HypervisorKind::Geniezone
}
893
894
fn check_capability(&self, c: VmCap) -> bool {
    // Give the arch-specific hook first refusal (currently always None).
    if let Some(val) = self.check_capability_arch(c) {
        return val;
    }
    match c {
        VmCap::ArmPmuV3 => false,
        // NOTE(review): DirtyLog is advertised here, yet get_dirty_log below returns
        // ENOTSUP — confirm which behavior is intended.
        VmCap::DirtyLog => true,
        VmCap::PvClock => false,
        // Protected VMs are available only if the raw GZVM capability reports so.
        VmCap::Protected => self.check_raw_capability(GeniezoneCap::ArmProtectedVm),
        VmCap::EarlyInitCpuid => false,
        VmCap::ReadOnlyMemoryRegion => false,
        VmCap::MemNoncoherentDma => false,
        VmCap::Sve => false,
    }
}
909
910
fn get_guest_phys_addr_bits(&self) -> u8 {
    // Delegate to the hypervisor-wide IPA-size query on the device fd.
    self.geniezone.get_guest_phys_addr_bits()
}
913
914
fn get_memory(&self) -> &GuestMemory {
    // Borrow of the guest memory registered at VM creation.
    &self.guest_mem
}
917
918
fn add_memory_region(
    &mut self,
    guest_addr: GuestAddress,
    mem: Box<dyn MappedRegion>,
    read_only: bool,
    log_dirty_pages: bool,
    _cache: MemCacheType,
) -> Result<MemSlot> {
    let pgsz = pagesize() as u64;
    // GZVM require to set the user memory region with page size aligned size. Safe to extend
    // the mem.size() to be page size aligned because the mmap will round up the size to be
    // page size aligned if it is not.
    let size = (mem.size() as u64).div_ceil(pgsz) * pgsz;
    let end_addr = guest_addr
        .checked_add(size)
        .ok_or_else(|| Error::new(EOVERFLOW))?;
    // New regions must not collide with the base guest memory.
    if self.guest_mem.range_overlap(guest_addr, end_addr) {
        return Err(Error::new(ENOSPC));
    }
    let mut regions = self.mem_regions.lock();
    let mut gaps = self.mem_slot_gaps.lock();
    // Reuse the smallest previously-freed slot if any; otherwise take the next
    // fresh slot after those occupied by the base guest memory regions.
    let slot = match gaps.pop() {
        Some(gap) => gap.0,
        None => (regions.len() + self.guest_mem.num_regions() as usize) as MemSlot,
    };
    let flags = 0;

    // SAFETY:
    // Safe because we check that the given guest address is valid and has no overlaps. We also
    // know that the pointer and size are correct because the MemoryMapping interface ensures
    // this. We take ownership of the memory mapping so that it won't be unmapped until the slot
    // is removed.
    let res = unsafe {
        set_user_memory_region(
            &self.vm,
            slot,
            read_only,
            log_dirty_pages,
            guest_addr.offset(),
            size,
            mem.as_ptr(),
            flags,
        )
    };

    if let Err(e) = res {
        // Return the unused slot to the free pool before propagating the error.
        gaps.push(Reverse(slot));
        return Err(e);
    }
    regions.insert(slot, mem);
    Ok(slot)
}
970
971
fn msync_memory_region(&mut self, slot: MemSlot, offset: usize, size: usize) -> Result<()> {
    let mut regions = self.mem_regions.lock();
    let mem = regions.get_mut(&slot).ok_or_else(|| Error::new(ENOENT))?;

    // Map mmap-level errors onto errno-style errors, preserving real syscall errors.
    mem.msync(offset, size).map_err(|err| match err {
        MmapError::InvalidAddress => Error::new(EFAULT),
        MmapError::NotPageAligned => Error::new(EINVAL),
        MmapError::SystemCallFailed(e) => e,
        _ => Error::new(EIO),
    })
}
982
983
fn madvise_pageout_memory_region(
    &mut self,
    _slot: MemSlot,
    _offset: usize,
    _size: usize,
) -> Result<()> {
    // Not implemented for geniezone.
    Err(Error::new(ENOTSUP))
}
991
992
fn madvise_remove_memory_region(
    &mut self,
    _slot: MemSlot,
    _offset: usize,
    _size: usize,
) -> Result<()> {
    // Not implemented for geniezone.
    Err(Error::new(ENOTSUP))
}
1000
1001
fn remove_memory_region(&mut self, slot: MemSlot) -> Result<Box<dyn MappedRegion>> {
    let mut regions = self.mem_regions.lock();
    if !regions.contains_key(&slot) {
        return Err(Error::new(ENOENT));
    }
    // SAFETY:
    // Safe because the slot is checked against the list of memory slots.
    unsafe {
        // Size 0 with a null host pointer requests slot removal (presumably mirroring
        // the KVM convention — confirm against the GZVM ABI).
        set_user_memory_region(&self.vm, slot, false, false, 0, 0, std::ptr::null_mut(), 0)?;
    }
    // Record the freed slot so add_memory_region can reuse it.
    self.mem_slot_gaps.lock().push(Reverse(slot));
    // This remove will always succeed because of the contains_key check above.
    Ok(regions.remove(&slot).unwrap())
}
1015
1016
fn create_device(&self, _kind: DeviceKind) -> Result<SafeDescriptor> {
    // This function should not be invoked because the vgic device is created in irqchip.
    // NOTE(review): errno_result() reports whatever errno happens to be set even though
    // no syscall failed here — consider a fixed error code instead.
    errno_result()
}
1020
1021
fn get_dirty_log(&self, _slot: MemSlot, _dirty_log: &mut [u8]) -> Result<()> {
1022
Err(Error::new(ENOTSUP))
1023
}
1024
1025
fn register_ioevent(
1026
&mut self,
1027
evt: &Event,
1028
addr: IoEventAddress,
1029
datamatch: Datamatch,
1030
) -> Result<()> {
1031
self.ioeventfd(evt, addr, datamatch, false)
1032
}
1033
1034
fn unregister_ioevent(
1035
&mut self,
1036
evt: &Event,
1037
addr: IoEventAddress,
1038
datamatch: Datamatch,
1039
) -> Result<()> {
1040
self.ioeventfd(evt, addr, datamatch, true)
1041
}
1042
1043
fn handle_io_events(&self, _addr: IoEventAddress, _data: &[u8]) -> Result<()> {
1044
// GZVM delivers IO events in-kernel with ioeventfds, so this is a no-op
1045
Ok(())
1046
}
1047
1048
fn enable_hypercalls(&mut self, _nr: u64, _count: usize) -> Result<()> {
1049
Err(Error::new(ENOTSUP))
1050
}
1051
1052
fn get_pvclock(&self) -> Result<ClockState> {
1053
self.get_pvclock_arch()
1054
}
1055
1056
fn set_pvclock(&self, state: &ClockState) -> Result<()> {
1057
self.set_pvclock_arch(state)
1058
}
1059
1060
fn add_fd_mapping(
1061
&mut self,
1062
slot: u32,
1063
offset: usize,
1064
size: usize,
1065
fd: &dyn AsRawDescriptor,
1066
fd_offset: u64,
1067
prot: Protection,
1068
) -> Result<()> {
1069
let mut regions = self.mem_regions.lock();
1070
let region = regions.get_mut(&slot).ok_or_else(|| Error::new(EINVAL))?;
1071
1072
match region.add_fd_mapping(offset, size, fd, fd_offset, prot) {
1073
Ok(()) => Ok(()),
1074
Err(MmapError::SystemCallFailed(e)) => Err(e),
1075
Err(_) => Err(Error::new(EIO)),
1076
}
1077
}
1078
1079
fn remove_mapping(&mut self, slot: u32, offset: usize, size: usize) -> Result<()> {
1080
let mut regions = self.mem_regions.lock();
1081
let region = regions.get_mut(&slot).ok_or_else(|| Error::new(EINVAL))?;
1082
1083
match region.remove_mapping(offset, size) {
1084
Ok(()) => Ok(()),
1085
Err(MmapError::SystemCallFailed(e)) => Err(e),
1086
Err(_) => Err(Error::new(EIO)),
1087
}
1088
}
1089
1090
fn handle_balloon_event(&mut self, event: BalloonEvent) -> Result<()> {
1091
match event {
1092
BalloonEvent::Inflate(m) => self.handle_inflate(m.guest_address, m.size),
1093
BalloonEvent::Deflate(m) => self.handle_deflate(m.guest_address, m.size),
1094
BalloonEvent::BalloonTargetReached(_) => Ok(()),
1095
}
1096
}
1097
}
1098
1099
impl AsRawDescriptor for GeniezoneVm {
1100
fn as_raw_descriptor(&self) -> RawDescriptor {
1101
self.vm.as_raw_descriptor()
1102
}
1103
}
1104
1105
// Handle used to request an immediate exit of a running vCPU from another thread.
struct GeniezoneVcpuSignalHandle {
    // Shared mapping of the vCPU's `gzvm_vcpu_run` structure; the `Arc` keeps the mapping
    // alive even if the vCPU object itself is dropped while a handle is outstanding.
    run_mmap: Arc<MemoryMapping>,
}
impl VcpuSignalHandleInner for GeniezoneVcpuSignalHandle {
1110
fn signal_immediate_exit(&self) {
1111
// SAFETY: we ensure `run_mmap` is a valid mapping of `kvm_run` at creation time, and the
1112
// `Arc` ensures the mapping still exists while we hold a reference to it.
1113
unsafe {
1114
let run = self.run_mmap.as_ptr() as *mut gzvm_vcpu_run;
1115
(*run).immediate_exit = 1;
1116
}
1117
}
1118
}
1119
1120
/// A wrapper around using a Geniezone Vcpu.
pub struct GeniezoneVcpu {
    // Descriptor of the owning VM; cloned alongside the vCPU in `try_clone`.
    vm: SafeDescriptor,
    // Descriptor of this vCPU, used for the GZVM_RUN ioctl.
    vcpu: SafeDescriptor,
    // Index of this vCPU within the VM.
    id: usize,
    // Shared mapping of the kernel's `gzvm_vcpu_run` structure for this vCPU; shared with
    // signal handles so other threads can flag an immediate exit.
    run_mmap: Arc<MemoryMapping>,
}
impl Vcpu for GeniezoneVcpu {
    /// Creates an additional handle to the same vCPU by duplicating both descriptors and
    /// sharing the run-structure mapping.
    fn try_clone(&self) -> Result<Self> {
        let vm = self.vm.try_clone()?;
        let vcpu = self.vcpu.try_clone()?;

        Ok(GeniezoneVcpu {
            vm,
            vcpu,
            id: self.id,
            run_mmap: self.run_mmap.clone(),
        })
    }

    fn as_vcpu(&self) -> &dyn Vcpu {
        self
    }

    /// Returns this vCPU's index within the VM.
    fn id(&self) -> usize {
        self.id
    }

    #[allow(clippy::cast_ptr_alignment)]
    /// Sets or clears the `immediate_exit` flag in the shared run structure, causing the
    /// next (or current) `run()` to return promptly.
    fn set_immediate_exit(&self, exit: bool) {
        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut gzvm_vcpu_run) };
        run.immediate_exit = exit as u8;
    }

    /// Returns a thread-safe handle that can flag this vCPU for immediate exit; the handle
    /// shares ownership of the run-structure mapping.
    fn signal_handle(&self) -> VcpuSignalHandle {
        VcpuSignalHandle {
            inner: Box::new(GeniezoneVcpuSignalHandle {
                run_mmap: self.run_mmap.clone(),
            }),
        }
    }

    /// No per-vCPU work is needed when the VM is suspended.
    fn on_suspend(&self) -> Result<()> {
        Ok(())
    }

    /// Raw capability enablement is not supported for Geniezone vCPUs.
    unsafe fn enable_raw_capability(&self, _cap: u32, _args: &[u64; 4]) -> Result<()> {
        Err(Error::new(libc::ENXIO))
    }

    #[allow(clippy::cast_ptr_alignment)]
    // The pointer is page aligned so casting to a different type is well defined, hence the clippy
    // allow attribute.
    /// Runs the guest vCPU until it exits, then translates the kernel-reported exit reason
    /// into a `VcpuExit`.
    ///
    /// Panics on exit reasons the backend does not recognize.
    fn run(&mut self) -> Result<VcpuExit> {
        // SAFETY:
        // Safe because we know that our file is a VCPU fd and we verify the return result.
        let ret = unsafe { ioctl_with_val(self, GZVM_RUN, self.run_mmap.as_ptr() as u64) };
        if ret != 0 {
            return errno_result();
        }

        // SAFETY:
        // Safe because we know we mapped enough memory to hold the gzvm_vcpu_run struct because the
        // kernel told us how large it was.
        let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut gzvm_vcpu_run) };

        match run.exit_reason {
            GZVM_EXIT_MMIO => Ok(VcpuExit::Mmio),
            GZVM_EXIT_IRQ => Ok(VcpuExit::IrqWindowOpen),
            GZVM_EXIT_HVC => Ok(VcpuExit::Hypercall),
            GZVM_EXIT_EXCEPTION => Err(Error::new(EINVAL)),
            GZVM_EXIT_DEBUG => Ok(VcpuExit::Debug),
            GZVM_EXIT_FAIL_ENTRY => {
                // SAFETY:
                // Safe because the exit_reason (which comes from the kernel) told us which
                // union field to use.
                let hardware_entry_failure_reason = unsafe {
                    run.__bindgen_anon_1
                        .fail_entry
                        .hardware_entry_failure_reason
                };
                Ok(VcpuExit::FailEntry {
                    hardware_entry_failure_reason,
                })
            }
            GZVM_EXIT_SYSTEM_EVENT => {
                // SAFETY:
                // Safe because the exit_reason (which comes from the kernel) told us which
                // union field to use.
                let event_type = unsafe { run.__bindgen_anon_1.system_event.type_ };
                match event_type {
                    GZVM_SYSTEM_EVENT_SHUTDOWN => Ok(VcpuExit::SystemEventShutdown),
                    GZVM_SYSTEM_EVENT_RESET => Ok(VcpuExit::SystemEventReset),
                    GZVM_SYSTEM_EVENT_CRASH => Ok(VcpuExit::SystemEventCrash),
                    _ => {
                        error!("Unknown GZVM system event {}", event_type);
                        Err(Error::new(EINVAL))
                    }
                }
            }
            GZVM_EXIT_INTERNAL_ERROR => Ok(VcpuExit::InternalError),
            GZVM_EXIT_SHUTDOWN => Ok(VcpuExit::Shutdown(Ok(()))),
            GZVM_EXIT_UNKNOWN => panic!("unknown gzvm exit reason\n"),
            r => panic!("unknown gzvm exit reason: {r}"),
        }
    }

    /// Completes an MMIO exit by handing the access details to `handle_fn`.
    ///
    /// Must only be called after `run()` returned `VcpuExit::Mmio`; asserts on the exit
    /// reason to enforce this. For writes, the guest-written bytes are passed to the
    /// handler; for reads, the handler fills the buffer that the kernel will deliver back
    /// to the guest.
    fn handle_mmio(&self, handle_fn: &mut dyn FnMut(IoParams) -> Result<()>) -> Result<()> {
        // SAFETY:
        // Safe because we know we mapped enough memory to hold the gzvm_vcpu_run struct because the
        // kernel told us how large it was. The pointer is page aligned so casting to a different
        // type is well defined, hence the clippy allow attribute.
        let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut gzvm_vcpu_run) };

        // Verify that the handler is called in the right context.
        assert!(run.exit_reason == GZVM_EXIT_MMIO);
        // SAFETY:
        // Safe because the exit_reason (which comes from the kernel) told us which
        // union field to use.
        let mmio = unsafe { &mut run.__bindgen_anon_1.mmio };
        let address = mmio.phys_addr;
        // Only the first `size` bytes of the kernel-provided buffer are meaningful.
        let data = &mut mmio.data[..mmio.size as usize];

        if mmio.is_write != 0 {
            handle_fn(IoParams {
                address,
                operation: IoOperation::Write(data),
            })
        } else {
            handle_fn(IoParams {
                address,
                operation: IoOperation::Read(data),
            })
        }
    }

    /// Port I/O exits are not produced by this backend, so this always fails.
    fn handle_io(&self, _handle_fn: &mut dyn FnMut(IoParams)) -> Result<()> {
        Err(Error::new(EINVAL))
    }
}
impl AsRawDescriptor for GeniezoneVcpu {
1265
fn as_raw_descriptor(&self) -> RawDescriptor {
1266
self.vcpu.as_raw_descriptor()
1267
}
1268
}
1269
1270