Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
google
GitHub Repository: google/crosvm
Path: blob/main/hypervisor/src/halla/mod.rs
5394 views
1
// Copyright 2025 The ChromiumOS Authors
2
// Use of this source code is governed by a BSD-style license that can be
3
// found in the LICENSE file.
4
5
pub mod halla_sys;
6
7
use std::cmp::Reverse;
8
use std::collections::BTreeMap;
9
use std::collections::BinaryHeap;
10
use std::convert::TryFrom;
11
use std::ffi::CString;
12
use std::mem::offset_of;
13
use std::os::raw::c_ulong;
14
use std::os::unix::prelude::OsStrExt;
15
use std::path::Path;
16
use std::path::PathBuf;
17
use std::sync::Arc;
18
19
use aarch64_sys_reg::AArch64SysRegId;
20
use base::errno_result;
21
use base::error;
22
use base::ioctl_with_mut_ref;
23
use base::ioctl_with_ref;
24
use base::ioctl_with_val;
25
use base::pagesize;
26
use base::AsRawDescriptor;
27
use base::Error;
28
use base::Event;
29
use base::FromRawDescriptor;
30
use base::MappedRegion;
31
use base::MemoryMapping;
32
use base::MemoryMappingBuilder;
33
use base::MmapError;
34
use base::Protection;
35
use base::RawDescriptor;
36
use base::Result;
37
use base::SafeDescriptor;
38
use cros_fdt::Fdt;
39
pub use halla_sys::*;
40
use libc::open;
41
use libc::EFAULT;
42
use libc::EINVAL;
43
use libc::EIO;
44
use libc::ENOENT;
45
use libc::ENOMEM;
46
use libc::ENOSPC;
47
use libc::ENOTSUP;
48
use libc::EOVERFLOW;
49
use libc::O_CLOEXEC;
50
use libc::O_RDWR;
51
use snapshot::AnySnapshot;
52
use sync::Mutex;
53
use vm_memory::GuestAddress;
54
use vm_memory::GuestMemory;
55
use vm_memory::MemoryRegionPurpose;
56
57
use crate::BalloonEvent;
58
use crate::ClockState;
59
use crate::Config;
60
use crate::Datamatch;
61
use crate::DeviceKind;
62
use crate::Hypervisor;
63
use crate::HypervisorCap;
64
use crate::HypervisorKind;
65
use crate::IoEventAddress;
66
use crate::IoOperation;
67
use crate::IoParams;
68
use crate::MemCacheType;
69
use crate::MemSlot;
70
use crate::ProtectionType;
71
use crate::PsciVersion;
72
use crate::Vcpu;
73
use crate::VcpuAArch64;
74
use crate::VcpuExit;
75
use crate::VcpuFeature;
76
use crate::VcpuRegAArch64;
77
use crate::VcpuSignalHandle;
78
use crate::VcpuSignalHandleInner;
79
use crate::Vm;
80
use crate::VmAArch64;
81
use crate::VmCap;
82
use crate::PSCI_0_2;
83
84
impl Halla {
    /// Get the size of guest physical addresses (IPA) in bits.
    ///
    /// Queries the `HVM_CAP_ARM_VM_IPA_SIZE` extension; falls back to 40 bits
    /// when the extension is unsupported (ioctl returns <= 0).
    pub fn get_guest_phys_addr_bits(&self) -> u8 {
        // SAFETY:
        // Safe because we know self is a real halla fd
        match unsafe { ioctl_with_val(self, HVM_CHECK_EXTENSION, HVM_CAP_ARM_VM_IPA_SIZE.into()) } {
            // Default physical address size is 40 bits if the extension is not supported.
            ret if ret <= 0 => 40,
            ipa => ipa as u8,
        }
    }

    /// Builds the `type` argument for `HVM_CREATE_VM`: the IPA size in the
    /// low bits plus the protected-VM flag when `protection_type` requires
    /// memory isolation.
    pub fn get_vm_type(&self, protection_type: ProtectionType) -> Result<u32> {
        let ipa_size = self.get_guest_phys_addr_bits() as u32;

        let protection_flag = if protection_type.isolates_memory() {
            HVM_VM_TYPE_ARM_PROTECTED
        } else {
            0
        };
        // NOTE(review): assumes the IPA size always fits within
        // HVM_VM_TYPE_IPA_SIZE_MASK; larger values would be silently truncated.
        Ok((ipa_size & HVM_VM_TYPE_IPA_SIZE_MASK) | protection_flag)
    }
}
107
108
impl HallaVm {
    /// Does platform specific initialization for the HallaVm.
    ///
    /// Currently this only enables MTE (Memory Tagging Extension) on aarch64
    /// when requested by `cfg`.
    pub fn init_arch(&self, cfg: &Config) -> Result<()> {
        #[cfg(target_arch = "aarch64")]
        if cfg.mte {
            // SAFETY:
            // Safe because it does not take pointer arguments.
            unsafe { self.ctrl_halla_enable_capability(HallaCap::ArmMte, &[0, 0, 0, 0, 0]) }?;
        }
        Ok(())
    }

    /// Checks if a particular `VmCap` is available, or returns None if arch-independent
    /// Vm.check_capability() should handle the check.
    pub fn check_capability_arch(&self, _c: VmCap) -> Option<bool> {
        // No arch-specific overrides; defer everything to the generic path.
        None
    }

    /// Arch-specific implementation of `Vm::get_pvclock`. Always returns an error on AArch64.
    pub fn get_pvclock_arch(&self) -> Result<ClockState> {
        // TODO: Halla does not support pvclock currently.
        error!("Halla: not support get_pvclock_arch");
        Err(Error::new(EINVAL))
    }

    /// Arch-specific implementation of `Vm::set_pvclock`. Always returns an error on AArch64.
    pub fn set_pvclock_arch(&self, _state: &ClockState) -> Result<()> {
        // TODO: Halla does not support pvclock currently.
        error!("Halla: not support set_pvclock_arch");
        Err(Error::new(EINVAL))
    }

    /// Queries protected-VM firmware information. Only returns the size currently.
    fn get_protected_vm_info(&self) -> Result<u64> {
        // SAFETY:
        // Safe because we allocated the struct and we know the kernel won't write beyond the end of
        // the struct or keep a pointer to it.
        let cap: hvm_enable_cap = unsafe {
            self.ctrl_halla_enable_capability(
                HallaCap::ArmProtectedVm,
                &[HVM_CAP_ARM_PVM_GET_PVMFW_SIZE as u64, 0, 0, 0, 0],
            )
        }?;
        // NOTE(review): assumes the hypervisor writes the firmware size back
        // into args[1] — confirm against the Halla enable-cap ABI.
        Ok(cap.args[1])
    }

    /// Tells the hypervisor the guest physical address where protected-VM
    /// firmware should be placed.
    fn set_protected_vm_firmware_ipa(&self, fw_addr: GuestAddress) -> Result<()> {
        // SAFETY:
        // Safe because none of the args are pointers.
        unsafe {
            self.ctrl_halla_enable_capability(
                HallaCap::ArmProtectedVm,
                &[HVM_CAP_ARM_PVM_SET_PVMFW_IPA as u64, fw_addr.0, 0, 0, 0],
            )
        }?;
        Ok(())
    }
}
166
167
impl VmAArch64 for HallaVm {
168
fn get_hypervisor(&self) -> &dyn Hypervisor {
169
&self.halla
170
}
171
172
fn load_protected_vm_firmware(
173
&mut self,
174
fw_addr: GuestAddress,
175
fw_max_size: u64,
176
) -> Result<()> {
177
let size: u64 = self.get_protected_vm_info()?;
178
if size == 0 {
179
Err(Error::new(EINVAL))
180
} else {
181
if size > fw_max_size {
182
return Err(Error::new(ENOMEM));
183
}
184
self.set_protected_vm_firmware_ipa(fw_addr)
185
}
186
}
187
188
fn create_vcpu(&self, id: usize) -> Result<Box<dyn VcpuAArch64>> {
189
Ok(Box::new(HallaVm::create_vcpu(self, id)?))
190
}
191
192
fn create_fdt(&self, _fdt: &mut Fdt, _phandles: &BTreeMap<&str, u32>) -> cros_fdt::Result<()> {
193
Ok(())
194
}
195
196
fn init_arch(
197
&self,
198
_payload_entry_address: GuestAddress,
199
fdt_address: GuestAddress,
200
fdt_size: usize,
201
) -> std::result::Result<(), anyhow::Error> {
202
let dtb_config = hvm_dtb_config {
203
dtb_addr: fdt_address.offset(),
204
dtb_size: fdt_size.try_into().unwrap(),
205
};
206
// SAFETY:
207
// Safe because we allocated the struct and we know the kernel will modify exactly the size
208
// of the struct.
209
let ret = unsafe { ioctl_with_ref(self, HVM_SET_DTB_CONFIG, &dtb_config) };
210
if ret == 0 {
211
Ok(())
212
} else {
213
errno_result()?
214
}
215
}
216
}
217
218
impl HallaVcpu {
    /// Sets a 64-bit register via `HVM_SET_ONE_REG`, passing `data` in native
    /// byte order.
    fn set_one_halla_reg_u64(&self, hvm_reg_id: HallaVcpuRegister, data: u64) -> Result<()> {
        self.set_one_halla_reg(hvm_reg_id, data.to_ne_bytes().as_slice())
    }

    /// Writes a vCPU register via the `HVM_SET_ONE_REG` ioctl.
    ///
    /// Panics if `data.len()` does not match the register's encoded size.
    fn set_one_halla_reg(&self, hvm_reg_id: HallaVcpuRegister, data: &[u8]) -> Result<()> {
        assert_eq!(hvm_reg_id.size(), data.len());
        let id: u64 = hvm_reg_id.into();
        // The kernel reads the new register value from the userspace address
        // carried in `addr`; `data` must stay alive across the ioctl (it does,
        // since we borrow it for the whole function).
        let onereg = hvm_one_reg {
            id,
            addr: (data.as_ptr() as usize)
                .try_into()
                .expect("can't represent usize as u64"),
        };
        // SAFETY:
        // Safe because we allocated the struct and we know the kernel will read exactly the size of
        // the struct.
        let ret = unsafe { ioctl_with_ref(self, HVM_SET_ONE_REG, &onereg) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Reads a 64-bit register via `HVM_GET_ONE_REG`, decoding from native
    /// byte order.
    fn get_one_halla_reg_u64(&self, hvm_reg_id: HallaVcpuRegister) -> Result<u64> {
        let mut bytes = 0u64.to_ne_bytes();
        self.get_one_halla_reg(hvm_reg_id, bytes.as_mut_slice())?;
        Ok(u64::from_ne_bytes(bytes))
    }

    /// Reads a vCPU register via the `HVM_GET_ONE_REG` ioctl into `data`.
    ///
    /// Panics if `data.len()` does not match the register's encoded size.
    fn get_one_halla_reg(&self, hvm_reg_id: HallaVcpuRegister, data: &mut [u8]) -> Result<()> {
        assert_eq!(hvm_reg_id.size(), data.len());
        let id: u64 = hvm_reg_id.into();
        // The kernel writes the register value to the userspace address
        // carried in `addr`.
        let onereg = hvm_one_reg {
            id,
            addr: (data.as_mut_ptr() as usize)
                .try_into()
                .expect("can't represent usize as u64"),
        };

        // SAFETY:
        // Safe because we allocated the struct and we know the kernel will read exactly the size of
        // the struct.
        let ret = unsafe { ioctl_with_ref(self, HVM_GET_ONE_REG, &onereg) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }
}
270
271
#[derive(Debug, Copy, Clone)]
/// HVM registers as used by the `GET_ONE_REG`/`SET_ONE_REG` ioctl API.
///
/// Each variant is converted into the kernel's packed `u64` register ID by
/// `From<HallaVcpuRegister> for u64` below.
pub enum HallaVcpuRegister {
    /// General Purpose Registers X0-X30
    X(u8),
    /// Stack Pointer
    Sp,
    /// Program Counter
    Pc,
    /// Processor State
    Pstate,
    /// FP & SIMD Registers V0-V31
    V(u8),
    /// Halla Firmware Pseudo-Registers
    Firmware(u16),
    /// System Registers
    System(AArch64SysRegId),
    /// CCSIDR_EL1 Demultiplexed by CSSELR_EL1
    Ccsidr(u8),
}
291
292
impl HallaVcpuRegister {
    /// Size of this register in bytes.
    ///
    /// Decoded from the size field embedded in the `u64` register ID encoding
    /// produced by `From<HallaVcpuRegister> for u64`.
    pub fn size(&self) -> usize {
        let hvm_reg = u64::from(*self);
        let size_field = hvm_reg & HVM_REG_SIZE_MASK;
        const REG_SIZE_U8: u64 = HVM_REG_SIZE_U8 as u64; // cast from bindgen's u32 to u64
        match size_field {
            // NOTE: REG_SIZE_U8 is a named const so it is usable as a match
            // pattern; the remaining HVM_REG_SIZE_* constants are matched
            // directly.
            REG_SIZE_U8 => 1,
            HVM_REG_SIZE_U16 => 2,
            HVM_REG_SIZE_U32 => 4,
            HVM_REG_SIZE_U64 => 8,
            HVM_REG_SIZE_U128 => 16,
            HVM_REG_SIZE_U256 => 32,
            HVM_REG_SIZE_U512 => 64,
            HVM_REG_SIZE_U1024 => 128,
            HVM_REG_SIZE_U2048 => 256,
            // `From<HallaVcpuRegister> for u64` should always include a valid size.
            _ => panic!("invalid size field {size_field}"),
        }
    }
}
313
314
/// Gives the `u64` register ID expected by the `GET_ONE_REG`/`SET_ONE_REG` ioctl API.
///
/// The ID packs an architecture tag, a size field, a register "kind"
/// (core / sysreg / demux / firmware), and kind-specific fields, mirroring
/// the KVM one-reg encoding scheme.
impl From<HallaVcpuRegister> for u64 {
    fn from(register: HallaVcpuRegister) -> Self {
        // Base encoder: arch tag | size field | kind | kind-specific fields.
        const fn reg(size: u64, kind: u64, fields: u64) -> u64 {
            HVM_REG_ARM64 | size | kind | fields
        }

        // Core registers are addressed by their u32-word offset into hvm_regs.
        const fn hvm_regs_reg(size: u64, offset: usize) -> u64 {
            let offset = offset / std::mem::size_of::<u32>();

            reg(size, HVM_REG_ARM_CORE as u64, offset as u64)
        }

        // Convenience wrapper for 64-bit core registers.
        const fn hvm_reg(offset: usize) -> u64 {
            hvm_regs_reg(HVM_REG_SIZE_U64, offset)
        }

        // Banked SPSRs live in an array inside hvm_regs; index by bank.
        fn spsr_reg(spsr_reg: u32) -> u64 {
            let n = std::mem::size_of::<u64>() * (spsr_reg as usize);
            hvm_reg(offset_of!(hvm_regs, spsr) + n)
        }

        // Registers within the embedded user_pt_regs struct.
        fn user_pt_reg(offset: usize) -> u64 {
            hvm_regs_reg(HVM_REG_SIZE_U64, offset_of!(hvm_regs, regs) + offset)
        }

        // Registers within the embedded user_fpsimd_state struct.
        fn user_fpsimd_state_reg(size: u64, offset: usize) -> u64 {
            hvm_regs_reg(size, offset_of!(hvm_regs, fp_regs) + offset)
        }

        const fn reg_u64(kind: u64, fields: u64) -> u64 {
            reg(HVM_REG_SIZE_U64, kind, fields)
        }

        // Demultiplexed registers (e.g. CCSIDR_EL1 selected via CSSELR_EL1):
        // the ID carries both the demux table index and the selector value.
        const fn demux_reg(size: u64, index: u64, value: u64) -> u64 {
            let index = (index << HVM_REG_ARM_DEMUX_ID_SHIFT) & (HVM_REG_ARM_DEMUX_ID_MASK as u64);
            let value =
                (value << HVM_REG_ARM_DEMUX_VAL_SHIFT) & (HVM_REG_ARM_DEMUX_VAL_MASK as u64);

            reg(size, HVM_REG_ARM_DEMUX as u64, index | value)
        }

        match register {
            HallaVcpuRegister::X(n @ 0..=30) => {
                let n = std::mem::size_of::<u64>() * (n as usize);

                user_pt_reg(offset_of!(user_pt_regs, regs) + n)
            }
            HallaVcpuRegister::X(n) => {
                unreachable!("invalid HallaVcpuRegister Xn index: {n}")
            }
            HallaVcpuRegister::Sp => user_pt_reg(offset_of!(user_pt_regs, sp)),
            HallaVcpuRegister::Pc => user_pt_reg(offset_of!(user_pt_regs, pc)),
            HallaVcpuRegister::Pstate => user_pt_reg(offset_of!(user_pt_regs, pstate)),
            HallaVcpuRegister::V(n @ 0..=31) => {
                let n = std::mem::size_of::<u128>() * (n as usize);
                user_fpsimd_state_reg(HVM_REG_SIZE_U128, offset_of!(user_fpsimd_state, vregs) + n)
            }
            HallaVcpuRegister::V(n) => {
                unreachable!("invalid HallaVcpuRegister Vn index: {n}")
            }
            // FPSR/FPCR are 32-bit and live in the FP/SIMD state, not the
            // generic sysreg space.
            HallaVcpuRegister::System(aarch64_sys_reg::FPSR) => {
                user_fpsimd_state_reg(HVM_REG_SIZE_U32, offset_of!(user_fpsimd_state, fpsr))
            }
            HallaVcpuRegister::System(aarch64_sys_reg::FPCR) => {
                user_fpsimd_state_reg(HVM_REG_SIZE_U32, offset_of!(user_fpsimd_state, fpcr))
            }
            // SPSRs and SP_EL1/ELR_EL1 are exposed as core registers rather
            // than sysregs by the one-reg ABI.
            HallaVcpuRegister::System(aarch64_sys_reg::SPSR_EL1) => spsr_reg(0),
            HallaVcpuRegister::System(aarch64_sys_reg::SPSR_abt) => spsr_reg(1),
            HallaVcpuRegister::System(aarch64_sys_reg::SPSR_und) => spsr_reg(2),
            HallaVcpuRegister::System(aarch64_sys_reg::SPSR_irq) => spsr_reg(3),
            HallaVcpuRegister::System(aarch64_sys_reg::SPSR_fiq) => spsr_reg(4),
            HallaVcpuRegister::System(aarch64_sys_reg::SP_EL1) => {
                hvm_reg(offset_of!(hvm_regs, sp_el1))
            }
            HallaVcpuRegister::System(aarch64_sys_reg::ELR_EL1) => {
                hvm_reg(offset_of!(hvm_regs, elr_el1))
            }
            // All other system registers use their architectural encoding.
            HallaVcpuRegister::System(sysreg) => {
                reg_u64(HVM_REG_ARM64_SYSREG.into(), sysreg.encoded().into())
            }
            HallaVcpuRegister::Firmware(n) => reg_u64(HVM_REG_ARM, n.into()),
            HallaVcpuRegister::Ccsidr(n) => demux_reg(HVM_REG_SIZE_U32, 0, n.into()),
        }
    }
}
400
401
impl From<VcpuRegAArch64> for HallaVcpuRegister {
402
fn from(reg: VcpuRegAArch64) -> Self {
403
match reg {
404
VcpuRegAArch64::X(n @ 0..=30) => Self::X(n),
405
VcpuRegAArch64::X(n) => unreachable!("invalid VcpuRegAArch64 index: {n}"),
406
VcpuRegAArch64::Sp => Self::Sp,
407
VcpuRegAArch64::Pc => Self::Pc,
408
VcpuRegAArch64::Pstate => Self::Pstate,
409
VcpuRegAArch64::System(sysreg) => Self::System(sysreg),
410
}
411
}
412
}
413
414
impl VcpuAArch64 for HallaVcpu {
    fn init(&self, _features: &[VcpuFeature]) -> Result<()> {
        // Halla initializes the vCPU at creation time, so there is nothing
        // to do here; return Ok because aarch64/src/lib.rs calls this
        // unconditionally. Requested `_features` are ignored.
        Ok(())
    }

    fn init_pmu(&self, _irq: u64) -> Result<()> {
        // TODO: Halla does not support the PMU currently.
        // Temporarily return Ok since aarch64/src/lib.rs calls this.
        Ok(())
    }

    fn has_pvtime_support(&self) -> bool {
        // TODO: Halla does not support pvtime currently.
        false
    }

    fn init_pvtime(&self, _pvtime_ipa: u64) -> Result<()> {
        // TODO: Halla does not support pvtime currently.
        error!("Halla: not support init_pvtime");
        Err(Error::new(EINVAL))
    }

    /// Writes a single 64-bit register through the one-reg ioctl API.
    fn set_one_reg(&self, reg_id: VcpuRegAArch64, data: u64) -> Result<()> {
        self.set_one_halla_reg_u64(HallaVcpuRegister::from(reg_id), data)
    }

    /// Reads a single 64-bit register through the one-reg ioctl API.
    fn get_one_reg(&self, reg_id: VcpuRegAArch64) -> Result<u64> {
        self.get_one_halla_reg_u64(HallaVcpuRegister::from(reg_id))
    }

    fn set_vector_reg(&self, _reg_num: u8, _data: u128) -> Result<()> {
        // 128-bit vector register access is not wired up yet.
        unimplemented!()
    }

    fn get_vector_reg(&self, _reg_num: u8) -> Result<u128> {
        // 128-bit vector register access is not wired up yet.
        unimplemented!()
    }

    /// Reports PSCI 0.2 unconditionally.
    fn get_psci_version(&self) -> Result<PsciVersion> {
        Ok(PSCI_0_2)
    }

    fn get_max_hw_bps(&self) -> Result<usize> {
        // TODO: Halla does not support gdb currently.
        error!("Halla: not support get_max_hw_bps");
        Err(Error::new(EINVAL))
    }

    fn get_system_regs(&self) -> Result<BTreeMap<AArch64SysRegId, u64>> {
        // Bulk system-register snapshotting is unsupported.
        error!("Halla: not support get_system_regs");
        Err(Error::new(EINVAL))
    }

    fn get_cache_info(&self) -> Result<BTreeMap<u8, u64>> {
        // Cache topology snapshotting is unsupported.
        error!("Halla: not support get_cache_info");
        Err(Error::new(EINVAL))
    }

    fn set_cache_info(&self, _cache_info: BTreeMap<u8, u64>) -> Result<()> {
        // Cache topology restore is unsupported.
        error!("Halla: not support set_cache_info");
        Err(Error::new(EINVAL))
    }

    fn hypervisor_specific_snapshot(&self) -> anyhow::Result<AnySnapshot> {
        // TODO: Halla does not support snapshotting currently.
        Err(anyhow::anyhow!(
            "Halla: not support hypervisor_specific_snapshot"
        ))
    }

    fn hypervisor_specific_restore(&self, _data: AnySnapshot) -> anyhow::Result<()> {
        // TODO: Halla does not support snapshot restore currently.
        Err(anyhow::anyhow!(
            "Halla: not support hypervisor_specific_restore"
        ))
    }

    fn set_guest_debug(&self, _addrs: &[GuestAddress], _enable_singlestep: bool) -> Result<()> {
        // TODO: Halla does not support gdb currently.
        error!("Halla: not support set_guest_debug");
        Err(Error::new(EINVAL))
    }
}
499
500
// Wrapper around HVM_SET_USER_MEMORY_REGION ioctl, which creates, modifies, or deletes a mapping
// from guest physical to host user pages.
//
// Callers in this file delete a slot by passing a zero `memory_size` and a
// null `userspace_addr` (see `remove_memory_region`).
//
// SAFETY:
// Safe when the guest regions are guaranteed not to overlap.
unsafe fn set_user_memory_region(
    descriptor: &SafeDescriptor,
    slot: MemSlot,
    guest_addr: u64,
    memory_size: u64,
    userspace_addr: *mut u8,
    flags: u32,
) -> Result<()> {
    let region = hvm_userspace_memory_region {
        slot,
        flags,
        guest_phys_addr: guest_addr,
        memory_size,
        userspace_addr: userspace_addr as u64,
    };

    let ret = ioctl_with_ref(descriptor, HVM_SET_USER_MEMORY_REGION, &region);
    if ret == 0 {
        Ok(())
    } else {
        errno_result()
    }
}
528
529
/// Helper function to determine the size in bytes of a dirty log bitmap for the given memory region
530
/// size.
531
///
532
/// # Arguments
533
///
534
/// * `size` - Number of bytes in the memory region being queried.
535
pub fn dirty_log_bitmap_size(size: usize) -> usize {
536
let page_size = pagesize();
537
size.div_ceil(page_size).div_ceil(8)
538
}
539
540
/// Handle to the Halla hypervisor: a wrapper over the `/dev/halla` device fd.
pub struct Halla {
    // Owned descriptor for the hypervisor device node.
    halla: SafeDescriptor,
}
543
544
/// Halla capability identifiers passed to `HVM_ENABLE_CAP` /
/// `HVM_CHECK_EXTENSION`.
#[repr(u32)]
pub enum HallaCap {
    // NOTE(review): ArmMte takes the implicit discriminant 0 — confirm this
    // matches the kernel's capability numbering.
    ArmMte,
    ArmProtectedVm = HVM_CAP_ARM_PROTECTED_VM,
}
549
550
impl Halla {
    /// Opens the Halla device node at `device_path` and wraps the resulting fd.
    pub fn new_with_path(device_path: &Path) -> Result<Halla> {
        let c_path = CString::new(device_path.as_os_str().as_bytes()).unwrap();
        // SAFETY:
        // Open calls are safe because we give a nul-terminated string and verify the result.
        let ret = unsafe { open(c_path.as_ptr(), O_RDWR | O_CLOEXEC) };
        if ret < 0 {
            return errno_result();
        }
        Ok(Halla {
            // SAFETY:
            // Safe because we verify that ret is valid and we own the fd.
            halla: unsafe { SafeDescriptor::from_raw_descriptor(ret) },
        })
    }

    /// Opens `/dev/halla` and returns a hvm object on success.
    pub fn new() -> Result<Halla> {
        Halla::new_with_path(&PathBuf::from("/dev/halla"))
    }

    /// Gets the size of the mmap required to use vcpu's `hvm_vcpu_run` structure.
    pub fn get_vcpu_mmap_size(&self) -> Result<usize> {
        // We don't use mmap; return sizeof(hvm_vcpu_run) directly.
        let res = std::mem::size_of::<hvm_vcpu_run>();
        Ok(res)
    }
}
578
579
impl AsRawDescriptor for Halla {
    // Exposes the raw `/dev/halla` fd so the base ioctl helpers can use it.
    fn as_raw_descriptor(&self) -> RawDescriptor {
        self.halla.as_raw_descriptor()
    }
}
584
585
impl Hypervisor for Halla {
586
fn try_clone(&self) -> Result<Self> {
587
Ok(Halla {
588
halla: self.halla.try_clone()?,
589
})
590
}
591
592
fn check_capability(&self, cap: HypervisorCap) -> bool {
593
match cap {
594
HypervisorCap::UserMemory => true,
595
HypervisorCap::ImmediateExit => true,
596
HypervisorCap::StaticSwiotlbAllocationRequired => false,
597
HypervisorCap::HypervisorInitializedBootContext => false,
598
}
599
}
600
}
601
602
/// A wrapper around creating and using a Halla VM.
pub struct HallaVm {
    // Handle to the hypervisor that created this VM.
    halla: Halla,
    // VM file descriptor returned by HVM_CREATE_VM.
    vm: SafeDescriptor,
    // The guest memory layout registered with the hypervisor at creation.
    guest_mem: GuestMemory,
    // Additional regions added at runtime via `add_memory_region`, keyed by slot.
    mem_regions: Arc<Mutex<BTreeMap<MemSlot, Box<dyn MappedRegion>>>>,
    /// A min heap of MemSlot numbers that were used and then removed and can now be re-used
    mem_slot_gaps: Arc<Mutex<BinaryHeap<Reverse<MemSlot>>>>,
}
611
612
impl HallaVm {
613
/// Constructs a new `HallaVm` using the given `Halla` instance.
614
pub fn new(halla: &Halla, guest_mem: GuestMemory, cfg: Config) -> Result<HallaVm> {
615
// SAFETY:
616
// Safe because we know hvm is a real hvm fd as this module is the only one that can make
617
// hvm objects.
618
let ret = unsafe {
619
ioctl_with_val(
620
halla,
621
HVM_CREATE_VM,
622
halla.get_vm_type(cfg.protection_type)? as c_ulong,
623
)
624
};
625
if ret < 0 {
626
return errno_result();
627
}
628
// SAFETY:
629
// Safe because we verify that ret is valid and we own the fd.
630
let vm_descriptor = unsafe { SafeDescriptor::from_raw_descriptor(ret) };
631
for region in guest_mem.regions() {
632
let flags = match region.options.purpose {
633
MemoryRegionPurpose::Bios => HVM_USER_MEM_REGION_GUEST_MEM,
634
MemoryRegionPurpose::GuestMemoryRegion => HVM_USER_MEM_REGION_GUEST_MEM,
635
MemoryRegionPurpose::ProtectedFirmwareRegion => HVM_USER_MEM_REGION_PROTECT_FW,
636
MemoryRegionPurpose::ReservedMemory => HVM_USER_MEM_REGION_GUEST_MEM,
637
MemoryRegionPurpose::StaticSwiotlbRegion => HVM_USER_MEM_REGION_STATIC_SWIOTLB,
638
};
639
// SAFETY:
640
// Safe because the guest regions are guaranteed not to overlap.
641
unsafe {
642
set_user_memory_region(
643
&vm_descriptor,
644
region.index as MemSlot,
645
region.guest_addr.offset(),
646
region.size as u64,
647
region.host_addr as *mut u8,
648
flags,
649
)
650
}?;
651
}
652
653
let vm = HallaVm {
654
halla: halla.try_clone()?,
655
vm: vm_descriptor,
656
guest_mem,
657
mem_regions: Arc::new(Mutex::new(BTreeMap::new())),
658
mem_slot_gaps: Arc::new(Mutex::new(BinaryHeap::new())),
659
};
660
vm.init_arch(&cfg)?;
661
Ok(vm)
662
}
663
664
fn create_vcpu(&self, id: usize) -> Result<HallaVcpu> {
665
// run is a data structure shared with ko and halla
666
let run_mmap_size = self.halla.get_vcpu_mmap_size()?;
667
668
let fd =
669
// SAFETY:
670
// Safe because we know that our file is a VM fd and we verify the return result.
671
unsafe { ioctl_with_val(self, HVM_CREATE_VCPU, c_ulong::try_from(id).unwrap()) };
672
673
if fd < 0 {
674
return errno_result();
675
}
676
677
// SAFETY:
678
// Wrap the vcpu now in case the following ? returns early. This is safe because we verified
679
// the value of the fd and we own the fd.
680
let vcpu = unsafe { SafeDescriptor::from_raw_descriptor(fd) };
681
682
// Memory mapping --> Memory allocation
683
let run_mmap = MemoryMappingBuilder::new(run_mmap_size)
684
.build()
685
.map_err(|_| Error::new(ENOSPC))?;
686
687
Ok(HallaVcpu {
688
vm: self.vm.try_clone()?,
689
vcpu,
690
id,
691
run_mmap: Arc::new(run_mmap),
692
})
693
}
694
695
/// Sets the level on the given irq to 1 if `active` is true, and 0 otherwise.
696
pub fn set_irq_line(&self, irq: u32, active: bool) -> Result<()> {
697
let mut irq_level = hvm_irq_level::default();
698
irq_level.__bindgen_anon_1.irq = irq;
699
irq_level.level = active as u32;
700
701
// SAFETY:
702
// Safe because we know that our file is a VM fd, we know the kernel will only read the
703
// correct amount of memory from our pointer, and we verify the return result.
704
let ret = unsafe { ioctl_with_ref(self, HVM_IRQ_LINE, &irq_level) };
705
if ret == 0 {
706
Ok(())
707
} else {
708
errno_result()
709
}
710
}
711
712
/// Registers an event that will, when signalled, trigger the `gsi` irq, and `resample_evt`
713
/// ( when not None ) will be triggered when the irqchip is resampled.
714
pub fn register_irqfd(
715
&self,
716
gsi: u32,
717
evt: &Event,
718
resample_evt: Option<&Event>,
719
) -> Result<()> {
720
let mut irqfd = hvm_irqfd {
721
fd: evt.as_raw_descriptor() as u32,
722
gsi,
723
..Default::default()
724
};
725
726
if let Some(r_evt) = resample_evt {
727
irqfd.flags = HVM_IRQFD_FLAG_RESAMPLE;
728
irqfd.resamplefd = r_evt.as_raw_descriptor() as u32;
729
}
730
731
// SAFETY:
732
// Safe because we know that our file is a VM fd, we know the kernel will only read the
733
// correct amount of memory from our pointer, and we verify the return result.
734
let ret = unsafe { ioctl_with_ref(self, HVM_IRQFD, &irqfd) };
735
if ret == 0 {
736
Ok(())
737
} else {
738
errno_result()
739
}
740
}
741
742
/// Unregisters an event that was previously registered with
743
/// `register_irqfd`.
744
///
745
/// The `evt` and `gsi` pair must be the same as the ones passed into
746
/// `register_irqfd`.
747
pub fn unregister_irqfd(&self, gsi: u32, evt: &Event) -> Result<()> {
748
let irqfd = hvm_irqfd {
749
fd: evt.as_raw_descriptor() as u32,
750
gsi,
751
flags: HVM_IRQFD_FLAG_DEASSIGN,
752
..Default::default()
753
};
754
// SAFETY:
755
// Safe because we know that our file is a VM fd, we know the kernel will only read the
756
// correct amount of memory from our pointer, and we verify the return result.
757
let ret = unsafe { ioctl_with_ref(self, HVM_IRQFD, &irqfd) };
758
if ret == 0 {
759
Ok(())
760
} else {
761
errno_result()
762
}
763
}
764
765
fn ioeventfd(
766
&self,
767
evt: &Event,
768
addr: IoEventAddress,
769
datamatch: Datamatch,
770
deassign: bool,
771
) -> Result<()> {
772
let (do_datamatch, datamatch_value, datamatch_len) = match datamatch {
773
Datamatch::AnyLength => (false, 0, 0),
774
Datamatch::U8(v) => match v {
775
Some(u) => (true, u as u64, 1),
776
None => (false, 0, 1),
777
},
778
Datamatch::U16(v) => match v {
779
Some(u) => (true, u as u64, 2),
780
None => (false, 0, 2),
781
},
782
Datamatch::U32(v) => match v {
783
Some(u) => (true, u as u64, 4),
784
None => (false, 0, 4),
785
},
786
Datamatch::U64(v) => match v {
787
Some(u) => (true, u, 8),
788
None => (false, 0, 8),
789
},
790
};
791
let mut flags = 0;
792
if deassign {
793
flags |= 1 << hvm_ioeventfd_flag_nr_deassign;
794
}
795
if do_datamatch {
796
flags |= 1 << hvm_ioeventfd_flag_nr_datamatch
797
}
798
let ioeventfd = hvm_ioeventfd {
799
datamatch: datamatch_value,
800
len: datamatch_len,
801
addr: match addr {
802
IoEventAddress::Mmio(m) => m,
803
// We don't use Pio in aarch64, If we need to support x86, please add it.
804
IoEventAddress::Pio(_) => EINVAL.try_into().unwrap(),
805
},
806
fd: evt.as_raw_descriptor(),
807
flags,
808
..Default::default()
809
};
810
// SAFETY:
811
// Safe because we know that our file is a VM fd, we know the kernel will only read the
812
// correct amount of memory from our pointer, and we verify the return result.
813
let ret = unsafe { ioctl_with_ref(self, HVM_IOEVENTFD, &ioeventfd) };
814
if ret == 0 {
815
Ok(())
816
} else {
817
errno_result()
818
}
819
}
820
821
/// Checks whether a particular HVM-specific capability is available for this VM.
822
fn check_raw_capability(&self, capability: HallaCap) -> bool {
823
let mut cap: u64 = capability as u64;
824
// SAFETY:
825
// Safe because we know that our file is a HVM fd, and if the cap is invalid HVM assumes
826
// it's an unavailable extension and returns 0.
827
unsafe {
828
ioctl_with_mut_ref(self, HVM_CHECK_EXTENSION, &mut cap);
829
}
830
cap == 1
831
}
832
833
#[allow(dead_code)]
834
/// Enables a HVM-specific capability for this VM, with the given arguments.
835
///
836
/// # Safety
837
/// This function is marked as unsafe because `args` may be interpreted as pointers for some
838
/// capabilities. The caller must ensure that any pointers passed in the `args` array are
839
/// allocated as the kernel expects, and that mutable pointers are owned.
840
unsafe fn ctrl_halla_enable_capability(
841
&self,
842
capability: HallaCap,
843
args: &[u64; 5],
844
) -> Result<hvm_enable_cap> {
845
let hvm_cap = hvm_enable_cap {
846
cap: capability as u64,
847
args: *args,
848
};
849
// Safe because we allocated the struct and we know the kernel will read exactly the size of
850
// the struct, and because we assume the caller has allocated the args appropriately.
851
let ret = ioctl_with_ref(self, HVM_ENABLE_CAP, &hvm_cap);
852
if ret == 0 {
853
Ok(hvm_cap)
854
} else {
855
errno_result()
856
}
857
}
858
859
pub fn create_halla_device(&self, dev: hvm_create_device) -> Result<()> {
860
// SAFETY:
861
// Safe because we allocated the struct and we know the kernel will modify exactly the size
862
// of the struct and the return value is checked.
863
let ret = unsafe { base::ioctl_with_ref(self, HVM_CREATE_DEVICE, &dev) };
864
if ret == 0 {
865
Ok(())
866
} else {
867
errno_result()
868
}
869
}
870
871
fn handle_inflate(&mut self, guest_address: GuestAddress, size: u64) -> Result<()> {
872
match self.guest_mem.remove_range(guest_address, size) {
873
Ok(_) => Ok(()),
874
Err(vm_memory::Error::MemoryAccess(_, MmapError::SystemCallFailed(e))) => Err(e),
875
Err(_) => Err(Error::new(EIO)),
876
}
877
}
878
879
fn handle_deflate(&mut self, _guest_address: GuestAddress, _size: u64) -> Result<()> {
880
// No-op, when the guest attempts to access the pages again, Linux/HVM will provide them.
881
Ok(())
882
}
883
}
884
885
impl Vm for HallaVm {
886
fn try_clone(&self) -> Result<Self> {
887
Ok(HallaVm {
888
halla: self.halla.try_clone()?,
889
vm: self.vm.try_clone()?,
890
guest_mem: self.guest_mem.clone(),
891
mem_regions: self.mem_regions.clone(),
892
mem_slot_gaps: self.mem_slot_gaps.clone(),
893
})
894
}
895
896
fn try_clone_descriptor(&self) -> Result<SafeDescriptor> {
897
error!("try_clone_descriptor hasn't been tested on Halla, returning -ENOTSUP");
898
Err(Error::new(ENOTSUP))
899
}
900
901
fn hypervisor_kind(&self) -> HypervisorKind {
902
HypervisorKind::Halla
903
}
904
905
fn check_capability(&self, c: VmCap) -> bool {
906
if let Some(val) = self.check_capability_arch(c) {
907
return val;
908
}
909
match c {
910
VmCap::ArmPmuV3 => false,
911
VmCap::DirtyLog => false,
912
VmCap::PvClock => false,
913
VmCap::Protected => self.check_raw_capability(HallaCap::ArmProtectedVm),
914
VmCap::EarlyInitCpuid => false,
915
VmCap::ReadOnlyMemoryRegion => false,
916
VmCap::MemNoncoherentDma => false,
917
VmCap::Sve => false,
918
}
919
}
920
921
fn get_guest_phys_addr_bits(&self) -> u8 {
922
self.halla.get_guest_phys_addr_bits()
923
}
924
925
fn get_memory(&self) -> &GuestMemory {
926
&self.guest_mem
927
}
928
929
fn add_memory_region(
930
&mut self,
931
guest_addr: GuestAddress,
932
mem: Box<dyn MappedRegion>,
933
_read_only: bool,
934
_log_dirty_pages: bool,
935
_cache: MemCacheType,
936
) -> Result<MemSlot> {
937
let pgsz = pagesize() as u64;
938
// HVM require to set the user memory region with page size aligned size. Safe to extend
939
// the mem.size() to be page size aligned because the mmap will round up the size to be
940
// page size aligned if it is not.
941
let size = (mem.size() as u64).div_ceil(pgsz) * pgsz;
942
let end_addr = guest_addr
943
.checked_add(size)
944
.ok_or_else(|| Error::new(EOVERFLOW))?;
945
if self.guest_mem.range_overlap(guest_addr, end_addr) {
946
return Err(Error::new(ENOSPC));
947
}
948
let mut regions = self.mem_regions.lock();
949
let mut gaps = self.mem_slot_gaps.lock();
950
let slot = match gaps.pop() {
951
Some(gap) => gap.0,
952
None => (regions.len() + self.guest_mem.num_regions() as usize) as MemSlot,
953
};
954
let flags = 0;
955
956
// SAFETY:
957
// Safe because we check that the given guest address is valid and has no overlaps. We also
958
// know that the pointer and size are correct because the MemoryMapping interface ensures
959
// this. We take ownership of the memory mapping so that it won't be unmapped until the slot
960
// is removed.
961
// We don't use read_only and log_dirty_pages, if we need this, please add it
962
let res = unsafe {
963
set_user_memory_region(
964
&self.vm,
965
slot,
966
guest_addr.offset(),
967
size,
968
mem.as_ptr(),
969
flags,
970
)
971
};
972
973
if let Err(e) = res {
974
gaps.push(Reverse(slot));
975
return Err(e);
976
}
977
regions.insert(slot, mem);
978
Ok(slot)
979
}
980
981
fn msync_memory_region(&mut self, slot: MemSlot, offset: usize, size: usize) -> Result<()> {
982
let mut regions = self.mem_regions.lock();
983
let mem = regions.get_mut(&slot).ok_or_else(|| Error::new(ENOENT))?;
984
985
mem.msync(offset, size).map_err(|err| match err {
986
MmapError::InvalidAddress => Error::new(EFAULT),
987
MmapError::NotPageAligned => Error::new(EINVAL),
988
MmapError::SystemCallFailed(e) => e,
989
_ => Error::new(EIO),
990
})
991
}
992
993
fn madvise_pageout_memory_region(
994
&mut self,
995
_slot: MemSlot,
996
_offset: usize,
997
_size: usize,
998
) -> Result<()> {
999
Err(Error::new(ENOTSUP))
1000
}
1001
1002
fn madvise_remove_memory_region(
1003
&mut self,
1004
_slot: MemSlot,
1005
_offset: usize,
1006
_size: usize,
1007
) -> Result<()> {
1008
Err(Error::new(ENOTSUP))
1009
}
1010
1011
/// Unregisters the guest memory region in `slot` from the hypervisor and
/// returns the owned mapping to the caller.
///
/// On success the slot number is recycled through `mem_slot_gaps` so a later
/// registration can reuse it. If the unregister ioctl fails, the region is
/// left registered and in the map, and the error is propagated.
fn remove_memory_region(&mut self, slot: MemSlot) -> Result<Box<dyn MappedRegion>> {
    let mut regions = self.mem_regions.lock();
    if !regions.contains_key(&slot) {
        return Err(Error::new(ENOENT));
    }
    // SAFETY:
    // Safe because `slot` was verified above to be a registered memory slot;
    // a zero-sized registration tells the kernel to drop it.
    unsafe {
        set_user_memory_region(&self.vm, slot, 0, 0, std::ptr::null_mut(), 0)?;
    }
    self.mem_slot_gaps.lock().push(Reverse(slot));
    // The `contains_key` check above (under the same lock) guarantees the
    // entry is present.
    Ok(regions
        .remove(&slot)
        .expect("slot disappeared while holding the regions lock"))
}
1025
1026
fn create_device(&self, _kind: DeviceKind) -> Result<SafeDescriptor> {
1027
// This function should not be invoked because the vgic device is created in irqchip.
1028
errno_result()
1029
}
1030
1031
/// Dirty-page logging is not implemented for Halla; always returns
/// `ENOTSUP`.
fn get_dirty_log(&self, _slot: MemSlot, _dirty_log: &mut [u8]) -> Result<()> {
    Err(Error::new(ENOTSUP))
}
1034
1035
/// Registers `evt` to be signaled in-kernel when the guest accesses `addr`
/// (optionally filtered by `datamatch`).
///
/// Delegates to `ioeventfd` with the final flag `false`; the sibling
/// `unregister_ioevent` passes `true`, so that flag selects deassignment.
fn register_ioevent(
    &mut self,
    evt: &Event,
    addr: IoEventAddress,
    datamatch: Datamatch,
) -> Result<()> {
    self.ioeventfd(evt, addr, datamatch, false)
}
1043
1044
/// Removes a previously registered ioevent for `evt` at `addr`/`datamatch`.
///
/// Delegates to `ioeventfd` with the final flag `true` (deassign), mirroring
/// `register_ioevent` which passes `false`.
fn unregister_ioevent(
    &mut self,
    evt: &Event,
    addr: IoEventAddress,
    datamatch: Datamatch,
) -> Result<()> {
    self.ioeventfd(evt, addr, datamatch, true)
}
1052
1053
/// No-op: HVM delivers registered IO events in-kernel via ioeventfds, so
/// there is nothing for userspace to do when this is called.
fn handle_io_events(&self, _addr: IoEventAddress, _data: &[u8]) -> Result<()> {
    Ok(())
}
1057
1058
/// Enabling guest hypercalls is not implemented for Halla; always returns
/// `ENOTSUP`.
fn enable_hypercalls(&mut self, _nr: u64, _count: usize) -> Result<()> {
    Err(Error::new(ENOTSUP))
}
1061
1062
/// Reads the paravirtual clock state by deferring to the arch-specific
/// implementation (`get_pvclock_arch`).
fn get_pvclock(&self) -> Result<ClockState> {
    self.get_pvclock_arch()
}
1065
1066
/// Restores the paravirtual clock state by deferring to the arch-specific
/// implementation (`set_pvclock_arch`).
fn set_pvclock(&self, state: &ClockState) -> Result<()> {
    self.set_pvclock_arch(state)
}
1069
1070
fn add_fd_mapping(
1071
&mut self,
1072
slot: u32,
1073
offset: usize,
1074
size: usize,
1075
fd: &dyn AsRawDescriptor,
1076
fd_offset: u64,
1077
prot: Protection,
1078
) -> Result<()> {
1079
let mut regions = self.mem_regions.lock();
1080
let region = regions.get_mut(&slot).ok_or_else(|| Error::new(EINVAL))?;
1081
1082
match region.add_fd_mapping(offset, size, fd, fd_offset, prot) {
1083
Ok(()) => Ok(()),
1084
Err(MmapError::SystemCallFailed(e)) => Err(e),
1085
Err(_) => Err(Error::new(EIO)),
1086
}
1087
}
1088
1089
fn remove_mapping(&mut self, slot: u32, offset: usize, size: usize) -> Result<()> {
1090
let mut regions = self.mem_regions.lock();
1091
let region = regions.get_mut(&slot).ok_or_else(|| Error::new(EINVAL))?;
1092
1093
match region.remove_mapping(offset, size) {
1094
Ok(()) => Ok(()),
1095
Err(MmapError::SystemCallFailed(e)) => Err(e),
1096
Err(_) => Err(Error::new(EIO)),
1097
}
1098
}
1099
1100
/// Dispatches a balloon event to the matching inflate/deflate handler.
///
/// `BalloonTargetReached` needs no hypervisor-side action here and is
/// acknowledged with `Ok(())`.
fn handle_balloon_event(&mut self, event: BalloonEvent) -> Result<()> {
    match event {
        BalloonEvent::Inflate(m) => self.handle_inflate(m.guest_address, m.size),
        BalloonEvent::Deflate(m) => self.handle_deflate(m.guest_address, m.size),
        BalloonEvent::BalloonTargetReached(_) => Ok(()),
    }
}
1107
}
1108
1109
impl AsRawDescriptor for HallaVm {
    /// Exposes the raw VM file descriptor, e.g. for use as an ioctl target.
    fn as_raw_descriptor(&self) -> RawDescriptor {
        self.vm.as_raw_descriptor()
    }
}
1114
1115
/// State shared with signal handlers so they can request an immediate vcpu
/// exit without holding a reference to the `HallaVcpu` itself.
struct HallaVcpuSignalHandle {
    // Shared mapping of the kernel's `hvm_vcpu_run` structure for the target
    // vcpu; kept alive by the `Arc` even if the vcpu is dropped first.
    run_mmap: Arc<MemoryMapping>,
}
1118
1119
impl VcpuSignalHandleInner for HallaVcpuSignalHandle {
    /// Sets the `immediate_exit` flag in the vcpu's shared run structure so
    /// the in-progress (or next) `HVM_RUN` returns to userspace promptly.
    fn signal_immediate_exit(&self) {
        // SAFETY: we ensure `run_mmap` is a valid mapping of `halla_run` at creation time, and the
        // `Arc` ensures the mapping still exists while we hold a reference to it.
        // NOTE(review): this is a plain (non-atomic, non-volatile) store,
        // typically performed from signal context — confirm this matches the
        // intended cross-context visibility guarantees.
        unsafe {
            let run = self.run_mmap.as_ptr() as *mut hvm_vcpu_run;
            (*run).immediate_exit = 1;
        }
    }
}
1129
1130
/// A wrapper around using a Halla Vcpu.
pub struct HallaVcpu {
    // Descriptor for the VM that owns this vcpu.
    vm: SafeDescriptor,
    // Descriptor for the vcpu itself; target of the `HVM_RUN` ioctl.
    vcpu: SafeDescriptor,
    // This vcpu's id, as returned by `id()` (assigned at creation).
    id: usize,
    // Shared mapping of the kernel's `hvm_vcpu_run` communication structure;
    // also referenced by signal handles for immediate-exit requests.
    run_mmap: Arc<MemoryMapping>,
}
1137
1138
impl Vcpu for HallaVcpu {
    /// Duplicates this vcpu handle: the VM and vcpu descriptors are duped,
    /// while the `hvm_vcpu_run` mapping is shared via the `Arc`.
    fn try_clone(&self) -> Result<Self> {
        let vm = self.vm.try_clone()?;
        let vcpu = self.vcpu.try_clone()?;

        Ok(HallaVcpu {
            vm,
            vcpu,
            id: self.id,
            run_mmap: self.run_mmap.clone(),
        })
    }

    fn as_vcpu(&self) -> &dyn Vcpu {
        self
    }

    /// Returns the id this vcpu was created with.
    fn id(&self) -> usize {
        self.id
    }

    /// Sets (or clears) the `immediate_exit` flag in the shared run
    /// structure, asking `run()` to return to userspace as soon as possible.
    #[allow(clippy::cast_ptr_alignment)]
    fn set_immediate_exit(&self, exit: bool) {
        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut hvm_vcpu_run) };
        run.immediate_exit = exit as u8;
    }

    /// Returns a handle that can flag this vcpu for immediate exit (e.g.
    /// from a signal handler) without borrowing the vcpu itself.
    fn signal_handle(&self) -> VcpuSignalHandle {
        VcpuSignalHandle {
            inner: Box::new(HallaVcpuSignalHandle {
                run_mmap: self.run_mmap.clone(),
            }),
        }
    }

    /// No per-vcpu work is required on suspend for this hypervisor.
    fn on_suspend(&self) -> Result<()> {
        Ok(())
    }

    /// Raw capability enablement is not supported; always returns `ENXIO`.
    unsafe fn enable_raw_capability(&self, _cap: u32, _args: &[u64; 4]) -> Result<()> {
        Err(Error::new(libc::ENXIO))
    }

    /// Runs the vcpu via the `HVM_RUN` ioctl until it exits, then decodes
    /// the kernel-reported exit reason into a `VcpuExit`.
    #[allow(clippy::cast_ptr_alignment)]
    // The pointer is page aligned so casting to a different type is well defined, hence the clippy
    // allow attribute.
    fn run(&mut self) -> Result<VcpuExit> {
        // SAFETY:
        // Safe because we know that our file is a VCPU fd and we verify the return result.
        let ret = unsafe { ioctl_with_val(self, HVM_RUN, self.run_mmap.as_ptr() as u64) };
        if ret != 0 {
            return errno_result();
        }

        // SAFETY:
        // Safe because we know we mapped enough memory to hold the hvm_vcpu_run struct because the
        // kernel told us how large it was.
        let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut hvm_vcpu_run) };

        match run.exit_reason {
            HVM_EXIT_MMIO => Ok(VcpuExit::Mmio),
            HVM_EXIT_IRQ => Ok(VcpuExit::IrqWindowOpen),
            HVM_EXIT_EXCEPTION => Ok(VcpuExit::Exception),
            HVM_EXIT_SYSTEM_EVENT => {
                // SAFETY:
                // Safe because the exit_reason (which comes from the kernel) told us which
                // union field to use.
                let event_type = unsafe { run.__bindgen_anon_1.system_event.type_ };
                match event_type {
                    HVM_SYSTEM_EVENT_SHUTDOWN => Ok(VcpuExit::SystemEventShutdown),
                    HVM_SYSTEM_EVENT_RESET => Ok(VcpuExit::SystemEventReset),
                    HVM_SYSTEM_EVENT_CRASH => Ok(VcpuExit::SystemEventCrash),
                    _ => {
                        // Unrecognized system events are an error, not a panic,
                        // since the payload comes from the kernel at runtime.
                        error!("Unknown HVM system event {}", event_type);
                        Err(Error::new(EINVAL))
                    }
                }
            }
            HVM_EXIT_INTERNAL_ERROR => Ok(VcpuExit::InternalError),
            HVM_EXIT_SHUTDOWN => Ok(VcpuExit::Shutdown(Ok(()))),
            // NOTE(review): an unknown exit reason aborts the whole process;
            // consider whether a recoverable error would be preferable if the
            // kernel ABI can grow new exit codes.
            r => panic!("unknown hvm exit reason: {r}"),
        }
    }

    /// Completes an `HVM_EXIT_MMIO` exit by invoking `handle_fn` with the
    /// faulting address and a slice of the access bytes. For reads, the
    /// handler fills the slice in place, which writes directly into the run
    /// structure the kernel consumes on the next `run()`.
    fn handle_mmio(&self, handle_fn: &mut dyn FnMut(IoParams) -> Result<()>) -> Result<()> {
        // SAFETY:
        // Safe because we know we mapped enough memory to hold the hvm_vcpu_run struct because the
        // kernel told us how large it was. The pointer is page aligned so casting to a different
        // type is well defined, hence the clippy allow attribute.
        let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut hvm_vcpu_run) };

        // Verify that the handler is called in the right context.
        assert!(run.exit_reason == HVM_EXIT_MMIO);
        // SAFETY:
        // Safe because the exit_reason (which comes from the kernel) told us which
        // union field to use.
        let mmio = unsafe { &mut run.__bindgen_anon_1.mmio };
        let address = mmio.phys_addr;
        // Only the first `size` bytes of the data buffer are meaningful.
        let data = &mut mmio.data[..mmio.size as usize];

        if mmio.is_write != 0 {
            handle_fn(IoParams {
                address,
                operation: IoOperation::Write(data),
            })
        } else {
            handle_fn(IoParams {
                address,
                operation: IoOperation::Read(data),
            })
        }
    }

    /// Port I/O exits are never generated by this backend, so being asked to
    /// handle one is a caller error (`EINVAL`).
    fn handle_io(&self, _handle_fn: &mut dyn FnMut(IoParams)) -> Result<()> {
        Err(Error::new(EINVAL))
    }
}
1257
1258
impl AsRawDescriptor for HallaVcpu {
    /// Exposes the raw vcpu file descriptor; this is the fd targeted by the
    /// `HVM_RUN` ioctl in `run()`.
    fn as_raw_descriptor(&self) -> RawDescriptor {
        self.vcpu.as_raw_descriptor()
    }
}
1263
1264