Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
google
GitHub Repository: google/crosvm
Path: blob/main/hypervisor/src/gunyah/mod.rs
5394 views
1
// Copyright 2023 The ChromiumOS Authors
2
// Use of this source code is governed by a BSD-style license that can be
3
// found in the LICENSE file.
4
5
#[cfg(target_arch = "aarch64")]
6
mod aarch64;
7
8
mod gunyah_sys;
9
use std::cmp::Reverse;
10
use std::collections::BTreeMap;
11
use std::collections::BinaryHeap;
12
use std::collections::HashSet;
13
use std::ffi::CString;
14
use std::fs::File;
15
use std::mem::size_of;
16
use std::os::raw::c_ulong;
17
use std::os::unix::prelude::OsStrExt;
18
use std::path::Path;
19
use std::path::PathBuf;
20
use std::sync::Arc;
21
22
use base::errno_result;
23
use base::error;
24
use base::info;
25
use base::ioctl;
26
use base::ioctl_with_ref;
27
use base::ioctl_with_val;
28
use base::pagesize;
29
use base::warn;
30
use base::Error;
31
use base::FromRawDescriptor;
32
use base::MemoryMapping;
33
use base::MemoryMappingBuilder;
34
use base::MmapError;
35
use base::RawDescriptor;
36
use gunyah_sys::*;
37
use libc::open;
38
use libc::EFAULT;
39
use libc::EINVAL;
40
use libc::EIO;
41
use libc::ENOENT;
42
use libc::ENOSPC;
43
use libc::ENOTSUP;
44
use libc::EOVERFLOW;
45
use libc::O_CLOEXEC;
46
use libc::O_RDWR;
47
use sync::Mutex;
48
use vm_memory::MemoryRegionPurpose;
49
50
use crate::*;
51
52
/// Handle to the Gunyah hypervisor, wrapping an open fd for the Gunyah
/// device node (normally `/dev/gunyah`).
pub struct Gunyah {
    // Owned descriptor for the opened Gunyah device; all hypervisor-level
    // ioctls (e.g. GH_CREATE_VM) are issued against this fd.
    gunyah: SafeDescriptor,
}
55
56
impl AsRawDescriptor for Gunyah {
57
fn as_raw_descriptor(&self) -> RawDescriptor {
58
self.gunyah.as_raw_descriptor()
59
}
60
}
61
62
impl Gunyah {
63
pub fn new_with_path(device_path: &Path) -> Result<Gunyah> {
64
let c_path = CString::new(device_path.as_os_str().as_bytes()).unwrap();
65
// SAFETY:
66
// Open calls are safe because we give a nul-terminated string and verify the result.
67
let ret = unsafe { open(c_path.as_ptr(), O_RDWR | O_CLOEXEC) };
68
if ret < 0 {
69
return errno_result();
70
}
71
Ok(Gunyah {
72
// SAFETY:
73
// Safe because we verify that ret is valid and we own the fd.
74
gunyah: unsafe { SafeDescriptor::from_raw_descriptor(ret) },
75
})
76
}
77
78
pub fn new() -> Result<Gunyah> {
79
Gunyah::new_with_path(&PathBuf::from("/dev/gunyah"))
80
}
81
}
82
83
impl Hypervisor for Gunyah {
84
fn try_clone(&self) -> Result<Self>
85
where
86
Self: Sized,
87
{
88
Ok(Gunyah {
89
gunyah: self.gunyah.try_clone()?,
90
})
91
}
92
93
fn check_capability(&self, cap: HypervisorCap) -> bool {
94
match cap {
95
HypervisorCap::UserMemory => true,
96
HypervisorCap::ImmediateExit => true,
97
HypervisorCap::StaticSwiotlbAllocationRequired => true,
98
HypervisorCap::HypervisorInitializedBootContext => true,
99
}
100
}
101
}
102
103
unsafe fn android_lend_user_memory_region(
104
vm: &SafeDescriptor,
105
slot: MemSlot,
106
read_only: bool,
107
guest_addr: u64,
108
memory_size: u64,
109
userspace_addr: *mut u8,
110
) -> Result<()> {
111
let mut flags = 0;
112
113
flags |= GH_MEM_ALLOW_READ | GH_MEM_ALLOW_EXEC;
114
if !read_only {
115
flags |= GH_MEM_ALLOW_WRITE;
116
}
117
118
let region = gh_userspace_memory_region {
119
label: slot,
120
flags,
121
guest_phys_addr: guest_addr,
122
memory_size,
123
userspace_addr: userspace_addr as u64,
124
};
125
126
let ret = ioctl_with_ref(vm, GH_VM_ANDROID_LEND_USER_MEM, &region);
127
if ret == 0 {
128
Ok(())
129
} else {
130
errno_result()
131
}
132
}
133
134
// Wrapper around GH_SET_USER_MEMORY_REGION ioctl, which creates, modifies, or deletes a mapping
135
// from guest physical to host user pages.
136
//
137
// SAFETY:
138
// Safe when the guest regions are guaranteed not to overlap.
139
unsafe fn set_user_memory_region(
140
vm: &SafeDescriptor,
141
slot: MemSlot,
142
read_only: bool,
143
guest_addr: u64,
144
memory_size: u64,
145
userspace_addr: *mut u8,
146
) -> Result<()> {
147
let mut flags = 0;
148
149
flags |= GH_MEM_ALLOW_READ | GH_MEM_ALLOW_EXEC;
150
if !read_only {
151
flags |= GH_MEM_ALLOW_WRITE;
152
}
153
154
let region = gh_userspace_memory_region {
155
label: slot,
156
flags,
157
guest_phys_addr: guest_addr,
158
memory_size,
159
userspace_addr: userspace_addr as u64,
160
};
161
162
let ret = ioctl_with_ref(vm, GH_VM_SET_USER_MEM_REGION, &region);
163
if ret == 0 {
164
Ok(())
165
} else {
166
errno_result()
167
}
168
}
169
170
fn map_cma_region(
171
vm: &SafeDescriptor,
172
slot: MemSlot,
173
lend: bool,
174
read_only: bool,
175
guest_addr: u64,
176
guest_mem_fd: u32,
177
size: u64,
178
offset: u64,
179
) -> Result<()> {
180
let mut flags = 0;
181
flags |= GUNYAH_MEM_ALLOW_READ | GUNYAH_MEM_ALLOW_EXEC;
182
if !read_only {
183
flags |= GUNYAH_MEM_ALLOW_WRITE;
184
}
185
if lend {
186
flags |= GUNYAH_MEM_FORCE_LEND;
187
} else {
188
flags |= GUNYAH_MEM_FORCE_SHARE;
189
}
190
let region = gunyah_map_cma_mem_args {
191
label: slot,
192
guest_addr,
193
flags,
194
guest_mem_fd,
195
offset,
196
size,
197
};
198
// SAFETY: safe because the return value is checked.
199
let ret = unsafe { ioctl_with_ref(vm, GH_VM_ANDROID_MAP_CMA_MEM, &region) };
200
if ret == 0 {
201
Ok(())
202
} else {
203
errno_result()
204
}
205
}
206
207
/// A registered IRQ route, used to remember which irqfds have been added to
/// the VM (see `GunyahVm::register_irqfd`).
#[derive(PartialEq, Eq, Hash)]
pub struct GunyahIrqRoute {
    // IRQ label passed to the GH_FN_IRQFD function.
    irq: u32,
    // Whether the interrupt is level-triggered (GH_IRQFD_LEVEL).
    level: bool,
}
212
213
/// A Gunyah virtual machine: wraps the VM fd created via GH_CREATE_VM plus
/// the bookkeeping shared between clones of this VM handle.
pub struct GunyahVm {
    // Handle to the hypervisor device this VM was created from.
    gh: Gunyah,
    // The VM fd returned by GH_CREATE_VM; all per-VM ioctls target this.
    vm: SafeDescriptor,
    // VM identifier; required only for QTVMs (see
    // `set_vm_auth_type_to_qcom_trusted_vm`).
    vm_id: Option<u16>,
    // Protected address space identifier; required only for QTVMs.
    pas_id: Option<u32>,
    // The fixed guest memory layout registered at construction time.
    guest_mem: GuestMemory,
    // Regions added later via `add_memory_region`, keyed by slot.
    mem_regions: Arc<Mutex<BTreeMap<MemSlot, (Box<dyn MappedRegion>, GuestAddress)>>>,
    /// A min heap of MemSlot numbers that were used and then removed and can now be re-used
    mem_slot_gaps: Arc<Mutex<BinaryHeap<Reverse<MemSlot>>>>,
    // IRQ routes registered through `register_irqfd`.
    routes: Arc<Mutex<HashSet<GunyahIrqRoute>>>,
    // Hypervisor configuration (protection type etc.) captured at creation.
    hv_cfg: crate::Config,
}
225
226
impl AsRawDescriptor for GunyahVm {
227
fn as_raw_descriptor(&self) -> RawDescriptor {
228
self.vm.as_raw_descriptor()
229
}
230
}
231
232
impl GunyahVm {
    /// Creates a new Gunyah VM and registers every region of `guest_mem` with
    /// the hypervisor.
    ///
    /// Depending on `cfg.protection_type` and each region's purpose, a region
    /// is either lent to the guest (removed from host access) or shared.
    /// `vm_id`/`pas_id` are only needed for QTVMs (see
    /// `set_vm_auth_type_to_qcom_trusted_vm`).
    pub fn new(
        gh: &Gunyah,
        vm_id: Option<u16>,
        pas_id: Option<u32>,
        guest_mem: GuestMemory,
        cfg: Config,
    ) -> Result<GunyahVm> {
        // SAFETY:
        // Safe because we know gunyah is a real gunyah fd as this module is the only one that can
        // make Gunyah objects.
        let ret = unsafe { ioctl_with_val(gh, GH_CREATE_VM, 0 as c_ulong) };
        if ret < 0 {
            return errno_result();
        }

        // SAFETY:
        // Safe because we verify that ret is valid and we own the fd.
        let vm_descriptor = unsafe { SafeDescriptor::from_raw_descriptor(ret) };
        let mut cma_mapped = false;
        for region in guest_mem.regions() {
            // Only protected VMs lend memory to the guest; the static swiotlb
            // region is kept shared so the host can still access it.
            let lend = if cfg.protection_type.isolates_memory() {
                match region.options.purpose {
                    MemoryRegionPurpose::Bios => true,
                    MemoryRegionPurpose::GuestMemoryRegion => true,
                    #[cfg(target_arch = "aarch64")]
                    MemoryRegionPurpose::ProtectedFirmwareRegion => true,
                    MemoryRegionPurpose::ReservedMemory => true,
                    #[cfg(target_arch = "aarch64")]
                    MemoryRegionPurpose::StaticSwiotlbRegion => false,
                }
            } else {
                false
            };
            // For QTVMs, the first provided file-backed region is always CMA and must be
            // mapped with GH_VM_ANDROID_MAP_CMA_MEM. Subsequent file-backed regions (if any)
            // are mapped normally. The `cma_mapped` flag tracks whether the CMA region has
            // already been handled.
            if let Some(file_backed) = &region.options.file_backed {
                if !cma_mapped {
                    map_cma_region(
                        &vm_descriptor,
                        region.index as MemSlot,
                        lend,
                        !file_backed.writable,
                        region.guest_addr.offset(),
                        region.shm.as_raw_descriptor().try_into().unwrap(),
                        region.size.try_into().unwrap(),
                        region.shm_offset,
                    )?;
                    cma_mapped = true;
                    continue;
                }
            }
            // A region is read-only iff it is file-backed and not writable.
            let read_only = region
                .options
                .file_backed
                .as_ref()
                .is_some_and(|fb| !fb.writable);
            // SAFETY:
            // Safe because the guest regions are guaranteed not to overlap.
            unsafe {
                if lend {
                    android_lend_user_memory_region(
                        &vm_descriptor,
                        region.index as MemSlot,
                        read_only,
                        region.guest_addr.offset(),
                        region.size.try_into().unwrap(),
                        region.host_addr as *mut u8,
                    )?;
                } else {
                    set_user_memory_region(
                        &vm_descriptor,
                        region.index as MemSlot,
                        read_only,
                        region.guest_addr.offset(),
                        region.size.try_into().unwrap(),
                        region.host_addr as *mut u8,
                    )?;
                }
            }
        }

        Ok(GunyahVm {
            gh: gh.try_clone()?,
            vm: vm_descriptor,
            vm_id,
            pas_id,
            guest_mem,
            mem_regions: Arc::new(Mutex::new(BTreeMap::new())),
            mem_slot_gaps: Arc::new(Mutex::new(BinaryHeap::new())),
            routes: Arc::new(Mutex::new(HashSet::new())),
            hv_cfg: cfg,
        })
    }

    /// Configures this VM as a Qualcomm Trusted VM (QTVM) via
    /// GH_VM_ANDROID_SET_AUTH_TYPE, pointing the hypervisor at the payload
    /// region used for authentication.
    ///
    /// Panics if `vm_id` or `pas_id` was not supplied when the VM was created.
    pub fn set_vm_auth_type_to_qcom_trusted_vm(
        &self,
        payload_start: GuestAddress,
        payload_size: u64,
    ) -> Result<()> {
        let gunyah_qtvm_auth_arg = gunyah_qtvm_auth_arg {
            vm_id: self.vm_id.expect("VM ID not specified for a QTVM"),
            pas_id: self.pas_id.expect("PAS ID not specified for a QTVM"),
            // QTVMs have the metadata needed for authentication at the start of the guest
            // addrspace.
            guest_phys_addr: payload_start.offset(),
            size: payload_size,
        };
        let gunyah_auth_desc = gunyah_auth_desc {
            type_: gunyah_auth_type_GUNYAH_QCOM_TRUSTED_VM_TYPE,
            arg_size: size_of::<gunyah_qtvm_auth_arg>() as u32,
            // Pointer to the arg struct above; it outlives the ioctl call below.
            arg: &gunyah_qtvm_auth_arg as *const gunyah_qtvm_auth_arg as u64,
        };
        // SAFETY: safe because the return value is checked.
        let ret = unsafe { ioctl_with_ref(self, GH_VM_ANDROID_SET_AUTH_TYPE, &gunyah_auth_desc) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Creates vCPU number `id` by adding a GH_FN_VCPU function to the VM and
    /// mapping its shared run structure.
    fn create_vcpu(&self, id: usize) -> Result<GunyahVcpu> {
        let gh_fn_vcpu_arg = gh_fn_vcpu_arg {
            id: id.try_into().unwrap(),
        };

        let function_desc = gh_fn_desc {
            type_: GH_FN_VCPU,
            arg_size: size_of::<gh_fn_vcpu_arg>() as u32,
            // Safe because kernel is expecting pointer with non-zero arg_size
            arg: &gh_fn_vcpu_arg as *const gh_fn_vcpu_arg as u64,
        };

        // SAFETY:
        // Safe because we know that our file is a VM fd and we verify the return result.
        let fd = unsafe { ioctl_with_ref(self, GH_VM_ADD_FUNCTION, &function_desc) };
        if fd < 0 {
            return errno_result();
        }

        // SAFETY:
        // Wrap the vcpu now in case the following ? returns early. This is safe because we verified
        // the value of the fd and we own the fd.
        let vcpu = unsafe { File::from_raw_descriptor(fd) };

        // SAFETY:
        // Safe because we know this is a Gunyah VCPU
        let res = unsafe { ioctl(&vcpu, GH_VCPU_MMAP_SIZE) };
        if res < 0 {
            return errno_result();
        }
        let run_mmap_size = res as usize;

        // Map the vCPU's run structure (gh_vcpu_run) shared with the kernel.
        let run_mmap = MemoryMappingBuilder::new(run_mmap_size)
            .from_file(&vcpu)
            .build()
            .map_err(|_| Error::new(ENOSPC))?;

        Ok(GunyahVcpu {
            vm: self.vm.try_clone()?,
            vcpu,
            id,
            run_mmap: Arc::new(run_mmap),
        })
    }

    /// Registers `evt` as an irqfd for IRQ `label`, optionally level-triggered,
    /// and records the route for bookkeeping.
    pub fn register_irqfd(&self, label: u32, evt: &Event, level: bool) -> Result<()> {
        let gh_fn_irqfd_arg = gh_fn_irqfd_arg {
            fd: evt.as_raw_descriptor() as u32,
            label,
            flags: if level { GH_IRQFD_LEVEL } else { 0 },
            ..Default::default()
        };

        let function_desc = gh_fn_desc {
            type_: GH_FN_IRQFD,
            arg_size: size_of::<gh_fn_irqfd_arg>() as u32,
            // SAFETY:
            // Safe because kernel is expecting pointer with non-zero arg_size
            arg: &gh_fn_irqfd_arg as *const gh_fn_irqfd_arg as u64,
        };

        // SAFETY: safe because the return value is checked.
        let ret = unsafe { ioctl_with_ref(self, GH_VM_ADD_FUNCTION, &function_desc) };
        if ret == 0 {
            self.routes
                .lock()
                .insert(GunyahIrqRoute { irq: label, level });
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Removes the irqfd previously registered for IRQ `label`.
    ///
    /// NOTE(review): the matching entry in `self.routes` is not removed here —
    /// presumably intentional, but worth confirming against callers.
    pub fn unregister_irqfd(&self, label: u32, _evt: &Event) -> Result<()> {
        let gh_fn_irqfd_arg = gh_fn_irqfd_arg {
            label,
            ..Default::default()
        };

        let function_desc = gh_fn_desc {
            type_: GH_FN_IRQFD,
            arg_size: size_of::<gh_fn_irqfd_arg>() as u32,
            // Safe because kernel is expecting pointer with non-zero arg_size
            arg: &gh_fn_irqfd_arg as *const gh_fn_irqfd_arg as u64,
        };

        // SAFETY: safe because memory is not modified and the return value is checked.
        let ret = unsafe { ioctl_with_ref(self, GH_VM_REMOVE_FUNCTION, &function_desc) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Clones this VM handle; the clones share the bookkeeping maps via `Arc`.
    pub fn try_clone(&self) -> Result<Self>
    where
        Self: Sized,
    {
        Ok(GunyahVm {
            gh: self.gh.try_clone()?,
            vm: self.vm.try_clone()?,
            vm_id: self.vm_id,
            pas_id: self.pas_id,
            guest_mem: self.guest_mem.clone(),
            mem_regions: self.mem_regions.clone(),
            mem_slot_gaps: self.mem_slot_gaps.clone(),
            routes: self.routes.clone(),
            hv_cfg: self.hv_cfg,
        })
    }

    /// Tells the hypervisor where the device tree blob lives in guest memory.
    fn set_dtb_config(&self, fdt_address: GuestAddress, fdt_size: usize) -> Result<()> {
        let dtb_config = gh_vm_dtb_config {
            guest_phys_addr: fdt_address.offset(),
            size: fdt_size.try_into().unwrap(),
        };

        // SAFETY:
        // Safe because we know this is a Gunyah VM
        let ret = unsafe { ioctl_with_ref(self, GH_VM_SET_DTB_CONFIG, &dtb_config) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Tells the hypervisor where the protected VM firmware lives in guest
    /// memory (GH_VM_ANDROID_SET_FW_CONFIG).
    fn set_protected_vm_firmware_ipa(&self, fw_addr: GuestAddress, fw_size: u64) -> Result<()> {
        let fw_config = gh_vm_firmware_config {
            guest_phys_addr: fw_addr.offset(),
            size: fw_size,
        };

        // SAFETY:
        // Safe because we know this is a Gunyah VM
        let ret = unsafe { ioctl_with_ref(self, GH_VM_ANDROID_SET_FW_CONFIG, &fw_config) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Sets the guest's initial program counter.
    fn set_boot_pc(&self, value: u64) -> Result<()> {
        self.set_boot_context(gh_vm_boot_context_reg::REG_SET_PC, 0, value)
    }

    // Sets the boot context for the Gunyah VM by specifying the register type, index, and value.
    fn set_boot_context(
        &self,
        reg_type: gh_vm_boot_context_reg::Type,
        reg_idx: u8,
        value: u64,
    ) -> Result<()> {
        let reg_id = boot_context_reg_id(reg_type, reg_idx);
        let boot_context = gh_vm_boot_context {
            reg: reg_id,
            value,
            ..Default::default()
        };

        // SAFETY: Safe because we ensure the boot_context is correctly initialized
        // and the ioctl call is checked.
        let ret = unsafe { ioctl_with_ref(self, GH_VM_SET_BOOT_CONTEXT, &boot_context) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Starts running the VM (GH_VM_START).
    fn start(&self) -> Result<()> {
        // SAFETY: safe because memory is not modified and the return value is checked.
        let ret = unsafe { ioctl(self, GH_VM_START) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Handles a balloon inflate: reclaims the guest range from the hypervisor,
    /// then releases the backing host pages.
    fn handle_inflate(&self, guest_addr: GuestAddress, size: u64) -> Result<()> {
        let range = gunyah_address_range {
            guest_phys_addr: guest_addr.0,
            size,
        };

        // SAFETY: Safe because we know this is a Gunyah VM
        let ret = unsafe { ioctl_with_ref(self, GH_VM_RECLAIM_REGION, &range) };
        if ret != 0 {
            warn!("Gunyah failed to reclaim {:?}", range);
            return errno_result();
        }

        // Only after a successful reclaim is it safe to punch out the host pages.
        match self.guest_mem.remove_range(guest_addr, size) {
            Ok(_) => Ok(()),
            Err(vm_memory::Error::MemoryAccess(_, MmapError::SystemCallFailed(e))) => Err(e),
            Err(_) => Err(Error::new(EIO)),
        }
    }
}
558
559
impl Vm for GunyahVm {
    /// Clones this VM handle; clones share the bookkeeping maps via `Arc`.
    fn try_clone(&self) -> Result<Self>
    where
        Self: Sized,
    {
        Ok(GunyahVm {
            gh: self.gh.try_clone()?,
            vm: self.vm.try_clone()?,
            vm_id: self.vm_id,
            pas_id: self.pas_id,
            guest_mem: self.guest_mem.clone(),
            mem_regions: self.mem_regions.clone(),
            mem_slot_gaps: self.mem_slot_gaps.clone(),
            routes: self.routes.clone(),
            hv_cfg: self.hv_cfg,
        })
    }

    /// Not supported on Gunyah.
    fn try_clone_descriptor(&self) -> Result<SafeDescriptor> {
        error!("try_clone_descriptor hasn't been tested on gunyah, returning -ENOTSUP");
        Err(Error::new(ENOTSUP))
    }

    fn hypervisor_kind(&self) -> HypervisorKind {
        HypervisorKind::Gunyah
    }

    /// Reports per-VM capabilities supported by Gunyah.
    fn check_capability(&self, c: VmCap) -> bool {
        match c {
            VmCap::ArmPmuV3 => false,
            VmCap::DirtyLog => false,
            // Strictly speaking, Gunyah supports pvclock, but Gunyah takes care
            // of it and crosvm doesn't need to do anything for it
            VmCap::PvClock => false,
            VmCap::Protected => true,
            VmCap::EarlyInitCpuid => false,
            #[cfg(target_arch = "x86_64")]
            VmCap::BusLockDetect => false,
            VmCap::ReadOnlyMemoryRegion => false,
            VmCap::MemNoncoherentDma => false,
            #[cfg(target_arch = "aarch64")]
            VmCap::Sve => false,
        }
    }

    /// Guest physical address width, in bits.
    fn get_guest_phys_addr_bits(&self) -> u8 {
        40
    }

    fn get_memory(&self) -> &GuestMemory {
        &self.guest_mem
    }

    /// Maps `mem_region` into the guest at `guest_addr` and returns the slot
    /// it was assigned. Dirty-page logging and cache type are ignored.
    fn add_memory_region(
        &mut self,
        guest_addr: GuestAddress,
        mem_region: Box<dyn MappedRegion>,
        read_only: bool,
        _log_dirty_pages: bool,
        _cache: MemCacheType,
    ) -> Result<MemSlot> {
        let pgsz = pagesize() as u64;
        // Gunyah require to set the user memory region with page size aligned size. Safe to extend
        // the mem.size() to be page size aligned because the mmap will round up the size to be
        // page size aligned if it is not.
        let size = (mem_region.size() as u64).div_ceil(pgsz) * pgsz;
        let end_addr = guest_addr.checked_add(size).ok_or(Error::new(EOVERFLOW))?;

        // NOTE(review): this only checks overlap against the fixed guest
        // memory layout, not against regions previously added through this
        // API — presumably callers coordinate slots; confirm.
        if self.guest_mem.range_overlap(guest_addr, end_addr) {
            return Err(Error::new(ENOSPC));
        }

        // Reuse the lowest freed slot if one exists, otherwise allocate the
        // next slot after the static guest memory regions.
        let mut regions = self.mem_regions.lock();
        let mut gaps = self.mem_slot_gaps.lock();
        let slot = match gaps.pop() {
            Some(gap) => gap.0,
            None => (regions.len() + self.guest_mem.num_regions() as usize) as MemSlot,
        };

        // SAFETY: safe because the overlap check above ensures the guest
        // region does not overlap guest memory, and the return value is checked.
        let res = unsafe {
            set_user_memory_region(
                &self.vm,
                slot,
                read_only,
                guest_addr.offset(),
                size,
                mem_region.as_ptr(),
            )
        };

        if let Err(e) = res {
            // Return the slot to the free pool on failure.
            gaps.push(Reverse(slot));
            return Err(e);
        }
        regions.insert(slot, (mem_region, guest_addr));
        Ok(slot)
    }

    /// Flushes `size` bytes at `offset` of the region in `slot` to its backing
    /// store.
    fn msync_memory_region(&mut self, slot: MemSlot, offset: usize, size: usize) -> Result<()> {
        let mut regions = self.mem_regions.lock();
        let (mem, _) = regions.get_mut(&slot).ok_or_else(|| Error::new(ENOENT))?;

        mem.msync(offset, size).map_err(|err| match err {
            MmapError::InvalidAddress => Error::new(EFAULT),
            MmapError::NotPageAligned => Error::new(EINVAL),
            MmapError::SystemCallFailed(e) => e,
            _ => Error::new(EIO),
        })
    }

    /// Not supported on Gunyah.
    fn madvise_pageout_memory_region(
        &mut self,
        _slot: MemSlot,
        _offset: usize,
        _size: usize,
    ) -> Result<()> {
        Err(Error::new(ENOTSUP))
    }

    /// Not supported on Gunyah.
    fn madvise_remove_memory_region(
        &mut self,
        _slot: MemSlot,
        _offset: usize,
        _size: usize,
    ) -> Result<()> {
        Err(Error::new(ENOTSUP))
    }

    fn remove_memory_region(&mut self, _slot: MemSlot) -> Result<Box<dyn MappedRegion>> {
        unimplemented!()
    }

    fn create_device(&self, _kind: DeviceKind) -> Result<SafeDescriptor> {
        unimplemented!()
    }

    fn get_dirty_log(&self, _slot: MemSlot, _dirty_log: &mut [u8]) -> Result<()> {
        unimplemented!()
    }

    /// Registers `evt` as an ioeventfd for the given MMIO address, optionally
    /// matching a specific written value (`datamatch`).
    fn register_ioevent(
        &mut self,
        evt: &Event,
        addr: IoEventAddress,
        datamatch: Datamatch,
    ) -> Result<()> {
        // Translate the datamatch spec into (match-enabled, value, width).
        let (do_datamatch, datamatch_value, datamatch_len) = match datamatch {
            Datamatch::AnyLength => (false, 0, 0),
            Datamatch::U8(v) => match v {
                Some(u) => (true, u as u64, 1),
                None => (false, 0, 1),
            },
            Datamatch::U16(v) => match v {
                Some(u) => (true, u as u64, 2),
                None => (false, 0, 2),
            },
            Datamatch::U32(v) => match v {
                Some(u) => (true, u as u64, 4),
                None => (false, 0, 4),
            },
            Datamatch::U64(v) => match v {
                Some(u) => (true, u, 8),
                None => (false, 0, 8),
            },
        };

        let mut flags = 0;
        if do_datamatch {
            flags |= 1 << GH_IOEVENTFD_DATAMATCH;
        }

        // Only MMIO addresses are supported; Gunyah has no PIO space here.
        let maddr = if let IoEventAddress::Mmio(maddr) = addr {
            maddr
        } else {
            todo!()
        };

        let gh_fn_ioeventfd_arg = gh_fn_ioeventfd_arg {
            fd: evt.as_raw_descriptor(),
            datamatch: datamatch_value,
            len: datamatch_len,
            addr: maddr,
            flags,
            ..Default::default()
        };

        let function_desc = gh_fn_desc {
            type_: GH_FN_IOEVENTFD,
            arg_size: size_of::<gh_fn_ioeventfd_arg>() as u32,
            arg: &gh_fn_ioeventfd_arg as *const gh_fn_ioeventfd_arg as u64,
        };

        // SAFETY: safe because memory is not modified and the return value is checked.
        let ret = unsafe { ioctl_with_ref(self, GH_VM_ADD_FUNCTION, &function_desc) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Removes the ioeventfd registered at the given MMIO address.
    fn unregister_ioevent(
        &mut self,
        _evt: &Event,
        addr: IoEventAddress,
        _datamatch: Datamatch,
    ) -> Result<()> {
        let maddr = if let IoEventAddress::Mmio(maddr) = addr {
            maddr
        } else {
            todo!()
        };

        let gh_fn_ioeventfd_arg = gh_fn_ioeventfd_arg {
            addr: maddr,
            ..Default::default()
        };

        let function_desc = gh_fn_desc {
            type_: GH_FN_IOEVENTFD,
            arg_size: size_of::<gh_fn_ioeventfd_arg>() as u32,
            arg: &gh_fn_ioeventfd_arg as *const gh_fn_ioeventfd_arg as u64,
        };

        // SAFETY: safe because memory is not modified and the return value is checked.
        let ret = unsafe { ioctl_with_ref(self, GH_VM_REMOVE_FUNCTION, &function_desc) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// No-op: ioevents are delivered by the kernel, nothing to do here.
    fn handle_io_events(&self, _addr: IoEventAddress, _data: &[u8]) -> Result<()> {
        Ok(())
    }

    fn enable_hypercalls(&mut self, _nr: u64, _count: usize) -> Result<()> {
        unimplemented!()
    }

    fn get_pvclock(&self) -> Result<ClockState> {
        unimplemented!()
    }

    fn set_pvclock(&self, _state: &ClockState) -> Result<()> {
        unimplemented!()
    }

    /// Maps `fd` at `offset` within the memory region in `slot`.
    fn add_fd_mapping(
        &mut self,
        slot: u32,
        offset: usize,
        size: usize,
        fd: &dyn AsRawDescriptor,
        fd_offset: u64,
        prot: Protection,
    ) -> Result<()> {
        let mut regions = self.mem_regions.lock();
        let (region, _) = regions.get_mut(&slot).ok_or_else(|| Error::new(EINVAL))?;

        match region.add_fd_mapping(offset, size, fd, fd_offset, prot) {
            Ok(()) => Ok(()),
            Err(MmapError::SystemCallFailed(e)) => Err(e),
            Err(_) => Err(Error::new(EIO)),
        }
    }

    /// Removes a mapping previously added with `add_fd_mapping`.
    fn remove_mapping(&mut self, slot: u32, offset: usize, size: usize) -> Result<()> {
        let mut regions = self.mem_regions.lock();
        let (region, _) = regions.get_mut(&slot).ok_or_else(|| Error::new(EINVAL))?;

        match region.remove_mapping(offset, size) {
            Ok(()) => Ok(()),
            Err(MmapError::SystemCallFailed(e)) => Err(e),
            Err(_) => Err(Error::new(EIO)),
        }
    }

    /// Dispatches balloon events; only inflate requires hypervisor action.
    fn handle_balloon_event(&mut self, event: BalloonEvent) -> Result<()> {
        match event {
            BalloonEvent::Inflate(m) => self.handle_inflate(m.guest_address, m.size),
            BalloonEvent::Deflate(_) => Ok(()),
            BalloonEvent::BalloonTargetReached(_) => Ok(()),
        }
    }
}
847
848
// Exit types reported in `status.exit_info.type_` when a vCPU run returns
// GH_VM_STATUS_GH_VM_STATUS_EXITED (see `GunyahVcpu::run`).
const GH_RM_EXIT_TYPE_VM_EXIT: u16 = 0;
const GH_RM_EXIT_TYPE_PSCI_POWER_OFF: u16 = 1;
const GH_RM_EXIT_TYPE_PSCI_SYSTEM_RESET: u16 = 2;
const GH_RM_EXIT_TYPE_PSCI_SYSTEM_RESET2: u16 = 3;
const GH_RM_EXIT_TYPE_WDT_BITE: u16 = 4;
const GH_RM_EXIT_TYPE_HYP_ERROR: u16 = 5;
const GH_RM_EXIT_TYPE_ASYNC_EXT_ABORT: u16 = 6;
const GH_RM_EXIT_TYPE_VM_FORCE_STOPPED: u16 = 7;
856
857
/// A single Gunyah vCPU, created via `GunyahVm::create_vcpu`.
pub struct GunyahVcpu {
    // Clone of the owning VM's fd.
    vm: SafeDescriptor,
    // The vCPU fd returned by GH_VM_ADD_FUNCTION (GH_FN_VCPU).
    vcpu: File,
    // This vCPU's index.
    id: usize,
    // Shared mapping of the kernel's `gh_vcpu_run` structure for this vCPU;
    // shared with `GunyahVcpuSignalHandle` via `Arc`.
    run_mmap: Arc<MemoryMapping>,
}
863
864
/// Signal handle that can request an immediate exit of a vCPU from another
/// thread by writing to the vCPU's shared run structure.
struct GunyahVcpuSignalHandle {
    // Shares the vCPU's `gh_vcpu_run` mapping so it stays valid even if the
    // vCPU object is dropped.
    run_mmap: Arc<MemoryMapping>,
}
867
868
impl VcpuSignalHandleInner for GunyahVcpuSignalHandle {
    /// Asks the vCPU to exit to userspace as soon as possible by setting the
    /// `immediate_exit` flag in its shared run structure.
    fn signal_immediate_exit(&self) {
        // SAFETY: we ensure `run_mmap` is a valid mapping of `gh_vcpu_run` at creation time,
        // and the `Arc` ensures the mapping still exists while we hold a reference to it.
        unsafe {
            let run = self.run_mmap.as_ptr() as *mut gh_vcpu_run;
            (*run).immediate_exit = 1;
        }
    }
}
878
879
impl AsRawDescriptor for GunyahVcpu {
880
fn as_raw_descriptor(&self) -> RawDescriptor {
881
self.vcpu.as_raw_descriptor()
882
}
883
}
884
885
impl Vcpu for GunyahVcpu {
    /// Clones the vCPU handle; the run-structure mapping is shared via `Arc`.
    fn try_clone(&self) -> Result<Self>
    where
        Self: Sized,
    {
        let vcpu = self.vcpu.try_clone()?;

        Ok(GunyahVcpu {
            vm: self.vm.try_clone()?,
            vcpu,
            id: self.id,
            run_mmap: self.run_mmap.clone(),
        })
    }

    fn as_vcpu(&self) -> &dyn Vcpu {
        self
    }

    /// Runs the vCPU until it exits, then translates the exit reason recorded
    /// in the shared `gh_vcpu_run` structure into a `VcpuExit`.
    fn run(&mut self) -> Result<VcpuExit> {
        // SAFETY:
        // Safe because we know our file is a VCPU fd and we verify the return result.
        let ret = unsafe { ioctl(self, GH_VCPU_RUN) };
        if ret != 0 {
            return errno_result();
        }

        // SAFETY:
        // Safe because we know we mapped enough memory to hold the gh_vcpu_run struct
        // because the kernel told us how large it is.
        let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut gh_vcpu_run) };
        match run.exit_reason {
            GH_VCPU_EXIT_MMIO => Ok(VcpuExit::Mmio),
            GH_VCPU_EXIT_STATUS => {
                // SAFETY:
                // Safe because the exit_reason (which comes from the kernel) told us which
                // union field to use.
                let status = unsafe { &mut run.__bindgen_anon_1.status };
                match status.status {
                    GH_VM_STATUS_GH_VM_STATUS_LOAD_FAILED => Ok(VcpuExit::FailEntry {
                        hardware_entry_failure_reason: 0,
                    }),
                    GH_VM_STATUS_GH_VM_STATUS_CRASHED => Ok(VcpuExit::SystemEventCrash),
                    GH_VM_STATUS_GH_VM_STATUS_EXITED => {
                        info!("exit type {}", status.exit_info.type_);
                        // Map the Resource Manager exit type (see the
                        // GH_RM_EXIT_TYPE_* consts) to a crosvm system event.
                        match status.exit_info.type_ {
                            GH_RM_EXIT_TYPE_VM_EXIT => Ok(VcpuExit::SystemEventShutdown),
                            GH_RM_EXIT_TYPE_PSCI_POWER_OFF => Ok(VcpuExit::SystemEventShutdown),
                            GH_RM_EXIT_TYPE_PSCI_SYSTEM_RESET => Ok(VcpuExit::SystemEventReset),
                            GH_RM_EXIT_TYPE_PSCI_SYSTEM_RESET2 => Ok(VcpuExit::SystemEventReset),
                            GH_RM_EXIT_TYPE_WDT_BITE => Ok(VcpuExit::SystemEventCrash),
                            GH_RM_EXIT_TYPE_HYP_ERROR => Ok(VcpuExit::SystemEventCrash),
                            GH_RM_EXIT_TYPE_ASYNC_EXT_ABORT => Ok(VcpuExit::SystemEventCrash),
                            GH_RM_EXIT_TYPE_VM_FORCE_STOPPED => Ok(VcpuExit::SystemEventShutdown),
                            r => {
                                warn!("Unknown exit type: {}", r);
                                Err(Error::new(EINVAL))
                            }
                        }
                    }
                    r => {
                        warn!("Unknown vm status: {}", r);
                        Err(Error::new(EINVAL))
                    }
                }
            }
            r => {
                warn!("unknown gh exit reason: {}", r);
                Err(Error::new(EINVAL))
            }
        }
    }

    fn id(&self) -> usize {
        self.id
    }

    /// Sets or clears the immediate-exit flag in the shared run structure.
    fn set_immediate_exit(&self, exit: bool) {
        // SAFETY:
        // Safe because we know we mapped enough memory to hold the gh_vcpu_run struct because the
        // kernel told us how large it was. The pointer is page aligned so casting to a different
        // type is well defined, hence the clippy allow attribute.
        let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut gh_vcpu_run) };
        run.immediate_exit = exit.into();
    }

    /// Returns a handle other threads can use to request an immediate exit.
    fn signal_handle(&self) -> VcpuSignalHandle {
        VcpuSignalHandle {
            inner: Box::new(GunyahVcpuSignalHandle {
                run_mmap: self.run_mmap.clone(),
            }),
        }
    }

    /// Completes a pending MMIO exit by invoking `handle_fn` with the access
    /// parameters recorded in the shared run structure.
    ///
    /// Must only be called after `run` returned `VcpuExit::Mmio`.
    fn handle_mmio(&self, handle_fn: &mut dyn FnMut(IoParams) -> Result<()>) -> Result<()> {
        // SAFETY:
        // Safe because we know we mapped enough memory to hold the gh_vcpu_run struct because the
        // kernel told us how large it was. The pointer is page aligned so casting to a different
        // type is well defined
        let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut gh_vcpu_run) };
        // Verify that the handler is called in the right context.
        assert!(run.exit_reason == GH_VCPU_EXIT_MMIO);
        // SAFETY:
        // Safe because the exit_reason (which comes from the kernel) told us which
        // union field to use.
        let mmio = unsafe { &mut run.__bindgen_anon_1.mmio };
        let address = mmio.phys_addr;
        let data = &mut mmio.data[..mmio.len as usize];
        if mmio.is_write != 0 {
            handle_fn(IoParams {
                address,
                operation: IoOperation::Write(data),
            })
        } else {
            // For reads, the handler fills `data` in place for the kernel to
            // deliver back to the guest.
            handle_fn(IoParams {
                address,
                operation: IoOperation::Read(data),
            })
        }
    }

    /// Port I/O does not exist on Gunyah; this must never be called.
    fn handle_io(&self, _handle_fn: &mut dyn FnMut(IoParams)) -> Result<()> {
        unreachable!()
    }

    /// No-op: nothing to save on suspend.
    fn on_suspend(&self) -> Result<()> {
        Ok(())
    }

    unsafe fn enable_raw_capability(&self, _cap: u32, _args: &[u64; 4]) -> Result<()> {
        unimplemented!()
    }
}
1018
1019