pub mod halla_sys;
use std::cmp::Reverse;
use std::collections::BTreeMap;
use std::collections::BinaryHeap;
use std::convert::TryFrom;
use std::ffi::CString;
use std::mem::offset_of;
use std::os::raw::c_ulong;
use std::os::unix::prelude::OsStrExt;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use aarch64_sys_reg::AArch64SysRegId;
use base::errno_result;
use base::error;
use base::ioctl_with_mut_ref;
use base::ioctl_with_ref;
use base::ioctl_with_val;
use base::pagesize;
use base::AsRawDescriptor;
use base::Error;
use base::Event;
use base::FromRawDescriptor;
use base::MappedRegion;
use base::MemoryMapping;
use base::MemoryMappingBuilder;
use base::MmapError;
use base::Protection;
use base::RawDescriptor;
use base::Result;
use base::SafeDescriptor;
use cros_fdt::Fdt;
pub use halla_sys::*;
use libc::open;
use libc::EFAULT;
use libc::EINVAL;
use libc::EIO;
use libc::ENOENT;
use libc::ENOMEM;
use libc::ENOSPC;
use libc::ENOTSUP;
use libc::EOVERFLOW;
use libc::O_CLOEXEC;
use libc::O_RDWR;
use snapshot::AnySnapshot;
use sync::Mutex;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;
use vm_memory::MemoryRegionPurpose;
use crate::BalloonEvent;
use crate::ClockState;
use crate::Config;
use crate::Datamatch;
use crate::DeviceKind;
use crate::Hypervisor;
use crate::HypervisorCap;
use crate::HypervisorKind;
use crate::IoEventAddress;
use crate::IoOperation;
use crate::IoParams;
use crate::MemCacheType;
use crate::MemSlot;
use crate::ProtectionType;
use crate::PsciVersion;
use crate::Vcpu;
use crate::VcpuAArch64;
use crate::VcpuExit;
use crate::VcpuFeature;
use crate::VcpuRegAArch64;
use crate::VcpuSignalHandle;
use crate::VcpuSignalHandleInner;
use crate::Vm;
use crate::VmAArch64;
use crate::VmCap;
use crate::PSCI_0_2;
impl Halla {
pub fn get_guest_phys_addr_bits(&self) -> u8 {
        // SAFETY: this ioctl only returns an integer; no memory is touched.
        match unsafe { ioctl_with_val(self, HVM_CHECK_EXTENSION, HVM_CAP_ARM_VM_IPA_SIZE.into()) } {
            // Zero or negative means the extension is absent; fall back to
            // the architectural default of a 40-bit IPA space.
            ret if ret <= 0 => 40,
            ipa => ipa as u8,
        }
}
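    /// Computes the type argument for `HVM_CREATE_VM`: the low bits carry the
    /// IPA width in bits and `HVM_VM_TYPE_ARM_PROTECTED` is OR'd in for
    /// protected-VM configurations. A minimal sketch, assuming a reachable
    /// `/dev/halla` node:
    ///
    /// ```ignore
    /// let halla = Halla::new()?;
    /// let vm_type = halla.get_vm_type(ProtectionType::Unprotected)?;
    /// // vm_type & HVM_VM_TYPE_IPA_SIZE_MASK holds the IPA width in bits.
    /// ```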
pub fn get_vm_type(&self, protection_type: ProtectionType) -> Result<u32> {
let ipa_size = self.get_guest_phys_addr_bits() as u32;
let protection_flag = if protection_type.isolates_memory() {
HVM_VM_TYPE_ARM_PROTECTED
} else {
0
};
Ok((ipa_size & HVM_VM_TYPE_IPA_SIZE_MASK) | protection_flag)
}
}
impl HallaVm {
pub fn init_arch(&self, cfg: &Config) -> Result<()> {
#[cfg(target_arch = "aarch64")]
        if cfg.mte {
            // SAFETY: the kernel only reads the capability args array; no host
            // memory is modified by enabling MTE.
            unsafe { self.ctrl_halla_enable_capability(HallaCap::ArmMte, &[0, 0, 0, 0, 0]) }?;
        }
Ok(())
}
pub fn check_capability_arch(&self, _c: VmCap) -> Option<bool> {
None
}
    pub fn get_pvclock_arch(&self) -> Result<ClockState> {
        error!("Halla: get_pvclock_arch is not supported");
        Err(Error::new(EINVAL))
    }
    pub fn set_pvclock_arch(&self, _state: &ClockState) -> Result<()> {
        error!("Halla: set_pvclock_arch is not supported");
        Err(Error::new(EINVAL))
    }
fn get_protected_vm_info(&self) -> Result<u64> {
let cap: hvm_enable_cap = unsafe {
self.ctrl_halla_enable_capability(
HallaCap::ArmProtectedVm,
&[HVM_CAP_ARM_PVM_GET_PVMFW_SIZE as u64, 0, 0, 0, 0],
)
}?;
Ok(cap.args[1])
}
fn set_protected_vm_firmware_ipa(&self, fw_addr: GuestAddress) -> Result<()> {
unsafe {
self.ctrl_halla_enable_capability(
HallaCap::ArmProtectedVm,
&[HVM_CAP_ARM_PVM_SET_PVMFW_IPA as u64, fw_addr.0, 0, 0, 0],
)
}?;
Ok(())
}
}
impl VmAArch64 for HallaVm {
fn get_hypervisor(&self) -> &dyn Hypervisor {
&self.halla
}
fn load_protected_vm_firmware(
&mut self,
fw_addr: GuestAddress,
fw_max_size: u64,
) -> Result<()> {
        let size: u64 = self.get_protected_vm_info()?;
        if size == 0 {
            return Err(Error::new(EINVAL));
        }
        if size > fw_max_size {
            return Err(Error::new(ENOMEM));
        }
        self.set_protected_vm_firmware_ipa(fw_addr)
}
fn create_vcpu(&self, id: usize) -> Result<Box<dyn VcpuAArch64>> {
Ok(Box::new(HallaVm::create_vcpu(self, id)?))
}
fn create_fdt(&self, _fdt: &mut Fdt, _phandles: &BTreeMap<&str, u32>) -> cros_fdt::Result<()> {
Ok(())
}
fn init_arch(
&self,
_payload_entry_address: GuestAddress,
fdt_address: GuestAddress,
fdt_size: usize,
) -> std::result::Result<(), anyhow::Error> {
let dtb_config = hvm_dtb_config {
dtb_addr: fdt_address.offset(),
dtb_size: fdt_size.try_into().unwrap(),
};
let ret = unsafe { ioctl_with_ref(self, HVM_SET_DTB_CONFIG, &dtb_config) };
if ret == 0 {
Ok(())
} else {
errno_result()?
}
}
}
impl HallaVcpu {
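    /// Writes a 64-bit value into a single vCPU register via
    /// `HVM_SET_ONE_REG`. A hedged sketch of setting the entry point
    /// (`entry` is a hypothetical guest address):
    ///
    /// ```ignore
    /// vcpu.set_one_halla_reg_u64(HallaVcpuRegister::Pc, entry)?;
    /// ```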
fn set_one_halla_reg_u64(&self, hvm_reg_id: HallaVcpuRegister, data: u64) -> Result<()> {
self.set_one_halla_reg(hvm_reg_id, data.to_ne_bytes().as_slice())
}
fn set_one_halla_reg(&self, hvm_reg_id: HallaVcpuRegister, data: &[u8]) -> Result<()> {
assert_eq!(hvm_reg_id.size(), data.len());
let id: u64 = hvm_reg_id.into();
let onereg = hvm_one_reg {
id,
addr: (data.as_ptr() as usize)
.try_into()
.expect("can't represent usize as u64"),
};
let ret = unsafe { ioctl_with_ref(self, HVM_SET_ONE_REG, &onereg) };
if ret == 0 {
Ok(())
} else {
errno_result()
}
}
fn get_one_halla_reg_u64(&self, hvm_reg_id: HallaVcpuRegister) -> Result<u64> {
let mut bytes = 0u64.to_ne_bytes();
self.get_one_halla_reg(hvm_reg_id, bytes.as_mut_slice())?;
Ok(u64::from_ne_bytes(bytes))
}
fn get_one_halla_reg(&self, hvm_reg_id: HallaVcpuRegister, data: &mut [u8]) -> Result<()> {
assert_eq!(hvm_reg_id.size(), data.len());
let id: u64 = hvm_reg_id.into();
let onereg = hvm_one_reg {
id,
addr: (data.as_mut_ptr() as usize)
.try_into()
.expect("can't represent usize as u64"),
};
let ret = unsafe { ioctl_with_ref(self, HVM_GET_ONE_REG, &onereg) };
if ret == 0 {
Ok(())
} else {
errno_result()
}
}
}
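/// A vCPU register in Halla's ONE_REG-style u64 encoding. `X` and `V` carry a
/// general-purpose or FP/SIMD register index, `Firmware` a firmware
/// pseudo-register index, `System` an AArch64 system register, and `Ccsidr`
/// the index of a demuxed cache-size ID register.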
#[derive(Debug, Copy, Clone)]
pub enum HallaVcpuRegister {
X(u8),
Sp,
Pc,
Pstate,
V(u8),
Firmware(u16),
System(AArch64SysRegId),
Ccsidr(u8),
}
impl HallaVcpuRegister {
pub fn size(&self) -> usize {
let hvm_reg = u64::from(*self);
let size_field = hvm_reg & HVM_REG_SIZE_MASK;
        // HVM_REG_SIZE_U8 is generated with a narrower integer type than its
        // siblings, so re-declare it as u64 to use it as a match pattern.
        const REG_SIZE_U8: u64 = HVM_REG_SIZE_U8 as u64;
match size_field {
REG_SIZE_U8 => 1,
HVM_REG_SIZE_U16 => 2,
HVM_REG_SIZE_U32 => 4,
HVM_REG_SIZE_U64 => 8,
HVM_REG_SIZE_U128 => 16,
HVM_REG_SIZE_U256 => 32,
HVM_REG_SIZE_U512 => 64,
HVM_REG_SIZE_U1024 => 128,
HVM_REG_SIZE_U2048 => 256,
_ => panic!("invalid size field {size_field}"),
}
}
}
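/// Builds the u64 register id consumed by `HVM_GET_ONE_REG`/`HVM_SET_ONE_REG`.
/// Ids compose as `HVM_REG_ARM64 | size | kind | fields`; for example, `X(2)`
/// becomes a U64 core register whose field bits are the byte offset of
/// `regs.regs[2]` within `hvm_regs`, expressed in 32-bit words.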
impl From<HallaVcpuRegister> for u64 {
fn from(register: HallaVcpuRegister) -> Self {
const fn reg(size: u64, kind: u64, fields: u64) -> u64 {
HVM_REG_ARM64 | size | kind | fields
}
const fn hvm_regs_reg(size: u64, offset: usize) -> u64 {
let offset = offset / std::mem::size_of::<u32>();
reg(size, HVM_REG_ARM_CORE as u64, offset as u64)
}
const fn hvm_reg(offset: usize) -> u64 {
hvm_regs_reg(HVM_REG_SIZE_U64, offset)
}
fn spsr_reg(spsr_reg: u32) -> u64 {
let n = std::mem::size_of::<u64>() * (spsr_reg as usize);
hvm_reg(offset_of!(hvm_regs, spsr) + n)
}
fn user_pt_reg(offset: usize) -> u64 {
hvm_regs_reg(HVM_REG_SIZE_U64, offset_of!(hvm_regs, regs) + offset)
}
fn user_fpsimd_state_reg(size: u64, offset: usize) -> u64 {
hvm_regs_reg(size, offset_of!(hvm_regs, fp_regs) + offset)
}
const fn reg_u64(kind: u64, fields: u64) -> u64 {
reg(HVM_REG_SIZE_U64, kind, fields)
}
const fn demux_reg(size: u64, index: u64, value: u64) -> u64 {
let index = (index << HVM_REG_ARM_DEMUX_ID_SHIFT) & (HVM_REG_ARM_DEMUX_ID_MASK as u64);
let value =
(value << HVM_REG_ARM_DEMUX_VAL_SHIFT) & (HVM_REG_ARM_DEMUX_VAL_MASK as u64);
reg(size, HVM_REG_ARM_DEMUX as u64, index | value)
}
match register {
HallaVcpuRegister::X(n @ 0..=30) => {
let n = std::mem::size_of::<u64>() * (n as usize);
user_pt_reg(offset_of!(user_pt_regs, regs) + n)
}
HallaVcpuRegister::X(n) => {
unreachable!("invalid HallaVcpuRegister Xn index: {n}")
}
HallaVcpuRegister::Sp => user_pt_reg(offset_of!(user_pt_regs, sp)),
HallaVcpuRegister::Pc => user_pt_reg(offset_of!(user_pt_regs, pc)),
HallaVcpuRegister::Pstate => user_pt_reg(offset_of!(user_pt_regs, pstate)),
HallaVcpuRegister::V(n @ 0..=31) => {
let n = std::mem::size_of::<u128>() * (n as usize);
user_fpsimd_state_reg(HVM_REG_SIZE_U128, offset_of!(user_fpsimd_state, vregs) + n)
}
HallaVcpuRegister::V(n) => {
unreachable!("invalid HallaVcpuRegister Vn index: {n}")
}
HallaVcpuRegister::System(aarch64_sys_reg::FPSR) => {
user_fpsimd_state_reg(HVM_REG_SIZE_U32, offset_of!(user_fpsimd_state, fpsr))
}
HallaVcpuRegister::System(aarch64_sys_reg::FPCR) => {
user_fpsimd_state_reg(HVM_REG_SIZE_U32, offset_of!(user_fpsimd_state, fpcr))
}
HallaVcpuRegister::System(aarch64_sys_reg::SPSR_EL1) => spsr_reg(0),
HallaVcpuRegister::System(aarch64_sys_reg::SPSR_abt) => spsr_reg(1),
HallaVcpuRegister::System(aarch64_sys_reg::SPSR_und) => spsr_reg(2),
HallaVcpuRegister::System(aarch64_sys_reg::SPSR_irq) => spsr_reg(3),
HallaVcpuRegister::System(aarch64_sys_reg::SPSR_fiq) => spsr_reg(4),
HallaVcpuRegister::System(aarch64_sys_reg::SP_EL1) => {
hvm_reg(offset_of!(hvm_regs, sp_el1))
}
HallaVcpuRegister::System(aarch64_sys_reg::ELR_EL1) => {
hvm_reg(offset_of!(hvm_regs, elr_el1))
}
HallaVcpuRegister::System(sysreg) => {
reg_u64(HVM_REG_ARM64_SYSREG.into(), sysreg.encoded().into())
}
HallaVcpuRegister::Firmware(n) => reg_u64(HVM_REG_ARM, n.into()),
HallaVcpuRegister::Ccsidr(n) => demux_reg(HVM_REG_SIZE_U32, 0, n.into()),
}
}
}
impl From<VcpuRegAArch64> for HallaVcpuRegister {
fn from(reg: VcpuRegAArch64) -> Self {
match reg {
VcpuRegAArch64::X(n @ 0..=30) => Self::X(n),
VcpuRegAArch64::X(n) => unreachable!("invalid VcpuRegAArch64 index: {n}"),
VcpuRegAArch64::Sp => Self::Sp,
VcpuRegAArch64::Pc => Self::Pc,
VcpuRegAArch64::Pstate => Self::Pstate,
VcpuRegAArch64::System(sysreg) => Self::System(sysreg),
}
}
}
impl VcpuAArch64 for HallaVcpu {
fn init(&self, _features: &[VcpuFeature]) -> Result<()> {
Ok(())
}
fn init_pmu(&self, _irq: u64) -> Result<()> {
Ok(())
}
fn has_pvtime_support(&self) -> bool {
false
}
fn init_pvtime(&self, _pvtime_ipa: u64) -> Result<()> {
error!("Halla: not support init_pvtime");
Err(Error::new(EINVAL))
}
fn set_one_reg(&self, reg_id: VcpuRegAArch64, data: u64) -> Result<()> {
self.set_one_halla_reg_u64(HallaVcpuRegister::from(reg_id), data)
}
fn get_one_reg(&self, reg_id: VcpuRegAArch64) -> Result<u64> {
self.get_one_halla_reg_u64(HallaVcpuRegister::from(reg_id))
}
fn set_vector_reg(&self, _reg_num: u8, _data: u128) -> Result<()> {
unimplemented!()
}
fn get_vector_reg(&self, _reg_num: u8) -> Result<u128> {
unimplemented!()
}
fn get_psci_version(&self) -> Result<PsciVersion> {
Ok(PSCI_0_2)
}
    fn get_max_hw_bps(&self) -> Result<usize> {
        error!("Halla: get_max_hw_bps is not supported");
        Err(Error::new(EINVAL))
    }
    fn get_system_regs(&self) -> Result<BTreeMap<AArch64SysRegId, u64>> {
        error!("Halla: get_system_regs is not supported");
        Err(Error::new(EINVAL))
    }
    fn get_cache_info(&self) -> Result<BTreeMap<u8, u64>> {
        error!("Halla: get_cache_info is not supported");
        Err(Error::new(EINVAL))
    }
    fn set_cache_info(&self, _cache_info: BTreeMap<u8, u64>) -> Result<()> {
        error!("Halla: set_cache_info is not supported");
        Err(Error::new(EINVAL))
    }
    fn hypervisor_specific_snapshot(&self) -> anyhow::Result<AnySnapshot> {
        Err(anyhow::anyhow!(
            "Halla: hypervisor_specific_snapshot is not supported"
        ))
    }
    fn hypervisor_specific_restore(&self, _data: AnySnapshot) -> anyhow::Result<()> {
        Err(anyhow::anyhow!(
            "Halla: hypervisor_specific_restore is not supported"
        ))
    }
fn set_guest_debug(&self, _addrs: &[GuestAddress], _enable_singlestep: bool) -> Result<()> {
error!("Halla: not support set_guest_debug");
Err(Error::new(EINVAL))
}
}
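/// Installs, replaces, or (with `memory_size == 0`) deletes a guest memory
/// slot via `HVM_SET_USER_MEMORY_REGION`.
///
/// # Safety
///
/// `userspace_addr` must point to at least `memory_size` bytes of host memory
/// that remains valid (not unmapped or truncated) for as long as the slot
/// stays installed in the VM.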
unsafe fn set_user_memory_region(
descriptor: &SafeDescriptor,
slot: MemSlot,
guest_addr: u64,
memory_size: u64,
userspace_addr: *mut u8,
flags: u32,
) -> Result<()> {
let region = hvm_userspace_memory_region {
slot,
flags,
guest_phys_addr: guest_addr,
memory_size,
userspace_addr: userspace_addr as u64,
};
    let ret = ioctl_with_ref(descriptor, HVM_SET_USER_MEMORY_REGION, &region);
if ret == 0 {
Ok(())
} else {
errno_result()
}
}
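/// Returns the size in bytes of the dirty-log bitmap needed to cover `size`
/// bytes of guest memory: one bit per page, rounded up to whole bytes. For
/// example, with 4 KiB pages, 16 MiB of guest memory is 4096 pages and needs
/// a 512-byte bitmap.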
pub fn dirty_log_bitmap_size(size: usize) -> usize {
let page_size = pagesize();
size.div_ceil(page_size).div_ceil(8)
}
pub struct Halla {
halla: SafeDescriptor,
}
#[repr(u32)]
pub enum HallaCap {
ArmMte,
ArmProtectedVm = HVM_CAP_ARM_PROTECTED_VM,
}
impl Halla {
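    /// Opens the Halla device node at `device_path`. A minimal sketch,
    /// assuming the default `/dev/halla` node exists and is accessible:
    ///
    /// ```ignore
    /// let halla = Halla::new()?; // same as new_with_path("/dev/halla".as_ref())
    /// let ipa_bits = halla.get_guest_phys_addr_bits();
    /// ```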
pub fn new_with_path(device_path: &Path) -> Result<Halla> {
let c_path = CString::new(device_path.as_os_str().as_bytes()).unwrap();
let ret = unsafe { open(c_path.as_ptr(), O_RDWR | O_CLOEXEC) };
if ret < 0 {
return errno_result();
}
Ok(Halla {
halla: unsafe { SafeDescriptor::from_raw_descriptor(ret) },
})
}
pub fn new() -> Result<Halla> {
Halla::new_with_path(&PathBuf::from("/dev/halla"))
}
pub fn get_vcpu_mmap_size(&self) -> Result<usize> {
let res = std::mem::size_of::<hvm_vcpu_run>();
Ok(res)
}
}
impl AsRawDescriptor for Halla {
fn as_raw_descriptor(&self) -> RawDescriptor {
self.halla.as_raw_descriptor()
}
}
impl Hypervisor for Halla {
fn try_clone(&self) -> Result<Self> {
Ok(Halla {
halla: self.halla.try_clone()?,
})
}
fn check_capability(&self, cap: HypervisorCap) -> bool {
match cap {
HypervisorCap::UserMemory => true,
HypervisorCap::ImmediateExit => true,
HypervisorCap::StaticSwiotlbAllocationRequired => false,
HypervisorCap::HypervisorInitializedBootContext => false,
}
}
}
pub struct HallaVm {
halla: Halla,
vm: SafeDescriptor,
guest_mem: GuestMemory,
mem_regions: Arc<Mutex<BTreeMap<MemSlot, Box<dyn MappedRegion>>>>,
mem_slot_gaps: Arc<Mutex<BinaryHeap<Reverse<MemSlot>>>>,
}
impl HallaVm {
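    /// Creates a VM with `HVM_CREATE_VM` and registers every `GuestMemory`
    /// region as a user memory slot. A hedged end-to-end sketch; the guest
    /// layout below is illustrative, not a requirement:
    ///
    /// ```ignore
    /// let halla = Halla::new()?;
    /// let mem = GuestMemory::new(&[(GuestAddress(0x8000_0000), 0x1000_0000)])?;
    /// let vm = HallaVm::new(&halla, mem, Config::default())?;
    /// let vcpu = vm.create_vcpu(0)?;
    /// ```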
pub fn new(halla: &Halla, guest_mem: GuestMemory, cfg: Config) -> Result<HallaVm> {
let ret = unsafe {
ioctl_with_val(
halla,
HVM_CREATE_VM,
halla.get_vm_type(cfg.protection_type)? as c_ulong,
)
};
if ret < 0 {
return errno_result();
}
let vm_descriptor = unsafe { SafeDescriptor::from_raw_descriptor(ret) };
for region in guest_mem.regions() {
let flags = match region.options.purpose {
MemoryRegionPurpose::Bios => HVM_USER_MEM_REGION_GUEST_MEM,
MemoryRegionPurpose::GuestMemoryRegion => HVM_USER_MEM_REGION_GUEST_MEM,
MemoryRegionPurpose::ProtectedFirmwareRegion => HVM_USER_MEM_REGION_PROTECT_FW,
MemoryRegionPurpose::ReservedMemory => HVM_USER_MEM_REGION_GUEST_MEM,
MemoryRegionPurpose::StaticSwiotlbRegion => HVM_USER_MEM_REGION_STATIC_SWIOTLB,
};
unsafe {
set_user_memory_region(
&vm_descriptor,
region.index as MemSlot,
region.guest_addr.offset(),
region.size as u64,
region.host_addr as *mut u8,
flags,
)
}?;
}
let vm = HallaVm {
halla: halla.try_clone()?,
vm: vm_descriptor,
guest_mem,
mem_regions: Arc::new(Mutex::new(BTreeMap::new())),
mem_slot_gaps: Arc::new(Mutex::new(BinaryHeap::new())),
};
vm.init_arch(&cfg)?;
Ok(vm)
}
fn create_vcpu(&self, id: usize) -> Result<HallaVcpu> {
let run_mmap_size = self.halla.get_vcpu_mmap_size()?;
let fd =
unsafe { ioctl_with_val(self, HVM_CREATE_VCPU, c_ulong::try_from(id).unwrap()) };
if fd < 0 {
return errno_result();
}
let vcpu = unsafe { SafeDescriptor::from_raw_descriptor(fd) };
let run_mmap = MemoryMappingBuilder::new(run_mmap_size)
.build()
.map_err(|_| Error::new(ENOSPC))?;
Ok(HallaVcpu {
vm: self.vm.try_clone()?,
vcpu,
id,
run_mmap: Arc::new(run_mmap),
})
}
pub fn set_irq_line(&self, irq: u32, active: bool) -> Result<()> {
let mut irq_level = hvm_irq_level::default();
irq_level.__bindgen_anon_1.irq = irq;
irq_level.level = active as u32;
let ret = unsafe { ioctl_with_ref(self, HVM_IRQ_LINE, &irq_level) };
if ret == 0 {
Ok(())
} else {
errno_result()
}
}
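    /// Binds `evt` to `gsi` via `HVM_IRQFD`, optionally with a resample event
    /// for level-triggered interrupts. A hedged sketch:
    ///
    /// ```ignore
    /// let evt = Event::new()?;
    /// vm.register_irqfd(32, &evt, None)?;
    /// evt.signal()?; // injects GSI 32 without an exit to userspace
    /// ```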
pub fn register_irqfd(
&self,
gsi: u32,
evt: &Event,
resample_evt: Option<&Event>,
) -> Result<()> {
let mut irqfd = hvm_irqfd {
fd: evt.as_raw_descriptor() as u32,
gsi,
..Default::default()
};
if let Some(r_evt) = resample_evt {
irqfd.flags = HVM_IRQFD_FLAG_RESAMPLE;
irqfd.resamplefd = r_evt.as_raw_descriptor() as u32;
}
let ret = unsafe { ioctl_with_ref(self, HVM_IRQFD, &irqfd) };
if ret == 0 {
Ok(())
} else {
errno_result()
}
}
pub fn unregister_irqfd(&self, gsi: u32, evt: &Event) -> Result<()> {
let irqfd = hvm_irqfd {
fd: evt.as_raw_descriptor() as u32,
gsi,
flags: HVM_IRQFD_FLAG_DEASSIGN,
..Default::default()
};
let ret = unsafe { ioctl_with_ref(self, HVM_IRQFD, &irqfd) };
if ret == 0 {
Ok(())
} else {
errno_result()
}
}
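    /// Registers (or, when `deassign` is set, unregisters) an eventfd that the
    /// hypervisor signals on matching MMIO accesses. Only MMIO addresses are
    /// supported; `Datamatch` narrows the trigger to a given access width and,
    /// optionally, a specific value.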
fn ioeventfd(
&self,
evt: &Event,
addr: IoEventAddress,
datamatch: Datamatch,
deassign: bool,
) -> Result<()> {
let (do_datamatch, datamatch_value, datamatch_len) = match datamatch {
Datamatch::AnyLength => (false, 0, 0),
Datamatch::U8(v) => match v {
Some(u) => (true, u as u64, 1),
None => (false, 0, 1),
},
Datamatch::U16(v) => match v {
Some(u) => (true, u as u64, 2),
None => (false, 0, 2),
},
Datamatch::U32(v) => match v {
Some(u) => (true, u as u64, 4),
None => (false, 0, 4),
},
Datamatch::U64(v) => match v {
Some(u) => (true, u, 8),
None => (false, 0, 8),
},
};
let mut flags = 0;
if deassign {
flags |= 1 << hvm_ioeventfd_flag_nr_deassign;
}
if do_datamatch {
            flags |= 1 << hvm_ioeventfd_flag_nr_datamatch;
}
let ioeventfd = hvm_ioeventfd {
datamatch: datamatch_value,
len: datamatch_len,
            addr: match addr {
                IoEventAddress::Mmio(m) => m,
                // Halla has no port-I/O space; reject PIO addresses instead of
                // encoding an errno value into the address field.
                IoEventAddress::Pio(_) => return Err(Error::new(EINVAL)),
            },
fd: evt.as_raw_descriptor(),
flags,
..Default::default()
};
let ret = unsafe { ioctl_with_ref(self, HVM_IOEVENTFD, &ioeventfd) };
if ret == 0 {
Ok(())
} else {
errno_result()
}
}
fn check_raw_capability(&self, capability: HallaCap) -> bool {
        let mut cap: u64 = capability as u64;
        // SAFETY: this is a valid VM fd, and the kernel writes the queried
        // capability value back through `cap`, which lives on our stack.
        unsafe {
            ioctl_with_mut_ref(self, HVM_CHECK_EXTENSION, &mut cap);
        }
cap == 1
}
#[allow(dead_code)]
unsafe fn ctrl_halla_enable_capability(
&self,
capability: HallaCap,
args: &[u64; 5],
) -> Result<hvm_enable_cap> {
let hvm_cap = hvm_enable_cap {
cap: capability as u64,
args: *args,
};
let ret = ioctl_with_ref(self, HVM_ENABLE_CAP, &hvm_cap);
if ret == 0 {
Ok(hvm_cap)
} else {
errno_result()
}
}
pub fn create_halla_device(&self, dev: hvm_create_device) -> Result<()> {
let ret = unsafe { base::ioctl_with_ref(self, HVM_CREATE_DEVICE, &dev) };
if ret == 0 {
Ok(())
} else {
errno_result()
}
}
fn handle_inflate(&mut self, guest_address: GuestAddress, size: u64) -> Result<()> {
match self.guest_mem.remove_range(guest_address, size) {
Ok(_) => Ok(()),
Err(vm_memory::Error::MemoryAccess(_, MmapError::SystemCallFailed(e))) => Err(e),
Err(_) => Err(Error::new(EIO)),
}
}
fn handle_deflate(&mut self, _guest_address: GuestAddress, _size: u64) -> Result<()> {
Ok(())
}
}
impl Vm for HallaVm {
fn try_clone(&self) -> Result<Self> {
Ok(HallaVm {
halla: self.halla.try_clone()?,
vm: self.vm.try_clone()?,
guest_mem: self.guest_mem.clone(),
mem_regions: self.mem_regions.clone(),
mem_slot_gaps: self.mem_slot_gaps.clone(),
})
}
fn try_clone_descriptor(&self) -> Result<SafeDescriptor> {
error!("try_clone_descriptor hasn't been tested on Halla, returning -ENOTSUP");
Err(Error::new(ENOTSUP))
}
fn hypervisor_kind(&self) -> HypervisorKind {
HypervisorKind::Halla
}
fn check_capability(&self, c: VmCap) -> bool {
if let Some(val) = self.check_capability_arch(c) {
return val;
}
match c {
VmCap::ArmPmuV3 => false,
VmCap::DirtyLog => false,
VmCap::PvClock => false,
VmCap::Protected => self.check_raw_capability(HallaCap::ArmProtectedVm),
VmCap::EarlyInitCpuid => false,
VmCap::ReadOnlyMemoryRegion => false,
VmCap::MemNoncoherentDma => false,
VmCap::Sve => false,
}
}
fn get_guest_phys_addr_bits(&self) -> u8 {
self.halla.get_guest_phys_addr_bits()
}
fn get_memory(&self) -> &GuestMemory {
&self.guest_mem
}
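    /// Slots freed by `remove_memory_region` are recycled in ascending order
    /// (the `Reverse` wrapper turns the max-heap into a min-heap); fresh slot
    /// numbers are only handed out past the boot `GuestMemory` regions.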
fn add_memory_region(
&mut self,
guest_addr: GuestAddress,
mem: Box<dyn MappedRegion>,
_read_only: bool,
_log_dirty_pages: bool,
_cache: MemCacheType,
) -> Result<MemSlot> {
let pgsz = pagesize() as u64;
let size = (mem.size() as u64).div_ceil(pgsz) * pgsz;
let end_addr = guest_addr
.checked_add(size)
.ok_or_else(|| Error::new(EOVERFLOW))?;
if self.guest_mem.range_overlap(guest_addr, end_addr) {
return Err(Error::new(ENOSPC));
}
let mut regions = self.mem_regions.lock();
let mut gaps = self.mem_slot_gaps.lock();
let slot = match gaps.pop() {
Some(gap) => gap.0,
None => (regions.len() + self.guest_mem.num_regions() as usize) as MemSlot,
};
let flags = 0;
let res = unsafe {
set_user_memory_region(
&self.vm,
slot,
guest_addr.offset(),
size,
mem.as_ptr(),
flags,
)
};
if let Err(e) = res {
gaps.push(Reverse(slot));
return Err(e);
}
regions.insert(slot, mem);
Ok(slot)
}
fn msync_memory_region(&mut self, slot: MemSlot, offset: usize, size: usize) -> Result<()> {
let mut regions = self.mem_regions.lock();
let mem = regions.get_mut(&slot).ok_or_else(|| Error::new(ENOENT))?;
mem.msync(offset, size).map_err(|err| match err {
MmapError::InvalidAddress => Error::new(EFAULT),
MmapError::NotPageAligned => Error::new(EINVAL),
MmapError::SystemCallFailed(e) => e,
_ => Error::new(EIO),
})
}
fn madvise_pageout_memory_region(
&mut self,
_slot: MemSlot,
_offset: usize,
_size: usize,
) -> Result<()> {
Err(Error::new(ENOTSUP))
}
fn madvise_remove_memory_region(
&mut self,
_slot: MemSlot,
_offset: usize,
_size: usize,
) -> Result<()> {
Err(Error::new(ENOTSUP))
}
fn remove_memory_region(&mut self, slot: MemSlot) -> Result<Box<dyn MappedRegion>> {
let mut regions = self.mem_regions.lock();
if !regions.contains_key(&slot) {
return Err(Error::new(ENOENT));
}
unsafe {
set_user_memory_region(&self.vm, slot, 0, 0, std::ptr::null_mut(), 0)?;
}
self.mem_slot_gaps.lock().push(Reverse(slot));
Ok(regions.remove(&slot).unwrap())
}
    fn create_device(&self, _kind: DeviceKind) -> Result<SafeDescriptor> {
        // In-kernel device creation is not wired up for Halla; returning
        // errno_result() here would surface a stale errno, so fail explicitly.
        Err(Error::new(ENOTSUP))
    }
fn get_dirty_log(&self, _slot: MemSlot, _dirty_log: &mut [u8]) -> Result<()> {
Err(Error::new(ENOTSUP))
}
fn register_ioevent(
&mut self,
evt: &Event,
addr: IoEventAddress,
datamatch: Datamatch,
) -> Result<()> {
self.ioeventfd(evt, addr, datamatch, false)
}
fn unregister_ioevent(
&mut self,
evt: &Event,
addr: IoEventAddress,
datamatch: Datamatch,
) -> Result<()> {
self.ioeventfd(evt, addr, datamatch, true)
}
fn handle_io_events(&self, _addr: IoEventAddress, _data: &[u8]) -> Result<()> {
Ok(())
}
fn enable_hypercalls(&mut self, _nr: u64, _count: usize) -> Result<()> {
Err(Error::new(ENOTSUP))
}
fn get_pvclock(&self) -> Result<ClockState> {
self.get_pvclock_arch()
}
fn set_pvclock(&self, state: &ClockState) -> Result<()> {
self.set_pvclock_arch(state)
}
fn add_fd_mapping(
&mut self,
slot: u32,
offset: usize,
size: usize,
fd: &dyn AsRawDescriptor,
fd_offset: u64,
prot: Protection,
) -> Result<()> {
let mut regions = self.mem_regions.lock();
let region = regions.get_mut(&slot).ok_or_else(|| Error::new(EINVAL))?;
match region.add_fd_mapping(offset, size, fd, fd_offset, prot) {
Ok(()) => Ok(()),
Err(MmapError::SystemCallFailed(e)) => Err(e),
Err(_) => Err(Error::new(EIO)),
}
}
fn remove_mapping(&mut self, slot: u32, offset: usize, size: usize) -> Result<()> {
let mut regions = self.mem_regions.lock();
let region = regions.get_mut(&slot).ok_or_else(|| Error::new(EINVAL))?;
match region.remove_mapping(offset, size) {
Ok(()) => Ok(()),
Err(MmapError::SystemCallFailed(e)) => Err(e),
Err(_) => Err(Error::new(EIO)),
}
}
fn handle_balloon_event(&mut self, event: BalloonEvent) -> Result<()> {
match event {
BalloonEvent::Inflate(m) => self.handle_inflate(m.guest_address, m.size),
BalloonEvent::Deflate(m) => self.handle_deflate(m.guest_address, m.size),
BalloonEvent::BalloonTargetReached(_) => Ok(()),
}
}
}
impl AsRawDescriptor for HallaVm {
fn as_raw_descriptor(&self) -> RawDescriptor {
self.vm.as_raw_descriptor()
}
}
struct HallaVcpuSignalHandle {
run_mmap: Arc<MemoryMapping>,
}
impl VcpuSignalHandleInner for HallaVcpuSignalHandle {
fn signal_immediate_exit(&self) {
        // SAFETY: the Arc keeps the run mmap alive even after the vcpu itself
        // is dropped, so the raw write cannot dangle.
        unsafe {
            let run = self.run_mmap.as_ptr() as *mut hvm_vcpu_run;
            (*run).immediate_exit = 1;
        }
}
}
pub struct HallaVcpu {
vm: SafeDescriptor,
vcpu: SafeDescriptor,
id: usize,
run_mmap: Arc<MemoryMapping>,
}
impl Vcpu for HallaVcpu {
fn try_clone(&self) -> Result<Self> {
let vm = self.vm.try_clone()?;
let vcpu = self.vcpu.try_clone()?;
Ok(HallaVcpu {
vm,
vcpu,
id: self.id,
run_mmap: self.run_mmap.clone(),
})
}
fn as_vcpu(&self) -> &dyn Vcpu {
self
}
fn id(&self) -> usize {
self.id
}
#[allow(clippy::cast_ptr_alignment)]
fn set_immediate_exit(&self, exit: bool) {
        // SAFETY: the run mmap is owned by this vcpu, is at least the size of
        // hvm_vcpu_run, and the kernel only reads immediate_exit between runs.
        let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut hvm_vcpu_run) };
run.immediate_exit = exit as u8;
}
fn signal_handle(&self) -> VcpuSignalHandle {
VcpuSignalHandle {
inner: Box::new(HallaVcpuSignalHandle {
run_mmap: self.run_mmap.clone(),
}),
}
}
fn on_suspend(&self) -> Result<()> {
Ok(())
}
unsafe fn enable_raw_capability(&self, _cap: u32, _args: &[u64; 4]) -> Result<()> {
Err(Error::new(libc::ENXIO))
}
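    /// Runs the vCPU until the next exit. The shared `hvm_vcpu_run` page is
    /// passed by address to `HVM_RUN` and carries both the `immediate_exit`
    /// flag and the exit-reason payload decoded below.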
#[allow(clippy::cast_ptr_alignment)]
fn run(&mut self) -> Result<VcpuExit> {
        // SAFETY: the vcpu fd is valid and the run mmap outlives the ioctl.
        let ret = unsafe { ioctl_with_val(self, HVM_RUN, self.run_mmap.as_ptr() as u64) };
        if ret != 0 {
            return errno_result();
        }
        // SAFETY: the kernel has finished writing the exit payload into the
        // run page, which this vcpu owns for the duration of the borrow.
        let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut hvm_vcpu_run) };
match run.exit_reason {
HVM_EXIT_MMIO => Ok(VcpuExit::Mmio),
HVM_EXIT_IRQ => Ok(VcpuExit::IrqWindowOpen),
HVM_EXIT_EXCEPTION => Ok(VcpuExit::Exception),
HVM_EXIT_SYSTEM_EVENT => {
let event_type = unsafe { run.__bindgen_anon_1.system_event.type_ };
match event_type {
HVM_SYSTEM_EVENT_SHUTDOWN => Ok(VcpuExit::SystemEventShutdown),
HVM_SYSTEM_EVENT_RESET => Ok(VcpuExit::SystemEventReset),
HVM_SYSTEM_EVENT_CRASH => Ok(VcpuExit::SystemEventCrash),
_ => {
error!("Unknown HVM system event {}", event_type);
Err(Error::new(EINVAL))
}
}
}
HVM_EXIT_INTERNAL_ERROR => Ok(VcpuExit::InternalError),
HVM_EXIT_SHUTDOWN => Ok(VcpuExit::Shutdown(Ok(()))),
r => panic!("unknown hvm exit reason: {r}"),
}
}
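    /// Completes an `HVM_EXIT_MMIO` exit by passing the access to `handle_fn`.
    /// A hedged sketch of a vCPU loop; `bus` is a hypothetical device-dispatch
    /// helper, not part of this crate:
    ///
    /// ```ignore
    /// match vcpu.run()? {
    ///     VcpuExit::Mmio => vcpu.handle_mmio(&mut |IoParams { address, operation }| {
    ///         bus.access(address, operation)
    ///     })?,
    ///     _ => {}
    /// }
    /// ```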
fn handle_mmio(&self, handle_fn: &mut dyn FnMut(IoParams) -> Result<()>) -> Result<()> {
        // SAFETY: the run mmap is owned by this vcpu and sized for hvm_vcpu_run.
        let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut hvm_vcpu_run) };
        // Verify that the handler is only called right after an MMIO exit.
        assert_eq!(run.exit_reason, HVM_EXIT_MMIO);
        // SAFETY: an HVM_EXIT_MMIO exit reason guarantees the union holds mmio.
        let mmio = unsafe { &mut run.__bindgen_anon_1.mmio };
let address = mmio.phys_addr;
let data = &mut mmio.data[..mmio.size as usize];
if mmio.is_write != 0 {
handle_fn(IoParams {
address,
operation: IoOperation::Write(data),
})
} else {
handle_fn(IoParams {
address,
operation: IoOperation::Read(data),
})
}
}
fn handle_io(&self, _handle_fn: &mut dyn FnMut(IoParams)) -> Result<()> {
Err(Error::new(EINVAL))
}
}
impl AsRawDescriptor for HallaVcpu {
fn as_raw_descriptor(&self) -> RawDescriptor {
self.vcpu.as_raw_descriptor()
}
}