#![allow(clippy::useless_conversion)]
use std::collections::BTreeMap;
use std::convert::TryFrom;
use std::mem::offset_of;
use aarch64_sys_reg::AArch64SysRegId;
use anyhow::Context;
use base::errno_result;
use base::error;
use base::ioctl_with_mut_ref;
use base::ioctl_with_ref;
use base::ioctl_with_val;
use base::warn;
use base::Error;
use base::Result;
use cros_fdt::Fdt;
use kvm_sys::*;
use libc::EINVAL;
use libc::ENOMEM;
use libc::ENOTSUP;
use libc::ENXIO;
use serde::Deserialize;
use serde::Serialize;
use snapshot::AnySnapshot;
use vm_memory::GuestAddress;
use zerocopy::FromZeros;
use super::Config;
use super::Kvm;
use super::KvmCap;
use super::KvmVcpu;
use super::KvmVm;
use crate::ClockState;
use crate::DeviceKind;
use crate::HypercallAbi;
use crate::Hypervisor;
use crate::IrqSourceChip;
use crate::ProtectionType;
use crate::PsciVersion;
use crate::VcpuAArch64;
use crate::VcpuExit;
use crate::VcpuFeature;
use crate::VcpuRegAArch64;
use crate::VmAArch64;
use crate::VmCap;
use crate::AARCH64_MAX_REG_COUNT;
use crate::PSCI_0_2;
impl Kvm {
    /// Returns the KVM machine type value to pass when creating a VM.
    ///
    /// Encodes the host's maximum supported IPA (guest physical address)
    /// size, queried via `KVM_CAP_ARM_VM_IPA_SIZE`, into the low bits of the
    /// type, and ORs in `KVM_VM_TYPE_ARM_PROTECTED` when `protection_type`
    /// requires guest memory to be isolated from the host.
    pub fn get_vm_type(&self, protection_type: ProtectionType) -> Result<u32> {
        // SAFETY: KVM_CHECK_EXTENSION is a stateless query on the KVM fd; it
        // reads no memory through the passed value.
        let ipa_size = match unsafe {
            ioctl_with_val(self, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE.into())
        } {
            // Extension unsupported: fall back to 0, letting the kernel pick
            // its default IPA size.
            ret if ret < 0 => 0,
            ipa => ipa as u32,
        };
        let protection_flag = if protection_type.isolates_memory() {
            KVM_VM_TYPE_ARM_PROTECTED
        } else {
            0
        };
        // Use the lower 8 bits representing the IPA space as the machine type.
        Ok((ipa_size & KVM_VM_TYPE_ARM_IPA_SIZE_MASK) | protection_flag)
    }

    /// Returns the width in bits of guest physical addresses this host can
    /// support.
    ///
    /// Falls back to 40 bits when `KVM_CAP_ARM_VM_IPA_SIZE` is unsupported or
    /// reports 0.
    pub fn get_guest_phys_addr_bits(&self) -> u8 {
        // SAFETY: KVM_CHECK_EXTENSION is a stateless query on the KVM fd.
        match unsafe { ioctl_with_val(self, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE.into()) } {
            // Default physical address size is 40 bits if the extension is
            // unsupported.
            ret if ret <= 0 => 40,
            ipa => ipa as u8,
        }
    }
}
impl KvmVm {
    /// Does platform-specific initialization for this `KvmVm` from `cfg`:
    /// enables MTE and (on Android) FF-A support when requested.
    pub fn init_arch(&self, cfg: &Config) -> Result<()> {
        if cfg.mte {
            // SAFETY: MTE is a VM-wide capability that takes no extra
            // arguments; all four args are zero.
            unsafe { self.enable_raw_capability(KvmCap::ArmMte, 0, &[0, 0, 0, 0])? }
        }
        // NOTE(review): the call is gated on android only while the
        // definition below is gated on android + aarch64; this relies on the
        // file only being compiled for aarch64 — confirm.
        #[cfg(target_os = "android")]
        if cfg.ffa {
            self.set_enable_ffa(true)?;
        }
        Ok(())
    }

    /// Arch-specific capability probe; no aarch64-specific overrides, so the
    /// generic path decides for every `VmCap`.
    pub fn check_capability_arch(&self, _c: VmCap) -> Option<bool> {
        None
    }

    /// Returns the `kvm_create_device` parameters for KVM in-kernel devices
    /// of the given `kind`, or `None` when `kind` has no KVM equivalent on
    /// this architecture.
    pub fn get_device_params_arch(&self, kind: DeviceKind) -> Option<kvm_create_device> {
        match kind {
            DeviceKind::ArmVgicV2 => Some(kvm_create_device {
                type_: kvm_device_type_KVM_DEV_TYPE_ARM_VGIC_V2,
                fd: 0,
                flags: 0,
            }),
            DeviceKind::ArmVgicV3 => Some(kvm_create_device {
                type_: kvm_device_type_KVM_DEV_TYPE_ARM_VGIC_V3,
                fd: 0,
                flags: 0,
            }),
            DeviceKind::ArmVgicIts => Some(kvm_create_device {
                type_: kvm_device_type_KVM_DEV_TYPE_ARM_VGIC_ITS,
                fd: 0,
                flags: 0,
            }),
            _ => None,
        }
    }

    /// Installs an SMCCC filter asking KVM to forward calls in the function
    /// id range `[base, base + nr_functions)` to userspace rather than
    /// handling them in the kernel.
    pub(super) fn enable_smccc_forwarding(&mut self, base: u32, nr_functions: u32) -> Result<()> {
        let smccc_filter = kvm_smccc_filter {
            base,
            nr_functions,
            action: kvm_smccc_filter_action_KVM_SMCCC_FILTER_FWD_TO_USER as u8,
            pad: [0; 15],
        };
        // The filter struct is passed by address through the device-attr
        // `addr` field, per the KVM device attribute convention.
        let dev_attr = kvm_device_attr {
            group: KVM_ARM_VM_SMCCC_CTRL,
            attr: KVM_ARM_VM_SMCCC_FILTER as u64,
            addr: &smccc_filter as *const _ as u64,
            flags: 0,
        };
        // SAFETY: dev_attr and smccc_filter outlive the ioctl call, and the
        // kernel only reads through the pointers during the call.
        match unsafe { ioctl_with_ref(self, KVM_SET_DEVICE_ATTR, &dev_attr) } {
            0 => Ok(()),
            _ => errno_result(),
        }
    }

    /// KVM pvclock is not supported on aarch64; always returns ENXIO.
    pub fn get_pvclock_arch(&self) -> Result<ClockState> {
        Err(Error::new(ENXIO))
    }

    /// KVM pvclock is not supported on aarch64; always returns ENXIO.
    pub fn set_pvclock_arch(&self, _state: &ClockState) -> Result<()> {
        Err(Error::new(ENXIO))
    }

    /// Queries protected-VM info (currently the firmware size) by enabling
    /// the ArmProtectedVm capability with the INFO flag, which fills in the
    /// struct whose address is passed as the first capability argument.
    fn get_protected_vm_info(&self) -> Result<KvmProtectedVmInfo> {
        let mut info = KvmProtectedVmInfo {
            firmware_size: 0,
            reserved: [0; 7],
        };
        // SAFETY: `info` outlives the call and the kernel writes only within
        // the struct whose address we pass.
        unsafe {
            self.enable_raw_capability(
                KvmCap::ArmProtectedVm,
                KVM_CAP_ARM_PROTECTED_VM_FLAGS_INFO,
                &[&mut info as *mut KvmProtectedVmInfo as u64, 0, 0, 0],
            )
        }?;
        Ok(info)
    }

    /// Tells the hypervisor the guest physical address at which the
    /// protected-VM firmware should be mapped.
    fn set_protected_vm_firmware_ipa(&self, fw_addr: GuestAddress) -> Result<()> {
        // SAFETY: the capability args are passed by value; no memory is
        // borrowed by the kernel past the call.
        unsafe {
            self.enable_raw_capability(
                KvmCap::ArmProtectedVm,
                KVM_CAP_ARM_PROTECTED_VM_FLAGS_SET_FW_IPA,
                &[fw_addr.0, 0, 0, 0],
            )
        }
    }

    /// Enables or disables FF-A support for the protected VM (Android-only
    /// extension of the ArmProtectedVm capability).
    #[cfg(all(target_os = "android", target_arch = "aarch64"))]
    fn set_enable_ffa(&self, ffa_support: bool) -> Result<()> {
        // SAFETY: the capability args are passed by value.
        unsafe {
            self.enable_raw_capability(
                KvmCap::ArmProtectedVm,
                KVM_CAP_ARM_PROTECTED_VM_FLAGS_SET_FFA,
                &[ffa_support.into(), 0, 0, 0],
            )
        }
    }
}
/// C-layout struct filled in by the kernel when the ArmProtectedVm
/// capability is enabled with `KVM_CAP_ARM_PROTECTED_VM_FLAGS_INFO`
/// (see `get_protected_vm_info`).
#[repr(C)]
struct KvmProtectedVmInfo {
    // Size in bytes of the protected-VM firmware, as reported by the
    // hypervisor; 0 means no firmware is available.
    firmware_size: u64,
    // Padding/reserved fields mirroring the kernel struct layout.
    reserved: [u64; 7],
}
impl VmAArch64 for KvmVm {
    /// Returns the hypervisor backing this VM.
    fn get_hypervisor(&self) -> &dyn Hypervisor {
        &self.kvm
    }

    /// Loads the protected-VM firmware at `fw_addr`.
    ///
    /// Returns EINVAL when the hypervisor reports no firmware (size 0) and
    /// ENOMEM when the reported firmware size exceeds `fw_max_size`.
    fn load_protected_vm_firmware(
        &mut self,
        fw_addr: GuestAddress,
        fw_max_size: u64,
    ) -> Result<()> {
        let info = self.get_protected_vm_info()?;
        if info.firmware_size == 0 {
            Err(Error::new(EINVAL))
        } else {
            if info.firmware_size > fw_max_size {
                return Err(Error::new(ENOMEM));
            }
            self.set_protected_vm_firmware_ipa(fw_addr)
        }
    }

    /// Creates vcpu `id` and returns it as a `VcpuAArch64` trait object.
    fn create_vcpu(&self, id: usize) -> Result<Box<dyn VcpuAArch64>> {
        Ok(Box::new(self.create_kvm_vcpu(id)?))
    }

    /// KVM needs no hypervisor-specific FDT nodes; no-op.
    fn create_fdt(&self, _fdt: &mut Fdt, _phandles: &BTreeMap<&str, u32>) -> cros_fdt::Result<()> {
        Ok(())
    }

    /// KVM needs no extra arch setup once payload and FDT addresses are
    /// known; no-op.
    fn init_arch(
        &self,
        _payload_entry_address: GuestAddress,
        _fdt_address: GuestAddress,
        _fdt_size: usize,
    ) -> anyhow::Result<()> {
        Ok(())
    }

    /// Sets the VM-wide virtual counter offset via
    /// `KVM_ARM_SET_COUNTER_OFFSET`.
    fn set_counter_offset(&self, offset: u64) -> Result<()> {
        let off = kvm_arm_counter_offset {
            counter_offset: offset,
            reserved: 0,
        };
        // SAFETY: `off` outlives the ioctl and the kernel only reads it.
        let ret = unsafe { ioctl_with_ref(&self.vm, KVM_ARM_SET_COUNTER_OFFSET, &off) };
        if ret != 0 {
            return errno_result();
        }
        Ok(())
    }
}
impl KvmVcpu {
    /// Handles a system-event reset exit.
    ///
    /// When the PSCI RESET2 flag is set, logs the reset type (x1) and cookie
    /// (x2) per the PSCI SYSTEM_RESET2 register convention, then reports the
    /// exit as a plain `SystemEventReset`.
    pub fn system_event_reset(&self, event_flags: u64) -> Result<VcpuExit> {
        if event_flags & u64::from(KVM_SYSTEM_EVENT_RESET_FLAG_PSCI_RESET2) != 0 {
            // Read reset_type and cookie from x1 and x2.
            let reset_type = self.get_one_reg(VcpuRegAArch64::X(1))?;
            let cookie = self.get_one_reg(VcpuRegAArch64::X(2))?;
            warn!(
                "PSCI SYSTEM_RESET2 with reset_type={:#x}, cookie={:#x}",
                reset_type, cookie
            );
        }
        Ok(VcpuExit::SystemEventReset)
    }

    /// Maps the hypervisor-neutral `VcpuRegAArch64` to the KVM-specific
    /// register representation.
    ///
    /// CCSIDR_EL1 is special: KVM exposes it as a demultiplexed register
    /// indexed by the current CSSELR_EL1 value, so that value is read first
    /// and used as the demux index. Xn indices above 30 are rejected with
    /// EINVAL.
    fn kvm_reg_id(&self, reg: VcpuRegAArch64) -> Result<KvmVcpuRegister> {
        match reg {
            VcpuRegAArch64::X(n @ 0..=30) => Ok(KvmVcpuRegister::X(n)),
            VcpuRegAArch64::Sp => Ok(KvmVcpuRegister::Sp),
            VcpuRegAArch64::Pc => Ok(KvmVcpuRegister::Pc),
            VcpuRegAArch64::Pstate => Ok(KvmVcpuRegister::Pstate),
            VcpuRegAArch64::System(aarch64_sys_reg::CCSIDR_EL1) => {
                // The CCSIDR register is selected by the value of CSSELR.
                let csselr =
                    self.get_one_reg(VcpuRegAArch64::System(aarch64_sys_reg::CSSELR_EL1))?;
                Ok(KvmVcpuRegister::Ccsidr(csselr as u8))
            }
            VcpuRegAArch64::System(sysreg) => Ok(KvmVcpuRegister::System(sysreg)),
            _ => Err(Error::new(EINVAL)),
        }
    }

    /// Sets a 32-bit register through the byte-slice ONE_REG path.
    fn set_one_kvm_reg_u32(&self, kvm_reg_id: KvmVcpuRegister, data: u32) -> Result<()> {
        self.set_one_kvm_reg(kvm_reg_id, data.to_ne_bytes().as_slice())
    }

    /// Sets a 64-bit register through the byte-slice ONE_REG path.
    fn set_one_kvm_reg_u64(&self, kvm_reg_id: KvmVcpuRegister, data: u64) -> Result<()> {
        self.set_one_kvm_reg(kvm_reg_id, data.to_ne_bytes().as_slice())
    }

    /// Sets a 128-bit register through the byte-slice ONE_REG path.
    fn set_one_kvm_reg_u128(&self, kvm_reg_id: KvmVcpuRegister, data: u128) -> Result<()> {
        self.set_one_kvm_reg(kvm_reg_id, data.to_ne_bytes().as_slice())
    }

    /// Writes `data` to the register via KVM_SET_ONE_REG.
    ///
    /// Panics if `data.len()` does not match the register's encoded size —
    /// callers must use the correctly sized helper above.
    fn set_one_kvm_reg(&self, kvm_reg_id: KvmVcpuRegister, data: &[u8]) -> Result<()> {
        assert_eq!(kvm_reg_id.size(), data.len());
        let id: u64 = kvm_reg_id.into();
        let onereg = kvm_one_reg {
            id,
            addr: (data.as_ptr() as usize)
                .try_into()
                .expect("can't represent usize as u64"),
        };
        // SAFETY: `data` outlives the ioctl, and the size assert above
        // guarantees the kernel reads exactly data.len() bytes through addr.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_ONE_REG, &onereg) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Reads a 32-bit register through the byte-slice ONE_REG path.
    fn get_one_kvm_reg_u32(&self, kvm_reg_id: KvmVcpuRegister) -> Result<u32> {
        let mut bytes = 0u32.to_ne_bytes();
        self.get_one_kvm_reg(kvm_reg_id, bytes.as_mut_slice())?;
        Ok(u32::from_ne_bytes(bytes))
    }

    /// Reads a 64-bit register through the byte-slice ONE_REG path.
    fn get_one_kvm_reg_u64(&self, kvm_reg_id: KvmVcpuRegister) -> Result<u64> {
        let mut bytes = 0u64.to_ne_bytes();
        self.get_one_kvm_reg(kvm_reg_id, bytes.as_mut_slice())?;
        Ok(u64::from_ne_bytes(bytes))
    }

    /// Reads a 128-bit register through the byte-slice ONE_REG path.
    fn get_one_kvm_reg_u128(&self, kvm_reg_id: KvmVcpuRegister) -> Result<u128> {
        let mut bytes = 0u128.to_ne_bytes();
        self.get_one_kvm_reg(kvm_reg_id, bytes.as_mut_slice())?;
        Ok(u128::from_ne_bytes(bytes))
    }

    /// Reads the register into `data` via KVM_GET_ONE_REG.
    ///
    /// Panics if `data.len()` does not match the register's encoded size.
    fn get_one_kvm_reg(&self, kvm_reg_id: KvmVcpuRegister, data: &mut [u8]) -> Result<()> {
        assert_eq!(kvm_reg_id.size(), data.len());
        let id: u64 = kvm_reg_id.into();
        let onereg = kvm_one_reg {
            id,
            addr: (data.as_mut_ptr() as usize)
                .try_into()
                .expect("can't represent usize as u64"),
        };
        // SAFETY: `data` outlives the ioctl, and the size assert above
        // guarantees the kernel writes exactly data.len() bytes through addr.
        let ret = unsafe { ioctl_with_ref(self, KVM_GET_ONE_REG, &onereg) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Dispatches an SMCCC call forwarded by KVM to `handle_fn`.
    ///
    /// Marshals x0 (function id) and x1-x3 (arguments) into a
    /// `HypercallAbi`, runs the handler, and writes the results (defaulting
    /// to SMCCC "not supported" in x0) back into x0-x3 — even when the
    /// handler returned an error, so the guest always sees a valid response.
    pub(super) fn handle_smccc_call(
        &self,
        handle_fn: &mut dyn FnMut(&mut HypercallAbi) -> anyhow::Result<()>,
    ) -> anyhow::Result<()> {
        // SMCCC return value meaning "call not supported" (all bits set).
        const SMCCC_NOT_SUPPORTED: usize = u64::MAX as usize;
        // SMCCC function ids are 32-bit; the upper bits of x0 are discarded.
        let function_id = (self.get_one_reg(VcpuRegAArch64::X(0))? as u32)
            .try_into()
            .unwrap();
        let args = &[
            self.get_one_reg(VcpuRegAArch64::X(1))?.try_into().unwrap(),
            self.get_one_reg(VcpuRegAArch64::X(2))?.try_into().unwrap(),
            self.get_one_reg(VcpuRegAArch64::X(3))?.try_into().unwrap(),
        ];
        let default_res = &[SMCCC_NOT_SUPPORTED, 0, 0, 0];
        let mut smccc_abi = HypercallAbi::new(function_id, args, default_res);
        let err_or_ok = handle_fn(&mut smccc_abi);
        // Write results back unconditionally before propagating any handler
        // error, so the guest's registers are always in a defined state.
        for (i, value) in smccc_abi.get_results().iter().enumerate() {
            self.set_one_reg(VcpuRegAArch64::X(i as _), (*value).try_into().unwrap())?;
        }
        err_or_ok
    }

    /// No aarch64-specific VM-exit handling; the generic path handles all
    /// exits.
    #[inline]
    pub(crate) fn handle_vm_exit_arch(&self, _run: &mut kvm_run) -> Option<VcpuExit> {
        None
    }

    /// Returns the list of register ids supported by KVM_GET_REG_LIST for
    /// this vcpu.
    ///
    /// Uses a fixed-capacity buffer of AARCH64_MAX_REG_COUNT entries;
    /// panics if the kernel reports more registers than that (which would
    /// indicate the constant is out of date).
    fn get_reg_list(&self) -> Result<Vec<u64>> {
        let mut kvm_reg_list = kvm_reg_list::<[u64; AARCH64_MAX_REG_COUNT]>::new_zeroed();
        kvm_reg_list.n = AARCH64_MAX_REG_COUNT as u64;
        // SAFETY: `n` tells the kernel the capacity of `reg`, so it writes at
        // most AARCH64_MAX_REG_COUNT entries into the buffer we own.
        let ret =
            unsafe { ioctl_with_mut_ref(self, KVM_GET_REG_LIST, &mut kvm_reg_list) };
        if ret < 0 {
            return errno_result();
        }
        let n = kvm_reg_list.n;
        assert!(
            n <= AARCH64_MAX_REG_COUNT as u64,
            "Get reg list returned more registers than possible"
        );
        Ok(kvm_reg_list.reg[..n as usize].to_vec())
    }

    /// Builds the `kvm_vcpu_init.features[0]` bitmap from the requested
    /// `features`.
    ///
    /// Returns ENOTSUP if SVE is requested but the host lacks
    /// KVM_CAP_ARM_SVE. Pointer authentication is enabled opportunistically
    /// whenever both PTRAUTH capabilities are present, independent of
    /// `features`.
    fn get_features_bitmap(&self, features: &[VcpuFeature]) -> Result<u32> {
        let mut all_features = 0;
        let check_extension = |ext: u32| -> bool {
            // SAFETY: KVM_CHECK_EXTENSION is a stateless query on the VM fd.
            unsafe { ioctl_with_val(&self.vm, KVM_CHECK_EXTENSION, ext.into()) == 1 }
        };
        for f in features {
            let shift = match f {
                VcpuFeature::PsciV0_2 => KVM_ARM_VCPU_PSCI_0_2,
                VcpuFeature::PmuV3 => KVM_ARM_VCPU_PMU_V3,
                VcpuFeature::PowerOff => KVM_ARM_VCPU_POWER_OFF,
                VcpuFeature::Sve => {
                    if !check_extension(KVM_CAP_ARM_SVE) {
                        return Err(Error::new(ENOTSUP));
                    }
                    KVM_ARM_VCPU_SVE
                }
            };
            all_features |= 1 << shift;
        }
        // Both address and generic PTRAUTH must be available to enable
        // pointer authentication at all.
        if check_extension(KVM_CAP_ARM_PTRAUTH_ADDRESS)
            && check_extension(KVM_CAP_ARM_PTRAUTH_GENERIC)
        {
            all_features |= 1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS;
            all_features |= 1 << KVM_ARM_VCPU_PTRAUTH_GENERIC;
        }
        Ok(all_features)
    }

    /// Finalizes vcpu features that require a KVM_ARM_VCPU_FINALIZE step
    /// after KVM_ARM_VCPU_INIT; currently only SVE needs this.
    fn finalize(&self, features: u32) -> Result<()> {
        if (features & 1 << KVM_ARM_VCPU_SVE) != 0 {
            // SAFETY: the c_int argument outlives the ioctl and the kernel
            // only reads it.
            let ret = unsafe {
                ioctl_with_ref(
                    self,
                    KVM_ARM_VCPU_FINALIZE,
                    &std::os::raw::c_int::try_from(KVM_ARM_VCPU_SVE)
                        .map_err(|_| Error::new(EINVAL))?,
                )
            };
            if ret != 0 {
                return errno_result();
            }
        }
        Ok(())
    }
}
/// KVM-specific register identifiers used with the `KVM_GET_ONE_REG` /
/// `KVM_SET_ONE_REG` API.
///
/// The `From<KvmVcpuRegister> for u64` impl below encodes each variant into
/// the `u64` register id the ioctls expect.
#[derive(Debug, Copy, Clone, Serialize, Deserialize)]
pub enum KvmVcpuRegister {
    /// General-purpose register Xn; valid indices are 0..=30.
    X(u8),
    /// Stack pointer (SP).
    Sp,
    /// Program counter (PC).
    Pc,
    /// Processor state (PSTATE).
    Pstate,
    /// FP/SIMD vector register Vn; valid indices are 0..=31.
    V(u8),
    /// KVM firmware pseudo-register, identified by its index (see the
    /// associated constants below).
    Firmware(u16),
    /// AArch64 system register.
    System(AArch64SysRegId),
    /// CCSIDR_EL1 demultiplexed by a CSSELR_EL1 value, which selects the
    /// cache whose ID register is accessed.
    Ccsidr(u8),
}
impl KvmVcpuRegister {
    // Firmware pseudo-register indices, per the KVM ARM hypercall/firmware
    // register interface.
    pub const PSCI_VERSION: Self = Self::Firmware(0);
    pub const SMCCC_ARCH_WORKAROUND_1: Self = Self::Firmware(1);
    pub const SMCCC_ARCH_WORKAROUND_2: Self = Self::Firmware(2);
    pub const SMCCC_ARCH_WORKAROUND_3: Self = Self::Firmware(3);

    /// Size of this register in bytes, decoded from the KVM_REG_SIZE field
    /// of the encoded register id.
    ///
    /// Panics on an unrecognized size field, which would indicate a broken
    /// encoding in the `From` impl below.
    pub fn size(&self) -> usize {
        let kvm_reg = u64::from(*self);
        let size_field = kvm_reg & KVM_REG_SIZE_MASK;
        // KVM_REG_SIZE_U8 needs an explicit cast to be usable as a u64 match
        // pattern alongside the other size constants.
        const REG_SIZE_U8: u64 = KVM_REG_SIZE_U8 as u64;
        match size_field {
            REG_SIZE_U8 => 1,
            KVM_REG_SIZE_U16 => 2,
            KVM_REG_SIZE_U32 => 4,
            KVM_REG_SIZE_U64 => 8,
            KVM_REG_SIZE_U128 => 16,
            KVM_REG_SIZE_U256 => 32,
            KVM_REG_SIZE_U512 => 64,
            KVM_REG_SIZE_U1024 => 128,
            KVM_REG_SIZE_U2048 => 256,
            _ => panic!("invalid size field {size_field}"),
        }
    }
}
/// Encodes a `KvmVcpuRegister` into the `u64` register id used by the
/// KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls.
impl From<KvmVcpuRegister> for u64 {
    fn from(register: KvmVcpuRegister) -> Self {
        // Base encoding: arch flag | size field | register kind | fields.
        const fn reg(size: u64, kind: u64, fields: u64) -> u64 {
            KVM_REG_ARM64 | size | kind | fields
        }
        // "Core" registers are addressed by their offset into `kvm_regs`,
        // expressed in 32-bit units per the KVM core-register convention.
        const fn kvm_regs_reg(size: u64, offset: usize) -> u64 {
            let offset = offset / std::mem::size_of::<u32>();
            reg(size, KVM_REG_ARM_CORE as u64, offset as u64)
        }
        // 64-bit core register at the given byte offset into `kvm_regs`.
        const fn kvm_reg(offset: usize) -> u64 {
            kvm_regs_reg(KVM_REG_SIZE_U64, offset)
        }
        // Banked SPSR: the KVM_SPSR_* index selects an entry of the `spsr`
        // array inside `kvm_regs`.
        fn spsr_reg(spsr_reg: u32) -> u64 {
            let n = std::mem::size_of::<u64>() * (spsr_reg as usize);
            kvm_reg(offset_of!(kvm_regs, spsr) + n)
        }
        // Core register inside the embedded `user_pt_regs` struct.
        fn user_pt_reg(offset: usize) -> u64 {
            kvm_regs_reg(KVM_REG_SIZE_U64, offset_of!(kvm_regs, regs) + offset)
        }
        // Core register inside the embedded `user_fpsimd_state` struct.
        fn user_fpsimd_state_reg(size: u64, offset: usize) -> u64 {
            kvm_regs_reg(size, offset_of!(kvm_regs, fp_regs) + offset)
        }
        const fn reg_u64(kind: u64, fields: u64) -> u64 {
            reg(KVM_REG_SIZE_U64, kind, fields)
        }
        // Demultiplexed register (currently only CCSIDR): `index` picks the
        // demux group, `value` the selector within it.
        const fn demux_reg(size: u64, index: u64, value: u64) -> u64 {
            let index = (index << KVM_REG_ARM_DEMUX_ID_SHIFT) & (KVM_REG_ARM_DEMUX_ID_MASK as u64);
            let value =
                (value << KVM_REG_ARM_DEMUX_VAL_SHIFT) & (KVM_REG_ARM_DEMUX_VAL_MASK as u64);
            reg(size, KVM_REG_ARM_DEMUX as u64, index | value)
        }
        match register {
            KvmVcpuRegister::X(n @ 0..=30) => {
                let n = std::mem::size_of::<u64>() * (n as usize);
                user_pt_reg(offset_of!(user_pt_regs, regs) + n)
            }
            KvmVcpuRegister::X(n) => unreachable!("invalid KvmVcpuRegister Xn index: {n}"),
            KvmVcpuRegister::Sp => user_pt_reg(offset_of!(user_pt_regs, sp)),
            KvmVcpuRegister::Pc => user_pt_reg(offset_of!(user_pt_regs, pc)),
            KvmVcpuRegister::Pstate => user_pt_reg(offset_of!(user_pt_regs, pstate)),
            KvmVcpuRegister::V(n @ 0..=31) => {
                let n = std::mem::size_of::<u128>() * (n as usize);
                user_fpsimd_state_reg(KVM_REG_SIZE_U128, offset_of!(user_fpsimd_state, vregs) + n)
            }
            KvmVcpuRegister::V(n) => unreachable!("invalid KvmVcpuRegister Vn index: {n}"),
            // FPSR/FPCR are exposed by KVM as core registers rather than
            // system registers.
            KvmVcpuRegister::System(aarch64_sys_reg::FPSR) => {
                user_fpsimd_state_reg(KVM_REG_SIZE_U32, offset_of!(user_fpsimd_state, fpsr))
            }
            KvmVcpuRegister::System(aarch64_sys_reg::FPCR) => {
                user_fpsimd_state_reg(KVM_REG_SIZE_U32, offset_of!(user_fpsimd_state, fpcr))
            }
            KvmVcpuRegister::System(aarch64_sys_reg::SPSR_EL1) => spsr_reg(KVM_SPSR_EL1),
            KvmVcpuRegister::System(aarch64_sys_reg::SPSR_abt) => spsr_reg(KVM_SPSR_ABT),
            KvmVcpuRegister::System(aarch64_sys_reg::SPSR_und) => spsr_reg(KVM_SPSR_UND),
            KvmVcpuRegister::System(aarch64_sys_reg::SPSR_irq) => spsr_reg(KVM_SPSR_IRQ),
            KvmVcpuRegister::System(aarch64_sys_reg::SPSR_fiq) => spsr_reg(KVM_SPSR_FIQ),
            KvmVcpuRegister::System(aarch64_sys_reg::SP_EL1) => {
                kvm_reg(offset_of!(kvm_regs, sp_el1))
            }
            KvmVcpuRegister::System(aarch64_sys_reg::ELR_EL1) => {
                kvm_reg(offset_of!(kvm_regs, elr_el1))
            }
            // NOTE: the CVAL/VCT cross-mapping below is deliberate. KVM's
            // virtual timer register ids have the two encodings swapped, so
            // we swap here to match the kernel's ids; the
            // `system_timer_register_mixup` test at the bottom of this file
            // pins these encodings against the kernel constants
            // KVM_REG_ARM_TIMER_CVAL / KVM_REG_ARM_TIMER_CNT.
            KvmVcpuRegister::System(aarch64_sys_reg::CNTV_CVAL_EL0) => reg_u64(
                KVM_REG_ARM64_SYSREG.into(),
                aarch64_sys_reg::CNTVCT_EL0.encoded().into(),
            ),
            KvmVcpuRegister::System(aarch64_sys_reg::CNTVCT_EL0) => reg_u64(
                KVM_REG_ARM64_SYSREG.into(),
                aarch64_sys_reg::CNTV_CVAL_EL0.encoded().into(),
            ),
            KvmVcpuRegister::System(sysreg) => {
                reg_u64(KVM_REG_ARM64_SYSREG.into(), sysreg.encoded().into())
            }
            KvmVcpuRegister::Firmware(n) => reg_u64(KVM_REG_ARM_FW.into(), n.into()),
            KvmVcpuRegister::Ccsidr(n) => demux_reg(KVM_REG_SIZE_U32, 0, n.into()),
        }
    }
}
impl VcpuAArch64 for KvmVcpu {
    /// Initializes the vcpu: queries the preferred target with
    /// `KVM_ARM_PREFERRED_TARGET`, ORs in the requested feature bits, calls
    /// `KVM_ARM_VCPU_INIT`, then finalizes features (SVE) that require it.
    fn init(&self, features: &[VcpuFeature]) -> Result<()> {
        let mut kvi = kvm_vcpu_init {
            target: KVM_ARM_TARGET_GENERIC_V8,
            features: [0; 7],
        };
        // SAFETY: KVM_ARM_PREFERRED_TARGET writes only into `kvi`, which we
        // own and which outlives the ioctl.
        let ret = unsafe { ioctl_with_mut_ref(&self.vm, KVM_ARM_PREFERRED_TARGET, &mut kvi) };
        if ret != 0 {
            return errno_result();
        }
        kvi.features[0] = self.get_features_bitmap(features)?;
        // SAFETY: the kernel only reads `kvi` during the call.
        let ret = unsafe { ioctl_with_ref(self, KVM_ARM_VCPU_INIT, &kvi) };
        if ret != 0 {
            return errno_result();
        }
        self.finalize(kvi.features[0])?;
        Ok(())
    }

    /// Initializes the in-kernel PMUv3 for this vcpu, wiring it to `irq`:
    /// first probes the IRQ attribute, then sets it, then issues the INIT
    /// attribute.
    fn init_pmu(&self, irq: u64) -> Result<()> {
        let irq_addr = &irq as *const u64;
        // The in-kernel PMU virtualization is initialized by setting the irq
        // num and then the init attribute in the vcpu's PMU device group.
        let irq_attr = kvm_device_attr {
            group: KVM_ARM_VCPU_PMU_V3_CTRL,
            attr: KVM_ARM_VCPU_PMU_V3_IRQ as u64,
            addr: irq_addr as u64,
            flags: 0,
        };
        // SAFETY: `irq` outlives both ioctls and the kernel only reads
        // through the pointer during the calls.
        let ret = unsafe { ioctl_with_ref(self, kvm_sys::KVM_HAS_DEVICE_ATTR, &irq_attr) };
        if ret < 0 {
            return errno_result();
        }
        // SAFETY: same as above.
        let ret = unsafe { ioctl_with_ref(self, kvm_sys::KVM_SET_DEVICE_ATTR, &irq_attr) };
        if ret < 0 {
            return errno_result();
        }
        let init_attr = kvm_device_attr {
            group: KVM_ARM_VCPU_PMU_V3_CTRL,
            attr: KVM_ARM_VCPU_PMU_V3_INIT as u64,
            addr: 0,
            flags: 0,
        };
        // SAFETY: init_attr carries no pointer (addr == 0) and outlives the
        // call.
        let ret = unsafe { ioctl_with_ref(self, kvm_sys::KVM_SET_DEVICE_ATTR, &init_attr) };
        if ret < 0 {
            return errno_result();
        }
        Ok(())
    }

    /// Returns true if this vcpu supports stolen-time (pvtime) accounting,
    /// probed via KVM_HAS_DEVICE_ATTR on the PVTIME control group.
    fn has_pvtime_support(&self) -> bool {
        // Probe only: addr is 0 because HAS_DEVICE_ATTR does not dereference
        // it.
        let pvtime_attr = kvm_device_attr {
            group: KVM_ARM_VCPU_PVTIME_CTRL,
            attr: KVM_ARM_VCPU_PVTIME_IPA as u64,
            addr: 0,
            flags: 0,
        };
        // SAFETY: pvtime_attr outlives the call and carries no pointer.
        let ret = unsafe { ioctl_with_ref(self, kvm_sys::KVM_HAS_DEVICE_ATTR, &pvtime_attr) };
        ret >= 0
    }

    /// Enables pvtime for this vcpu, telling the kernel the guest physical
    /// address (`pvtime_ipa`) of the stolen-time structure.
    fn init_pvtime(&self, pvtime_ipa: u64) -> Result<()> {
        let pvtime_ipa_addr = &pvtime_ipa as *const u64;
        let pvtime_attr = kvm_device_attr {
            group: KVM_ARM_VCPU_PVTIME_CTRL,
            attr: KVM_ARM_VCPU_PVTIME_IPA as u64,
            addr: pvtime_ipa_addr as u64,
            flags: 0,
        };
        // SAFETY: `pvtime_ipa` outlives the ioctl and the kernel only reads
        // through the pointer during the call.
        let ret = unsafe { ioctl_with_ref(self, kvm_sys::KVM_SET_DEVICE_ATTR, &pvtime_attr) };
        if ret < 0 {
            return errno_result();
        }
        Ok(())
    }

    /// Sets a vcpu register, dispatching on the register's encoded size
    /// (32- or 64-bit). Panics on any other size, which `kvm_reg_id` never
    /// produces for a `VcpuRegAArch64`.
    fn set_one_reg(&self, reg_id: VcpuRegAArch64, data: u64) -> Result<()> {
        let kvm_reg = self.kvm_reg_id(reg_id)?;
        match kvm_reg.size() {
            4 => self.set_one_kvm_reg_u32(kvm_reg, data as u32),
            8 => self.set_one_kvm_reg_u64(kvm_reg, data),
            size => panic!("bad reg size {size}"),
        }
    }

    /// Reads a vcpu register, widening 32-bit registers to u64.
    fn get_one_reg(&self, reg_id: VcpuRegAArch64) -> Result<u64> {
        let kvm_reg = self.kvm_reg_id(reg_id)?;
        match kvm_reg.size() {
            4 => self.get_one_kvm_reg_u32(kvm_reg).map(u64::from),
            8 => self.get_one_kvm_reg_u64(kvm_reg),
            size => panic!("bad reg size {size}"),
        }
    }

    /// Sets FP/SIMD vector register Vn (n in 0..=31) to the 128-bit `data`.
    fn set_vector_reg(&self, reg_num: u8, data: u128) -> Result<()> {
        if reg_num > 31 {
            return Err(Error::new(EINVAL));
        }
        self.set_one_kvm_reg_u128(KvmVcpuRegister::V(reg_num), data)
    }

    /// Reads FP/SIMD vector register Vn (n in 0..=31).
    fn get_vector_reg(&self, reg_num: u8) -> Result<u128> {
        if reg_num > 31 {
            return Err(Error::new(EINVAL));
        }
        self.get_one_kvm_reg_u128(KvmVcpuRegister::V(reg_num))
    }

    /// Reads MPIDR_EL1, the multiprocessor affinity register.
    fn get_mpidr(&self) -> Result<u64> {
        self.get_one_reg(VcpuRegAArch64::System(aarch64_sys_reg::MPIDR_EL1))
    }

    /// Returns the PSCI version exposed to the guest.
    ///
    /// Read from the PSCI_VERSION firmware pseudo-register; if that read
    /// fails the version is assumed to be 0.2 (the first version KVM
    /// supported). Anything below 0.2 is rejected with ENOTSUP.
    fn get_psci_version(&self) -> Result<PsciVersion> {
        let version = if let Ok(v) = self.get_one_kvm_reg_u64(KvmVcpuRegister::PSCI_VERSION) {
            let v = u32::try_from(v).map_err(|_| Error::new(EINVAL))?;
            PsciVersion::try_from(v)?
        } else {
            // When `KVM_REG_ARM_PSCI_VERSION` is not supported, we can return
            // PSCI 0.2, as vCPU has been initialized with `PSCI_0_2` feature.
            PSCI_0_2
        };
        if version < PSCI_0_2 {
            Err(Error::new(ENOTSUP))
        } else {
            Ok(version)
        }
    }

    /// Returns the number of hardware breakpoints supported, via
    /// KVM_CAP_GUEST_DEBUG_HW_BPS.
    fn get_max_hw_bps(&self) -> Result<usize> {
        // SAFETY: KVM_CHECK_EXTENSION is a stateless query on the VM fd.
        let max_hw_bps = unsafe {
            ioctl_with_val(
                &self.vm,
                KVM_CHECK_EXTENSION,
                KVM_CAP_GUEST_DEBUG_HW_BPS.into(),
            )
        };
        if max_hw_bps < 0 {
            errno_result()
        } else {
            Ok(max_hw_bps.try_into().expect("can't represent u64 as usize"))
        }
    }

    /// Reads all system registers reported by KVM_GET_REG_LIST, plus a fixed
    /// set of registers KVM exposes as core registers instead.
    fn get_system_regs(&self) -> Result<BTreeMap<AArch64SysRegId, u64>> {
        let reg_list = self.get_reg_list()?;
        let cntvct_el0: u16 = aarch64_sys_reg::CNTVCT_EL0.encoded();
        let cntv_cval_el0: u16 = aarch64_sys_reg::CNTV_CVAL_EL0.encoded();
        let mut sys_regs = BTreeMap::new();
        for reg in reg_list {
            if (reg as u32) & KVM_REG_ARM_COPROC_MASK == KVM_REG_ARM64_SYSREG {
                // KVM's timer register ids have the CNTVCT_EL0/CNTV_CVAL_EL0
                // encodings swapped (see the From impl above), so undo the
                // swap when decoding the raw id back to a sysreg.
                let r = if reg as u16 == cntvct_el0 {
                    aarch64_sys_reg::CNTV_CVAL_EL0
                } else if reg as u16 == cntv_cval_el0 {
                    aarch64_sys_reg::CNTVCT_EL0
                } else {
                    AArch64SysRegId::from_encoded((reg & 0xFFFF) as u16)
                };
                sys_regs.insert(r, self.get_one_reg(VcpuRegAArch64::System(r))?);
                // Sanity check the decode/encode round trip: re-encoding the
                // decoded register must reproduce the raw id from the list.
                assert_eq!(
                    Ok(reg),
                    self.kvm_reg_id(VcpuRegAArch64::System(r)).map(u64::from),
                );
            }
        }
        // These registers are in the KVM reg list as core registers, not
        // sysregs, so the loop above misses them; read them explicitly.
        let extra_sys_regs = [
            aarch64_sys_reg::ELR_EL1,
            aarch64_sys_reg::FPCR,
            aarch64_sys_reg::FPSR,
            aarch64_sys_reg::SP_EL1,
            aarch64_sys_reg::SPSR_EL1,
            aarch64_sys_reg::SPSR_abt,
            aarch64_sys_reg::SPSR_und,
            aarch64_sys_reg::SPSR_irq,
            aarch64_sys_reg::SPSR_fiq,
        ];
        for reg in extra_sys_regs {
            sys_regs.insert(reg, self.get_one_reg(VcpuRegAArch64::System(reg))?);
        }
        Ok(sys_regs)
    }

    /// Reads all CCSIDR (cache size ID) demux registers, keyed by their
    /// CSSELR selector index.
    fn get_cache_info(&self) -> Result<BTreeMap<u8, u64>> {
        const KVM_REG_CCSIDR: u64 = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | (KVM_REG_ARM_DEMUX as u64);
        const CCSIDR_INDEX_MASK: u64 = 0xFF;
        let reg_list = self.get_reg_list()?;
        let mut cache_info = BTreeMap::new();
        for reg in reg_list {
            // A CCSIDR id is the demux base with the selector in the low
            // byte; mask the selector off before comparing.
            if (reg & !CCSIDR_INDEX_MASK) == KVM_REG_CCSIDR {
                let idx = reg as u8;
                cache_info.insert(
                    idx,
                    self.get_one_kvm_reg_u32(KvmVcpuRegister::Ccsidr(idx))?
                        .into(),
                );
            }
        }
        Ok(cache_info)
    }

    /// Restores CCSIDR demux registers from a map previously produced by
    /// `get_cache_info`. Panics if a value does not fit the 32-bit register.
    fn set_cache_info(&self, cache_info: BTreeMap<u8, u64>) -> Result<()> {
        for (idx, val) in cache_info {
            self.set_one_kvm_reg_u32(
                KvmVcpuRegister::Ccsidr(idx),
                val.try_into()
                    .expect("trying to set a u32 register with a u64 value"),
            )?;
        }
        Ok(())
    }

    /// Snapshots KVM-specific vcpu state: all firmware pseudo-registers
    /// present in the reg list, keyed by their firmware index.
    fn hypervisor_specific_snapshot(&self) -> anyhow::Result<AnySnapshot> {
        let reg_list = self.get_reg_list()?;
        let mut firmware_regs = BTreeMap::new();
        for reg in reg_list {
            if (reg as u32) & KVM_REG_ARM_COPROC_MASK == KVM_REG_ARM_FW {
                firmware_regs.insert(
                    reg as u16,
                    self.get_one_kvm_reg_u64(KvmVcpuRegister::Firmware(reg as u16))?,
                );
            }
        }
        AnySnapshot::to_any(KvmSnapshot { firmware_regs })
            .context("Failed to serialize KVM specific data")
    }

    /// Restores the firmware pseudo-registers captured by
    /// `hypervisor_specific_snapshot`.
    fn hypervisor_specific_restore(&self, data: AnySnapshot) -> anyhow::Result<()> {
        let deser: KvmSnapshot =
            AnySnapshot::from_any(data).context("Failed to deserialize KVM specific data")?;
        // TODO: need to set firmware registers before "create_fdt" is called.
        for (id, val) in &deser.firmware_regs {
            self.set_one_kvm_reg_u64(KvmVcpuRegister::Firmware(*id), *val)?;
        }
        Ok(())
    }

    /// Configures guest debug: hardware breakpoints at `addrs` and/or
    /// single-step.
    ///
    /// Breakpoint addresses must be 4-byte aligned (EINVAL otherwise) and
    /// are sign-extended from bit 48 before being written to DBGBVRn.
    #[allow(clippy::unusual_byte_groupings)]
    fn set_guest_debug(&self, addrs: &[GuestAddress], enable_singlestep: bool) -> Result<()> {
        let mut dbg = kvm_guest_debug {
            control: KVM_GUESTDBG_ENABLE,
            ..Default::default()
        };
        if enable_singlestep {
            dbg.control |= KVM_GUESTDBG_SINGLESTEP;
        }
        if !addrs.is_empty() {
            dbg.control |= KVM_GUESTDBG_USE_HW;
        }
        for (i, guest_addr) in addrs.iter().enumerate() {
            // From the ARMv8 Architecture Reference Manual, DBGBVR is
            // 4-byte aligned, so reject misaligned addresses.
            if guest_addr.0 & 0b11 != 0 {
                return Err(Error::new(EINVAL));
            }
            // Sign-extend the address from bit 48 (shift left then
            // arithmetic shift right by 15).
            let sign_ext = 15;
            dbg.arch.dbg_bvr[i] = (((guest_addr.0 << sign_ext) as i64) >> sign_ext) as u64;
            // DBGBCR fields: BAS=0b1111 (all byte lanes), PMC=0b11 (all
            // privilege levels), E=1 (enabled) — presumably matching the ARM
            // debug breakpoint control layout; confirm against the ARM ARM.
            dbg.arch.dbg_bcr[i] = 0b1111_11_1;
        }
        // SAFETY: `dbg` outlives the ioctl and the kernel only reads it.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_GUEST_DEBUG, &dbg) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }
}
/// Serialized form of the KVM-specific vcpu state captured by
/// `hypervisor_specific_snapshot`: firmware pseudo-register index -> value.
#[derive(Debug, Serialize, Deserialize)]
struct KvmSnapshot {
    firmware_regs: BTreeMap<u16, u64>,
}
/// Converts an `IrqSourceChip` into the KVM irqchip index used on ARM.
///
/// Only the GIC is valid here; any other chip is logged as an error and
/// mapped to 0 as a best-effort fallback.
pub(super) fn chip_to_kvm_chip(chip: IrqSourceChip) -> u32 {
    if let IrqSourceChip::Gic = chip {
        0
    } else {
        // IrqSourceChip::Gic is the only valid value on ARM.
        error!("Invalid IrqChipSource for ARM {:?}", chip);
        0
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Pins the deliberate CNTV_CVAL_EL0 <-> CNTVCT_EL0 cross-mapping in the
    /// `From<KvmVcpuRegister> for u64` encoder against the kernel's own
    /// register-id constants, so the swap cannot be "fixed" by accident.
    #[test]
    fn system_timer_register_mixup() {
        // Kernel constant KVM_REG_ARM_TIMER_CVAL (which KVM encodes with the
        // CNTVCT encoding).
        const KVM_REG_ARM_TIMER_CVAL: u64 = 0x6030_0000_0013_DF02;
        let cntv_cval_el0_kvm = KvmVcpuRegister::System(aarch64_sys_reg::CNTV_CVAL_EL0);
        assert_eq!(u64::from(cntv_cval_el0_kvm), KVM_REG_ARM_TIMER_CVAL);

        // Kernel constant KVM_REG_ARM_TIMER_CNT (which KVM encodes with the
        // CNTV_CVAL encoding).
        const KVM_REG_ARM_TIMER_CNT: u64 = 0x6030_0000_0013_DF1A;
        let cntvct_el0_kvm = KvmVcpuRegister::System(aarch64_sys_reg::CNTVCT_EL0);
        assert_eq!(u64::from(cntvct_el0_kvm), KVM_REG_ARM_TIMER_CNT);
    }
}