// Source: devices/src/virtio/iommu/ipc_memory_mapper.rs
// Copyright 2022 The ChromiumOS Authors1// Use of this source code is governed by a BSD-style license that can be2// found in the LICENSE file.34//! Provide utility to communicate with an iommu in another process56use std::sync::Arc;78use anyhow::anyhow;9use anyhow::bail;10use anyhow::Context;11use anyhow::Result;12use base::error;13use base::AsRawDescriptor;14use base::AsRawDescriptors;15use base::Event;16use base::Protection;17use base::RawDescriptor;18use base::Tube;19use serde::Deserialize;20use serde::Serialize;21use smallvec::SmallVec;22use sync::Mutex;23use vm_memory::GuestAddress;24use vm_memory::GuestMemory;25use zerocopy::FromBytes;26use zerocopy::FromZeros;27use zerocopy::Immutable;28use zerocopy::IntoBytes;2930use crate::virtio::memory_mapper::MemRegion;3132#[derive(Serialize, Deserialize)]33pub(super) enum IommuRequest {34Export {35endpoint_id: u32,36iova: u64,37size: u64,38},39Release {40endpoint_id: u32,41iova: u64,42size: u64,43},44StartExportSession {45endpoint_id: u32,46},47}4849#[derive(Serialize, Deserialize)]50pub(super) enum IommuResponse {51Export(Vec<MemRegion>),52Release,53StartExportSession(Event),54Err(String),55}5657impl IommuRequest {58pub(super) fn get_endpoint_id(&self) -> u32 {59match self {60Self::Export { endpoint_id, .. } => *endpoint_id,61Self::Release { endpoint_id, .. 
} => *endpoint_id,62Self::StartExportSession { endpoint_id } => *endpoint_id,63}64}65}6667/// Sends an addr translation request to another process using `Tube`, and68/// gets the translated addr from another `Tube`69pub struct IpcMemoryMapper {70request_tx: Tube,71response_rx: Tube,72endpoint_id: u32,73}7475impl std::fmt::Debug for IpcMemoryMapper {76fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {77f.debug_struct("IpcMemoryMapper")78.field("endpoint_id", &self.endpoint_id)79.finish()80}81}8283fn map_bad_resp(resp: IommuResponse) -> anyhow::Error {84match resp {85IommuResponse::Err(e) => anyhow!("remote error {}", e),86_ => anyhow!("response type mismatch"),87}88}8990impl IpcMemoryMapper {91/// Returns a new `IpcMemoryMapper` instance.92///93/// # Arguments94///95/// * `request_tx` - A tube to send `TranslateRequest` to another process.96/// * `response_rx` - A tube to receive `Option<Vec<MemRegion>>`97/// * `endpoint_id` - For the remote iommu to identify the device/ipc mapper.98pub fn new(request_tx: Tube, response_rx: Tube, endpoint_id: u32) -> Self {99Self {100request_tx,101response_rx,102endpoint_id,103}104}105106fn do_request(&self, req: IommuRequest) -> Result<IommuResponse> {107self.request_tx108.send(&req)109.context("failed to send request")?;110self.response_rx111.recv::<IommuResponse>()112.context("failed to get response")113}114115/// See [crate::virtio::memory_mapper::MemoryMapper::export].116pub fn export(&mut self, iova: u64, size: u64) -> Result<Vec<MemRegion>> {117let req = IommuRequest::Export {118endpoint_id: self.endpoint_id,119iova,120size,121};122match self.do_request(req)? {123IommuResponse::Export(vec) => Ok(vec),124e => Err(map_bad_resp(e)),125}126}127128/// See [crate::virtio::memory_mapper::MemoryMapper::release].129pub fn release(&mut self, iova: u64, size: u64) -> Result<()> {130let req = IommuRequest::Release {131endpoint_id: self.endpoint_id,132iova,133size,134};135match self.do_request(req)? 
{136IommuResponse::Release => Ok(()),137e => Err(map_bad_resp(e)),138}139}140141/// See [crate::virtio::memory_mapper::MemoryMapper::start_export_session].142pub fn start_export_session(&mut self) -> Result<Event> {143let req = IommuRequest::StartExportSession {144endpoint_id: self.endpoint_id,145};146match self.do_request(req)? {147IommuResponse::StartExportSession(evt) => Ok(evt),148e => Err(map_bad_resp(e)),149}150}151}152153impl AsRawDescriptors for IpcMemoryMapper {154fn as_raw_descriptors(&self) -> Vec<RawDescriptor> {155vec![156self.request_tx.as_raw_descriptor(),157self.response_rx.as_raw_descriptor(),158]159}160}161162pub struct CreateIpcMapperRet {163pub mapper: IpcMemoryMapper,164pub response_tx: Tube,165}166167/// Returns a new `IpcMemoryMapper` instance and a response_tx for the iommu168/// to respond to `TranslateRequest`s.169///170/// # Arguments171///172/// * `endpoint_id` - For the remote iommu to identify the device/ipc mapper.173/// * `request_tx` - A tube to send `TranslateRequest` to a remote iommu. 
This should be cloned and174/// shared between different ipc mappers with different `endpoint_id`s.175pub fn create_ipc_mapper(endpoint_id: u32, request_tx: Tube) -> CreateIpcMapperRet {176let (response_tx, response_rx) = Tube::pair().expect("failed to create tube pair");177CreateIpcMapperRet {178mapper: IpcMemoryMapper::new(request_tx, response_rx, endpoint_id),179response_tx,180}181}182183#[derive(Debug)]184struct ExportedRegionInner {185regions: Vec<MemRegion>,186iova: u64,187size: u64,188iommu: Arc<Mutex<IpcMemoryMapper>>,189}190191impl Drop for ExportedRegionInner {192fn drop(&mut self) {193if let Err(e) = self.iommu.lock().release(self.iova, self.size) {194error!("Error releasing region {:?}", e);195}196}197}198199/// A region exported from the virtio-iommu.200#[derive(Clone, Debug)]201pub struct ExportedRegion {202inner: Arc<Mutex<ExportedRegionInner>>,203}204205impl ExportedRegion {206/// Creates a new, fully initialized exported region.207pub fn new(208mem: &GuestMemory,209iommu: Arc<Mutex<IpcMemoryMapper>>,210iova: u64,211size: u64,212) -> Result<Self> {213let regions = iommu214.lock()215.export(iova, size)216.context("failed to export")?;217for r in ®ions {218if !mem.is_valid_range(r.gpa, r.len) {219bail!("region not in memory range");220}221}222Ok(Self {223inner: Arc::new(Mutex::new(ExportedRegionInner {224regions,225iova,226size,227iommu,228})),229})230}231232// Helper function for copying to/from [iova, iova+remaining).233fn do_copy<C>(234&self,235iova: u64,236mut remaining: usize,237prot: Protection,238mut copy_fn: C,239) -> Result<()>240where241C: FnMut(usize /* offset */, GuestAddress, usize /* len */) -> Result<usize>,242{243let inner = self.inner.lock();244let mut region_offset = iova.checked_sub(inner.iova).with_context(|| {245format!(246"out of bounds: src_iova={} region_iova={}",247iova, inner.iova248)249})?;250let mut offset = 0;251for r in &inner.regions {252if region_offset >= r.len {253region_offset -= r.len;254continue;255}256257if 
!r.prot.allows(&prot) {258bail!("gpa is not accessible");259}260261let len = (r.len as usize).min(remaining);262let copy_len = copy_fn(offset, r.gpa.unchecked_add(region_offset), len)?;263if len != copy_len {264bail!("incomplete copy: expected={}, actual={}", len, copy_len);265}266267remaining -= len;268offset += len;269region_offset = 0;270271if remaining == 0 {272return Ok(());273}274}275276Err(anyhow!("not enough data: remaining={}", remaining))277}278279/// Reads an object from the given iova. Fails if the specified iova range does280/// not lie within this region, or if part of the region isn't readable.281pub fn read_obj_from_addr<T: IntoBytes + FromBytes + FromZeros>(282&self,283mem: &GuestMemory,284iova: u64,285) -> anyhow::Result<T> {286let mut val = T::new_zeroed();287let buf = val.as_mut_bytes();288self.do_copy(iova, buf.len(), Protection::read(), |offset, gpa, len| {289mem.read_at_addr(&mut buf[offset..(offset + len)], gpa)290.context("failed to read from gpa")291})?;292Ok(val)293}294295/// Writes an object at a given iova. 
Fails if the specified iova range does296/// not lie within this region, or if part of the region isn't writable.297pub fn write_obj_at_addr<T: Immutable + IntoBytes>(298&self,299mem: &GuestMemory,300val: T,301iova: u64,302) -> anyhow::Result<()> {303let buf = val.as_bytes();304self.do_copy(iova, buf.len(), Protection::write(), |offset, gpa, len| {305mem.write_at_addr(&buf[offset..(offset + len)], gpa)306.context("failed to write from gpa")307})?;308Ok(())309}310311/// Validates that [iova, iova+size) lies within this region, and that312/// the region is valid according to mem.313pub fn is_valid(&self, mem: &GuestMemory, iova: u64, size: u64) -> bool {314let inner = self.inner.lock();315let iova_end = iova.checked_add(size);316if iova_end.is_none() {317return false;318}319if iova < inner.iova || iova_end.unwrap() > (inner.iova + inner.size) {320return false;321}322self.inner323.lock()324.regions325.iter()326.all(|r| mem.range_overlap(r.gpa, r.gpa.unchecked_add(r.len)))327}328329/// Gets the list of guest physical regions for the exported region.330pub fn get_mem_regions(&self) -> SmallVec<[MemRegion; 1]> {331SmallVec::from_slice(&self.inner.lock().regions)332}333}334335#[cfg(test)]336mod tests {337use std::thread;338339use base::Protection;340use vm_memory::GuestAddress;341342use super::*;343344#[test]345fn test() {346let (request_tx, request_rx) = Tube::pair().expect("failed to create tube pair");347let CreateIpcMapperRet {348mut mapper,349response_tx,350} = create_ipc_mapper(3, request_tx);351let user_handle = thread::spawn(move || {352assert!(mapper353.export(0x555, 1)354.unwrap()355.iter()356.zip(&vec![MemRegion {357gpa: GuestAddress(0x777),358len: 1,359prot: Protection::read_write(),360},])361.all(|(a, b)| a == b));362});363let iommu_handle = thread::spawn(move || {364let (endpoint_id, iova, size) = match request_rx.recv().unwrap() {365IommuRequest::Export {366endpoint_id,367iova,368size,369} => (endpoint_id, iova, size),370_ => 
unreachable!(),371};372assert_eq!(endpoint_id, 3);373assert_eq!(iova, 0x555);374assert_eq!(size, 1);375response_tx376.send(&IommuResponse::Export(vec![MemRegion {377gpa: GuestAddress(0x777),378len: 1,379prot: Protection::read_write(),380}]))381.unwrap();382// This join needs to be here because on Windows, if `response_tx`383// is dropped before `response_rx` can read, the connection will384// be severed and this test will fail.385user_handle.join().unwrap();386});387iommu_handle.join().unwrap();388}389}390391392