// Path: devices/src/virtio/iommu/memory_mapper.rs
// Copyright 2021 The ChromiumOS Authors1// Use of this source code is governed by a BSD-style license that can be2// found in the LICENSE file.34//! MemoryMapper trait and basic impl for virtio-iommu implementation5//!6//! All the addr/range ends in this file are exclusive.78use std::any::Any;9use std::collections::BTreeMap;10use std::sync::atomic::AtomicU32;11use std::sync::atomic::Ordering;1213use anyhow::anyhow;14use anyhow::bail;15use anyhow::Context;16use anyhow::Result;17use base::warn;18use base::AsRawDescriptors;19use base::Event;20use base::Protection;21use base::RawDescriptor;22use cros_async::EventAsync;23use cros_async::Executor;24use resources::AddressRange;25use serde::Deserialize;26use serde::Serialize;27use vm_memory::GuestAddress;2829#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]30pub struct MemRegion {31pub gpa: GuestAddress,32pub len: u64,33pub prot: Protection,34}3536/// Manages the mapping from a guest IO virtual address space to the guest physical address space37#[derive(Debug)]38pub struct MappingInfo {39pub iova: u64,40pub gpa: GuestAddress,41pub size: u64,42pub prot: Protection,43}4445impl MappingInfo {46#[allow(dead_code)]47fn new(iova: u64, gpa: GuestAddress, size: u64, prot: Protection) -> Result<Self> {48if size == 0 {49bail!("can't create 0 sized region");50}51iova.checked_add(size).context("iova overflow")?;52gpa.checked_add(size).context("gpa overflow")?;53Ok(Self {54iova,55gpa,56size,57prot,58})59}60}6162struct ExportState {63// List of exported regions. 
Exported regions can overlap.64exported: Vec<AddressRange>,6566// Event used to signal the client device when there is a fault.67fault_event: Event,6869// Event used to signal virtio-iommu when the fault is resolved.70fault_resolved_event_internal: Event,71// Clone of the above event returned to virtio-iommu when a fault occurs.72fault_resolved_event_external: Option<EventAsync>,73}7475impl ExportState {76fn new(ex: &Executor) -> Result<(Self, Event)> {77let fault_event = Event::new().context("failed to create fault_event")?;78let fault_resolved_event = Event::new().context("failed to create resolve event")?;7980Ok((81Self {82exported: Vec::new(),83fault_event: fault_event84.try_clone()85.context("failed to clone fault event")?,86fault_resolved_event_internal: fault_resolved_event87.try_clone()88.context("failed to clone resolve event")?,89fault_resolved_event_external: Some(90EventAsync::new(fault_resolved_event, ex)91.context("failed to create async resolve event")?,92),93},94fault_event,95))96}9798fn on_fault(&mut self) -> Option<EventAsync> {99let ret = self.fault_resolved_event_external.take();100if ret.is_some() {101self.fault_event.signal().expect("failed to signal fault");102}103ret104}105106fn can_export(&self) -> bool {107self.fault_resolved_event_external.is_some()108}109}110111// A basic iommu. It is designed as a building block for virtio-iommu.112pub struct BasicMemoryMapper {113maps: BTreeMap<u64, MappingInfo>, // key = MappingInfo.iova114mask: u64,115id: u32,116export_state: Option<ExportState>,117}118119pub enum RemoveMapResult {120// The removal was successful. 
If the event is Some, it must be waited on before121// informing the guest that the unmapping completed.122Success(Option<EventAsync>),123// The removal failed because the range partially overlapped a mapping.124OverlapFailure,125}126127#[derive(PartialEq, Eq, Debug)]128pub enum AddMapResult {129Ok,130OverlapFailure,131}132133/// A generic interface for vfio and other iommu backends134///135/// This interface includes APIs to supports allowing clients within crosvm (e.g.136/// the VVU proxy) which are configured to sit behind a virtio-iommu device to137/// access memory via IO virtual address (IOVA). This is done by exporting mapped138/// memory to the client. The virtio-iommu device can manage many mappers139/// simultaneously. The current implementation has a 1-to-1 relationship between140/// mappers and clients, although this may be extended to 1-to-N to fully support141/// the virtio-iommu API.142///143/// Clients must only access memory while it is mapped into the virtio-iommu device.144/// As such, this interface has a concept of an "IOMMU fault". An IOMMU fault is145/// triggered when the guest removes a mapping that includes memory that is exported146/// but not yet released. This includes if |reset_domain| is called while any memory147/// is exported. When an IOMMU fault occurs, the event returned by148/// |start_export_session| is signaled, and the client must immediately release any149/// exported memory.150///151/// From the virtio-iommu's perspective, if |remove_map| or |reset_domain| triggers152/// an IOMMU fault, then an eventfd will be returned. It must wait on that event153/// until all exported regions have been released, at which point it can complete154/// the virtio request that triggered the fault.155///156/// As such, the flow of a fault is:157/// 1) The guest sends an virtio-iommu message that triggers a fault. 
///    Faults can be triggered by unmap or detach messages, or by attach messages if such
///    messages are re-attaching an endpoint to a new domain. One example of a guest event
///    that can trigger such a message is a userspace VVU device process crashing and
///    triggering the guest kernel to re-attach the VVU device to the null endpoint.
/// 2) The viommu device removes an exported mapping from the mapper.
/// 3) The mapper signals the IOMMU fault eventfd and returns the fault resolution event to the
///    viommu device.
/// 4) The viommu device starts waiting on the fault resolution event. Note that although the
///    viommu device and mapper are both running on the same executor, this wait is async. This
///    means that although further processing of virtio-iommu requests is paused, the mapper
///    continues to run.
/// 5) The client receives the IOMMU fault.
/// 6) The client releases all exported regions.
/// 7) Once the mapper receives the final release message from the client, it signals the fault
///    resolution event that the viommu device is waiting on.
/// 8) The viommu device finishes processing the original virtio iommu request and sends a reply
///    to the guest.
pub trait MemoryMapper: Send {
    /// Creates a new mapping. If the mapping overlaps with an existing
    /// mapping, returns Ok(AddMapResult::OverlapFailure).
    fn add_map(&mut self, new_map: MappingInfo) -> Result<AddMapResult>;

    /// Removes all mappings within the specified range.
    fn remove_map(&mut self, iova_start: u64, size: u64) -> Result<RemoveMapResult>;

    /// Returns the address mask of this mapper.
    fn get_mask(&self) -> Result<u64>;

    /// Whether or not endpoints can be safely detached from this mapper.
    fn supports_detach(&self) -> bool;
    /// Resets the mapper's domain back into its initial state. Only necessary
    /// if |supports_detach| returns true.
    fn reset_domain(&mut self) -> Option<EventAsync> {
        None
    }

    /// Gets an identifier for the MemoryMapper instance. Must be unique among
    /// instances of the same trait implementation.
    fn id(&self) -> u32;

    /// Starts an export session with the mapper.
    ///
    /// Returns an event which is signaled if exported memory is unmapped (i.e. if
    /// a fault occurs). Once a fault occurs, no new regions may be exported for
    /// that session. The client must watch for this event and immediately release
    /// all exported regions.
    ///
    /// Only one session can be active at a time. A new session can only be created if
    /// the previous session has no remaining exported regions.
    fn start_export_session(&mut self, _ex: &Executor) -> Result<Event> {
        bail!("not supported");
    }

    /// Exports the specified IO region.
    ///
    /// # Safety
    ///
    /// The memory in the region specified by hva and size must be
    /// memory external to rust.
    unsafe fn vfio_dma_map(
        &mut self,
        _iova: u64,
        _hva: u64,
        _size: u64,
        _prot: Protection,
    ) -> Result<AddMapResult> {
        bail!("not supported");
    }

    /// Multiple MemRegions should be returned when the gpa is discontiguous or perms are
    /// different.
    fn export(&mut self, _iova: u64, _size: u64) -> Result<Vec<MemRegion>> {
        bail!("not supported");
    }

    /// Releases a previously exported region.
    ///
    /// If a given IO region is exported multiple times, it must be released multiple times.
    fn release(&mut self, _iova: u64, _size: u64) -> Result<()> {
        bail!("not supported");
    }
}

pub trait MemoryMapperTrait: MemoryMapper + AsRawDescriptors + Any {}
impl<T: MemoryMapper + AsRawDescriptors + Any> MemoryMapperTrait for T {}

impl BasicMemoryMapper {
    /// Creates an empty mapper with the given address mask and a unique id.
    pub fn new(mask: u64) -> BasicMemoryMapper {
        // Process-wide counter so every mapper instance gets a distinct id.
        static NEXT_ID: AtomicU32 = AtomicU32::new(0);
        BasicMemoryMapper {
            maps: BTreeMap::new(),
            mask,
            id: NEXT_ID.fetch_add(1, Ordering::Relaxed),
            export_state: None,
        }
    }

    #[cfg(test)]
    pub fn len(&self) -> usize {
        self.maps.len()
    }

    #[cfg(test)]
    pub fn is_empty(&self) -> bool
    {
        self.maps.is_empty()
    }
}

impl MemoryMapper for BasicMemoryMapper {
    fn add_map(&mut self, new_map: MappingInfo) -> Result<AddMapResult> {
        if new_map.size == 0 {
            bail!("can't map 0 sized region");
        }
        let new_iova_end = new_map
            .iova
            .checked_add(new_map.size)
            .context("iova overflow")?;
        new_map
            .gpa
            .checked_add(new_map.size)
            .context("gpa overflow")?;
        // Existing maps never overlap each other, so only the map with the
        // largest iova below new_iova_end can overlap the new mapping.
        let mut iter = self.maps.range(..new_iova_end);
        if let Some((_, map)) = iter.next_back() {
            if map.iova + map.size > new_map.iova {
                return Ok(AddMapResult::OverlapFailure);
            }
        }
        self.maps.insert(new_map.iova, new_map);
        Ok(AddMapResult::Ok)
    }

    fn remove_map(&mut self, iova_start: u64, size: u64) -> Result<RemoveMapResult> {
        if size == 0 {
            bail!("can't unmap 0 sized region");
        }
        let iova_end = iova_start.checked_add(size).context("iova overflow")?;

        // So that invalid requests can be rejected w/o modifying things, check
        // for partial overlap before removing the maps.
        let mut to_be_removed = Vec::new();
        for (key, map) in self.maps.range(..iova_end).rev() {
            let map_iova_end = map.iova + map.size;
            if map_iova_end <= iova_start {
                // no overlap
                break;
            }
            if iova_start <= map.iova && map_iova_end <= iova_end {
                to_be_removed.push(*key);
            } else {
                // Partially covered mapping: reject without removing anything.
                return Ok(RemoveMapResult::OverlapFailure);
            }
        }
        for key in to_be_removed {
            self.maps.remove(&key).expect("map should contain key");
        }
        // If the unmapped range intersects anything that is still exported,
        // raise a fault so the client releases its exports first.
        if let Some(export_state) = self.export_state.as_mut() {
            let removed = AddressRange::from_start_and_size(iova_start, size).unwrap();
            for export in &export_state.exported {
                if export.overlaps(removed) {
                    return Ok(RemoveMapResult::Success(export_state.on_fault()));
                }
            }
        }
        Ok(RemoveMapResult::Success(None))
    }

    fn get_mask(&self) -> Result<u64> {
        Ok(self.mask)
    }

    fn supports_detach(&self) -> bool {
        true
    }

    fn reset_domain(&mut self) -> Option<EventAsync> {
        self.maps.clear();
        // Dropping all mappings while exports are outstanding is a fault.
        if let Some(export_state) = self.export_state.as_mut() {
            if !export_state.exported.is_empty() {
                return export_state.on_fault();
            }
        }
        None
    }

    fn id(&self) -> u32 {
        self.id
    }

    fn start_export_session(&mut self, ex: &Executor) -> Result<Event> {
        if let Some(export_state) = self.export_state.as_ref() {
            if !export_state.exported.is_empty() {
                bail!("previous export session still active");
            }
        }

        let (export_state, fault_event) = ExportState::new(ex)?;
        self.export_state = Some(export_state);
        Ok(fault_event)
    }

    fn export(&mut self, iova: u64, size: u64) -> Result<Vec<MemRegion>> {
        let export_state = self.export_state.as_mut().context("no export state")?;
        if !export_state.can_export() {
            bail!("broken export state");
        }
        if size == 0 {
            bail!("can't translate 0 sized region");
        }

        // Regions of contiguous iovas and gpas, and identical permission are merged
        let iova_end = iova.checked_add(size).context("iova overflow")?;
        let mut iter = self.maps.range(..iova_end);
        let mut last_iova = iova_end;
        let mut regions: Vec<MemRegion> = Vec::new();
        // Walk mappings from highest iova downwards until the map containing
        // `iova` is reached; `last_iova` tracks the lowest address translated
        // so far, so a gap between consecutive maps is detectable.
        while let Some((_, map)) = iter.next_back() {
            if last_iova > map.iova + map.size {
                // Gap in the iova space: the requested range is not fully mapped.
                break;
            }
            let mut new_region = true;

            // This is the last region to be inserted / first to be returned when iova >= map.iova
            let region_len = last_iova - std::cmp::max::<u64>(map.iova, iova);
            if let Some(last) = regions.last_mut() {
                // Merge with the previously pushed region when the gpa range is
                // contiguous (walking backwards) and the protection matches.
                if map.gpa.unchecked_add(map.size) == last.gpa && map.prot == last.prot {
                    last.gpa = map.gpa;
                    last.len += region_len;
                    new_region = false;
                }
            }
            if new_region {
                // If this is the only region to be returned, region_len == size (arg of this
                // function)
                // iova_end = iova + size
                // last_iova = iova_end
                // region_len = last_iova - max(map.iova, iova)
                //            = iova + size - iova
                //            = size
                regions.push(MemRegion {
                    gpa: map.gpa,
                    len: region_len,
                    prot: map.prot,
                });
            }
            if iova >= map.iova {
                // Reached the map containing the start of the requested range;
                // regions were accumulated back-to-front, so flip them.
                regions.reverse();
                // The gpa of the first region has to be offset
be offseted402regions[0].gpa = map403.gpa404.checked_add(iova - map.iova)405.context("gpa overflow")?;406407export_state408.exported409.push(AddressRange::from_start_and_end(iova, iova_end - 1));410411return Ok(regions);412}413last_iova = map.iova;414}415416Err(anyhow!("invalid iova {:x} {:x}", iova, size))417}418419fn release(&mut self, iova: u64, size: u64) -> Result<()> {420let to_remove = AddressRange::from_start_and_size(iova, size).context("iova overflow")?;421let state = self.export_state.as_mut().context("no export state")?;422423match state.exported.iter().position(|r| r == &to_remove) {424Some(idx) => {425state.exported.swap_remove(idx);426}427None => {428warn!("tried to release unknown range: {:?}", to_remove);429return Ok(());430}431}432433if state.exported.is_empty() && state.fault_resolved_event_external.is_none() {434state435.fault_resolved_event_internal436.signal()437.expect("failed to resolve fault");438}439440Ok(())441}442}443444impl AsRawDescriptors for BasicMemoryMapper {445fn as_raw_descriptors(&self) -> Vec<RawDescriptor> {446Vec::new()447}448}449450#[cfg(test)]451mod tests {452use std::fmt::Debug;453454use super::*;455456fn assert_overlap_failure(val: RemoveMapResult) {457match val {458RemoveMapResult::OverlapFailure => (),459_ => unreachable!(),460}461}462463#[test]464fn test_mapping_info() {465// Overflow466MappingInfo::new(u64::MAX - 1, GuestAddress(1), 2, Protection::read()).unwrap_err();467MappingInfo::new(1, GuestAddress(u64::MAX - 1), 2, Protection::read()).unwrap_err();468MappingInfo::new(u64::MAX, GuestAddress(1), 2, Protection::read()).unwrap_err();469MappingInfo::new(1, GuestAddress(u64::MAX), 2, Protection::read()).unwrap_err();470MappingInfo::new(5, GuestAddress(5), u64::MAX, Protection::read()).unwrap_err();471// size = 0472MappingInfo::new(1, GuestAddress(5), 0, Protection::read()).unwrap_err();473}474475#[test]476fn test_map_overlap() {477let mut mapper = 
            BasicMemoryMapper::new(u64::MAX);
        mapper
            .add_map(
                MappingInfo::new(10, GuestAddress(1000), 10, Protection::read_write()).unwrap(),
            )
            .unwrap();
        assert_eq!(
            mapper
                .add_map(
                    MappingInfo::new(14, GuestAddress(1000), 1, Protection::read_write()).unwrap()
                )
                .unwrap(),
            AddMapResult::OverlapFailure
        );
        assert_eq!(
            mapper
                .add_map(
                    MappingInfo::new(0, GuestAddress(1000), 12, Protection::read_write()).unwrap()
                )
                .unwrap(),
            AddMapResult::OverlapFailure
        );
        assert_eq!(
            mapper
                .add_map(
                    MappingInfo::new(16, GuestAddress(1000), 6, Protection::read_write()).unwrap()
                )
                .unwrap(),
            AddMapResult::OverlapFailure
        );
        assert_eq!(
            mapper
                .add_map(
                    MappingInfo::new(5, GuestAddress(1000), 20, Protection::read_write()).unwrap()
                )
                .unwrap(),
            AddMapResult::OverlapFailure
        );
    }

    #[test]
    // This test is taken from the virtio_iommu spec with translate() calls added
    fn test_map_unmap() {
        let ex = Executor::new().expect("Failed to create an executor");
        // #1
        {
            let mut mapper = BasicMemoryMapper::new(u64::MAX);
            mapper.remove_map(0, 4).unwrap();
        }
        // #2
        {
            let mut mapper = BasicMemoryMapper::new(u64::MAX);
            let _ = mapper.start_export_session(&ex);
            mapper
                .add_map(
                    MappingInfo::new(0, GuestAddress(1000), 9, Protection::read_write()).unwrap(),
                )
                .unwrap();
            assert_eq!(
                mapper.export(0, 1).unwrap()[0],
                MemRegion {
                    gpa: GuestAddress(1000),
                    len: 1,
                    prot: Protection::read_write()
                }
            );
            assert_eq!(
                mapper.export(8, 1).unwrap()[0],
                MemRegion {
                    gpa: GuestAddress(1008),
                    len: 1,
                    prot: Protection::read_write()
                }
            );
            mapper.export(9, 1).unwrap_err();
            mapper.remove_map(0, 9).unwrap();
            mapper.export(0, 1).unwrap_err();
        }
        // #3
        {
            let mut mapper = BasicMemoryMapper::new(u64::MAX);
            let _ = mapper.start_export_session(&ex);
            mapper
                .add_map(
                    MappingInfo::new(0, GuestAddress(1000), 4, Protection::read_write()).unwrap(),
                )
                .unwrap();
            mapper
                .add_map(
                    MappingInfo::new(5, GuestAddress(50), 4, Protection::read_write()).unwrap(),
                )
                .unwrap();
            assert_eq!(
                mapper.export(0, 1).unwrap()[0],
                MemRegion {
                    gpa: GuestAddress(1000),
                    len: 1,
                    prot: Protection::read_write()
                }
            );
            assert_eq!(
                mapper.export(6, 1).unwrap()[0],
                MemRegion {
                    gpa: GuestAddress(51),
                    len: 1,
                    prot: Protection::read_write()
                }
            );
            mapper.remove_map(0, 9).unwrap();
            mapper.export(0, 1).unwrap_err();
            mapper.export(6, 1).unwrap_err();
        }
        // #4
        {
            let mut mapper = BasicMemoryMapper::new(u64::MAX);
            let _ = mapper.start_export_session(&ex);
            mapper
                .add_map(
                    MappingInfo::new(0, GuestAddress(1000), 9, Protection::read_write()).unwrap(),
                )
                .unwrap();
            assert_overlap_failure(mapper.remove_map(0, 4).unwrap());
            assert_eq!(
                mapper.export(5, 1).unwrap()[0],
                MemRegion {
                    gpa: GuestAddress(1005),
                    len: 1,
                    prot: Protection::read_write()
                }
            );
        }
        // #5
        {
            let mut mapper = BasicMemoryMapper::new(u64::MAX);
            let _ = mapper.start_export_session(&ex);
            mapper
                .add_map(
                    MappingInfo::new(0, GuestAddress(1000), 4, Protection::read_write()).unwrap(),
                )
                .unwrap();
            mapper
                .add_map(
                    MappingInfo::new(5, GuestAddress(50), 4, Protection::read_write()).unwrap(),
                )
                .unwrap();
            assert_eq!(
                mapper.export(0, 1).unwrap()[0],
                MemRegion {
                    gpa: GuestAddress(1000),
                    len: 1,
                    prot: Protection::read_write()
                }
            );
            assert_eq!(
                mapper.export(5, 1).unwrap()[0],
                MemRegion {
                    gpa: GuestAddress(50),
                    len: 1,
                    prot: Protection::read_write()
                }
            );
            mapper.remove_map(0, 4).unwrap();
            mapper.export(0, 1).unwrap_err();
            mapper.export(4, 1).unwrap_err();
            mapper.export(5, 1).unwrap_err();
        }
        // #6
        {
            let mut mapper = BasicMemoryMapper::new(u64::MAX);
            let _ = mapper.start_export_session(&ex);
            mapper
                .add_map(
                    MappingInfo::new(0, GuestAddress(1000), 4, Protection::read_write()).unwrap(),
                )
                .unwrap();
            assert_eq!(
                mapper.export(0, 1).unwrap()[0],
                MemRegion {
                    gpa: GuestAddress(1000),
                    len: 1,
                    prot: Protection::read_write()
                }
            );
            mapper.export(9, 1).unwrap_err();
            mapper.remove_map(0, 9).unwrap();
            mapper.export(0, 1).unwrap_err();
            mapper.export(9, 1).unwrap_err();
        }
        // #7
        {
            let mut mapper = BasicMemoryMapper::new(u64::MAX);
            let _ = mapper.start_export_session(&ex);
            mapper
                .add_map(MappingInfo::new(0, GuestAddress(1000), 4, Protection::read()).unwrap())
                .unwrap();
            mapper
                .add_map(
                    MappingInfo::new(10, GuestAddress(50), 4, Protection::read_write()).unwrap(),
                )
                .unwrap();
            assert_eq!(
                mapper.export(0, 1).unwrap()[0],
                MemRegion {
                    gpa: GuestAddress(1000),
                    len: 1,
                    prot: Protection::read()
                }
            );
            assert_eq!(
                mapper.export(3, 1).unwrap()[0],
                MemRegion {
                    gpa: GuestAddress(1003),
                    len: 1,
                    prot: Protection::read()
                }
            );
            mapper.export(4, 1).unwrap_err();
            assert_eq!(
                mapper.export(10, 1).unwrap()[0],
                MemRegion {
                    gpa: GuestAddress(50),
                    len: 1,
                    prot: Protection::read_write()
                }
            );
            assert_eq!(
                mapper.export(13, 1).unwrap()[0],
                MemRegion {
                    gpa: GuestAddress(53),
                    len: 1,
                    prot: Protection::read_write()
                }
            );
            mapper.remove_map(0, 14).unwrap();
            mapper.export(0, 1).unwrap_err();
            mapper.export(3, 1).unwrap_err();
            mapper.export(4, 1).unwrap_err();
            mapper.export(10, 1).unwrap_err();
            mapper.export(13, 1).unwrap_err();
        }
    }
    #[test]
    fn test_remove_map() {
        let mut mapper = BasicMemoryMapper::new(u64::MAX);
        mapper
            .add_map(MappingInfo::new(1, GuestAddress(1000), 4, Protection::read()).unwrap())
            .unwrap();
        mapper
            .add_map(MappingInfo::new(5, GuestAddress(50), 4, Protection::read_write()).unwrap())
            .unwrap();
        mapper
            .add_map(MappingInfo::new(9, GuestAddress(50), 4, Protection::read_write()).unwrap())
            .unwrap();
        assert_eq!(mapper.len(), 3);
        assert_overlap_failure(mapper.remove_map(0, 6).unwrap());
        assert_eq!(mapper.len(), 3);
        assert_overlap_failure(mapper.remove_map(1, 5).unwrap());
        assert_eq!(mapper.len(), 3);
        assert_overlap_failure(mapper.remove_map(1, 9).unwrap());
        assert_eq!(mapper.len(), 3);
        assert_overlap_failure(mapper.remove_map(6, 4).unwrap());
        assert_eq!(mapper.len(), 3);
        assert_overlap_failure(mapper.remove_map(6, 14).unwrap());
        assert_eq!(mapper.len(), 3);
        mapper.remove_map(5, 4).unwrap();
        assert_eq!(mapper.len(), 2);
        assert_overlap_failure(mapper.remove_map(1, 9).unwrap());
        assert_eq!(mapper.len(), 2);
        mapper.remove_map(0, 15).unwrap();
        assert_eq!(mapper.len(), 0);
    }

    // Element-wise equality with per-element failure messages.
    fn assert_vec_eq<T: std::cmp::PartialEq + Debug>(a: Vec<T>, b: Vec<T>) {
        assert_eq!(a.len(), b.len());
        for (x, y) in a.into_iter().zip(b.into_iter()) {
            assert_eq!(x, y);
        }
    }

    #[test]
    fn test_translate_len() {
        let mut mapper = BasicMemoryMapper::new(u64::MAX);
        let ex = Executor::new().expect("Failed to create an executor");
        let _ = mapper.start_export_session(&ex);
        // [1, 5) -> [1000, 1004)
        mapper
            .add_map(MappingInfo::new(1, GuestAddress(1000), 4, Protection::read()).unwrap())
            .unwrap();
        mapper.export(1, 0).unwrap_err();
        assert_eq!(
            mapper.export(1, 1).unwrap()[0],
            MemRegion {
                gpa: GuestAddress(1000),
                len: 1,
                prot: Protection::read()
            }
        );
        assert_eq!(
            mapper.export(1, 2).unwrap()[0],
            MemRegion {
                gpa: GuestAddress(1000),
                len: 2,
                prot: Protection::read()
            }
        );
        assert_eq!(
            mapper.export(1, 3).unwrap()[0],
            MemRegion {
                gpa: GuestAddress(1000),
                len: 3,
                prot: Protection::read()
            }
        );
        assert_eq!(
            mapper.export(2, 1).unwrap()[0],
            MemRegion {
                gpa: GuestAddress(1001),
                len: 1,
                prot: Protection::read()
            }
        );
        assert_eq!(
            mapper.export(2, 2).unwrap()[0],
            MemRegion {
                gpa: GuestAddress(1001),
                len: 2,
                prot: Protection::read()
            }
        );
        mapper.export(1, 5).unwrap_err();
        // [1, 9) -> [1000, 1008)
        mapper
            .add_map(MappingInfo::new(5, GuestAddress(1004), 4, Protection::read()).unwrap())
            .unwrap();
        // Spanned across 2 maps
        assert_eq!(
            mapper.export(2, 5).unwrap()[0],
            MemRegion {
                gpa: GuestAddress(1001),
                len: 5,
                prot: Protection::read()
            }
        );
        assert_eq!(
            mapper.export(2, 6).unwrap()[0],
            MemRegion {
                gpa: GuestAddress(1001),
                len: 6,
                prot: Protection::read()
            }
        );
        assert_eq!(
            mapper.export(2, 7).unwrap()[0],
            MemRegion {
                gpa: GuestAddress(1001),
                len: 7,
                prot: Protection::read()
            }
        );
        mapper.export(2, 8).unwrap_err();
        mapper.export(3, 10).unwrap_err();
        // [1, 9) -> [1000, 1008), [11, 17) -> [1010, 1016)
        mapper
            .add_map(MappingInfo::new(11, GuestAddress(1010), 6, Protection::read()).unwrap())
            .unwrap();
        // Discontiguous iova
        mapper.export(3, 10).unwrap_err();
        // [1, 17) -> [1000, 1016)
        mapper
            .add_map(MappingInfo::new(9, GuestAddress(1008), 2, Protection::read()).unwrap())
            .unwrap();
        // Spanned across 4 maps
        assert_eq!(
            mapper.export(3, 10).unwrap()[0],
            MemRegion {
                gpa: GuestAddress(1002),
                len: 10,
                prot: Protection::read()
            }
        );
        assert_eq!(
            mapper.export(1, 16).unwrap()[0],
            MemRegion {
                gpa: GuestAddress(1000),
                len: 16,
                prot: Protection::read()
            }
        );
        mapper.export(1, 17).unwrap_err();
        mapper.export(0, 16).unwrap_err();
        // [0, 1) -> [5, 6), [1, 17) -> [1000, 1016)
        mapper
            .add_map(MappingInfo::new(0, GuestAddress(5), 1, Protection::read()).unwrap())
            .unwrap();
        assert_eq!(
            mapper.export(0, 1).unwrap()[0],
            MemRegion {
                gpa: GuestAddress(5),
                len: 1,
                prot: Protection::read()
            }
        );
        // Discontiguous gpa
        assert_vec_eq(
            mapper.export(0, 2).unwrap(),
            vec![
                MemRegion {
                    gpa: GuestAddress(5),
                    len: 1,
                    prot: Protection::read(),
                },
                MemRegion {
                    gpa: GuestAddress(1000),
                    len: 1,
                    prot: Protection::read(),
                },
            ],
        );
        assert_vec_eq(
            mapper.export(0, 16).unwrap(),
            vec![
                MemRegion {
                    gpa: GuestAddress(5),
                    len: 1,
                    prot: Protection::read(),
                },
                MemRegion {
                    gpa: GuestAddress(1000),
                    len: 15,
                    prot: Protection::read(),
                },
            ],
        );
        // [0, 1) -> [5, 6), [1, 17) -> [1000, 1016), [17, 18) -> [1016, 1017) <RW>
        mapper
            .add_map(MappingInfo::new(17, GuestAddress(1016), 2, Protection::read_write()).unwrap())
            .unwrap();
        // Contiguous iova and gpa, but different perm
        assert_vec_eq(
            mapper.export(1, 17).unwrap(),
            vec![
                MemRegion {
                    gpa: GuestAddress(1000),
                    len: 16,
                    prot: Protection::read(),
                },
                MemRegion {
                    gpa: GuestAddress(1016),
                    len: 1,
                    prot: Protection::read_write(),
                },
            ],
        );
        // Contiguous iova and gpa, but different perm
        assert_vec_eq(
            mapper.export(2, 16).unwrap(),
            vec![
                MemRegion {
                    gpa: GuestAddress(1001),
                    len: 15,
                    prot: Protection::read(),
                },
                MemRegion {
                    gpa: GuestAddress(1016),
                    len: 1,
                    prot: Protection::read_write(),
                },
            ],
        );
        assert_vec_eq(
            mapper.export(2, 17).unwrap(),
            vec![
                MemRegion {
                    gpa: GuestAddress(1001),
                    len: 15,
                    prot: Protection::read(),
                },
                MemRegion {
                    gpa: GuestAddress(1016),
                    len: 2,
                    prot: Protection::read_write(),
                },
            ],
        );
        mapper.export(2, 500).unwrap_err();
        mapper.export(500, 5).unwrap_err();
    }
}