Path: blob/main/cranelift/jit/src/memory/arena.rs
use std::io;
use std::mem::ManuallyDrop;
use std::ptr;

use cranelift_module::ModuleResult;

use super::{BranchProtection, JITMemoryProvider};

fn align_up(addr: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (addr + align - 1) & !(align - 1)
}

#[derive(Debug)]
struct Segment {
    ptr: *mut u8,
    len: usize,
    position: usize,
    target_prot: region::Protection,
    finalized: bool,
}

impl Segment {
    fn new(ptr: *mut u8, len: usize, target_prot: region::Protection) -> Self {
        // Segments are created on page boundaries.
        debug_assert_eq!(ptr as usize % region::page::size(), 0);
        debug_assert_eq!(len % region::page::size(), 0);
        let mut segment = Segment {
            ptr,
            len,
            target_prot,
            position: 0,
            finalized: false,
        };
        // Set the segment to read-write for initialization. The target
        // permissions will be applied in `finalize`.
        segment.set_rw();
        segment
    }

    fn set_rw(&mut self) {
        unsafe {
            region::protect(self.ptr, self.len, region::Protection::READ_WRITE)
                .expect("unable to change memory protection for jit memory segment");
        }
    }

    fn finalize(&mut self, branch_protection: BranchProtection) {
        if self.finalized {
            return;
        }

        // Executable regions are handled separately to correctly deal with
        // branch protection and cache coherence.
        if self.target_prot == region::Protection::READ_EXECUTE {
            super::set_readable_and_executable(self.ptr, self.len, branch_protection)
                .expect("unable to set memory protection for jit memory segment");
        } else {
            unsafe {
                region::protect(self.ptr, self.len, self.target_prot)
                    .expect("unable to change memory protection for jit memory segment");
            }
        }
        self.finalized = true;
    }

    // Note: We do pointer arithmetic on the `ptr` passed to `Segment::new` here.
    // This assumes that `ptr` is valid for `len` bytes; otherwise this is UB.
    fn allocate(&mut self, size: usize, align: usize) -> *mut u8 {
        assert!(self.has_space_for(size, align));
        self.position = align_up(self.position, align);
        let ptr = unsafe { self.ptr.add(self.position) };
        self.position += size;
        ptr
    }

    fn has_space_for(&self, size: usize, align: usize) -> bool {
        !self.finalized && align_up(self.position, align) + size <= self.len
    }
}

/// `ArenaMemoryProvider` allocates segments from a contiguous memory region
/// that is reserved up-front.
///
/// The arena's memory is initially allocated with PROT_NONE and gradually
/// updated as the JIT requires more space. This approach allows for stable
/// addresses throughout the lifetime of the JIT.
///
/// Depending on the underlying platform, requesting large parts of the address
/// space to be allocated might fail. This implementation currently doesn't do
/// overcommit on Windows.
///
/// Note: Memory is leaked by default, so that function pointers remain valid
/// for the remainder of the program's life, unless
/// [`JITMemoryProvider::free_memory`] is called.
pub struct ArenaMemoryProvider {
    alloc: ManuallyDrop<Option<region::Allocation>>,
    ptr: *mut u8,
    size: usize,
    position: usize,
    segments: Vec<Segment>,
}

impl ArenaMemoryProvider {
    /// Create a new memory region with the given size.
    pub fn new_with_size(reserve_size: usize) -> Result<Self, region::Error> {
        let size = align_up(reserve_size, region::page::size());
        // Note: The region crate uses `MEM_RESERVE | MEM_COMMIT` on Windows.
        // This means that allocations that exceed the page file plus system
        // memory will fail here.
        // https://github.com/darfink/region-rs/pull/34
        let mut alloc = region::alloc(size, region::Protection::NONE)?;
        let ptr = alloc.as_mut_ptr();

        Ok(Self {
            alloc: ManuallyDrop::new(Some(alloc)),
            segments: Vec::new(),
            ptr,
            size,
            position: 0,
        })
    }

    fn allocate(
        &mut self,
        size: usize,
        align: u64,
        protection: region::Protection,
    ) -> io::Result<*mut u8> {
        let align = usize::try_from(align).expect("alignment too big");
        assert!(
            align <= region::page::size(),
            "alignment over page size is not supported"
        );

        // Note: Add a fast path without a linear scan over segments here?

        // Can we fit this allocation into an existing segment?
        if let Some(segment) = self.segments.iter_mut().find(|seg| {
            seg.target_prot == protection && !seg.finalized && seg.has_space_for(size, align)
        }) {
            return Ok(segment.allocate(size, align));
        }

        // Can we resize the last segment?
        if let Some(segment) = self.segments.iter_mut().last() {
            if segment.target_prot == protection && !segment.finalized {
                let additional_size = align_up(size, region::page::size());

                // If our reserved arena can fit the additional size, extend the
                // last segment.
                if self.position + additional_size <= self.size {
                    segment.len += additional_size;
                    segment.set_rw();
                    self.position += additional_size;
                    return Ok(segment.allocate(size, align));
                }
            }
        }

        // Allocate a new segment for the given size and alignment.
        self.allocate_segment(size, protection)?;
        let i = self.segments.len() - 1;
        Ok(self.segments[i].allocate(size, align))
    }

    fn allocate_segment(
        &mut self,
        size: usize,
        target_prot: region::Protection,
    ) -> Result<(), io::Error> {
        let size = align_up(size, region::page::size());
        let ptr = unsafe { self.ptr.add(self.position) };
        if self.position + size > self.size {
            return Err(io::Error::new(
                io::ErrorKind::Other,
                "pre-allocated jit memory region exhausted",
            ));
        }
        self.position += size;
        self.segments.push(Segment::new(ptr, size, target_prot));
        Ok(())
    }

    pub(crate) fn finalize(&mut self, branch_protection: BranchProtection) {
        for segment in &mut self.segments {
            segment.finalize(branch_protection);
        }

        // Flush any in-flight instructions from the pipeline.
        wasmtime_jit_icache_coherence::pipeline_flush_mt().expect("Failed pipeline flush");
    }

    /// Frees the allocated memory region, which would be leaked otherwise.
    /// Likely to invalidate existing function pointers, causing unsafety.
    pub(crate) unsafe fn free_memory(&mut self) {
        if self.ptr == ptr::null_mut() {
            return;
        }
        self.segments.clear();
        // Drop the allocation, freeing the memory.
        let _: Option<region::Allocation> = self.alloc.take();
        self.ptr = ptr::null_mut();
    }
}

impl Drop for ArenaMemoryProvider {
    fn drop(&mut self) {
        if self.ptr == ptr::null_mut() {
            return;
        }
        let is_live = self.segments.iter().any(|seg| seg.finalized);
        if !is_live {
            // Only free the memory if it hasn't been finalized yet.
            // Otherwise, leak it since the JIT memory may still be in use.
            unsafe { self.free_memory() };
        }
    }
}

impl JITMemoryProvider for ArenaMemoryProvider {
    fn allocate_readexec(&mut self, size: usize, align: u64) -> io::Result<*mut u8> {
        self.allocate(size, align, region::Protection::READ_EXECUTE)
    }

    fn allocate_readwrite(&mut self, size: usize, align: u64) -> io::Result<*mut u8> {
        self.allocate(size, align, region::Protection::READ_WRITE)
    }

    fn allocate_readonly(&mut self, size: usize, align: u64) -> io::Result<*mut u8> {
        self.allocate(size, align, region::Protection::READ)
    }

    unsafe fn free_memory(&mut self) {
        self.free_memory();
    }

    fn finalize(&mut self, branch_protection: BranchProtection) -> ModuleResult<()> {
        self.finalize(branch_protection);
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn alignment_ok() {
        let mut arena = ArenaMemoryProvider::new_with_size(1 << 20).unwrap();

        for align_log2 in 0..8 {
            let align = 1usize << align_log2;
            for size in 1..128 {
                let ptr = arena.allocate_readwrite(size, align as u64).unwrap();
                // assert!(ptr.is_aligned_to(align));
                assert_eq!(ptr.addr() % align, 0);
            }
        }
    }

    #[test]
    #[cfg(all(target_pointer_width = "64", not(target_os = "windows")))]
    // Windows: See https://github.com/darfink/region-rs/pull/34
    fn large_virtual_allocation() {
        // We should be able to request 1 TiB of virtual address space on 64-bit
        // platforms. Physical memory should be committed as we go.
        let reserve_size = 1 << 40;
        let mut arena = ArenaMemoryProvider::new_with_size(reserve_size).unwrap();
        let ptr = arena.allocate_readwrite(1, 1).unwrap();
        assert_eq!(ptr.addr(), arena.ptr.addr());
        arena.finalize(BranchProtection::None);
        unsafe { ptr.write_volatile(42) };
        unsafe { arena.free_memory() };
    }

    #[test]
    fn over_capacity() {
        let mut arena = ArenaMemoryProvider::new_with_size(1 << 20).unwrap(); // 1 MiB

        let _ = arena.allocate_readwrite(900_000, 1).unwrap();
        let _ = arena.allocate_readwrite(200_000, 1).unwrap_err();
    }
}
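
// The sketch below is an illustrative addition, not part of the upstream file:
// it walks through the typical lifecycle of an `ArenaMemoryProvider` (reserve,
// allocate, finalize, free) using only the APIs defined above, mirroring the
// existing tests. The module name `usage_sketch` and the chosen sizes and
// alignments are arbitrary.
#[cfg(test)]
mod usage_sketch {
    use super::*;

    #[test]
    fn reserve_allocate_finalize_free() {
        // Reserve 16 MiB of address space up front. The reservation starts out
        // as PROT_NONE; pages only become read-write as segments are carved out.
        let mut arena = ArenaMemoryProvider::new_with_size(16 << 20).unwrap();

        // Request a writable data block and an (eventually) executable code
        // block. Both come out of the same contiguous reservation, so their
        // addresses stay stable for the lifetime of the arena.
        let data = arena.allocate_readwrite(64, 8).unwrap();
        let code = arena.allocate_readexec(128, 16).unwrap();
        assert_ne!(data, code);

        // Before `finalize`, every segment is read-write so it can be filled in.
        unsafe { data.write_volatile(0xAB) };

        // `finalize` applies the target protections (R / RW / RX) to all
        // segments and flushes the instruction pipeline; after this, the
        // read-execute segment is no longer writable.
        arena.finalize(BranchProtection::None);
        unsafe { assert_eq!(data.read_volatile(), 0xAB) };

        // Release the reservation. This invalidates the pointers handed out
        // above, which is why the call is `unsafe`.
        unsafe { arena.free_memory() };
    }
}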