Path: blob/main/cranelift/jit/src/memory/arena.rs
use std::io;
use std::mem::ManuallyDrop;
use std::ptr;

use cranelift_module::ModuleResult;

use super::{BranchProtection, JITMemoryKind, JITMemoryProvider};

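/// Rounds `addr` up to the next multiple of `align`, which must be a power
/// of two.
///
/// A quick illustration of the bit trick below (values chosen for
/// illustration only; not run as a doc-test):
///
/// ```ignore
/// assert_eq!(align_up(0x1001, 0x1000), 0x2000); // rounds up to the next page
/// assert_eq!(align_up(0x2000, 0x1000), 0x2000); // already aligned
/// assert_eq!(align_up(13, 8), 16);
/// ```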
fn align_up(addr: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (addr + align - 1) & !(align - 1)
}

#[derive(Debug)]
struct Segment {
    ptr: *mut u8,
    len: usize,
    position: usize,
    target_prot: region::Protection,
    finalized: bool,
}

impl Segment {
    fn new(ptr: *mut u8, len: usize, target_prot: region::Protection) -> Self {
        // Segments are created on page boundaries.
        debug_assert_eq!(ptr as usize % region::page::size(), 0);
        debug_assert_eq!(len % region::page::size(), 0);
        let mut segment = Segment {
            ptr,
            len,
            target_prot,
            position: 0,
            finalized: false,
        };
        // Set the segment to read-write for initialization. The target
        // permissions will be applied in `finalize`.
        segment.set_rw();
        segment
    }

    fn set_rw(&mut self) {
        unsafe {
            region::protect(self.ptr, self.len, region::Protection::READ_WRITE)
                .expect("unable to change memory protection for jit memory segment");
        }
    }

    fn finalize(&mut self, branch_protection: BranchProtection) {
        if self.finalized {
            return;
        }

        // Executable regions are handled separately to correctly deal with
        // branch protection and cache coherence.
        if self.target_prot == region::Protection::READ_EXECUTE {
            super::set_readable_and_executable(self.ptr, self.len, branch_protection)
                .expect("unable to set memory protection for jit memory segment");
        } else {
            unsafe {
                region::protect(self.ptr, self.len, self.target_prot)
                    .expect("unable to change memory protection for jit memory segment");
            }
        }
        self.finalized = true;
    }

    // Note: We do pointer arithmetic on the `ptr` passed to `Segment::new` here.
    // This assumes that `ptr` is valid for `len` bytes; otherwise this is UB.
    fn allocate(&mut self, size: usize, align: usize) -> *mut u8 {
        assert!(self.has_space_for(size, align));
        self.position = align_up(self.position, align);
        let ptr = unsafe { self.ptr.add(self.position) };
        self.position += size;
        ptr
    }

    fn has_space_for(&self, size: usize, align: usize) -> bool {
        !self.finalized && align_up(self.position, align) + size <= self.len
    }
}

/// `ArenaMemoryProvider` allocates segments from a contiguous memory region
/// that is reserved up-front.
///
/// The arena's memory is initially allocated with `PROT_NONE` and gradually
/// updated as the JIT requires more space. This approach allows for stable
/// addresses throughout the lifetime of the JIT.
///
/// Depending on the underlying platform, requesting large parts of the address
/// space to be allocated might fail. This implementation currently does not
/// overcommit on Windows.
///
/// Note: Memory is leaked by default so that function pointers remain valid
/// for the remainder of the program's life, unless
/// [`JITMemoryProvider::free_memory`] is called.
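///
/// A minimal usage sketch (not run as a doc-test). It only uses items from
/// this module together with the [`JITMemoryProvider`] trait implemented
/// below; the sizes and alignments are made up for the example:
///
/// ```ignore
/// // Reserve 16 MiB of address space up-front; pages start out as PROT_NONE.
/// let mut arena = ArenaMemoryProvider::new_with_size(16 << 20).unwrap();
///
/// // Carve out writable and executable chunks. Addresses remain stable.
/// let code = arena.allocate(4096, 16, JITMemoryKind::Executable).unwrap();
/// let data = arena.allocate(1024, 8, JITMemoryKind::Writable).unwrap();
///
/// // ... copy code and data into place while the segments are read-write ...
///
/// // Apply the target protections and flush the instruction pipeline.
/// arena.finalize(BranchProtection::None);
///
/// // Once no JIT-compiled code can run anymore, release the reservation.
/// unsafe { arena.free_memory() };
/// ```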
pub struct ArenaMemoryProvider {
    alloc: ManuallyDrop<Option<region::Allocation>>,
    ptr: *mut u8,
    size: usize,
    position: usize,
    segments: Vec<Segment>,
}

unsafe impl Send for ArenaMemoryProvider {}

impl ArenaMemoryProvider {
    /// Create a new memory region with the given size.
    pub fn new_with_size(reserve_size: usize) -> Result<Self, region::Error> {
        let size = align_up(reserve_size, region::page::size());
        // Note: The region crate uses `MEM_RESERVE | MEM_COMMIT` on Windows.
        // This means that allocations that exceed the page file plus system
        // memory will fail here.
        // https://github.com/darfink/region-rs/pull/34
        let mut alloc = region::alloc(size, region::Protection::NONE)?;
        let ptr = alloc.as_mut_ptr();

        Ok(Self {
            alloc: ManuallyDrop::new(Some(alloc)),
            segments: Vec::new(),
            ptr,
            size,
            position: 0,
        })
    }

    fn allocate_inner(
        &mut self,
        size: usize,
        align: u64,
        protection: region::Protection,
    ) -> io::Result<*mut u8> {
        let align = usize::try_from(align).expect("alignment too big");
        assert!(
            align <= region::page::size(),
            "alignment over page size is not supported"
        );

        // Note: Add a fast path without a linear scan over segments here?

        // Can we fit this allocation into an existing segment?
        if let Some(segment) = self.segments.iter_mut().find(|seg| {
            seg.target_prot == protection && !seg.finalized && seg.has_space_for(size, align)
        }) {
            return Ok(segment.allocate(size, align));
        }

        // Can we resize the last segment?
        if let Some(segment) = self.segments.iter_mut().last() {
            if segment.target_prot == protection && !segment.finalized {
                let additional_size = align_up(size, region::page::size());

                // If our reserved arena can fit the additional size, extend the
                // last segment.
                if self.position + additional_size <= self.size {
                    segment.len += additional_size;
                    segment.set_rw();
                    self.position += additional_size;
                    return Ok(segment.allocate(size, align));
                }
            }
        }

        // Allocate a new segment for the given size and alignment.
        self.allocate_segment(size, protection)?;
        let i = self.segments.len() - 1;
        Ok(self.segments[i].allocate(size, align))
    }

    fn allocate_segment(
        &mut self,
        size: usize,
        target_prot: region::Protection,
    ) -> Result<(), io::Error> {
        let size = align_up(size, region::page::size());
        let ptr = unsafe { self.ptr.add(self.position) };
        if self.position + size > self.size {
            return Err(io::Error::new(
                io::ErrorKind::Other,
                "pre-allocated jit memory region exhausted",
            ));
        }
        self.position += size;
        self.segments.push(Segment::new(ptr, size, target_prot));
        Ok(())
    }

    pub(crate) fn finalize(&mut self, branch_protection: BranchProtection) {
        for segment in &mut self.segments {
            segment.finalize(branch_protection);
        }

        // Flush any in-flight instructions from the pipeline.
        wasmtime_jit_icache_coherence::pipeline_flush_mt().expect("Failed pipeline flush");
    }

    /// Frees the allocated memory region, which would be leaked otherwise.
    /// Likely to invalidate existing function pointers, causing unsafety.
    pub(crate) unsafe fn free_memory(&mut self) {
        if self.ptr == ptr::null_mut() {
            return;
        }
        self.segments.clear();
        // Drop the allocation, freeing the memory.
        let _: Option<region::Allocation> = self.alloc.take();
        self.ptr = ptr::null_mut();
    }
}

impl Drop for ArenaMemoryProvider {
    fn drop(&mut self) {
        if self.ptr == ptr::null_mut() {
            return;
        }
        let is_live = self.segments.iter().any(|seg| seg.finalized);
        if !is_live {
            // Only free memory if it hasn't been finalized yet.
            // Otherwise, leak it since JIT memory may still be in use.
            unsafe { self.free_memory() };
        }
    }
}

impl JITMemoryProvider for ArenaMemoryProvider {
    fn allocate(&mut self, size: usize, align: u64, kind: JITMemoryKind) -> io::Result<*mut u8> {
        self.allocate_inner(
            size,
            align,
            match kind {
                JITMemoryKind::Executable => region::Protection::READ_EXECUTE,
                JITMemoryKind::Writable => region::Protection::READ_WRITE,
                JITMemoryKind::ReadOnly => region::Protection::READ,
            },
        )
    }

    unsafe fn free_memory(&mut self) {
        self.free_memory();
    }

    fn finalize(&mut self, branch_protection: BranchProtection) -> ModuleResult<()> {
        self.finalize(branch_protection);
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn alignment_ok() {
        let mut arena = ArenaMemoryProvider::new_with_size(1 << 20).unwrap();

        for align_log2 in 0..8 {
            let align = 1usize << align_log2;
            for size in 1..128 {
                let ptr = arena
                    .allocate(size, align as u64, JITMemoryKind::Writable)
                    .unwrap();
                // assert!(ptr.is_aligned_to(align));
                assert_eq!(ptr.addr() % align, 0);
            }
        }
    }

    #[test]
    #[cfg(all(target_pointer_width = "64", not(target_os = "windows")))]
    // Windows: See https://github.com/darfink/region-rs/pull/34
    fn large_virtual_allocation() {
        // We should be able to request 1TB of virtual address space on 64-bit
        // platforms. Physical memory should be committed as we go.
        let reserve_size = 1 << 40;
        let mut arena = ArenaMemoryProvider::new_with_size(reserve_size).unwrap();
        let ptr = arena.allocate(1, 1, JITMemoryKind::Writable).unwrap();
        assert_eq!(ptr.addr(), arena.ptr.addr());
        arena.finalize(BranchProtection::None);
        unsafe { ptr.write_volatile(42) };
        unsafe { arena.free_memory() };
    }

    #[test]
    fn over_capacity() {
        let mut arena = ArenaMemoryProvider::new_with_size(1 << 20).unwrap(); // 1 MB

        let _ = arena.allocate(900_000, 1, JITMemoryKind::Writable).unwrap();
        let _ = arena
            .allocate(200_000, 1, JITMemoryKind::Writable)
            .unwrap_err();
    }

    #[test]
    fn test_is_send() {
        fn assert_is_send<T: Send>() {}
        assert_is_send::<ArenaMemoryProvider>();
    }
}
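
// A small illustrative sketch of the bump-allocation strategy in
// `allocate_inner`: writable allocations that fit are served from the same
// segment, so the second pointer directly follows the first, while a request
// with a different protection kind starts a new segment. The sizes below are
// arbitrary example values.
#[cfg(test)]
mod bump_allocation_sketch {
    use super::*;

    #[test]
    fn consecutive_allocations_share_a_segment() {
        let mut arena = ArenaMemoryProvider::new_with_size(1 << 20).unwrap();

        // Both writable allocations fit into the first page-sized segment.
        let first = arena.allocate(100, 1, JITMemoryKind::Writable).unwrap();
        let second = arena.allocate(100, 1, JITMemoryKind::Writable).unwrap();
        assert_eq!(second.addr(), first.addr() + 100);
        assert_eq!(arena.segments.len(), 1);

        // A different protection kind cannot reuse the segment and gets a new one.
        let code = arena.allocate(64, 16, JITMemoryKind::Executable).unwrap();
        assert_eq!(code.addr() % 16, 0);
        assert_eq!(arena.segments.len(), 2);

        unsafe { arena.free_memory() };
    }
}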