GitHub Repository: bytecodealliance/wasmtime
Path: blob/main/cranelift/jit/src/memory/arena.rs
use std::io;
use std::mem::ManuallyDrop;
use std::ptr;

use cranelift_module::ModuleResult;

use super::{BranchProtection, JITMemoryProvider};

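/// Rounds `addr` up to the next multiple of `align`, which must be a power of
/// two.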
fn align_up(addr: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (addr + align - 1) & !(align - 1)
}

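/// A page-aligned range of the arena that hands out allocations with a single
/// target protection. Segments stay read-write while they are being filled;
/// `target_prot` is applied in `finalize`.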
#[derive(Debug)]
struct Segment {
    ptr: *mut u8,
    len: usize,
    position: usize,
    target_prot: region::Protection,
    finalized: bool,
}

impl Segment {
    fn new(ptr: *mut u8, len: usize, target_prot: region::Protection) -> Self {
        // Segments are created on page boundaries.
        debug_assert_eq!(ptr as usize % region::page::size(), 0);
        debug_assert_eq!(len % region::page::size(), 0);
        let mut segment = Segment {
            ptr,
            len,
            target_prot,
            position: 0,
            finalized: false,
        };
        // Set segment to read-write for initialization. The target permissions
        // will be applied in `finalize`.
        segment.set_rw();
        segment
    }

    fn set_rw(&mut self) {
        unsafe {
            region::protect(self.ptr, self.len, region::Protection::READ_WRITE)
                .expect("unable to change memory protection for jit memory segment");
        }
    }

    fn finalize(&mut self, branch_protection: BranchProtection) {
        if self.finalized {
            return;
        }

        // Executable regions are handled separately to correctly deal with
        // branch protection and cache coherence.
        if self.target_prot == region::Protection::READ_EXECUTE {
            super::set_readable_and_executable(self.ptr, self.len, branch_protection)
                .expect("unable to set memory protection for jit memory segment");
        } else {
            unsafe {
                region::protect(self.ptr, self.len, self.target_prot)
                    .expect("unable to change memory protection for jit memory segment");
            }
        }
        self.finalized = true;
    }

    // Note: We do pointer arithmetic on the `ptr` passed to `Segment::new` here.
    // This assumes that `ptr` is valid for `len` bytes; otherwise this is UB.
    fn allocate(&mut self, size: usize, align: usize) -> *mut u8 {
        assert!(self.has_space_for(size, align));
        self.position = align_up(self.position, align);
        let ptr = unsafe { self.ptr.add(self.position) };
        self.position += size;
        ptr
    }

    fn has_space_for(&self, size: usize, align: usize) -> bool {
        !self.finalized && align_up(self.position, align) + size <= self.len
    }
}

/// `ArenaMemoryProvider` allocates segments from a contiguous memory region
/// that is reserved up-front.
///
/// The arena's memory is initially allocated with PROT_NONE and gradually
/// updated as the JIT requires more space. This approach allows for stable
/// addresses throughout the lifetime of the JIT.
///
/// Depending on the underlying platform, requesting large parts of the address
/// space to be allocated might fail. This implementation currently does not
/// overcommit on Windows.
///
/// Note: To ensure function pointers remain valid for the remainder of the
/// program's life, memory is leaked by default unless
/// [`JITMemoryProvider::free_memory`] is called.
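///
/// # Example
///
/// A minimal sketch of the intended flow, mirroring the tests at the bottom of
/// this file (the exact re-export paths are an assumption, hence `ignore`):
///
/// ```ignore
/// use cranelift_jit::{ArenaMemoryProvider, BranchProtection, JITMemoryProvider};
///
/// // Reserve 1 MiB of address space; pages start out as PROT_NONE.
/// let mut arena = ArenaMemoryProvider::new_with_size(1 << 20).unwrap();
///
/// // Carve out a read-write allocation and fill it while its segment is
/// // still writable.
/// let ptr = arena.allocate_readwrite(128, 8).unwrap();
/// unsafe { ptr.write_bytes(0, 128) };
///
/// // Apply the target protections and flush the instruction pipeline.
/// arena.finalize(BranchProtection::None).unwrap();
///
/// // Optionally release the reservation once no JIT code or data is in use
/// // any more; otherwise the memory is intentionally leaked.
/// unsafe { arena.free_memory() };
/// ```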
pub struct ArenaMemoryProvider {
    alloc: ManuallyDrop<Option<region::Allocation>>,
    ptr: *mut u8,
    size: usize,
    position: usize,
    segments: Vec<Segment>,
}

impl ArenaMemoryProvider {
    /// Creates a new arena with a memory region of the given size, rounded up
    /// to the page size.
    pub fn new_with_size(reserve_size: usize) -> Result<Self, region::Error> {
        let size = align_up(reserve_size, region::page::size());
        // Note: The region crate uses `MEM_RESERVE | MEM_COMMIT` on Windows.
        // This means that allocations that exceed the page file plus system
        // memory will fail here.
        // https://github.com/darfink/region-rs/pull/34
        let mut alloc = region::alloc(size, region::Protection::NONE)?;
        let ptr = alloc.as_mut_ptr();

        Ok(Self {
            alloc: ManuallyDrop::new(Some(alloc)),
            segments: Vec::new(),
            ptr,
            size,
            position: 0,
        })
    }

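    /// Allocates `size` bytes with the given alignment and target protection,
    /// preferring an open segment with matching protection, then growing the
    /// last segment, and only then starting a new segment.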
fn allocate(
        &mut self,
        size: usize,
        align: u64,
        protection: region::Protection,
    ) -> io::Result<*mut u8> {
        let align = usize::try_from(align).expect("alignment too big");
        assert!(
            align <= region::page::size(),
            "alignment over page size is not supported"
        );

        // Note: Add a fast path without a linear scan over segments here?

        // Can we fit this allocation into an existing segment?
        if let Some(segment) = self.segments.iter_mut().find(|seg| {
            seg.target_prot == protection && !seg.finalized && seg.has_space_for(size, align)
        }) {
            return Ok(segment.allocate(size, align));
        }

        // Can we resize the last segment?
        if let Some(segment) = self.segments.last_mut() {
            if segment.target_prot == protection && !segment.finalized {
                let additional_size = align_up(size, region::page::size());

                // If our reserved arena can fit the additional size, extend the
                // last segment.
                if self.position + additional_size <= self.size {
                    segment.len += additional_size;
                    segment.set_rw();
                    self.position += additional_size;
                    return Ok(segment.allocate(size, align));
                }
            }
        }

        // Allocate a new segment for the given size and alignment.
        self.allocate_segment(size, protection)?;
        let i = self.segments.len() - 1;
        Ok(self.segments[i].allocate(size, align))
    }

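    /// Reserves a fresh page-aligned segment of at least `size` bytes from the
    /// arena, or fails if the pre-allocated region is exhausted.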
fn allocate_segment(
        &mut self,
        size: usize,
        target_prot: region::Protection,
    ) -> Result<(), io::Error> {
        let size = align_up(size, region::page::size());
        let ptr = unsafe { self.ptr.add(self.position) };
        if self.position + size > self.size {
            return Err(io::Error::new(
                io::ErrorKind::Other,
                "pre-allocated jit memory region exhausted",
            ));
        }
        self.position += size;
        self.segments.push(Segment::new(ptr, size, target_prot));
        Ok(())
    }

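    /// Applies each segment's target protection and flushes the instruction
    /// pipeline so that newly emitted code can be executed safely.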
pub(crate) fn finalize(&mut self, branch_protection: BranchProtection) {
        for segment in &mut self.segments {
            segment.finalize(branch_protection);
        }

        // Flush any in-flight instructions from the pipeline.
        wasmtime_jit_icache_coherence::pipeline_flush_mt().expect("Failed pipeline flush");
    }

    /// Frees the allocated memory region, which would otherwise be leaked.
    /// This invalidates any function pointers into the region, so using them
    /// afterwards is undefined behavior.
    pub(crate) unsafe fn free_memory(&mut self) {
        if self.ptr.is_null() {
            return;
        }
        self.segments.clear();
        // Drop the allocation, freeing the memory.
        let _: Option<region::Allocation> = self.alloc.take();
        self.ptr = ptr::null_mut();
    }
}

impl Drop for ArenaMemoryProvider {
    fn drop(&mut self) {
        if self.ptr.is_null() {
            return;
        }
        let is_live = self.segments.iter().any(|seg| seg.finalized);
        if !is_live {
            // Only free memory if it's not been finalized yet.
            // Otherwise, leak it since JIT memory may still be in use.
            unsafe { self.free_memory() };
        }
    }
}

impl JITMemoryProvider for ArenaMemoryProvider {
    fn allocate_readexec(&mut self, size: usize, align: u64) -> io::Result<*mut u8> {
        self.allocate(size, align, region::Protection::READ_EXECUTE)
    }

    fn allocate_readwrite(&mut self, size: usize, align: u64) -> io::Result<*mut u8> {
        self.allocate(size, align, region::Protection::READ_WRITE)
    }

    fn allocate_readonly(&mut self, size: usize, align: u64) -> io::Result<*mut u8> {
        self.allocate(size, align, region::Protection::READ)
    }

    unsafe fn free_memory(&mut self) {
        self.free_memory();
    }

    fn finalize(&mut self, branch_protection: BranchProtection) -> ModuleResult<()> {
        self.finalize(branch_protection);
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn alignment_ok() {
        let mut arena = ArenaMemoryProvider::new_with_size(1 << 20).unwrap();

        for align_log2 in 0..8 {
            let align = 1usize << align_log2;
            for size in 1..128 {
                let ptr = arena.allocate_readwrite(size, align as u64).unwrap();
                // assert!(ptr.is_aligned_to(align));
                assert_eq!(ptr.addr() % align, 0);
            }
        }
    }

    #[test]
    #[cfg(all(target_pointer_width = "64", not(target_os = "windows")))]
    // Windows: See https://github.com/darfink/region-rs/pull/34
    fn large_virtual_allocation() {
        // We should be able to request 1TB of virtual address space on 64-bit
        // platforms. Physical memory should be committed as we go.
        let reserve_size = 1 << 40;
        let mut arena = ArenaMemoryProvider::new_with_size(reserve_size).unwrap();
        let ptr = arena.allocate_readwrite(1, 1).unwrap();
        assert_eq!(ptr.addr(), arena.ptr.addr());
        arena.finalize(BranchProtection::None);
        unsafe { ptr.write_volatile(42) };
        unsafe { arena.free_memory() };
    }

    #[test]
    fn over_capacity() {
        let mut arena = ArenaMemoryProvider::new_with_size(1 << 20).unwrap(); // 1 MB

        let _ = arena.allocate_readwrite(900_000, 1).unwrap();
        let _ = arena.allocate_readwrite(200_000, 1).unwrap_err();
    }
}