GitHub Repository: bytecodealliance/wasmtime
Path: blob/main/cranelift/jit/src/memory/arena.rs
use std::io;
use std::mem::ManuallyDrop;
use std::ptr;

use cranelift_module::ModuleResult;

use super::{BranchProtection, JITMemoryKind, JITMemoryProvider};
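
/// Rounds `addr` up to the nearest multiple of `align`, which must be a power
/// of two: adding `align - 1` and masking off the low bits rounds up, e.g.
/// `align_up(5, 8) == 8` and `align_up(9, 8) == 16`.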
fn align_up(addr: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (addr + align - 1) & !(align - 1)
}
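
/// A page-aligned slice of the arena that bump-allocates chunks. A segment is
/// created read-write (see `new`), hands out chunks via `allocate`, and is
/// switched to its target protection exactly once in `finalize`, after which
/// it accepts no further allocations.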
#[derive(Debug)]
struct Segment {
    ptr: *mut u8,
    len: usize,
    position: usize,
    target_prot: region::Protection,
    finalized: bool,
}

impl Segment {
    fn new(ptr: *mut u8, len: usize, target_prot: region::Protection) -> Self {
        // Segments are created on page boundaries.
        debug_assert_eq!(ptr as usize % region::page::size(), 0);
        debug_assert_eq!(len % region::page::size(), 0);
        let mut segment = Segment {
            ptr,
            len,
            target_prot,
            position: 0,
            finalized: false,
        };
        // Set segment to read-write for initialization. The target permissions
        // will be applied in `finalize`.
        segment.set_rw();
        segment
    }

    fn set_rw(&mut self) {
        unsafe {
            region::protect(self.ptr, self.len, region::Protection::READ_WRITE)
                .expect("unable to change memory protection for jit memory segment");
        }
    }

    fn finalize(&mut self, branch_protection: BranchProtection) {
        if self.finalized {
            return;
        }

        // Executable regions are handled separately to correctly deal with
        // branch protection and cache coherence.
        if self.target_prot == region::Protection::READ_EXECUTE {
            super::set_readable_and_executable(self.ptr, self.len, branch_protection)
                .expect("unable to set memory protection for jit memory segment");
        } else {
            unsafe {
                region::protect(self.ptr, self.len, self.target_prot)
                    .expect("unable to change memory protection for jit memory segment");
            }
        }
        self.finalized = true;
    }

    // Note: We do pointer arithmetic on the `ptr` passed to `Segment::new` here.
    // This assumes that `ptr` is valid for `len` bytes; otherwise this is UB.
    fn allocate(&mut self, size: usize, align: usize) -> *mut u8 {
        assert!(self.has_space_for(size, align));
        self.position = align_up(self.position, align);
        let ptr = unsafe { self.ptr.add(self.position) };
        self.position += size;
        ptr
    }

    fn has_space_for(&self, size: usize, align: usize) -> bool {
        !self.finalized && align_up(self.position, align) + size <= self.len
    }
}

/// `ArenaMemoryProvider` allocates segments from a contiguous memory region
/// that is reserved up-front.
///
/// The arena's memory is initially allocated with PROT_NONE and gradually
/// updated as the JIT requires more space. This approach allows for stable
/// addresses throughout the lifetime of the JIT.
///
/// Depending on the underlying platform, requesting large parts of the address
/// space to be allocated might fail. This implementation currently doesn't do
/// overcommit on Windows.
///
/// Note: Memory is leaked by default, so that function pointers remain valid
/// for the remainder of the program's life; call
/// [`JITMemoryProvider::free_memory`] to release the region.
pub struct ArenaMemoryProvider {
    alloc: ManuallyDrop<Option<region::Allocation>>,
    ptr: *mut u8,
    size: usize,
    position: usize,
    segments: Vec<Segment>,
}
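
// A minimal usage sketch (the size and `code_len` are placeholders), relying
// on the `JITMemoryProvider` implementation further below:
//
//     let mut provider = ArenaMemoryProvider::new_with_size(16 << 20).unwrap();
//     let code = provider.allocate(code_len, 16, JITMemoryKind::Executable).unwrap();
//     // ... copy machine code into `code` while the segment is still read-write ...
//     provider.finalize(BranchProtection::None);
//     // ... call into the generated code ...
//     unsafe { provider.free_memory() }; // invalidates `code`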

unsafe impl Send for ArenaMemoryProvider {}

impl ArenaMemoryProvider {
    /// Create a new memory region with the given size.
    pub fn new_with_size(reserve_size: usize) -> Result<Self, region::Error> {
        let size = align_up(reserve_size, region::page::size());
        // Note: The region crate uses `MEM_RESERVE | MEM_COMMIT` on Windows.
        // This means that allocations that exceed the page file plus system
        // memory will fail here.
        // https://github.com/darfink/region-rs/pull/34
        let mut alloc = region::alloc(size, region::Protection::NONE)?;
        let ptr = alloc.as_mut_ptr();

        Ok(Self {
            alloc: ManuallyDrop::new(Some(alloc)),
            segments: Vec::new(),
            ptr,
            size,
            position: 0,
        })
    }
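
    /// Allocates `size` bytes with the given alignment and target protection,
    /// trying in order: an existing open (not yet finalized) segment with the
    /// same protection, growing the last segment if it is open and matches, or
    /// carving a new segment out of the reserved region.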
    fn allocate_inner(
        &mut self,
        size: usize,
        align: u64,
        protection: region::Protection,
    ) -> io::Result<*mut u8> {
        let align = usize::try_from(align).expect("alignment too big");
        assert!(
            align <= region::page::size(),
            "alignment over page size is not supported"
        );

        // Note: Add a fast path without a linear scan over segments here?

        // Can we fit this allocation into an existing segment?
        if let Some(segment) = self.segments.iter_mut().find(|seg| {
            seg.target_prot == protection && !seg.finalized && seg.has_space_for(size, align)
        }) {
            return Ok(segment.allocate(size, align));
        }

        // Can we resize the last segment?
        if let Some(segment) = self.segments.iter_mut().last() {
            if segment.target_prot == protection && !segment.finalized {
                let additional_size = align_up(size, region::page::size());

                // If our reserved arena can fit the additional size, extend the
                // last segment.
                if self.position + additional_size <= self.size {
                    segment.len += additional_size;
                    segment.set_rw();
                    self.position += additional_size;
                    return Ok(segment.allocate(size, align));
                }
            }
        }

        // Allocate new segment for given size and alignment.
        self.allocate_segment(size, protection)?;
        let i = self.segments.len() - 1;
        Ok(self.segments[i].allocate(size, align))
    }

    fn allocate_segment(
        &mut self,
        size: usize,
        target_prot: region::Protection,
    ) -> Result<(), io::Error> {
        let size = align_up(size, region::page::size());
        let ptr = unsafe { self.ptr.add(self.position) };
        if self.position + size > self.size {
            return Err(io::Error::new(
                io::ErrorKind::Other,
                "pre-allocated jit memory region exhausted",
            ));
        }
        self.position += size;
        self.segments.push(Segment::new(ptr, size, target_prot));
        Ok(())
    }

    pub(crate) fn finalize(&mut self, branch_protection: BranchProtection) {
        for segment in &mut self.segments {
            segment.finalize(branch_protection);
        }

        // Flush any in-flight instructions from the pipeline
        wasmtime_jit_icache_coherence::pipeline_flush_mt().expect("Failed pipeline flush");
    }

    /// Frees the allocated memory region, which would otherwise be leaked.
    /// This invalidates existing function pointers into the region, so they
    /// must not be called afterwards.
    pub(crate) unsafe fn free_memory(&mut self) {
        if self.ptr == ptr::null_mut() {
            return;
        }
        self.segments.clear();
        // Drop the allocation, freeing memory.
        let _: Option<region::Allocation> = self.alloc.take();
        self.ptr = ptr::null_mut();
    }
}

impl Drop for ArenaMemoryProvider {
    fn drop(&mut self) {
        if self.ptr == ptr::null_mut() {
            return;
        }
        let is_live = self.segments.iter().any(|seg| seg.finalized);
        if !is_live {
            // Only free memory if it's not been finalized yet.
            // Otherwise, leak it since JIT memory may still be in use.
            unsafe { self.free_memory() };
        }
    }
}

impl JITMemoryProvider for ArenaMemoryProvider {
    fn allocate(&mut self, size: usize, align: u64, kind: JITMemoryKind) -> io::Result<*mut u8> {
        self.allocate_inner(
            size,
            align,
            match kind {
                JITMemoryKind::Executable => region::Protection::READ_EXECUTE,
                JITMemoryKind::Writable => region::Protection::READ_WRITE,
                JITMemoryKind::ReadOnly => region::Protection::READ,
            },
        )
    }

    unsafe fn free_memory(&mut self) {
        self.free_memory();
    }

    fn finalize(&mut self, branch_protection: BranchProtection) -> ModuleResult<()> {
        self.finalize(branch_protection);
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn alignment_ok() {
        let mut arena = ArenaMemoryProvider::new_with_size(1 << 20).unwrap();

        for align_log2 in 0..8 {
            let align = 1usize << align_log2;
            for size in 1..128 {
                let ptr = arena
                    .allocate(size, align as u64, JITMemoryKind::Writable)
                    .unwrap();
                // assert!(ptr.is_aligned_to(align));
                assert_eq!(ptr.addr() % align, 0);
            }
        }
    }

    #[test]
    #[cfg(all(target_pointer_width = "64", not(target_os = "windows")))]
    // Windows: See https://github.com/darfink/region-rs/pull/34
    fn large_virtual_allocation() {
        // We should be able to request 1TB of virtual address space on 64-bit
        // platforms. Physical memory should be committed as we go.
        let reserve_size = 1 << 40;
        let mut arena = ArenaMemoryProvider::new_with_size(reserve_size).unwrap();
        let ptr = arena.allocate(1, 1, JITMemoryKind::Writable).unwrap();
        assert_eq!(ptr.addr(), arena.ptr.addr());
        arena.finalize(BranchProtection::None);
        unsafe { ptr.write_volatile(42) };
        unsafe { arena.free_memory() };
    }

    #[test]
    fn over_capacity() {
        let mut arena = ArenaMemoryProvider::new_with_size(1 << 20).unwrap(); // 1 MB

        let _ = arena.allocate(900_000, 1, JITMemoryKind::Writable).unwrap();
        let _ = arena
            .allocate(200_000, 1, JITMemoryKind::Writable)
            .unwrap_err();
    }

    #[test]
    fn test_is_send() {
        fn assert_is_send<T: Send>() {}
        assert_is_send::<ArenaMemoryProvider>();
    }
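
    // Sketch: allocations with different target protections must not share a
    // segment (exercises the segment-selection logic in `allocate_inner`).
    #[test]
    fn distinct_protections_get_distinct_segments() {
        let mut arena = ArenaMemoryProvider::new_with_size(1 << 20).unwrap();
        let writable = arena.allocate(16, 8, JITMemoryKind::Writable).unwrap();
        let readonly = arena.allocate(16, 8, JITMemoryKind::ReadOnly).unwrap();
        // Each protection gets its own page-aligned segment within the arena.
        assert_ne!(writable.addr(), readonly.addr());
        assert_eq!(arena.segments.len(), 2);
        unsafe { arena.free_memory() };
    }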
}