Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
bytecodealliance
GitHub Repository: bytecodealliance/wasmtime
Path: blob/main/cranelift/jit/src/memory/system.rs
1692 views
1
use cranelift_module::{ModuleError, ModuleResult};
2
3
#[cfg(all(not(target_os = "windows"), feature = "selinux-fix"))]
4
use memmap2::MmapMut;
5
6
#[cfg(not(any(feature = "selinux-fix", windows)))]
7
use std::alloc;
8
use std::io;
9
use std::mem;
10
use std::ptr;
11
12
use super::BranchProtection;
13
use super::JITMemoryProvider;
14
15
/// A simple struct consisting of a pointer and length.
struct PtrLen {
    /// With the "selinux-fix" feature, the allocation is backed by an
    /// anonymous mmap instead of the global allocator; the map owns the
    /// memory and releases it when dropped.
    #[cfg(all(not(target_os = "windows"), feature = "selinux-fix"))]
    map: Option<MmapMut>,

    /// Start of the allocation; null for the empty `PtrLen` from `new()`.
    ptr: *mut u8,
    /// Size of the allocation in bytes — rounded up to whole pages by
    /// `with_size` (0 for the empty `PtrLen`).
    len: usize,
}
23
24
impl PtrLen {
    /// Create a new empty `PtrLen`: a null pointer and zero length,
    /// owning no memory.
    fn new() -> Self {
        Self {
            #[cfg(all(not(target_os = "windows"), feature = "selinux-fix"))]
            map: None,

            ptr: ptr::null_mut(),
            len: 0,
        }
    }

    /// Create a new `PtrLen` pointing to at least `size` bytes of memory,
    /// suitably sized and aligned for memory protection.
    ///
    /// "selinux-fix" variant: backed by an anonymous mmap, which is kept in
    /// `map` so it is unmapped on drop.
    #[cfg(all(not(target_os = "windows"), feature = "selinux-fix"))]
    fn with_size(size: usize) -> io::Result<Self> {
        // Round up to a whole number of pages so the region can later be
        // given page-granular protections.
        let alloc_size = region::page::ceil(size as *const ()) as usize;
        MmapMut::map_anon(alloc_size).map(|mut mmap| {
            // The order here is important; we assign the pointer first to get
            // around compile time borrow errors.
            Self {
                ptr: mmap.as_mut_ptr(),
                map: Some(mmap),
                len: alloc_size,
            }
        })
    }

    /// Non-"selinux-fix", non-Windows variant: page-aligned, page-rounded
    /// allocation from the global allocator. Freed by the `Drop` impl below,
    /// which must use a matching `Layout`.
    #[cfg(all(not(target_os = "windows"), not(feature = "selinux-fix")))]
    fn with_size(size: usize) -> io::Result<Self> {
        assert_ne!(size, 0);
        let page_size = region::page::size();
        // Round the requested size up to whole pages and align the block to
        // a page boundary so protections can be applied to it.
        let alloc_size = region::page::ceil(size as *const ()) as usize;
        let layout = alloc::Layout::from_size_align(alloc_size, page_size).unwrap();
        // Safety: We assert that the size is non-zero above.
        let ptr = unsafe { alloc::alloc(layout) };

        if !ptr.is_null() {
            Ok(Self {
                ptr,
                len: alloc_size,
            })
        } else {
            Err(io::Error::from(io::ErrorKind::OutOfMemory))
        }
    }

    /// Windows variant: reserve and commit read-write pages via
    /// `VirtualAlloc`. Note there is currently no matching `Drop`
    /// (see the TODO further down in this file).
    #[cfg(target_os = "windows")]
    fn with_size(size: usize) -> io::Result<Self> {
        use windows_sys::Win32::System::Memory::{
            MEM_COMMIT, MEM_RESERVE, PAGE_READWRITE, VirtualAlloc,
        };

        // VirtualAlloc always rounds up to the next multiple of the page size
        let ptr = unsafe {
            VirtualAlloc(
                ptr::null_mut(),
                size,
                MEM_COMMIT | MEM_RESERVE,
                PAGE_READWRITE,
            )
        };
        if !ptr.is_null() {
            Ok(Self {
                ptr: ptr as *mut u8,
                // Record the page-rounded size that was actually reserved.
                len: region::page::ceil(size as *const ()) as usize,
            })
        } else {
            Err(io::Error::last_os_error())
        }
    }
}
96
97
// `MMapMut` from `cfg(feature = "selinux-fix")` already deallocates properly.
#[cfg(all(not(target_os = "windows"), not(feature = "selinux-fix")))]
impl Drop for PtrLen {
    fn drop(&mut self) {
        // Empty `PtrLen`s (from `PtrLen::new`) own no memory; nothing to do.
        if !self.ptr.is_null() {
            let page_size = region::page::size();
            // Must mirror the `Layout` used by `with_size` for this cfg.
            let layout = alloc::Layout::from_size_align(self.len, page_size).unwrap();
            unsafe {
                // Restore read-write permissions before returning the pages
                // to the allocator: finalized JIT memory may have been made
                // read-only or read-execute.
                region::protect(self.ptr, self.len, region::Protection::READ_WRITE)
                    .expect("unable to unprotect memory");
                alloc::dealloc(self.ptr, layout)
            }
        }
    }
}
112
113
// TODO: add a `Drop` impl for `cfg(target_os = "windows")`
114
115
/// JIT memory manager. This manages pages of suitably aligned and
/// accessible memory. Memory will be leaked by default to have
/// function pointers remain valid for the remainder of the
/// program's life.
pub(crate) struct Memory {
    /// Finished regions (no longer allocated from), in allocation order.
    allocations: Vec<PtrLen>,
    /// Count of leading entries in `allocations` whose page protections
    /// have already been applied; skipped on the next protection pass.
    already_protected: usize,
    /// Region currently being bump-allocated from.
    current: PtrLen,
    /// Bump-allocation offset into `current`.
    position: usize,
}
125
126
// SAFETY: `Memory` exclusively owns the allocations behind its `PtrLen`s,
// so moving it to another thread moves sole ownership of those pages.
// NOTE(review): soundness presumably also relies on callers not racing on
// the raw pointers previously handed out by `allocate` — confirm against
// the crate's wider safety argument.
unsafe impl Send for Memory {}
127
128
impl Memory {
    /// Create an empty `Memory` with no backing allocations yet.
    pub(crate) fn new() -> Self {
        Self {
            allocations: Vec::new(),
            already_protected: 0,
            current: PtrLen::new(),
            position: 0,
        }
    }

    /// Retire `current` into `allocations` and reset the bump offset.
    fn finish_current(&mut self) {
        self.allocations
            .push(mem::replace(&mut self.current, PtrLen::new()));
        self.position = 0;
    }

    /// Bump-allocate `size` bytes aligned to `align` (which must fit in a
    /// `usize`, else this panics), starting a fresh region when the current
    /// one cannot satisfy the request.
    pub(crate) fn allocate(&mut self, size: usize, align: u64) -> io::Result<*mut u8> {
        let align = usize::try_from(align).expect("alignment too big");
        // Round `position` up to the requested alignment.
        if self.position % align != 0 {
            self.position += align - self.position % align;
            debug_assert!(self.position % align == 0);
        }

        // Fast path: the request fits in the remainder of `current`.
        if size <= self.current.len - self.position {
            // TODO: Ensure overflow is not possible.
            let ptr = unsafe { self.current.ptr.add(self.position) };
            self.position += size;
            return Ok(ptr);
        }

        // Slow path: retire `current` and allocate a new region sized (at
        // least) for this request.
        self.finish_current();

        // TODO: Allocate more at a time.
        self.current = PtrLen::with_size(size)?;
        self.position = size;

        Ok(self.current.ptr)
    }

    /// Set all memory allocated in this `Memory` up to now as readable and executable.
    pub(crate) fn set_readable_and_executable(
        &mut self,
        branch_protection: BranchProtection,
    ) -> ModuleResult<()> {
        // Retire the in-progress region so it gets protected too.
        self.finish_current();

        for &PtrLen { ptr, len, .. } in self.non_protected_allocations_iter() {
            super::set_readable_and_executable(ptr, len, branch_protection)?;
        }

        // Flush any in-flight instructions from the pipeline
        wasmtime_jit_icache_coherence::pipeline_flush_mt().expect("Failed pipeline flush");

        // Everything currently in `allocations` is now protected; remember
        // that so future passes skip these entries.
        self.already_protected = self.allocations.len();
        Ok(())
    }

    /// Set all memory allocated in this `Memory` up to now as readonly.
    pub(crate) fn set_readonly(&mut self) -> ModuleResult<()> {
        self.finish_current();

        for &PtrLen { ptr, len, .. } in self.non_protected_allocations_iter() {
            unsafe {
                region::protect(ptr, len, region::Protection::READ).map_err(|e| {
                    ModuleError::Backend(
                        anyhow::Error::new(e).context("unable to make memory readonly"),
                    )
                })?;
            }
        }

        self.already_protected = self.allocations.len();
        Ok(())
    }

    /// Iterates non protected memory allocations that are of not zero bytes in size.
    fn non_protected_allocations_iter(&self) -> impl Iterator<Item = &PtrLen> {
        // Skip the leading entries that earlier protection passes handled.
        let iter = self.allocations[self.already_protected..].iter();

        // Under "selinux-fix" an entry is only live while its backing mmap
        // is present, so additionally filter on `map.is_some()`.
        #[cfg(all(not(target_os = "windows"), feature = "selinux-fix"))]
        return iter.filter(|&PtrLen { map, len, .. }| *len != 0 && map.is_some());

        #[cfg(any(target_os = "windows", not(feature = "selinux-fix")))]
        return iter.filter(|&PtrLen { len, .. }| *len != 0);
    }

    /// Frees all allocated memory regions that would be leaked otherwise.
    /// Likely to invalidate existing function pointers, causing unsafety.
    pub(crate) unsafe fn free_memory(&mut self) {
        // Dropping the `PtrLen`s releases the underlying pages (except on
        // Windows, which currently lacks a `Drop` impl — see the TODO above).
        self.allocations.clear();
        self.already_protected = 0;
    }
}
221
222
impl Drop for Memory {
223
fn drop(&mut self) {
224
// leak memory to guarantee validity of function pointers
225
mem::replace(&mut self.allocations, Vec::new())
226
.into_iter()
227
.for_each(mem::forget);
228
}
229
}
230
231
/// A memory provider that allocates memory on-demand using the system
/// allocator.
///
/// Note: Memory will be leaked by default unless
/// [`JITMemoryProvider::free_memory`] is called to ensure function pointers
/// remain valid for the remainder of the program's life.
pub struct SystemMemoryProvider {
    /// Pool for generated code; made read+execute at finalization.
    code: Memory,
    /// Pool for immutable data; made read-only at finalization.
    readonly: Memory,
    /// Pool for mutable data; left read-write.
    writable: Memory,
}
242
243
impl SystemMemoryProvider {
244
/// Create a new memory handle with the given branch protection.
245
pub fn new() -> Self {
246
Self {
247
code: Memory::new(),
248
readonly: Memory::new(),
249
writable: Memory::new(),
250
}
251
}
252
}
253
254
impl JITMemoryProvider for SystemMemoryProvider {
    // NOTE(review): `free_memory` is `unsafe` — presumably the caller must
    // guarantee no function pointers into this memory are used afterwards;
    // confirm against the trait's `# Safety` documentation.
    unsafe fn free_memory(&mut self) {
        self.code.free_memory();
        self.readonly.free_memory();
        self.writable.free_memory();
    }

    // Apply final page protections: the read-only pool first, then the code
    // pool (which also performs the pipeline/icache flush).
    fn finalize(&mut self, branch_protection: BranchProtection) -> ModuleResult<()> {
        self.readonly.set_readonly()?;
        self.code.set_readable_and_executable(branch_protection)
    }

    // Code allocations land in the `code` pool (read+execute after finalize).
    fn allocate_readexec(&mut self, size: usize, align: u64) -> io::Result<*mut u8> {
        self.code.allocate(size, align)
    }

    // Mutable data lands in the `writable` pool (never protected).
    fn allocate_readwrite(&mut self, size: usize, align: u64) -> io::Result<*mut u8> {
        self.writable.allocate(size, align)
    }

    // Immutable data lands in the `readonly` pool (read-only after finalize).
    fn allocate_readonly(&mut self, size: usize, align: u64) -> io::Result<*mut u8> {
        self.readonly.allocate(size, align)
    }
}
278
279