//! The unix fiber implementation has some platform-specific details
//! (naturally) but there are a few details of the stack layout which are
//! common amongst all platforms using this file. Remember that none of this
//! applies to Windows, which is entirely separate.
//!
//! The stack is expected to look pretty standard with a guard page at the end.
//! Currently allocation happens in this file but this is probably going to be
//! refactored to happen somewhere else. Otherwise though the stack layout is
//! expected to look like so:
//!
//! ```text
//! 0xB000 +-----------------------+   <- top of stack
//!        | &Cell<RunResult>      |   <- where to store results
//! 0xAff8 +-----------------------+
//!        | *const u8             |   <- last sp to resume from
//! 0xAff0 +-----------------------+   <- 16-byte aligned
//!        |                       |
//!        ~        ...            ~   <- actual native stack space to use
//!        |                       |
//! 0x1000 +-----------------------+
//!        |      guard page       |
//! 0x0000 +-----------------------+
//! ```
//!
//! Here `0xAff8` is filled in temporarily while `resume` is running. The fiber
//! started with 0xB000 as a parameter so it knows how to find this.
//! Additionally `resume` stores state at 0xAff0 to restart execution, and
//! `suspend`, which has 0xB000 so it can find this, will read that and write
//! its own resumption information into this slot as well.
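//!
//! For example, both `Fiber::resume` and `Suspend::result_location` below
//! locate the `&Cell<RunResult>` slot by stepping one pointer-sized slot down
//! from the top of the stack, roughly (illustrative sketch, not real code):
//!
//! ```text
//! let top = stack.top().unwrap();            // 0xB000 in the diagram
//! let slot = top.cast::<usize>().offset(-1); // 0xAff8 in the diagram
//! ```
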
use crate::stackswitch::*;
use crate::{RunResult, RuntimeFiberStack};
use std::boxed::Box;
use std::cell::Cell;
use std::io;
use std::ops::Range;
use std::ptr;
use std::sync::atomic::{AtomicUsize, Ordering};

pub type Error = io::Error;

pub struct FiberStack {
    base: BasePtr,
    len: usize,

    /// Stored here to ensure that when this `FiberStack` is dropped the
    /// backing storage, if any, is additionally dropped.
    storage: FiberStackStorage,
}

struct BasePtr(*mut u8);

unsafe impl Send for BasePtr {}
unsafe impl Sync for BasePtr {}

enum FiberStackStorage {
    Mmap(MmapFiberStack),
    Unmanaged(usize),
    Custom(Box<dyn RuntimeFiberStack>),
}

// FIXME: this is a duplicate copy of what's already in the `wasmtime` crate. If
// this changes that should change over there, and ideally one day we should
// probably deduplicate the two.
fn host_page_size() -> usize {
    static PAGE_SIZE: AtomicUsize = AtomicUsize::new(0);

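    // NB: multiple threads may race here and each call `sysconf`, but they
    // all get the same answer, so the relaxed load/store pair is merely a
    // cache and the race is benign.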
    return match PAGE_SIZE.load(Ordering::Relaxed) {
        0 => {
            let size = unsafe { libc::sysconf(libc::_SC_PAGESIZE).try_into().unwrap() };
            assert!(size != 0);
            PAGE_SIZE.store(size, Ordering::Relaxed);
            size
        }
        n => n,
    };
}

impl FiberStack {
    pub fn new(size: usize, zeroed: bool) -> io::Result<Self> {
        let page_size = host_page_size();
        // The anonymous `mmap`s we use for `FiberStackStorage` are always
        // zeroed.
        let _ = zeroed;

        // See comments in `mod asan` below for why asan has a different stack
        // allocation strategy.
        if cfg!(asan) {
            return Self::from_custom(asan::new_fiber_stack(size)?);
        }
        let stack = MmapFiberStack::new(size)?;

        // An `MmapFiberStack` allocates a guard page at the bottom of the
        // region so the base and length of our stack are both offset by a
        // single page.
        Ok(FiberStack {
            base: BasePtr(stack.mapping_base.wrapping_byte_add(page_size)),
            len: stack.mapping_len - page_size,
            storage: FiberStackStorage::Mmap(stack),
        })
    }

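    /// Creates a `FiberStack` from raw parts supplied by the embedder.
    ///
    /// Note that `base` points at the bottom of the allocation including the
    /// guard area: the first `guard_size` bytes are the guard and the `len`
    /// usable bytes of stack sit above them.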
    pub unsafe fn from_raw_parts(base: *mut u8, guard_size: usize, len: usize) -> io::Result<Self> {
        // See comments in `mod asan` below for why asan has a different stack
        // allocation strategy.
        if cfg!(asan) {
            return Self::from_custom(asan::new_fiber_stack(len)?);
        }
        Ok(FiberStack {
            base: BasePtr(unsafe { base.add(guard_size) }),
            len,
            storage: FiberStackStorage::Unmanaged(guard_size),
        })
    }

    pub fn is_from_raw_parts(&self) -> bool {
        matches!(self.storage, FiberStackStorage::Unmanaged(_))
    }

    pub fn from_custom(custom: Box<dyn RuntimeFiberStack>) -> io::Result<Self> {
        let range = custom.range();
        let page_size = host_page_size();
        let start_ptr = range.start as *mut u8;
        assert!(
            start_ptr.align_offset(page_size) == 0,
            "expected fiber stack base ({start_ptr:?}) to be page aligned ({page_size:#x})",
        );
        let end_ptr = range.end as *const u8;
        assert!(
            end_ptr.align_offset(page_size) == 0,
            "expected fiber stack end ({end_ptr:?}) to be page aligned ({page_size:#x})",
        );
        Ok(FiberStack {
            base: BasePtr(start_ptr),
            len: range.len(),
            storage: FiberStackStorage::Custom(custom),
        })
    }

    pub fn top(&self) -> Option<*mut u8> {
        Some(self.base.0.wrapping_byte_add(self.len))
    }

    pub fn range(&self) -> Option<Range<usize>> {
        let base = self.base.0 as usize;
        Some(base..base + self.len)
    }

    pub fn guard_range(&self) -> Option<Range<*mut u8>> {
        match &self.storage {
            FiberStackStorage::Unmanaged(guard_size) => unsafe {
                let start = self.base.0.sub(*guard_size);
                Some(start..self.base.0)
            },
            FiberStackStorage::Mmap(mmap) => Some(mmap.mapping_base..self.base.0),
            FiberStackStorage::Custom(custom) => Some(custom.guard_range()),
        }
    }
}

struct MmapFiberStack {
    mapping_base: *mut u8,
    mapping_len: usize,
}

unsafe impl Send for MmapFiberStack {}
unsafe impl Sync for MmapFiberStack {}

impl MmapFiberStack {
    fn new(size: usize) -> io::Result<Self> {
        // Round up our stack size request to the nearest multiple of the
        // page size.
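        // (`page_size` is always a power of two, so adding `page_size - 1`
        // and masking off the low bits rounds `size` up to the next
        // page-size multiple.)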
        let page_size = host_page_size();
        let size = if size == 0 {
            page_size
        } else {
            (size + (page_size - 1)) & (!(page_size - 1))
        };

        unsafe {
            // Add in one page for a guard page and then ask for some memory.
            let mmap_len = size + page_size;
            let mmap = rustix::mm::mmap_anonymous(
                ptr::null_mut(),
                mmap_len,
                rustix::mm::ProtFlags::empty(),
                rustix::mm::MapFlags::PRIVATE,
            )?;

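            // The mapping starts out entirely inaccessible (no protection
            // flags), and this `mprotect` then makes everything above the
            // first page readable and writable, leaving that first page as
            // the inaccessible guard page.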
            rustix::mm::mprotect(
                mmap.byte_add(page_size),
                size,
                rustix::mm::MprotectFlags::READ | rustix::mm::MprotectFlags::WRITE,
            )?;

            Ok(MmapFiberStack {
                mapping_base: mmap.cast(),
                mapping_len: mmap_len,
            })
        }
    }
}

impl Drop for MmapFiberStack {
    fn drop(&mut self) {
        unsafe {
            let ret = rustix::mm::munmap(self.mapping_base.cast(), self.mapping_len);
            debug_assert!(ret.is_ok());
        }
    }
}

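// The `Fiber` handle itself carries no state: everything needed to resume a
// fiber lives on its `FiberStack`, which is why this is a unit struct and
// why `resume` below takes the stack as an argument.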
pub struct Fiber;

pub struct Suspend {
    top_of_stack: *mut u8,
    previous: asan::PreviousStack,
}

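// Entry point executed on the new fiber's own stack the first time it's
// resumed. Here `A` is the type of values passed in on each resume, `B` the
// type yielded back out on suspension, and `C` the fiber's final return
// value.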
extern "C" fn fiber_start<F, A, B, C>(arg0: *mut u8, top_of_stack: *mut u8)
where
    F: FnOnce(A, &mut super::Suspend<A, B, C>) -> C,
{
    unsafe {
        // Complete the `start_switch` AddressSanitizer handshake which would
        // have been started in `Fiber::resume`.
        let previous = asan::fiber_start_complete();

        let inner = Suspend {
            top_of_stack,
            previous,
        };
        let initial = inner.take_resume::<A, B, C>();
        super::Suspend::<A, B, C>::execute(inner, initial, Box::from_raw(arg0.cast::<F>()))
    }
}

impl Fiber {
    pub fn new<F, A, B, C>(stack: &FiberStack, func: F) -> io::Result<Self>
    where
        F: FnOnce(A, &mut super::Suspend<A, B, C>) -> C,
    {
        // On unsupported platforms `wasmtime_fiber_init` is a panicking shim so
        // return an error saying the host architecture isn't supported instead.
        if !SUPPORTED_ARCH {
            return Err(io::Error::new(
                io::ErrorKind::Other,
                "fibers not supported on this host architecture",
            ));
        }
        unsafe {
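            // Ownership of `func` is handed to the fiber here: the `Box` is
            // leaked into a raw pointer and reconstituted via `Box::from_raw`
            // in `fiber_start` on the first resume.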
            let data = Box::into_raw(Box::new(func)).cast();
            wasmtime_fiber_init(stack.top().unwrap(), fiber_start::<F, A, B, C>, data);
        }

        Ok(Self)
    }

    pub(crate) fn resume<A, B, C>(&self, stack: &FiberStack, result: &Cell<RunResult<A, B, C>>) {
        unsafe {
            // Store where our result is going at the very tip-top of the
            // stack, otherwise known as our reserved slot for this information.
            //
            // In the diagram above this is updating address 0xAff8
            let addr = stack.top().unwrap().cast::<usize>().offset(-1);
            addr.write(result as *const _ as usize);

            asan::fiber_switch(
                stack.top().unwrap(),
                false,
                &mut asan::PreviousStack::new(stack),
            );

            // null this out to help catch use-after-free
            addr.write(0);
        }
    }

    pub(crate) unsafe fn drop<A, B, C>(&mut self) {}
}

impl Suspend {
    pub(crate) fn switch<A, B, C>(&mut self, result: RunResult<A, B, C>) -> A {
        unsafe {
            let is_finishing = match &result {
                RunResult::Returned(_) | RunResult::Panicked(_) => true,
                RunResult::Executing | RunResult::Resuming(_) | RunResult::Yield(_) => false,
            };
            // Calculate 0xAff8 and then write to it
            (*self.result_location::<A, B, C>()).set(result);

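            // Control transfers away from this fiber here; execution resumes
            // past this call only once some caller invokes `resume` again
            // (and for a finishing switch it never does).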
            asan::fiber_switch(self.top_of_stack, is_finishing, &mut self.previous);

            self.take_resume::<A, B, C>()
        }
    }

    pub(crate) fn exit<A, B, C>(&mut self, result: RunResult<A, B, C>) {
        self.switch(result);
        unreachable!()
    }

    unsafe fn take_resume<A, B, C>(&self) -> A {
        unsafe {
            match (*self.result_location::<A, B, C>()).replace(RunResult::Executing) {
                RunResult::Resuming(val) => val,
                _ => panic!("not in resuming state"),
            }
        }
    }

    unsafe fn result_location<A, B, C>(&self) -> *const Cell<RunResult<A, B, C>> {
        unsafe {
            let ret = self.top_of_stack.cast::<*const u8>().offset(-1).read();
            assert!(!ret.is_null());
            ret.cast()
        }
    }
}

/// Support for AddressSanitizer around the stack manipulations we do in this
/// fiber implementation.
///
/// This module uses, when fuzzing is enabled, special intrinsics provided by
/// the sanitizer runtime called `__sanitizer_{start,finish}_switch_fiber`.
/// These aren't really super heavily documented and the current implementation
/// is inspired by googling the functions and looking at Boost & Julia's usage
/// of them as well as the documentation for these functions in their own
/// header file in the LLVM source tree. The general idea is that they're
/// called around every stack switch with some other fiddly bits as well.
#[cfg(asan)]
mod asan {
    use super::{FiberStack, MmapFiberStack, RuntimeFiberStack, host_page_size};
    use alloc::boxed::Box;
    use alloc::vec::Vec;
    use std::mem::ManuallyDrop;
    use std::ops::Range;
    use std::sync::Mutex;

    /// State for the "previous stack" maintained by asan itself and fed in
    /// for custom stacks.
    pub struct PreviousStack {
        bottom: *const u8,
        size: usize,
    }

    impl PreviousStack {
        pub fn new(stack: &FiberStack) -> PreviousStack {
            let range = stack.range().unwrap();
            PreviousStack {
                bottom: range.start as *const u8,
                // Discount the two pointer-sized slots stored at the top of
                // the stack (see the diagram in the module docs).
                size: range.len() - 2 * std::mem::size_of::<*const u8>(),
            }
        }
    }

    impl Default for PreviousStack {
        fn default() -> PreviousStack {
            PreviousStack {
                bottom: std::ptr::null(),
                size: 0,
            }
        }
    }

    /// Switches the current stack to `top_of_stack`.
    ///
    /// * `top_of_stack` - when switching to a fiber this is calculated, and
    ///   when restoring back to the original stack it was saved during the
    ///   initial transition.
    /// * `is_finishing` - whether or not we're switching off a fiber for the
    ///   final time; customizes how asan intrinsics are invoked.
    /// * `prev` - on entry the stack being switched to; after the switch
    ///   returns it holds the stack to return to upon resumption.
    pub unsafe fn fiber_switch(
        top_of_stack: *mut u8,
        is_finishing: bool,
        prev: &mut PreviousStack,
    ) {
        assert!(super::SUPPORTED_ARCH);
        let mut private_asan_pointer = std::ptr::null_mut();

        // If this fiber is finishing then NULL is passed to asan to let it
        // know that it can deallocate the "fake stack" that it's tracking
        // for this fiber.
        let private_asan_pointer_ref = if is_finishing {
            None
        } else {
            Some(&mut private_asan_pointer)
        };

        // NB: in fiddling with asan and optimizations and such it appears
        // that these functions need to be "very close to each other". If
        // other Rust functions are invoked, or an abstraction is added here,
        // that appears to trigger false positives in ASAN. That leads to the
        // design of this module as-is where this function exists to keep
        // these three functions very close to one another.
        unsafe {
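            // Protocol: announce the upcoming switch to asan, perform the
            // actual switch in assembly, then let asan know we've arrived
            // back, recording the stack we came from into `prev`.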
            __sanitizer_start_switch_fiber(private_asan_pointer_ref, prev.bottom, prev.size);
            super::wasmtime_fiber_switch(top_of_stack);
            __sanitizer_finish_switch_fiber(private_asan_pointer, &mut prev.bottom, &mut prev.size);
        }
    }

    /// Hook for when a fiber first starts, used to configure ASAN.
    pub unsafe fn fiber_start_complete() -> PreviousStack {
        let mut ret = PreviousStack::default();
        unsafe {
            __sanitizer_finish_switch_fiber(std::ptr::null_mut(), &mut ret.bottom, &mut ret.size);
        }
        ret
    }

    // These intrinsics are provided by the address sanitizer runtime. Their C
    // signatures were translated into Rust-isms here with `Option` and `&mut`.
    unsafe extern "C" {
        fn __sanitizer_start_switch_fiber(
            private_asan_pointer_save: Option<&mut *mut u8>,
            bottom: *const u8,
            size: usize,
        );
        fn __sanitizer_finish_switch_fiber(
            private_asan_pointer: *mut u8,
            bottom_old: &mut *const u8,
            size_old: &mut usize,
        );
    }

    /// This static is a workaround for llvm/llvm-project#53891; notably it is
    /// a global cache of all fiber stacks.
    ///
    /// The problem with ASAN is that if we allocate memory for a stack, use
    /// it as a stack, and deallocate the stack, then that memory may later be
    /// mapped as normal heap memory. This is possible due to `mmap` reusing
    /// addresses and it ends up confusing ASAN. In this situation ASAN will
    /// have false positives about stack overflows, saying that writes to
    /// freshly-allocated memory, which just happened to historically be a
    /// stack, are a stack overflow.
    ///
    /// This static works around the issue by ensuring that, only when asan is
    /// enabled, all stacks are cached globally. Stacks are never deallocated
    /// and are forever retained here. This only works if the number of stacks
    /// retained here stays relatively small, to prevent OOM in continuously
    /// running programs. That's hopefully the case, as ASAN is mostly used in
    /// OSS-Fuzz and our fuzzers only fuzz one thing at a time per thread,
    /// meaning that this should only ever be a relatively small set of
    /// stacks.
    static FIBER_STACKS: Mutex<Vec<MmapFiberStack>> = Mutex::new(Vec::new());

    pub fn new_fiber_stack(size: usize) -> std::io::Result<Box<dyn RuntimeFiberStack>> {
        let page_size = host_page_size();
        let needed_size = size + page_size;
        let mut stacks = FIBER_STACKS.lock().unwrap();

        let stack = match stacks.iter().position(|i| needed_size <= i.mapping_len) {
            // If an appropriately sized stack was already allocated, then use
            // that one.
            Some(i) => stacks.remove(i),
            // ... otherwise allocate a brand new stack.
            None => MmapFiberStack::new(size)?,
        };
        let stack = AsanFiberStack {
            mmap: ManuallyDrop::new(stack),
        };
        Ok(Box::new(stack))
    }

    /// Custom structure used to prevent the interior mmap-allocated stack
    /// from actually getting unmapped.
    ///
    /// On drop this stack will return the interior stack to the global
    /// `FIBER_STACKS` list.
    struct AsanFiberStack {
        mmap: ManuallyDrop<MmapFiberStack>,
    }

    unsafe impl RuntimeFiberStack for AsanFiberStack {
        fn top(&self) -> *mut u8 {
            self.mmap
                .mapping_base
                .wrapping_byte_add(self.mmap.mapping_len)
        }

        fn range(&self) -> Range<usize> {
            let base = self.mmap.mapping_base as usize;
            let end = base + self.mmap.mapping_len;
            base + host_page_size()..end
        }

        fn guard_range(&self) -> Range<*mut u8> {
            self.mmap.mapping_base..self.mmap.mapping_base.wrapping_add(host_page_size())
        }
    }

    impl Drop for AsanFiberStack {
        fn drop(&mut self) {
            let stack = unsafe { ManuallyDrop::take(&mut self.mmap) };
            FIBER_STACKS.lock().unwrap().push(stack);
        }
    }
}

// Shim module that's the same as above but only has stubs.
#[cfg(not(asan))]
mod asan_disabled {
    use super::{FiberStack, RuntimeFiberStack};
    use std::boxed::Box;

    #[derive(Default)]
    pub struct PreviousStack;

    impl PreviousStack {
        #[inline]
        pub fn new(_stack: &FiberStack) -> PreviousStack {
            PreviousStack
        }
    }

    pub unsafe fn fiber_switch(
        top_of_stack: *mut u8,
        _is_finishing: bool,
        _prev: &mut PreviousStack,
    ) {
        assert!(super::SUPPORTED_ARCH);
        unsafe {
            super::wasmtime_fiber_switch(top_of_stack);
        }
    }

    #[inline]
    pub unsafe fn fiber_start_complete() -> PreviousStack {
        PreviousStack
    }

    pub fn new_fiber_stack(_size: usize) -> std::io::Result<Box<dyn RuntimeFiberStack>> {
        unimplemented!()
    }
}

#[cfg(not(asan))]
use asan_disabled as asan;