Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
bytecodealliance
GitHub Repository: bytecodealliance/wasmtime
Path: blob/main/crates/fuzzing/src/oracles.rs
3054 views
1
//! Oracles.
2
//!
3
//! Oracles take a test case and determine whether we have a bug. For example,
4
//! one of the simplest oracles is to take a Wasm binary as our input test case,
5
//! validate and instantiate it, and (implicitly) check that no assertions
6
//! failed or segfaults happened. A more complicated oracle might compare the
7
//! result of executing a Wasm file with and without optimizations enabled, and
8
//! make sure that the two executions are observably identical.
9
//!
10
//! When an oracle finds a bug, it should report it to the fuzzing engine by
11
//! panicking.
12
13
pub mod component_api;
14
pub mod component_async;
15
#[cfg(feature = "fuzz-spec-interpreter")]
16
pub mod diff_spec;
17
pub mod diff_wasmi;
18
pub mod diff_wasmtime;
19
pub mod dummy;
20
pub mod engine;
21
pub mod memory;
22
mod stacks;
23
24
use self::diff_wasmtime::WasmtimeInstance;
25
use self::engine::{DiffEngine, DiffInstance};
26
use crate::generators::GcOps;
27
use crate::generators::{self, CompilerStrategy, DiffValue, DiffValueType};
28
use crate::single_module_fuzzer::KnownValid;
29
use crate::{YieldN, block_on};
30
use arbitrary::Arbitrary;
31
pub use stacks::check_stacks;
32
use std::future::Future;
33
use std::pin::Pin;
34
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering::SeqCst};
35
use std::sync::{Arc, Condvar, Mutex};
36
use std::task::{Context, Poll};
37
use std::time::{Duration, Instant};
38
use wasmtime::*;
39
use wasmtime_wast::WastContext;
40
41
#[cfg(not(any(windows, target_arch = "s390x", target_arch = "riscv64")))]
42
mod diff_v8;
43
44
// Process-wide counter used to generate unique `testcase{N}.wasm` /
// `testcase{N}.wat` file names across all calls to `log_wasm`.
static CNT: AtomicUsize = AtomicUsize::new(0);

/// Logs a wasm file to the filesystem to make it easy to figure out what wasm
/// was used when debugging.
///
/// This is a no-op unless debug-level logging is enabled. When enabled, the
/// raw bytes are written to `testcaseN.wasm` in the current directory and, if
/// the module can be printed, its text form to `testcaseN.wat`.
pub fn log_wasm(wasm: &[u8]) {
    super::init_fuzzing();

    // Gate all filesystem traffic on the debug log level; writing files is
    // only useful when someone is actively debugging a fuzz failure.
    if !log::log_enabled!(log::Level::Debug) {
        return;
    }

    let i = CNT.fetch_add(1, SeqCst);
    let name = format!("testcase{i}.wasm");
    std::fs::write(&name, wasm).expect("failed to write wasm file");
    log::debug!("wrote wasm file to `{name}`");
    let wat = format!("testcase{i}.wat");
    match wasmprinter::print_bytes(wasm) {
        Ok(s) => {
            std::fs::write(&wat, s).expect("failed to write wat file");
            log::debug!("wrote wat file to `{wat}`");
        }
        // If wasmprinter failed remove a `*.wat` file, if any, to avoid
        // confusing a preexisting one with this wasm which failed to get
        // printed.
        Err(e) => {
            log::debug!("failed to print to wat: {e}");
            drop(std::fs::remove_file(&wat));
        }
    }
}
74
75
/// The `T` in `Store<T>` for fuzzing stores, used to limit resource
/// consumption during fuzzing.
///
/// Cloning is cheap: the shared `LimitsState` sits behind an `Arc`, so all
/// clones of one `StoreLimits` observe and update the same counters (used by
/// `instantiate_many` to bound consumption across many stores at once).
#[derive(Clone)]
pub struct StoreLimits(Arc<LimitsState>);

/// Shared, atomically updated accounting state behind a `StoreLimits`.
struct LimitsState {
    /// Remaining memory, in bytes, left to allocate
    remaining_memory: AtomicUsize,
    /// Remaining amount of memory that's allowed to be copied via a growth.
    remaining_copy_allowance: AtomicUsize,
    /// Whether or not an allocation request has been denied
    oom: AtomicBool,
}

/// Allow up to 1G which is well below the 2G limit on OSS-Fuzz and should allow
/// most interesting behavior.
const MAX_MEMORY: usize = 1 << 30;

/// Allow up to 4G of bytes to be copied (conservatively) which should enable
/// growth up to `MAX_MEMORY` or at least up to a relatively large amount.
const MAX_MEMORY_MOVED: usize = 4 << 30;
96
97
impl StoreLimits {
98
/// Creates the default set of limits for all fuzzing stores.
99
pub fn new() -> StoreLimits {
100
StoreLimits(Arc::new(LimitsState {
101
remaining_memory: AtomicUsize::new(MAX_MEMORY),
102
remaining_copy_allowance: AtomicUsize::new(MAX_MEMORY_MOVED),
103
oom: AtomicBool::new(false),
104
}))
105
}
106
107
fn alloc(&mut self, amt: usize) -> bool {
108
log::trace!("alloc {amt:#x} bytes");
109
110
// Assume that on each allocation of memory that all previous
111
// allocations of memory are moved. This is pretty coarse but is used to
112
// help prevent against fuzz test cases that just move tons of bytes
113
// around continuously. This assumes that all previous memory was
114
// allocated in a single linear memory and growing by `amt` will require
115
// moving all the bytes to a new location. This isn't actually required
116
// all the time nor does it accurately reflect what happens all the
117
// time, but it's a coarse approximation that should be "good enough"
118
// for allowing interesting fuzz behaviors to happen while not timing
119
// out just copying bytes around.
120
let prev_size = MAX_MEMORY - self.0.remaining_memory.load(SeqCst);
121
if self
122
.0
123
.remaining_copy_allowance
124
.fetch_update(SeqCst, SeqCst, |remaining| remaining.checked_sub(prev_size))
125
.is_err()
126
{
127
self.0.oom.store(true, SeqCst);
128
log::debug!("-> too many bytes moved, rejecting allocation");
129
return false;
130
}
131
132
// If we're allowed to move the bytes, then also check if we're allowed
133
// to actually have this much residence at once.
134
match self
135
.0
136
.remaining_memory
137
.fetch_update(SeqCst, SeqCst, |remaining| remaining.checked_sub(amt))
138
{
139
Ok(_) => true,
140
Err(_) => {
141
self.0.oom.store(true, SeqCst);
142
log::debug!("-> OOM hit");
143
false
144
}
145
}
146
}
147
148
fn is_oom(&self) -> bool {
149
self.0.oom.load(SeqCst)
150
}
151
}
152
153
impl ResourceLimiter for StoreLimits {
    /// Approves or denies a linear memory growth from `current` to `desired`
    /// bytes by charging the delta against this store's global limits.
    fn memory_growing(
        &mut self,
        current: usize,
        desired: usize,
        _maximum: Option<usize>,
    ) -> Result<bool> {
        // `desired` should never be below `current` for a growth request, but
        // use a saturating subtraction so an unexpected shrink can't underflow
        // (panicking in debug builds or wrapping to a huge charge in release);
        // it would instead charge zero bytes.
        Ok(self.alloc(desired.saturating_sub(current)))
    }

    /// Approves or denies a table growth by charging the new elements,
    /// approximated as one pointer each, against the limits.
    fn table_growing(
        &mut self,
        current: usize,
        desired: usize,
        _maximum: Option<usize>,
    ) -> Result<bool> {
        // Convert the element-count delta to an (approximate) byte count
        // before charging it; saturate on both operations for the same
        // underflow/overflow reasons as in `memory_growing`.
        let delta = desired
            .saturating_sub(current)
            .saturating_mul(std::mem::size_of::<usize>());
        Ok(self.alloc(delta))
    }
}
173
174
/// Methods of timing out execution of a WebAssembly module
///
/// See `instantiate` for how each variant is applied to a `Store`.
#[derive(Clone, Debug)]
pub enum Timeout {
    /// No timeout is used, it should be guaranteed via some other means that
    /// the input does not infinite loop.
    None,
    /// Fuel-based timeouts are used where the specified fuel is all that the
    /// provided wasm module is allowed to consume.
    Fuel(u64),
    /// An epoch-interruption-based timeout is used with a sleeping
    /// thread bumping the epoch counter after the specified duration.
    Epoch(Duration),
}
187
188
/// Instantiate the Wasm buffer, and implicitly fail if we have an unexpected
/// panic or segfault or anything else that can be detected "passively".
///
/// The engine will be configured using provided config.
pub fn instantiate(
    wasm: &[u8],
    known_valid: KnownValid,
    config: &generators::Config,
    timeout: Timeout,
) {
    let mut store = config.to_store();

    // Tolerated compile failures (see `compile_module`) simply discard the
    // input.
    let module = match compile_module(store.engine(), wasm, known_valid, config) {
        Some(module) => module,
        None => return,
    };

    let mut timeout_state = HelperThread::default();
    match timeout {
        Timeout::Fuel(fuel) => store.set_fuel(fuel).unwrap(),

        // If a timeout is requested then we spawn a helper thread to wait for
        // the requested time and then send us a signal to get interrupted. We
        // also arrange for the thread's sleep to get interrupted if we return
        // early (or the wasm returns within the time limit), which allows the
        // thread to get torn down.
        //
        // This prevents us from creating a huge number of sleeping threads if
        // this function is executed in a loop, like it does on nightly fuzzing
        // infrastructure.
        Timeout::Epoch(timeout) => {
            let engine = store.engine().clone();
            timeout_state.run_periodically(timeout, move || engine.increment_epoch());
        }
        Timeout::None => {}
    }

    // Instantiation itself is the test; unexpected failures panic inside
    // `instantiate_with_dummy` / `unwrap_instance`.
    instantiate_with_dummy(&mut store, &module);
}
227
228
/// Represents supported commands to the `instantiate_many` function.
///
/// Indices in both variants are reduced modulo the relevant collection size,
/// so any fuzz-generated `usize` is a valid command.
#[derive(Arbitrary, Debug)]
pub enum Command {
    /// Instantiates a module.
    ///
    /// The value is the index of the module to instantiate.
    ///
    /// The module instantiated will be this value modulo the number of modules provided to `instantiate_many`.
    Instantiate(usize),
    /// Terminates a "running" instance.
    ///
    /// The value is the index of the instance to terminate.
    ///
    /// The instance terminated will be this value modulo the number of currently running
    /// instances.
    ///
    /// If no instances are running, the command will be ignored.
    Terminate(usize),
}
247
248
/// Instantiates many instances from the given modules.
249
///
250
/// The engine will be configured using the provided config.
251
///
252
/// The modules are expected to *not* have start functions as no timeouts are configured.
253
pub fn instantiate_many(
254
modules: &[Vec<u8>],
255
known_valid: KnownValid,
256
config: &generators::Config,
257
commands: &[Command],
258
) {
259
log::debug!("instantiate_many: {commands:#?}");
260
261
assert!(!config.module_config.config.allow_start_export);
262
263
let engine = Engine::new(&config.to_wasmtime()).unwrap();
264
265
let modules = modules
266
.iter()
267
.enumerate()
268
.filter_map(
269
|(i, bytes)| match compile_module(&engine, bytes, known_valid, config) {
270
Some(m) => {
271
log::debug!("successfully compiled module {i}");
272
Some(m)
273
}
274
None => {
275
log::debug!("failed to compile module {i}");
276
None
277
}
278
},
279
)
280
.collect::<Vec<_>>();
281
282
// If no modules were valid, we're done
283
if modules.is_empty() {
284
return;
285
}
286
287
// This stores every `Store` where a successful instantiation takes place
288
let mut stores = Vec::new();
289
let limits = StoreLimits::new();
290
291
for command in commands {
292
match command {
293
Command::Instantiate(index) => {
294
let index = *index % modules.len();
295
log::info!("instantiating {index}");
296
let module = &modules[index];
297
let mut store = Store::new(&engine, limits.clone());
298
config.configure_store(&mut store);
299
300
if instantiate_with_dummy(&mut store, module).is_some() {
301
stores.push(Some(store));
302
} else {
303
log::warn!("instantiation failed");
304
}
305
}
306
Command::Terminate(index) => {
307
if stores.is_empty() {
308
continue;
309
}
310
let index = *index % stores.len();
311
312
log::info!("dropping {index}");
313
stores.swap_remove(index);
314
}
315
}
316
}
317
}
318
319
/// Compiles `bytes` with `engine`, returning `None` for failures that are
/// acceptable while fuzzing and panicking for failures that indicate a bug.
///
/// The wasm is logged first (see `log_wasm`) so failures are reproducible.
/// When `known_valid` is `KnownValid::No`, arbitrary compile errors simply
/// discard the test case; otherwise compile errors panic unless they stem
/// from pooling-allocator limits that generated modules can legitimately
/// exceed.
fn compile_module(
    engine: &Engine,
    bytes: &[u8],
    known_valid: KnownValid,
    config: &generators::Config,
) -> Option<Module> {
    log_wasm(bytes);

    fn is_pcc_error(e: &wasmtime::Error) -> bool {
        // NOTE: please keep this predicate in sync with the display format of CodegenError,
        // defined in `wasmtime/cranelift/codegen/src/result.rs`
        e.to_string().to_lowercase().contains("proof-carrying-code")
    }

    match config.compile(engine, bytes) {
        Ok(module) => Some(module),
        // A proof-carrying-code failure is always reported, even for inputs
        // not known to be valid.
        Err(e) if is_pcc_error(&e) => {
            panic!("pcc error in input: {e:#?}");
        }
        Err(_) if known_valid == KnownValid::No => None,
        Err(e) => {
            if let generators::InstanceAllocationStrategy::Pooling(c) = &config.wasmtime.strategy {
                // When using the pooling allocator, accept failures to compile
                // when arbitrary table element limits have been exceeded as
                // there is currently no way to constrain the generated module
                // table types.
                let string = format!("{e:?}");
                if string.contains("minimum element size") {
                    return None;
                }

                // Allow modules-failing-to-compile which exceed the requested
                // size for each instance. This is something that is difficult
                // to control and ensure it always succeeds, so we simply have a
                // "random" instance size limit and if a module doesn't fit we
                // move on to the next fuzz input.
                if string.contains("instance allocation for this module requires") {
                    return None;
                }

                // If the pooling allocator is more restrictive on the number of
                // tables and memories than we allowed wasm-smith to generate
                // then allow compilation errors along those lines.
                if c.max_tables_per_module < (config.module_config.config.max_tables as u32)
                    && string.contains("defined tables count")
                    && string.contains("exceeds the per-instance limit")
                {
                    return None;
                }

                if c.max_memories_per_module < (config.module_config.config.max_memories as u32)
                    && string.contains("defined memories count")
                    && string.contains("exceeds the per-instance limit")
                {
                    return None;
                }
            }

            panic!("failed to compile module: {e:?}");
        }
    }
}
381
382
/// Create a Wasmtime [`Instance`] from a [`Module`] and fill in all imports
383
/// with dummy values (e.g., zeroed values, immediately-trapping functions).
384
/// Also, this function catches certain fuzz-related instantiation failures and
385
/// returns `None` instead of panicking.
386
///
387
/// TODO: we should implement tracing versions of these dummy imports that
388
/// record a trace of the order that imported functions were called in and with
389
/// what values. Like the results of exported functions, calls to imports should
390
/// also yield the same values for each configuration, and we should assert
391
/// that.
392
pub fn instantiate_with_dummy(store: &mut Store<StoreLimits>, module: &Module) -> Option<Instance> {
393
// Creation of imports can fail due to resource limit constraints, and then
394
// instantiation can naturally fail for a number of reasons as well. Bundle
395
// the two steps together to match on the error below.
396
let linker = dummy::dummy_linker(store, module);
397
if let Err(e) = &linker {
398
log::warn!("failed to create dummy linker: {e:?}");
399
}
400
let instance = linker.and_then(|l| l.instantiate(&mut *store, module));
401
unwrap_instance(store, instance)
402
}
403
404
fn unwrap_instance(
405
store: &Store<StoreLimits>,
406
instance: wasmtime::Result<Instance>,
407
) -> Option<Instance> {
408
let e = match instance {
409
Ok(i) => return Some(i),
410
Err(e) => e,
411
};
412
413
log::debug!("failed to instantiate: {e:?}");
414
415
// If the instantiation hit OOM for some reason then that's ok, it's
416
// expected that fuzz-generated programs try to allocate lots of
417
// stuff.
418
if store.data().is_oom() {
419
return None;
420
}
421
422
// Allow traps which can happen normally with `unreachable` or a timeout or
423
// such.
424
if e.is::<Trap>()
425
// Also allow failures to instantiate as a result of hitting pooling
426
// limits.
427
|| e.is::<wasmtime::PoolConcurrencyLimitError>()
428
// And GC heap OOMs.
429
|| e.is::<wasmtime::GcHeapOutOfMemory<()>>()
430
// And thrown exceptions.
431
|| e.is::<wasmtime::ThrownException>()
432
{
433
return None;
434
}
435
436
let string = e.to_string();
437
438
// Currently we instantiate with a `Linker` which can't instantiate
439
// every single module under the sun due to using name-based resolution
440
// rather than positional-based resolution
441
if string.contains("incompatible import type") {
442
return None;
443
}
444
445
// Everything else should be a bug in the fuzzer or a bug in wasmtime
446
panic!("failed to instantiate: {e:?}");
447
}
448
449
/// Evaluate the function identified by `name` in two different engine
/// instances--`lhs` and `rhs`.
///
/// Returns `Ok(true)` if more evaluations can happen or `Ok(false)` if the
/// instances may have drifted apart and no more evaluations can happen.
///
/// # Panics
///
/// This will panic if the evaluation is different between engines (e.g.,
/// results are different, hashed instance is different, one side traps, etc.).
pub fn differential(
    lhs: &mut dyn DiffInstance,
    lhs_engine: &dyn DiffEngine,
    rhs: &mut WasmtimeInstance,
    name: &str,
    args: &[DiffValue],
    result_tys: &[DiffValueType],
) -> wasmtime::Result<bool> {
    log::debug!("Evaluating: `{name}` with {args:?}");
    let lhs_results = match lhs.evaluate(name, args, result_tys) {
        Ok(Some(results)) => Ok(results),
        Err(e) => Err(e),
        // this engine couldn't execute this type signature, so discard this
        // execution by returning success.
        Ok(None) => return Ok(true),
    };
    log::debug!(" -> lhs results on {}: {:?}", lhs.name(), &lhs_results);

    let rhs_results = rhs
        .evaluate(name, args, result_tys)
        // wasmtime should be able to invoke any signature, so unwrap this result
        .map(|results| results.unwrap());
    log::debug!(" -> rhs results on {}: {:?}", rhs.name(), &rhs_results);

    // If Wasmtime hit its OOM condition, which is possible since it's set
    // somewhat low while fuzzing, then don't return an error but return
    // `false` indicating that differential fuzzing must stop. There's no
    // guarantee the other engine has the same OOM limits as Wasmtime, and
    // it's assumed that Wasmtime is configured to have a more conservative
    // limit than the other engine.
    if rhs.is_oom() {
        return Ok(false);
    }

    // Classify the pair of results: equal successes must match exactly,
    // poisoned executions stop differential fuzzing, and matching failures
    // fall through to the state comparison below.
    match DiffEqResult::new(lhs_engine, lhs_results, rhs_results) {
        DiffEqResult::Success(lhs, rhs) => assert_eq!(lhs, rhs),
        DiffEqResult::Poisoned => return Ok(false),
        DiffEqResult::Failed => {}
    }

    // Compare exported globals; `lhs` may not expose a given global (skip it)
    // while Wasmtime always can.
    for (global, ty) in rhs.exported_globals() {
        log::debug!("Comparing global `{global}`");
        let lhs = match lhs.get_global(&global, ty) {
            Some(val) => val,
            None => continue,
        };
        let rhs = rhs.get_global(&global, ty).unwrap();
        assert_eq!(lhs, rhs);
    }
    // Compare exported memories byte-for-byte, again skipping memories the
    // lhs engine cannot expose.
    for (memory, shared) in rhs.exported_memories() {
        log::debug!("Comparing memory `{memory}`");
        let lhs = match lhs.get_memory(&memory, shared) {
            Some(val) => val,
            None => continue,
        };
        let rhs = rhs.get_memory(&memory, shared).unwrap();
        if lhs == rhs {
            continue;
        }
        eprintln!("differential memory is {} bytes long", lhs.len());
        eprintln!("wasmtime memory is {} bytes long", rhs.len());
        panic!("memories have differing values");
    }

    Ok(true)
}
525
526
/// Result of comparing the result of two operations during differential
/// execution.
///
/// Produced by [`DiffEqResult::new`]; see `differential` for how each variant
/// is acted upon.
pub enum DiffEqResult<T, U> {
    /// Both engines succeeded.
    Success(T, U),
    /// The result has reached the state where engines may have diverged and
    /// results can no longer be compared.
    Poisoned,
    /// Both engines failed with the same error message, and internal state
    /// should still match between the two engines.
    Failed,
}
538
539
fn wasmtime_trap_is_non_deterministic(trap: &Trap) -> bool {
540
match trap {
541
// Allocations being too large for the GC are
542
// implementation-defined.
543
Trap::AllocationTooLarge |
544
// Stack size, and therefore when overflow happens, is
545
// implementation-defined.
546
Trap::StackOverflow => true,
547
_ => false,
548
}
549
}
550
551
fn wasmtime_error_is_non_deterministic(error: &wasmtime::Error) -> bool {
552
match error.downcast_ref::<Trap>() {
553
Some(trap) => wasmtime_trap_is_non_deterministic(trap),
554
555
// For general, unknown errors, we can't rely on this being
556
// a deterministic Wasm failure that both engines handled
557
// identically, leaving Wasm in identical states. We could
558
// just as easily be hitting engine-specific failures, like
559
// different implementation-defined limits. So simply poison
560
// this execution and move on to the next test.
561
None => true,
562
}
563
}
564
565
impl<T, U> DiffEqResult<T, U> {
    /// Computes the differential result from executing in two different
    /// engines.
    ///
    /// `lhs_result` is from the engine under differential test and
    /// `rhs_result` is from Wasmtime itself. Panics when exactly one side
    /// fails, since that is a real behavioral divergence.
    pub fn new(
        lhs_engine: &dyn DiffEngine,
        lhs_result: Result<T>,
        rhs_result: Result<U>,
    ) -> DiffEqResult<T, U> {
        match (lhs_result, rhs_result) {
            (Ok(lhs_result), Ok(rhs_result)) => DiffEqResult::Success(lhs_result, rhs_result),

            // Handle all non-deterministic errors by poisoning this execution's
            // state, so that we simply move on to the next test.
            (Err(lhs), _) if lhs_engine.is_non_deterministic_error(&lhs) => {
                log::debug!("lhs failed non-deterministically: {lhs:?}");
                DiffEqResult::Poisoned
            }
            (_, Err(rhs)) if wasmtime_error_is_non_deterministic(&rhs) => {
                log::debug!("rhs failed non-deterministically: {rhs:?}");
                DiffEqResult::Poisoned
            }

            // Both sides failed deterministically. Check that the trap and
            // state at the time of failure is the same.
            (Err(lhs), Err(rhs)) => {
                // Non-trap errors were already classified non-deterministic by
                // the guard above, so this downcast cannot fail.
                let rhs = rhs
                    .downcast::<Trap>()
                    .expect("non-traps handled in earlier match arm");

                debug_assert!(
                    !lhs_engine.is_non_deterministic_error(&lhs),
                    "non-deterministic traps handled in earlier match arm",
                );
                debug_assert!(
                    !wasmtime_trap_is_non_deterministic(&rhs),
                    "non-deterministic traps handled in earlier match arm",
                );

                lhs_engine.assert_error_match(&lhs, &rhs);
                DiffEqResult::Failed
            }

            // A real bug is found if only one side fails.
            (Ok(_), Err(err)) => panic!("only the `rhs` failed for this input: {err:?}"),
            (Err(err), Ok(_)) => panic!("only the `lhs` failed for this input: {err:?}"),
        }
    }
}
613
614
/// Invoke the given API calls.
///
/// Interprets the fuzz-generated `ApiCalls` script against the Wasmtime
/// embedder API: creating a store, compiling/dropping modules,
/// instantiating/dropping instances, and calling exported functions with
/// default-valued arguments. Failures that fuzzing legitimately produces
/// (invalid modules, failed instantiations, trapping calls) are skipped
/// rather than reported.
pub fn make_api_calls(api: generators::api::ApiCalls) {
    use crate::generators::api::ApiCall;
    use std::collections::HashMap;

    let mut store: Option<Store<StoreLimits>> = None;
    // Modules and instances are tracked by their fuzz-generated ids.
    let mut modules: HashMap<usize, Module> = Default::default();
    let mut instances: HashMap<usize, Instance> = Default::default();

    for call in api.calls {
        match call {
            ApiCall::StoreNew(config) => {
                log::trace!("creating store");
                // The generator is expected to emit at most one `StoreNew`.
                assert!(store.is_none());
                store = Some(config.to_store());
            }

            ApiCall::ModuleNew { id, wasm } => {
                log::debug!("creating module: {id}");
                log_wasm(&wasm);
                // Invalid wasm is acceptable here; just skip the call.
                let module = match Module::new(store.as_ref().unwrap().engine(), &wasm) {
                    Ok(m) => m,
                    Err(_) => continue,
                };
                // Ids are expected to be unique per generated script.
                let old = modules.insert(id, module);
                assert!(old.is_none());
            }

            ApiCall::ModuleDrop { id } => {
                log::trace!("dropping module: {id}");
                drop(modules.remove(&id));
            }

            ApiCall::InstanceNew { id, module } => {
                log::trace!("instantiating module {module} as {id}");
                // The referenced module may have failed to compile or been
                // dropped already.
                let module = match modules.get(&module) {
                    Some(m) => m,
                    None => continue,
                };

                let store = store.as_mut().unwrap();
                if let Some(instance) = instantiate_with_dummy(store, module) {
                    instances.insert(id, instance);
                }
            }

            ApiCall::InstanceDrop { id } => {
                log::trace!("dropping instance {id}");
                instances.remove(&id);
            }

            ApiCall::CallExportedFunc { instance, nth } => {
                log::trace!("calling instance export {instance} / {nth}");
                let instance = match instances.get(&instance) {
                    Some(i) => i,
                    None => {
                        // Note that we aren't guaranteed to instantiate valid
                        // modules, see comments in `InstanceNew` for details on
                        // that. But the API call generator can't know if
                        // instantiation failed, so we might not actually have
                        // this instance. When that's the case, just skip the
                        // API call and keep going.
                        continue;
                    }
                };
                let store = store.as_mut().unwrap();

                // Collect all exported functions; `nth` selects one modulo the
                // count.
                let funcs = instance
                    .exports(&mut *store)
                    .filter_map(|e| match e.into_extern() {
                        Extern::Func(f) => Some(f),
                        _ => None,
                    })
                    .collect::<Vec<_>>();

                if funcs.is_empty() {
                    continue;
                }

                let nth = nth % funcs.len();
                let f = &funcs[nth];
                let ty = f.ty(&store);
                // Only call the function when every parameter type has a
                // default value; the result (including any trap) is ignored.
                if let Some(params) = ty
                    .params()
                    .map(|p| p.default_value())
                    .collect::<Option<Vec<_>>>()
                {
                    let mut results = vec![Val::I32(0); ty.results().len()];
                    let _ = f.call(store, &params, &mut results);
                }
            }
        }
    }
}
708
709
/// Executes the wast `test` with the `config` specified.
///
/// Ensures that wast tests pass regardless of the `Config`.
///
/// Returns `Err(arbitrary::Error::IncorrectFormat)` to discard inputs whose
/// generated configuration cannot be expected to pass the chosen test.
pub fn wast_test(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<()> {
    crate::init_fuzzing();

    let mut fuzz_config: generators::Config = u.arbitrary()?;
    fuzz_config.module_config.shared_memory = true;
    let test: generators::WastTest = u.arbitrary()?;

    let test = &test.test;

    // Component-model-async tests require async support; otherwise enable it
    // randomly for extra configuration coverage.
    if test.config.component_model_async() || u.arbitrary()? {
        fuzz_config.enable_async(u)?;
    }

    // Discard tests that allocate a lot of memory as we don't want to OOM the
    // fuzzer and we also limit memory growth which would cause the test to
    // fail.
    if test.config.hogs_memory.unwrap_or(false) {
        return Err(arbitrary::Error::IncorrectFormat);
    }

    // Transform `fuzz_config` to be valid for `test` and make sure that this
    // test is supposed to pass.
    let wast_config = fuzz_config.make_wast_test_compliant(test);
    if test.should_fail(&wast_config) {
        return Err(arbitrary::Error::IncorrectFormat);
    }

    // Winch requires AVX and AVX2 for SIMD tests to pass so don't run the test
    // if either isn't enabled.
    if fuzz_config.wasmtime.compiler_strategy == CompilerStrategy::Winch
        && test.config.simd()
        && (fuzz_config
            .wasmtime
            .codegen_flag("has_avx")
            .is_some_and(|value| value == "false")
            || fuzz_config
                .wasmtime
                .codegen_flag("has_avx2")
                .is_some_and(|value| value == "false"))
    {
        log::warn!(
            "Skipping Wast test because Winch doesn't support SIMD tests with AVX or AVX2 disabled"
        );
        return Err(arbitrary::Error::IncorrectFormat);
    }

    // Fuel and epochs don't play well with threads right now, so exclude any
    // thread-spawning test if it looks like threads are spawned in that case.
    if fuzz_config.wasmtime.consume_fuel || fuzz_config.wasmtime.epoch_interruption {
        if test.contents.contains("(thread") {
            return Err(arbitrary::Error::IncorrectFormat);
        }
    }

    log::debug!("running {:?}", test.path);
    let async_ = if fuzz_config.wasmtime.async_config == generators::AsyncConfig::Disabled {
        wasmtime_wast::Async::No
    } else {
        wasmtime_wast::Async::Yes
    };
    log::debug!("async: {async_:?}");
    let engine = Engine::new(&fuzz_config.to_wasmtime()).unwrap();
    let mut wast_context = WastContext::new(&engine, async_, move |store| {
        fuzz_config.configure_store_epoch_and_fuel(store);
    });
    wast_context
        .register_spectest(&wasmtime_wast::SpectestConfig {
            use_shared_memory: true,
            suppress_prints: true,
        })
        .unwrap();
    // The config was made compliant above, so any failure actually running
    // the wast script is a bug and panics via `unwrap`.
    wast_context
        .run_wast(test.path.to_str().unwrap(), test.contents.as_bytes())
        .unwrap();
    Ok(())
}
788
789
/// Execute a series of `gc` operations.
790
///
791
/// Returns the number of `gc` operations which occurred throughout the test
792
/// case -- used to test below that gc happens reasonably soon and eventually.
793
pub fn gc_ops(mut fuzz_config: generators::Config, mut ops: GcOps) -> Result<usize> {
794
let expected_drops = Arc::new(AtomicUsize::new(0));
795
let num_dropped = Arc::new(AtomicUsize::new(0));
796
797
let num_gcs = Arc::new(AtomicUsize::new(0));
798
{
799
fuzz_config.wasmtime.consume_fuel = true;
800
let mut store = fuzz_config.to_store();
801
store.set_fuel(1_000).unwrap();
802
803
let wasm = ops.to_wasm_binary();
804
log_wasm(&wasm);
805
let module = match compile_module(store.engine(), &wasm, KnownValid::No, &fuzz_config) {
806
Some(m) => m,
807
None => return Ok(0),
808
};
809
810
let mut linker = Linker::new(store.engine());
811
812
// To avoid timeouts, limit the number of explicit GCs we perform per
813
// test case.
814
const MAX_GCS: usize = 5;
815
816
let func_ty = FuncType::new(
817
store.engine(),
818
vec![],
819
vec![ValType::EXTERNREF, ValType::EXTERNREF, ValType::EXTERNREF],
820
);
821
let func = Func::new(&mut store, func_ty, {
822
let num_dropped = num_dropped.clone();
823
let expected_drops = expected_drops.clone();
824
let num_gcs = num_gcs.clone();
825
move |mut caller: Caller<'_, StoreLimits>, _params, results| {
826
log::info!("gc_ops: GC");
827
if num_gcs.fetch_add(1, SeqCst) < MAX_GCS {
828
caller.gc(None)?;
829
}
830
831
let a = ExternRef::new(
832
&mut caller,
833
CountDrops::new(&expected_drops, num_dropped.clone()),
834
)?;
835
let b = ExternRef::new(
836
&mut caller,
837
CountDrops::new(&expected_drops, num_dropped.clone()),
838
)?;
839
let c = ExternRef::new(
840
&mut caller,
841
CountDrops::new(&expected_drops, num_dropped.clone()),
842
)?;
843
844
log::info!("gc_ops: gc() -> ({a:?}, {b:?}, {c:?})");
845
results[0] = Some(a).into();
846
results[1] = Some(b).into();
847
results[2] = Some(c).into();
848
Ok(())
849
}
850
});
851
linker.define(&store, "", "gc", func).unwrap();
852
853
linker
854
.func_wrap("", "take_refs", {
855
let expected_drops = expected_drops.clone();
856
move |caller: Caller<'_, StoreLimits>,
857
a: Option<Rooted<ExternRef>>,
858
b: Option<Rooted<ExternRef>>,
859
c: Option<Rooted<ExternRef>>|
860
-> Result<()> {
861
log::info!("gc_ops: take_refs({a:?}, {b:?}, {c:?})",);
862
863
// Do the assertion on each ref's inner data, even though it
864
// all points to the same atomic, so that if we happen to
865
// run into a use-after-free bug with one of these refs we
866
// are more likely to trigger a segfault.
867
if let Some(a) = a {
868
let a = a
869
.data(&caller)?
870
.unwrap()
871
.downcast_ref::<CountDrops>()
872
.unwrap();
873
assert!(a.0.load(SeqCst) <= expected_drops.load(SeqCst));
874
}
875
if let Some(b) = b {
876
let b = b
877
.data(&caller)?
878
.unwrap()
879
.downcast_ref::<CountDrops>()
880
.unwrap();
881
assert!(b.0.load(SeqCst) <= expected_drops.load(SeqCst));
882
}
883
if let Some(c) = c {
884
let c = c
885
.data(&caller)?
886
.unwrap()
887
.downcast_ref::<CountDrops>()
888
.unwrap();
889
assert!(c.0.load(SeqCst) <= expected_drops.load(SeqCst));
890
}
891
Ok(())
892
}
893
})
894
.unwrap();
895
896
let func_ty = FuncType::new(
897
store.engine(),
898
vec![],
899
vec![ValType::EXTERNREF, ValType::EXTERNREF, ValType::EXTERNREF],
900
);
901
let func = Func::new(&mut store, func_ty, {
902
let num_dropped = num_dropped.clone();
903
let expected_drops = expected_drops.clone();
904
move |mut caller, _params, results| {
905
log::info!("gc_ops: make_refs");
906
907
let a = ExternRef::new(
908
&mut caller,
909
CountDrops::new(&expected_drops, num_dropped.clone()),
910
)?;
911
let b = ExternRef::new(
912
&mut caller,
913
CountDrops::new(&expected_drops, num_dropped.clone()),
914
)?;
915
let c = ExternRef::new(
916
&mut caller,
917
CountDrops::new(&expected_drops, num_dropped.clone()),
918
)?;
919
920
log::info!("gc_ops: make_refs() -> ({a:?}, {b:?}, {c:?})");
921
922
results[0] = Some(a).into();
923
results[1] = Some(b).into();
924
results[2] = Some(c).into();
925
926
Ok(())
927
}
928
});
929
linker.define(&store, "", "make_refs", func).unwrap();
930
931
let func_ty = FuncType::new(
932
store.engine(),
933
vec![ValType::Ref(RefType::new(true, HeapType::Struct))],
934
vec![],
935
);
936
937
let func = Func::new(&mut store, func_ty, {
938
move |_caller: Caller<'_, StoreLimits>, _params, _results| {
939
log::info!("gc_ops: take_struct(<ref null struct>)");
940
Ok(())
941
}
942
});
943
944
linker.define(&store, "", "take_struct", func).unwrap();
945
946
for imp in module.imports() {
947
if imp.module() == "" {
948
let name = imp.name();
949
if name.starts_with("take_struct_") {
950
if let wasmtime::ExternType::Func(ft) = imp.ty() {
951
let imp_name = name.to_string();
952
let func =
953
Func::new(&mut store, ft.clone(), move |_caller, _params, _results| {
954
log::info!("gc_ops: {imp_name}(<typed structref>)");
955
Ok(())
956
});
957
linker.define(&store, "", name, func).unwrap();
958
}
959
}
960
}
961
}
962
963
let instance = linker.instantiate(&mut store, &module).unwrap();
964
let run = instance.get_func(&mut store, "run").unwrap();
965
966
{
967
let mut scope = RootScope::new(&mut store);
968
969
log::info!(
970
"gc_ops: begin allocating {} externref arguments",
971
ops.limits.num_globals
972
);
973
let args: Vec<_> = (0..ops.limits.num_params)
974
.map(|_| {
975
Ok(Val::ExternRef(Some(ExternRef::new(
976
&mut scope,
977
CountDrops::new(&expected_drops, num_dropped.clone()),
978
)?)))
979
})
980
.collect::<Result<_>>()?;
981
log::info!(
982
"gc_ops: end allocating {} externref arguments",
983
ops.limits.num_globals
984
);
985
986
// The generated function should always return a trap. The only two
987
// valid traps are table-out-of-bounds which happens through `table.get`
988
// and `table.set` generated or an out-of-fuel trap. Otherwise any other
989
// error is unexpected and should fail fuzzing.
990
log::info!("gc_ops: calling into Wasm `run` function");
991
let err = run.call(&mut scope, &args, &mut []).unwrap_err();
992
if err.is::<GcHeapOutOfMemory<CountDrops>>() || err.is::<GcHeapOutOfMemory<()>>() {
993
// Accept GC OOM as an allowed outcome for this fuzzer.
994
} else {
995
let trap = err
996
.downcast::<Trap>()
997
.expect("if not GC oom, error should be a Wasm trap");
998
match trap {
999
Trap::TableOutOfBounds | Trap::OutOfFuel | Trap::AllocationTooLarge => {}
1000
_ => panic!("unexpected trap: {trap}"),
1001
}
1002
}
1003
}
1004
1005
// Do a final GC after running the Wasm.
1006
store.gc(None)?;
1007
}
1008
1009
assert_eq!(num_dropped.load(SeqCst), expected_drops.load(SeqCst));
1010
return Ok(num_gcs.load(SeqCst));
1011
1012
struct CountDrops(Arc<AtomicUsize>);
1013
1014
impl CountDrops {
1015
fn new(expected_drops: &AtomicUsize, num_dropped: Arc<AtomicUsize>) -> Self {
1016
let expected = expected_drops.fetch_add(1, SeqCst);
1017
log::info!(
1018
"CountDrops::new: expected drops: {expected} -> {}",
1019
expected + 1
1020
);
1021
Self(num_dropped)
1022
}
1023
}
1024
1025
impl Drop for CountDrops {
1026
fn drop(&mut self) {
1027
let drops = self.0.fetch_add(1, SeqCst);
1028
log::info!("CountDrops::drop: actual drops: {drops} -> {}", drops + 1);
1029
}
1030
}
1031
}
1032
1033
/// Owner of a background thread spawned by `run_periodically` which invokes
/// a closure at a fixed interval until this struct is dropped.
#[derive(Default)]
struct HelperThread {
    // State shared with the spawned thread; used to signal it to exit.
    state: Arc<HelperThreadState>,
    // Handle to the spawned thread, if one was spawned; joined on drop.
    thread: Option<std::thread::JoinHandle<()>>,
}
1038
1039
/// Mutex/condvar pair shared between a `HelperThread` and its spawned thread
/// to coordinate shutdown.
#[derive(Default)]
struct HelperThreadState {
    // Set to `true` by `HelperThread::drop` to request that the thread exit.
    should_exit: Mutex<bool>,
    // Notified alongside writes to `should_exit` to wake a sleeping thread.
    should_exit_cvar: Condvar,
}
1044
1045
impl HelperThread {
    /// Spawns a background thread which invokes `closure` roughly every `dur`
    /// until this `HelperThread` is dropped.
    ///
    /// NOTE(review): each call overwrites `self.thread`, so only the most
    /// recently spawned thread is joined on drop — confirm callers only call
    /// this once per `HelperThread`.
    fn run_periodically(&mut self, dur: Duration, mut closure: impl FnMut() + Send + 'static) {
        let state = self.state.clone();
        self.thread = Some(std::thread::spawn(move || {
            // Using our mutex/condvar we wait here for `dur` to pass or for
            // the `HelperThread` instance to get dropped, whichever happens
            // first.
            let mut should_exit = state.should_exit.lock().unwrap();
            while !*should_exit {
                let (lock, result) = state
                    .should_exit_cvar
                    .wait_timeout(should_exit, dur)
                    .unwrap();
                should_exit = lock;
                // Only run the closure when the full timeout elapsed; a
                // wakeup without a timeout is either spurious or a shutdown
                // notification, and the loop condition re-checks
                // `should_exit` in both cases.
                if result.timed_out() {
                    closure();
                }
            }
        }));
    }
}
1067
1068
impl Drop for HelperThread {
1069
fn drop(&mut self) {
1070
let thread = match self.thread.take() {
1071
Some(thread) => thread,
1072
None => return,
1073
};
1074
// Signal our thread that it should exit and wake it up in case it's
1075
// sleeping.
1076
*self.state.should_exit.lock().unwrap() = true;
1077
self.state.should_exit_cvar.notify_one();
1078
1079
// ... and then wait for the thread to exit to ensure we clean up
1080
// after ourselves.
1081
thread.join().unwrap();
1082
}
1083
}
1084
1085
/// Instantiates a wasm module and runs its exports with dummy values, all in
1086
/// an async fashion.
1087
///
1088
/// Attempts to stress yields in host functions to ensure that exiting and
1089
/// resuming a wasm function call works.
1090
pub fn call_async(wasm: &[u8], config: &generators::Config, mut poll_amts: &[u32]) {
1091
let mut store = config.to_store();
1092
let module = match compile_module(store.engine(), wasm, KnownValid::Yes, config) {
1093
Some(module) => module,
1094
None => return,
1095
};
1096
1097
// Configure a helper thread to periodically increment the epoch to
1098
// forcibly enable yields-via-epochs if epochs are in use. Note that this
1099
// is required because the wasm isn't otherwise guaranteed to necessarily
1100
// call any imports which will also increment the epoch.
1101
let mut helper_thread = HelperThread::default();
1102
if let generators::AsyncConfig::YieldWithEpochs { dur, .. } = &config.wasmtime.async_config {
1103
let engine = store.engine().clone();
1104
helper_thread.run_periodically(*dur, move || engine.increment_epoch());
1105
}
1106
1107
// Generate a `Linker` where all function imports are custom-built to yield
1108
// periodically and additionally increment the epoch.
1109
let mut imports = Vec::new();
1110
for import in module.imports() {
1111
let item = match import.ty() {
1112
ExternType::Func(ty) => {
1113
let poll_amt = take_poll_amt(&mut poll_amts);
1114
Func::new_async(&mut store, ty.clone(), move |caller, _, results| {
1115
let ty = ty.clone();
1116
Box::new(async move {
1117
caller.engine().increment_epoch();
1118
log::info!("yielding {poll_amt} times in import");
1119
YieldN(poll_amt).await;
1120
for (ret_ty, result) in ty.results().zip(results) {
1121
*result = ret_ty.default_value().unwrap();
1122
}
1123
Ok(())
1124
})
1125
})
1126
.into()
1127
}
1128
other_ty => match other_ty.default_value(&mut store) {
1129
Ok(item) => item,
1130
Err(e) => {
1131
log::warn!("couldn't create import for {import:?}: {e:?}");
1132
return;
1133
}
1134
},
1135
};
1136
imports.push(item);
1137
}
1138
1139
// Run the instantiation process, asynchronously, and if everything
1140
// succeeds then pull out the instance.
1141
// log::info!("starting instantiation");
1142
let instance = block_on(Timeout {
1143
future: Instance::new_async(&mut store, &module, &imports),
1144
polls: take_poll_amt(&mut poll_amts),
1145
end: Instant::now() + Duration::from_millis(2_000),
1146
});
1147
let instance = match instance {
1148
Ok(instantiation_result) => match unwrap_instance(&store, instantiation_result) {
1149
Some(instance) => instance,
1150
None => {
1151
log::info!("instantiation hit a nominal error");
1152
return; // resource exhaustion or limits met
1153
}
1154
},
1155
Err(_) => {
1156
log::info!("instantiation failed to complete");
1157
return; // Timed out or ran out of polls
1158
}
1159
};
1160
1161
// Run each export of the instance in the same manner as instantiation
1162
// above. Dummy values are passed in for argument values here:
1163
//
1164
// TODO: this should probably be more clever about passing in arguments for
1165
// example they might be used as pointers or something and always using 0
1166
// isn't too interesting.
1167
let funcs = instance
1168
.exports(&mut store)
1169
.filter_map(|e| {
1170
let name = e.name().to_string();
1171
let func = e.into_extern().into_func()?;
1172
Some((name, func))
1173
})
1174
.collect::<Vec<_>>();
1175
for (name, func) in funcs {
1176
let ty = func.ty(&store);
1177
let params = ty
1178
.params()
1179
.map(|ty| ty.default_value().unwrap())
1180
.collect::<Vec<_>>();
1181
let mut results = ty
1182
.results()
1183
.map(|ty| ty.default_value().unwrap())
1184
.collect::<Vec<_>>();
1185
1186
log::info!("invoking export {name:?}");
1187
let future = func.call_async(&mut store, &params, &mut results);
1188
match block_on(Timeout {
1189
future,
1190
polls: take_poll_amt(&mut poll_amts),
1191
end: Instant::now() + Duration::from_millis(2_000),
1192
}) {
1193
// On success or too many polls, try the next export.
1194
Ok(_) | Err(Exhausted::Polls) => {}
1195
1196
// If time ran out then stop the current test case as we might have
1197
// already sucked up a lot of time for this fuzz test case so don't
1198
// keep it going.
1199
Err(Exhausted::Time) => return,
1200
}
1201
}
1202
1203
fn take_poll_amt(polls: &mut &[u32]) -> u32 {
1204
match polls.split_first() {
1205
Some((a, rest)) => {
1206
*polls = rest;
1207
*a
1208
}
1209
None => 0,
1210
}
1211
}
1212
1213
/// Helper future for applying a timeout to `future` up to either when `end`
1214
/// is the current time or `polls` polls happen.
1215
///
1216
/// Note that this helps to time out infinite loops in wasm, for example.
1217
struct Timeout<F> {
1218
future: F,
1219
/// If the future isn't ready by this time then the `Timeout<F>` future
1220
/// will return `None`.
1221
end: Instant,
1222
/// If the future doesn't resolve itself in this many calls to `poll`
1223
/// then the `Timeout<F>` future will return `None`.
1224
polls: u32,
1225
}
1226
1227
enum Exhausted {
1228
Time,
1229
Polls,
1230
}
1231
1232
impl<F: Future> Future for Timeout<F> {
1233
type Output = Result<F::Output, Exhausted>;
1234
1235
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
1236
let (end, polls, future) = unsafe {
1237
let me = self.get_unchecked_mut();
1238
(me.end, &mut me.polls, Pin::new_unchecked(&mut me.future))
1239
};
1240
match future.poll(cx) {
1241
Poll::Ready(val) => Poll::Ready(Ok(val)),
1242
Poll::Pending => {
1243
if Instant::now() >= end {
1244
log::warn!("future operation timed out");
1245
return Poll::Ready(Err(Exhausted::Time));
1246
}
1247
if *polls == 0 {
1248
log::warn!("future operation ran out of polls");
1249
return Poll::Ready(Err(Exhausted::Polls));
1250
}
1251
*polls -= 1;
1252
Poll::Pending
1253
}
1254
}
1255
}
1256
}
1257
}
1258
1259
#[cfg(test)]
mod tests {
    use super::*;
    use crate::test::{gen_until_pass, test_n_times};
    use wasmparser::{Validator, WasmFeatures};

    // Test that the `gc_ops` fuzzer eventually runs the gc function in the
    // host. We've historically had issues where this fuzzer accidentally
    // wasn't fuzzing anything for a long time so this is an attempt to
    // prevent that from happening again.
    #[test]
    fn gc_ops_eventually_gcs() {
        // Skip if we're under emulation because some fuzz configurations will
        // do large address space reservations that QEMU doesn't handle well.
        if std::env::var("WASMTIME_TEST_NO_HOG_MEMORY").is_ok() {
            return;
        }

        let found_gc = gen_until_pass(|(config, test), _| Ok(gc_ops(config, test)? > 0));
        assert!(found_gc, "gc was never found");
    }

    #[test]
    fn module_generation_uses_expected_proposals() {
        // Proposals that Wasmtime supports. Eventually a module should be
        // generated that needs these proposals.
        let mut expected = WasmFeatures::MUTABLE_GLOBAL
            | WasmFeatures::FLOATS
            | WasmFeatures::SIGN_EXTENSION
            | WasmFeatures::SATURATING_FLOAT_TO_INT
            | WasmFeatures::MULTI_VALUE
            | WasmFeatures::BULK_MEMORY
            | WasmFeatures::REFERENCE_TYPES
            | WasmFeatures::SIMD
            | WasmFeatures::MULTI_MEMORY
            | WasmFeatures::RELAXED_SIMD
            | WasmFeatures::TAIL_CALL
            | WasmFeatures::WIDE_ARITHMETIC
            | WasmFeatures::MEMORY64
            | WasmFeatures::FUNCTION_REFERENCES
            | WasmFeatures::GC
            | WasmFeatures::GC_TYPES
            | WasmFeatures::CUSTOM_PAGE_SIZES
            | WasmFeatures::EXTENDED_CONST
            | WasmFeatures::EXCEPTIONS;

        // Everything else wasmparser knows about — presumably a superset of
        // what wasm-smith supports — is considered unexpected. This means,
        // for example, that if wasm-smith updates to include a new proposal
        // by default that wasmtime implements then it will be required to be
        // listed above.
        let unexpected = WasmFeatures::all() ^ expected;

        let saw_all_expected = gen_until_pass(|config: generators::Config, u| {
            let wasm = config.generate(u, None)?.to_bytes();

            // Double-check the module is valid with every feature enabled.
            Validator::new_with_features(WasmFeatures::all()).validate_all(&wasm)?;

            // Disabling an unexpected feature must never invalidate the
            // module; if it does then generation used a proposal not listed
            // in `expected` above and something went wrong.
            for feature in unexpected.iter() {
                let validated = Validator::new_with_features(WasmFeatures::all() ^ feature)
                    .validate_all(&wasm);
                if validated.is_err() {
                    wasmtime::bail!("generated a module with {feature:?} but that wasn't expected");
                }
            }

            // If disabling an expected feature makes the module invalid then
            // the module genuinely requires that feature — cross it off the
            // set we're still waiting to see.
            for feature in expected.iter() {
                let validated = Validator::new_with_features(WasmFeatures::all() ^ feature)
                    .validate_all(&wasm);
                if validated.is_err() {
                    expected ^= feature;
                }
            }

            Ok(expected.is_empty())
        });

        assert!(saw_all_expected, "never generated wasm module using {expected:?}");
    }

    #[test]
    fn wast_smoke_test() {
        test_n_times(50, |(), u| super::wast_test(u));
    }
}
1358
1359