//! Wast test runner: executes the `*.wast` spec test suites against Wasmtime
//! in a matrix of compiler, collector, and allocator configurations.
1
use anyhow::{Context, bail};
2
use libtest_mimic::{Arguments, FormatSetting, Trial};
3
use std::sync::{Condvar, LazyLock, Mutex};
4
use wasmtime::{
5
Config, Enabled, Engine, InstanceAllocationStrategy, PoolingAllocationConfig, Store,
6
};
7
use wasmtime_test_util::wast::{Collector, Compiler, WastConfig, WastTest, limits};
8
use wasmtime_wast::{Async, SpectestConfig, WastContext};
9
10
fn main() {
11
env_logger::init();
12
13
let tests = if cfg!(miri) {
14
Vec::new()
15
} else {
16
wasmtime_test_util::wast::find_tests(".".as_ref()).unwrap()
17
};
18
19
let mut trials = Vec::new();
20
21
let mut add_trial = |test: &WastTest, config: WastConfig| {
22
let trial = Trial::test(
23
format!(
24
"{:?}/{}{}{}",
25
config.compiler,
26
if config.pooling { "pooling/" } else { "" },
27
if config.collector != Collector::Auto {
28
format!("{:?}/", config.collector)
29
} else {
30
String::new()
31
},
32
test.path.to_str().unwrap()
33
),
34
{
35
let test = test.clone();
36
move || run_wast(&test, config).map_err(|e| format!("{e:?}").into())
37
},
38
);
39
40
trials.push(trial);
41
};
42
43
// List of supported compilers, filtered by what our current host supports.
44
let mut compilers = vec![
45
Compiler::CraneliftNative,
46
Compiler::Winch,
47
Compiler::CraneliftPulley,
48
];
49
compilers.retain(|c| c.supports_host());
50
51
// Only test one compiler in ASAN since we're mostly interested in testing
52
// runtime code, not compiler-generated code.
53
if cfg!(asan) {
54
compilers.truncate(1);
55
}
56
57
// Run each wast test in a few interesting configuration combinations, but
58
// leave the full combinatorial matrix and such to fuzz testing which
59
// configures many more settings than those configured here.
60
for test in tests {
61
let collector = if test.test_uses_gc_types() {
62
Collector::DeferredReferenceCounting
63
} else {
64
Collector::Auto
65
};
66
67
// Run this test in all supported compilers.
68
for compiler in compilers.iter().copied() {
69
add_trial(
70
&test,
71
WastConfig {
72
compiler,
73
pooling: false,
74
collector,
75
},
76
);
77
}
78
79
// Don't do extra tests in ASAN as it takes awhile and is unlikely to
80
// reap much benefit.
81
if cfg!(asan) {
82
continue;
83
}
84
85
let compiler = compilers[0];
86
87
// Run this test with the pooling allocator under the default compiler.
88
add_trial(
89
&test,
90
WastConfig {
91
compiler,
92
pooling: true,
93
collector,
94
},
95
);
96
97
// If applicable, also run with the null collector in addition to the
98
// default collector.
99
if test.test_uses_gc_types() {
100
add_trial(
101
&test,
102
WastConfig {
103
compiler,
104
pooling: false,
105
collector: Collector::Null,
106
},
107
);
108
}
109
}
110
111
// There's a lot of tests so print only a `.` to keep the output a
112
// bit more terse by default.
113
let mut args = Arguments::from_args();
114
if args.format.is_none() {
115
args.format = Some(FormatSetting::Terse);
116
}
117
libtest_mimic::run(&args, trials).exit()
118
}
119
120
// Each of the tests included from `wast_testsuite_tests` will call this
121
// function which actually executes the `wast` test suite given the `strategy`
122
// to compile it.
123
fn run_wast(test: &WastTest, config: WastConfig) -> anyhow::Result<()> {
124
let test_config = test.config.clone();
125
126
// Determine whether this test is expected to fail or pass. Regardless the
127
// test is executed and the result of the execution is asserted to match
128
// this expectation. Note that this means that the test can't, for example,
129
// panic or segfault as a result.
130
//
131
// Updates to whether a test should pass or fail should be done in the
132
// `crates/wast-util/src/lib.rs` file.
133
let should_fail = test.should_fail(&config);
134
135
let multi_memory = test_config.multi_memory();
136
let test_hogs_memory = test_config.hogs_memory();
137
let relaxed_simd = test_config.relaxed_simd();
138
139
let is_cranelift = match config.compiler {
140
Compiler::CraneliftNative | Compiler::CraneliftPulley => true,
141
_ => false,
142
};
143
144
let mut cfg = Config::new();
145
cfg.async_support(true);
146
wasmtime_test_util::wasmtime_wast::apply_test_config(&mut cfg, &test_config);
147
wasmtime_test_util::wasmtime_wast::apply_wast_config(&mut cfg, &config);
148
149
if is_cranelift {
150
cfg.cranelift_debug_verifier(true);
151
}
152
153
// By default we'll allocate huge chunks (6gb) of the address space for each
154
// linear memory. This is typically fine but when we emulate tests with QEMU
155
// it turns out that it causes memory usage to balloon massively. Leave a
156
// knob here so on CI we can cut down the memory usage of QEMU and avoid the
157
// OOM killer.
158
//
159
// Locally testing this out this drops QEMU's memory usage running this
160
// tests suite from 10GiB to 600MiB. Previously we saw that crossing the
161
// 10GiB threshold caused our processes to get OOM killed on CI.
162
//
163
// Note that this branch is also taken for 32-bit platforms which generally
164
// can't test much of the pooling allocator as the virtual address space is
165
// so limited.
166
if cfg!(target_pointer_width = "32") || std::env::var("WASMTIME_TEST_NO_HOG_MEMORY").is_ok() {
167
// The pooling allocator hogs ~6TB of virtual address space for each
168
// store, so if we don't to hog memory then ignore pooling tests.
169
if config.pooling {
170
return Ok(());
171
}
172
173
// If the test allocates a lot of memory, that's considered "hogging"
174
// memory, so skip it.
175
if test_hogs_memory {
176
return Ok(());
177
}
178
179
// Don't use 4gb address space reservations when not hogging memory, and
180
// also don't reserve lots of memory after dynamic memories for growth
181
// (makes growth slower).
182
cfg.memory_reservation(2 * u64::from(wasmtime_environ::Memory::DEFAULT_PAGE_SIZE));
183
cfg.memory_reservation_for_growth(0);
184
185
let small_guard = 64 * 1024;
186
cfg.memory_guard_size(small_guard);
187
}
188
189
let _pooling_lock = if config.pooling {
190
// Some memory64 tests take more than 4gb of resident memory to test,
191
// but we don't want to configure the pooling allocator to allow that
192
// (that's a ton of memory to reserve), so we skip those tests.
193
if test_hogs_memory {
194
return Ok(());
195
}
196
197
// Reduce the virtual memory required to run multi-memory-based tests.
198
//
199
// The configuration parameters below require that a bare minimum
200
// virtual address space reservation of 450*9*805*65536 == 200G be made
201
// to support each test. If 6G reservations are made for each linear
202
// memory then not that many tests can run concurrently with much else.
203
//
204
// When multiple memories are used and are configured in the pool then
205
// force the usage of static memories without guards to reduce the VM
206
// impact.
207
let max_memory_size = limits::MEMORY_SIZE;
208
if multi_memory {
209
cfg.memory_reservation(max_memory_size as u64);
210
cfg.memory_reservation_for_growth(0);
211
cfg.memory_guard_size(0);
212
}
213
214
let mut pool = PoolingAllocationConfig::default();
215
pool.total_memories(limits::MEMORIES * 2)
216
.max_memory_protection_keys(2)
217
.max_memory_size(max_memory_size)
218
.max_memories_per_module(if multi_memory {
219
limits::MEMORIES_PER_MODULE
220
} else {
221
1
222
})
223
.max_tables_per_module(limits::TABLES_PER_MODULE);
224
225
// When testing, we may choose to start with MPK force-enabled to ensure
226
// we use that functionality.
227
if std::env::var("WASMTIME_TEST_FORCE_MPK").is_ok() {
228
pool.memory_protection_keys(Enabled::Yes);
229
}
230
231
cfg.allocation_strategy(InstanceAllocationStrategy::Pooling(pool));
232
Some(lock_pooling())
233
} else {
234
None
235
};
236
237
let mut engines = vec![(Engine::new(&cfg), "default")];
238
239
// For tests that use relaxed-simd test both the default engine and the
240
// guaranteed-deterministic engine to ensure that both the 'native'
241
// semantics of the instructions plus the canonical semantics work.
242
if relaxed_simd {
243
engines.push((
244
Engine::new(cfg.relaxed_simd_deterministic(true)),
245
"deterministic",
246
));
247
}
248
249
for (engine, desc) in engines {
250
let result = engine.and_then(|engine| {
251
let store = Store::new(&engine, ());
252
let mut wast_context = WastContext::new(store, Async::Yes);
253
wast_context.generate_dwarf(true);
254
wast_context.register_spectest(&SpectestConfig {
255
use_shared_memory: true,
256
suppress_prints: true,
257
})?;
258
wast_context
259
.run_wast(test.path.to_str().unwrap(), test.contents.as_bytes())
260
.with_context(|| format!("failed to run spec test with {desc} engine"))
261
});
262
263
if should_fail {
264
if result.is_ok() {
265
bail!("this test is flagged as should-fail but it succeeded")
266
}
267
} else {
268
result?;
269
}
270
}
271
272
Ok(())
273
}
274
275
// The pooling tests make about 6TB of address space reservation which means
276
// that we shouldn't let too many of them run concurrently at once. On
277
// high-cpu-count systems (e.g. 80 threads) this leads to mmap failures because
278
// presumably too much of the address space has been reserved with our limits
279
// specified above. By keeping the number of active pooling-related tests to a
280
// specified maximum we can put a cap on the virtual address space reservations
281
// made.
282
// The pooling tests make about 6TB of address space reservation which means
// that we shouldn't let too many of them run concurrently at once. On
// high-cpu-count systems (e.g. 80 threads) this leads to mmap failures because
// presumably too much of the address space has been reserved with our limits
// specified above. By keeping the number of active pooling-related tests to a
// specified maximum we can put a cap on the virtual address space reservations
// made.
fn lock_pooling() -> impl Drop {
    // Upper bound on pooling-allocator tests allowed to run simultaneously.
    const LIMIT: u32 = 4;

    // Shared gate: a count of active pooling tests plus a condvar used to
    // wake up waiters whenever a slot frees up.
    #[derive(Default)]
    struct Gate {
        active: Mutex<u32>,
        freed: Condvar,
    }

    static GATE: LazyLock<Gate> = LazyLock::new(Gate::default);

    // RAII permit: releases one slot and wakes one waiter when dropped.
    struct Permit(&'static Gate);

    impl Drop for Permit {
        fn drop(&mut self) {
            *self.0.active.lock().unwrap() -= 1;
            self.0.freed.notify_one();
        }
    }

    // Block until a slot is available, then claim it.
    let count = GATE.active.lock().unwrap();
    let mut count = GATE
        .freed
        .wait_while(count, |active| *active >= LIMIT)
        .unwrap();
    *count += 1;
    drop(count);
    Permit(&GATE)
}
318
319