GitHub Repository: bytecodealliance/wasmtime
Path: blob/main/crates/cranelift/src/func_environ.rs
mod gc;
pub(crate) mod stack_switching;

use crate::compiler::Compiler;
use crate::translate::{
    FuncTranslationStacks, GlobalVariable, Heap, HeapData, StructFieldsVec, TableData, TableSize,
    TargetEnvironment,
};
use crate::{BuiltinFunctionSignatures, TRAP_INTERNAL_ASSERT};
use cranelift_codegen::cursor::FuncCursor;
use cranelift_codegen::ir::condcodes::{FloatCC, IntCC};
use cranelift_codegen::ir::immediates::{Imm64, Offset32, V128Imm};
use cranelift_codegen::ir::pcc::Fact;
use cranelift_codegen::ir::{self, BlockArg, ExceptionTableData, ExceptionTableItem, types};
use cranelift_codegen::ir::{ArgumentPurpose, ConstantData, Function, InstBuilder, MemFlags};
use cranelift_codegen::ir::{Block, types::*};
use cranelift_codegen::isa::{CallConv, TargetFrontendConfig, TargetIsa};
use cranelift_entity::packed_option::{PackedOption, ReservedValue};
use cranelift_entity::{EntityRef, PrimaryMap, SecondaryMap};
use cranelift_frontend::Variable;
use cranelift_frontend::{FuncInstBuilder, FunctionBuilder};
use smallvec::{SmallVec, smallvec};
use std::mem;
use wasmparser::{FuncValidator, Operator, WasmFeatures, WasmModuleResources};
use wasmtime_core::math::f64_cvt_to_int_bounds;
use wasmtime_environ::{
    BuiltinFunctionIndex, DataIndex, DefinedFuncIndex, ElemIndex, EngineOrModuleTypeIndex,
    FrameStateSlotBuilder, FrameValType, FuncIndex, FuncKey, GlobalConstValue, GlobalIndex,
    IndexType, Memory, MemoryIndex, Module, ModuleInternedTypeIndex, ModuleTranslation,
    ModuleTypesBuilder, PtrSize, Table, TableIndex, TagIndex, TripleExt, Tunables, TypeConvert,
    TypeIndex, VMOffsets, WasmCompositeInnerType, WasmFuncType, WasmHeapTopType, WasmHeapType,
    WasmRefType, WasmResult, WasmValType,
};
use wasmtime_environ::{FUNCREF_INIT_BIT, FUNCREF_MASK};

#[derive(Debug)]
pub(crate) enum Extension {
    Sign,
    Zero,
}

/// A struct with an `Option<ir::FuncRef>` member for every builtin
/// function, to de-duplicate constructing/getting its function.
pub(crate) struct BuiltinFunctions {
    types: BuiltinFunctionSignatures,

    builtins: [Option<ir::FuncRef>; BuiltinFunctionIndex::len() as usize],
    breakpoint_trampoline: Option<ir::FuncRef>,
}

impl BuiltinFunctions {
    fn new(compiler: &Compiler) -> Self {
        Self {
            types: BuiltinFunctionSignatures::new(compiler),
            builtins: [None; BuiltinFunctionIndex::len() as usize],
            breakpoint_trampoline: None,
        }
    }

    fn load_builtin(&mut self, func: &mut Function, builtin: BuiltinFunctionIndex) -> ir::FuncRef {
        let cache = &mut self.builtins[builtin.index() as usize];
        if let Some(f) = cache {
            return *f;
        }
        let signature = func.import_signature(self.types.wasm_signature(builtin));
        let key = FuncKey::WasmToBuiltinTrampoline(builtin);
        let (namespace, index) = key.into_raw_parts();
        let name = ir::ExternalName::User(
            func.declare_imported_user_function(ir::UserExternalName { namespace, index }),
        );
        let f = func.import_function(ir::ExtFuncData {
            name,
            signature,
            colocated: true,
            patchable: false,
        });
        *cache = Some(f);
        f
    }

    pub(crate) fn patchable_breakpoint(&mut self, func: &mut Function) -> ir::FuncRef {
        *self.breakpoint_trampoline.get_or_insert_with(|| {
            let mut signature = ir::Signature::new(CallConv::PreserveAll);
            signature
                .params
                .push(ir::AbiParam::new(self.types.pointer_type));
            let signature = func.import_signature(signature);
            let key = FuncKey::PatchableToBuiltinTrampoline(BuiltinFunctionIndex::breakpoint());
            let (namespace, index) = key.into_raw_parts();
            let name = ir::ExternalName::User(
                func.declare_imported_user_function(ir::UserExternalName { namespace, index }),
            );
            func.import_function(ir::ExtFuncData {
                name,
                signature,
                colocated: true,
                patchable: true,
            })
        })
    }
}

// Generate helper methods on `BuiltinFunctions` above for each named builtin
// as well.
macro_rules! declare_function_signatures {
    ($(
        $( #[$attr:meta] )*
        $name:ident( $( $pname:ident: $param:ident ),* ) $( -> $result:ident )?;
    )*) => {
        $(impl BuiltinFunctions {
            $( #[$attr] )*
            #[allow(dead_code, reason = "debug breakpoint libcall not used in host ABI, only patchable ABI")]
            pub(crate) fn $name(&mut self, func: &mut Function) -> ir::FuncRef {
                self.load_builtin(func, BuiltinFunctionIndex::$name())
            }
        })*
    };
}
wasmtime_environ::foreach_builtin_function!(declare_function_signatures);
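
// Illustrative note (not part of the original source): for a builtin that is
// used later in this file, such as `out_of_gas`, the invocation above expands
// to roughly:
//
//     pub(crate) fn out_of_gas(&mut self, func: &mut Function) -> ir::FuncRef {
//         self.load_builtin(func, BuiltinFunctionIndex::out_of_gas())
//     }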

/// The `FuncEnvironment` implementation for use by the `ModuleEnvironment`.
pub struct FuncEnvironment<'module_environment> {
    compiler: &'module_environment Compiler,
    isa: &'module_environment (dyn TargetIsa + 'module_environment),
    key: FuncKey,
    pub(crate) module: &'module_environment Module,
    types: &'module_environment ModuleTypesBuilder,
    wasm_func_ty: &'module_environment WasmFuncType,
    sig_ref_to_ty: SecondaryMap<ir::SigRef, Option<&'module_environment WasmFuncType>>,
    needs_gc_heap: bool,
    entities: WasmEntities,

    /// Translation state at the given point.
    pub(crate) stacks: FuncTranslationStacks,

    #[cfg(feature = "gc")]
    ty_to_gc_layout: std::collections::HashMap<
        wasmtime_environ::ModuleInternedTypeIndex,
        wasmtime_environ::GcLayout,
    >,

    #[cfg(feature = "gc")]
    gc_heap: Option<Heap>,

    /// The Cranelift global holding the GC heap's base address.
    #[cfg(feature = "gc")]
    gc_heap_base: Option<ir::GlobalValue>,

    /// The Cranelift global holding the GC heap's bound.
    #[cfg(feature = "gc")]
    gc_heap_bound: Option<ir::GlobalValue>,

    translation: &'module_environment ModuleTranslation<'module_environment>,

    /// Heaps implementing WebAssembly linear memories.
    heaps: PrimaryMap<Heap, HeapData>,

    /// The Cranelift global holding the vmctx address.
    vmctx: Option<ir::GlobalValue>,

    /// The Cranelift global for our vmctx's `*mut VMStoreContext`.
    vm_store_context: Option<ir::GlobalValue>,

    /// The PCC memory type describing the vmctx layout, if we're
    /// using PCC.
    pcc_vmctx_memtype: Option<ir::MemoryType>,

    /// Caches of signatures for builtin functions.
    builtin_functions: BuiltinFunctions,

    /// Offsets to struct fields accessed by JIT code.
    pub(crate) offsets: VMOffsets<u8>,

    tunables: &'module_environment Tunables,

    /// A function-local variable which stores the cached value of the amount of
    /// fuel remaining to execute. If used this is modified frequently so it's
    /// stored locally as a variable instead of always referenced from the field
    /// in `*const VMStoreContext`.
    fuel_var: cranelift_frontend::Variable,

    /// A cached epoch deadline value, when performing epoch-based
    /// interruption. Loaded from `VMStoreContext` and reloaded after
    /// any yield.
    epoch_deadline_var: cranelift_frontend::Variable,

    /// A cached pointer to the per-Engine epoch counter, when
    /// performing epoch-based interruption. Initialized in the
    /// function prologue. We prefer to use a variable here rather
    /// than reload on each check because it's better to let the
    /// regalloc keep it in a register if able; if not, it can always
    /// spill, and this isn't any worse than reloading each time.
    epoch_ptr_var: cranelift_frontend::Variable,

    fuel_consumed: i64,

    /// A `GlobalValue` in CLIF which represents the stack limit.
    ///
    /// Typically this resides in the `stack_limit` value of `ir::Function` but
    /// that requires signal handlers on the host and when that's disabled this
    /// is here with an explicit check instead. Note that the explicit check is
    /// always present even if this is a "leaf" function, as we have to call
    /// into the host to trap when signal handlers are disabled.
    pub(crate) stack_limit_at_function_entry: Option<ir::GlobalValue>,

    /// Used by the stack switching feature. If set, we have allocated a
    /// slot on this function's stack to be used for the
    /// current stack's `handler_list` field.
    stack_switching_handler_list_buffer: Option<ir::StackSlot>,

    /// Used by the stack switching feature. If set, we have allocated a
    /// slot on this function's stack to be used for the
    /// current continuation's `values` field.
    stack_switching_values_buffer: Option<ir::StackSlot>,

    /// The stack-slot used for exposing Wasm state via debug
    /// instrumentation, if any, and the builder containing its metadata.
    pub(crate) state_slot: Option<(ir::StackSlot, FrameStateSlotBuilder)>,
}

impl<'module_environment> FuncEnvironment<'module_environment> {
    pub fn new(
        compiler: &'module_environment Compiler,
        translation: &'module_environment ModuleTranslation<'module_environment>,
        types: &'module_environment ModuleTypesBuilder,
        wasm_func_ty: &'module_environment WasmFuncType,
        key: FuncKey,
    ) -> Self {
        let tunables = compiler.tunables();
        let builtin_functions = BuiltinFunctions::new(compiler);

        // This isn't used during translation, so squash the warning about this
        // being unused from the compiler.
        let _ = BuiltinFunctions::raise;

        Self {
            key,
            isa: compiler.isa(),
            module: &translation.module,
            compiler,
            types,
            wasm_func_ty,
            sig_ref_to_ty: SecondaryMap::default(),
            needs_gc_heap: false,
            entities: WasmEntities::default(),
            stacks: FuncTranslationStacks::new(),

            #[cfg(feature = "gc")]
            ty_to_gc_layout: std::collections::HashMap::new(),
            #[cfg(feature = "gc")]
            gc_heap: None,
            #[cfg(feature = "gc")]
            gc_heap_base: None,
            #[cfg(feature = "gc")]
            gc_heap_bound: None,

            heaps: PrimaryMap::default(),
            vmctx: None,
            vm_store_context: None,
            pcc_vmctx_memtype: None,
            builtin_functions,
            offsets: VMOffsets::new(compiler.isa().pointer_bytes(), &translation.module),
            tunables,
            fuel_var: Variable::reserved_value(),
            epoch_deadline_var: Variable::reserved_value(),
            epoch_ptr_var: Variable::reserved_value(),

            // Start with at least one fuel being consumed because even empty
            // functions should consume at least some fuel.
            fuel_consumed: 1,

            translation,

            stack_limit_at_function_entry: None,

            stack_switching_handler_list_buffer: None,
            stack_switching_values_buffer: None,

            state_slot: None,
        }
    }

    pub(crate) fn pointer_type(&self) -> ir::Type {
        self.isa.pointer_type()
    }

    pub(crate) fn vmctx(&mut self, func: &mut Function) -> ir::GlobalValue {
        self.vmctx.unwrap_or_else(|| {
            let vmctx = func.create_global_value(ir::GlobalValueData::VMContext);
            if self.isa.flags().enable_pcc() {
                // Create a placeholder memtype for the vmctx; we'll
                // add fields to it as we lazily create HeapData
                // structs and global values.
                let vmctx_memtype = func.create_memory_type(ir::MemoryTypeData::Struct {
                    size: 0,
                    fields: vec![],
                });

                self.pcc_vmctx_memtype = Some(vmctx_memtype);
                func.global_value_facts[vmctx] = Some(Fact::Mem {
                    ty: vmctx_memtype,
                    min_offset: 0,
                    max_offset: 0,
                    nullable: false,
                });
            }

            self.vmctx = Some(vmctx);
            vmctx
        })
    }

    pub(crate) fn vmctx_val(&mut self, pos: &mut FuncCursor<'_>) -> ir::Value {
        let pointer_type = self.pointer_type();
        let vmctx = self.vmctx(&mut pos.func);
        pos.ins().global_value(pointer_type, vmctx)
    }

    fn get_table_copy_func(
        &mut self,
        func: &mut Function,
        dst_table_index: TableIndex,
        src_table_index: TableIndex,
    ) -> (ir::FuncRef, usize, usize) {
        let sig = self.builtin_functions.table_copy(func);
        (
            sig,
            dst_table_index.as_u32() as usize,
            src_table_index.as_u32() as usize,
        )
    }

    #[cfg(feature = "threads")]
    fn get_memory_atomic_wait(&mut self, func: &mut Function, ty: ir::Type) -> ir::FuncRef {
        match ty {
            I32 => self.builtin_functions.memory_atomic_wait32(func),
            I64 => self.builtin_functions.memory_atomic_wait64(func),
            x => panic!("get_memory_atomic_wait unsupported type: {x:?}"),
        }
    }

    fn get_global_location(
        &mut self,
        func: &mut ir::Function,
        index: GlobalIndex,
    ) -> (ir::GlobalValue, i32) {
        let pointer_type = self.pointer_type();
        let vmctx = self.vmctx(func);
        if let Some(def_index) = self.module.defined_global_index(index) {
            let offset = i32::try_from(self.offsets.vmctx_vmglobal_definition(def_index)).unwrap();
            (vmctx, offset)
        } else {
            let from_offset = self.offsets.vmctx_vmglobal_import_from(index);
            let global = func.create_global_value(ir::GlobalValueData::Load {
                base: vmctx,
                offset: Offset32::new(i32::try_from(from_offset).unwrap()),
                global_type: pointer_type,
                flags: MemFlags::trusted().with_readonly().with_can_move(),
            });
            (global, 0)
        }
    }

    /// Get or create the `ir::Global` for the `*mut VMStoreContext` in our
    /// `VMContext`.
    fn get_vmstore_context_ptr_global(&mut self, func: &mut ir::Function) -> ir::GlobalValue {
        if let Some(ptr) = self.vm_store_context {
            return ptr;
        }

        let offset = self.offsets.ptr.vmctx_store_context();
        let base = self.vmctx(func);
        let ptr = func.create_global_value(ir::GlobalValueData::Load {
            base,
            offset: Offset32::new(offset.into()),
            global_type: self.pointer_type(),
            flags: ir::MemFlags::trusted().with_readonly().with_can_move(),
        });
        self.vm_store_context = Some(ptr);
        ptr
    }

    /// Get the `*mut VMStoreContext` value for our `VMContext`.
    fn get_vmstore_context_ptr(&mut self, builder: &mut FunctionBuilder) -> ir::Value {
        let global = self.get_vmstore_context_ptr_global(&mut builder.func);
        builder.ins().global_value(self.pointer_type(), global)
    }

    fn fuel_function_entry(&mut self, builder: &mut FunctionBuilder<'_>) {
        // On function entry we load the amount of fuel into a function-local
        // `self.fuel_var` to make fuel modifications fast locally. This cache
        // is then periodically flushed to the Store-defined location in
        // `VMStoreContext` later.
        debug_assert!(self.fuel_var.is_reserved_value());
        self.fuel_var = builder.declare_var(ir::types::I64);
        self.fuel_load_into_var(builder);
        self.fuel_check(builder);
    }

    fn fuel_function_exit(&mut self, builder: &mut FunctionBuilder<'_>) {
        // On exiting the function we need to be sure to save the fuel we have
        // cached locally in `self.fuel_var` back into the Store-defined
        // location.
        self.fuel_save_from_var(builder);
    }

    fn fuel_before_op(
        &mut self,
        op: &Operator<'_>,
        builder: &mut FunctionBuilder<'_>,
        reachable: bool,
    ) {
        if !reachable {
            // In unreachable code we shouldn't have any leftover fuel we
            // haven't accounted for since the reason for us to become
            // unreachable should have already added it to `self.fuel_var`.
            debug_assert_eq!(self.fuel_consumed, 0);
            return;
        }

        self.fuel_consumed += match op {
            // Nop and drop generate no code, so don't consume fuel for them.
            Operator::Nop | Operator::Drop => 0,

            // Control flow may create branches, but is generally cheap and
            // free, so don't consume fuel. Note the lack of `if` since some
            // cost is incurred with the conditional check.
            Operator::Block { .. }
            | Operator::Loop { .. }
            | Operator::Unreachable
            | Operator::Return
            | Operator::Else
            | Operator::End => 0,

            // everything else, just call it one operation.
            _ => 1,
        };

        match op {
            // Exiting a function (via a return or unreachable) or otherwise
            // entering a different function (via a call) means that we need to
            // update the fuel consumption in `VMStoreContext` because we're
            // about to move control out of this function itself and the fuel
            // may need to be read.
            //
            // Before this we need to update the fuel counter from our own cost
            // leading up to this function call, and then we can store
            // `self.fuel_var` into `VMStoreContext`.
            Operator::Unreachable
            | Operator::Return
            | Operator::CallIndirect { .. }
            | Operator::Call { .. }
            | Operator::ReturnCall { .. }
            | Operator::ReturnCallRef { .. }
            | Operator::ReturnCallIndirect { .. }
            | Operator::Throw { .. } | Operator::ThrowRef => {
                self.fuel_increment_var(builder);
                self.fuel_save_from_var(builder);
            }

            // To ensure all code preceding a loop is only counted once we
            // update the fuel variable on entry.
            Operator::Loop { .. }

            // Entering into an `if` block means that the edge we take isn't
            // known until runtime, so we need to update our fuel consumption
            // before we take the branch.
            | Operator::If { .. }

            // Control-flow instructions mean that we're moving to the end/exit
            // of a block somewhere else. That means we need to update the fuel
            // counter since we're effectively terminating our basic block.
            | Operator::Br { .. }
            | Operator::BrIf { .. }
            | Operator::BrTable { .. }
            | Operator::BrOnNull { .. }
            | Operator::BrOnNonNull { .. }
            | Operator::BrOnCast { .. }
            | Operator::BrOnCastFail { .. }

            // Exiting a scope means that we need to update the fuel
            // consumption because there are multiple ways to exit a scope and
            // this is the only time we have to account for instructions
            // executed so far.
            | Operator::End

            // This is similar to `end`, except that it's only the terminator
            // for an `if` block. The same reasoning applies though in that we
            // are terminating a basic block and need to update the fuel
            // variable.
            | Operator::Else => self.fuel_increment_var(builder),

            // This is a normal instruction where the fuel is buffered to later
            // get added to `self.fuel_var`.
            //
            // Note that we generally ignore instructions which may trap and
            // therefore result in exiting a block early. Current usage of fuel
            // means that it's not too important to account for a precise amount
            // of fuel consumed but rather "close to the actual amount" is good
            // enough. For 100% precise counting, however, we'd probably need to
            // not only increment but also save the fuel amount more often
            // around trapping instructions. (see the `unreachable` instruction
            // case above)
            //
            // Note that `Block` is specifically omitted from incrementing the
            // fuel variable. Control flow entering a `block` is unconditional
            // which means it's effectively executing straight-line code. We'll
            // update the counter when exiting a block, but we shouldn't need to
            // do so upon entering a block.
            _ => {}
        }
    }

    fn fuel_after_op(&mut self, op: &Operator<'_>, builder: &mut FunctionBuilder<'_>) {
        // After a function call we need to reload our fuel value since the
        // function may have changed it.
        match op {
            Operator::Call { .. } | Operator::CallIndirect { .. } => {
                self.fuel_load_into_var(builder);
            }
            _ => {}
        }
    }

    /// Adds `self.fuel_consumed` to the `fuel_var`, zero-ing out the amount of
    /// fuel consumed at that point.
    fn fuel_increment_var(&mut self, builder: &mut FunctionBuilder<'_>) {
        let consumption = mem::replace(&mut self.fuel_consumed, 0);
        if consumption == 0 {
            return;
        }

        let fuel = builder.use_var(self.fuel_var);
        let fuel = builder.ins().iadd_imm(fuel, consumption);
        builder.def_var(self.fuel_var, fuel);
    }

    /// Loads the fuel consumption value from `VMStoreContext` into `self.fuel_var`.
    fn fuel_load_into_var(&mut self, builder: &mut FunctionBuilder<'_>) {
        let (addr, offset) = self.fuel_addr_offset(builder);
        let fuel = builder
            .ins()
            .load(ir::types::I64, ir::MemFlags::trusted(), addr, offset);
        builder.def_var(self.fuel_var, fuel);
    }

    /// Stores the fuel consumption value from `self.fuel_var` into
    /// `VMStoreContext`.
    fn fuel_save_from_var(&mut self, builder: &mut FunctionBuilder<'_>) {
        let (addr, offset) = self.fuel_addr_offset(builder);
        let fuel_consumed = builder.use_var(self.fuel_var);
        builder
            .ins()
            .store(ir::MemFlags::trusted(), fuel_consumed, addr, offset);
    }

    /// Returns the `(address, offset)` of the fuel consumption within
    /// `VMStoreContext`, used to perform loads/stores later.
    fn fuel_addr_offset(
        &mut self,
        builder: &mut FunctionBuilder<'_>,
    ) -> (ir::Value, ir::immediates::Offset32) {
        let vmstore_ctx = self.get_vmstore_context_ptr(builder);
        (
            vmstore_ctx,
            i32::from(self.offsets.ptr.vmstore_context_fuel_consumed()).into(),
        )
    }

    /// Checks the amount of fuel remaining, and if we've run out of fuel we call
    /// the out-of-fuel function.
    fn fuel_check(&mut self, builder: &mut FunctionBuilder) {
        self.fuel_increment_var(builder);
        let out_of_gas_block = builder.create_block();
        let continuation_block = builder.create_block();

        // Note that our fuel is encoded as adding positive values to a
        // negative number. Whenever the negative number goes positive that
        // means we ran out of fuel.
        //
        // Compare to see if our fuel is positive, and if so we ran out of gas.
        // Otherwise we can continue on like usual.
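        //
        // Illustrative example (not part of the original source): if the
        // embedder grants 100 units of fuel, the store records -100; after 40
        // units are consumed the cached `fuel_var` holds -60, and once the
        // value reaches 0 or greater the `out_of_gas` builtin below is called.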
        let zero = builder.ins().iconst(ir::types::I64, 0);
        let fuel = builder.use_var(self.fuel_var);
        let cmp = builder
            .ins()
            .icmp(IntCC::SignedGreaterThanOrEqual, fuel, zero);
        builder
            .ins()
            .brif(cmp, out_of_gas_block, &[], continuation_block, &[]);
        builder.seal_block(out_of_gas_block);

        // If we ran out of gas then we call our out-of-gas intrinsic and it
        // figures out what to do. Note that this may raise a trap, or do
        // something like yield to an async runtime. In either case we don't
        // assume what happens and handle the case the intrinsic returns.
        //
        // Note that we save/reload fuel around this since the out-of-gas
        // intrinsic may alter how much fuel is in the system.
        builder.switch_to_block(out_of_gas_block);
        self.fuel_save_from_var(builder);
        let out_of_gas = self.builtin_functions.out_of_gas(builder.func);
        let vmctx = self.vmctx_val(&mut builder.cursor());
        builder.ins().call(out_of_gas, &[vmctx]);
        self.fuel_load_into_var(builder);
        builder.ins().jump(continuation_block, &[]);
        builder.seal_block(continuation_block);

        builder.switch_to_block(continuation_block);
    }

    fn epoch_function_entry(&mut self, builder: &mut FunctionBuilder<'_>) {
        debug_assert!(self.epoch_deadline_var.is_reserved_value());
        self.epoch_deadline_var = builder.declare_var(ir::types::I64);
        // Let epoch_check_full load the current deadline and call def_var

        debug_assert!(self.epoch_ptr_var.is_reserved_value());
        self.epoch_ptr_var = builder.declare_var(self.pointer_type());
        let epoch_ptr = self.epoch_ptr(builder);
        builder.def_var(self.epoch_ptr_var, epoch_ptr);

        // We must check for an epoch change when entering a
        // function. Why? Why aren't checks at loops sufficient to
        // bound runtime to O(|static program size|)?
        //
        // The reason is that one can construct a "zip-bomb-like"
        // program with exponential-in-program-size runtime, with no
        // backedges (loops), by building a tree of function calls: f0
        // calls f1 ten times, f1 calls f2 ten times, etc. E.g., nine
        // levels of this yields a billion function calls with no
        // backedges. So we can't do checks only at backedges.
        //
        // In this "call-tree" scenario, and in fact in any program
        // that uses calls as a sort of control flow to try to evade
        // backedge checks, a check at every function entry is
        // sufficient. Then, combined with checks at every backedge
        // (loop) the longest runtime between checks is bounded by the
        // straightline length of any function body.
        let continuation_block = builder.create_block();
        let cur_epoch_value = self.epoch_load_current(builder);
        self.epoch_check_full(builder, cur_epoch_value, continuation_block);
    }

    #[cfg(feature = "wmemcheck")]
    fn hook_malloc_exit(&mut self, builder: &mut FunctionBuilder, retvals: &[ir::Value]) {
        let check_malloc = self.builtin_functions.check_malloc(builder.func);
        let vmctx = self.vmctx_val(&mut builder.cursor());
        let func_args = builder
            .func
            .dfg
            .block_params(builder.func.layout.entry_block().unwrap());
        let len = if func_args.len() < 3 {
            return;
        } else {
            // If a function named `malloc` has at least one argument, we assume the
            // first argument is the requested allocation size.
            func_args[2]
        };
        let retval = if retvals.len() < 1 {
            return;
        } else {
            retvals[0]
        };
        builder.ins().call(check_malloc, &[vmctx, retval, len]);
    }

    #[cfg(feature = "wmemcheck")]
    fn hook_free_exit(&mut self, builder: &mut FunctionBuilder) {
        let check_free = self.builtin_functions.check_free(builder.func);
        let vmctx = self.vmctx_val(&mut builder.cursor());
        let func_args = builder
            .func
            .dfg
            .block_params(builder.func.layout.entry_block().unwrap());
        let ptr = if func_args.len() < 3 {
            return;
        } else {
            // If a function named `free` has at least one argument, we assume the
            // first argument is a pointer to memory.
            func_args[2]
        };
        builder.ins().call(check_free, &[vmctx, ptr]);
    }

    fn epoch_ptr(&mut self, builder: &mut FunctionBuilder<'_>) -> ir::Value {
        let vmctx = self.vmctx(builder.func);
        let pointer_type = self.pointer_type();
        let base = builder.ins().global_value(pointer_type, vmctx);
        let offset = i32::from(self.offsets.ptr.vmctx_epoch_ptr());
        let epoch_ptr = builder
            .ins()
            .load(pointer_type, ir::MemFlags::trusted(), base, offset);
        epoch_ptr
    }

    fn epoch_load_current(&mut self, builder: &mut FunctionBuilder<'_>) -> ir::Value {
        let addr = builder.use_var(self.epoch_ptr_var);
        builder.ins().load(
            ir::types::I64,
            ir::MemFlags::trusted(),
            addr,
            ir::immediates::Offset32::new(0),
        )
    }

    fn epoch_check(&mut self, builder: &mut FunctionBuilder<'_>) {
        let continuation_block = builder.create_block();

        // Load new epoch and check against the cached deadline.
        let cur_epoch_value = self.epoch_load_current(builder);
        self.epoch_check_cached(builder, cur_epoch_value, continuation_block);

        // At this point we've noticed that the epoch has exceeded our
        // cached deadline. However the real deadline may have been
        // updated (within another yield) during some function that we
        // called in the meantime, so reload the cache and check again.
        self.epoch_check_full(builder, cur_epoch_value, continuation_block);
    }

    fn epoch_check_cached(
        &mut self,
        builder: &mut FunctionBuilder,
        cur_epoch_value: ir::Value,
        continuation_block: ir::Block,
    ) {
        let new_epoch_block = builder.create_block();
        builder.set_cold_block(new_epoch_block);

        let epoch_deadline = builder.use_var(self.epoch_deadline_var);
        let cmp = builder.ins().icmp(
            IntCC::UnsignedGreaterThanOrEqual,
            cur_epoch_value,
            epoch_deadline,
        );
        builder
            .ins()
            .brif(cmp, new_epoch_block, &[], continuation_block, &[]);
        builder.seal_block(new_epoch_block);

        builder.switch_to_block(new_epoch_block);
    }

    fn epoch_check_full(
        &mut self,
        builder: &mut FunctionBuilder,
        cur_epoch_value: ir::Value,
        continuation_block: ir::Block,
    ) {
        // We keep the deadline cached in a register to speed the checks
        // in the common case (between epoch ticks) but we want to do a
        // precise check here by reloading the cache first.
        let vmstore_ctx = self.get_vmstore_context_ptr(builder);
        let deadline = builder.ins().load(
            ir::types::I64,
            ir::MemFlags::trusted(),
            vmstore_ctx,
            ir::immediates::Offset32::new(self.offsets.ptr.vmstore_context_epoch_deadline() as i32),
        );
        builder.def_var(self.epoch_deadline_var, deadline);
        self.epoch_check_cached(builder, cur_epoch_value, continuation_block);

        let new_epoch = self.builtin_functions.new_epoch(builder.func);
        let vmctx = self.vmctx_val(&mut builder.cursor());
        // new_epoch() returns the new deadline, so we don't have to
        // reload it.
        let call = builder.ins().call(new_epoch, &[vmctx]);
        let new_deadline = *builder.func.dfg.inst_results(call).first().unwrap();
        builder.def_var(self.epoch_deadline_var, new_deadline);
        builder.ins().jump(continuation_block, &[]);
        builder.seal_block(continuation_block);

        builder.switch_to_block(continuation_block);
    }

    /// Get the Memory for the given index.
    fn memory(&self, index: MemoryIndex) -> Memory {
        self.module.memories[index]
    }

    /// Get the Table for the given index.
    fn table(&self, index: TableIndex) -> Table {
        self.module.tables[index]
    }

    /// Cast the value to I64 and sign extend if necessary.
    ///
    /// Returns the value casted to I64.
    fn cast_index_to_i64(
        &self,
        pos: &mut FuncCursor<'_>,
        val: ir::Value,
        index_type: IndexType,
    ) -> ir::Value {
        match index_type {
            IndexType::I32 => pos.ins().uextend(I64, val),
            IndexType::I64 => val,
        }
    }

    /// Convert the target pointer-sized integer `val` into the memory/table's index type.
    ///
    /// For memory, `val` is holding a memory length (or the `-1` `memory.grow`-failed sentinel).
    /// For table, `val` is holding a table length.
    ///
    /// This might involve extending or truncating it depending on the memory/table's
    /// index type and the target's pointer type.
    fn convert_pointer_to_index_type(
        &self,
        mut pos: FuncCursor<'_>,
        val: ir::Value,
        index_type: IndexType,
        // When it is a memory and the memory is using single-byte pages,
        // we need to handle the truncation differently. See comments below.
        //
        // When it is a table, this should be set to false.
        single_byte_pages: bool,
    ) -> ir::Value {
        let desired_type = index_type_to_ir_type(index_type);
        let pointer_type = self.pointer_type();
        assert_eq!(pos.func.dfg.value_type(val), pointer_type);

        // The current length is of type `pointer_type` but we need to fit it
        // into `desired_type`. We are guaranteed that the result will always
        // fit, so we just need to do the right ireduce/sextend here.
        if pointer_type == desired_type {
            val
        } else if pointer_type.bits() > desired_type.bits() {
            pos.ins().ireduce(desired_type, val)
        } else {
            // We have a 64-bit memory/table on a 32-bit host -- this combo doesn't
            // really make a whole lot of sense to do from a user perspective
            // but that is neither here nor there. We want to logically do an
            // unsigned extend *except* when we are given the `-1` sentinel,
            // which we must preserve as `-1` in the wider type.
            match single_byte_pages {
                false => {
                    // In the case that we have default page sizes, we can
                    // always sign extend, since valid memory lengths (in pages)
                    // never have their sign bit set, and so if the sign bit is
                    // set then this must be the `-1` sentinel, which we want to
                    // preserve through the extension.
                    //
                    // When it comes to table, `single_byte_pages` should have always been set to false.
                    // Then we simply do a signed extension.
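                    //
                    // Illustrative example (not part of the original source):
                    // a 32-bit value of 0xFFFF_FFFF (the grow-failed sentinel)
                    // sign-extends to -1 in the wider type, while any valid
                    // length has its sign bit clear and extends unchanged.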
                    pos.ins().sextend(desired_type, val)
                }
                true => {
                    // For single-byte pages, we have to explicitly check for
                    // `-1` and choose whether to do an unsigned extension or
                    // return a larger `-1` because there are valid memory
                    // lengths (in pages) that have the sign bit set.
                    let extended = pos.ins().uextend(desired_type, val);
                    let neg_one = pos.ins().iconst(desired_type, -1);
                    let is_failure = pos.ins().icmp_imm(IntCC::Equal, val, -1);
                    pos.ins().select(is_failure, neg_one, extended)
                }
            }
        }
    }

    fn get_or_init_func_ref_table_elem(
        &mut self,
        builder: &mut FunctionBuilder,
        table_index: TableIndex,
        index: ir::Value,
        cold_blocks: bool,
    ) -> ir::Value {
        let pointer_type = self.pointer_type();
        let table_data = self.get_or_create_table(builder.func, table_index);

        // To support lazy initialization of table
        // contents, we check for a null entry here, and
        // if null, we take a slow-path that invokes a
        // libcall.
        let (table_entry_addr, flags) = table_data.prepare_table_addr(self, builder, index);
        let value = builder.ins().load(pointer_type, flags, table_entry_addr, 0);

        if !self.tunables.table_lazy_init {
            return value;
        }

        // Mask off the "initialized bit". See documentation on
        // FUNCREF_INIT_BIT in crates/environ/src/ref_bits.rs for more
        // details. Note that `FUNCREF_MASK` has type `usize` which may not be
        // appropriate for the target architecture. Right now its value is
        // always -2 so assert that part doesn't change and then thread through
        // -2 as the immediate.
        assert_eq!(FUNCREF_MASK as isize, -2);
        let value_masked = builder.ins().band_imm(value, Imm64::from(-2));
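        // Illustrative example (not part of the original source): with lazy
        // init, an initialized slot stores the funcref pointer with the init
        // bit set (e.g. 0x1000 is stored as 0x1001), so masking with -2
        // recovers 0x1000, while an uninitialized slot holds 0 and takes the
        // lazy-init libcall path below.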

        let null_block = builder.create_block();
        let continuation_block = builder.create_block();
        if cold_blocks {
            builder.set_cold_block(null_block);
            builder.set_cold_block(continuation_block);
        }
        let result_param = builder.append_block_param(continuation_block, pointer_type);
        builder.set_cold_block(null_block);

        builder.ins().brif(
            value,
            continuation_block,
            &[value_masked.into()],
            null_block,
            &[],
        );
        builder.seal_block(null_block);

        builder.switch_to_block(null_block);
        let index_type = self.table(table_index).idx_type;
        let table_index = builder.ins().iconst(I32, table_index.index() as i64);
        let lazy_init = self
            .builtin_functions
            .table_get_lazy_init_func_ref(builder.func);
        let vmctx = self.vmctx_val(&mut builder.cursor());
        let index = self.cast_index_to_i64(&mut builder.cursor(), index, index_type);
        let call_inst = builder.ins().call(lazy_init, &[vmctx, table_index, index]);
        let returned_entry = builder.func.dfg.inst_results(call_inst)[0];
        builder
            .ins()
            .jump(continuation_block, &[returned_entry.into()]);
        builder.seal_block(continuation_block);

        builder.switch_to_block(continuation_block);
        result_param
    }

    #[cfg(feature = "wmemcheck")]
    fn check_malloc_start(&mut self, builder: &mut FunctionBuilder) {
        let malloc_start = self.builtin_functions.malloc_start(builder.func);
        let vmctx = self.vmctx_val(&mut builder.cursor());
        builder.ins().call(malloc_start, &[vmctx]);
    }

    #[cfg(feature = "wmemcheck")]
    fn check_free_start(&mut self, builder: &mut FunctionBuilder) {
        let free_start = self.builtin_functions.free_start(builder.func);
        let vmctx = self.vmctx_val(&mut builder.cursor());
        builder.ins().call(free_start, &[vmctx]);
    }

    #[cfg(feature = "wmemcheck")]
    fn current_func_name(&self, builder: &mut FunctionBuilder) -> Option<&str> {
        let func_index = match &builder.func.name {
            ir::UserFuncName::User(user) => FuncIndex::from_u32(user.index),
            _ => {
                panic!("function name not a UserFuncName::User as expected")
            }
        };
        self.translation
            .debuginfo
            .name_section
            .func_names
            .get(&func_index)
            .copied()
    }

    /// Proof-carrying code: create a memtype describing an empty
    /// runtime struct (to be updated later).
    fn create_empty_struct_memtype(&self, func: &mut ir::Function) -> ir::MemoryType {
        func.create_memory_type(ir::MemoryTypeData::Struct {
            size: 0,
            fields: vec![],
        })
    }

    /// Proof-carrying code: add a new field to a memtype used to
    /// describe a runtime struct. A memory region of type `memtype`
    /// will have a pointer at `offset` pointing to another memory
    /// region of type `pointee`. `readonly` indicates whether the
    /// PCC-checked code is expected to update this field or not.
    fn add_field_to_memtype(
        &self,
        func: &mut ir::Function,
        memtype: ir::MemoryType,
        offset: u32,
        pointee: ir::MemoryType,
        readonly: bool,
    ) {
        let ptr_size = self.pointer_type().bytes();
        match &mut func.memory_types[memtype] {
            ir::MemoryTypeData::Struct { size, fields } => {
                *size = std::cmp::max(*size, offset.checked_add(ptr_size).unwrap().into());
                fields.push(ir::MemoryTypeField {
                    ty: self.pointer_type(),
                    offset: offset.into(),
                    readonly,
                    fact: Some(ir::Fact::Mem {
                        ty: pointee,
                        min_offset: 0,
                        max_offset: 0,
                        nullable: false,
                    }),
                });

                // Sort fields by offset -- we need to do this now
                // because we may create an arbitrary number of
                // memtypes for imported memories and we don't
                // otherwise track them.
                fields.sort_by_key(|f| f.offset);
            }
            _ => panic!("Cannot add field to non-struct memtype"),
        }
    }

    /// Create an `ir::Global` that does `load(ptr + offset)` and, when PCC and
    /// memory types are enabled, adds a field to the pointer's memory type for
    /// this value we are loading.
    pub(crate) fn global_load_with_memory_type(
        &mut self,
        func: &mut ir::Function,
        ptr: ir::GlobalValue,
        offset: u32,
        flags: ir::MemFlags,
        ptr_mem_ty: Option<ir::MemoryType>,
    ) -> (ir::GlobalValue, Option<ir::MemoryType>) {
        let pointee = func.create_global_value(ir::GlobalValueData::Load {
            base: ptr,
            offset: Offset32::new(i32::try_from(offset).unwrap()),
            global_type: self.pointer_type(),
            flags,
        });

        let pointee_mem_ty = ptr_mem_ty.map(|ptr_mem_ty| {
            let pointee_mem_ty = self.create_empty_struct_memtype(func);
            self.add_field_to_memtype(func, ptr_mem_ty, offset, pointee_mem_ty, flags.readonly());
            func.global_value_facts[pointee] = Some(Fact::Mem {
                ty: pointee_mem_ty,
                min_offset: 0,
                max_offset: 0,
                nullable: false,
            });
            pointee_mem_ty
        });

        (pointee, pointee_mem_ty)
    }

    /// Like `global_load_with_memory_type` but specialized for loads out of the
    /// `vmctx`.
    pub(crate) fn global_load_from_vmctx_with_memory_type(
        &mut self,
        func: &mut ir::Function,
        offset: u32,
        flags: ir::MemFlags,
    ) -> (ir::GlobalValue, Option<ir::MemoryType>) {
        let vmctx = self.vmctx(func);
        self.global_load_with_memory_type(func, vmctx, offset, flags, self.pcc_vmctx_memtype)
    }

    /// Helper to emit a conditional trap based on `trap_cond`.
    ///
    /// This should only be used if `self.clif_instruction_traps_enabled()` is
    /// false, otherwise native CLIF instructions should be used instead.
    pub fn conditionally_trap(
        &mut self,
        builder: &mut FunctionBuilder,
        trap_cond: ir::Value,
        trap: ir::TrapCode,
    ) {
        assert!(!self.clif_instruction_traps_enabled());

        let trap_block = builder.create_block();
        builder.set_cold_block(trap_block);
        let continuation_block = builder.create_block();

        builder
            .ins()
            .brif(trap_cond, trap_block, &[], continuation_block, &[]);

        builder.seal_block(trap_block);
        builder.seal_block(continuation_block);

        builder.switch_to_block(trap_block);
        self.trap(builder, trap);
        builder.switch_to_block(continuation_block);
    }

    /// Helper used when `!self.clif_instruction_traps_enabled()` is enabled to
    /// test whether the divisor is zero.
    fn guard_zero_divisor(&mut self, builder: &mut FunctionBuilder, rhs: ir::Value) {
        if self.clif_instruction_traps_enabled() {
            return;
        }
        self.trapz(builder, rhs, ir::TrapCode::INTEGER_DIVISION_BY_ZERO);
    }

    /// Helper used when `!self.clif_instruction_traps_enabled()` is enabled to
    /// test whether a signed division operation will raise a trap.
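    ///
    /// For example (added note, not part of the original source): `i32::MIN / -1`
    /// must trap with `INTEGER_OVERFLOW`, since the mathematical result 2^31 is
    /// not representable in `i32`; the check below tests for exactly that pair
    /// of operands.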
    fn guard_signed_divide(
        &mut self,
        builder: &mut FunctionBuilder,
        lhs: ir::Value,
        rhs: ir::Value,
    ) {
        if self.clif_instruction_traps_enabled() {
            return;
        }
        self.trapz(builder, rhs, ir::TrapCode::INTEGER_DIVISION_BY_ZERO);

        let ty = builder.func.dfg.value_type(rhs);
        let minus_one = builder.ins().iconst(ty, -1);
        let rhs_is_minus_one = builder.ins().icmp(IntCC::Equal, rhs, minus_one);
        let int_min = builder.ins().iconst(
            ty,
            match ty {
                I32 => i64::from(i32::MIN),
                I64 => i64::MIN,
                _ => unreachable!(),
            },
        );
        let lhs_is_int_min = builder.ins().icmp(IntCC::Equal, lhs, int_min);
        let is_integer_overflow = builder.ins().band(rhs_is_minus_one, lhs_is_int_min);
        self.conditionally_trap(builder, is_integer_overflow, ir::TrapCode::INTEGER_OVERFLOW);
    }

    /// Helper used when `!self.clif_instruction_traps_enabled()` is enabled to
    /// guard the traps from float-to-int conversions.
    fn guard_fcvt_to_int(
        &mut self,
        builder: &mut FunctionBuilder,
        ty: ir::Type,
        val: ir::Value,
        signed: bool,
    ) {
        assert!(!self.clif_instruction_traps_enabled());
        let val_ty = builder.func.dfg.value_type(val);
        let val = if val_ty == F64 {
            val
        } else {
            builder.ins().fpromote(F64, val)
        };
        let isnan = builder.ins().fcmp(FloatCC::NotEqual, val, val);
        self.trapnz(builder, isnan, ir::TrapCode::BAD_CONVERSION_TO_INTEGER);
        let val = self.trunc_f64(builder, val);
        let (lower_bound, upper_bound) = f64_cvt_to_int_bounds(signed, ty.bits());
        let lower_bound = builder.ins().f64const(lower_bound);
        let too_small = builder
            .ins()
            .fcmp(FloatCC::LessThanOrEqual, val, lower_bound);
        self.trapnz(builder, too_small, ir::TrapCode::INTEGER_OVERFLOW);
        let upper_bound = builder.ins().f64const(upper_bound);
        let too_large = builder
            .ins()
            .fcmp(FloatCC::GreaterThanOrEqual, val, upper_bound);
        self.trapnz(builder, too_large, ir::TrapCode::INTEGER_OVERFLOW);
    }

    /// Get the `ir::Type` for a `VMSharedTypeIndex`.
    pub(crate) fn vmshared_type_index_ty(&self) -> Type {
        Type::int_with_byte_size(self.offsets.size_of_vmshared_type_index().into()).unwrap()
    }

    /// Given a `ModuleInternedTypeIndex`, emit code to get the corresponding
    /// `VMSharedTypeIndex` at runtime.
    pub(crate) fn module_interned_to_shared_ty(
        &mut self,
        pos: &mut FuncCursor,
        interned_ty: ModuleInternedTypeIndex,
    ) -> ir::Value {
        let vmctx = self.vmctx_val(pos);
        let pointer_type = self.pointer_type();
        let mem_flags = ir::MemFlags::trusted().with_readonly().with_can_move();

        // Load the base pointer of the array of `VMSharedTypeIndex`es.
        let shared_indices = pos.ins().load(
            pointer_type,
            mem_flags,
            vmctx,
            i32::from(self.offsets.ptr.vmctx_type_ids_array()),
        );

        // Calculate the offset in that array for this type's entry.
        let ty = self.vmshared_type_index_ty();
        let offset = i32::try_from(interned_ty.as_u32().checked_mul(ty.bytes()).unwrap()).unwrap();

        // Load the `VMSharedTypeIndex` that this `ModuleInternedTypeIndex` is
        // associated with at runtime from the array.
        pos.ins().load(ty, mem_flags, shared_indices, offset)
    }

    /// Load the associated `VMSharedTypeIndex` from inside a `*const VMFuncRef`.
    ///
    /// Does not check for null; just assumes that the `funcref` is a valid
    /// pointer.
    pub(crate) fn load_funcref_type_index(
        &mut self,
        pos: &mut FuncCursor,
        mem_flags: ir::MemFlags,
        funcref: ir::Value,
    ) -> ir::Value {
        let ty = self.vmshared_type_index_ty();
        pos.ins().load(
            ty,
            mem_flags,
            funcref,
            i32::from(self.offsets.ptr.vm_func_ref_type_index()),
        )
    }

    /// Does this function need a GC heap?
    pub fn needs_gc_heap(&self) -> bool {
        self.needs_gc_heap
    }

    /// Get the number of Wasm parameters for the given function.
    pub(crate) fn num_params_for_func(&self, function_index: FuncIndex) -> usize {
        let ty = self.module.functions[function_index]
            .signature
            .unwrap_module_type_index();
        self.types[ty].unwrap_func().params().len()
    }

    /// Get the number of Wasm parameters for the given function type.
    ///
    /// Panics on non-function types.
    pub(crate) fn num_params_for_function_type(&self, type_index: TypeIndex) -> usize {
        let ty = self.module.types[type_index].unwrap_module_type_index();
        self.types[ty].unwrap_func().params().len()
    }

    /// Initialize the state slot with an empty layout.
    pub(crate) fn create_state_slot(&mut self, builder: &mut FunctionBuilder) {
        if self.tunables.debug_guest {
            let frame_builder = FrameStateSlotBuilder::new(self.key, self.pointer_type().bytes());

            // Initially zero-size and with no descriptor; we will fill in
            // this info once we're done with the function body.
            let slot = builder
                .func
                .create_sized_stack_slot(ir::StackSlotData::new_with_key(
                    ir::StackSlotKind::ExplicitSlot,
                    0,
                    0,
                    ir::StackSlotKey::new(self.key.into_raw_u64()),
                ));

            self.state_slot = Some((slot, frame_builder));
        }
    }

    /// Update the state slot layout with a new layout given a local.
    pub(crate) fn add_state_slot_local(
        &mut self,
        builder: &mut FunctionBuilder,
        ty: WasmValType,
        init: Option<ir::Value>,
    ) {
        if let Some((slot, b)) = &mut self.state_slot {
            let offset = b.add_local(FrameValType::from(ty));
            if let Some(init) = init {
                builder.ins().stack_store(init, *slot, offset.offset());
            }
        }
    }

    fn update_state_slot_stack(
        &mut self,
        validator: &FuncValidator<impl WasmModuleResources>,
        builder: &mut FunctionBuilder,
    ) -> WasmResult<()> {
        // Take ownership of the state-slot builder temporarily rather
        // than mutably borrowing so we can invoke a method below.
        if let Some((slot, mut b)) = self.state_slot.take() {
            // If the stack-shape stack is shorter than the value
            // stack, that means that values were popped and then new
            // values were pushed; hence, these operand-stack values
            // are "dirty" and need to be flushed to the stackslot.
            //
            // N.B.: note that we don't re-sync GC-rooted values, and
            // we don't root the instrumentation slots
            // explicitly. This is safe as long as we don't have a
            // moving GC, because the value that we're observing in
            // the main program dataflow is already rooted in the main
            // program (we are only storing an extra copy of it). But
            // if/when we do build a moving GC, we will need to handle
            // this, probably by invalidating the "freshness" of all
            // ref-typed values after a safepoint and re-writing them
            // to the instrumentation slot; or alternately, extending
            // the debug instrumentation mechanism to be able to
            // directly refer to the user stack-slot.
            for i in self.stacks.stack_shape.len()..self.stacks.stack.len() {
                let parent_shape = i
                    .checked_sub(1)
                    .map(|parent_idx| self.stacks.stack_shape[parent_idx]);
                if let Some(this_ty) = validator
                    .get_operand_type(self.stacks.stack.len() - i - 1)
                    .expect("Index should not be out of range")
                {
                    let wasm_ty = self.convert_valtype(this_ty)?;
                    let (this_shape, offset) =
                        b.push_stack(parent_shape, FrameValType::from(wasm_ty));
                    self.stacks.stack_shape.push(this_shape);

                    let value = self.stacks.stack[i];
                    builder.ins().stack_store(value, slot, offset.offset());
                } else {
                    // Unreachable code with unknown type -- no
                    // flushes for this or later-pushed values.
                    break;
                }
            }

            self.state_slot = Some((slot, b));
        }

        Ok(())
    }

    pub(crate) fn debug_tags(&self, srcloc: ir::SourceLoc) -> Vec<ir::DebugTag> {
        if let Some((slot, _b)) = &self.state_slot {
            self.stacks.assert_debug_stack_is_synced();
            let stack_shape = self
                .stacks
                .stack_shape
                .last()
                .map(|s| s.raw())
                .unwrap_or(u32::MAX);
            let pc = srcloc.bits();
            vec![
                ir::DebugTag::StackSlot(*slot),
                ir::DebugTag::User(pc),
                ir::DebugTag::User(stack_shape),
            ]
        } else {
            vec![]
        }
    }

    fn finish_debug_metadata(&self, builder: &mut FunctionBuilder) {
        if let Some((slot, b)) = &self.state_slot {
            builder.func.sized_stack_slots[*slot].size = b.size();
        }
    }

    /// Store a new value for a local in the state slot, if present.
    pub(crate) fn state_slot_local_set(
        &self,
        builder: &mut FunctionBuilder,
        local: u32,
        value: ir::Value,
    ) {
        if let Some((slot, b)) = &self.state_slot {
            let offset = b.local_offset(local);
            builder.ins().stack_store(value, *slot, offset.offset());
        }
    }

    fn update_state_slot_vmctx(&mut self, builder: &mut FunctionBuilder) {
        if let &Some((slot, _)) = &self.state_slot {
            let vmctx = self.vmctx_val(&mut builder.cursor());
            // N.B.: we always store vmctx at offset 0 in the
            // slot. This is relied upon in
            // crates/wasmtime/src/runtime/debug.rs in
            // `raw_instance()`. See also the slot layout computation in crates/environ/src/
            builder.ins().stack_store(vmctx, slot, 0);
        }
    }
}

#[derive(Default)]
pub(crate) struct WasmEntities {
    /// Map from a Wasm global index from this module to its implementation in
    /// the Cranelift function we are building.
    pub(crate) globals: SecondaryMap<GlobalIndex, Option<GlobalVariable>>,

    /// Map from a Wasm memory index to its `Heap` implementation in the
    /// Cranelift function we are building.
    pub(crate) memories: SecondaryMap<MemoryIndex, PackedOption<Heap>>,

    /// Map from an (interned) Wasm type index from this module to its
    /// `ir::SigRef` in the Cranelift function we are building.
    pub(crate) sig_refs: SecondaryMap<ModuleInternedTypeIndex, PackedOption<ir::SigRef>>,

    /// Map from a defined Wasm function index to its associated function
    /// reference in the Cranelift function we are building.
    pub(crate) defined_func_refs: SecondaryMap<DefinedFuncIndex, PackedOption<ir::FuncRef>>,

    /// Map from an imported Wasm function index for which we statically know
    /// which function will always be used to satisfy that import to its
    /// associated function reference in the Cranelift function we are building.
    pub(crate) imported_func_refs: SecondaryMap<FuncIndex, PackedOption<ir::FuncRef>>,

    /// Map from a Wasm table index to its associated implementation in the
    /// Cranelift function we are building.
    pub(crate) tables: SecondaryMap<TableIndex, Option<TableData>>,
}

macro_rules! define_get_or_create_methods {
    ( $( $name:ident ( $map:ident ) : $create:ident : $key:ty => $val:ty ; )* ) => {
        $(
            pub(crate) fn $name(&mut self, func: &mut ir::Function, key: $key) -> $val {
                match self.entities.$map[key].clone().into() {
                    Some(val) => val,
                    None => {
                        let val = self.$create(func, key);
                        self.entities.$map[key] = Some(val.clone()).into();
                        val
                    }
                }
            }
        )*
    };
}

impl FuncEnvironment<'_> {
    define_get_or_create_methods! {
        get_or_create_global(globals) : make_global : GlobalIndex => GlobalVariable;
        get_or_create_heap(memories) : make_heap : MemoryIndex => Heap;
        get_or_create_interned_sig_ref(sig_refs) : make_sig_ref : ModuleInternedTypeIndex => ir::SigRef;
        get_or_create_defined_func_ref(defined_func_refs) : make_defined_func_ref : DefinedFuncIndex => ir::FuncRef;
        get_or_create_imported_func_ref(imported_func_refs) : make_imported_func_ref : FuncIndex => ir::FuncRef;
        get_or_create_table(tables) : make_table : TableIndex => TableData;
    }
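
    // Illustrative note (not part of the original source): each entry above
    // expands to a small caching accessor; for example `get_or_create_table`
    // returns the `TableData` already recorded for a `TableIndex` in
    // `self.entities.tables` if present, and otherwise calls `make_table` and
    // caches the result before returning it.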
1415
1416
fn make_global(&mut self, func: &mut ir::Function, index: GlobalIndex) -> GlobalVariable {
1417
let ty = self.module.globals[index].wasm_ty;
1418
1419
if ty.is_vmgcref_type() {
1420
// Although reference-typed globals live at the same memory location as
1421
// any other type of global at the same index would, getting or
1422
// setting them requires ref counting barriers. Therefore, we need
1423
// to use `GlobalVariable::Custom`, as that is the only kind of
1424
// `GlobalVariable` for which translation supports custom
1425
// access translation.
1426
return GlobalVariable::Custom;
1427
}
1428
1429
if !self.module.globals[index].mutability {
1430
if let Some(index) = self.module.defined_global_index(index) {
1431
let init = &self.module.global_initializers[index];
1432
if let Some(value) = init.const_eval() {
1433
return GlobalVariable::Constant { value };
1434
}
1435
}
1436
}
1437
1438
let (gv, offset) = self.get_global_location(func, index);
1439
GlobalVariable::Memory {
1440
gv,
1441
offset: offset.into(),
1442
ty: super::value_type(self.isa, ty),
1443
}
1444
}
1445
1446
pub(crate) fn get_or_create_sig_ref(
1447
&mut self,
1448
func: &mut ir::Function,
1449
ty: TypeIndex,
1450
) -> ir::SigRef {
1451
let ty = self.module.types[ty].unwrap_module_type_index();
1452
self.get_or_create_interned_sig_ref(func, ty)
1453
}
1454
1455
fn make_sig_ref(
1456
&mut self,
1457
func: &mut ir::Function,
1458
index: ModuleInternedTypeIndex,
1459
) -> ir::SigRef {
1460
let wasm_func_ty = self.types[index].unwrap_func();
1461
let sig = crate::wasm_call_signature(self.isa, wasm_func_ty, &self.tunables);
1462
let sig_ref = func.import_signature(sig);
1463
self.sig_ref_to_ty[sig_ref] = Some(wasm_func_ty);
1464
sig_ref
1465
}
1466
1467
fn make_defined_func_ref(
1468
&mut self,
1469
func: &mut ir::Function,
1470
def_func_index: DefinedFuncIndex,
1471
) -> ir::FuncRef {
1472
let func_index = self.module.func_index(def_func_index);
1473
1474
let ty = self.module.functions[func_index]
1475
.signature
1476
.unwrap_module_type_index();
1477
let signature = self.get_or_create_interned_sig_ref(func, ty);
1478
1479
let key = FuncKey::DefinedWasmFunction(self.translation.module_index(), def_func_index);
1480
let (namespace, index) = key.into_raw_parts();
1481
let name = ir::ExternalName::User(
1482
func.declare_imported_user_function(ir::UserExternalName { namespace, index }),
1483
);
1484
1485
func.import_function(ir::ExtFuncData {
1486
name,
1487
signature,
1488
colocated: true,
1489
patchable: false,
1490
})
1491
}
1492
1493
fn make_imported_func_ref(
1494
&mut self,
1495
func: &mut ir::Function,
1496
func_index: FuncIndex,
1497
) -> ir::FuncRef {
1498
assert!(self.module.is_imported_function(func_index));
1499
assert!(self.translation.known_imported_functions[func_index].is_some());
1500
1501
let ty = self.module.functions[func_index]
1502
.signature
1503
.unwrap_module_type_index();
1504
let signature = self.get_or_create_interned_sig_ref(func, ty);
1505
1506
let key = match self.translation.known_imported_functions[func_index] {
1507
Some(key @ FuncKey::DefinedWasmFunction(..)) => key,
1508
1509
#[cfg(feature = "component-model")]
1510
Some(key @ FuncKey::UnsafeIntrinsic(..)) => key,
1511
1512
Some(key) => {
1513
panic!("unexpected kind of known-import function: {key:?}")
1514
}
1515
1516
None => panic!(
1517
"cannot make an `ir::FuncRef` for a function import that is not statically known"
1518
),
1519
};
1520
1521
let (namespace, index) = key.into_raw_parts();
1522
let name = ir::ExternalName::User(
1523
func.declare_imported_user_function(ir::UserExternalName { namespace, index }),
1524
);
1525
1526
func.import_function(ir::ExtFuncData {
1527
name,
1528
signature,
1529
colocated: true,
1530
patchable: false,
1531
})
1532
}
1533
1534
fn make_heap(&mut self, func: &mut ir::Function, index: MemoryIndex) -> Heap {
1535
let pointer_type = self.pointer_type();
1536
let memory = self.module.memories[index];
1537
let is_shared = memory.shared;
1538
1539
let (base_ptr, base_offset, current_length_offset, ptr_memtype) = {
1540
let vmctx = self.vmctx(func);
1541
if let Some(def_index) = self.module.defined_memory_index(index) {
1542
if is_shared {
1543
// As with imported memory, the `VMMemoryDefinition` for a
1544
// shared memory is stored elsewhere. We store a `*mut
1545
// VMMemoryDefinition` to it and dereference that when
1546
// atomically growing it.
1547
let from_offset = self.offsets.vmctx_vmmemory_pointer(def_index);
1548
let (memory, def_mt) = self.global_load_from_vmctx_with_memory_type(
1549
func,
1550
from_offset,
1551
ir::MemFlags::trusted().with_readonly().with_can_move(),
1552
);
1553
let base_offset = i32::from(self.offsets.ptr.vmmemory_definition_base());
1554
let current_length_offset =
1555
i32::from(self.offsets.ptr.vmmemory_definition_current_length());
1556
(memory, base_offset, current_length_offset, def_mt)
1557
} else {
1558
let owned_index = self.module.owned_memory_index(def_index);
1559
let owned_base_offset =
1560
self.offsets.vmctx_vmmemory_definition_base(owned_index);
1561
let owned_length_offset = self
1562
.offsets
1563
.vmctx_vmmemory_definition_current_length(owned_index);
1564
let current_base_offset = i32::try_from(owned_base_offset).unwrap();
1565
let current_length_offset = i32::try_from(owned_length_offset).unwrap();
1566
(
1567
vmctx,
1568
current_base_offset,
1569
current_length_offset,
1570
self.pcc_vmctx_memtype,
1571
)
1572
}
1573
} else {
1574
let from_offset = self.offsets.vmctx_vmmemory_import_from(index);
1575
let (memory, def_mt) = self.global_load_from_vmctx_with_memory_type(
1576
func,
1577
from_offset,
1578
ir::MemFlags::trusted().with_readonly().with_can_move(),
1579
);
1580
let base_offset = i32::from(self.offsets.ptr.vmmemory_definition_base());
1581
let current_length_offset =
1582
i32::from(self.offsets.ptr.vmmemory_definition_current_length());
1583
(memory, base_offset, current_length_offset, def_mt)
1584
}
1585
};
1586
1587
let bound = func.create_global_value(ir::GlobalValueData::Load {
1588
base: base_ptr,
1589
offset: Offset32::new(current_length_offset),
1590
global_type: pointer_type,
1591
flags: MemFlags::trusted(),
1592
});
1593
1594
let (base_fact, pcc_memory_type) = self.make_pcc_base_fact_and_type_for_memory(
1595
func,
1596
memory,
1597
base_offset,
1598
current_length_offset,
1599
ptr_memtype,
1600
bound,
1601
);
1602
1603
let base = self.make_heap_base(func, memory, base_ptr, base_offset, base_fact);
1604
1605
self.heaps.push(HeapData {
1606
base,
1607
bound,
1608
pcc_memory_type,
1609
memory,
1610
})
1611
}
1612
1613
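/// Create the global value that loads the heap's base pointer from `ptr`
/// at `offset`, marking it read-only when the memory's base can never move.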
pub(crate) fn make_heap_base(
1614
&self,
1615
func: &mut Function,
1616
memory: Memory,
1617
ptr: ir::GlobalValue,
1618
offset: i32,
1619
fact: Option<Fact>,
1620
) -> ir::GlobalValue {
1621
let pointer_type = self.pointer_type();
1622
1623
let mut flags = ir::MemFlags::trusted().with_checked().with_can_move();
1624
if !memory.memory_may_move(self.tunables) {
1625
flags.set_readonly();
1626
}
1627
1628
let heap_base = func.create_global_value(ir::GlobalValueData::Load {
1629
base: ptr,
1630
offset: Offset32::new(offset),
1631
global_type: pointer_type,
1632
flags,
1633
});
1634
func.global_value_facts[heap_base] = fact;
1635
heap_base
1636
}
1637
1638
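/// When proof-carrying code is enabled (i.e. `ptr_memtype` is present),
/// create a Cranelift memory type describing the linear memory's contents,
/// register the base (and, for dynamic memories, length) fields on the
/// vmctx memory type, and return the fact to attach to the heap base.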
pub(crate) fn make_pcc_base_fact_and_type_for_memory(
1639
&mut self,
1640
func: &mut Function,
1641
memory: Memory,
1642
base_offset: i32,
1643
current_length_offset: i32,
1644
ptr_memtype: Option<ir::MemoryType>,
1645
heap_bound: ir::GlobalValue,
1646
) -> (Option<Fact>, Option<ir::MemoryType>) {
1647
// If the bounds check can be elided (e.g. the memory has a suitable
// declared maximum), we can describe this as a "static" memory that is
// allocated up front and never moved; otherwise we describe a dynamic
// memory whose current length is loaded at runtime.
1649
let host_page_size_log2 = self.target_config().page_size_align_log2;
1650
let (base_fact, memory_type) = if !memory
1651
.can_elide_bounds_check(self.tunables, host_page_size_log2)
1652
{
1653
if let Some(ptr_memtype) = ptr_memtype {
1654
// Create a memtype representing the untyped memory region.
1655
let data_mt = func.create_memory_type(ir::MemoryTypeData::DynamicMemory {
1656
gv: heap_bound,
1657
size: self.tunables.memory_guard_size,
1658
});
1659
// This fact applies to any pointer to the start of the memory.
1660
let base_fact = ir::Fact::dynamic_base_ptr(data_mt);
1661
// This fact applies to the length.
1662
let length_fact = ir::Fact::global_value(
1663
u16::try_from(self.isa.pointer_type().bits()).unwrap(),
1664
heap_bound,
1665
);
1666
// Create a field in the vmctx for the base pointer.
1667
match &mut func.memory_types[ptr_memtype] {
1668
ir::MemoryTypeData::Struct { size, fields } => {
1669
let base_offset = u64::try_from(base_offset).unwrap();
1670
fields.push(ir::MemoryTypeField {
1671
offset: base_offset,
1672
ty: self.isa.pointer_type(),
1673
// Read-only field from the PoV of PCC checks:
1674
// don't allow stores to this field. (Even if
1675
// it is a dynamic memory whose base can
1676
// change, that update happens inside the
1677
// runtime, not in generated code.)
1678
readonly: true,
1679
fact: Some(base_fact.clone()),
1680
});
1681
let current_length_offset = u64::try_from(current_length_offset).unwrap();
1682
fields.push(ir::MemoryTypeField {
1683
offset: current_length_offset,
1684
ty: self.isa.pointer_type(),
1685
// As above, read-only; only the runtime modifies it.
1686
readonly: true,
1687
fact: Some(length_fact),
1688
});
1689
1690
let pointer_size = u64::from(self.isa.pointer_type().bytes());
1691
let fields_end = std::cmp::max(
1692
base_offset + pointer_size,
1693
current_length_offset + pointer_size,
1694
);
1695
*size = std::cmp::max(*size, fields_end);
1696
}
1697
_ => {
1698
panic!("Bad memtype");
1699
}
1700
}
1701
// Apply a fact to the base pointer.
1702
(Some(base_fact), Some(data_mt))
1703
} else {
1704
(None, None)
1705
}
1706
} else {
1707
if let Some(ptr_memtype) = ptr_memtype {
1708
// Create a memtype representing the untyped memory region.
1709
let data_mt = func.create_memory_type(ir::MemoryTypeData::Memory {
1710
size: self
1711
.tunables
1712
.memory_reservation
1713
.checked_add(self.tunables.memory_guard_size)
1714
.expect("Memory plan has overflowing size plus guard"),
1715
});
1716
// This fact applies to any pointer to the start of the memory.
1717
let base_fact = Fact::Mem {
1718
ty: data_mt,
1719
min_offset: 0,
1720
max_offset: 0,
1721
nullable: false,
1722
};
1723
// Create a field in the vmctx for the base pointer.
1724
match &mut func.memory_types[ptr_memtype] {
1725
ir::MemoryTypeData::Struct { size, fields } => {
1726
let offset = u64::try_from(base_offset).unwrap();
1727
fields.push(ir::MemoryTypeField {
1728
offset,
1729
ty: self.isa.pointer_type(),
1730
// Read-only field from the PoV of PCC checks:
1731
// don't allow stores to this field. (Even if
1732
// it is a dynamic memory whose base can
1733
// change, that update happens inside the
1734
// runtime, not in generated code.)
1735
readonly: true,
1736
fact: Some(base_fact.clone()),
1737
});
1738
*size = std::cmp::max(
1739
*size,
1740
offset + u64::from(self.isa.pointer_type().bytes()),
1741
);
1742
}
1743
_ => {
1744
panic!("Bad memtype");
1745
}
1746
}
1747
// Apply a fact to the base pointer.
1748
(Some(base_fact), Some(data_mt))
1749
} else {
1750
(None, None)
1751
}
1752
};
1753
(base_fact, memory_type)
1754
}
1755
1756
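/// Describe table `index` as a `TableData`: locate its base and
/// current-element-count fields (in our own vmctx for defined tables, or
/// through the table import for imported ones) and compute the element
/// size for its reference type.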
fn make_table(&mut self, func: &mut ir::Function, index: TableIndex) -> TableData {
1757
let pointer_type = self.pointer_type();
1758
1759
let (ptr, base_offset, current_elements_offset) = {
1760
let vmctx = self.vmctx(func);
1761
if let Some(def_index) = self.module.defined_table_index(index) {
1762
let base_offset =
1763
i32::try_from(self.offsets.vmctx_vmtable_definition_base(def_index)).unwrap();
1764
let current_elements_offset = i32::try_from(
1765
self.offsets
1766
.vmctx_vmtable_definition_current_elements(def_index),
1767
)
1768
.unwrap();
1769
(vmctx, base_offset, current_elements_offset)
1770
} else {
1771
let from_offset = self.offsets.vmctx_vmtable_from(index);
1772
let table = func.create_global_value(ir::GlobalValueData::Load {
1773
base: vmctx,
1774
offset: Offset32::new(i32::try_from(from_offset).unwrap()),
1775
global_type: pointer_type,
1776
flags: MemFlags::trusted().with_readonly().with_can_move(),
1777
});
1778
let base_offset = i32::from(self.offsets.vmtable_definition_base());
1779
let current_elements_offset =
1780
i32::from(self.offsets.vmtable_definition_current_elements());
1781
(table, base_offset, current_elements_offset)
1782
}
1783
};
1784
1785
let table = &self.module.tables[index];
1786
let element_size = if table.ref_type.is_vmgcref_type() {
1787
// For GC-managed references, tables store `Option<VMGcRef>`s.
1788
ir::types::I32.bytes()
1789
} else {
1790
self.reference_type(table.ref_type.heap_type).0.bytes()
1791
};
1792
1793
let base_gv = func.create_global_value(ir::GlobalValueData::Load {
1794
base: ptr,
1795
offset: Offset32::new(base_offset),
1796
global_type: pointer_type,
1797
flags: if Some(table.limits.min) == table.limits.max {
1798
// A fixed-size table can't be resized, so its base address won't
// change.
1800
MemFlags::trusted().with_readonly().with_can_move()
1801
} else {
1802
MemFlags::trusted()
1803
},
1804
});
1805
1806
let bound = if Some(table.limits.min) == table.limits.max {
1807
TableSize::Static {
1808
bound: table.limits.min,
1809
}
1810
} else {
1811
TableSize::Dynamic {
1812
bound_gv: func.create_global_value(ir::GlobalValueData::Load {
1813
base: ptr,
1814
offset: Offset32::new(current_elements_offset),
1815
global_type: ir::Type::int(
1816
u16::from(self.offsets.size_of_vmtable_definition_current_elements()) * 8,
1817
)
1818
.unwrap(),
1819
flags: MemFlags::trusted(),
1820
}),
1821
}
1822
};
1823
1824
TableData {
1825
base_gv,
1826
bound,
1827
element_size,
1828
}
1829
}
1830
1831
/// Get the type index associated with an exception object.
1832
#[cfg(feature = "gc")]
1833
pub(crate) fn exception_type_from_tag(&self, tag: TagIndex) -> EngineOrModuleTypeIndex {
1834
self.module.tags[tag].exception
1835
}
1836
1837
/// Get the parameter arity of the associated function type for the given tag.
1838
pub(crate) fn tag_param_arity(&self, tag: TagIndex) -> usize {
1839
let func_ty = self.module.tags[tag].signature.unwrap_module_type_index();
1840
let func_ty = self
1841
.types
1842
.unwrap_func(func_ty)
1843
.expect("already validated to refer to a function type");
1844
func_ty.params().len()
1845
}
1846
1847
/// Get the runtime instance ID and defined-tag ID in that
1848
/// instance for a particular static tag ID.
1849
#[cfg(feature = "gc")]
1850
pub(crate) fn get_instance_and_tag(
1851
&mut self,
1852
builder: &mut FunctionBuilder<'_>,
1853
tag_index: TagIndex,
1854
) -> (ir::Value, ir::Value) {
1855
if let Some(defined_tag_index) = self.module.defined_tag_index(tag_index) {
1856
// Our own tag -- we only need to get our instance ID.
1857
let builtin = self.builtin_functions.get_instance_id(builder.func);
1858
let vmctx = self.vmctx_val(&mut builder.cursor());
1859
let call = builder.ins().call(builtin, &[vmctx]);
1860
let instance_id = builder.func.dfg.inst_results(call)[0];
1861
let tag_id = builder
1862
.ins()
1863
.iconst(I32, i64::from(defined_tag_index.as_u32()));
1864
(instance_id, tag_id)
1865
} else {
1866
// An imported tag -- we need to load the VMTagImport struct.
1867
let vmctx_tag_vmctx_offset = self.offsets.vmctx_vmtag_import_vmctx(tag_index);
1868
let vmctx_tag_index_offset = self.offsets.vmctx_vmtag_import_index(tag_index);
1869
let vmctx = self.vmctx_val(&mut builder.cursor());
1870
let pointer_type = self.pointer_type();
1871
let from_vmctx = builder.ins().load(
1872
pointer_type,
1873
MemFlags::trusted().with_readonly(),
1874
vmctx,
1875
i32::try_from(vmctx_tag_vmctx_offset).unwrap(),
1876
);
1877
let index = builder.ins().load(
1878
I32,
1879
MemFlags::trusted().with_readonly(),
1880
vmctx,
1881
i32::try_from(vmctx_tag_index_offset).unwrap(),
1882
);
1883
let builtin = self.builtin_functions.get_instance_id(builder.func);
1884
let call = builder.ins().call(builtin, &[from_vmctx]);
1885
let from_instance_id = builder.func.dfg.inst_results(call)[0];
1886
(from_instance_id, index)
1887
}
1888
}
1889
}
1890
1891
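/// Everything needed to lower a single Wasm call site: the function
/// builder, the function environment, the source location (used for debug
/// tags), and whether this call is in tail position.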
struct Call<'a, 'func, 'module_env> {
1892
builder: &'a mut FunctionBuilder<'func>,
1893
env: &'a mut FuncEnvironment<'module_env>,
1894
srcloc: ir::SourceLoc,
1895
tail: bool,
1896
}
1897
1898
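/// How the type check for a `call_indirect` is satisfied: `Runtime` means
/// a dynamic check was emitted, `StaticMatch` means the table's element
/// type already guarantees a match, and `StaticTrap` means the call can
/// never succeed and a trap was already emitted.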
enum CheckIndirectCallTypeSignature {
1899
Runtime,
1900
StaticMatch {
1901
/// Whether or not the funcref may be null or if it's statically known
1902
/// to not be null.
1903
may_be_null: bool,
1904
},
1905
StaticTrap,
1906
}
1907
1908
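/// Results of a lowered call, with inline storage for the common case of a
/// few return values.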
type CallRets = SmallVec<[ir::Value; 4]>;
1909
1910
impl<'a, 'func, 'module_env> Call<'a, 'func, 'module_env> {
1911
/// Create a new `Call` site that will do regular, non-tail calls.
1912
pub fn new(
1913
builder: &'a mut FunctionBuilder<'func>,
1914
env: &'a mut FuncEnvironment<'module_env>,
1915
srcloc: ir::SourceLoc,
1916
) -> Self {
1917
Call {
1918
builder,
1919
env,
1920
srcloc,
1921
tail: false,
1922
}
1923
}
1924
1925
/// Create a new `Call` site that will perform tail calls.
1926
pub fn new_tail(
1927
builder: &'a mut FunctionBuilder<'func>,
1928
env: &'a mut FuncEnvironment<'module_env>,
1929
srcloc: ir::SourceLoc,
1930
) -> Self {
1931
Call {
1932
builder,
1933
env,
1934
srcloc,
1935
tail: true,
1936
}
1937
}
1938
1939
/// Do a Wasm-level direct call to the given callee function.
1940
pub fn direct_call(
1941
mut self,
1942
callee_index: FuncIndex,
1943
sig_ref: ir::SigRef,
1944
wasm_call_args: &[ir::Value],
1945
) -> WasmResult<CallRets> {
1946
let mut real_call_args = Vec::with_capacity(wasm_call_args.len() + 2);
1947
let caller_vmctx = self
1948
.builder
1949
.func
1950
.special_param(ArgumentPurpose::VMContext)
1951
.unwrap();
1952
1953
// Handle direct calls to locally-defined functions.
1954
if let Some(def_func_index) = self.env.module.defined_func_index(callee_index) {
1955
// First append the callee vmctx address, which is the same as the caller vmctx in
1956
// this case.
1957
real_call_args.push(caller_vmctx);
1958
1959
// Then append the caller vmctx address.
1960
real_call_args.push(caller_vmctx);
1961
1962
// Then append the regular call arguments.
1963
real_call_args.extend_from_slice(wasm_call_args);
1964
1965
// Finally, make the direct call!
1966
let callee = self
1967
.env
1968
.get_or_create_defined_func_ref(self.builder.func, def_func_index);
1969
return Ok(self.direct_call_inst(callee, &real_call_args));
1970
}
1971
1972
// Handle direct calls to imported functions. We use an indirect call
1973
// so that we don't have to patch the code at runtime.
1974
let pointer_type = self.env.pointer_type();
1975
let vmctx = self.env.vmctx(self.builder.func);
1976
let base = self.builder.ins().global_value(pointer_type, vmctx);
1977
1978
let mem_flags = ir::MemFlags::trusted().with_readonly().with_can_move();
1979
1980
// Load the callee address.
1981
let body_offset = i32::try_from(
1982
self.env
1983
.offsets
1984
.vmctx_vmfunction_import_wasm_call(callee_index),
1985
)
1986
.unwrap();
1987
1988
// First append the callee vmctx address.
1989
let vmctx_offset =
1990
i32::try_from(self.env.offsets.vmctx_vmfunction_import_vmctx(callee_index)).unwrap();
1991
let callee_vmctx = self
1992
.builder
1993
.ins()
1994
.load(pointer_type, mem_flags, base, vmctx_offset);
1995
real_call_args.push(callee_vmctx);
1996
real_call_args.push(caller_vmctx);
1997
1998
// Then append the Wasm call arguments.
1999
real_call_args.extend_from_slice(wasm_call_args);
2000
2001
// If we statically know the imported function (e.g. this is a
2002
// component-to-component call where we statically know both components)
2003
// then we can avoid doing an indirect call.
2004
match self.env.translation.known_imported_functions[callee_index].as_ref() {
2005
// The import is always a compile-time builtin intrinsic. Make a
2006
// direct call to that function (presumably it will eventually be
2007
// inlined).
2008
#[cfg(feature = "component-model")]
2009
Some(FuncKey::UnsafeIntrinsic(..)) => {
2010
let callee = self
2011
.env
2012
.get_or_create_imported_func_ref(self.builder.func, callee_index);
2013
Ok(self.direct_call_inst(callee, &real_call_args))
2014
}
2015
2016
// The import is always satisfied with the given defined Wasm
2017
// function, so do a direct call to that function! (Although we take
2018
// care to still pass its `funcref`'s `vmctx` as the callee `vmctx`
2019
// in `real_call_args` and not the caller's.)
2020
Some(FuncKey::DefinedWasmFunction(..)) => {
2021
let callee = self
2022
.env
2023
.get_or_create_imported_func_ref(self.builder.func, callee_index);
2024
Ok(self.direct_call_inst(callee, &real_call_args))
2025
}
2026
2027
Some(key) => panic!("unexpected kind of known-import function: {key:?}"),
2028
2029
// The imported function is unknown, or this module is instantiated
// many times with different functions. Either way, we have to do the
// indirect call.
2032
None => {
2033
let func_addr = self
2034
.builder
2035
.ins()
2036
.load(pointer_type, mem_flags, base, body_offset);
2037
Ok(self.indirect_call_inst(sig_ref, func_addr, &real_call_args))
2038
}
2039
}
2040
}
2041
2042
/// Do a Wasm-level indirect call through the given funcref table.
2043
pub fn indirect_call(
2044
mut self,
2045
features: &WasmFeatures,
2046
table_index: TableIndex,
2047
ty_index: TypeIndex,
2048
sig_ref: ir::SigRef,
2049
callee: ir::Value,
2050
call_args: &[ir::Value],
2051
) -> WasmResult<Option<CallRets>> {
2052
let (code_ptr, callee_vmctx) = match self.check_and_load_code_and_callee_vmctx(
2053
features,
2054
table_index,
2055
ty_index,
2056
callee,
2057
false,
2058
)? {
2059
Some(pair) => pair,
2060
None => return Ok(None),
2061
};
2062
2063
self.unchecked_call_impl(sig_ref, code_ptr, callee_vmctx, call_args)
2064
.map(Some)
2065
}
2066
2067
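/// Load the funcref out of the table, perform any signature check that is
/// required, and return the callee's code pointer and vmctx, or `None` if
/// the call was statically determined to trap.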
fn check_and_load_code_and_callee_vmctx(
2068
&mut self,
2069
features: &WasmFeatures,
2070
table_index: TableIndex,
2071
ty_index: TypeIndex,
2072
callee: ir::Value,
2073
cold_blocks: bool,
2074
) -> WasmResult<Option<(ir::Value, ir::Value)>> {
2075
// Get the funcref pointer from the table.
2076
let funcref_ptr = self.env.get_or_init_func_ref_table_elem(
2077
self.builder,
2078
table_index,
2079
callee,
2080
cold_blocks,
2081
);
2082
2083
// If necessary, check the signature.
2084
let check =
2085
self.check_indirect_call_type_signature(features, table_index, ty_index, funcref_ptr);
2086
2087
let trap_code = match check {
2088
// The type of `funcref_ptr` is checked at runtime to match, meaning
// that if code gets this far it's guaranteed to not be null. That
// means nothing in `unchecked_call` can fail.
2091
CheckIndirectCallTypeSignature::Runtime => None,
2092
2093
// No type check was performed on `funcref_ptr` because it's
2094
// statically known to have the right type. Note that whether or
2095
// not the function is null is not necessarily tested so far since
2096
// no type information was inspected.
2097
//
2098
// If the table may hold null functions, then further loads in
2099
// `unchecked_call` may fail. If the table only holds non-null
2100
// functions, though, then there's no possibility of a trap.
2101
CheckIndirectCallTypeSignature::StaticMatch { may_be_null } => {
2102
if may_be_null {
2103
Some(crate::TRAP_INDIRECT_CALL_TO_NULL)
2104
} else {
2105
None
2106
}
2107
}
2108
2109
// Code has already trapped, so return nothing indicating that this
2110
// is now unreachable code.
2111
CheckIndirectCallTypeSignature::StaticTrap => return Ok(None),
2112
};
2113
2114
Ok(Some(self.load_code_and_vmctx(funcref_ptr, trap_code)))
2115
}
2116
2117
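/// Determine how the `call_indirect` type check is satisfied for this
/// table and type index, emitting a dynamic signature comparison (or, with
/// the GC proposal, a full subtype check) only when the answer cannot be
/// determined statically.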
fn check_indirect_call_type_signature(
2118
&mut self,
2119
features: &WasmFeatures,
2120
table_index: TableIndex,
2121
ty_index: TypeIndex,
2122
funcref_ptr: ir::Value,
2123
) -> CheckIndirectCallTypeSignature {
2124
let table = &self.env.module.tables[table_index];
2125
let sig_id_size = self.env.offsets.size_of_vmshared_type_index();
2126
let sig_id_type = Type::int(u16::from(sig_id_size) * 8).unwrap();
2127
2128
// Test if a type check is necessary for this table. If this table is a
2129
// table of typed functions and that type matches `ty_index`, then
2130
// there's no need to perform a typecheck.
2131
match table.ref_type.heap_type {
2132
// Functions do not have a statically known type in the table, so a
// typecheck is required. Fall through to below to perform the
// actual typecheck.
2135
WasmHeapType::Func => {}
2136
2137
// Functions that have a statically known type are either going to
2138
// always succeed or always fail. Figure out by inspecting the types
2139
// further.
2140
WasmHeapType::ConcreteFunc(EngineOrModuleTypeIndex::Module(table_ty)) => {
2141
// If `ty_index` matches `table_ty`, then this call is
2142
// statically known to have the right type, so no checks are
2143
// necessary.
2144
let specified_ty = self.env.module.types[ty_index].unwrap_module_type_index();
2145
if specified_ty == table_ty {
2146
return CheckIndirectCallTypeSignature::StaticMatch {
2147
may_be_null: table.ref_type.nullable,
2148
};
2149
}
2150
2151
if features.gc() {
2152
// If we are in the Wasm GC world, then we need to perform
2153
// an actual subtype check at runtime. Fall through to below
2154
// to do that.
2155
} else {
2156
// Otherwise if the types don't match then either (a) this
2157
// is a null pointer or (b) it's a pointer with the wrong
2158
// type. Figure out which and trap here.
2159
//
2160
// If it's possible to have a null here then try to load the
2161
// type information. If that fails due to the function being
2162
// a null pointer, then this was a call to null. Otherwise
2163
// if it succeeds then we know it won't match, so trap
2164
// anyway.
2165
if table.ref_type.nullable {
2166
if self.env.clif_memory_traps_enabled() {
2167
self.builder.ins().load(
2168
sig_id_type,
2169
ir::MemFlags::trusted()
2170
.with_readonly()
2171
.with_trap_code(Some(crate::TRAP_INDIRECT_CALL_TO_NULL)),
2172
funcref_ptr,
2173
i32::from(self.env.offsets.ptr.vm_func_ref_type_index()),
2174
);
2175
} else {
2176
self.env.trapz(
2177
self.builder,
2178
funcref_ptr,
2179
crate::TRAP_INDIRECT_CALL_TO_NULL,
2180
);
2181
}
2182
}
2183
self.env.trap(self.builder, crate::TRAP_BAD_SIGNATURE);
2184
return CheckIndirectCallTypeSignature::StaticTrap;
2185
}
2186
}
2187
2188
// Tables of `nofunc` can only be inhabited by null, so go ahead and
2189
// trap with that.
2190
WasmHeapType::NoFunc => {
2191
assert!(table.ref_type.nullable);
2192
self.env
2193
.trap(self.builder, crate::TRAP_INDIRECT_CALL_TO_NULL);
2194
return CheckIndirectCallTypeSignature::StaticTrap;
2195
}
2196
2197
// Engine-indexed types don't show up until runtime and it's a Wasm
2198
// validation error to perform a call through a non-function table,
2199
// so these cases are dynamically not reachable.
2200
WasmHeapType::ConcreteFunc(EngineOrModuleTypeIndex::Engine(_))
2201
| WasmHeapType::ConcreteFunc(EngineOrModuleTypeIndex::RecGroup(_))
2202
| WasmHeapType::Extern
2203
| WasmHeapType::NoExtern
2204
| WasmHeapType::Any
2205
| WasmHeapType::Eq
2206
| WasmHeapType::I31
2207
| WasmHeapType::Array
2208
| WasmHeapType::ConcreteArray(_)
2209
| WasmHeapType::Struct
2210
| WasmHeapType::ConcreteStruct(_)
2211
| WasmHeapType::Exn
2212
| WasmHeapType::ConcreteExn(_)
2213
| WasmHeapType::NoExn
2214
| WasmHeapType::Cont
2215
| WasmHeapType::ConcreteCont(_)
2216
| WasmHeapType::NoCont
2217
| WasmHeapType::None => {
2218
unreachable!()
2219
}
2220
}
2221
2222
// Load the caller's `VMSharedTypeIndex`.
2223
let interned_ty = self.env.module.types[ty_index].unwrap_module_type_index();
2224
let caller_sig_id = self
2225
.env
2226
.module_interned_to_shared_ty(&mut self.builder.cursor(), interned_ty);
2227
2228
// Load the callee's `VMSharedTypeIndex`.
2229
//
2230
// Note that the callee may be null, in which case this load may
// trap. If so, use the `TRAP_INDIRECT_CALL_TO_NULL` trap code.
2232
let mut mem_flags = ir::MemFlags::trusted().with_readonly();
2233
if self.env.clif_memory_traps_enabled() {
2234
mem_flags = mem_flags.with_trap_code(Some(crate::TRAP_INDIRECT_CALL_TO_NULL));
2235
} else {
2236
self.env
2237
.trapz(self.builder, funcref_ptr, crate::TRAP_INDIRECT_CALL_TO_NULL);
2238
}
2239
let callee_sig_id =
2240
self.env
2241
.load_funcref_type_index(&mut self.builder.cursor(), mem_flags, funcref_ptr);
2242
2243
// Check that they match: in the case of Wasm GC, this means doing a
2244
// full subtype check. Otherwise, we do a simple equality check.
2245
let matches = if features.gc() {
2246
#[cfg(feature = "gc")]
2247
{
2248
self.env
2249
.is_subtype(self.builder, callee_sig_id, caller_sig_id)
2250
}
2251
#[cfg(not(feature = "gc"))]
2252
{
2253
unreachable!()
2254
}
2255
} else {
2256
self.builder
2257
.ins()
2258
.icmp(IntCC::Equal, callee_sig_id, caller_sig_id)
2259
};
2260
self.env
2261
.trapz(self.builder, matches, crate::TRAP_BAD_SIGNATURE);
2262
CheckIndirectCallTypeSignature::Runtime
2263
}
2264
2265
/// Call a typed function reference.
2266
pub fn call_ref(
2267
self,
2268
sig_ref: ir::SigRef,
2269
callee: ir::Value,
2270
args: &[ir::Value],
2271
) -> WasmResult<CallRets> {
2272
// FIXME: the wasm type system tracks enough information to know whether
2273
// `callee` is a null reference or not. In some situations it can be
2274
// statically known here that `callee` cannot be null in which case this
2275
// can be `None` instead. This requires feeding type information from
2276
// wasmparser's validator into this function, however, which is not
2277
// easily done at this time.
2278
let callee_load_trap_code = Some(crate::TRAP_NULL_REFERENCE);
2279
2280
self.unchecked_call(sig_ref, callee, callee_load_trap_code, args)
2281
}
2282
2283
/// This calls a function by reference without checking the signature.
2284
///
2285
/// It gets the function address, sets relevant flags, and passes the
2286
/// special callee/caller vmctxs. It is used by both call_indirect (which
2287
/// checks the signature) and call_ref (which doesn't).
2288
fn unchecked_call(
2289
mut self,
2290
sig_ref: ir::SigRef,
2291
callee: ir::Value,
2292
callee_load_trap_code: Option<ir::TrapCode>,
2293
call_args: &[ir::Value],
2294
) -> WasmResult<CallRets> {
2295
let (func_addr, callee_vmctx) = self.load_code_and_vmctx(callee, callee_load_trap_code);
2296
self.unchecked_call_impl(sig_ref, func_addr, callee_vmctx, call_args)
2297
}
2298
2299
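/// Load the Wasm-calling-convention code pointer and the callee vmctx out
/// of a `VMFuncRef`, optionally trapping with the given trap code if the
/// funcref is null.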
fn load_code_and_vmctx(
2300
&mut self,
2301
callee: ir::Value,
2302
callee_load_trap_code: Option<ir::TrapCode>,
2303
) -> (ir::Value, ir::Value) {
2304
let pointer_type = self.env.pointer_type();
2305
2306
// Dereference callee pointer to get the function address.
2307
//
2308
// Note that this load may trap if `callee` hasn't previously been
// verified to be non-null. The load is therefore annotated with the
// optional trap code provided by the caller of `unchecked_call`,
// which covers both the case where the callee is already known to be
// non-null and the case where the load may trap.
2313
let mem_flags = ir::MemFlags::trusted().with_readonly();
2314
let mut callee_flags = mem_flags;
2315
if self.env.clif_memory_traps_enabled() {
2316
callee_flags = callee_flags.with_trap_code(callee_load_trap_code);
2317
} else {
2318
if let Some(trap) = callee_load_trap_code {
2319
self.env.trapz(self.builder, callee, trap);
2320
}
2321
}
2322
let func_addr = self.builder.ins().load(
2323
pointer_type,
2324
callee_flags,
2325
callee,
2326
i32::from(self.env.offsets.ptr.vm_func_ref_wasm_call()),
2327
);
2328
let callee_vmctx = self.builder.ins().load(
2329
pointer_type,
2330
mem_flags,
2331
callee,
2332
i32::from(self.env.offsets.ptr.vm_func_ref_vmctx()),
2333
);
2334
2335
(func_addr, callee_vmctx)
2336
}
2337
2338
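/// The current function's own vmctx (its special `VMContext` parameter),
/// which is passed to callees as the caller vmctx.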
fn caller_vmctx(&self) -> ir::Value {
2339
self.builder
2340
.func
2341
.special_param(ArgumentPurpose::VMContext)
2342
.unwrap()
2343
}
2344
2345
/// This calls a function by reference without checking the
2346
/// signature, given the raw code pointer to the
2347
/// Wasm-calling-convention entry point and the callee vmctx.
2348
fn unchecked_call_impl(
2349
mut self,
2350
sig_ref: ir::SigRef,
2351
func_addr: ir::Value,
2352
callee_vmctx: ir::Value,
2353
call_args: &[ir::Value],
2354
) -> WasmResult<CallRets> {
2355
let mut real_call_args = Vec::with_capacity(call_args.len() + 2);
2356
let caller_vmctx = self.caller_vmctx();
2357
2358
// First append the callee and caller vmctx addresses.
2359
real_call_args.push(callee_vmctx);
2360
real_call_args.push(caller_vmctx);
2361
2362
// Then append the regular call arguments.
2363
real_call_args.extend_from_slice(call_args);
2364
2365
Ok(self.indirect_call_inst(sig_ref, func_addr, &real_call_args))
2366
}
2367
2368
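/// If this call site is not a tail call and has exception handlers in
/// scope, build the exception table and continuation block needed to lower
/// it as a `try_call`; otherwise return `None` and use a plain call.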
fn exception_table(
2369
&mut self,
2370
sig: ir::SigRef,
2371
) -> Option<(ir::ExceptionTable, Block, CallRets)> {
2372
if !self.tail && !self.env.stacks.handlers.is_empty() {
2373
let continuation_block = self.builder.create_block();
2374
let mut args = vec![];
2375
let mut results = smallvec![];
2376
for i in 0..self.builder.func.dfg.signatures[sig].returns.len() {
2377
let ty = self.builder.func.dfg.signatures[sig].returns[i].value_type;
2378
results.push(
2379
self.builder
2380
.func
2381
.dfg
2382
.append_block_param(continuation_block, ty),
2383
);
2384
args.push(BlockArg::TryCallRet(u32::try_from(i).unwrap()));
2385
}
2386
2387
let continuation = self
2388
.builder
2389
.func
2390
.dfg
2391
.block_call(continuation_block, args.iter());
2392
let mut handlers = vec![ExceptionTableItem::Context(self.caller_vmctx())];
2393
for (tag, block) in self.env.stacks.handlers.handlers() {
2394
let block_call = self
2395
.builder
2396
.func
2397
.dfg
2398
.block_call(block, &[BlockArg::TryCallExn(0)]);
2399
handlers.push(match tag {
2400
Some(tag) => ExceptionTableItem::Tag(tag, block_call),
2401
None => ExceptionTableItem::Default(block_call),
2402
});
2403
}
2404
let etd = ExceptionTableData::new(sig, continuation, handlers);
2405
let et = self.builder.func.dfg.exception_tables.push(etd);
2406
Some((et, continuation_block, results))
2407
} else {
2408
None
2409
}
2410
}
2411
2412
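/// Collect the results of a plain (non-`try_call`) call instruction.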
fn results_from_call_inst(&self, inst: ir::Inst) -> CallRets {
2413
self.builder
2414
.func
2415
.dfg
2416
.inst_results(inst)
2417
.iter()
2418
.copied()
2419
.collect()
2420
}
2421
2422
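/// Mark any GC-managed call results as needing entries in stack maps.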
fn handle_call_result_stackmap(&mut self, results: &[ir::Value], sig_ref: ir::SigRef) {
2423
for (i, &val) in results.iter().enumerate() {
2424
if self.env.sig_ref_result_needs_stack_map(sig_ref, i) {
2425
self.builder.declare_value_needs_stack_map(val);
2426
}
2427
}
2428
}
2429
2430
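/// Emit the direct call itself: `return_call` for tail calls, `try_call`
/// when exception handlers are in scope, or a plain `call` otherwise.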
fn direct_call_inst(&mut self, callee: ir::FuncRef, args: &[ir::Value]) -> CallRets {
2431
let sig_ref = self.builder.func.dfg.ext_funcs[callee].signature;
2432
if self.tail {
2433
self.builder.ins().return_call(callee, args);
2434
smallvec![]
2435
} else if let Some((exception_table, continuation_block, results)) =
2436
self.exception_table(sig_ref)
2437
{
2438
let inst = self.builder.ins().try_call(callee, args, exception_table);
2439
self.handle_call_result_stackmap(&results, sig_ref);
2440
self.builder.switch_to_block(continuation_block);
2441
self.builder.seal_block(continuation_block);
2442
self.attach_tags(inst);
2443
results
2444
} else {
2445
let inst = self.builder.ins().call(callee, args);
2446
let results = self.results_from_call_inst(inst);
2447
self.handle_call_result_stackmap(&results, sig_ref);
2448
self.attach_tags(inst);
2449
results
2450
}
2451
}
2452
2453
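/// Emit the indirect call itself, mirroring `direct_call_inst` but calling
/// through a code pointer and signature.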
fn indirect_call_inst(
2454
&mut self,
2455
sig_ref: ir::SigRef,
2456
func_addr: ir::Value,
2457
args: &[ir::Value],
2458
) -> CallRets {
2459
if self.tail {
2460
self.builder
2461
.ins()
2462
.return_call_indirect(sig_ref, func_addr, args);
2463
smallvec![]
2464
} else if let Some((exception_table, continuation_block, results)) =
2465
self.exception_table(sig_ref)
2466
{
2467
let inst = self
2468
.builder
2469
.ins()
2470
.try_call_indirect(func_addr, args, exception_table);
2471
self.handle_call_result_stackmap(&results, sig_ref);
2472
self.builder.switch_to_block(continuation_block);
2473
self.builder.seal_block(continuation_block);
2474
self.attach_tags(inst);
2475
results
2476
} else {
2477
let inst = self.builder.ins().call_indirect(sig_ref, func_addr, args);
2478
let results = self.results_from_call_inst(inst);
2479
self.handle_call_result_stackmap(&results, sig_ref);
2480
self.attach_tags(inst);
2481
results
2482
}
2483
}
2484
2485
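/// Attach any debug tags associated with this call's source location to
/// the emitted call instruction.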
fn attach_tags(&mut self, inst: ir::Inst) {
2486
let tags = self.env.debug_tags(self.srcloc);
2487
if !tags.is_empty() {
2488
self.builder.func.debug_tags.set(inst, tags);
2489
}
2490
}
2491
}
2492
2493
impl TypeConvert for FuncEnvironment<'_> {
2494
fn lookup_heap_type(&self, ty: wasmparser::UnpackedIndex) -> WasmHeapType {
2495
wasmtime_environ::WasmparserTypeConverter::new(self.types, |idx| {
2496
self.module.types[idx].unwrap_module_type_index()
2497
})
2498
.lookup_heap_type(ty)
2499
}
2500
2501
fn lookup_type_index(&self, index: wasmparser::UnpackedIndex) -> EngineOrModuleTypeIndex {
2502
wasmtime_environ::WasmparserTypeConverter::new(self.types, |idx| {
2503
self.module.types[idx].unwrap_module_type_index()
2504
})
2505
.lookup_type_index(index)
2506
}
2507
}
2508
2509
impl<'module_environment> TargetEnvironment for FuncEnvironment<'module_environment> {
2510
fn target_config(&self) -> TargetFrontendConfig {
2511
self.isa.frontend_config()
2512
}
2513
2514
fn reference_type(&self, wasm_ty: WasmHeapType) -> (ir::Type, bool) {
2515
let ty = crate::reference_type(wasm_ty, self.pointer_type());
2516
let needs_stack_map = match wasm_ty.top() {
2517
WasmHeapTopType::Extern | WasmHeapTopType::Any | WasmHeapTopType::Exn => true,
2518
WasmHeapTopType::Func => false,
2519
// TODO(#10248) Once continuations can be stored on the GC heap, we
2520
// will need stack maps for continuation objects.
2521
WasmHeapTopType::Cont => false,
2522
};
2523
(ty, needs_stack_map)
2524
}
2525
2526
fn heap_access_spectre_mitigation(&self) -> bool {
2527
self.isa.flags().enable_heap_access_spectre_mitigation()
2528
}
2529
2530
fn proof_carrying_code(&self) -> bool {
2531
self.isa.flags().enable_pcc()
2532
}
2533
2534
fn tunables(&self) -> &Tunables {
2535
self.compiler.tunables()
2536
}
2537
}
2538
2539
impl FuncEnvironment<'_> {
2540
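/// All heaps (linear memories) that have been created for this function so
/// far.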
pub fn heaps(&self) -> &PrimaryMap<Heap, HeapData> {
2541
&self.heaps
2542
}
2543
2544
pub fn is_wasm_parameter(&self, index: usize) -> bool {
2545
// The first two parameters are the vmctx and caller vmctx. The rest are
2546
// the wasm parameters.
2547
index >= 2
2548
}
2549
2550
pub fn clif_param_as_wasm_param(&self, index: usize) -> Option<WasmValType> {
2551
if index >= 2 {
2552
Some(self.wasm_func_ty.params()[index - 2])
2553
} else {
2554
None
2555
}
2556
}
2557
2558
pub fn param_needs_stack_map(&self, _signature: &ir::Signature, index: usize) -> bool {
2559
// Skip the caller and callee vmctx.
2560
if index < 2 {
2561
return false;
2562
}
2563
2564
self.wasm_func_ty.params()[index - 2].is_vmgcref_type_and_not_i31()
2565
}
2566
2567
pub fn sig_ref_result_needs_stack_map(&self, sig_ref: ir::SigRef, index: usize) -> bool {
2568
let wasm_func_ty = self.sig_ref_to_ty[sig_ref].as_ref().unwrap();
2569
wasm_func_ty.returns()[index].is_vmgcref_type_and_not_i31()
2570
}
2571
2572
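/// Lower `table.grow` by calling the builtin appropriate for the table's
/// element type (GC reference, funcref, or continuation).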
pub fn translate_table_grow(
2573
&mut self,
2574
builder: &mut FunctionBuilder<'_>,
2575
table_index: TableIndex,
2576
delta: ir::Value,
2577
init_value: ir::Value,
2578
) -> WasmResult<ir::Value> {
2579
let mut pos = builder.cursor();
2580
let table = self.table(table_index);
2581
let ty = table.ref_type.heap_type;
2582
let (table_vmctx, defined_table_index) =
2583
self.table_vmctx_and_defined_index(&mut pos, table_index);
2584
let index_type = table.idx_type;
2585
let delta = self.cast_index_to_i64(&mut pos, delta, index_type);
2586
2587
let mut args: SmallVec<[_; 6]> = smallvec![table_vmctx, defined_table_index, delta];
2588
let grow = match ty.top() {
2589
WasmHeapTopType::Extern | WasmHeapTopType::Any | WasmHeapTopType::Exn => {
2590
args.push(init_value);
2591
gc::builtins::table_grow_gc_ref(self, pos.func)?
2592
}
2593
WasmHeapTopType::Func => {
2594
args.push(init_value);
2595
self.builtin_functions.table_grow_func_ref(pos.func)
2596
}
2597
WasmHeapTopType::Cont => {
2598
let (revision, contref) =
2599
stack_switching::fatpointer::deconstruct(self, &mut pos, init_value);
2600
args.extend_from_slice(&[contref, revision]);
2601
stack_switching::builtins::table_grow_cont_obj(self, pos.func)?
2602
}
2603
};
2604
2605
let call_inst = pos.ins().call(grow, &args);
2606
let result = builder.func.dfg.first_result(call_inst);
2607
2608
Ok(self.convert_pointer_to_index_type(builder.cursor(), result, index_type, false))
2609
}
2610
2611
pub fn translate_table_get(
2612
&mut self,
2613
builder: &mut FunctionBuilder,
2614
table_index: TableIndex,
2615
index: ir::Value,
2616
) -> WasmResult<ir::Value> {
2617
let table = self.module.tables[table_index];
2618
let table_data = self.get_or_create_table(builder.func, table_index);
2619
let heap_ty = table.ref_type.heap_type;
2620
match heap_ty.top() {
2621
// GC-managed types.
2622
WasmHeapTopType::Any | WasmHeapTopType::Extern | WasmHeapTopType::Exn => {
2623
let (src, flags) = table_data.prepare_table_addr(self, builder, index);
2624
gc::gc_compiler(self)?.translate_read_gc_reference(
2625
self,
2626
builder,
2627
table.ref_type,
2628
src,
2629
flags,
2630
)
2631
}
2632
2633
// Function types.
2634
WasmHeapTopType::Func => {
2635
Ok(self.get_or_init_func_ref_table_elem(builder, table_index, index, false))
2636
}
2637
2638
// Continuation types.
2639
WasmHeapTopType::Cont => {
2640
let (elem_addr, flags) = table_data.prepare_table_addr(self, builder, index);
2641
Ok(builder.ins().load(
2642
stack_switching::fatpointer::fatpointer_type(self),
2643
flags,
2644
elem_addr,
2645
0,
2646
))
2647
}
2648
}
2649
}
2650
2651
pub fn translate_table_set(
2652
&mut self,
2653
builder: &mut FunctionBuilder,
2654
table_index: TableIndex,
2655
value: ir::Value,
2656
index: ir::Value,
2657
) -> WasmResult<()> {
2658
let table = self.module.tables[table_index];
2659
let table_data = self.get_or_create_table(builder.func, table_index);
2660
let heap_ty = table.ref_type.heap_type;
2661
match heap_ty.top() {
2662
// GC-managed types.
2663
WasmHeapTopType::Any | WasmHeapTopType::Extern | WasmHeapTopType::Exn => {
2664
let (dst, flags) = table_data.prepare_table_addr(self, builder, index);
2665
gc::gc_compiler(self)?.translate_write_gc_reference(
2666
self,
2667
builder,
2668
table.ref_type,
2669
dst,
2670
value,
2671
flags,
2672
)
2673
}
2674
2675
// Function types.
2676
WasmHeapTopType::Func => {
2677
let (elem_addr, flags) = table_data.prepare_table_addr(self, builder, index);
2678
// Set the "initialized bit". See doc-comment on
2679
// `FUNCREF_INIT_BIT` in
2680
// crates/environ/src/ref_bits.rs for details.
2681
let value_with_init_bit = if self.tunables.table_lazy_init {
2682
builder
2683
.ins()
2684
.bor_imm(value, Imm64::from(FUNCREF_INIT_BIT as i64))
2685
} else {
2686
value
2687
};
2688
builder
2689
.ins()
2690
.store(flags, value_with_init_bit, elem_addr, 0);
2691
Ok(())
2692
}
2693
2694
// Continuation types.
2695
WasmHeapTopType::Cont => {
2696
let (elem_addr, flags) = table_data.prepare_table_addr(self, builder, index);
2697
builder.ins().store(flags, value, elem_addr, 0);
2698
Ok(())
2699
}
2700
}
2701
}
2702
2703
pub fn translate_table_fill(
2704
&mut self,
2705
builder: &mut FunctionBuilder<'_>,
2706
table_index: TableIndex,
2707
dst: ir::Value,
2708
val: ir::Value,
2709
len: ir::Value,
2710
) -> WasmResult<()> {
2711
let mut pos = builder.cursor();
2712
let table = self.table(table_index);
2713
let ty = table.ref_type.heap_type;
2714
let dst = self.cast_index_to_i64(&mut pos, dst, table.idx_type);
2715
let len = self.cast_index_to_i64(&mut pos, len, table.idx_type);
2716
let (table_vmctx, table_index) = self.table_vmctx_and_defined_index(&mut pos, table_index);
2717
2718
let mut args: SmallVec<[_; 6]> = smallvec![table_vmctx, table_index, dst];
2719
let libcall = match ty.top() {
2720
WasmHeapTopType::Any | WasmHeapTopType::Extern | WasmHeapTopType::Exn => {
2721
args.push(val);
2722
gc::builtins::table_fill_gc_ref(self, &mut pos.func)?
2723
}
2724
WasmHeapTopType::Func => {
2725
args.push(val);
2726
self.builtin_functions.table_fill_func_ref(&mut pos.func)
2727
}
2728
WasmHeapTopType::Cont => {
2729
let (revision, contref) =
2730
stack_switching::fatpointer::deconstruct(self, &mut pos, val);
2731
args.extend_from_slice(&[contref, revision]);
2732
stack_switching::builtins::table_fill_cont_obj(self, &mut pos.func)?
2733
}
2734
};
2735
2736
args.push(len);
2737
builder.ins().call(libcall, &args);
2738
2739
Ok(())
2740
}
2741
2742
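/// Lower `ref.i31`: shift the `i32` left by one and set the i31ref
/// discriminant bit. i31refs are unboxed, so no allocation is needed.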
pub fn translate_ref_i31(
2743
&mut self,
2744
mut pos: FuncCursor,
2745
val: ir::Value,
2746
) -> WasmResult<ir::Value> {
2747
debug_assert_eq!(pos.func.dfg.value_type(val), ir::types::I32);
2748
let shifted = pos.ins().ishl_imm(val, 1);
2749
let tagged = pos
2750
.ins()
2751
.bor_imm(shifted, i64::from(crate::I31_REF_DISCRIMINANT));
2752
let (ref_ty, _needs_stack_map) = self.reference_type(WasmHeapType::I31);
2753
debug_assert_eq!(ref_ty, ir::types::I32);
2754
Ok(tagged)
2755
}
2756
2757
pub fn translate_i31_get_s(
2758
&mut self,
2759
builder: &mut FunctionBuilder,
2760
i31ref: ir::Value,
2761
) -> WasmResult<ir::Value> {
2762
// TODO: If we knew we have a `(ref i31)` here, instead of maybe a `(ref
2763
// null i31)`, we could omit the `trapz`. But plumbing that type info
2764
// from `wasmparser` and through to here is a bit funky.
2765
self.trapz(builder, i31ref, crate::TRAP_NULL_REFERENCE);
2766
Ok(builder.ins().sshr_imm(i31ref, 1))
2767
}
2768
2769
pub fn translate_i31_get_u(
2770
&mut self,
2771
builder: &mut FunctionBuilder,
2772
i31ref: ir::Value,
2773
) -> WasmResult<ir::Value> {
2774
// TODO: If we knew we have a `(ref i31)` here, instead of maybe a `(ref
2775
// null i31)`, we could omit the `trapz`. But plumbing that type info
2776
// from `wasmparser` and through to here is a bit funky.
2777
self.trapz(builder, i31ref, crate::TRAP_NULL_REFERENCE);
2778
Ok(builder.ins().ushr_imm(i31ref, 1))
2779
}
2780
2781
pub fn struct_fields_len(&mut self, struct_type_index: TypeIndex) -> WasmResult<usize> {
2782
let ty = self.module.types[struct_type_index].unwrap_module_type_index();
2783
match &self.types[ty].composite_type.inner {
2784
WasmCompositeInnerType::Struct(s) => Ok(s.fields.len()),
2785
_ => unreachable!(),
2786
}
2787
}
2788
2789
pub fn translate_struct_new(
2790
&mut self,
2791
builder: &mut FunctionBuilder,
2792
struct_type_index: TypeIndex,
2793
fields: StructFieldsVec,
2794
) -> WasmResult<ir::Value> {
2795
gc::translate_struct_new(self, builder, struct_type_index, &fields)
2796
}
2797
2798
pub fn translate_struct_new_default(
2799
&mut self,
2800
builder: &mut FunctionBuilder,
2801
struct_type_index: TypeIndex,
2802
) -> WasmResult<ir::Value> {
2803
gc::translate_struct_new_default(self, builder, struct_type_index)
2804
}
2805
2806
pub fn translate_struct_get(
2807
&mut self,
2808
builder: &mut FunctionBuilder,
2809
struct_type_index: TypeIndex,
2810
field_index: u32,
2811
struct_ref: ir::Value,
2812
extension: Option<Extension>,
2813
) -> WasmResult<ir::Value> {
2814
gc::translate_struct_get(
2815
self,
2816
builder,
2817
struct_type_index,
2818
field_index,
2819
struct_ref,
2820
extension,
2821
)
2822
}
2823
2824
pub fn translate_struct_set(
2825
&mut self,
2826
builder: &mut FunctionBuilder,
2827
struct_type_index: TypeIndex,
2828
field_index: u32,
2829
struct_ref: ir::Value,
2830
value: ir::Value,
2831
) -> WasmResult<()> {
2832
gc::translate_struct_set(
2833
self,
2834
builder,
2835
struct_type_index,
2836
field_index,
2837
struct_ref,
2838
value,
2839
)
2840
}
2841
2842
pub fn translate_exn_unbox(
2843
&mut self,
2844
builder: &mut FunctionBuilder<'_>,
2845
tag_index: TagIndex,
2846
exn_ref: ir::Value,
2847
) -> WasmResult<SmallVec<[ir::Value; 4]>> {
2848
gc::translate_exn_unbox(self, builder, tag_index, exn_ref)
2849
}
2850
2851
pub fn translate_exn_throw(
2852
&mut self,
2853
builder: &mut FunctionBuilder<'_>,
2854
tag_index: TagIndex,
2855
args: &[ir::Value],
2856
) -> WasmResult<()> {
2857
gc::translate_exn_throw(self, builder, tag_index, args)
2858
}
2859
2860
pub fn translate_exn_throw_ref(
2861
&mut self,
2862
builder: &mut FunctionBuilder<'_>,
2863
exnref: ir::Value,
2864
) -> WasmResult<()> {
2865
gc::translate_exn_throw_ref(self, builder, exnref)
2866
}
2867
2868
pub fn translate_array_new(
2869
&mut self,
2870
builder: &mut FunctionBuilder,
2871
array_type_index: TypeIndex,
2872
elem: ir::Value,
2873
len: ir::Value,
2874
) -> WasmResult<ir::Value> {
2875
gc::translate_array_new(self, builder, array_type_index, elem, len)
2876
}
2877
2878
pub fn translate_array_new_default(
2879
&mut self,
2880
builder: &mut FunctionBuilder,
2881
array_type_index: TypeIndex,
2882
len: ir::Value,
2883
) -> WasmResult<ir::Value> {
2884
gc::translate_array_new_default(self, builder, array_type_index, len)
2885
}
2886
2887
pub fn translate_array_new_fixed(
2888
&mut self,
2889
builder: &mut FunctionBuilder,
2890
array_type_index: TypeIndex,
2891
elems: &[ir::Value],
2892
) -> WasmResult<ir::Value> {
2893
gc::translate_array_new_fixed(self, builder, array_type_index, elems)
2894
}
2895
2896
pub fn translate_array_new_data(
2897
&mut self,
2898
builder: &mut FunctionBuilder,
2899
array_type_index: TypeIndex,
2900
data_index: DataIndex,
2901
data_offset: ir::Value,
2902
len: ir::Value,
2903
) -> WasmResult<ir::Value> {
2904
let libcall = gc::builtins::array_new_data(self, builder.func)?;
2905
let vmctx = self.vmctx_val(&mut builder.cursor());
2906
let interned_type_index = self.module.types[array_type_index].unwrap_module_type_index();
2907
let interned_type_index = builder
2908
.ins()
2909
.iconst(I32, i64::from(interned_type_index.as_u32()));
2910
let data_index = builder.ins().iconst(I32, i64::from(data_index.as_u32()));
2911
let call_inst = builder.ins().call(
2912
libcall,
2913
&[vmctx, interned_type_index, data_index, data_offset, len],
2914
);
2915
Ok(builder.func.dfg.first_result(call_inst))
2916
}
2917
2918
pub fn translate_array_new_elem(
2919
&mut self,
2920
builder: &mut FunctionBuilder,
2921
array_type_index: TypeIndex,
2922
elem_index: ElemIndex,
2923
elem_offset: ir::Value,
2924
len: ir::Value,
2925
) -> WasmResult<ir::Value> {
2926
let libcall = gc::builtins::array_new_elem(self, builder.func)?;
2927
let vmctx = self.vmctx_val(&mut builder.cursor());
2928
let interned_type_index = self.module.types[array_type_index].unwrap_module_type_index();
2929
let interned_type_index = builder
2930
.ins()
2931
.iconst(I32, i64::from(interned_type_index.as_u32()));
2932
let elem_index = builder.ins().iconst(I32, i64::from(elem_index.as_u32()));
2933
let call_inst = builder.ins().call(
2934
libcall,
2935
&[vmctx, interned_type_index, elem_index, elem_offset, len],
2936
);
2937
Ok(builder.func.dfg.first_result(call_inst))
2938
}
2939
2940
pub fn translate_array_copy(
2941
&mut self,
2942
builder: &mut FunctionBuilder,
2943
_dst_array_type_index: TypeIndex,
2944
dst_array: ir::Value,
2945
dst_index: ir::Value,
2946
_src_array_type_index: TypeIndex,
2947
src_array: ir::Value,
2948
src_index: ir::Value,
2949
len: ir::Value,
2950
) -> WasmResult<()> {
2951
let libcall = gc::builtins::array_copy(self, builder.func)?;
2952
let vmctx = self.vmctx_val(&mut builder.cursor());
2953
builder.ins().call(
2954
libcall,
2955
&[vmctx, dst_array, dst_index, src_array, src_index, len],
2956
);
2957
Ok(())
2958
}
2959
2960
pub fn translate_array_fill(
2961
&mut self,
2962
builder: &mut FunctionBuilder,
2963
array_type_index: TypeIndex,
2964
array: ir::Value,
2965
index: ir::Value,
2966
value: ir::Value,
2967
len: ir::Value,
2968
) -> WasmResult<()> {
2969
gc::translate_array_fill(self, builder, array_type_index, array, index, value, len)
2970
}
2971
2972
pub fn translate_array_init_data(
2973
&mut self,
2974
builder: &mut FunctionBuilder,
2975
array_type_index: TypeIndex,
2976
array: ir::Value,
2977
dst_index: ir::Value,
2978
data_index: DataIndex,
2979
data_offset: ir::Value,
2980
len: ir::Value,
2981
) -> WasmResult<()> {
2982
let libcall = gc::builtins::array_init_data(self, builder.func)?;
2983
let vmctx = self.vmctx_val(&mut builder.cursor());
2984
let interned_type_index = self.module.types[array_type_index].unwrap_module_type_index();
2985
let interned_type_index = builder
2986
.ins()
2987
.iconst(I32, i64::from(interned_type_index.as_u32()));
2988
let data_index = builder.ins().iconst(I32, i64::from(data_index.as_u32()));
2989
builder.ins().call(
2990
libcall,
2991
&[
2992
vmctx,
2993
interned_type_index,
2994
array,
2995
dst_index,
2996
data_index,
2997
data_offset,
2998
len,
2999
],
3000
);
3001
Ok(())
3002
}
3003
3004
pub fn translate_array_init_elem(
3005
&mut self,
3006
builder: &mut FunctionBuilder,
3007
array_type_index: TypeIndex,
3008
array: ir::Value,
3009
dst_index: ir::Value,
3010
elem_index: ElemIndex,
3011
elem_offset: ir::Value,
3012
len: ir::Value,
3013
) -> WasmResult<()> {
3014
let libcall = gc::builtins::array_init_elem(self, builder.func)?;
3015
let vmctx = self.vmctx_val(&mut builder.cursor());
3016
let interned_type_index = self.module.types[array_type_index].unwrap_module_type_index();
3017
let interned_type_index = builder
3018
.ins()
3019
.iconst(I32, i64::from(interned_type_index.as_u32()));
3020
let elem_index = builder.ins().iconst(I32, i64::from(elem_index.as_u32()));
3021
builder.ins().call(
3022
libcall,
3023
&[
3024
vmctx,
3025
interned_type_index,
3026
array,
3027
dst_index,
3028
elem_index,
3029
elem_offset,
3030
len,
3031
],
3032
);
3033
Ok(())
3034
}
3035
3036
pub fn translate_array_len(
3037
&mut self,
3038
builder: &mut FunctionBuilder,
3039
array: ir::Value,
3040
) -> WasmResult<ir::Value> {
3041
gc::translate_array_len(self, builder, array)
3042
}
3043
3044
pub fn translate_array_get(
3045
&mut self,
3046
builder: &mut FunctionBuilder,
3047
array_type_index: TypeIndex,
3048
array: ir::Value,
3049
index: ir::Value,
3050
extension: Option<Extension>,
3051
) -> WasmResult<ir::Value> {
3052
gc::translate_array_get(self, builder, array_type_index, array, index, extension)
3053
}
3054
3055
pub fn translate_array_set(
3056
&mut self,
3057
builder: &mut FunctionBuilder,
3058
array_type_index: TypeIndex,
3059
array: ir::Value,
3060
index: ir::Value,
3061
value: ir::Value,
3062
) -> WasmResult<()> {
3063
gc::translate_array_set(self, builder, array_type_index, array, index, value)
3064
}
3065
3066
pub fn translate_ref_test(
3067
&mut self,
3068
builder: &mut FunctionBuilder<'_>,
3069
test_ty: WasmRefType,
3070
gc_ref: ir::Value,
3071
gc_ref_ty: WasmRefType,
3072
) -> WasmResult<ir::Value> {
3073
gc::translate_ref_test(self, builder, test_ty, gc_ref, gc_ref_ty)
3074
}
3075
3076
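/// Materialize a null reference of the given heap type: a null pointer for
/// funcrefs, a zero 32-bit value for GC references, and an all-zero fat
/// pointer for continuations.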
pub fn translate_ref_null(
3077
&mut self,
3078
mut pos: cranelift_codegen::cursor::FuncCursor,
3079
ht: WasmHeapType,
3080
) -> WasmResult<ir::Value> {
3081
Ok(match ht.top() {
3082
WasmHeapTopType::Func => pos.ins().iconst(self.pointer_type(), 0),
3083
// NB: null GC references don't need to be in stack maps.
3084
WasmHeapTopType::Any | WasmHeapTopType::Extern | WasmHeapTopType::Exn => {
3085
pos.ins().iconst(types::I32, 0)
3086
}
3087
WasmHeapTopType::Cont => {
3088
let zero = pos.ins().iconst(self.pointer_type(), 0);
3089
stack_switching::fatpointer::construct(self, &mut pos, zero, zero)
3090
}
3091
})
3092
}
3093
3094
pub fn translate_ref_is_null(
3095
&mut self,
3096
mut pos: cranelift_codegen::cursor::FuncCursor,
3097
value: ir::Value,
3098
ty: WasmRefType,
3099
) -> WasmResult<ir::Value> {
3100
// If we know the type is not nullable, then we don't actually need to
3101
// check for null.
3102
if !ty.nullable {
3103
return Ok(pos.ins().iconst(ir::types::I32, 0));
3104
}
3105
3106
let byte_is_null = match ty.heap_type.top() {
3107
WasmHeapTopType::Cont => {
3108
let (_revision, contref) =
3109
stack_switching::fatpointer::deconstruct(self, &mut pos, value);
3110
pos.ins()
3111
.icmp_imm(cranelift_codegen::ir::condcodes::IntCC::Equal, contref, 0)
3112
}
3113
_ => pos
3114
.ins()
3115
.icmp_imm(cranelift_codegen::ir::condcodes::IntCC::Equal, value, 0),
3116
};
3117
3118
Ok(pos.ins().uextend(ir::types::I32, byte_is_null))
3119
}
3120
3121
pub fn translate_ref_func(
3122
&mut self,
3123
mut pos: cranelift_codegen::cursor::FuncCursor<'_>,
3124
func_index: FuncIndex,
3125
) -> WasmResult<ir::Value> {
3126
let func_index = pos.ins().iconst(I32, func_index.as_u32() as i64);
3127
let ref_func = self.builtin_functions.ref_func(&mut pos.func);
3128
let vmctx = self.vmctx_val(&mut pos);
3129
3130
let call_inst = pos.ins().call(ref_func, &[vmctx, func_index]);
3131
Ok(pos.func.dfg.first_result(call_inst))
3132
}
3133
3134
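/// Lower `global.get`, handling compile-time-constant globals, globals
/// stored directly in memory, and GC-reference globals that are read via
/// the GC compiler.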
pub(crate) fn translate_global_get(
3135
&mut self,
3136
builder: &mut FunctionBuilder<'_>,
3137
global_index: GlobalIndex,
3138
) -> WasmResult<ir::Value> {
3139
match self.get_or_create_global(builder.func, global_index) {
3140
GlobalVariable::Constant { value } => match value {
3141
GlobalConstValue::I32(x) => Ok(builder.ins().iconst(ir::types::I32, i64::from(x))),
3142
GlobalConstValue::I64(x) => Ok(builder.ins().iconst(ir::types::I64, x)),
3143
GlobalConstValue::F32(x) => {
3144
Ok(builder.ins().f32const(ir::immediates::Ieee32::with_bits(x)))
3145
}
3146
GlobalConstValue::F64(x) => {
3147
Ok(builder.ins().f64const(ir::immediates::Ieee64::with_bits(x)))
3148
}
3149
GlobalConstValue::V128(x) => {
3150
let data = x.to_le_bytes().to_vec().into();
3151
let handle = builder.func.dfg.constants.insert(data);
3152
Ok(builder.ins().vconst(ir::types::I8X16, handle))
3153
}
3154
},
3155
GlobalVariable::Memory { gv, offset, ty } => {
3156
let addr = builder.ins().global_value(self.pointer_type(), gv);
3157
let mut flags = ir::MemFlags::trusted();
3158
// Vector globals are kept in little-endian format at rest, so load
// them as little-endian to avoid byte swaps on big-endian platforms.
3161
if ty.is_vector() {
3162
flags.set_endianness(ir::Endianness::Little);
3163
}
3164
// Put globals in the "table" abstract heap category as well.
3165
flags.set_alias_region(Some(ir::AliasRegion::Table));
3166
Ok(builder.ins().load(ty, flags, addr, offset))
3167
}
3168
GlobalVariable::Custom => {
3169
let global_ty = self.module.globals[global_index];
3170
let wasm_ty = global_ty.wasm_ty;
3171
debug_assert!(
3172
wasm_ty.is_vmgcref_type(),
3173
"We only use GlobalVariable::Custom for VMGcRef types"
3174
);
3175
let WasmValType::Ref(ref_ty) = wasm_ty else {
3176
unreachable!()
3177
};
3178
3179
let (gv, offset) = self.get_global_location(builder.func, global_index);
3180
let gv = builder.ins().global_value(self.pointer_type(), gv);
3181
let src = builder.ins().iadd_imm(gv, i64::from(offset));
3182
3183
gc::gc_compiler(self)?.translate_read_gc_reference(
3184
self,
3185
builder,
3186
ref_ty,
3187
src,
3188
if global_ty.mutability {
3189
ir::MemFlags::trusted()
3190
} else {
3191
ir::MemFlags::trusted().with_readonly().with_can_move()
3192
},
3193
)
3194
}
3195
}
3196
}
3197
3198
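/// Lower `global.set` for memory-backed and GC-reference globals;
/// immutable (constant) globals cannot reach here because validation
/// rejects `global.set` on them.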
pub(crate) fn translate_global_set(
3199
&mut self,
3200
builder: &mut FunctionBuilder<'_>,
3201
global_index: GlobalIndex,
3202
val: ir::Value,
3203
) -> WasmResult<()> {
3204
match self.get_or_create_global(builder.func, global_index) {
3205
GlobalVariable::Constant { .. } => {
3206
unreachable!("validation checks that Wasm cannot `global.set` constant globals")
3207
}
3208
GlobalVariable::Memory { gv, offset, ty } => {
3209
let addr = builder.ins().global_value(self.pointer_type(), gv);
3210
let mut flags = ir::MemFlags::trusted();
3211
// Like `global.get`, store globals in little-endian format.
3212
if ty.is_vector() {
3213
flags.set_endianness(ir::Endianness::Little);
3214
}
3215
// Put globals in the "table" abstract heap category as well.
3216
flags.set_alias_region(Some(ir::AliasRegion::Table));
3217
debug_assert_eq!(ty, builder.func.dfg.value_type(val));
3218
builder.ins().store(flags, val, addr, offset);
3219
self.update_global(builder, global_index, val);
3220
}
3221
GlobalVariable::Custom => {
3222
let ty = self.module.globals[global_index].wasm_ty;
3223
debug_assert!(
3224
ty.is_vmgcref_type(),
3225
"We only use GlobalVariable::Custom for VMGcRef types"
3226
);
3227
let WasmValType::Ref(ty) = ty else {
3228
unreachable!()
3229
};
3230
3231
let (gv, offset) = self.get_global_location(builder.func, global_index);
3232
let gv = builder.ins().global_value(self.pointer_type(), gv);
3233
let src = builder.ins().iadd_imm(gv, i64::from(offset));
3234
3235
gc::gc_compiler(self)?.translate_write_gc_reference(
3236
self,
3237
builder,
3238
ty,
3239
src,
3240
val,
3241
ir::MemFlags::trusted(),
3242
)?
3243
}
3244
}
3245
Ok(())
3246
}
3247
3248
pub fn translate_call_indirect<'a>(
&mut self,
builder: &'a mut FunctionBuilder,
srcloc: ir::SourceLoc,
features: &WasmFeatures,
table_index: TableIndex,
ty_index: TypeIndex,
sig_ref: ir::SigRef,
callee: ir::Value,
call_args: &[ir::Value],
) -> WasmResult<Option<CallRets>> {
Call::new(builder, self, srcloc).indirect_call(
features,
table_index,
ty_index,
sig_ref,
callee,
call_args,
)
}

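/// Translates a direct Wasm `call` to the function at `callee_index`.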
pub fn translate_call<'a>(
&mut self,
builder: &'a mut FunctionBuilder,
srcloc: ir::SourceLoc,
callee_index: FuncIndex,
sig_ref: ir::SigRef,
call_args: &[ir::Value],
) -> WasmResult<CallRets> {
Call::new(builder, self, srcloc).direct_call(callee_index, sig_ref, call_args)
}

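/// Translates a Wasm `call_ref` through the function reference `callee`.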
pub fn translate_call_ref<'a>(
&mut self,
builder: &'a mut FunctionBuilder,
srcloc: ir::SourceLoc,
sig_ref: ir::SigRef,
callee: ir::Value,
call_args: &[ir::Value],
) -> WasmResult<CallRets> {
Call::new(builder, self, srcloc).call_ref(sig_ref, callee, call_args)
}

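/// Translates a Wasm `return_call` (tail call) to the function at
/// `callee_index`.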
pub fn translate_return_call(
&mut self,
builder: &mut FunctionBuilder,
srcloc: ir::SourceLoc,
callee_index: FuncIndex,
sig_ref: ir::SigRef,
call_args: &[ir::Value],
) -> WasmResult<()> {
Call::new_tail(builder, self, srcloc).direct_call(callee_index, sig_ref, call_args)?;
Ok(())
}

pub fn translate_return_call_indirect(
&mut self,
builder: &mut FunctionBuilder,
srcloc: ir::SourceLoc,
features: &WasmFeatures,
table_index: TableIndex,
ty_index: TypeIndex,
sig_ref: ir::SigRef,
callee: ir::Value,
call_args: &[ir::Value],
) -> WasmResult<()> {
Call::new_tail(builder, self, srcloc).indirect_call(
features,
table_index,
ty_index,
sig_ref,
callee,
call_args,
)?;
Ok(())
}

pub fn translate_return_call_ref(
&mut self,
builder: &mut FunctionBuilder,
srcloc: ir::SourceLoc,
sig_ref: ir::SigRef,
callee: ir::Value,
call_args: &[ir::Value],
) -> WasmResult<()> {
Call::new_tail(builder, self, srcloc).call_ref(sig_ref, callee, call_args)?;
Ok(())
}

/// Returns two `ir::Value`s, the first of which is the vmctx for the memory
/// `index` and the second of which is the `DefinedMemoryIndex` for `index`.
///
/// Handles internally whether `index` is an imported memory or not.
fn memory_vmctx_and_defined_index(
&mut self,
pos: &mut FuncCursor,
index: MemoryIndex,
) -> (ir::Value, ir::Value) {
let cur_vmctx = self.vmctx_val(pos);
match self.module.defined_memory_index(index) {
// This is a defined memory, so the vmctx is our own and the defined
// index is `index` here.
Some(index) => (cur_vmctx, pos.ins().iconst(I32, i64::from(index.as_u32()))),

// This is an imported memory, so load the vmctx/defined index from
// the import definition itself.
None => {
let vmimport = self.offsets.vmctx_vmmemory_import(index);

let vmctx = pos.ins().load(
self.isa.pointer_type(),
ir::MemFlags::trusted(),
cur_vmctx,
i32::try_from(vmimport + u32::from(self.offsets.vmmemory_import_vmctx()))
.unwrap(),
);
let index = pos.ins().load(
ir::types::I32,
ir::MemFlags::trusted(),
cur_vmctx,
i32::try_from(vmimport + u32::from(self.offsets.vmmemory_import_index()))
.unwrap(),
);
(vmctx, index)
}
}
}

/// Returns two `ir::Value`s, the first of which is the vmctx for the table
/// `index` and the second of which is the `DefinedTableIndex` for `index`.
///
/// Handles internally whether `index` is an imported table or not.
fn table_vmctx_and_defined_index(
&mut self,
pos: &mut FuncCursor,
index: TableIndex,
) -> (ir::Value, ir::Value) {
// NB: the body of this method is similar to
// `memory_vmctx_and_defined_index` above.
let cur_vmctx = self.vmctx_val(pos);
match self.module.defined_table_index(index) {
Some(index) => (cur_vmctx, pos.ins().iconst(I32, i64::from(index.as_u32()))),
None => {
let vmimport = self.offsets.vmctx_vmtable_import(index);

let vmctx = pos.ins().load(
self.isa.pointer_type(),
ir::MemFlags::trusted(),
cur_vmctx,
i32::try_from(vmimport + u32::from(self.offsets.vmtable_import_vmctx()))
.unwrap(),
);
let index = pos.ins().load(
ir::types::I32,
ir::MemFlags::trusted(),
cur_vmctx,
i32::try_from(vmimport + u32::from(self.offsets.vmtable_import_index()))
.unwrap(),
);
(vmctx, index)
}
}
}

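/// Translates a Wasm `memory.grow` for memory `index` by calling the
/// `memory_grow` builtin and converting the result back to the memory's
/// index type.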
pub fn translate_memory_grow(
&mut self,
builder: &mut FunctionBuilder<'_>,
index: MemoryIndex,
val: ir::Value,
) -> WasmResult<ir::Value> {
let mut pos = builder.cursor();
let memory_grow = self.builtin_functions.memory_grow(&mut pos.func);

let (memory_vmctx, defined_memory_index) =
self.memory_vmctx_and_defined_index(&mut pos, index);

let index_type = self.memory(index).idx_type;
let val = self.cast_index_to_i64(&mut pos, val, index_type);
let call_inst = pos
.ins()
.call(memory_grow, &[memory_vmctx, val, defined_memory_index]);
let result = *pos.func.dfg.inst_results(call_inst).first().unwrap();
let single_byte_pages = match self.memory(index).page_size_log2 {
16 => false,
0 => true,
_ => unreachable!("only page sizes 2**0 and 2**16 are currently valid"),
};
Ok(self.convert_pointer_to_index_type(
builder.cursor(),
result,
index_type,
single_byte_pages,
))
}

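/// Translates a Wasm `memory.size`, loading the memory's current length in
/// bytes (atomically for shared memories) and converting it to pages.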
pub fn translate_memory_size(
&mut self,
mut pos: FuncCursor<'_>,
index: MemoryIndex,
) -> WasmResult<ir::Value> {
let pointer_type = self.pointer_type();
let vmctx = self.vmctx(&mut pos.func);
let is_shared = self.module.memories[index].shared;
let base = pos.ins().global_value(pointer_type, vmctx);
let current_length_in_bytes = match self.module.defined_memory_index(index) {
Some(def_index) => {
if is_shared {
let offset =
i32::try_from(self.offsets.vmctx_vmmemory_pointer(def_index)).unwrap();
let vmmemory_ptr =
pos.ins()
.load(pointer_type, ir::MemFlags::trusted(), base, offset);
let vmmemory_definition_offset =
i64::from(self.offsets.ptr.vmmemory_definition_current_length());
let vmmemory_definition_ptr =
pos.ins().iadd_imm(vmmemory_ptr, vmmemory_definition_offset);
// This atomic access of the
// `VMMemoryDefinition::current_length` is direct; no bounds
// check is needed. This is possible because shared memory
// has a static size (the maximum is always known). Shared
// memory is thus built with a static memory plan and no
// bounds-checked version of this is implemented.
pos.ins().atomic_load(
pointer_type,
ir::MemFlags::trusted(),
vmmemory_definition_ptr,
)
} else {
let owned_index = self.module.owned_memory_index(def_index);
let offset = i32::try_from(
self.offsets
.vmctx_vmmemory_definition_current_length(owned_index),
)
.unwrap();
pos.ins()
.load(pointer_type, ir::MemFlags::trusted(), base, offset)
}
}
None => {
let offset = i32::try_from(self.offsets.vmctx_vmmemory_import_from(index)).unwrap();
let vmmemory_ptr =
pos.ins()
.load(pointer_type, ir::MemFlags::trusted(), base, offset);
if is_shared {
let vmmemory_definition_offset =
i64::from(self.offsets.ptr.vmmemory_definition_current_length());
let vmmemory_definition_ptr =
pos.ins().iadd_imm(vmmemory_ptr, vmmemory_definition_offset);
pos.ins().atomic_load(
pointer_type,
ir::MemFlags::trusted(),
vmmemory_definition_ptr,
)
} else {
pos.ins().load(
pointer_type,
ir::MemFlags::trusted(),
vmmemory_ptr,
i32::from(self.offsets.ptr.vmmemory_definition_current_length()),
)
}
}
};

let page_size_log2 = i64::from(self.module.memories[index].page_size_log2);
let current_length_in_pages = pos.ins().ushr_imm(current_length_in_bytes, page_size_log2);
let single_byte_pages = match page_size_log2 {
16 => false,
0 => true,
_ => unreachable!("only page sizes 2**0 and 2**16 are currently valid"),
};
Ok(self.convert_pointer_to_index_type(
pos,
current_length_in_pages,
self.memory(index).idx_type,
single_byte_pages,
))
}

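/// Translates a Wasm `memory.copy` between `src_index` and `dst_index` by
/// calling the `memory_copy` builtin.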
pub fn translate_memory_copy(
&mut self,
builder: &mut FunctionBuilder<'_>,
src_index: MemoryIndex,
dst_index: MemoryIndex,
dst: ir::Value,
src: ir::Value,
len: ir::Value,
) -> WasmResult<()> {
let mut pos = builder.cursor();
let vmctx = self.vmctx_val(&mut pos);

let memory_copy = self.builtin_functions.memory_copy(&mut pos.func);
let dst = self.cast_index_to_i64(&mut pos, dst, self.memory(dst_index).idx_type);
let src = self.cast_index_to_i64(&mut pos, src, self.memory(src_index).idx_type);
// The length is 32-bit if either memory is 32-bit, but if they're both
// 64-bit then it's 64-bit. Our intrinsic takes a 64-bit length for
// compatibility across all memories, so make sure that it's cast
// correctly here (this is a bit special so no generic helper unlike for
// `dst`/`src` above)
let len = if index_type_to_ir_type(self.memory(dst_index).idx_type) == I64
&& index_type_to_ir_type(self.memory(src_index).idx_type) == I64
{
len
} else {
pos.ins().uextend(I64, len)
};
let src_index = pos.ins().iconst(I32, i64::from(src_index.as_u32()));
let dst_index = pos.ins().iconst(I32, i64::from(dst_index.as_u32()));
pos.ins()
.call(memory_copy, &[vmctx, dst_index, dst, src_index, src, len]);

Ok(())
}

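/// Translates a Wasm `memory.fill` by calling the `memory_fill` builtin.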
pub fn translate_memory_fill(
&mut self,
builder: &mut FunctionBuilder<'_>,
memory_index: MemoryIndex,
dst: ir::Value,
val: ir::Value,
len: ir::Value,
) -> WasmResult<()> {
let mut pos = builder.cursor();
let memory_fill = self.builtin_functions.memory_fill(&mut pos.func);
let dst = self.cast_index_to_i64(&mut pos, dst, self.memory(memory_index).idx_type);
let len = self.cast_index_to_i64(&mut pos, len, self.memory(memory_index).idx_type);
let (memory_vmctx, defined_memory_index) =
self.memory_vmctx_and_defined_index(&mut pos, memory_index);

pos.ins().call(
memory_fill,
&[memory_vmctx, defined_memory_index, dst, val, len],
);

Ok(())
}

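/// Translates a Wasm `memory.init` from the passive data segment
/// `seg_index` by calling the `memory_init` builtin.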
pub fn translate_memory_init(
&mut self,
builder: &mut FunctionBuilder<'_>,
memory_index: MemoryIndex,
seg_index: u32,
dst: ir::Value,
src: ir::Value,
len: ir::Value,
) -> WasmResult<()> {
let mut pos = builder.cursor();
let memory_init = self.builtin_functions.memory_init(&mut pos.func);

let memory_index_arg = pos.ins().iconst(I32, memory_index.index() as i64);
let seg_index_arg = pos.ins().iconst(I32, seg_index as i64);

let vmctx = self.vmctx_val(&mut pos);

let dst = self.cast_index_to_i64(&mut pos, dst, self.memory(memory_index).idx_type);

pos.ins().call(
memory_init,
&[vmctx, memory_index_arg, seg_index_arg, dst, src, len],
);

Ok(())
}

pub fn translate_data_drop(&mut self, mut pos: FuncCursor, seg_index: u32) -> WasmResult<()> {
let data_drop = self.builtin_functions.data_drop(&mut pos.func);
let seg_index_arg = pos.ins().iconst(I32, seg_index as i64);
let vmctx = self.vmctx_val(&mut pos);
pos.ins().call(data_drop, &[vmctx, seg_index_arg]);
Ok(())
}

pub fn translate_table_size(
&mut self,
pos: FuncCursor,
table_index: TableIndex,
) -> WasmResult<ir::Value> {
let table_data = self.get_or_create_table(pos.func, table_index);
let index_type = index_type_to_ir_type(self.table(table_index).idx_type);
Ok(table_data.bound.bound(&*self.isa, pos, index_type))
}

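/// Translates a Wasm `table.copy` between `src_table_index` and
/// `dst_table_index` by calling the `table_copy` builtin.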
pub fn translate_table_copy(
&mut self,
builder: &mut FunctionBuilder<'_>,
dst_table_index: TableIndex,
src_table_index: TableIndex,
dst: ir::Value,
src: ir::Value,
len: ir::Value,
) -> WasmResult<()> {
let (table_copy, dst_table_index_arg, src_table_index_arg) =
self.get_table_copy_func(&mut builder.func, dst_table_index, src_table_index);

let mut pos = builder.cursor();
let dst = self.cast_index_to_i64(&mut pos, dst, self.table(dst_table_index).idx_type);
let src = self.cast_index_to_i64(&mut pos, src, self.table(src_table_index).idx_type);
let len = if index_type_to_ir_type(self.table(dst_table_index).idx_type) == I64
&& index_type_to_ir_type(self.table(src_table_index).idx_type) == I64
{
len
} else {
pos.ins().uextend(I64, len)
};
let dst_table_index_arg = pos.ins().iconst(I32, dst_table_index_arg as i64);
let src_table_index_arg = pos.ins().iconst(I32, src_table_index_arg as i64);
let vmctx = self.vmctx_val(&mut pos);
pos.ins().call(
table_copy,
&[
vmctx,
dst_table_index_arg,
src_table_index_arg,
dst,
src,
len,
],
);

Ok(())
}

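/// Translates a Wasm `table.init` from element segment `seg_index` by
/// calling the `table_init` builtin.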
pub fn translate_table_init(
&mut self,
builder: &mut FunctionBuilder<'_>,
seg_index: u32,
table_index: TableIndex,
dst: ir::Value,
src: ir::Value,
len: ir::Value,
) -> WasmResult<()> {
let mut pos = builder.cursor();
let table_init = self.builtin_functions.table_init(&mut pos.func);
let table_index_arg = pos.ins().iconst(I32, i64::from(table_index.as_u32()));
let seg_index_arg = pos.ins().iconst(I32, i64::from(seg_index));
let vmctx = self.vmctx_val(&mut pos);
let index_type = self.table(table_index).idx_type;
let dst = self.cast_index_to_i64(&mut pos, dst, index_type);
let src = pos.ins().uextend(I64, src);
let len = pos.ins().uextend(I64, len);

pos.ins().call(
table_init,
&[vmctx, table_index_arg, seg_index_arg, dst, src, len],
);

Ok(())
}

pub fn translate_elem_drop(&mut self, mut pos: FuncCursor, elem_index: u32) -> WasmResult<()> {
let elem_drop = self.builtin_functions.elem_drop(&mut pos.func);
let elem_index_arg = pos.ins().iconst(I32, elem_index as i64);
let vmctx = self.vmctx_val(&mut pos);
pos.ins().call(elem_drop, &[vmctx, elem_index_arg]);
Ok(())
}

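/// Translates a Wasm `memory.atomic.wait32`/`wait64` by calling the
/// appropriate wait builtin; returns an unsupported error when the
/// `threads` feature is compiled out.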
pub fn translate_atomic_wait(
&mut self,
builder: &mut FunctionBuilder<'_>,
memory_index: MemoryIndex,
_heap: Heap,
addr: ir::Value,
expected: ir::Value,
timeout: ir::Value,
) -> WasmResult<ir::Value> {
#[cfg(feature = "threads")]
{
let mut pos = builder.cursor();
let addr = self.cast_index_to_i64(&mut pos, addr, self.memory(memory_index).idx_type);
let implied_ty = pos.func.dfg.value_type(expected);
let wait_func = self.get_memory_atomic_wait(&mut pos.func, implied_ty);

let (memory_vmctx, defined_memory_index) =
self.memory_vmctx_and_defined_index(&mut pos, memory_index);

let call_inst = pos.ins().call(
wait_func,
&[memory_vmctx, defined_memory_index, addr, expected, timeout],
);
let ret = pos.func.dfg.inst_results(call_inst)[0];
Ok(builder.ins().ireduce(ir::types::I32, ret))
}
#[cfg(not(feature = "threads"))]
{
let _ = (builder, memory_index, addr, expected, timeout);
Err(wasmtime_environ::WasmError::Unsupported(
"threads support disabled at compile time".to_string(),
))
}
}

pub fn translate_atomic_notify(
&mut self,
builder: &mut FunctionBuilder<'_>,
memory_index: MemoryIndex,
_heap: Heap,
addr: ir::Value,
count: ir::Value,
) -> WasmResult<ir::Value> {
#[cfg(feature = "threads")]
{
let mut pos = builder.cursor();
let addr = self.cast_index_to_i64(&mut pos, addr, self.memory(memory_index).idx_type);
let atomic_notify = self.builtin_functions.memory_atomic_notify(&mut pos.func);

let (memory_vmctx, defined_memory_index) =
self.memory_vmctx_and_defined_index(&mut pos, memory_index);
let call_inst = pos.ins().call(
atomic_notify,
&[memory_vmctx, defined_memory_index, addr, count],
);
let ret = pos.func.dfg.inst_results(call_inst)[0];
Ok(builder.ins().ireduce(ir::types::I32, ret))
}
#[cfg(not(feature = "threads"))]
{
let _ = (builder, memory_index, addr, count);
Err(wasmtime_environ::WasmError::Unsupported(
"threads support disabled at compile time".to_string(),
))
}
}

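/// Emits per-iteration checks at the top of every loop: a fuel check when
/// fuel consumption is enabled and an epoch check when epoch-based
/// interruption is enabled.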
pub fn translate_loop_header(&mut self, builder: &mut FunctionBuilder) -> WasmResult<()> {
// If fuel consumption is enabled, check how much fuel we have
// remaining to see if we've run out by this point.
if self.tunables.consume_fuel {
self.fuel_check(builder);
}

// If we are performing epoch-based interruption, check to see
// if the epoch counter has changed.
if self.tunables.epoch_interruption {
self.epoch_check(builder);
}

Ok(())
}

pub fn before_translate_operator(
&mut self,
op: &Operator,
_operand_types: Option<&[WasmValType]>,
builder: &mut FunctionBuilder,
) -> WasmResult<()> {
if self.tunables.consume_fuel {
self.fuel_before_op(op, builder, self.is_reachable());
}
if self.is_reachable() && self.state_slot.is_some() {
let builtin = self.builtin_functions.patchable_breakpoint(builder.func);
let vmctx = self.vmctx_val(&mut builder.cursor());
let inst = builder.ins().call(builtin, &[vmctx]);
let tags = self.debug_tags(builder.srcloc());
builder.func.debug_tags.set(inst, tags);
}

Ok(())
}

pub fn after_translate_operator(
&mut self,
op: &Operator,
validator: &FuncValidator<impl WasmModuleResources>,
builder: &mut FunctionBuilder,
) -> WasmResult<()> {
if self.tunables.consume_fuel && self.is_reachable() {
self.fuel_after_op(op, builder);
}
if self.is_reachable() {
self.update_state_slot_stack(validator, builder)?;
}
Ok(())
}

pub fn before_unconditionally_trapping_memory_access(&mut self, builder: &mut FunctionBuilder) {
if self.tunables.consume_fuel {
self.fuel_increment_var(builder);
self.fuel_save_from_var(builder);
}
}

pub fn before_translate_function(&mut self, builder: &mut FunctionBuilder) -> WasmResult<()> {
// If an explicit stack limit is requested, emit one here at the start
// of the function.
if let Some(gv) = self.stack_limit_at_function_entry {
let limit = builder.ins().global_value(self.pointer_type(), gv);
let sp = builder.ins().get_stack_pointer(self.pointer_type());
let overflow = builder.ins().icmp(IntCC::UnsignedLessThan, sp, limit);
self.conditionally_trap(builder, overflow, ir::TrapCode::STACK_OVERFLOW);
}

// Additionally we initialize `fuel_var` if it will get used.
if self.tunables.consume_fuel {
self.fuel_function_entry(builder);
}

// Initialize `epoch_var` with the current epoch.
if self.tunables.epoch_interruption {
self.epoch_function_entry(builder);
}

#[cfg(feature = "wmemcheck")]
if self.compiler.wmemcheck {
let func_name = self.current_func_name(builder);
if func_name == Some("malloc") {
self.check_malloc_start(builder);
} else if func_name == Some("free") {
self.check_free_start(builder);
}
}

self.update_state_slot_vmctx(builder);

Ok(())
}

pub fn after_translate_function(&mut self, builder: &mut FunctionBuilder) -> WasmResult<()> {
if self.tunables.consume_fuel && self.is_reachable() {
self.fuel_function_exit(builder);
}
self.finish_debug_metadata(builder);
Ok(())
}

pub fn relaxed_simd_deterministic(&self) -> bool {
self.tunables.relaxed_simd_deterministic
}

pub fn has_native_fma(&self) -> bool {
self.isa.has_native_fma()
}

pub fn is_x86(&self) -> bool {
self.isa.triple().architecture == target_lexicon::Architecture::X86_64
}

pub fn translate_cont_bind(
&mut self,
builder: &mut FunctionBuilder<'_>,
contobj: ir::Value,
args: &[ir::Value],
) -> ir::Value {
stack_switching::instructions::translate_cont_bind(self, builder, contobj, args)
}

pub fn translate_cont_new(
&mut self,
builder: &mut FunctionBuilder<'_>,
func: ir::Value,
arg_types: &[WasmValType],
return_types: &[WasmValType],
) -> WasmResult<ir::Value> {
stack_switching::instructions::translate_cont_new(
self,
builder,
func,
arg_types,
return_types,
)
}

pub fn translate_resume(
&mut self,
builder: &mut FunctionBuilder<'_>,
type_index: u32,
contobj: ir::Value,
resume_args: &[ir::Value],
resumetable: &[(u32, Option<ir::Block>)],
) -> WasmResult<Vec<ir::Value>> {
stack_switching::instructions::translate_resume(
self,
builder,
type_index,
contobj,
resume_args,
resumetable,
)
}

pub fn translate_suspend(
&mut self,
builder: &mut FunctionBuilder<'_>,
tag_index: u32,
suspend_args: &[ir::Value],
tag_return_types: &[ir::Type],
) -> Vec<ir::Value> {
stack_switching::instructions::translate_suspend(
self,
builder,
tag_index,
suspend_args,
tag_return_types,
)
}

/// Translates switch instructions.
pub fn translate_switch(
&mut self,
builder: &mut FunctionBuilder,
tag_index: u32,
contobj: ir::Value,
switch_args: &[ir::Value],
return_types: &[ir::Type],
) -> WasmResult<Vec<ir::Value>> {
stack_switching::instructions::translate_switch(
self,
builder,
tag_index,
contobj,
switch_args,
return_types,
)
}

pub fn continuation_arguments(&self, index: TypeIndex) -> &[WasmValType] {
let idx = self.module.types[index].unwrap_module_type_index();
self.types[self.types[idx].unwrap_cont().unwrap_module_type_index()]
.unwrap_func()
.params()
}

pub fn continuation_returns(&self, index: TypeIndex) -> &[WasmValType] {
let idx = self.module.types[index].unwrap_module_type_index();
self.types[self.types[idx].unwrap_cont().unwrap_module_type_index()]
.unwrap_func()
.returns()
}

pub fn tag_params(&self, tag_index: TagIndex) -> &[WasmValType] {
let idx = self.module.tags[tag_index].signature;
self.types[idx.unwrap_module_type_index()]
.unwrap_func()
.params()
}

pub fn tag_returns(&self, tag_index: TagIndex) -> &[WasmValType] {
let idx = self.module.tags[tag_index].signature;
self.types[idx.unwrap_module_type_index()]
.unwrap_func()
.returns()
}

pub fn use_blendv_for_relaxed_laneselect(&self, ty: Type) -> bool {
self.isa.has_blendv_lowering(ty)
}

pub fn use_x86_pmulhrsw_for_relaxed_q15mul(&self) -> bool {
self.isa.has_x86_pmulhrsw_lowering()
}

pub fn use_x86_pmaddubsw_for_dot(&self) -> bool {
self.isa.has_x86_pmaddubsw_lowering()
}

pub fn handle_before_return(&mut self, retvals: &[ir::Value], builder: &mut FunctionBuilder) {
#[cfg(feature = "wmemcheck")]
if self.compiler.wmemcheck {
let func_name = self.current_func_name(builder);
if func_name == Some("malloc") {
self.hook_malloc_exit(builder, retvals);
} else if func_name == Some("free") {
self.hook_free_exit(builder);
}
}
#[cfg(not(feature = "wmemcheck"))]
let _ = (retvals, builder);
}

pub fn before_load(
&mut self,
builder: &mut FunctionBuilder,
val_size: u8,
addr: ir::Value,
offset: u64,
) {
#[cfg(feature = "wmemcheck")]
if self.compiler.wmemcheck {
let check_load = self.builtin_functions.check_load(builder.func);
let vmctx = self.vmctx_val(&mut builder.cursor());
let num_bytes = builder.ins().iconst(I32, val_size as i64);
let offset_val = builder.ins().iconst(I64, offset as i64);
builder
.ins()
.call(check_load, &[vmctx, num_bytes, addr, offset_val]);
}
#[cfg(not(feature = "wmemcheck"))]
let _ = (builder, val_size, addr, offset);
}

pub fn before_store(
&mut self,
builder: &mut FunctionBuilder,
val_size: u8,
addr: ir::Value,
offset: u64,
) {
#[cfg(feature = "wmemcheck")]
if self.compiler.wmemcheck {
let check_store = self.builtin_functions.check_store(builder.func);
let vmctx = self.vmctx_val(&mut builder.cursor());
let num_bytes = builder.ins().iconst(I32, val_size as i64);
let offset_val = builder.ins().iconst(I64, offset as i64);
builder
.ins()
.call(check_store, &[vmctx, num_bytes, addr, offset_val]);
}
#[cfg(not(feature = "wmemcheck"))]
let _ = (builder, val_size, addr, offset);
}

pub fn update_global(
&mut self,
builder: &mut FunctionBuilder,
global_index: GlobalIndex,
value: ir::Value,
) {
#[cfg(feature = "wmemcheck")]
if self.compiler.wmemcheck {
if global_index.index() == 0 {
// We are making the assumption that global 0 is the auxiliary stack pointer.
let update_stack_pointer =
self.builtin_functions.update_stack_pointer(builder.func);
let vmctx = self.vmctx_val(&mut builder.cursor());
builder.ins().call(update_stack_pointer, &[vmctx, value]);
}
}
#[cfg(not(feature = "wmemcheck"))]
let _ = (builder, global_index, value);
}

pub fn before_memory_grow(
&mut self,
builder: &mut FunctionBuilder,
num_pages: ir::Value,
mem_index: MemoryIndex,
) {
#[cfg(feature = "wmemcheck")]
if self.compiler.wmemcheck && mem_index.as_u32() == 0 {
let update_mem_size = self.builtin_functions.update_mem_size(builder.func);
let vmctx = self.vmctx_val(&mut builder.cursor());
builder.ins().call(update_mem_size, &[vmctx, num_pages]);
}
#[cfg(not(feature = "wmemcheck"))]
let _ = (builder, num_pages, mem_index);
}

/// If the ISA has rounding instructions, let Cranelift use them. But if
/// not, lower to a libcall here, rather than having Cranelift do it. We
/// can pass our libcall the vmctx pointer, which we use for stack
/// overflow checking.
///
/// This helper is generic for all rounding instructions below, both for
/// scalar and simd types. The `clif_round` argument is the CLIF-level
/// rounding instruction to use if the ISA has the instruction, and the
/// `round_builtin` helper is used to determine which element-level
/// rounding operation builtin is used. Note that this handles the case
/// when `value` is a vector by doing an element-wise libcall invocation.
fn isa_round(
&mut self,
builder: &mut FunctionBuilder,
value: ir::Value,
clif_round: fn(FuncInstBuilder<'_, '_>, ir::Value) -> ir::Value,
round_builtin: fn(&mut BuiltinFunctions, &mut Function) -> ir::FuncRef,
) -> ir::Value {
if self.isa.has_round() {
return clif_round(builder.ins(), value);
}

let vmctx = self.vmctx_val(&mut builder.cursor());
let round = round_builtin(&mut self.builtin_functions, builder.func);
let round_one = |builder: &mut FunctionBuilder, value: ir::Value| {
let call = builder.ins().call(round, &[vmctx, value]);
*builder.func.dfg.inst_results(call).first().unwrap()
};

let ty = builder.func.dfg.value_type(value);
if !ty.is_vector() {
return round_one(builder, value);
}

assert_eq!(ty.bits(), 128);
let zero = builder.func.dfg.constants.insert(V128Imm([0; 16]).into());
let mut result = builder.ins().vconst(ty, zero);
for i in 0..u8::try_from(ty.lane_count()).unwrap() {
let element = builder.ins().extractlane(value, i);
let element_rounded = round_one(builder, element);
result = builder.ins().insertlane(result, element_rounded, i);
}
result
}

pub fn ceil_f32(&mut self, builder: &mut FunctionBuilder, value: ir::Value) -> ir::Value {
self.isa_round(
builder,
value,
|ins, val| ins.ceil(val),
BuiltinFunctions::ceil_f32,
)
}

pub fn ceil_f64(&mut self, builder: &mut FunctionBuilder, value: ir::Value) -> ir::Value {
self.isa_round(
builder,
value,
|ins, val| ins.ceil(val),
BuiltinFunctions::ceil_f64,
)
}

pub fn ceil_f32x4(&mut self, builder: &mut FunctionBuilder, value: ir::Value) -> ir::Value {
self.isa_round(
builder,
value,
|ins, val| ins.ceil(val),
BuiltinFunctions::ceil_f32,
)
}

pub fn ceil_f64x2(&mut self, builder: &mut FunctionBuilder, value: ir::Value) -> ir::Value {
self.isa_round(
builder,
value,
|ins, val| ins.ceil(val),
BuiltinFunctions::ceil_f64,
)
}

pub fn floor_f32(&mut self, builder: &mut FunctionBuilder, value: ir::Value) -> ir::Value {
self.isa_round(
builder,
value,
|ins, val| ins.floor(val),
BuiltinFunctions::floor_f32,
)
}

pub fn floor_f64(&mut self, builder: &mut FunctionBuilder, value: ir::Value) -> ir::Value {
self.isa_round(
builder,
value,
|ins, val| ins.floor(val),
BuiltinFunctions::floor_f64,
)
}

pub fn floor_f32x4(&mut self, builder: &mut FunctionBuilder, value: ir::Value) -> ir::Value {
self.isa_round(
builder,
value,
|ins, val| ins.floor(val),
BuiltinFunctions::floor_f32,
)
}

pub fn floor_f64x2(&mut self, builder: &mut FunctionBuilder, value: ir::Value) -> ir::Value {
self.isa_round(
builder,
value,
|ins, val| ins.floor(val),
BuiltinFunctions::floor_f64,
)
}

pub fn trunc_f32(&mut self, builder: &mut FunctionBuilder, value: ir::Value) -> ir::Value {
self.isa_round(
builder,
value,
|ins, val| ins.trunc(val),
BuiltinFunctions::trunc_f32,
)
}

pub fn trunc_f64(&mut self, builder: &mut FunctionBuilder, value: ir::Value) -> ir::Value {
self.isa_round(
builder,
value,
|ins, val| ins.trunc(val),
BuiltinFunctions::trunc_f64,
)
}

pub fn trunc_f32x4(&mut self, builder: &mut FunctionBuilder, value: ir::Value) -> ir::Value {
self.isa_round(
builder,
value,
|ins, val| ins.trunc(val),
BuiltinFunctions::trunc_f32,
)
}

pub fn trunc_f64x2(&mut self, builder: &mut FunctionBuilder, value: ir::Value) -> ir::Value {
self.isa_round(
builder,
value,
|ins, val| ins.trunc(val),
BuiltinFunctions::trunc_f64,
)
}

pub fn nearest_f32(&mut self, builder: &mut FunctionBuilder, value: ir::Value) -> ir::Value {
self.isa_round(
builder,
value,
|ins, val| ins.nearest(val),
BuiltinFunctions::nearest_f32,
)
}

pub fn nearest_f64(&mut self, builder: &mut FunctionBuilder, value: ir::Value) -> ir::Value {
self.isa_round(
builder,
value,
|ins, val| ins.nearest(val),
BuiltinFunctions::nearest_f64,
)
}

pub fn nearest_f32x4(&mut self, builder: &mut FunctionBuilder, value: ir::Value) -> ir::Value {
self.isa_round(
builder,
value,
|ins, val| ins.nearest(val),
BuiltinFunctions::nearest_f32,
)
}

pub fn nearest_f64x2(&mut self, builder: &mut FunctionBuilder, value: ir::Value) -> ir::Value {
self.isa_round(
builder,
value,
|ins, val| ins.nearest(val),
BuiltinFunctions::nearest_f64,
)
}

pub fn swizzle(
&mut self,
builder: &mut FunctionBuilder,
a: ir::Value,
b: ir::Value,
) -> ir::Value {
// On x86, swizzle would typically be compiled to `pshufb`, except
// that that's not available on CPUs that lack SSSE3. In that case,
// fall back to a builtin function.
if !self.is_x86() || self.isa.has_x86_pshufb_lowering() {
builder.ins().swizzle(a, b)
} else {
let swizzle = self.builtin_functions.i8x16_swizzle(builder.func);
let vmctx = self.vmctx_val(&mut builder.cursor());
let call = builder.ins().call(swizzle, &[vmctx, a, b]);
*builder.func.dfg.inst_results(call).first().unwrap()
}
}

pub fn relaxed_swizzle(
&mut self,
builder: &mut FunctionBuilder,
a: ir::Value,
b: ir::Value,
) -> ir::Value {
// As above, fall back to a builtin if we lack SSSE3.
if !self.is_x86() || self.isa.has_x86_pshufb_lowering() {
if !self.is_x86() || self.relaxed_simd_deterministic() {
builder.ins().swizzle(a, b)
} else {
builder.ins().x86_pshufb(a, b)
}
} else {
let swizzle = self.builtin_functions.i8x16_swizzle(builder.func);
let vmctx = self.vmctx_val(&mut builder.cursor());
let call = builder.ins().call(swizzle, &[vmctx, a, b]);
*builder.func.dfg.inst_results(call).first().unwrap()
}
}

pub fn i8x16_shuffle(
&mut self,
builder: &mut FunctionBuilder,
a: ir::Value,
b: ir::Value,
lanes: &[u8; 16],
) -> ir::Value {
// As with swizzle, i8x16.shuffle would also commonly be implemented
// with pshufb, so if we lack SSSE3, fall back to a builtin.
if !self.is_x86() || self.isa.has_x86_pshufb_lowering() {
let lanes = ConstantData::from(&lanes[..]);
let mask = builder.func.dfg.immediates.push(lanes);
builder.ins().shuffle(a, b, mask)
} else {
let lanes = builder
.func
.dfg
.constants
.insert(ConstantData::from(&lanes[..]));
let lanes = builder.ins().vconst(I8X16, lanes);
let i8x16_shuffle = self.builtin_functions.i8x16_shuffle(builder.func);
let vmctx = self.vmctx_val(&mut builder.cursor());
let call = builder.ins().call(i8x16_shuffle, &[vmctx, a, b, lanes]);
*builder.func.dfg.inst_results(call).first().unwrap()
}
}

pub fn fma_f32x4(
&mut self,
builder: &mut FunctionBuilder,
a: ir::Value,
b: ir::Value,
c: ir::Value,
) -> ir::Value {
if self.has_native_fma() {
builder.ins().fma(a, b, c)
} else if self.relaxed_simd_deterministic() {
// Deterministic semantics are "fused multiply and add".
let fma = self.builtin_functions.fma_f32x4(builder.func);
let vmctx = self.vmctx_val(&mut builder.cursor());
let call = builder.ins().call(fma, &[vmctx, a, b, c]);
*builder.func.dfg.inst_results(call).first().unwrap()
} else {
let mul = builder.ins().fmul(a, b);
builder.ins().fadd(mul, c)
}
}

pub fn fma_f64x2(
&mut self,
builder: &mut FunctionBuilder,
a: ir::Value,
b: ir::Value,
c: ir::Value,
) -> ir::Value {
if self.has_native_fma() {
builder.ins().fma(a, b, c)
} else if self.relaxed_simd_deterministic() {
// Deterministic semantics are "fused multiply and add".
let fma = self.builtin_functions.fma_f64x2(builder.func);
let vmctx = self.vmctx_val(&mut builder.cursor());
let call = builder.ins().call(fma, &[vmctx, a, b, c]);
*builder.func.dfg.inst_results(call).first().unwrap()
} else {
let mul = builder.ins().fmul(a, b);
builder.ins().fadd(mul, c)
}
}

pub fn isa(&self) -> &dyn TargetIsa {
&*self.isa
}

pub fn trap(&mut self, builder: &mut FunctionBuilder, trap: ir::TrapCode) {
match (
self.clif_instruction_traps_enabled(),
crate::clif_trap_to_env_trap(trap),
) {
// If libcall traps are disabled or there's no wasmtime-defined trap
// code for this, then emit a native trap instruction.
(true, _) | (_, None) => {
builder.ins().trap(trap);
}
// ... otherwise with libcall traps explicitly enabled and a
// wasmtime-based trap code invoke the libcall to raise a trap and
// pass in our trap code. Leave a debug `unreachable` in place
// afterwards as a defense-in-depth measure.
(false, Some(trap)) => {
let libcall = self.builtin_functions.trap(&mut builder.func);
let vmctx = self.vmctx_val(&mut builder.cursor());
let trap_code = builder.ins().iconst(I8, i64::from(trap as u8));
builder.ins().call(libcall, &[vmctx, trap_code]);
let raise = self.builtin_functions.raise(&mut builder.func);
builder.ins().call(raise, &[vmctx]);
builder.ins().trap(TRAP_INTERNAL_ASSERT);
}
}
}

pub fn trapz(&mut self, builder: &mut FunctionBuilder, value: ir::Value, trap: ir::TrapCode) {
if self.clif_instruction_traps_enabled() {
builder.ins().trapz(value, trap);
} else {
let ty = builder.func.dfg.value_type(value);
let zero = builder.ins().iconst(ty, 0);
let cmp = builder.ins().icmp(IntCC::Equal, value, zero);
self.conditionally_trap(builder, cmp, trap);
}
}

pub fn trapnz(&mut self, builder: &mut FunctionBuilder, value: ir::Value, trap: ir::TrapCode) {
if self.clif_instruction_traps_enabled() {
builder.ins().trapnz(value, trap);
} else {
let ty = builder.func.dfg.value_type(value);
let zero = builder.ins().iconst(ty, 0);
let cmp = builder.ins().icmp(IntCC::NotEqual, value, zero);
self.conditionally_trap(builder, cmp, trap);
}
}

pub fn uadd_overflow_trap(
&mut self,
builder: &mut FunctionBuilder,
lhs: ir::Value,
rhs: ir::Value,
trap: ir::TrapCode,
) -> ir::Value {
if self.clif_instruction_traps_enabled() {
builder.ins().uadd_overflow_trap(lhs, rhs, trap)
} else {
let (ret, overflow) = builder.ins().uadd_overflow(lhs, rhs);
self.conditionally_trap(builder, overflow, trap);
ret
}
}

pub fn translate_sdiv(
&mut self,
builder: &mut FunctionBuilder,
lhs: ir::Value,
rhs: ir::Value,
) -> ir::Value {
self.guard_signed_divide(builder, lhs, rhs);
builder.ins().sdiv(lhs, rhs)
}

pub fn translate_udiv(
&mut self,
builder: &mut FunctionBuilder,
lhs: ir::Value,
rhs: ir::Value,
) -> ir::Value {
self.guard_zero_divisor(builder, rhs);
builder.ins().udiv(lhs, rhs)
}

pub fn translate_srem(
&mut self,
builder: &mut FunctionBuilder,
lhs: ir::Value,
rhs: ir::Value,
) -> ir::Value {
self.guard_zero_divisor(builder, rhs);
builder.ins().srem(lhs, rhs)
}

pub fn translate_urem(
&mut self,
builder: &mut FunctionBuilder,
lhs: ir::Value,
rhs: ir::Value,
) -> ir::Value {
self.guard_zero_divisor(builder, rhs);
builder.ins().urem(lhs, rhs)
}

pub fn translate_fcvt_to_sint(
&mut self,
builder: &mut FunctionBuilder,
ty: ir::Type,
val: ir::Value,
) -> ir::Value {
// NB: for now avoid translating this entire instruction to CLIF and
// just do it in a libcall.
if !self.clif_instruction_traps_enabled() {
self.guard_fcvt_to_int(builder, ty, val, true);
}
builder.ins().fcvt_to_sint(ty, val)
}

pub fn translate_fcvt_to_uint(
&mut self,
builder: &mut FunctionBuilder,
ty: ir::Type,
val: ir::Value,
) -> ir::Value {
if !self.clif_instruction_traps_enabled() {
self.guard_fcvt_to_int(builder, ty, val, false);
}
builder.ins().fcvt_to_uint(ty, val)
}

/// Returns whether it's acceptable to rely on traps in CLIF memory-related
/// instructions (e.g. loads and stores).
///
/// This is enabled if `signals_based_traps` is `true` since signal handlers
/// are available, but this is additionally forcibly disabled if Pulley is
/// being targeted since the Pulley runtime doesn't catch segfaults for
/// itself.
pub fn clif_memory_traps_enabled(&self) -> bool {
self.tunables.signals_based_traps && !self.is_pulley()
}

/// Returns whether it's acceptable to have CLIF instructions natively trap,
/// such as division-by-zero.
///
/// This is enabled if `signals_based_traps` is `true` or on
/// Pulley unconditionally since Pulley doesn't use hardware-based
/// traps in its runtime. However, if guest debugging is enabled,
/// then we cannot rely on Pulley traps and still need a libcall
/// to gain proper ownership of the store in the runtime's
/// debugger hooks.
pub fn clif_instruction_traps_enabled(&self) -> bool {
self.tunables.signals_based_traps || (self.is_pulley() && !self.tunables.debug_guest)
}

/// Returns whether loads from the null address are allowed as signals of
/// whether to trap or not.
pub fn load_from_zero_allowed(&self) -> bool {
// Pulley allows loads-from-zero and otherwise this is only allowed with
// traps + spectre mitigations.
self.is_pulley()
|| (self.clif_memory_traps_enabled() && self.heap_access_spectre_mitigation())
}

/// Returns whether translation is happening for Pulley bytecode.
pub fn is_pulley(&self) -> bool {
self.isa.triple().is_pulley()
}

/// Returns whether the current location is reachable.
pub fn is_reachable(&self) -> bool {
self.stacks.reachable()
}
}

// Helper function to convert an `IndexType` to an `ir::Type`.
//
// Implementing From/Into trait for `IndexType` or `ir::Type` would
// introduce an extra dependency between `wasmtime_types` and `cranelift_codegen`.
fn index_type_to_ir_type(index_type: IndexType) -> ir::Type {
match index_type {
IndexType::I32 => I32,
IndexType::I64 => I64,
}
}