GitHub Repository: bytecodealliance/wasmtime
Path: blob/main/crates/cranelift/src/func_environ/gc/enabled.rs

use super::{ArrayInit, GcCompiler};
use crate::bounds_checks::BoundsCheck;
use crate::func_environ::{Extension, FuncEnvironment};
use crate::translate::{Heap, HeapData, StructFieldsVec, TargetEnvironment};
use crate::{Reachability, TRAP_INTERNAL_ASSERT};
use cranelift_codegen::ir::immediates::Offset32;
use cranelift_codegen::ir::{BlockArg, ExceptionTableData, ExceptionTableItem};
use cranelift_codegen::{
    cursor::FuncCursor,
    ir::{self, InstBuilder, condcodes::IntCC},
};
use cranelift_entity::packed_option::ReservedValue;
use cranelift_frontend::FunctionBuilder;
use smallvec::{SmallVec, smallvec};
use wasmtime_environ::{
    Collector, GcArrayLayout, GcLayout, GcStructLayout, I31_DISCRIMINANT, ModuleInternedTypeIndex,
    PtrSize, TagIndex, TypeIndex, VMGcKind, WasmCompositeInnerType, WasmHeapTopType, WasmHeapType,
    WasmRefType, WasmResult, WasmStorageType, WasmValType, wasm_unsupported,
};

#[cfg(feature = "gc-drc")]
mod drc;
#[cfg(feature = "gc-null")]
mod null;

/// Get the default GC compiler.
pub fn gc_compiler(func_env: &mut FuncEnvironment<'_>) -> WasmResult<Box<dyn GcCompiler>> {
    // Requiring a GC compiler is a reasonable over-approximation of
    // requiring a GC heap.
    func_env.needs_gc_heap = true;

    match func_env.tunables.collector {
        #[cfg(feature = "gc-drc")]
        Some(Collector::DeferredReferenceCounting) => Ok(Box::new(drc::DrcCompiler::default())),
        #[cfg(not(feature = "gc-drc"))]
        Some(Collector::DeferredReferenceCounting) => Err(wasm_unsupported!(
            "the DRC collector is unavailable because the `gc-drc` feature \
             was disabled at compile time",
        )),

        #[cfg(feature = "gc-null")]
        Some(Collector::Null) => Ok(Box::new(null::NullCompiler::default())),
        #[cfg(not(feature = "gc-null"))]
        Some(Collector::Null) => Err(wasm_unsupported!(
            "the null collector is unavailable because the `gc-null` feature \
             was disabled at compile time",
        )),

        #[cfg(any(feature = "gc-drc", feature = "gc-null"))]
        None => Err(wasm_unsupported!(
            "support for GC types disabled at configuration time"
        )),
        #[cfg(not(any(feature = "gc-drc", feature = "gc-null")))]
        None => Err(wasm_unsupported!(
            "support for GC types disabled because no collector implementation \
             was selected at compile time; enable one of the `gc-drc` or \
             `gc-null` features",
        )),
    }
}

#[cfg_attr(
    not(feature = "gc-drc"),
    expect(dead_code, reason = "easier to define")
)]
fn unbarriered_load_gc_ref(
    builder: &mut FunctionBuilder,
    ty: WasmHeapType,
    ptr_to_gc_ref: ir::Value,
    flags: ir::MemFlags,
) -> WasmResult<ir::Value> {
    debug_assert!(ty.is_vmgcref_type());
    let gc_ref = builder.ins().load(ir::types::I32, flags, ptr_to_gc_ref, 0);
    if ty != WasmHeapType::I31 {
        builder.declare_value_needs_stack_map(gc_ref);
    }
    Ok(gc_ref)
}

#[cfg_attr(
    not(any(feature = "gc-drc", feature = "gc-null")),
    expect(dead_code, reason = "easier to define")
)]
fn unbarriered_store_gc_ref(
    builder: &mut FunctionBuilder,
    ty: WasmHeapType,
    dst: ir::Value,
    gc_ref: ir::Value,
    flags: ir::MemFlags,
) -> WasmResult<()> {
    debug_assert!(ty.is_vmgcref_type());
    builder.ins().store(flags, gc_ref, dst, 0);
    Ok(())
}

/// Emit code to read a struct field or array element from its raw address in
/// the GC heap.
///
/// The given address MUST have already been bounds-checked via
/// `prepare_gc_ref_access`.
fn read_field_at_addr(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    ty: WasmStorageType,
    addr: ir::Value,
    extension: Option<Extension>,
) -> WasmResult<ir::Value> {
    assert_eq!(extension.is_none(), matches!(ty, WasmStorageType::Val(_)));
    assert_eq!(
        extension.is_some(),
        matches!(ty, WasmStorageType::I8 | WasmStorageType::I16)
    );

    // Data inside GC objects is always little endian.
    let flags = ir::MemFlags::trusted().with_endianness(ir::Endianness::Little);

    let value = match ty {
        WasmStorageType::I8 => builder.ins().load(ir::types::I8, flags, addr, 0),
        WasmStorageType::I16 => builder.ins().load(ir::types::I16, flags, addr, 0),
        WasmStorageType::Val(v) => match v {
            WasmValType::I32 => builder.ins().load(ir::types::I32, flags, addr, 0),
            WasmValType::I64 => builder.ins().load(ir::types::I64, flags, addr, 0),
            WasmValType::F32 => builder.ins().load(ir::types::F32, flags, addr, 0),
            WasmValType::F64 => builder.ins().load(ir::types::F64, flags, addr, 0),
            WasmValType::V128 => builder.ins().load(ir::types::I8X16, flags, addr, 0),
            WasmValType::Ref(r) => match r.heap_type.top() {
                WasmHeapTopType::Any | WasmHeapTopType::Extern | WasmHeapTopType::Exn => {
                    gc_compiler(func_env)?
                        .translate_read_gc_reference(func_env, builder, r, addr, flags)?
                }
                WasmHeapTopType::Func => {
                    let expected_ty = match r.heap_type {
                        WasmHeapType::Func => ModuleInternedTypeIndex::reserved_value(),
                        WasmHeapType::ConcreteFunc(ty) => ty.unwrap_module_type_index(),
                        WasmHeapType::NoFunc => {
                            let null = builder.ins().iconst(func_env.pointer_type(), 0);
                            if !r.nullable {
                                // Because `nofunc` is uninhabited, and this
                                // reference is non-null, this is unreachable
                                // code. Unconditionally trap via conditional
                                // trap instructions to avoid inserting block
                                // terminators in the middle of this block.
                                builder.ins().trapz(null, TRAP_INTERNAL_ASSERT);
                            }
                            return Ok(null);
                        }
                        _ => unreachable!("not a function heap type"),
                    };
                    let expected_ty = builder
                        .ins()
                        .iconst(ir::types::I32, i64::from(expected_ty.as_bits()));

                    let vmctx = func_env.vmctx_val(&mut builder.cursor());

                    let func_ref_id = builder.ins().load(ir::types::I32, flags, addr, 0);
                    let get_interned_func_ref = func_env
                        .builtin_functions
                        .get_interned_func_ref(builder.func);

                    let call_inst = builder
                        .ins()
                        .call(get_interned_func_ref, &[vmctx, func_ref_id, expected_ty]);
                    builder.func.dfg.first_result(call_inst)
                }
                WasmHeapTopType::Cont => {
                    // TODO(#10248) GC integration for stack switching
                    return Err(wasmtime_environ::WasmError::Unsupported(
                        "Stack switching feature not compatible with GC, yet".to_string(),
                    ));
                }
            },
        },
    };

    let value = match extension {
        Some(Extension::Sign) => builder.ins().sextend(ir::types::I32, value),
        Some(Extension::Zero) => builder.ins().uextend(ir::types::I32, value),
        None => value,
    };

    Ok(value)
}
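
// For packed (`i8`/`i16`) fields, the raw load above is widened to `i32`
// according to the `extension` the caller requested: sign extension for the
// `_s` flavors of `struct.get`/`array.get` and zero extension for the `_u`
// flavors. (A summary of the intended callers, not new behavior.)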

fn write_func_ref_at_addr(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    ref_type: WasmRefType,
    flags: ir::MemFlags,
    field_addr: ir::Value,
    func_ref: ir::Value,
) -> WasmResult<()> {
    assert_eq!(ref_type.heap_type.top(), WasmHeapTopType::Func);

    let vmctx = func_env.vmctx_val(&mut builder.cursor());

    let intern_func_ref_for_gc_heap = func_env
        .builtin_functions
        .intern_func_ref_for_gc_heap(builder.func);

    let func_ref = if ref_type.heap_type == WasmHeapType::NoFunc {
        let null = builder.ins().iconst(func_env.pointer_type(), 0);
        if !ref_type.nullable {
            // Because `nofunc` is uninhabited, and this reference is
            // non-null, this is unreachable code. Unconditionally trap
            // via conditional trap instructions to avoid inserting
            // block terminators in the middle of this block.
            builder.ins().trapz(null, TRAP_INTERNAL_ASSERT);
        }
        null
    } else {
        func_ref
    };

    // Convert the raw `funcref` into a `FuncRefTableId` for use in the
    // GC heap.
    let call_inst = builder
        .ins()
        .call(intern_func_ref_for_gc_heap, &[vmctx, func_ref]);
    let func_ref_id = builder.func.dfg.first_result(call_inst);
    let func_ref_id = builder.ins().ireduce(ir::types::I32, func_ref_id);

    // Store the id in the field.
    builder.ins().store(flags, func_ref_id, field_addr, 0);

    Ok(())
}
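
// A sketch of the CLIF that `write_func_ref_at_addr` emits in the common,
// non-`nofunc` case (value names invented for illustration):
//
//   v_id   = call intern_func_ref_for_gc_heap(v_vmctx, v_func_ref)
//   v_id32 = ireduce.i32 v_id
//   store little v_id32, v_field_addr
//
// Func refs are stored in the GC heap as 32-bit table ids rather than raw
// pointers, so the GC heap itself never holds raw host addresses.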

fn write_field_at_addr(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    field_ty: WasmStorageType,
    field_addr: ir::Value,
    new_val: ir::Value,
) -> WasmResult<()> {
    // Data inside GC objects is always little endian.
    let flags = ir::MemFlags::trusted().with_endianness(ir::Endianness::Little);

    match field_ty {
        WasmStorageType::I8 => {
            builder.ins().istore8(flags, new_val, field_addr, 0);
        }
        WasmStorageType::I16 => {
            builder.ins().istore16(flags, new_val, field_addr, 0);
        }
        WasmStorageType::Val(WasmValType::Ref(r)) if r.heap_type.top() == WasmHeapTopType::Func => {
            write_func_ref_at_addr(func_env, builder, r, flags, field_addr, new_val)?;
        }
        WasmStorageType::Val(WasmValType::Ref(r)) => {
            gc_compiler(func_env)?
                .translate_write_gc_reference(func_env, builder, r, field_addr, new_val, flags)?;
        }
        WasmStorageType::Val(_) => {
            assert_eq!(
                builder.func.dfg.value_type(new_val).bytes(),
                wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&field_ty)
            );
            builder.ins().store(flags, new_val, field_addr, 0);
        }
    }
    Ok(())
}

pub fn translate_struct_new(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    struct_type_index: TypeIndex,
    fields: &[ir::Value],
) -> WasmResult<ir::Value> {
    gc_compiler(func_env)?.alloc_struct(func_env, builder, struct_type_index, &fields)
}

fn default_value(
    cursor: &mut FuncCursor,
    func_env: &FuncEnvironment<'_>,
    ty: &WasmStorageType,
) -> ir::Value {
    match ty {
        WasmStorageType::I8 | WasmStorageType::I16 => cursor.ins().iconst(ir::types::I32, 0),
        WasmStorageType::Val(v) => match v {
            WasmValType::I32 => cursor.ins().iconst(ir::types::I32, 0),
            WasmValType::I64 => cursor.ins().iconst(ir::types::I64, 0),
            WasmValType::F32 => cursor.ins().f32const(0.0),
            WasmValType::F64 => cursor.ins().f64const(0.0),
            WasmValType::V128 => {
                let c = cursor.func.dfg.constants.insert(vec![0; 16].into());
                cursor.ins().vconst(ir::types::I8X16, c)
            }
            WasmValType::Ref(r) => {
                assert!(r.nullable);
                let (ty, needs_stack_map) = func_env.reference_type(r.heap_type);

                // NB: The collector doesn't need to know about null references.
                let _ = needs_stack_map;

                cursor.ins().iconst(ty, 0)
            }
        },
    }
}

pub fn translate_struct_new_default(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    struct_type_index: TypeIndex,
) -> WasmResult<ir::Value> {
    let interned_ty = func_env.module.types[struct_type_index].unwrap_module_type_index();
    let struct_ty = func_env.types.unwrap_struct(interned_ty)?;
    let fields = struct_ty
        .fields
        .iter()
        .map(|f| default_value(&mut builder.cursor(), func_env, &f.element_type))
        .collect::<StructFieldsVec>();
    gc_compiler(func_env)?.alloc_struct(func_env, builder, struct_type_index, &fields)
}

pub fn translate_struct_get(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    struct_type_index: TypeIndex,
    field_index: u32,
    struct_ref: ir::Value,
    extension: Option<Extension>,
) -> WasmResult<ir::Value> {
    log::trace!(
        "translate_struct_get({struct_type_index:?}, {field_index:?}, {struct_ref:?}, {extension:?})"
    );

    // TODO: If we know we have a `(ref $my_struct)` here, instead of maybe a
    // `(ref null $my_struct)`, we could omit the `trapz`. But plumbing that
    // type info from `wasmparser` and through to here is a bit funky.
    func_env.trapz(builder, struct_ref, crate::TRAP_NULL_REFERENCE);

    let field_index = usize::try_from(field_index).unwrap();
    let interned_type_index = func_env.module.types[struct_type_index].unwrap_module_type_index();

    let struct_layout = func_env.struct_or_exn_layout(interned_type_index);
    let struct_size = struct_layout.size;

    let field_offset = struct_layout.fields[field_index].offset;
    let field_ty = &func_env.types.unwrap_struct(interned_type_index)?.fields[field_index];
    let field_size = wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&field_ty.element_type);
    assert!(field_offset + field_size <= struct_size);

    let field_addr = func_env.prepare_gc_ref_access(
        builder,
        struct_ref,
        BoundsCheck::StaticObjectField {
            offset: field_offset,
            access_size: u8::try_from(field_size).unwrap(),
            object_size: struct_size,
        },
    );

    let result = read_field_at_addr(
        func_env,
        builder,
        field_ty.element_type,
        field_addr,
        extension,
    );
    log::trace!("translate_struct_get(..) -> {result:?}");
    result
}

pub fn translate_struct_set(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    struct_type_index: TypeIndex,
    field_index: u32,
    struct_ref: ir::Value,
    new_val: ir::Value,
) -> WasmResult<()> {
    log::trace!(
        "translate_struct_set({struct_type_index:?}, {field_index:?}, struct_ref: {struct_ref:?}, new_val: {new_val:?})"
    );

    // TODO: See comment in `translate_struct_get` about the `trapz`.
    func_env.trapz(builder, struct_ref, crate::TRAP_NULL_REFERENCE);

    let field_index = usize::try_from(field_index).unwrap();
    let interned_type_index = func_env.module.types[struct_type_index].unwrap_module_type_index();

    let struct_layout = func_env.struct_or_exn_layout(interned_type_index);
    let struct_size = struct_layout.size;

    let field_offset = struct_layout.fields[field_index].offset;
    let field_ty = &func_env.types.unwrap_struct(interned_type_index)?.fields[field_index];
    let field_size = wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&field_ty.element_type);
    assert!(field_offset + field_size <= struct_size);

    let field_addr = func_env.prepare_gc_ref_access(
        builder,
        struct_ref,
        BoundsCheck::StaticObjectField {
            offset: field_offset,
            access_size: u8::try_from(field_size).unwrap(),
            object_size: struct_size,
        },
    );

    write_field_at_addr(
        func_env,
        builder,
        field_ty.element_type,
        field_addr,
        new_val,
    )?;

    log::trace!("translate_struct_set: finished");
    Ok(())
}
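
// For both `struct.get` and `struct.set` above, the access boils down to the
// following shape (a sketch, assuming the layout computed at compile time):
//
//   trapz struct_ref, null_reference         ;; Wasm-visible null check
//   addr = prepare_gc_ref_access(struct_ref, ;; internal bounds check that
//              offset = field_offset,        ;; `struct_ref + struct_size`
//              object_size = struct_size)    ;; fits within the GC heap
//   load/store little addr                   ;; little-endian field access
//
// The field offset and size are static, so the only dynamic work is the null
// check and the object-size bounds check.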

pub fn translate_exn_unbox(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    tag_index: TagIndex,
    exn_ref: ir::Value,
) -> WasmResult<SmallVec<[ir::Value; 4]>> {
    log::trace!("translate_exn_unbox({tag_index:?}, {exn_ref:?})");

    // We know that the `exn_ref` is not null because we reach this
    // operation only in catch blocks, and throws are initiated from
    // runtime code that checks for nulls first.

    // Get the GcExceptionLayout associated with this tag's
    // function type, and generate loads for each field.
    let exception_ty_idx = func_env
        .exception_type_from_tag(tag_index)
        .unwrap_module_type_index();
    let exception_ty = func_env.types.unwrap_exn(exception_ty_idx)?;
    let exn_layout = func_env.struct_or_exn_layout(exception_ty_idx);
    let exn_size = exn_layout.size;

    // Gather accesses first because these require a borrow on
    // `func_env`, which we later mutate below via
    // `prepare_gc_ref_access()`.
    let mut accesses: SmallVec<[_; 4]> = smallvec![];
    for (field_ty, field_layout) in exception_ty.fields.iter().zip(exn_layout.fields.iter()) {
        accesses.push((field_layout.offset, field_ty.element_type));
    }

    let mut result = smallvec![];
    for (field_offset, field_ty) in accesses {
        let field_size = wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&field_ty);
        assert!(field_offset + field_size <= exn_size);
        let field_addr = func_env.prepare_gc_ref_access(
            builder,
            exn_ref,
            BoundsCheck::StaticObjectField {
                offset: field_offset,
                access_size: u8::try_from(field_size).unwrap(),
                object_size: exn_size,
            },
        );

        let value = read_field_at_addr(func_env, builder, field_ty, field_addr, None)?;
        result.push(value);
    }

    log::trace!("translate_exn_unbox(..) -> {result:?}");
    Ok(result)
}

pub fn translate_exn_throw(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    tag_index: TagIndex,
    args: &[ir::Value],
) -> WasmResult<()> {
    let (instance_id, defined_tag_id) = func_env.get_instance_and_tag(builder, tag_index);
    let exnref = gc_compiler(func_env)?.alloc_exn(
        func_env,
        builder,
        tag_index,
        args,
        instance_id,
        defined_tag_id,
    )?;
    translate_exn_throw_ref(func_env, builder, exnref)
}

pub fn translate_exn_throw_ref(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    exnref: ir::Value,
) -> WasmResult<()> {
    let builtin = func_env.builtin_functions.throw_ref(builder.func);
    let sig = builder.func.dfg.ext_funcs[builtin].signature;
    let vmctx = func_env.vmctx_val(&mut builder.cursor());

    // Generate a `try_call` with handlers from the current
    // stack. This libcall is unique among libcall implementations of
    // opcodes: we know the others will not throw, but `throw_ref`'s
    // entire purpose is to throw. So if there are any handlers in the
    // local function body, we need to attach them to this callsite
    // like any other.
    let continuation = builder.create_block();
    let current_block = builder.current_block().unwrap();
    builder.insert_block_after(continuation, current_block);
    let continuation_call = builder.func.dfg.block_call(continuation, &[]);
    let mut table_items = vec![ExceptionTableItem::Context(vmctx)];
    for (tag, block) in func_env.stacks.handlers.handlers() {
        let block_call = builder
            .func
            .dfg
            .block_call(block, &[BlockArg::TryCallExn(0)]);
        table_items.push(match tag {
            Some(tag) => ExceptionTableItem::Tag(tag, block_call),
            None => ExceptionTableItem::Default(block_call),
        });
    }
    let etd = ExceptionTableData::new(sig, continuation_call, table_items);
    let et = builder.func.dfg.exception_tables.push(etd);

    builder.ins().try_call(builtin, &[vmctx, exnref], et);

    builder.switch_to_block(continuation);
    builder.seal_block(continuation);
    func_env.trap(builder, crate::TRAP_UNREACHABLE);

    Ok(())
}
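
// Roughly, the callsite that `translate_exn_throw_ref` builds looks like this
// pseudo-CLIF (a sketch; the handler entries mirror whatever handlers are
// live at this point in the function):
//
//   try_call throw_ref(vmctx, exnref), continuation, [
//       context vmctx,
//       tag T0: handler0(exn0),
//       ...
//       default: default_handler(exn0),
//   ]
//
//   continuation:
//     trap unreachable   ;; `throw_ref` never returns normally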

pub fn translate_array_new(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder,
    array_type_index: TypeIndex,
    elem: ir::Value,
    len: ir::Value,
) -> WasmResult<ir::Value> {
    log::trace!("translate_array_new({array_type_index:?}, {elem:?}, {len:?})");
    let result = gc_compiler(func_env)?.alloc_array(
        func_env,
        builder,
        array_type_index,
        ArrayInit::Fill { elem, len },
    )?;
    log::trace!("translate_array_new(..) -> {result:?}");
    Ok(result)
}

pub fn translate_array_new_default(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder,
    array_type_index: TypeIndex,
    len: ir::Value,
) -> WasmResult<ir::Value> {
    log::trace!("translate_array_new_default({array_type_index:?}, {len:?})");

    let interned_ty = func_env.module.types[array_type_index].unwrap_module_type_index();
    let array_ty = func_env.types.unwrap_array(interned_ty)?;
    let elem = default_value(&mut builder.cursor(), func_env, &array_ty.0.element_type);
    let result = gc_compiler(func_env)?.alloc_array(
        func_env,
        builder,
        array_type_index,
        ArrayInit::Fill { elem, len },
    )?;
    log::trace!("translate_array_new_default(..) -> {result:?}");
    Ok(result)
}

pub fn translate_array_new_fixed(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder,
    array_type_index: TypeIndex,
    elems: &[ir::Value],
) -> WasmResult<ir::Value> {
    log::trace!("translate_array_new_fixed({array_type_index:?}, {elems:?})");
    let result = gc_compiler(func_env)?.alloc_array(
        func_env,
        builder,
        array_type_index,
        ArrayInit::Elems(elems),
    )?;
    log::trace!("translate_array_new_fixed(..) -> {result:?}");
    Ok(result)
}

impl ArrayInit<'_> {
    /// Get the length (as an `i32`-typed `ir::Value`) of these array elements.
    #[cfg_attr(
        not(any(feature = "gc-drc", feature = "gc-null")),
        expect(dead_code, reason = "easier to define")
    )]
    fn len(self, pos: &mut FuncCursor) -> ir::Value {
        match self {
            ArrayInit::Fill { len, .. } => len,
            ArrayInit::Elems(e) => {
                let len = u32::try_from(e.len()).unwrap();
                pos.ins().iconst(ir::types::I32, i64::from(len))
            }
        }
    }

    /// Initialize a newly-allocated array's elements.
    #[cfg_attr(
        not(any(feature = "gc-drc", feature = "gc-null")),
        expect(dead_code, reason = "easier to define")
    )]
    fn initialize(
        self,
        func_env: &mut FuncEnvironment<'_>,
        builder: &mut FunctionBuilder<'_>,
        interned_type_index: ModuleInternedTypeIndex,
        base_size: u32,
        size: ir::Value,
        elems_addr: ir::Value,
        mut init_field: impl FnMut(
            &mut FuncEnvironment<'_>,
            &mut FunctionBuilder<'_>,
            WasmStorageType,
            ir::Value,
            ir::Value,
        ) -> WasmResult<()>,
    ) -> WasmResult<()> {
        log::trace!(
            "initialize_array({interned_type_index:?}, {base_size:?}, {size:?}, {elems_addr:?})"
        );

        assert!(!func_env.types[interned_type_index].composite_type.shared);
        let array_ty = func_env.types[interned_type_index]
            .composite_type
            .inner
            .unwrap_array();
        let elem_ty = array_ty.0.element_type;
        let elem_size = wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&elem_ty);
        let pointer_type = func_env.pointer_type();
        let elem_size = builder.ins().iconst(pointer_type, i64::from(elem_size));
        match self {
            ArrayInit::Elems(elems) => {
                let mut elem_addr = elems_addr;
                for val in elems {
                    init_field(func_env, builder, elem_ty, elem_addr, *val)?;
                    elem_addr = builder.ins().iadd(elem_addr, elem_size);
                }
            }
            ArrayInit::Fill { elem, len: _ } => {
                // Compute the end address of the elements.
                let base_size = builder.ins().iconst(pointer_type, i64::from(base_size));
                let array_addr = builder.ins().isub(elems_addr, base_size);
                let size = uextend_i32_to_pointer_type(builder, pointer_type, size);
                let elems_end = builder.ins().iadd(array_addr, size);

                emit_array_fill_impl(
                    func_env,
                    builder,
                    elems_addr,
                    elem_size,
                    elems_end,
                    |func_env, builder, elem_addr| {
                        init_field(func_env, builder, elem_ty, elem_addr, elem)
                    },
                )?;
            }
        }
        log::trace!("initialize_array: finished");
        Ok(())
    }
}

fn emit_array_fill_impl(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    elem_addr: ir::Value,
    elem_size: ir::Value,
    fill_end: ir::Value,
    mut emit_elem_write: impl FnMut(
        &mut FuncEnvironment<'_>,
        &mut FunctionBuilder<'_>,
        ir::Value,
    ) -> WasmResult<()>,
) -> WasmResult<()> {
    log::trace!(
        "emit_array_fill_impl(elem_addr: {elem_addr:?}, elem_size: {elem_size:?}, fill_end: {fill_end:?})"
    );

    let pointer_ty = func_env.pointer_type();

    assert_eq!(builder.func.dfg.value_type(elem_addr), pointer_ty);
    assert_eq!(builder.func.dfg.value_type(elem_size), pointer_ty);
    assert_eq!(builder.func.dfg.value_type(fill_end), pointer_ty);

    // Loop to fill the elements, emitting the equivalent of the following
    // pseudo-CLIF:
    //
    // current_block:
    //     ...
    //     jump loop_header_block(elem_addr)
    //
    // loop_header_block(elem_addr: i32):
    //     done = icmp eq elem_addr, fill_end
    //     brif done, continue_block, loop_body_block
    //
    // loop_body_block:
    //     emit_elem_write()
    //     next_elem_addr = iadd elem_addr, elem_size
    //     jump loop_header_block(next_elem_addr)
    //
    // continue_block:
    //     ...

    let current_block = builder.current_block().unwrap();
    let loop_header_block = builder.create_block();
    let loop_body_block = builder.create_block();
    let continue_block = builder.create_block();

    builder.ensure_inserted_block();
    builder.insert_block_after(loop_header_block, current_block);
    builder.insert_block_after(loop_body_block, loop_header_block);
    builder.insert_block_after(continue_block, loop_body_block);

    // Current block: jump to the loop header block with the first element's
    // address.
    builder.ins().jump(loop_header_block, &[elem_addr.into()]);

    // Loop header block: check if we're done, then jump to either the continue
    // block or the loop body block.
    builder.switch_to_block(loop_header_block);
    builder.append_block_param(loop_header_block, pointer_ty);
    log::trace!("emit_array_fill_impl: loop header");
    func_env.translate_loop_header(builder)?;
    let elem_addr = builder.block_params(loop_header_block)[0];
    let done = builder.ins().icmp(IntCC::Equal, elem_addr, fill_end);
    builder
        .ins()
        .brif(done, continue_block, &[], loop_body_block, &[]);

    // Loop body block: write the value to the current element, compute the
    // next element's address, and then jump back to the loop header block.
    builder.switch_to_block(loop_body_block);
    log::trace!("emit_array_fill_impl: loop body");
    emit_elem_write(func_env, builder, elem_addr)?;
    let next_elem_addr = builder.ins().iadd(elem_addr, elem_size);
    builder
        .ins()
        .jump(loop_header_block, &[next_elem_addr.into()]);

    // Continue...
    builder.switch_to_block(continue_block);
    log::trace!("emit_array_fill_impl: finished");
    builder.seal_block(loop_header_block);
    builder.seal_block(loop_body_block);
    builder.seal_block(continue_block);
    Ok(())
}

pub fn translate_array_fill(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    array_type_index: TypeIndex,
    array_ref: ir::Value,
    index: ir::Value,
    value: ir::Value,
    n: ir::Value,
) -> WasmResult<()> {
    log::trace!(
        "translate_array_fill({array_type_index:?}, {array_ref:?}, {index:?}, {value:?}, {n:?})"
    );

    let len = translate_array_len(func_env, builder, array_ref)?;

    // Check that the full range of elements we want to fill is within bounds.
    let end_index = func_env.uadd_overflow_trap(builder, index, n, crate::TRAP_ARRAY_OUT_OF_BOUNDS);
    let out_of_bounds = builder
        .ins()
        .icmp(IntCC::UnsignedGreaterThan, end_index, len);
    func_env.trapnz(builder, out_of_bounds, crate::TRAP_ARRAY_OUT_OF_BOUNDS);

    // Get the address of the first element we want to fill.
    let interned_type_index = func_env.module.types[array_type_index].unwrap_module_type_index();
    let ArraySizeInfo {
        obj_size,
        one_elem_size,
        base_size,
    } = emit_array_size_info(func_env, builder, interned_type_index, len);
    let offset_in_elems = builder.ins().imul(index, one_elem_size);
    let obj_offset = builder.ins().iadd(base_size, offset_in_elems);
    let elem_addr = func_env.prepare_gc_ref_access(
        builder,
        array_ref,
        BoundsCheck::DynamicObjectField {
            offset: obj_offset,
            object_size: obj_size,
        },
    );

    // Calculate the end address, just after the filled region.
    let fill_size = builder.ins().imul(n, one_elem_size);
    let fill_size = uextend_i32_to_pointer_type(builder, func_env.pointer_type(), fill_size);
    let fill_end = builder.ins().iadd(elem_addr, fill_size);

    let one_elem_size =
        uextend_i32_to_pointer_type(builder, func_env.pointer_type(), one_elem_size);

    let result = emit_array_fill_impl(
        func_env,
        builder,
        elem_addr,
        one_elem_size,
        fill_end,
        |func_env, builder, elem_addr| {
            let elem_ty = func_env
                .types
                .unwrap_array(interned_type_index)?
                .0
                .element_type;
            write_field_at_addr(func_env, builder, elem_ty, elem_addr, value)
        },
    );
    log::trace!("translate_array_fill(..) -> {result:?}");
    result
}

pub fn translate_array_len(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder,
    array_ref: ir::Value,
) -> WasmResult<ir::Value> {
    log::trace!("translate_array_len({array_ref:?})");

    func_env.trapz(builder, array_ref, crate::TRAP_NULL_REFERENCE);

    let len_offset = gc_compiler(func_env)?.layouts().array_length_field_offset();
    let len_field = func_env.prepare_gc_ref_access(
        builder,
        array_ref,
        // Note: We can't bounds check the whole array object's size because we
        // don't know its length yet. Chicken and egg problem.
        BoundsCheck::StaticOffset {
            offset: len_offset,
            access_size: u8::try_from(ir::types::I32.bytes()).unwrap(),
        },
    );
    let result = builder.ins().load(
        ir::types::I32,
        ir::MemFlags::trusted().with_readonly(),
        len_field,
        0,
    );
    log::trace!("translate_array_len(..) -> {result:?}");
    Ok(result)
}

struct ArraySizeInfo {
    /// The `i32` size of the whole array object, in bytes.
    obj_size: ir::Value,

    /// The `i32` size of each one of the array's elements, in bytes.
    one_elem_size: ir::Value,

    /// The `i32` size of the array's base object, in bytes. This is also the
    /// offset from the start of the array object to its elements.
    base_size: ir::Value,
}

/// Emit code to get the dynamic size (in bytes) of a whole array object, along
/// with some other related bits.
fn emit_array_size_info(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    array_type_index: ModuleInternedTypeIndex,
    // `i32` value containing the array's length.
    array_len: ir::Value,
) -> ArraySizeInfo {
    let array_layout = func_env.array_layout(array_type_index);

    // Note that we check for overflow below because we can't trust the array's
    // length: it came from inside the GC heap.
    //
    // We check for 32-bit multiplication overflow by performing a 64-bit
    // multiplication and testing the high bits.
    let one_elem_size = builder
        .ins()
        .iconst(ir::types::I64, i64::from(array_layout.elem_size));
    let array_len = builder.ins().uextend(ir::types::I64, array_len);
    let all_elems_size = builder.ins().imul(one_elem_size, array_len);

    let high_bits = builder.ins().ushr_imm(all_elems_size, 32);
    builder.ins().trapnz(high_bits, TRAP_INTERNAL_ASSERT);

    let all_elems_size = builder.ins().ireduce(ir::types::I32, all_elems_size);
    let base_size = builder
        .ins()
        .iconst(ir::types::I32, i64::from(array_layout.base_size));
    let obj_size =
        builder
            .ins()
            .uadd_overflow_trap(all_elems_size, base_size, TRAP_INTERNAL_ASSERT);

    let one_elem_size = builder.ins().ireduce(ir::types::I32, one_elem_size);

    ArraySizeInfo {
        obj_size,
        one_elem_size,
        base_size,
    }
}
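
// The overflow-checked size computation above, as pseudo-CLIF (a sketch;
// value names invented for illustration):
//
//   v_len64    = uextend.i64 v_len
//   v_elems    = imul v_elem_size64, v_len64
//   v_high     = ushr_imm v_elems, 32
//   trapnz v_high, internal_assert           ;; 32-bit multiply overflowed
//   v_elems32  = ireduce.i32 v_elems
//   v_obj_size = uadd_overflow_trap v_elems32, v_base_size, internal_assert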

/// Get the bounds-checked address of an element in an array.
///
/// The emitted code will trap if `index >= array.length`.
///
/// Returns the `ir::Value` containing the address of the `index`th element in
/// the array. You may read or write a value of the array's element type at this
/// address. You may not use it for any other kind of access, nor reuse this
/// value across GC safepoints.
fn array_elem_addr(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    array_type_index: ModuleInternedTypeIndex,
    array_ref: ir::Value,
    index: ir::Value,
) -> ir::Value {
    // First, assert that `index < array.length`.
    //
    // This check is visible at the Wasm-semantics level.
    //
    // TODO: We should emit spectre-safe bounds checks for array accesses (if
    // configured) but we don't currently have a great way to do that here. The
    // proper solution is to use linear memories to back GC heaps and reuse the
    // code in `bounds_check.rs` to implement these bounds checks. That is all
    // planned, but not yet implemented.

    let len = translate_array_len(func_env, builder, array_ref).unwrap();

    let in_bounds = builder.ins().icmp(IntCC::UnsignedLessThan, index, len);
    func_env.trapz(builder, in_bounds, crate::TRAP_ARRAY_OUT_OF_BOUNDS);

    // Compute the size (in bytes) of the whole array object.
    let ArraySizeInfo {
        obj_size,
        one_elem_size,
        base_size,
    } = emit_array_size_info(func_env, builder, array_type_index, len);

    // Compute the offset of the `index`th element within the array object.
    //
    // NB: no need to check for overflow here, since at this point we know that
    // `len * elem_size + base_size` did not overflow and `i < len`.
    let offset_in_elems = builder.ins().imul(index, one_elem_size);
    let offset_in_array = builder.ins().iadd(offset_in_elems, base_size);

    // Finally, use the object size and element offset we just computed to
    // perform our implementation-internal bounds checks.
    //
    // Checking the whole object's size, rather than the `index`th element's
    // size, allows these bounds checks to be deduplicated across repeated
    // accesses to the same array at different indices.
    //
    // This check should not be visible to Wasm, and serves to protect us from
    // our own implementation bugs. The goal is to keep any potential widgets
    // confined within the GC heap, and turn what would otherwise be a security
    // vulnerability into a simple bug.
    //
    // TODO: Ideally we should fold the first Wasm-visible bounds check into
    // this internal bounds check, so that we aren't performing multiple,
    // redundant bounds checks. But we should figure out how to do this in a way
    // that doesn't defeat the object-size bounds checking's deduplication
    // mentioned above.
    func_env.prepare_gc_ref_access(
        builder,
        array_ref,
        BoundsCheck::DynamicObjectField {
            offset: offset_in_array,
            object_size: obj_size,
        },
    )
}

pub fn translate_array_get(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder,
    array_type_index: TypeIndex,
    array_ref: ir::Value,
    index: ir::Value,
    extension: Option<Extension>,
) -> WasmResult<ir::Value> {
    log::trace!("translate_array_get({array_type_index:?}, {array_ref:?}, {index:?})");

    let array_type_index = func_env.module.types[array_type_index].unwrap_module_type_index();
    let elem_addr = array_elem_addr(func_env, builder, array_type_index, array_ref, index);

    let array_ty = func_env.types.unwrap_array(array_type_index)?;
    let elem_ty = array_ty.0.element_type;

    let result = read_field_at_addr(func_env, builder, elem_ty, elem_addr, extension)?;
    log::trace!("translate_array_get(..) -> {result:?}");
    Ok(result)
}

pub fn translate_array_set(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder,
    array_type_index: TypeIndex,
    array_ref: ir::Value,
    index: ir::Value,
    value: ir::Value,
) -> WasmResult<()> {
    log::trace!("translate_array_set({array_type_index:?}, {array_ref:?}, {index:?}, {value:?})");

    let array_type_index = func_env.module.types[array_type_index].unwrap_module_type_index();
    let elem_addr = array_elem_addr(func_env, builder, array_type_index, array_ref, index);

    let array_ty = func_env.types.unwrap_array(array_type_index)?;
    let elem_ty = array_ty.0.element_type;

    write_field_at_addr(func_env, builder, elem_ty, elem_addr, value)?;

    log::trace!("translate_array_set: finished");
    Ok(())
}

pub fn translate_ref_test(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    test_ty: WasmRefType,
    val: ir::Value,
    val_ty: WasmRefType,
) -> WasmResult<ir::Value> {
    log::trace!("translate_ref_test({test_ty:?}, {val:?})");

    // First special case: testing for references to bottom types.
    if test_ty.heap_type.is_bottom() {
        let result = if test_ty.nullable {
            // All null references (within the same type hierarchy) match null
            // references to the bottom type.
            func_env.translate_ref_is_null(builder.cursor(), val, val_ty)?
        } else {
            // `ref.test` is always false for non-nullable bottom types, as the
            // bottom types are uninhabited.
            builder.ins().iconst(ir::types::I32, 0)
        };
        log::trace!("translate_ref_test(..) -> {result:?}");
        return Ok(result);
    }

    // And because `ref.test heap_ty` is only valid on operands whose type is
    // in the same type hierarchy as `heap_ty`, if `heap_ty` is its hierarchy's
    // top type, we only need to worry about whether we are testing for
    // nullability or not.
    if test_ty.heap_type.is_top() {
        let result = if test_ty.nullable {
            builder.ins().iconst(ir::types::I32, 1)
        } else {
            let is_null = func_env.translate_ref_is_null(builder.cursor(), val, val_ty)?;
            let zero = builder.ins().iconst(ir::types::I32, 0);
            let one = builder.ins().iconst(ir::types::I32, 1);
            builder.ins().select(is_null, zero, one)
        };
        log::trace!("translate_ref_test(..) -> {result:?}");
        return Ok(result);
    }

    // `i31ref`s are a little interesting because they don't point to GC
    // objects; we test the bit pattern of the reference itself.
    if test_ty.heap_type == WasmHeapType::I31 {
        let i31_mask = builder.ins().iconst(
            ir::types::I32,
            i64::from(wasmtime_environ::I31_DISCRIMINANT),
        );
        let is_i31 = builder.ins().band(val, i31_mask);
        let result = if test_ty.nullable {
            let is_null = func_env.translate_ref_is_null(builder.cursor(), val, val_ty)?;
            builder.ins().bor(is_null, is_i31)
        } else {
            is_i31
        };
        log::trace!("translate_ref_test(..) -> {result:?}");
        return Ok(result);
    }

    // Otherwise, in the general case, we need to inspect our given object's
    // actual type, which also requires null-checking and i31-checking it.

    let is_any_hierarchy = test_ty.heap_type.top() == WasmHeapTopType::Any;

    let non_null_block = builder.create_block();
    let non_null_non_i31_block = builder.create_block();
    let continue_block = builder.create_block();

    // Current block: check if the reference is null and branch appropriately.
    let is_null = func_env.translate_ref_is_null(builder.cursor(), val, val_ty)?;
    let result_when_is_null = builder
        .ins()
        .iconst(ir::types::I32, test_ty.nullable as i64);
    builder.ins().brif(
        is_null,
        continue_block,
        &[result_when_is_null.into()],
        non_null_block,
        &[],
    );

    // Non-null block: We know the GC ref is non-null, but we need to also
    // check for `i31` references that don't point to GC objects.
    builder.switch_to_block(non_null_block);
    log::trace!("translate_ref_test: non-null ref block");
    if is_any_hierarchy {
        let i31_mask = builder.ins().iconst(
            ir::types::I32,
            i64::from(wasmtime_environ::I31_DISCRIMINANT),
        );
        let is_i31 = builder.ins().band(val, i31_mask);
        // If it is an `i31`, then create the result value based on whether we
        // want `i31`s to pass the test or not.
        let result_when_is_i31 = builder.ins().iconst(
            ir::types::I32,
            matches!(
                test_ty.heap_type,
                WasmHeapType::Any | WasmHeapType::Eq | WasmHeapType::I31
            ) as i64,
        );
        builder.ins().brif(
            is_i31,
            continue_block,
            &[result_when_is_i31.into()],
            non_null_non_i31_block,
            &[],
        );
    } else {
        // If we aren't testing the `any` hierarchy, the reference cannot be an
        // `i31ref`. Jump directly to the non-null and non-i31 block; rely on
        // branch folding during lowering to clean this up.
        builder.ins().jump(non_null_non_i31_block, &[]);
    }

    // Non-null and non-i31 block: Read the actual `VMGcKind` or
    // `VMSharedTypeIndex` out of the object's header and check whether it
    // matches the expected type.
    builder.switch_to_block(non_null_non_i31_block);
    log::trace!("translate_ref_test: non-null and non-i31 ref block");
    let check_header_kind = |func_env: &mut FuncEnvironment<'_>,
                             builder: &mut FunctionBuilder,
                             val: ir::Value,
                             expected_kind: VMGcKind|
     -> ir::Value {
        let kind_addr = func_env.prepare_gc_ref_access(
            builder,
            val,
            BoundsCheck::StaticObjectField {
                offset: wasmtime_environ::VM_GC_HEADER_KIND_OFFSET,
                access_size: wasmtime_environ::VM_GC_KIND_SIZE,
                object_size: wasmtime_environ::VM_GC_HEADER_SIZE,
            },
        );
        let actual_kind = builder.ins().load(
            ir::types::I32,
            ir::MemFlags::trusted().with_readonly(),
            kind_addr,
            0,
        );
        let expected_kind = builder
            .ins()
            .iconst(ir::types::I32, i64::from(expected_kind.as_u32()));
        // Inline version of `VMGcKind::matches`.
        let and = builder.ins().band(actual_kind, expected_kind);
        let kind_matches = builder
            .ins()
            .icmp(ir::condcodes::IntCC::Equal, and, expected_kind);
        builder.ins().uextend(ir::types::I32, kind_matches)
    };
    let result = match test_ty.heap_type {
        WasmHeapType::Any
        | WasmHeapType::None
        | WasmHeapType::Extern
        | WasmHeapType::NoExtern
        | WasmHeapType::Func
        | WasmHeapType::NoFunc
        | WasmHeapType::Cont
        | WasmHeapType::NoCont
        | WasmHeapType::Exn
        | WasmHeapType::NoExn
        | WasmHeapType::I31 => unreachable!("handled top, bottom, and i31 types above"),

        // For these abstract but non-top and non-bottom types, we check the
        // `VMGcKind` that is in the object's header.
        WasmHeapType::Eq => check_header_kind(func_env, builder, val, VMGcKind::EqRef),
        WasmHeapType::Struct => check_header_kind(func_env, builder, val, VMGcKind::StructRef),
        WasmHeapType::Array => check_header_kind(func_env, builder, val, VMGcKind::ArrayRef),

        // For concrete types, we need to do a full subtype check between the
        // `VMSharedTypeIndex` in the object's header and the
        // `ModuleInternedTypeIndex` we have here.
        //
        // TODO: This check should ideally be done inline, but we don't have a
        // good way to access the `TypeRegistry`'s supertypes arrays from Wasm
        // code at the moment.
        WasmHeapType::ConcreteArray(ty)
        | WasmHeapType::ConcreteStruct(ty)
        | WasmHeapType::ConcreteExn(ty) => {
            let expected_interned_ty = ty.unwrap_module_type_index();
            let expected_shared_ty =
                func_env.module_interned_to_shared_ty(&mut builder.cursor(), expected_interned_ty);

            let ty_addr = func_env.prepare_gc_ref_access(
                builder,
                val,
                BoundsCheck::StaticOffset {
                    offset: wasmtime_environ::VM_GC_HEADER_TYPE_INDEX_OFFSET,
                    access_size: func_env.offsets.size_of_vmshared_type_index(),
                },
            );
            let actual_shared_ty = builder.ins().load(
                ir::types::I32,
                ir::MemFlags::trusted().with_readonly(),
                ty_addr,
                0,
            );

            func_env.is_subtype(builder, actual_shared_ty, expected_shared_ty)
        }

        // Same as for concrete arrays and structs except that a `VMFuncRef`
        // doesn't begin with a `VMGcHeader` and is a raw pointer rather than a
        // GC heap index.
        WasmHeapType::ConcreteFunc(ty) => {
            let expected_interned_ty = ty.unwrap_module_type_index();
            let expected_shared_ty =
                func_env.module_interned_to_shared_ty(&mut builder.cursor(), expected_interned_ty);

            let actual_shared_ty = func_env.load_funcref_type_index(
                &mut builder.cursor(),
                ir::MemFlags::trusted().with_readonly(),
                val,
            );

            func_env.is_subtype(builder, actual_shared_ty, expected_shared_ty)
        }
        WasmHeapType::ConcreteCont(_) => {
            // TODO(#10248) GC integration for stack switching
            return Err(wasmtime_environ::WasmError::Unsupported(
                "Stack switching feature not compatible with GC, yet".to_string(),
            ));
        }
    };
    builder.ins().jump(continue_block, &[result.into()]);

    // Control flow join point with the result.
    builder.switch_to_block(continue_block);
    let result = builder.append_block_param(continue_block, ir::types::I32);
    log::trace!("translate_ref_test(..) -> {result:?}");

    builder.seal_block(non_null_block);
    builder.seal_block(non_null_non_i31_block);
    builder.seal_block(continue_block);

    Ok(result)
}
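
// The general-case `ref.test` above produces this block structure, roughly
// (a sketch; the i31 branch only exists when testing the `any` hierarchy):
//
//   current_block:
//     is_null = ...
//     brif is_null, continue_block(test_ty.nullable), non_null_block
//
//   non_null_block:
//     is_i31 = band val, I31_DISCRIMINANT
//     brif is_i31, continue_block(i31_passes_test), non_null_non_i31_block
//
//   non_null_non_i31_block:
//     result = <VMGcKind check or subtype check on the header's type index>
//     jump continue_block(result)
//
//   continue_block(result: i32):
//     ...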

fn uextend_i32_to_pointer_type(
    builder: &mut FunctionBuilder,
    pointer_type: ir::Type,
    value: ir::Value,
) -> ir::Value {
    assert_eq!(builder.func.dfg.value_type(value), ir::types::I32);
    match pointer_type {
        ir::types::I32 => value,
        ir::types::I64 => builder.ins().uextend(ir::types::I64, value),
        _ => unreachable!(),
    }
}

/// Emit CLIF to compute an array object's total size, given the dynamic length
/// in its initialization.
///
/// Traps if the size overflows.
#[cfg_attr(
    not(any(feature = "gc-drc", feature = "gc-null")),
    expect(dead_code, reason = "easier to define")
)]
fn emit_array_size(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    array_layout: &GcArrayLayout,
    len: ir::Value,
) -> ir::Value {
    let base_size = builder
        .ins()
        .iconst(ir::types::I32, i64::from(array_layout.base_size));

    // `elems_size = len * elem_size`
    //
    // Check for multiplication overflow and trap if it occurs, since that
    // means Wasm is attempting to allocate an array that is larger than our
    // implementation limits. (Note: there is no standard implementation
    // limit for array length beyond `u32::MAX`.)
    //
    // We implement this check by encoding our logically-32-bit operands as
    // i64 values, doing a 64-bit multiplication, and then checking the high
    // 32 bits of the multiplication's result. If the high 32 bits are not
    // all zeros, then the multiplication overflowed.
    debug_assert_eq!(builder.func.dfg.value_type(len), ir::types::I32);
    let len = builder.ins().uextend(ir::types::I64, len);
    let elems_size_64 = builder
        .ins()
        .imul_imm(len, i64::from(array_layout.elem_size));
    let high_bits = builder.ins().ushr_imm(elems_size_64, 32);
    func_env.trapnz(builder, high_bits, crate::TRAP_ALLOCATION_TOO_LARGE);
    let elems_size = builder.ins().ireduce(ir::types::I32, elems_size_64);

    // And if adding the base size and elements size overflows, then the
    // allocation is too large.
    let size = func_env.uadd_overflow_trap(
        builder,
        base_size,
        elems_size,
        crate::TRAP_ALLOCATION_TOO_LARGE,
    );

    size
}

/// Common helper for struct-field initialization that can be reused across
/// collectors.
#[cfg_attr(
    not(any(feature = "gc-drc", feature = "gc-null")),
    expect(dead_code, reason = "easier to define")
)]
fn initialize_struct_fields(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    struct_ty: ModuleInternedTypeIndex,
    raw_ptr_to_struct: ir::Value,
    field_values: &[ir::Value],
    mut init_field: impl FnMut(
        &mut FuncEnvironment<'_>,
        &mut FunctionBuilder<'_>,
        WasmStorageType,
        ir::Value,
        ir::Value,
    ) -> WasmResult<()>,
) -> WasmResult<()> {
    let struct_layout = func_env.struct_or_exn_layout(struct_ty);
    let struct_size = struct_layout.size;
    let field_offsets: SmallVec<[_; 8]> = struct_layout.fields.iter().map(|f| f.offset).collect();
    assert_eq!(field_offsets.len(), field_values.len());

    assert!(!func_env.types[struct_ty].composite_type.shared);
    let fields = match &func_env.types[struct_ty].composite_type.inner {
        WasmCompositeInnerType::Struct(s) => &s.fields,
        WasmCompositeInnerType::Exn(e) => &e.fields,
        _ => panic!("Not a struct or exception type"),
    };

    let field_types: SmallVec<[_; 8]> = fields.iter().cloned().collect();
    assert_eq!(field_types.len(), field_values.len());

    for ((ty, val), offset) in field_types.into_iter().zip(field_values).zip(field_offsets) {
        let size_of_access = wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&ty.element_type);
        assert!(offset + size_of_access <= struct_size);
        let field_addr = builder.ins().iadd_imm(raw_ptr_to_struct, i64::from(offset));
        init_field(func_env, builder, ty.element_type, field_addr, *val)?;
    }

    Ok(())
}

impl FuncEnvironment<'_> {
    fn gc_layout(&mut self, type_index: ModuleInternedTypeIndex) -> &GcLayout {
        // Lazily compute and cache the layout.
        if !self.ty_to_gc_layout.contains_key(&type_index) {
            let ty = &self.types[type_index].composite_type;
            let layout = gc_compiler(self)
                .unwrap()
                .layouts()
                .gc_layout(ty)
                .expect("should only call `FuncEnvironment::gc_layout` for GC types");
            self.ty_to_gc_layout.insert(type_index, layout);
        }

        self.ty_to_gc_layout.get(&type_index).unwrap()
    }

    /// Get the `GcArrayLayout` for the array type at the given `type_index`.
    fn array_layout(&mut self, type_index: ModuleInternedTypeIndex) -> &GcArrayLayout {
        self.gc_layout(type_index).unwrap_array()
    }

    /// Get the `GcStructLayout` for the struct or exception type at the given
    /// `type_index`.
    fn struct_or_exn_layout(&mut self, type_index: ModuleInternedTypeIndex) -> &GcStructLayout {
        self.gc_layout(type_index).unwrap_struct()
    }

    /// Get or create the global for our GC heap's base pointer.
    fn get_gc_heap_base_global(&mut self, func: &mut ir::Function) -> ir::GlobalValue {
        if let Some(base) = self.gc_heap_base {
            return base;
        }

        let store_context_ptr = self.get_vmstore_context_ptr_global(func);
        let offset = self.offsets.ptr.vmstore_context_gc_heap_base();

        let mut flags = ir::MemFlags::trusted();
        if !self
            .tunables
            .gc_heap_memory_type()
            .memory_may_move(self.tunables)
        {
            flags.set_readonly();
            flags.set_can_move();
        }

        let base = func.create_global_value(ir::GlobalValueData::Load {
            base: store_context_ptr,
            offset: Offset32::new(offset.into()),
            global_type: self.pointer_type(),
            flags,
        });

        self.gc_heap_base = Some(base);
        base
    }

    /// Get the GC heap's base.
    #[cfg(any(feature = "gc-null", feature = "gc-drc"))]
    fn get_gc_heap_base(&mut self, builder: &mut FunctionBuilder) -> ir::Value {
        let global = self.get_gc_heap_base_global(&mut builder.func);
        builder.ins().global_value(self.pointer_type(), global)
    }

    fn get_gc_heap_bound_global(&mut self, func: &mut ir::Function) -> ir::GlobalValue {
        if let Some(bound) = self.gc_heap_bound {
            return bound;
        }
        let store_context_ptr = self.get_vmstore_context_ptr_global(func);
        let offset = self.offsets.ptr.vmstore_context_gc_heap_current_length();
        let bound = func.create_global_value(ir::GlobalValueData::Load {
            base: store_context_ptr,
            offset: Offset32::new(offset.into()),
            global_type: self.pointer_type(),
            flags: ir::MemFlags::trusted(),
        });
        self.gc_heap_bound = Some(bound);
        bound
    }

    /// Get the GC heap's bound.
    #[cfg(feature = "gc-null")]
    fn get_gc_heap_bound(&mut self, builder: &mut FunctionBuilder) -> ir::Value {
        let global = self.get_gc_heap_bound_global(&mut builder.func);
        builder.ins().global_value(self.pointer_type(), global)
    }

    /// Get or create the `Heap` for our GC heap.
    fn get_gc_heap(&mut self, func: &mut ir::Function) -> Heap {
        if let Some(heap) = self.gc_heap {
            return heap;
        }

        let base = self.get_gc_heap_base_global(func);
        let bound = self.get_gc_heap_bound_global(func);
        let memory = self.tunables.gc_heap_memory_type();
        let heap = self.heaps.push(HeapData {
            base,
            bound,
            pcc_memory_type: None,
            memory,
        });
        self.gc_heap = Some(heap);
        heap
    }
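
    // Taken together, the helpers above materialize the GC heap as a `Heap`
    // whose base and bound are loads from the `VMStoreContext`, roughly:
    //
    //   gv_store_ctx = load.ptr <vmctx-relative store context pointer>
    //   gv_base      = load.ptr gv_store_ctx + <gc heap base offset>
    //   gv_bound     = load.ptr gv_store_ctx + <gc heap current length offset>
    //
    // (A sketch: the real offsets come from `self.offsets.ptr`, and the base
    // load is additionally marked `readonly`/`can_move` when the tunables say
    // the GC heap cannot move.)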
1464
1465
/// Get the raw pointer of `gc_ref[offset]` bounds checked for an access of
1466
/// `size` bytes.
1467
///
1468
/// The given `gc_ref` must be a non-null, non-i31 GC reference.
1469
///
1470
/// If `check` is a `BoundsCheck::Object`, then it is the callers
1471
/// responsibility to ensure that `offset + access_size <= object_size`.
1472
///
1473
/// Returns a raw pointer to `gc_ref[offset]` -- not a raw pointer to the GC
1474
/// object itself (unless `offset` happens to be `0`). This raw pointer may
1475
/// be used to read or write up to as many bytes as described by `bound`. Do
1476
/// NOT attempt accesses bytes outside of `bound`; that may lead to
1477
/// unchecked out-of-bounds accesses.
1478
///
1479
/// This method is collector-agnostic.
1480
fn prepare_gc_ref_access(
1481
&mut self,
1482
builder: &mut FunctionBuilder,
1483
gc_ref: ir::Value,
1484
bounds_check: BoundsCheck,
1485
) -> ir::Value {
1486
log::trace!("prepare_gc_ref_access({gc_ref:?}, {bounds_check:?})");
1487
assert_eq!(builder.func.dfg.value_type(gc_ref), ir::types::I32);
1488
1489
let gc_heap = self.get_gc_heap(&mut builder.func);
1490
let gc_heap = self.heaps[gc_heap].clone();
1491
let result = match crate::bounds_checks::bounds_check_and_compute_addr(
1492
builder,
1493
self,
1494
&gc_heap,
1495
gc_ref,
1496
bounds_check,
1497
crate::TRAP_INTERNAL_ASSERT,
1498
) {
1499
Reachability::Reachable(v) => v,
1500
Reachability::Unreachable => {
1501
// We are now in unreachable code, but we don't want to plumb
1502
// through a bunch of `Reachability` through all of our callers,
1503
// so just assert we won't reach here and return `null`
1504
let null = builder.ins().iconst(self.pointer_type(), 0);
1505
builder.ins().trapz(null, crate::TRAP_INTERNAL_ASSERT);
1506
null
1507
}
1508
};
1509
log::trace!("prepare_gc_ref_access(..) -> {result:?}");
1510
result
1511
}
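
    // Conceptually, `prepare_gc_ref_access` guarantees that
    // `gc_ref + offset + access_size <= gc_heap_bound` (trapping with an
    // internal assert otherwise) and then returns the host pointer
    // `gc_heap_base + gc_ref + offset`. The precise instruction sequence is
    // emitted by `crate::bounds_checks::bounds_check_and_compute_addr` and
    // varies with the kind of `BoundsCheck` requested (this is a summary,
    // not the literal emitted CLIF).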
1512
1513
/// Emit checks (if necessary) for whether the given `gc_ref` is null or is
1514
/// an `i31ref`.
1515
///
1516
/// Takes advantage of static information based on `ty` as to whether the GC
1517
/// reference is nullable or can ever be an `i31`.
1518
///
1519
/// Returns an `ir::Value` that is an `i32` will be non-zero if the GC
1520
/// reference is null or is an `i31ref`; otherwise, it will be zero.
1521
///
1522
/// This method is collector-agnostic.
1523
#[cfg_attr(
1524
not(feature = "gc-drc"),
1525
expect(dead_code, reason = "easier to define")
1526
)]
1527
fn gc_ref_is_null_or_i31(
1528
&mut self,
1529
builder: &mut FunctionBuilder,
1530
ty: WasmRefType,
1531
gc_ref: ir::Value,
1532
) -> ir::Value {
1533
assert_eq!(builder.func.dfg.value_type(gc_ref), ir::types::I32);
1534
assert!(ty.is_vmgcref_type_and_not_i31());
1535
1536
let might_be_i31 = match ty.heap_type {
1537
// If we are definitely dealing with an i31, we shouldn't be
1538
// emitting dynamic checks for it, and the caller shouldn't call
1539
// this function. Should have been caught by the assertion at the
1540
// start of the function.
1541
WasmHeapType::I31 => unreachable!(),
1542
1543
// Could potentially be an i31.
1544
WasmHeapType::Any | WasmHeapType::Eq => true,
1545
1546
// If it is definitely a struct, array, or uninhabited type, then it
1547
// is definitely not an i31.
1548
WasmHeapType::Array
1549
| WasmHeapType::ConcreteArray(_)
1550
| WasmHeapType::Struct
1551
| WasmHeapType::ConcreteStruct(_)
1552
| WasmHeapType::None => false,
1553
1554
// Despite being a different type hierarchy, this *could* be an
1555
// `i31` if it is the result of
1556
//
1557
// (extern.convert_any (ref.i31 ...))
1558
WasmHeapType::Extern => true,
1559
1560
// Can only ever be `null`.
1561
WasmHeapType::NoExtern => false,
1562
1563
WasmHeapType::Exn | WasmHeapType::ConcreteExn(_) | WasmHeapType::NoExn => false,
1564
1565
// Wrong type hierarchy, and also funcrefs are not GC-managed
1566
// types. Should have been caught by the assertion at the start of
1567
// the function.
1568
WasmHeapType::Func | WasmHeapType::ConcreteFunc(_) | WasmHeapType::NoFunc => {
1569
unreachable!()
1570
}
1571
WasmHeapType::Cont | WasmHeapType::ConcreteCont(_) | WasmHeapType::NoCont => {
1572
unreachable!()
1573
}
1574
};
1575
1576
match (ty.nullable, might_be_i31) {
1577
// This GC reference statically cannot be null nor an i31. (Let
1578
// Cranelift's optimizer const-propagate this value and erase any
1579
// unnecessary control flow resulting from branching on this value.)
1580
(false, false) => builder.ins().iconst(ir::types::I32, 0),
1581
1582
// This GC reference is always non-null, but might be an i31.
1583
(false, true) => builder.ins().band_imm(gc_ref, i64::from(I31_DISCRIMINANT)),
1584
1585
// This GC reference might be null, but can never be an i31.
1586
(true, false) => builder.ins().icmp_imm(IntCC::Equal, gc_ref, 0),
1587
1588
// Fully general case: this GC reference could be either null or an
1589
// i31.
1590
(true, true) => {
1591
let is_i31 = builder.ins().band_imm(gc_ref, i64::from(I31_DISCRIMINANT));
1592
let is_null = builder.ins().icmp_imm(IntCC::Equal, gc_ref, 0);
1593
let is_null = builder.ins().uextend(ir::types::I32, is_null);
1594
builder.ins().bor(is_i31, is_null)
1595
}
1596
}
1597
}
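
    // A sketch of the CLIF that `is_subtype` (below) emits, with the
    // equal-types fast path inline and the full check deferred to the
    // `is_subtype` libcall (block and value names invented for illustration):
    //
    //   current_block:
    //     same   = icmp eq a, b
    //     same32 = uextend.i32 same
    //     brif same32, continue_block(same32), diff_tys_block
    //
    //   diff_tys_block:
    //     result = call is_subtype_libcall(vmctx, a, b)
    //     jump continue_block(result)
    //
    //   continue_block(result: i32):
    //     ...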

    // Emit code to check whether `a <: b` for two `VMSharedTypeIndex`es.
    pub(crate) fn is_subtype(
        &mut self,
        builder: &mut FunctionBuilder<'_>,
        a: ir::Value,
        b: ir::Value,
    ) -> ir::Value {
        log::trace!("is_subtype({a:?}, {b:?})");

        let diff_tys_block = builder.create_block();
        let continue_block = builder.create_block();

        // Current block: fast path for when `a == b`.
        log::trace!("is_subtype: fast path check for exact same types");
        let same_ty = builder.ins().icmp(IntCC::Equal, a, b);
        let same_ty = builder.ins().uextend(ir::types::I32, same_ty);
        builder.ins().brif(
            same_ty,
            continue_block,
            &[same_ty.into()],
            diff_tys_block,
            &[],
        );

        // Different types block: fall back to the `is_subtype` libcall.
        builder.switch_to_block(diff_tys_block);
        log::trace!("is_subtype: slow path to do full `is_subtype` libcall");
        let is_subtype = self.builtin_functions.is_subtype(builder.func);
        let vmctx = self.vmctx_val(&mut builder.cursor());
        let call_inst = builder.ins().call(is_subtype, &[vmctx, a, b]);
        let result = builder.func.dfg.first_result(call_inst);
        builder.ins().jump(continue_block, &[result.into()]);

        // Continue block: join point for the result.
        builder.switch_to_block(continue_block);
        let result = builder.append_block_param(continue_block, ir::types::I32);
        log::trace!("is_subtype(..) -> {result:?}");

        builder.seal_block(diff_tys_block);
        builder.seal_block(continue_block);

        result
    }
}