GitHub Repository: bytecodealliance/wasmtime
Path: blob/main/crates/cranelift/src/func_environ/gc/enabled/null.rs
//! Compiler for the null collector.
//!
//! Note that we don't need to mark any value as requiring inclusion in stack
//! maps inside this module, because the null collector doesn't ever collect
//! anything.
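//!
//! Allocation is therefore just a bump allocator over the GC heap: round the
//! bump index up to the requested alignment, bounds-check it against the
//! heap's current limit, and fall back to the `grow_gc_heap` builtin when the
//! object doesn't fit (see `emit_inline_alloc` below). Nothing is ever freed.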

use super::*;
use crate::func_environ::FuncEnvironment;
use cranelift_codegen::ir::{self, InstBuilder};
use cranelift_frontend::FunctionBuilder;
use wasmtime_environ::VMSharedTypeIndex;
use wasmtime_environ::null::{EXCEPTION_TAG_DEFINED_OFFSET, EXCEPTION_TAG_INSTANCE_OFFSET};
use wasmtime_environ::{
    GcTypeLayouts, ModuleInternedTypeIndex, PtrSize, TypeIndex, VMGcKind, WasmRefType, WasmResult,
    null::NullTypeLayouts,
};

#[derive(Default)]
pub struct NullCompiler {
    layouts: NullTypeLayouts,
}

impl NullCompiler {
    /// Emit code to perform an allocation inline.
    ///
    /// `kind` may be `VMGcKind::ExternRef` iff `ty` is `None`.
    ///
    /// `size` must be greater than or equal to `size_of(VMGcHeader)`.
    ///
    /// `align` must be greater than or equal to `align_of(VMGcHeader)` and a
    /// power of two.
    ///
    /// The resulting values are
    ///
    /// 1. The `VMGcRef` indexing into the GC heap.
    ///
    /// 2. The raw pointer to the start of the object inside the GC heap. This
    ///    may be used to access up to `size` bytes.
    fn emit_inline_alloc(
        &mut self,
        func_env: &mut FuncEnvironment<'_>,
        builder: &mut FunctionBuilder,
        kind: VMGcKind,
        ty: Option<ModuleInternedTypeIndex>,
        size: ir::Value,
        align: ir::Value,
    ) -> (ir::Value, ir::Value) {
        log::trace!("emit_inline_alloc(kind={kind:?}, ty={ty:?}, size={size}, align={align})");

        assert_eq!(builder.func.dfg.value_type(size), ir::types::I32);
        assert_eq!(builder.func.dfg.value_type(align), ir::types::I32);

        let current_block = builder.current_block().unwrap();
        let continue_block = builder.create_block();
        let grow_block = builder.create_block();

        builder.ensure_inserted_block();
        builder.insert_block_after(continue_block, current_block);
        builder.insert_block_after(grow_block, continue_block);
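
        // The emitted control flow looks like this (a sketch; the block names
        // are the Cranelift blocks created just above):
        //
        //     current_block:
        //         ...bump, align, and bounds-check the allocation...
        //         brif is_in_bounds, continue_block, grow_block
        //
        //     grow_block (cold):
        //         call grow_gc_heap(vmctx, bytes_needed)
        //         jump continue_block
        //
        //     continue_block:
        //         ...write the object header and update the bump index...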

        // Check that the size fits in the unused bits of a `VMGcKind`, since
        // the null collector stores the object's size there.
        let mask = builder
            .ins()
            .iconst(ir::types::I32, i64::from(VMGcKind::MASK));
        let masked = builder.ins().band(size, mask);
        func_env.trapnz(builder, masked, crate::TRAP_ALLOCATION_TOO_LARGE);
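        // (The header word written in `continue_block` below is `kind | size`,
        // so a `size` with any bit of `VMGcKind::MASK` set would corrupt the
        // kind; hence the trap above.)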

        // Load the bump "pointer" (it is actually an index into the GC heap,
        // not a raw pointer).
        let pointer_type = func_env.pointer_type();
        let vmctx = func_env.vmctx_val(&mut builder.cursor());
        let ptr_to_next = builder.ins().load(
            pointer_type,
            ir::MemFlags::trusted().with_readonly(),
            vmctx,
            i32::from(func_env.offsets.ptr.vmctx_gc_heap_data()),
        );
        let next = builder
            .ins()
            .load(ir::types::I32, ir::MemFlags::trusted(), ptr_to_next, 0);

        // Increment the bump "pointer" to the requested alignment:
        //
        //     (next + (align - 1)) & !(align - 1)
        //
        // Overflow means that the alignment is too large to satisfy, so trap
        // accordingly. Note that `align - 1` can't overflow because `align` is
        // a power of two.
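        //
        // For example, with `next = 13` and `align = 8`:
        //
        //     (13 + 7) & !7 = 0b10100 & 0b…11000 = 0b10000 = 16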
        let minus_one = builder.ins().iconst(ir::types::I32, -1);
        let align_minus_one = builder.ins().iadd(align, minus_one);
        let next_plus_align_minus_one = func_env.uadd_overflow_trap(
            builder,
            next,
            align_minus_one,
            crate::TRAP_ALLOCATION_TOO_LARGE,
        );
        let not_align_minus_one = builder.ins().bnot(align_minus_one);
        let aligned = builder
            .ins()
            .band(next_plus_align_minus_one, not_align_minus_one);

        // Check whether the allocation fits in the heap space we have left.
        let end_of_object =
            func_env.uadd_overflow_trap(builder, aligned, size, crate::TRAP_ALLOCATION_TOO_LARGE);
        let uext_end_of_object = uextend_i32_to_pointer_type(builder, pointer_type, end_of_object);
        let bound = func_env.get_gc_heap_bound(builder);
        let is_in_bounds = builder.ins().icmp(
            ir::condcodes::IntCC::UnsignedLessThanOrEqual,
            uext_end_of_object,
            bound,
        );
        builder
            .ins()
            .brif(is_in_bounds, continue_block, &[], grow_block, &[]);

        log::trace!("emit_inline_alloc: grow_block");
        builder.switch_to_block(grow_block);
        builder.seal_block(grow_block);
        builder.set_cold_block(grow_block);
        let grow_gc_heap_builtin = func_env.builtin_functions.grow_gc_heap(builder.func);
        let vmctx = func_env.vmctx_val(&mut builder.cursor());
        let bytes_needed = builder.ins().isub(uext_end_of_object, bound);
        let bytes_needed = match func_env.pointer_type() {
            ir::types::I32 => builder.ins().uextend(ir::types::I64, bytes_needed),
            ir::types::I64 => bytes_needed,
            _ => unreachable!(),
        };
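        // (The `grow_gc_heap` builtin takes the number of additional bytes
        // needed as a 64-bit value, hence the zero-extension above when the
        // target's pointer type is `i32`.)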
        builder
            .ins()
            .call(grow_gc_heap_builtin, &[vmctx, bytes_needed]);
        builder.ins().jump(continue_block, &[]);

        // Write the header, update the bump "pointer", and return the newly
        // allocated object.
        log::trace!("emit_inline_alloc: continue_block");
        builder.switch_to_block(continue_block);
        builder.seal_block(continue_block);

        // TODO: Ideally we would use a single `i64` store to write both the
        // header and the type index, but that requires generating different
        // code for big-endian architectures, and I haven't bothered doing that
        // yet.
        let base = func_env.get_gc_heap_base(builder);
        let uext_aligned = uextend_i32_to_pointer_type(builder, pointer_type, aligned);
        let ptr_to_object = builder.ins().iadd(base, uext_aligned);
        let kind = builder
            .ins()
            .iconst(ir::types::I32, i64::from(kind.as_u32()));
        let kind_and_size = builder.ins().bor(kind, size);
        let ty = match ty {
            Some(ty) => func_env.module_interned_to_shared_ty(&mut builder.cursor(), ty),
            None => builder.ins().iconst(
                func_env.vmshared_type_index_ty(),
                i64::from(VMSharedTypeIndex::reserved_value().as_bits()),
            ),
        };
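        // (`ty` is `None` only for `externref`s, per the doc comment on this
        // function, so `VMSharedTypeIndex::reserved_value()` acts as the
        // "no Wasm type" sentinel in the header's type-index slot.)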
        builder.ins().store(
            ir::MemFlags::trusted(),
            kind_and_size,
            ptr_to_object,
            i32::try_from(wasmtime_environ::VM_GC_HEADER_KIND_OFFSET).unwrap(),
        );
        builder.ins().store(
            ir::MemFlags::trusted(),
            ty,
            ptr_to_object,
            i32::try_from(wasmtime_environ::VM_GC_HEADER_TYPE_INDEX_OFFSET).unwrap(),
        );
        builder
            .ins()
            .store(ir::MemFlags::trusted(), end_of_object, ptr_to_next, 0);

        log::trace!("emit_inline_alloc(..) -> ({aligned}, {ptr_to_object})");
        (aligned, ptr_to_object)
    }
}

impl GcCompiler for NullCompiler {
    fn layouts(&self) -> &dyn GcTypeLayouts {
        &self.layouts
    }

    fn alloc_array(
        &mut self,
        func_env: &mut FuncEnvironment<'_>,
        builder: &mut FunctionBuilder<'_>,
        array_type_index: TypeIndex,
        init: super::ArrayInit<'_>,
    ) -> WasmResult<ir::Value> {
        let interned_type_index =
            func_env.module.types[array_type_index].unwrap_module_type_index();
        let ptr_ty = func_env.pointer_type();

        let len_offset = gc_compiler(func_env)?.layouts().array_length_field_offset();
        let array_layout = func_env.array_layout(interned_type_index).clone();
        let base_size = array_layout.base_size;
        let align = array_layout.align;
        let len_to_elems_delta = base_size.checked_sub(len_offset).unwrap();

        // First, compute the array's total size from its base size, element
        // size, and length.
        let len = init.len(&mut builder.cursor());
        let size = emit_array_size(func_env, builder, &array_layout, len);
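        // (Conceptually this computes `size = base_size + len * elem_size`;
        // the exact emitted code, including any overflow checks, lives in
        // `emit_array_size`.)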

        // Next, allocate the array.
        assert!(align.is_power_of_two());
        let align = builder.ins().iconst(ir::types::I32, i64::from(align));
        let (gc_ref, ptr_to_object) = self.emit_inline_alloc(
            func_env,
            builder,
            VMGcKind::ArrayRef,
            Some(interned_type_index),
            size,
            align,
        );

        // Write the array's length into its field.
        //
        // Note: we don't need to bounds-check the GC ref access here, because
        // the result of the inline allocation is trusted and we aren't reading
        // any pointers or offsets out from the (untrusted) GC heap.
        let len_addr = builder.ins().iadd_imm(ptr_to_object, i64::from(len_offset));
        let len = init.len(&mut builder.cursor());
        builder
            .ins()
            .store(ir::MemFlags::trusted(), len, len_addr, 0);

        // Finally, initialize the elements.
        let len_to_elems_delta = builder.ins().iconst(ptr_ty, i64::from(len_to_elems_delta));
        let elems_addr = builder.ins().iadd(len_addr, len_to_elems_delta);
        init.initialize(
            func_env,
            builder,
            interned_type_index,
            base_size,
            size,
            elems_addr,
            |func_env, builder, elem_ty, elem_addr, val| {
                write_field_at_addr(func_env, builder, elem_ty, elem_addr, val)
            },
        )?;

        Ok(gc_ref)
    }

    fn alloc_struct(
        &mut self,
        func_env: &mut FuncEnvironment<'_>,
        builder: &mut FunctionBuilder<'_>,
        struct_type_index: TypeIndex,
        field_vals: &[ir::Value],
    ) -> WasmResult<ir::Value> {
        let interned_type_index =
            func_env.module.types[struct_type_index].unwrap_module_type_index();
        let struct_layout = func_env.struct_or_exn_layout(interned_type_index);

        // Copy some stuff out of the struct layout to avoid borrowing issues.
        let struct_size = struct_layout.size;
        let struct_align = struct_layout.align;

        assert_eq!(VMGcKind::MASK & struct_size, 0);
        assert_eq!(VMGcKind::UNUSED_MASK & struct_size, struct_size);
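        // (These two asserts are the same condition viewed from both sides:
        // since `UNUSED_MASK` is the complement of `MASK`, the statically
        // known struct size must fit entirely in the header bits the kind
        // doesn't use, mirroring the dynamic check in `emit_inline_alloc`.)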
        let struct_size_val = builder.ins().iconst(ir::types::I32, i64::from(struct_size));

        let align = builder
            .ins()
            .iconst(ir::types::I32, i64::from(struct_align));

        let (struct_ref, raw_struct_pointer) = self.emit_inline_alloc(
            func_env,
            builder,
            VMGcKind::StructRef,
            Some(interned_type_index),
            struct_size_val,
            align,
        );

        // Initialize the struct's fields.
        //
        // Note: we don't need to bounds-check the GC ref access here, because
        // the result of the inline allocation is trusted and we aren't reading
        // any pointers or offsets out from the (untrusted) GC heap.
        initialize_struct_fields(
            func_env,
            builder,
            interned_type_index,
            raw_struct_pointer,
            field_vals,
            |func_env, builder, ty, field_addr, val| {
                write_field_at_addr(func_env, builder, ty, field_addr, val)
            },
        )?;

        Ok(struct_ref)
    }

    fn alloc_exn(
        &mut self,
        func_env: &mut FuncEnvironment<'_>,
        builder: &mut FunctionBuilder<'_>,
        tag_index: TagIndex,
        field_vals: &[ir::Value],
        instance_id: ir::Value,
        tag: ir::Value,
    ) -> WasmResult<ir::Value> {
        let interned_type_index = func_env.module.tags[tag_index]
            .exception
            .unwrap_module_type_index();
        let exn_layout = func_env.struct_or_exn_layout(interned_type_index);

        // Copy some stuff out of the exception layout to avoid borrowing issues.
        let exn_size = exn_layout.size;
        let exn_align = exn_layout.align;

        assert_eq!(VMGcKind::MASK & exn_size, 0);
        assert_eq!(VMGcKind::UNUSED_MASK & exn_size, exn_size);
        let exn_size_val = builder.ins().iconst(ir::types::I32, i64::from(exn_size));

        let align = builder.ins().iconst(ir::types::I32, i64::from(exn_align));

        let (exn_ref, raw_exn_pointer) = self.emit_inline_alloc(
            func_env,
            builder,
            VMGcKind::ExnRef,
            Some(interned_type_index),
            exn_size_val,
            align,
        );

        // Initialize the exception object's fields.
        //
        // Note: we don't need to bounds-check the GC ref access here, because
        // the result of the inline allocation is trusted and we aren't reading
        // any pointers or offsets out from the (untrusted) GC heap.
        initialize_struct_fields(
            func_env,
            builder,
            interned_type_index,
            raw_exn_pointer,
            field_vals,
            |func_env, builder, ty, field_addr, val| {
                write_field_at_addr(func_env, builder, ty, field_addr, val)
            },
        )?;

        // Initialize the tag fields.
        let instance_id_addr = builder
            .ins()
            .iadd_imm(raw_exn_pointer, i64::from(EXCEPTION_TAG_INSTANCE_OFFSET));
        write_field_at_addr(
            func_env,
            builder,
            WasmStorageType::Val(WasmValType::I32),
            instance_id_addr,
            instance_id,
        )?;
        let tag_addr = builder
            .ins()
            .iadd_imm(raw_exn_pointer, i64::from(EXCEPTION_TAG_DEFINED_OFFSET));
        write_field_at_addr(
            func_env,
            builder,
            WasmStorageType::Val(WasmValType::I32),
            tag_addr,
            tag,
        )?;
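        // (Together, these two `i32` fields, the defining instance's ID and
        // the tag's index within that instance, identify which tag this
        // exception was created with.)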

        Ok(exn_ref)
    }

    fn translate_read_gc_reference(
        &mut self,
        _func_env: &mut FuncEnvironment<'_>,
        builder: &mut FunctionBuilder,
        _ty: WasmRefType,
        src: ir::Value,
        flags: ir::MemFlags,
    ) -> WasmResult<ir::Value> {
        // NB: Don't use `unbarriered_load_gc_ref` here because we don't need to
        // mark the value as requiring inclusion in stack maps.
        Ok(builder.ins().load(ir::types::I32, flags, src, 0))
    }

    fn translate_write_gc_reference(
        &mut self,
        _func_env: &mut FuncEnvironment<'_>,
        builder: &mut FunctionBuilder,
        ty: WasmRefType,
        dst: ir::Value,
        new_val: ir::Value,
        flags: ir::MemFlags,
    ) -> WasmResult<()> {
        unbarriered_store_gc_ref(builder, ty.heap_type, dst, new_val, flags)
    }
}