GitHub Repository: bytecodealliance/wasmtime
Path: blob/main/winch/codegen/src/visitor.rs
//! This module is the central place for machine code emission.
//! It defines an implementation of wasmparser's `VisitOperator` trait
//! for `CodeGen`, which provides one visitor method per opcode; each
//! visitor validates the operator and dispatches to the corresponding
//! machine code emitter.

use crate::abi::RetArea;
use crate::codegen::{
    Callee, CodeGen, CodeGenError, ConditionalBranch, ControlStackFrame, Emission, FnCall,
    UnconditionalBranch, control_index,
};
use crate::masm::{
    AtomicWaitKind, DivKind, Extend, ExtractLaneKind, FloatCmpKind, IntCmpKind, LoadKind,
    MacroAssembler, MulWideKind, OperandSize, RegImm, RemKind, ReplaceLaneKind, RmwOp,
    RoundingMode, SPOffset, ShiftKind, Signed, SplatKind, SplatLoadKind, StoreKind, TruncKind,
    V128AbsKind, V128AddKind, V128ConvertKind, V128ExtAddKind, V128ExtMulKind, V128ExtendKind,
    V128LoadExtendKind, V128MaxKind, V128MinKind, V128MulKind, V128NarrowKind, V128NegKind,
    V128SubKind, V128TruncKind, VectorCompareKind, VectorEqualityKind, Zero,
};
use crate::reg::{Reg, writable};
use crate::stack::{TypedReg, Val};
use crate::{Result, bail, ensure, format_err};
use regalloc2::RegClass;
use smallvec::{SmallVec, smallvec};
use wasmparser::{
    BlockType, BrTable, Ieee32, Ieee64, MemArg, V128, VisitOperator, VisitSimdOperator,
};
use wasmtime_cranelift::TRAP_INDIRECT_CALL_TO_NULL;
use wasmtime_environ::{
    FUNCREF_INIT_BIT, FuncIndex, GlobalIndex, MemoryIndex, TableIndex, TypeIndex, WasmHeapType,
    WasmValType,
};

/// A macro to define unsupported WebAssembly operators.
///
/// This macro calls itself recursively:
/// 1. It no-ops when matching a supported operator.
/// 2. It defines the visitor function and returns an
///    unimplemented-instruction error when matching an unsupported
///    operator.
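///
/// As an illustrative sketch (not the literal expansion), a hypothetical
/// unsupported operator `Foo { imm: u32 }` with visitor `visit_foo` would
/// expand to roughly:
///
/// ```ignore
/// fn visit_foo(&mut self, imm: u32) -> Self::Output {
///     let _ = imm;
///     Err(format_err!(CodeGenError::unimplemented_wasm_instruction()))
/// }
/// ```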
macro_rules! def_unsupported {
    ($( @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident $ann:tt)*) => {
        $(
            def_unsupported!(
                emit
                $op

                fn $visit(&mut self $($(,$arg: $argty)*)?) -> Self::Output {
                    $($(let _ = $arg;)*)?

                    Err(format_err!(CodeGenError::unimplemented_wasm_instruction()))
                }
            );
        )*
    };

    (emit I32Const $($rest:tt)*) => {};
    (emit I64Const $($rest:tt)*) => {};
    (emit F32Const $($rest:tt)*) => {};
    (emit F64Const $($rest:tt)*) => {};
    (emit V128Const $($rest:tt)*) => {};
    (emit F32Add $($rest:tt)*) => {};
    (emit F64Add $($rest:tt)*) => {};
    (emit F32Sub $($rest:tt)*) => {};
    (emit F64Sub $($rest:tt)*) => {};
    (emit F32Mul $($rest:tt)*) => {};
    (emit F64Mul $($rest:tt)*) => {};
    (emit F32Div $($rest:tt)*) => {};
    (emit F64Div $($rest:tt)*) => {};
    (emit F32Min $($rest:tt)*) => {};
    (emit F64Min $($rest:tt)*) => {};
    (emit F32Max $($rest:tt)*) => {};
    (emit F64Max $($rest:tt)*) => {};
    (emit F32Copysign $($rest:tt)*) => {};
    (emit F64Copysign $($rest:tt)*) => {};
    (emit F32Abs $($rest:tt)*) => {};
    (emit F64Abs $($rest:tt)*) => {};
    (emit F32Neg $($rest:tt)*) => {};
    (emit F64Neg $($rest:tt)*) => {};
    (emit F32Floor $($rest:tt)*) => {};
    (emit F64Floor $($rest:tt)*) => {};
    (emit F32Ceil $($rest:tt)*) => {};
    (emit F64Ceil $($rest:tt)*) => {};
    (emit F32Nearest $($rest:tt)*) => {};
    (emit F64Nearest $($rest:tt)*) => {};
    (emit F32Trunc $($rest:tt)*) => {};
    (emit F64Trunc $($rest:tt)*) => {};
    (emit F32Sqrt $($rest:tt)*) => {};
    (emit F64Sqrt $($rest:tt)*) => {};
    (emit F32Eq $($rest:tt)*) => {};
    (emit F64Eq $($rest:tt)*) => {};
    (emit F32Ne $($rest:tt)*) => {};
    (emit F64Ne $($rest:tt)*) => {};
    (emit F32Lt $($rest:tt)*) => {};
    (emit F64Lt $($rest:tt)*) => {};
    (emit F32Gt $($rest:tt)*) => {};
    (emit F64Gt $($rest:tt)*) => {};
    (emit F32Le $($rest:tt)*) => {};
    (emit F64Le $($rest:tt)*) => {};
    (emit F32Ge $($rest:tt)*) => {};
    (emit F64Ge $($rest:tt)*) => {};
    (emit F32ConvertI32S $($rest:tt)*) => {};
    (emit F32ConvertI32U $($rest:tt)*) => {};
    (emit F32ConvertI64S $($rest:tt)*) => {};
    (emit F32ConvertI64U $($rest:tt)*) => {};
    (emit F64ConvertI32S $($rest:tt)*) => {};
    (emit F64ConvertI32U $($rest:tt)*) => {};
    (emit F64ConvertI64S $($rest:tt)*) => {};
    (emit F64ConvertI64U $($rest:tt)*) => {};
    (emit F32ReinterpretI32 $($rest:tt)*) => {};
    (emit F64ReinterpretI64 $($rest:tt)*) => {};
    (emit F32DemoteF64 $($rest:tt)*) => {};
    (emit F64PromoteF32 $($rest:tt)*) => {};
    (emit I32Add $($rest:tt)*) => {};
    (emit I64Add $($rest:tt)*) => {};
    (emit I32Sub $($rest:tt)*) => {};
    (emit I32Mul $($rest:tt)*) => {};
    (emit I32DivS $($rest:tt)*) => {};
    (emit I32DivU $($rest:tt)*) => {};
    (emit I64DivS $($rest:tt)*) => {};
    (emit I64DivU $($rest:tt)*) => {};
    (emit I64RemU $($rest:tt)*) => {};
    (emit I64RemS $($rest:tt)*) => {};
    (emit I32RemU $($rest:tt)*) => {};
    (emit I32RemS $($rest:tt)*) => {};
    (emit I64Mul $($rest:tt)*) => {};
    (emit I64Sub $($rest:tt)*) => {};
    (emit I32Eq $($rest:tt)*) => {};
    (emit I64Eq $($rest:tt)*) => {};
    (emit I32Ne $($rest:tt)*) => {};
    (emit I64Ne $($rest:tt)*) => {};
    (emit I32LtS $($rest:tt)*) => {};
    (emit I64LtS $($rest:tt)*) => {};
    (emit I32LtU $($rest:tt)*) => {};
    (emit I64LtU $($rest:tt)*) => {};
    (emit I32LeS $($rest:tt)*) => {};
    (emit I64LeS $($rest:tt)*) => {};
    (emit I32LeU $($rest:tt)*) => {};
    (emit I64LeU $($rest:tt)*) => {};
    (emit I32GtS $($rest:tt)*) => {};
    (emit I64GtS $($rest:tt)*) => {};
    (emit I32GtU $($rest:tt)*) => {};
    (emit I64GtU $($rest:tt)*) => {};
    (emit I32GeS $($rest:tt)*) => {};
    (emit I64GeS $($rest:tt)*) => {};
    (emit I32GeU $($rest:tt)*) => {};
    (emit I64GeU $($rest:tt)*) => {};
    (emit I32Eqz $($rest:tt)*) => {};
    (emit I64Eqz $($rest:tt)*) => {};
    (emit I32And $($rest:tt)*) => {};
    (emit I64And $($rest:tt)*) => {};
    (emit I32Or $($rest:tt)*) => {};
    (emit I64Or $($rest:tt)*) => {};
    (emit I32Xor $($rest:tt)*) => {};
    (emit I64Xor $($rest:tt)*) => {};
    (emit I32Shl $($rest:tt)*) => {};
    (emit I64Shl $($rest:tt)*) => {};
    (emit I32ShrS $($rest:tt)*) => {};
    (emit I64ShrS $($rest:tt)*) => {};
    (emit I32ShrU $($rest:tt)*) => {};
    (emit I64ShrU $($rest:tt)*) => {};
    (emit I32Rotl $($rest:tt)*) => {};
    (emit I64Rotl $($rest:tt)*) => {};
    (emit I32Rotr $($rest:tt)*) => {};
    (emit I64Rotr $($rest:tt)*) => {};
    (emit I32Clz $($rest:tt)*) => {};
    (emit I64Clz $($rest:tt)*) => {};
    (emit I32Ctz $($rest:tt)*) => {};
    (emit I64Ctz $($rest:tt)*) => {};
    (emit I32Popcnt $($rest:tt)*) => {};
    (emit I64Popcnt $($rest:tt)*) => {};
    (emit I32WrapI64 $($rest:tt)*) => {};
    (emit I64ExtendI32S $($rest:tt)*) => {};
    (emit I64ExtendI32U $($rest:tt)*) => {};
    (emit I32Extend8S $($rest:tt)*) => {};
    (emit I32Extend16S $($rest:tt)*) => {};
    (emit I64Extend8S $($rest:tt)*) => {};
    (emit I64Extend16S $($rest:tt)*) => {};
    (emit I64Extend32S $($rest:tt)*) => {};
    (emit I32TruncF32S $($rest:tt)*) => {};
    (emit I32TruncF32U $($rest:tt)*) => {};
    (emit I32TruncF64S $($rest:tt)*) => {};
    (emit I32TruncF64U $($rest:tt)*) => {};
    (emit I64TruncF32S $($rest:tt)*) => {};
    (emit I64TruncF32U $($rest:tt)*) => {};
    (emit I64TruncF64S $($rest:tt)*) => {};
    (emit I64TruncF64U $($rest:tt)*) => {};
    (emit I32ReinterpretF32 $($rest:tt)*) => {};
    (emit I64ReinterpretF64 $($rest:tt)*) => {};
    (emit LocalGet $($rest:tt)*) => {};
    (emit LocalSet $($rest:tt)*) => {};
    (emit Call $($rest:tt)*) => {};
    (emit End $($rest:tt)*) => {};
    (emit Nop $($rest:tt)*) => {};
    (emit If $($rest:tt)*) => {};
    (emit Else $($rest:tt)*) => {};
    (emit Block $($rest:tt)*) => {};
    (emit Loop $($rest:tt)*) => {};
    (emit Br $($rest:tt)*) => {};
    (emit BrIf $($rest:tt)*) => {};
    (emit Return $($rest:tt)*) => {};
    (emit Unreachable $($rest:tt)*) => {};
    (emit LocalTee $($rest:tt)*) => {};
    (emit GlobalGet $($rest:tt)*) => {};
    (emit GlobalSet $($rest:tt)*) => {};
    (emit Select $($rest:tt)*) => {};
    (emit Drop $($rest:tt)*) => {};
    (emit BrTable $($rest:tt)*) => {};
    (emit CallIndirect $($rest:tt)*) => {};
    (emit TableInit $($rest:tt)*) => {};
    (emit TableCopy $($rest:tt)*) => {};
    (emit TableGet $($rest:tt)*) => {};
    (emit TableSet $($rest:tt)*) => {};
    (emit TableGrow $($rest:tt)*) => {};
    (emit TableSize $($rest:tt)*) => {};
    (emit TableFill $($rest:tt)*) => {};
    (emit ElemDrop $($rest:tt)*) => {};
    (emit MemoryInit $($rest:tt)*) => {};
    (emit MemoryCopy $($rest:tt)*) => {};
    (emit DataDrop $($rest:tt)*) => {};
    (emit MemoryFill $($rest:tt)*) => {};
    (emit MemorySize $($rest:tt)*) => {};
    (emit MemoryGrow $($rest:tt)*) => {};
    (emit I32Load $($rest:tt)*) => {};
    (emit I32Load8S $($rest:tt)*) => {};
    (emit I32Load8U $($rest:tt)*) => {};
    (emit I32Load16S $($rest:tt)*) => {};
    (emit I32Load16U $($rest:tt)*) => {};
    (emit I64Load8S $($rest:tt)*) => {};
    (emit I64Load8U $($rest:tt)*) => {};
    (emit I64Load16S $($rest:tt)*) => {};
    (emit I64Load16U $($rest:tt)*) => {};
    (emit I64Load32S $($rest:tt)*) => {};
    (emit I64Load32U $($rest:tt)*) => {};
    (emit I64Load $($rest:tt)*) => {};
    (emit I32Store $($rest:tt)*) => {};
    (emit I32Store8 $($rest:tt)*) => {};
    (emit I32Store16 $($rest:tt)*) => {};
    (emit I64Store $($rest:tt)*) => {};
    (emit I64Store8 $($rest:tt)*) => {};
    (emit I64Store16 $($rest:tt)*) => {};
    (emit I64Store32 $($rest:tt)*) => {};
    (emit F32Load $($rest:tt)*) => {};
    (emit F32Store $($rest:tt)*) => {};
    (emit F64Load $($rest:tt)*) => {};
    (emit F64Store $($rest:tt)*) => {};
    (emit I32TruncSatF32S $($rest:tt)*) => {};
    (emit I32TruncSatF32U $($rest:tt)*) => {};
    (emit I32TruncSatF64S $($rest:tt)*) => {};
    (emit I32TruncSatF64U $($rest:tt)*) => {};
    (emit I64TruncSatF32S $($rest:tt)*) => {};
    (emit I64TruncSatF32U $($rest:tt)*) => {};
    (emit I64TruncSatF64S $($rest:tt)*) => {};
    (emit I64TruncSatF64U $($rest:tt)*) => {};
    (emit V128Load $($rest:tt)*) => {};
    (emit V128Store $($rest:tt)*) => {};
    (emit I64Add128 $($rest:tt)*) => {};
    (emit I64Sub128 $($rest:tt)*) => {};
    (emit I64MulWideS $($rest:tt)*) => {};
    (emit I64MulWideU $($rest:tt)*) => {};
    (emit I32AtomicLoad8U $($rest:tt)*) => {};
    (emit I32AtomicLoad16U $($rest:tt)*) => {};
    (emit I32AtomicLoad $($rest:tt)*) => {};
    (emit I64AtomicLoad8U $($rest:tt)*) => {};
    (emit I64AtomicLoad16U $($rest:tt)*) => {};
    (emit I64AtomicLoad32U $($rest:tt)*) => {};
    (emit I64AtomicLoad $($rest:tt)*) => {};
    (emit V128Load8x8S $($rest:tt)*) => {};
    (emit V128Load8x8U $($rest:tt)*) => {};
    (emit V128Load16x4S $($rest:tt)*) => {};
    (emit V128Load16x4U $($rest:tt)*) => {};
    (emit V128Load32x2S $($rest:tt)*) => {};
    (emit V128Load32x2U $($rest:tt)*) => {};
    (emit V128Load8Splat $($rest:tt)*) => {};
    (emit V128Load16Splat $($rest:tt)*) => {};
    (emit V128Load32Splat $($rest:tt)*) => {};
    (emit V128Load64Splat $($rest:tt)*) => {};
    (emit I8x16Splat $($rest:tt)*) => {};
    (emit I16x8Splat $($rest:tt)*) => {};
    (emit I32x4Splat $($rest:tt)*) => {};
    (emit I64x2Splat $($rest:tt)*) => {};
    (emit F32x4Splat $($rest:tt)*) => {};
    (emit F64x2Splat $($rest:tt)*) => {};
    (emit I32AtomicStore8 $($rest:tt)*) => {};
    (emit I32AtomicStore16 $($rest:tt)*) => {};
    (emit I32AtomicStore $($rest:tt)*) => {};
    (emit I64AtomicStore8 $($rest:tt)*) => {};
    (emit I64AtomicStore16 $($rest:tt)*) => {};
    (emit I64AtomicStore32 $($rest:tt)*) => {};
    (emit I64AtomicStore $($rest:tt)*) => {};
    (emit I32AtomicRmw8AddU $($rest:tt)*) => {};
    (emit I32AtomicRmw16AddU $($rest:tt)*) => {};
    (emit I32AtomicRmwAdd $($rest:tt)*) => {};
    (emit I64AtomicRmw8AddU $($rest:tt)*) => {};
    (emit I64AtomicRmw16AddU $($rest:tt)*) => {};
    (emit I64AtomicRmw32AddU $($rest:tt)*) => {};
    (emit I64AtomicRmwAdd $($rest:tt)*) => {};
    (emit I8x16Shuffle $($rest:tt)*) => {};
    (emit I8x16Swizzle $($rest:tt)*) => {};
    (emit I32AtomicRmw8SubU $($rest:tt)*) => {};
    (emit I32AtomicRmw16SubU $($rest:tt)*) => {};
    (emit I32AtomicRmwSub $($rest:tt)*) => {};
    (emit I64AtomicRmw8SubU $($rest:tt)*) => {};
    (emit I64AtomicRmw16SubU $($rest:tt)*) => {};
    (emit I64AtomicRmw32SubU $($rest:tt)*) => {};
    (emit I64AtomicRmwSub $($rest:tt)*) => {};
    (emit I32AtomicRmw8XchgU $($rest:tt)*) => {};
    (emit I32AtomicRmw16XchgU $($rest:tt)*) => {};
    (emit I32AtomicRmwXchg $($rest:tt)*) => {};
    (emit I64AtomicRmw8XchgU $($rest:tt)*) => {};
    (emit I64AtomicRmw16XchgU $($rest:tt)*) => {};
    (emit I64AtomicRmw32XchgU $($rest:tt)*) => {};
    (emit I64AtomicRmwXchg $($rest:tt)*) => {};
    (emit I8x16ExtractLaneS $($rest:tt)*) => {};
    (emit I8x16ExtractLaneU $($rest:tt)*) => {};
    (emit I16x8ExtractLaneS $($rest:tt)*) => {};
    (emit I16x8ExtractLaneU $($rest:tt)*) => {};
    (emit I32x4ExtractLane $($rest:tt)*) => {};
    (emit I64x2ExtractLane $($rest:tt)*) => {};
    (emit F32x4ExtractLane $($rest:tt)*) => {};
    (emit F64x2ExtractLane $($rest:tt)*) => {};
    (emit I32AtomicRmw8AndU $($rest:tt)*) => {};
    (emit I32AtomicRmw16AndU $($rest:tt)*) => {};
    (emit I32AtomicRmwAnd $($rest:tt)*) => {};
    (emit I64AtomicRmw8AndU $($rest:tt)*) => {};
    (emit I64AtomicRmw16AndU $($rest:tt)*) => {};
    (emit I64AtomicRmw32AndU $($rest:tt)*) => {};
    (emit I64AtomicRmwAnd $($rest:tt)*) => {};
    (emit I32AtomicRmw8OrU $($rest:tt)*) => {};
    (emit I32AtomicRmw16OrU $($rest:tt)*) => {};
    (emit I32AtomicRmwOr $($rest:tt)*) => {};
    (emit I64AtomicRmw8OrU $($rest:tt)*) => {};
    (emit I64AtomicRmw16OrU $($rest:tt)*) => {};
    (emit I64AtomicRmw32OrU $($rest:tt)*) => {};
    (emit I64AtomicRmwOr $($rest:tt)*) => {};
    (emit I32AtomicRmw8XorU $($rest:tt)*) => {};
    (emit I32AtomicRmw16XorU $($rest:tt)*) => {};
    (emit I32AtomicRmwXor $($rest:tt)*) => {};
    (emit I64AtomicRmw8XorU $($rest:tt)*) => {};
    (emit I64AtomicRmw16XorU $($rest:tt)*) => {};
    (emit I64AtomicRmw32XorU $($rest:tt)*) => {};
    (emit I64AtomicRmwXor $($rest:tt)*) => {};
    (emit I8x16ReplaceLane $($rest:tt)*) => {};
    (emit I16x8ReplaceLane $($rest:tt)*) => {};
    (emit I32x4ReplaceLane $($rest:tt)*) => {};
    (emit I64x2ReplaceLane $($rest:tt)*) => {};
    (emit F32x4ReplaceLane $($rest:tt)*) => {};
    (emit F64x2ReplaceLane $($rest:tt)*) => {};
    (emit I32AtomicRmw8CmpxchgU $($rest:tt)*) => {};
    (emit I32AtomicRmw16CmpxchgU $($rest:tt)*) => {};
    (emit I32AtomicRmwCmpxchg $($rest:tt)*) => {};
    (emit I64AtomicRmw8CmpxchgU $($rest:tt)*) => {};
    (emit I64AtomicRmw16CmpxchgU $($rest:tt)*) => {};
    (emit I64AtomicRmw32CmpxchgU $($rest:tt)*) => {};
    (emit I64AtomicRmwCmpxchg $($rest:tt)*) => {};
    (emit I8x16Eq $($rest:tt)*) => {};
    (emit I16x8Eq $($rest:tt)*) => {};
    (emit I32x4Eq $($rest:tt)*) => {};
    (emit I64x2Eq $($rest:tt)*) => {};
    (emit F32x4Eq $($rest:tt)*) => {};
    (emit F64x2Eq $($rest:tt)*) => {};
    (emit I8x16Ne $($rest:tt)*) => {};
    (emit I16x8Ne $($rest:tt)*) => {};
    (emit I32x4Ne $($rest:tt)*) => {};
    (emit I64x2Ne $($rest:tt)*) => {};
    (emit F32x4Ne $($rest:tt)*) => {};
    (emit F64x2Ne $($rest:tt)*) => {};
    (emit I8x16LtS $($rest:tt)*) => {};
    (emit I8x16LtU $($rest:tt)*) => {};
    (emit I16x8LtS $($rest:tt)*) => {};
    (emit I16x8LtU $($rest:tt)*) => {};
    (emit I32x4LtS $($rest:tt)*) => {};
    (emit I32x4LtU $($rest:tt)*) => {};
    (emit I64x2LtS $($rest:tt)*) => {};
    (emit F32x4Lt $($rest:tt)*) => {};
    (emit F64x2Lt $($rest:tt)*) => {};
    (emit I8x16LeS $($rest:tt)*) => {};
    (emit I8x16LeU $($rest:tt)*) => {};
    (emit I16x8LeS $($rest:tt)*) => {};
    (emit I16x8LeU $($rest:tt)*) => {};
    (emit I32x4LeS $($rest:tt)*) => {};
    (emit I32x4LeU $($rest:tt)*) => {};
    (emit I64x2LeS $($rest:tt)*) => {};
    (emit F32x4Le $($rest:tt)*) => {};
    (emit F64x2Le $($rest:tt)*) => {};
    (emit I8x16GtS $($rest:tt)*) => {};
    (emit I8x16GtU $($rest:tt)*) => {};
    (emit I16x8GtS $($rest:tt)*) => {};
    (emit I16x8GtU $($rest:tt)*) => {};
    (emit I32x4GtS $($rest:tt)*) => {};
    (emit I32x4GtU $($rest:tt)*) => {};
    (emit I64x2GtS $($rest:tt)*) => {};
    (emit F32x4Gt $($rest:tt)*) => {};
    (emit F64x2Gt $($rest:tt)*) => {};
    (emit I8x16GeS $($rest:tt)*) => {};
    (emit I8x16GeU $($rest:tt)*) => {};
    (emit I16x8GeS $($rest:tt)*) => {};
    (emit I16x8GeU $($rest:tt)*) => {};
    (emit I32x4GeS $($rest:tt)*) => {};
    (emit I32x4GeU $($rest:tt)*) => {};
    (emit I64x2GeS $($rest:tt)*) => {};
    (emit F32x4Ge $($rest:tt)*) => {};
    (emit F64x2Ge $($rest:tt)*) => {};
    (emit MemoryAtomicWait32 $($rest:tt)*) => {};
    (emit MemoryAtomicWait64 $($rest:tt)*) => {};
    (emit MemoryAtomicNotify $($rest:tt)*) => {};
    (emit AtomicFence $($rest:tt)*) => {};
    (emit V128Not $($rest:tt)*) => {};
    (emit V128And $($rest:tt)*) => {};
    (emit V128AndNot $($rest:tt)*) => {};
    (emit V128Or $($rest:tt)*) => {};
    (emit V128Xor $($rest:tt)*) => {};
    (emit V128Bitselect $($rest:tt)*) => {};
    (emit V128AnyTrue $($rest:tt)*) => {};
    (emit V128Load8Lane $($rest:tt)*) => {};
    (emit V128Load16Lane $($rest:tt)*) => {};
    (emit V128Load32Lane $($rest:tt)*) => {};
    (emit V128Load64Lane $($rest:tt)*) => {};
    (emit V128Store8Lane $($rest:tt)*) => {};
    (emit V128Store16Lane $($rest:tt)*) => {};
    (emit V128Store32Lane $($rest:tt)*) => {};
    (emit V128Store64Lane $($rest:tt)*) => {};
    (emit F32x4ConvertI32x4S $($rest:tt)*) => {};
    (emit F32x4ConvertI32x4U $($rest:tt)*) => {};
    (emit F64x2ConvertLowI32x4S $($rest:tt)*) => {};
    (emit F64x2ConvertLowI32x4U $($rest:tt)*) => {};
    (emit I8x16NarrowI16x8S $($rest:tt)*) => {};
    (emit I8x16NarrowI16x8U $($rest:tt)*) => {};
    (emit I16x8NarrowI32x4S $($rest:tt)*) => {};
    (emit I16x8NarrowI32x4U $($rest:tt)*) => {};
    (emit F32x4DemoteF64x2Zero $($rest:tt)*) => {};
    (emit F64x2PromoteLowF32x4 $($rest:tt)*) => {};
    (emit I16x8ExtendLowI8x16S $($rest:tt)*) => {};
    (emit I16x8ExtendHighI8x16S $($rest:tt)*) => {};
    (emit I16x8ExtendLowI8x16U $($rest:tt)*) => {};
    (emit I16x8ExtendHighI8x16U $($rest:tt)*) => {};
    (emit I32x4ExtendLowI16x8S $($rest:tt)*) => {};
    (emit I32x4ExtendHighI16x8S $($rest:tt)*) => {};
    (emit I32x4ExtendLowI16x8U $($rest:tt)*) => {};
    (emit I32x4ExtendHighI16x8U $($rest:tt)*) => {};
    (emit I64x2ExtendLowI32x4S $($rest:tt)*) => {};
    (emit I64x2ExtendHighI32x4S $($rest:tt)*) => {};
    (emit I64x2ExtendLowI32x4U $($rest:tt)*) => {};
    (emit I64x2ExtendHighI32x4U $($rest:tt)*) => {};
    (emit I8x16Add $($rest:tt)*) => {};
    (emit I16x8Add $($rest:tt)*) => {};
    (emit I32x4Add $($rest:tt)*) => {};
    (emit I64x2Add $($rest:tt)*) => {};
    (emit I8x16Sub $($rest:tt)*) => {};
    (emit I16x8Sub $($rest:tt)*) => {};
    (emit I32x4Sub $($rest:tt)*) => {};
    (emit I64x2Sub $($rest:tt)*) => {};
    (emit I16x8Mul $($rest:tt)*) => {};
    (emit I32x4Mul $($rest:tt)*) => {};
    (emit I64x2Mul $($rest:tt)*) => {};
    (emit I8x16AddSatS $($rest:tt)*) => {};
    (emit I16x8AddSatS $($rest:tt)*) => {};
    (emit I8x16AddSatU $($rest:tt)*) => {};
    (emit I16x8AddSatU $($rest:tt)*) => {};
    (emit I8x16SubSatS $($rest:tt)*) => {};
    (emit I16x8SubSatS $($rest:tt)*) => {};
    (emit I8x16SubSatU $($rest:tt)*) => {};
    (emit I16x8SubSatU $($rest:tt)*) => {};
    (emit I8x16Abs $($rest:tt)*) => {};
    (emit I16x8Abs $($rest:tt)*) => {};
    (emit I32x4Abs $($rest:tt)*) => {};
    (emit I64x2Abs $($rest:tt)*) => {};
    (emit F32x4Abs $($rest:tt)*) => {};
    (emit F64x2Abs $($rest:tt)*) => {};
    (emit I8x16Neg $($rest:tt)*) => {};
    (emit I16x8Neg $($rest:tt)*) => {};
    (emit I32x4Neg $($rest:tt)*) => {};
    (emit I64x2Neg $($rest:tt)*) => {};
    (emit I8x16Shl $($rest:tt)*) => {};
    (emit I16x8Shl $($rest:tt)*) => {};
    (emit I32x4Shl $($rest:tt)*) => {};
    (emit I64x2Shl $($rest:tt)*) => {};
    (emit I8x16ShrU $($rest:tt)*) => {};
    (emit I16x8ShrU $($rest:tt)*) => {};
    (emit I32x4ShrU $($rest:tt)*) => {};
    (emit I64x2ShrU $($rest:tt)*) => {};
    (emit I8x16ShrS $($rest:tt)*) => {};
    (emit I16x8ShrS $($rest:tt)*) => {};
    (emit I32x4ShrS $($rest:tt)*) => {};
    (emit I64x2ShrS $($rest:tt)*) => {};
    (emit I16x8Q15MulrSatS $($rest:tt)*) => {};
    (emit I8x16AllTrue $($rest:tt)*) => {};
    (emit I16x8AllTrue $($rest:tt)*) => {};
    (emit I32x4AllTrue $($rest:tt)*) => {};
    (emit I64x2AllTrue $($rest:tt)*) => {};
    (emit I8x16Bitmask $($rest:tt)*) => {};
    (emit I16x8Bitmask $($rest:tt)*) => {};
    (emit I32x4Bitmask $($rest:tt)*) => {};
    (emit I64x2Bitmask $($rest:tt)*) => {};
    (emit I32x4TruncSatF32x4S $($rest:tt)*) => {};
    (emit I32x4TruncSatF32x4U $($rest:tt)*) => {};
    (emit I32x4TruncSatF64x2SZero $($rest:tt)*) => {};
    (emit I32x4TruncSatF64x2UZero $($rest:tt)*) => {};
    (emit I8x16MinU $($rest:tt)*) => {};
    (emit I16x8MinU $($rest:tt)*) => {};
    (emit I32x4MinU $($rest:tt)*) => {};
    (emit I8x16MinS $($rest:tt)*) => {};
    (emit I16x8MinS $($rest:tt)*) => {};
    (emit I32x4MinS $($rest:tt)*) => {};
    (emit I8x16MaxU $($rest:tt)*) => {};
    (emit I16x8MaxU $($rest:tt)*) => {};
    (emit I32x4MaxU $($rest:tt)*) => {};
    (emit I8x16MaxS $($rest:tt)*) => {};
    (emit I16x8MaxS $($rest:tt)*) => {};
    (emit I32x4MaxS $($rest:tt)*) => {};
    (emit I16x8ExtMulLowI8x16S $($rest:tt)*) => {};
    (emit I32x4ExtMulLowI16x8S $($rest:tt)*) => {};
    (emit I64x2ExtMulLowI32x4S $($rest:tt)*) => {};
    (emit I16x8ExtMulHighI8x16S $($rest:tt)*) => {};
    (emit I32x4ExtMulHighI16x8S $($rest:tt)*) => {};
    (emit I64x2ExtMulHighI32x4S $($rest:tt)*) => {};
    (emit I16x8ExtMulLowI8x16U $($rest:tt)*) => {};
    (emit I32x4ExtMulLowI16x8U $($rest:tt)*) => {};
    (emit I64x2ExtMulLowI32x4U $($rest:tt)*) => {};
    (emit I16x8ExtMulHighI8x16U $($rest:tt)*) => {};
    (emit I32x4ExtMulHighI16x8U $($rest:tt)*) => {};
    (emit I64x2ExtMulHighI32x4U $($rest:tt)*) => {};
    (emit I16x8ExtAddPairwiseI8x16U $($rest:tt)*) => {};
    (emit I16x8ExtAddPairwiseI8x16S $($rest:tt)*) => {};
    (emit I32x4ExtAddPairwiseI16x8U $($rest:tt)*) => {};
    (emit I32x4ExtAddPairwiseI16x8S $($rest:tt)*) => {};
    (emit I32x4DotI16x8S $($rest:tt)*) => {};
    (emit I8x16Popcnt $($rest:tt)*) => {};
    (emit I8x16AvgrU $($rest:tt)*) => {};
    (emit I16x8AvgrU $($rest:tt)*) => {};
    (emit F32x4Add $($rest:tt)*) => {};
    (emit F64x2Add $($rest:tt)*) => {};
    (emit F32x4Sub $($rest:tt)*) => {};
    (emit F64x2Sub $($rest:tt)*) => {};
    (emit F32x4Mul $($rest:tt)*) => {};
    (emit F64x2Mul $($rest:tt)*) => {};
    (emit F32x4Div $($rest:tt)*) => {};
    (emit F64x2Div $($rest:tt)*) => {};
    (emit F32x4Neg $($rest:tt)*) => {};
    (emit F64x2Neg $($rest:tt)*) => {};
    (emit F32x4Sqrt $($rest:tt)*) => {};
    (emit F64x2Sqrt $($rest:tt)*) => {};
    (emit F32x4Ceil $($rest:tt)*) => {};
    (emit F64x2Ceil $($rest:tt)*) => {};
    (emit F32x4Floor $($rest:tt)*) => {};
    (emit F64x2Floor $($rest:tt)*) => {};
    (emit F32x4Nearest $($rest:tt)*) => {};
    (emit F64x2Nearest $($rest:tt)*) => {};
    (emit F32x4Trunc $($rest:tt)*) => {};
    (emit F64x2Trunc $($rest:tt)*) => {};
    (emit V128Load32Zero $($rest:tt)*) => {};
    (emit V128Load64Zero $($rest:tt)*) => {};
    (emit F32x4PMin $($rest:tt)*) => {};
    (emit F64x2PMin $($rest:tt)*) => {};
    (emit F32x4PMax $($rest:tt)*) => {};
    (emit F64x2PMax $($rest:tt)*) => {};
    (emit F32x4Min $($rest:tt)*) => {};
    (emit F64x2Min $($rest:tt)*) => {};
    (emit F32x4Max $($rest:tt)*) => {};
    (emit F64x2Max $($rest:tt)*) => {};

    (emit $unsupported:tt $($rest:tt)*) => {$($rest)*};
}

impl<'a, 'translation, 'data, M> VisitOperator<'a> for CodeGen<'a, 'translation, 'data, M, Emission>
where
    M: MacroAssembler,
{
    type Output = Result<()>;
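
    // Every visitor below returns `Result<()>`, so codegen failures and
    // unimplemented instructions surface as errors rather than panics.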

    fn visit_i32_const(&mut self, val: i32) -> Self::Output {
        self.context.stack.push(Val::i32(val));

        Ok(())
    }

    fn visit_i64_const(&mut self, val: i64) -> Self::Output {
        self.context.stack.push(Val::i64(val));
        Ok(())
    }

    fn visit_f32_const(&mut self, val: Ieee32) -> Self::Output {
        self.context.stack.push(Val::f32(val));
        Ok(())
    }

    fn visit_f64_const(&mut self, val: Ieee64) -> Self::Output {
        self.context.stack.push(Val::f64(val));
        Ok(())
    }
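
    // Note that the `*_const` visitors above emit no machine code: they only
    // push an immediate onto the compile-time value stack, deferring
    // materialization until an instruction actually consumes the value.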

    fn visit_f32_add(&mut self) -> Self::Output {
        self.context.binop(
            self.masm,
            OperandSize::S32,
            &mut |masm: &mut M, dst, src, size| {
                masm.float_add(writable!(dst), dst, src, size)?;
                Ok(TypedReg::f32(dst))
            },
        )
    }

    fn visit_f64_add(&mut self) -> Self::Output {
        self.context.binop(
            self.masm,
            OperandSize::S64,
            &mut |masm: &mut M, dst, src, size| {
                masm.float_add(writable!(dst), dst, src, size)?;
                Ok(TypedReg::f64(dst))
            },
        )
    }
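
    // In the `binop`-based visitors above and below, the closure receives the
    // two popped operand registers plus the operand size and returns the
    // `TypedReg` to push as the result; `dst` doubles as the destination
    // register of the emitted instruction.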

    fn visit_f32_sub(&mut self) -> Self::Output {
        self.context.binop(
            self.masm,
            OperandSize::S32,
            &mut |masm: &mut M, dst, src, size| {
                masm.float_sub(writable!(dst), dst, src, size)?;
                Ok(TypedReg::f32(dst))
            },
        )
    }

    fn visit_f64_sub(&mut self) -> Self::Output {
        self.context.binop(
            self.masm,
            OperandSize::S64,
            &mut |masm: &mut M, dst, src, size| {
                masm.float_sub(writable!(dst), dst, src, size)?;
                Ok(TypedReg::f64(dst))
            },
        )
    }

    fn visit_f32_mul(&mut self) -> Self::Output {
        self.context.binop(
            self.masm,
            OperandSize::S32,
            &mut |masm: &mut M, dst, src, size| {
                masm.float_mul(writable!(dst), dst, src, size)?;
                Ok(TypedReg::f32(dst))
            },
        )
    }

    fn visit_f64_mul(&mut self) -> Self::Output {
        self.context.binop(
            self.masm,
            OperandSize::S64,
            &mut |masm: &mut M, dst, src, size| {
                masm.float_mul(writable!(dst), dst, src, size)?;
                Ok(TypedReg::f64(dst))
            },
        )
    }

    fn visit_f32_div(&mut self) -> Self::Output {
        self.context.binop(
            self.masm,
            OperandSize::S32,
            &mut |masm: &mut M, dst, src, size| {
                masm.float_div(writable!(dst), dst, src, size)?;
                Ok(TypedReg::f32(dst))
            },
        )
    }

    fn visit_f64_div(&mut self) -> Self::Output {
        self.context.binop(
            self.masm,
            OperandSize::S64,
            &mut |masm: &mut M, dst, src, size| {
                masm.float_div(writable!(dst), dst, src, size)?;
                Ok(TypedReg::f64(dst))
            },
        )
    }

    fn visit_f32_min(&mut self) -> Self::Output {
        self.context.binop(
            self.masm,
            OperandSize::S32,
            &mut |masm: &mut M, dst, src, size| {
                masm.float_min(writable!(dst), dst, src, size)?;
                Ok(TypedReg::f32(dst))
            },
        )
    }

    fn visit_f64_min(&mut self) -> Self::Output {
        self.context.binop(
            self.masm,
            OperandSize::S64,
            &mut |masm: &mut M, dst, src, size| {
                masm.float_min(writable!(dst), dst, src, size)?;
                Ok(TypedReg::f64(dst))
            },
        )
    }

    fn visit_f32_max(&mut self) -> Self::Output {
        self.context.binop(
            self.masm,
            OperandSize::S32,
            &mut |masm: &mut M, dst, src, size| {
                masm.float_max(writable!(dst), dst, src, size)?;
                Ok(TypedReg::f32(dst))
            },
        )
    }

    fn visit_f64_max(&mut self) -> Self::Output {
        self.context.binop(
            self.masm,
            OperandSize::S64,
            &mut |masm: &mut M, dst, src, size| {
                masm.float_max(writable!(dst), dst, src, size)?;
                Ok(TypedReg::f64(dst))
            },
        )
    }

    fn visit_f32_copysign(&mut self) -> Self::Output {
        self.context.binop(
            self.masm,
            OperandSize::S32,
            &mut |masm: &mut M, dst, src, size| {
                masm.float_copysign(writable!(dst), dst, src, size)?;
                Ok(TypedReg::f32(dst))
            },
        )
    }

    fn visit_f64_copysign(&mut self) -> Self::Output {
        self.context.binop(
            self.masm,
            OperandSize::S64,
            &mut |masm: &mut M, dst, src, size| {
                masm.float_copysign(writable!(dst), dst, src, size)?;
                Ok(TypedReg::f64(dst))
            },
        )
    }

    fn visit_f32_abs(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.float_abs(writable!(reg), OperandSize::S32)?;
            Ok(TypedReg::f32(reg))
        })
    }

    fn visit_f64_abs(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.float_abs(writable!(reg), OperandSize::S64)?;
            Ok(TypedReg::f64(reg))
        })
    }

    fn visit_f32_neg(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.float_neg(writable!(reg), OperandSize::S32)?;
            Ok(TypedReg::f32(reg))
        })
    }

    fn visit_f64_neg(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.float_neg(writable!(reg), OperandSize::S64)?;
            Ok(TypedReg::f64(reg))
        })
    }
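
    // The rounding visitors below go through `MacroAssembler::float_round`,
    // passing a closure that emits a call to the corresponding builtin; the
    // closure acts as the fallback path for targets without a suitable
    // native rounding instruction.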

    fn visit_f32_floor(&mut self) -> Self::Output {
        self.masm.float_round(
            RoundingMode::Down,
            &mut self.env,
            &mut self.context,
            OperandSize::S32,
            |env, cx, masm| {
                let builtin = env.builtins.floor_f32::<M::ABI>()?;
                FnCall::emit::<M>(env, masm, cx, Callee::Builtin(builtin))
            },
        )
    }

    fn visit_f64_floor(&mut self) -> Self::Output {
        self.masm.float_round(
            RoundingMode::Down,
            &mut self.env,
            &mut self.context,
            OperandSize::S64,
            |env, cx, masm| {
                let builtin = env.builtins.floor_f64::<M::ABI>()?;
                FnCall::emit::<M>(env, masm, cx, Callee::Builtin(builtin))
            },
        )
    }

    fn visit_f32_ceil(&mut self) -> Self::Output {
        self.masm.float_round(
            RoundingMode::Up,
            &mut self.env,
            &mut self.context,
            OperandSize::S32,
            |env, cx, masm| {
                let builtin = env.builtins.ceil_f32::<M::ABI>()?;
                FnCall::emit::<M>(env, masm, cx, Callee::Builtin(builtin))
            },
        )
    }

    fn visit_f64_ceil(&mut self) -> Self::Output {
        self.masm.float_round(
            RoundingMode::Up,
            &mut self.env,
            &mut self.context,
            OperandSize::S64,
            |env, cx, masm| {
                let builtin = env.builtins.ceil_f64::<M::ABI>()?;
                FnCall::emit::<M>(env, masm, cx, Callee::Builtin(builtin))
            },
        )
    }

    fn visit_f32_nearest(&mut self) -> Self::Output {
        self.masm.float_round(
            RoundingMode::Nearest,
            &mut self.env,
            &mut self.context,
            OperandSize::S32,
            |env, cx, masm| {
                let builtin = env.builtins.nearest_f32::<M::ABI>()?;
                FnCall::emit::<M>(env, masm, cx, Callee::Builtin(builtin))
            },
        )
    }

    fn visit_f64_nearest(&mut self) -> Self::Output {
        self.masm.float_round(
            RoundingMode::Nearest,
            &mut self.env,
            &mut self.context,
            OperandSize::S64,
            |env, cx, masm| {
                let builtin = env.builtins.nearest_f64::<M::ABI>()?;
                FnCall::emit::<M>(env, masm, cx, Callee::Builtin(builtin))
            },
        )
    }

    fn visit_f32_trunc(&mut self) -> Self::Output {
        self.masm.float_round(
            RoundingMode::Zero,
            &mut self.env,
            &mut self.context,
            OperandSize::S32,
            |env, cx, masm| {
                let builtin = env.builtins.trunc_f32::<M::ABI>()?;
                FnCall::emit::<M>(env, masm, cx, Callee::Builtin(builtin))
            },
        )
    }

    fn visit_f64_trunc(&mut self) -> Self::Output {
        self.masm.float_round(
            RoundingMode::Zero,
            &mut self.env,
            &mut self.context,
            OperandSize::S64,
            |env, cx, masm| {
                let builtin = env.builtins.trunc_f64::<M::ABI>()?;
                FnCall::emit::<M>(env, masm, cx, Callee::Builtin(builtin))
            },
        )
    }

    fn visit_f32_sqrt(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.float_sqrt(writable!(reg), reg, OperandSize::S32)?;
            Ok(TypedReg::f32(reg))
        })
    }

    fn visit_f64_sqrt(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.float_sqrt(writable!(reg), reg, OperandSize::S64)?;
            Ok(TypedReg::f64(reg))
        })
    }
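
    // Per Wasm semantics, float comparisons produce an `i32` (0 or 1): the
    // closure materializes the comparison result into `dst` via
    // `float_cmp_with_set`.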

    fn visit_f32_eq(&mut self) -> Self::Output {
        self.context.float_cmp_op(
            self.masm,
            OperandSize::S32,
            &mut |masm: &mut M, dst, src1, src2, size| {
                masm.float_cmp_with_set(writable!(dst), src1, src2, FloatCmpKind::Eq, size)
            },
        )
    }

    fn visit_f64_eq(&mut self) -> Self::Output {
        self.context.float_cmp_op(
            self.masm,
            OperandSize::S64,
            &mut |masm: &mut M, dst, src1, src2, size| {
                masm.float_cmp_with_set(writable!(dst), src1, src2, FloatCmpKind::Eq, size)
            },
        )
    }

    fn visit_f32_ne(&mut self) -> Self::Output {
        self.context.float_cmp_op(
            self.masm,
            OperandSize::S32,
            &mut |masm: &mut M, dst, src1, src2, size| {
                masm.float_cmp_with_set(writable!(dst), src1, src2, FloatCmpKind::Ne, size)
            },
        )
    }

    fn visit_f64_ne(&mut self) -> Self::Output {
        self.context.float_cmp_op(
            self.masm,
            OperandSize::S64,
            &mut |masm: &mut M, dst, src1, src2, size| {
                masm.float_cmp_with_set(writable!(dst), src1, src2, FloatCmpKind::Ne, size)
            },
        )
    }

    fn visit_f32_lt(&mut self) -> Self::Output {
        self.context.float_cmp_op(
            self.masm,
            OperandSize::S32,
            &mut |masm: &mut M, dst, src1, src2, size| {
                masm.float_cmp_with_set(writable!(dst), src1, src2, FloatCmpKind::Lt, size)
            },
        )
    }

    fn visit_f64_lt(&mut self) -> Self::Output {
        self.context.float_cmp_op(
            self.masm,
            OperandSize::S64,
            &mut |masm: &mut M, dst, src1, src2, size| {
                masm.float_cmp_with_set(writable!(dst), src1, src2, FloatCmpKind::Lt, size)
            },
        )
    }

    fn visit_f32_gt(&mut self) -> Self::Output {
        self.context.float_cmp_op(
            self.masm,
            OperandSize::S32,
            &mut |masm: &mut M, dst, src1, src2, size| {
                masm.float_cmp_with_set(writable!(dst), src1, src2, FloatCmpKind::Gt, size)
            },
        )
    }

    fn visit_f64_gt(&mut self) -> Self::Output {
        self.context.float_cmp_op(
            self.masm,
            OperandSize::S64,
            &mut |masm: &mut M, dst, src1, src2, size| {
                masm.float_cmp_with_set(writable!(dst), src1, src2, FloatCmpKind::Gt, size)
            },
        )
    }

    fn visit_f32_le(&mut self) -> Self::Output {
        self.context.float_cmp_op(
            self.masm,
            OperandSize::S32,
            &mut |masm: &mut M, dst, src1, src2, size| {
                masm.float_cmp_with_set(writable!(dst), src1, src2, FloatCmpKind::Le, size)
            },
        )
    }

    fn visit_f64_le(&mut self) -> Self::Output {
        self.context.float_cmp_op(
            self.masm,
            OperandSize::S64,
            &mut |masm: &mut M, dst, src1, src2, size| {
                masm.float_cmp_with_set(writable!(dst), src1, src2, FloatCmpKind::Le, size)
            },
        )
    }

    fn visit_f32_ge(&mut self) -> Self::Output {
        self.context.float_cmp_op(
            self.masm,
            OperandSize::S32,
            &mut |masm: &mut M, dst, src1, src2, size| {
                masm.float_cmp_with_set(writable!(dst), src1, src2, FloatCmpKind::Ge, size)
            },
        )
    }

    fn visit_f64_ge(&mut self) -> Self::Output {
        self.context.float_cmp_op(
            self.masm,
            OperandSize::S64,
            &mut |masm: &mut M, dst, src1, src2, size| {
                masm.float_cmp_with_set(writable!(dst), src1, src2, FloatCmpKind::Ge, size)
            },
        )
    }
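
    // Signed int-to-float conversions lower to a single `signed_convert`,
    // while the unsigned variants use `convert_op_with_tmp_reg` to reserve a
    // scratch GPR: some targets (notably x86_64) lack a direct
    // unsigned-to-float instruction and must synthesize the conversion.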

    fn visit_f32_convert_i32_s(&mut self) -> Self::Output {
        self.context
            .convert_op(self.masm, WasmValType::F32, |masm, dst, src, dst_size| {
                masm.signed_convert(writable!(dst), src, OperandSize::S32, dst_size)
            })
    }

    fn visit_f32_convert_i32_u(&mut self) -> Self::Output {
        self.context.convert_op_with_tmp_reg(
            self.masm,
            WasmValType::F32,
            RegClass::Int,
            |masm, dst, src, tmp_gpr, dst_size| {
                masm.unsigned_convert(writable!(dst), src, tmp_gpr, OperandSize::S32, dst_size)
            },
        )
    }

    fn visit_f32_convert_i64_s(&mut self) -> Self::Output {
        self.context
            .convert_op(self.masm, WasmValType::F32, |masm, dst, src, dst_size| {
                masm.signed_convert(writable!(dst), src, OperandSize::S64, dst_size)
            })
    }

    fn visit_f32_convert_i64_u(&mut self) -> Self::Output {
        self.context.convert_op_with_tmp_reg(
            self.masm,
            WasmValType::F32,
            RegClass::Int,
            |masm, dst, src, tmp_gpr, dst_size| {
                masm.unsigned_convert(writable!(dst), src, tmp_gpr, OperandSize::S64, dst_size)
            },
        )
    }

    fn visit_f64_convert_i32_s(&mut self) -> Self::Output {
        self.context
            .convert_op(self.masm, WasmValType::F64, |masm, dst, src, dst_size| {
                masm.signed_convert(writable!(dst), src, OperandSize::S32, dst_size)
            })
    }

    fn visit_f64_convert_i32_u(&mut self) -> Self::Output {
        self.context.convert_op_with_tmp_reg(
            self.masm,
            WasmValType::F64,
            RegClass::Int,
            |masm, dst, src, tmp_gpr, dst_size| {
                masm.unsigned_convert(writable!(dst), src, tmp_gpr, OperandSize::S32, dst_size)
            },
        )
    }

    fn visit_f64_convert_i64_s(&mut self) -> Self::Output {
        self.context
            .convert_op(self.masm, WasmValType::F64, |masm, dst, src, dst_size| {
                masm.signed_convert(writable!(dst), src, OperandSize::S64, dst_size)
            })
    }

    fn visit_f64_convert_i64_u(&mut self) -> Self::Output {
        self.context.convert_op_with_tmp_reg(
            self.masm,
            WasmValType::F64,
            RegClass::Int,
            |masm, dst, src, tmp_gpr, dst_size| {
                masm.unsigned_convert(writable!(dst), src, tmp_gpr, OperandSize::S64, dst_size)
            },
        )
    }

    fn visit_f32_reinterpret_i32(&mut self) -> Self::Output {
        self.context
            .convert_op(self.masm, WasmValType::F32, |masm, dst, src, size| {
                masm.reinterpret_int_as_float(writable!(dst), src, size)
            })
    }

    fn visit_f64_reinterpret_i64(&mut self) -> Self::Output {
        self.context
            .convert_op(self.masm, WasmValType::F64, |masm, dst, src, size| {
                masm.reinterpret_int_as_float(writable!(dst), src, size)
            })
    }

    fn visit_f32_demote_f64(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.demote(writable!(reg), reg)?;
            Ok(TypedReg::f32(reg))
        })
    }

    fn visit_f64_promote_f32(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.promote(writable!(reg), reg)?;
            Ok(TypedReg::f64(reg))
        })
    }

    fn visit_i32_add(&mut self) -> Self::Output {
        self.context.i32_binop(self.masm, |masm, dst, src, size| {
            masm.add(writable!(dst), dst, src, size)?;
            Ok(TypedReg::i32(dst))
        })
    }

    fn visit_i64_add(&mut self) -> Self::Output {
        self.context.i64_binop(self.masm, |masm, dst, src, size| {
            masm.add(writable!(dst), dst, src, size)?;
            Ok(TypedReg::i64(dst))
        })
    }

    fn visit_i32_sub(&mut self) -> Self::Output {
        self.context.i32_binop(self.masm, |masm, dst, src, size| {
            masm.sub(writable!(dst), dst, src, size)?;
            Ok(TypedReg::i32(dst))
        })
    }

    fn visit_i64_sub(&mut self) -> Self::Output {
        self.context.i64_binop(self.masm, |masm, dst, src, size| {
            masm.sub(writable!(dst), dst, src, size)?;
            Ok(TypedReg::i64(dst))
        })
    }

    fn visit_i32_mul(&mut self) -> Self::Output {
        self.context.i32_binop(self.masm, |masm, dst, src, size| {
            masm.mul(writable!(dst), dst, src, size)?;
            Ok(TypedReg::i32(dst))
        })
    }

    fn visit_i64_mul(&mut self) -> Self::Output {
        self.context.i64_binop(self.masm, |masm, dst, src, size| {
            masm.mul(writable!(dst), dst, src, size)?;
            Ok(TypedReg::i64(dst))
        })
    }
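
    // Division and remainder hand the whole context to the masm rather than
    // going through `i32_binop`/`i64_binop`: they come with ISA-specific
    // register constraints (e.g. rax/rdx on x86_64) and must emit the
    // divide-by-zero and overflow traps required by Wasm semantics.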

    fn visit_i32_div_s(&mut self) -> Self::Output {
        use DivKind::*;
        use OperandSize::*;

        self.masm.div(&mut self.context, Signed, S32)
    }

    fn visit_i32_div_u(&mut self) -> Self::Output {
        use DivKind::*;
        use OperandSize::*;

        self.masm.div(&mut self.context, Unsigned, S32)
    }

    fn visit_i64_div_s(&mut self) -> Self::Output {
        use DivKind::*;
        use OperandSize::*;

        self.masm.div(&mut self.context, Signed, S64)
    }

    fn visit_i64_div_u(&mut self) -> Self::Output {
        use DivKind::*;
        use OperandSize::*;

        self.masm.div(&mut self.context, Unsigned, S64)
    }

    fn visit_i32_rem_s(&mut self) -> Self::Output {
        use OperandSize::*;
        use RemKind::*;

        self.masm.rem(&mut self.context, Signed, S32)
    }

    fn visit_i32_rem_u(&mut self) -> Self::Output {
        use OperandSize::*;
        use RemKind::*;

        self.masm.rem(&mut self.context, Unsigned, S32)
    }

    fn visit_i64_rem_s(&mut self) -> Self::Output {
        use OperandSize::*;
        use RemKind::*;

        self.masm.rem(&mut self.context, Signed, S64)
    }

    fn visit_i64_rem_u(&mut self) -> Self::Output {
        use OperandSize::*;
        use RemKind::*;

        self.masm.rem(&mut self.context, Unsigned, S64)
    }

    fn visit_i32_eq(&mut self) -> Self::Output {
        self.cmp_i32s(IntCmpKind::Eq)
    }

    fn visit_i64_eq(&mut self) -> Self::Output {
        self.cmp_i64s(IntCmpKind::Eq)
    }

    fn visit_i32_ne(&mut self) -> Self::Output {
        self.cmp_i32s(IntCmpKind::Ne)
    }

    fn visit_i64_ne(&mut self) -> Self::Output {
        self.cmp_i64s(IntCmpKind::Ne)
    }

    fn visit_i32_lt_s(&mut self) -> Self::Output {
        self.cmp_i32s(IntCmpKind::LtS)
    }

    fn visit_i64_lt_s(&mut self) -> Self::Output {
        self.cmp_i64s(IntCmpKind::LtS)
    }

    fn visit_i32_lt_u(&mut self) -> Self::Output {
        self.cmp_i32s(IntCmpKind::LtU)
    }

    fn visit_i64_lt_u(&mut self) -> Self::Output {
        self.cmp_i64s(IntCmpKind::LtU)
    }

    fn visit_i32_le_s(&mut self) -> Self::Output {
        self.cmp_i32s(IntCmpKind::LeS)
    }

    fn visit_i64_le_s(&mut self) -> Self::Output {
        self.cmp_i64s(IntCmpKind::LeS)
    }

    fn visit_i32_le_u(&mut self) -> Self::Output {
        self.cmp_i32s(IntCmpKind::LeU)
    }

    fn visit_i64_le_u(&mut self) -> Self::Output {
        self.cmp_i64s(IntCmpKind::LeU)
    }

    fn visit_i32_gt_s(&mut self) -> Self::Output {
        self.cmp_i32s(IntCmpKind::GtS)
    }

    fn visit_i64_gt_s(&mut self) -> Self::Output {
        self.cmp_i64s(IntCmpKind::GtS)
    }

    fn visit_i32_gt_u(&mut self) -> Self::Output {
        self.cmp_i32s(IntCmpKind::GtU)
    }

    fn visit_i64_gt_u(&mut self) -> Self::Output {
        self.cmp_i64s(IntCmpKind::GtU)
    }

    fn visit_i32_ge_s(&mut self) -> Self::Output {
        self.cmp_i32s(IntCmpKind::GeS)
    }

    fn visit_i64_ge_s(&mut self) -> Self::Output {
        self.cmp_i64s(IntCmpKind::GeS)
    }

    fn visit_i32_ge_u(&mut self) -> Self::Output {
        self.cmp_i32s(IntCmpKind::GeU)
    }

    fn visit_i64_ge_u(&mut self) -> Self::Output {
        self.cmp_i64s(IntCmpKind::GeU)
    }

    fn visit_i32_eqz(&mut self) -> Self::Output {
        use OperandSize::*;

        self.context.unop(self.masm, |masm, reg| {
            masm.cmp_with_set(writable!(reg), RegImm::i32(0), IntCmpKind::Eq, S32)?;
            Ok(TypedReg::i32(reg))
        })
    }

    fn visit_i64_eqz(&mut self) -> Self::Output {
        use OperandSize::*;

        self.context.unop(self.masm, |masm, reg| {
            masm.cmp_with_set(writable!(reg), RegImm::i64(0), IntCmpKind::Eq, S64)?;
            Ok(TypedReg::i32(reg)) // Return value for `i64.eqz` is an `i32`.
        })
    }

    fn visit_i32_clz(&mut self) -> Self::Output {
        use OperandSize::*;

        self.context.unop(self.masm, |masm, reg| {
            masm.clz(writable!(reg), reg, S32)?;
            Ok(TypedReg::i32(reg))
        })
    }

    fn visit_i64_clz(&mut self) -> Self::Output {
        use OperandSize::*;

        self.context.unop(self.masm, |masm, reg| {
            masm.clz(writable!(reg), reg, S64)?;
            Ok(TypedReg::i64(reg))
        })
    }

    fn visit_i32_ctz(&mut self) -> Self::Output {
        use OperandSize::*;

        self.context.unop(self.masm, |masm, reg| {
            masm.ctz(writable!(reg), reg, S32)?;
            Ok(TypedReg::i32(reg))
        })
    }

    fn visit_i64_ctz(&mut self) -> Self::Output {
        use OperandSize::*;

        self.context.unop(self.masm, |masm, reg| {
            masm.ctz(writable!(reg), reg, S64)?;
            Ok(TypedReg::i64(reg))
        })
    }

    fn visit_i32_and(&mut self) -> Self::Output {
        self.context.i32_binop(self.masm, |masm, dst, src, size| {
            masm.and(writable!(dst), dst, src, size)?;
            Ok(TypedReg::i32(dst))
        })
    }

    fn visit_i64_and(&mut self) -> Self::Output {
        self.context.i64_binop(self.masm, |masm, dst, src, size| {
            masm.and(writable!(dst), dst, src, size)?;
            Ok(TypedReg::i64(dst))
        })
    }

    fn visit_i32_or(&mut self) -> Self::Output {
        self.context.i32_binop(self.masm, |masm, dst, src, size| {
            masm.or(writable!(dst), dst, src, size)?;
            Ok(TypedReg::i32(dst))
        })
    }

    fn visit_i64_or(&mut self) -> Self::Output {
        self.context.i64_binop(self.masm, |masm, dst, src, size| {
            masm.or(writable!(dst), dst, src, size)?;
            Ok(TypedReg::i64(dst))
        })
    }

    fn visit_i32_xor(&mut self) -> Self::Output {
        self.context.i32_binop(self.masm, |masm, dst, src, size| {
            masm.xor(writable!(dst), dst, src, size)?;
            Ok(TypedReg::i32(dst))
        })
    }

    fn visit_i64_xor(&mut self) -> Self::Output {
        self.context.i64_binop(self.masm, |masm, dst, src, size| {
            masm.xor(writable!(dst), dst, src, size)?;
            Ok(TypedReg::i64(dst))
        })
    }

    fn visit_i32_shl(&mut self) -> Self::Output {
        use ShiftKind::*;

        self.context.i32_shift(self.masm, Shl)
    }

    fn visit_i64_shl(&mut self) -> Self::Output {
        use ShiftKind::*;

        self.context.i64_shift(self.masm, Shl)
    }

    fn visit_i32_shr_s(&mut self) -> Self::Output {
        use ShiftKind::*;

        self.context.i32_shift(self.masm, ShrS)
    }

    fn visit_i64_shr_s(&mut self) -> Self::Output {
        use ShiftKind::*;

        self.context.i64_shift(self.masm, ShrS)
    }

    fn visit_i32_shr_u(&mut self) -> Self::Output {
        use ShiftKind::*;

        self.context.i32_shift(self.masm, ShrU)
    }

    fn visit_i64_shr_u(&mut self) -> Self::Output {
        use ShiftKind::*;

        self.context.i64_shift(self.masm, ShrU)
    }

    fn visit_i32_rotl(&mut self) -> Self::Output {
        use ShiftKind::*;

        self.context.i32_shift(self.masm, Rotl)
    }

    fn visit_i64_rotl(&mut self) -> Self::Output {
        use ShiftKind::*;

        self.context.i64_shift(self.masm, Rotl)
    }

    fn visit_i32_rotr(&mut self) -> Self::Output {
        use ShiftKind::*;

        self.context.i32_shift(self.masm, Rotr)
    }

    fn visit_i64_rotr(&mut self) -> Self::Output {
        use ShiftKind::*;

        self.context.i64_shift(self.masm, Rotr)
    }
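
    // `end` closes the innermost control frame: if the current code is
    // statically unreachable the frame goes through the dedicated
    // unreachable-code path, otherwise the frame is popped and its exit
    // sequence is emitted.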

    fn visit_end(&mut self) -> Self::Output {
        if !self.context.reachable {
            self.handle_unreachable_end()
        } else {
            let mut control = self.pop_control_frame()?;
            control.emit_end(self.masm, &mut self.context)
        }
    }

    fn visit_i32_popcnt(&mut self) -> Self::Output {
        use OperandSize::*;
        self.masm.popcnt(&mut self.context, S32)
    }

    fn visit_i64_popcnt(&mut self) -> Self::Output {
        use OperandSize::*;

        self.masm.popcnt(&mut self.context, S64)
    }

    fn visit_i32_wrap_i64(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.wrap(writable!(reg), reg)?;
            Ok(TypedReg::i32(reg))
        })
    }

    fn visit_i64_extend_i32_s(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.extend(writable!(reg), reg, Extend::<Signed>::I64Extend32.into())?;
            Ok(TypedReg::i64(reg))
        })
    }

    fn visit_i64_extend_i32_u(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.extend(writable!(reg), reg, Extend::<Zero>::I64Extend32.into())?;
            Ok(TypedReg::i64(reg))
        })
    }

    fn visit_i32_extend8_s(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.extend(writable!(reg), reg, Extend::<Signed>::I32Extend8.into())?;
            Ok(TypedReg::i32(reg))
        })
    }

    fn visit_i32_extend16_s(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.extend(writable!(reg), reg, Extend::<Signed>::I32Extend16.into())?;
            Ok(TypedReg::i32(reg))
        })
    }

    fn visit_i64_extend8_s(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.extend(writable!(reg), reg, Extend::<Signed>::I64Extend8.into())?;
            Ok(TypedReg::i64(reg))
        })
    }

    fn visit_i64_extend16_s(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.extend(writable!(reg), reg, Extend::<Signed>::I64Extend16.into())?;
            Ok(TypedReg::i64(reg))
        })
    }

    fn visit_i64_extend32_s(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.extend(writable!(reg), reg, Extend::<Signed>::I64Extend32.into())?;
            Ok(TypedReg::i64(reg))
        })
    }
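
    // The truncations below use `TruncKind::Unchecked`, the trapping flavor
    // of float-to-int truncation in which out-of-range inputs trap rather
    // than saturate. Signed truncations go through `convert_op`; the
    // unsigned ones hand the whole context to the masm, which may need
    // additional scratch state.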

    fn visit_i32_trunc_f32_s(&mut self) -> Self::Output {
        use OperandSize::*;

        self.context
            .convert_op(self.masm, WasmValType::I32, |masm, dst, src, dst_size| {
                masm.signed_truncate(writable!(dst), src, S32, dst_size, TruncKind::Unchecked)
            })
    }

    fn visit_i32_trunc_f32_u(&mut self) -> Self::Output {
        use OperandSize::*;

        self.masm
            .unsigned_truncate(&mut self.context, S32, S32, TruncKind::Unchecked)
    }

    fn visit_i32_trunc_f64_s(&mut self) -> Self::Output {
        use OperandSize::*;

        self.context
            .convert_op(self.masm, WasmValType::I32, |masm, dst, src, dst_size| {
                masm.signed_truncate(writable!(dst), src, S64, dst_size, TruncKind::Unchecked)
            })
    }

    fn visit_i32_trunc_f64_u(&mut self) -> Self::Output {
        use OperandSize::*;
        self.masm
            .unsigned_truncate(&mut self.context, S64, S32, TruncKind::Unchecked)
    }

    fn visit_i64_trunc_f32_s(&mut self) -> Self::Output {
        use OperandSize::*;

        self.context
            .convert_op(self.masm, WasmValType::I64, |masm, dst, src, dst_size| {
                masm.signed_truncate(writable!(dst), src, S32, dst_size, TruncKind::Unchecked)
            })
    }

    fn visit_i64_trunc_f32_u(&mut self) -> Self::Output {
        use OperandSize::*;

        self.masm
            .unsigned_truncate(&mut self.context, S32, S64, TruncKind::Unchecked)
    }

    fn visit_i64_trunc_f64_s(&mut self) -> Self::Output {
        use OperandSize::*;

        self.context
            .convert_op(self.masm, WasmValType::I64, |masm, dst, src, dst_size| {
                masm.signed_truncate(writable!(dst), src, S64, dst_size, TruncKind::Unchecked)
            })
    }

    fn visit_i64_trunc_f64_u(&mut self) -> Self::Output {
        use OperandSize::*;

        self.masm
            .unsigned_truncate(&mut self.context, S64, S64, TruncKind::Unchecked)
    }

    fn visit_i32_reinterpret_f32(&mut self) -> Self::Output {
        self.context
            .convert_op(self.masm, WasmValType::I32, |masm, dst, src, size| {
                masm.reinterpret_float_as_int(writable!(dst), src, size)
            })
    }

    fn visit_i64_reinterpret_f64(&mut self) -> Self::Output {
        self.context
            .convert_op(self.masm, WasmValType::I64, |masm, dst, src, size| {
                masm.reinterpret_float_as_int(writable!(dst), src, size)
            })
    }
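
    // For locals, only concrete value types and function references are
    // supported; any other reference type bails with
    // `unsupported_wasm_type`.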

    fn visit_local_get(&mut self, index: u32) -> Self::Output {
        use WasmValType::*;
        let context = &mut self.context;
        let slot = context.frame.get_wasm_local(index);
        match slot.ty {
            I32 | I64 | F32 | F64 | V128 => context.stack.push(Val::local(index, slot.ty)),
            Ref(rt) => match rt.heap_type {
                WasmHeapType::Func => context.stack.push(Val::local(index, slot.ty)),
                _ => bail!(CodeGenError::unsupported_wasm_type()),
            },
        }

        Ok(())
    }

    fn visit_local_set(&mut self, index: u32) -> Self::Output {
        let src = self.emit_set_local(index)?;
        self.context.free_reg(src);
        Ok(())
    }

    fn visit_call(&mut self, index: u32) -> Self::Output {
        let callee = self.env.callee_from_index(FuncIndex::from_u32(index));
        FnCall::emit::<M>(&mut self.env, self.masm, &mut self.context, callee)?;
        Ok(())
    }

    fn visit_call_indirect(&mut self, type_index: u32, table_index: u32) -> Self::Output {
        // Spill now, because the `emit_lazy_init_funcref` and `FnCall::emit`
        // invocations below will both trigger spills, since they both call
        // functions. However, the machine instructions for the spill emitted
        // by `emit_lazy_init_funcref` are jumped over if the funcref was
        // previously initialized, which could leave the machine stack
        // unbalanced.
        self.context.spill(self.masm)?;

        let type_index = TypeIndex::from_u32(type_index);
        let table_index = TableIndex::from_u32(table_index);

        self.emit_lazy_init_funcref(table_index)?;

        // Perform the indirect call.
        // This code assumes that [`Self::emit_lazy_init_funcref`] will
        // push the funcref to the value stack.
        let funcref_ptr = self
            .context
            .stack
            .peek()
            .map(|v| v.unwrap_reg())
            .ok_or_else(|| CodeGenError::missing_values_in_stack())?;
        self.masm
            .trapz(funcref_ptr.into(), TRAP_INDIRECT_CALL_TO_NULL)?;
        self.emit_typecheck_funcref(funcref_ptr.into(), type_index)?;

        let callee = self.env.funcref(type_index);
        FnCall::emit::<M>(&mut self.env, self.masm, &mut self.context, callee)?;
        Ok(())
    }
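
    // The `table.init` operands (dst, src, len) are already on the value
    // stack; the builtin additionally takes the table and element indices,
    // which are spliced in beneath those operands before emitting the call.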
1656
1657
fn visit_table_init(&mut self, elem: u32, table: u32) -> Self::Output {
1658
let at = self.context.stack.ensure_index_at(3)?;
1659
1660
self.context
1661
.stack
1662
.insert_many(at, &[table.try_into()?, elem.try_into()?]);
1663
1664
let builtin = self.env.builtins.table_init::<M::ABI>()?;
1665
FnCall::emit::<M>(
1666
&mut self.env,
1667
self.masm,
1668
&mut self.context,
1669
Callee::Builtin(builtin.clone()),
1670
)?;
1671
self.context.pop_and_free(self.masm)
1672
}
1673
1674
fn visit_table_copy(&mut self, dst: u32, src: u32) -> Self::Output {
1675
let at = self.context.stack.ensure_index_at(3)?;
1676
self.context
1677
.stack
1678
.insert_many(at, &[dst.try_into()?, src.try_into()?]);
1679
1680
let builtin = self.env.builtins.table_copy::<M::ABI>()?;
1681
FnCall::emit::<M>(
1682
&mut self.env,
1683
self.masm,
1684
&mut self.context,
1685
Callee::Builtin(builtin),
1686
)?;
1687
self.context.pop_and_free(self.masm)
1688
}
1689
1690
fn visit_table_get(&mut self, table: u32) -> Self::Output {
1691
let table_index = TableIndex::from_u32(table);
1692
let table = self.env.table(table_index);
1693
let heap_type = table.ref_type.heap_type;
1694
1695
match heap_type {
1696
WasmHeapType::Func => self.emit_lazy_init_funcref(table_index),
1697
_ => Err(format_err!(CodeGenError::unsupported_wasm_type())),
1698
}
1699
}
1700
1701
fn visit_table_grow(&mut self, table: u32) -> Self::Output {
1702
let table_index = TableIndex::from_u32(table);
1703
let table_ty = self.env.table(table_index);
1704
let builtin = match table_ty.ref_type.heap_type {
1705
WasmHeapType::Func => self.env.builtins.table_grow_func_ref::<M::ABI>()?,
1706
_ => bail!(CodeGenError::unsupported_wasm_type()),
1707
};
1708
1709
let len = self.context.stack.len();
1710
// table.grow` requires at least 2 elements on the value stack.
1711
let at = self.context.stack.ensure_index_at(2)?;
1712
1713
// The table_grow builtin expects the parameters in a different
1714
// order.
1715
// The value stack at this point should contain:
1716
// [ init_value | delta ] (stack top)
1717
// but the builtin function expects the init value as the last
1718
// argument.
1719
self.context.stack.inner_mut().swap(len - 1, len - 2);
1720
1721
let builtin = self.prepare_builtin_defined_table_arg(table_index, at, builtin)?;
1722
1723
FnCall::emit::<M>(&mut self.env, self.masm, &mut self.context, builtin)?;
1724
1725
Ok(())
1726
}

    fn visit_table_size(&mut self, table: u32) -> Self::Output {
        let table_index = TableIndex::from_u32(table);
        let table_data = self.env.resolve_table_data(table_index);
        self.emit_compute_table_size(&table_data)
    }

    fn visit_table_fill(&mut self, table: u32) -> Self::Output {
        let table_index = TableIndex::from_u32(table);
        let table_ty = self.env.table(table_index);

        ensure!(
            table_ty.ref_type.heap_type == WasmHeapType::Func,
            CodeGenError::unsupported_wasm_type()
        );

        let builtin = self.env.builtins.table_fill_func_ref::<M::ABI>()?;

        let at = self.context.stack.ensure_index_at(3)?;

        self.context.stack.insert_many(at, &[table.try_into()?]);
        FnCall::emit::<M>(
            &mut self.env,
            self.masm,
            &mut self.context,
            Callee::Builtin(builtin.clone()),
        )?;
        self.context.pop_and_free(self.masm)
    }

    fn visit_table_set(&mut self, table: u32) -> Self::Output {
        let ptr_type = self.env.ptr_type();
        let table_index = TableIndex::from_u32(table);
        let table_data = self.env.resolve_table_data(table_index);
        let table = self.env.table(table_index);
        match table.ref_type.heap_type {
            WasmHeapType::Func => {
                ensure!(
                    self.tunables.table_lazy_init,
                    CodeGenError::unsupported_table_eager_init()
                );
                let value = self.context.pop_to_reg(self.masm, None)?;
                let index = self.context.pop_to_reg(self.masm, None)?;
                let base = self.context.any_gpr(self.masm)?;
                let elem_addr =
                    self.emit_compute_table_elem_addr(index.into(), base, &table_data)?;
                // Set the initialized bit.
                self.masm.or(
                    writable!(value.into()),
                    value.into(),
                    RegImm::i64(FUNCREF_INIT_BIT as i64),
                    ptr_type.try_into()?,
                )?;

                self.masm.store_ptr(value.into(), elem_addr)?;

                self.context.free_reg(value);
                self.context.free_reg(index);
                self.context.free_reg(base);
                Ok(())
            }
            _ => Err(format_err!(CodeGenError::unsupported_wasm_type())),
        }
    }

    fn visit_elem_drop(&mut self, index: u32) -> Self::Output {
        let elem_drop = self.env.builtins.elem_drop::<M::ABI>()?;
        self.context.stack.extend([index.try_into()?]);
        FnCall::emit::<M>(
            &mut self.env,
            self.masm,
            &mut self.context,
            Callee::Builtin(elem_drop),
        )?;
        self.context.pop_and_free(self.masm)
    }

    fn visit_memory_init(&mut self, data_index: u32, mem: u32) -> Self::Output {
        let at = self.context.stack.ensure_index_at(3)?;
        self.context
            .stack
            .insert_many(at, &[mem.try_into()?, data_index.try_into()?]);
        let builtin = self.env.builtins.memory_init::<M::ABI>()?;
        FnCall::emit::<M>(
            &mut self.env,
            self.masm,
            &mut self.context,
            Callee::Builtin(builtin),
        )?;
        self.context.pop_and_free(self.masm)
    }

    fn visit_memory_copy(&mut self, dst_mem: u32, src_mem: u32) -> Self::Output {
        // At this point, the stack is expected to contain:
        //     [ dst_offset, src_offset, len ]
        // The following code inserts the missing params, so that the stack
        // contains:
        //     [ vmctx, dst_mem, dst_offset, src_mem, src_offset, len ]
        // which is the order expected by the builtin function.
        let _ = self.context.stack.ensure_index_at(3)?;
        let at = self.context.stack.ensure_index_at(2)?;
        self.context.stack.insert_many(at, &[src_mem.try_into()?]);

        // One element was inserted above, so instead of 3, we use 4.
        let at = self.context.stack.ensure_index_at(4)?;
        self.context.stack.insert_many(at, &[dst_mem.try_into()?]);

        let builtin = self.env.builtins.memory_copy::<M::ABI>()?;

        FnCall::emit::<M>(
            &mut self.env,
            self.masm,
            &mut self.context,
            Callee::Builtin(builtin),
        )?;
        self.context.pop_and_free(self.masm)
    }

    fn visit_memory_fill(&mut self, mem: u32) -> Self::Output {
        let at = self.context.stack.ensure_index_at(3)?;
        let mem = MemoryIndex::from_u32(mem);

        let builtin = self.env.builtins.memory_fill::<M::ABI>()?;
        let builtin = self.prepare_builtin_defined_memory_arg(mem, at, builtin)?;

        FnCall::emit::<M>(&mut self.env, self.masm, &mut self.context, builtin)?;
        self.context.pop_and_free(self.masm)
    }

    fn visit_memory_size(&mut self, mem: u32) -> Self::Output {
        let heap = self.env.resolve_heap(MemoryIndex::from_u32(mem));
        self.emit_compute_memory_size(&heap)
    }

    fn visit_memory_grow(&mut self, mem: u32) -> Self::Output {
        let at = self.context.stack.ensure_index_at(1)?;
        let mem = MemoryIndex::from_u32(mem);
        // The stack at this point contains: [ delta ]
        // The desired state is: [ vmctx, delta, index ]
        let builtin = self.env.builtins.memory_grow::<M::ABI>()?;
        let builtin = self.prepare_builtin_defined_memory_arg(mem, at + 1, builtin)?;

        let heap = self.env.resolve_heap(mem);
        FnCall::emit::<M>(&mut self.env, self.masm, &mut self.context, builtin)?;

        // The memory32_grow builtin returns a pointer type, therefore we must
        // ensure that the return type is representative of the address space
        // of the heap type.
        match (self.env.ptr_type(), heap.index_type()) {
            (WasmValType::I64, WasmValType::I64) => Ok(()),
            // When the heap type is smaller than the pointer type, we adjust
            // the result of the memory32_grow builtin.
            (WasmValType::I64, WasmValType::I32) => {
                let top: Reg = self.context.pop_to_reg(self.masm, None)?.into();
                self.masm.wrap(writable!(top), top)?;
                self.context.stack.push(TypedReg::i32(top).into());
                Ok(())
            }
            _ => Err(format_err!(CodeGenError::unsupported_32_bit_platform())),
        }
    }

    fn visit_data_drop(&mut self, data_index: u32) -> Self::Output {
        self.context.stack.extend([data_index.try_into()?]);

        let builtin = self.env.builtins.data_drop::<M::ABI>()?;
        FnCall::emit::<M>(
            &mut self.env,
            self.masm,
            &mut self.context,
            Callee::Builtin(builtin),
        )?;
        self.context.pop_and_free(self.masm)
    }

    fn visit_nop(&mut self) -> Self::Output {
        Ok(())
    }

    fn visit_if(&mut self, blockty: BlockType) -> Self::Output {
        self.control_frames.push(ControlStackFrame::r#if(
            self.env.resolve_block_sig(blockty)?,
            self.masm,
            &mut self.context,
        )?);

        Ok(())
    }

    fn visit_else(&mut self) -> Self::Output {
        if !self.context.reachable {
            self.handle_unreachable_else()
        } else {
            let control = self
                .control_frames
                .last_mut()
                .ok_or_else(|| CodeGenError::control_frame_expected())?;
            control.emit_else(self.masm, &mut self.context)
        }
    }

    fn visit_block(&mut self, blockty: BlockType) -> Self::Output {
        self.control_frames.push(ControlStackFrame::block(
            self.env.resolve_block_sig(blockty)?,
            self.masm,
            &mut self.context,
        )?);

        Ok(())
    }

    fn visit_loop(&mut self, blockty: BlockType) -> Self::Output {
        self.control_frames.push(ControlStackFrame::r#loop(
            self.env.resolve_block_sig(blockty)?,
            self.masm,
            &mut self.context,
        )?);

        self.maybe_emit_epoch_check()?;
        self.maybe_emit_fuel_check()
    }

    fn visit_br(&mut self, depth: u32) -> Self::Output {
        let index = control_index(depth, self.control_frames.len())?;
        let frame = &mut self.control_frames[index];
        self.context
            .br::<_, _, UnconditionalBranch>(frame, self.masm, |masm, cx, frame| {
                frame.pop_abi_results::<M, _>(cx, masm, |results, _, _| {
                    Ok(results.ret_area().copied())
                })
            })
    }

    fn visit_br_if(&mut self, depth: u32) -> Self::Output {
        let index = control_index(depth, self.control_frames.len())?;
        let frame = &mut self.control_frames[index];
        frame.set_as_target();

        let top = {
            let top = self.context.without::<Result<TypedReg>, M, _>(
                frame.results::<M>()?.regs(),
                self.masm,
                |ctx, masm| ctx.pop_to_reg(masm, None),
            )??;
            // Explicitly save any live registers and locals before setting up
            // the branch state. In some cases, calculating the `top` value
            // above will already have triggered a spill, making the following
            // spill a no-op.
            self.context.spill(self.masm)?;
            frame.top_abi_results::<M, _>(
                &mut self.context,
                self.masm,
                |results, context, masm| {
                    // In the case of `br_if` there's a possibility that we'll
                    // exit early from the block or fall through. For a
                    // fallthrough, we cannot rely on the pre-computed return
                    // area; it must be recalculated so that any values that
                    // are generated are correctly placed near the current
                    // stack pointer.
                    if results.on_stack() {
                        let stack_consumed = context.stack.sizeof(results.stack_operands_len());
                        let base = masm.sp_offset()?.as_u32() - stack_consumed;
                        let offs = base + results.size();
                        Ok(Some(RetArea::sp(SPOffset::from_u32(offs))))
                    } else {
                        Ok(None)
                    }
                },
            )?;
            top
        };

        // Emit instructions to balance the machine stack.
        let current_sp_offset = self.masm.sp_offset()?;
        let unbalanced = frame.unbalanced(self.masm)?;
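        // When the target frame's stack requires balancing we can't branch
        // directly to its label on the condition: instead, invert the
        // condition and skip over the balancing code below whenever the
        // branch is not taken.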
        let (label, cmp) = if unbalanced {
            (self.masm.get_label()?, IntCmpKind::Eq)
        } else {
            (*frame.label(), IntCmpKind::Ne)
        };

        self.masm
            .branch(cmp, top.reg, top.reg.into(), label, OperandSize::S32)?;
        self.context.free_reg(top);

        if unbalanced {
            self.context
                .br::<_, _, ConditionalBranch>(frame, self.masm, |_, _, _| Ok(()))?;

            // Restore sp_offset to what it was for falling through and emit
            // the fallthrough label.
            self.masm.reset_stack_pointer(current_sp_offset)?;
            self.masm.bind(label)?;
        }

        Ok(())
    }

    fn visit_br_table(&mut self, targets: BrTable<'a>) -> Self::Output {
        // +1 to account for the default target.
        let len = targets.len() + 1;
        // SmallVec<[_; 5]> to match the binary emission layer (e.g. see
        // `JmpTableSeq`), but here we use 5 since we also bundle the default
        // target as the last element in the array.
        let mut labels: SmallVec<[_; 5]> = smallvec![];
        for _ in 0..len {
            labels.push(self.masm.get_label()?);
        }

        // Find the innermost target and use it as the relative frame
        // for result handling below.
        //
        // This approach ensures that
        // 1. The stack pointer offset is correctly positioned according
        //    to the expectations of the innermost block end sequence.
        // 2. We meet the jump site invariants introduced by
        //    `CodegenContext::br`, which take advantage of Wasm semantics,
        //    given that all jumps are "outward".
        let mut innermost = targets.default();
        for target in targets.targets() {
            let target = target?;
            if target < innermost {
                innermost = target;
            }
        }

        let innermost_index = control_index(innermost, self.control_frames.len())?;
        let innermost_frame = &mut self.control_frames[innermost_index];
        let innermost_result = innermost_frame.results::<M>()?;

        let (index, tmp) = {
            let index_and_tmp = self.context.without::<Result<(TypedReg, _)>, M, _>(
                innermost_result.regs(),
                self.masm,
                |cx, masm| Ok((cx.pop_to_reg(masm, None)?, cx.any_gpr(masm)?)),
            )??;

            // Materialize any constants or locals into their result
            // representation, so that when reachability is restored,
            // they are correctly located. NB: the results are popped
            // according to the innermost branch target specified for
            // `br_table`, which implies that the machine stack will
            // be correctly balanced, by virtue of calling
            // `pop_abi_results`.

            // It's possible that we need to balance the stack for the
            // rest of the targets, which will be done before emitting
            // the unconditional jump below.
            innermost_frame.pop_abi_results::<M, _>(
                &mut self.context,
                self.masm,
                |results, _, _| Ok(results.ret_area().copied()),
            )?;
            index_and_tmp
        };

        self.masm.jmp_table(&labels, index.into(), tmp)?;
        // Save the original stack pointer offset; we will reset the stack
        // pointer to this offset after jumping to each of the targets. Each
        // jump might adjust the stack according to the base offset of the
        // target.
        let current_sp = self.masm.sp_offset()?;

        for (t, l) in targets
            .targets()
            .chain(std::iter::once(Ok(targets.default())))
            .zip(labels.iter())
        {
            let control_index = control_index(t?, self.control_frames.len())?;
            let frame = &mut self.control_frames[control_index];
            // Reset the stack pointer to its original offset. This is needed
            // because each jump will potentially adjust the stack pointer
            // according to the base offset of the target.
            self.masm.reset_stack_pointer(current_sp)?;

            // NB: We don't perform any result handling as it was
            // already taken care of above, before jumping to the
            // jump table.
            self.masm.bind(*l)?;
            // Ensure that the stack pointer is correctly positioned before
            // jumping to the jump table code.
            self.context
                .br::<_, _, UnconditionalBranch>(frame, self.masm, |_, _, _| Ok(()))?;
        }
        // Finally, reset the stack pointer to the original location.
        // The reachability analysis will ensure it's correctly located
        // once reachability is restored.
        self.masm.reset_stack_pointer(current_sp)?;
        self.context.reachable = false;
        self.context.free_reg(index.reg);
        self.context.free_reg(tmp);

        Ok(())
    }

    fn visit_return(&mut self) -> Self::Output {
        // Grab the outermost frame, which is the function's body
        // frame. We don't rely on [`codegen::control_index`] since
        // this frame is implicit and we know that it should exist at
        // index 0.
        let outermost = &mut self.control_frames[0];
        self.context
            .br::<_, _, UnconditionalBranch>(outermost, self.masm, |masm, cx, frame| {
                frame.pop_abi_results::<M, _>(cx, masm, |results, _, _| {
                    Ok(results.ret_area().copied())
                })
            })
    }

    fn visit_unreachable(&mut self) -> Self::Output {
        self.masm.unreachable()?;
        self.context.reachable = false;
        // Set the implicit outermost frame as target to perform the necessary
        // stack clean up.
        let outermost = &mut self.control_frames[0];
        outermost.set_as_target();

        Ok(())
    }

    fn visit_local_tee(&mut self, index: u32) -> Self::Output {
        let typed_reg = self.emit_set_local(index)?;
        self.context.stack.push(typed_reg.into());

        Ok(())
    }

    fn visit_global_get(&mut self, global_index: u32) -> Self::Output {
        let index = GlobalIndex::from_u32(global_index);
        let (ty, base, offset) = self.emit_get_global_addr(index)?;
        let addr = self.masm.address_at_reg(base, offset)?;
        let dst = self.context.reg_for_type(ty, self.masm)?;
        self.masm.load(addr, writable!(dst), ty.try_into()?)?;
        self.context.stack.push(Val::reg(dst, ty));

        self.context.free_reg(base);

        Ok(())
    }

    fn visit_global_set(&mut self, global_index: u32) -> Self::Output {
        let index = GlobalIndex::from_u32(global_index);
        let (ty, base, offset) = self.emit_get_global_addr(index)?;
        let addr = self.masm.address_at_reg(base, offset)?;

        let typed_reg = self.context.pop_to_reg(self.masm, None)?;
        self.masm
            .store(typed_reg.reg.into(), addr, ty.try_into()?)?;
        self.context.free_reg(typed_reg.reg);
        self.context.free_reg(base);

        Ok(())
    }

    fn visit_drop(&mut self) -> Self::Output {
        self.context.drop_last(1, |regalloc, val| match val {
            Val::Reg(tr) => Ok(regalloc.free(tr.reg)),
            Val::Memory(m) => self.masm.free_stack(m.slot.size),
            _ => Ok(()),
        })
    }
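
    // Wasm `select` picks its first operand when the condition is non-zero
    // and its second operand otherwise; e.g.
    // `(select (i32.const 1) (i32.const 2) (i32.const 0))` evaluates to 2.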
    fn visit_select(&mut self) -> Self::Output {
        let cond = self.context.pop_to_reg(self.masm, None)?;
        let val2 = self.context.pop_to_reg(self.masm, None)?;
        let val1 = self.context.pop_to_reg(self.masm, None)?;
        self.masm.cmp(cond.reg, RegImm::i32(0), OperandSize::S32)?;
        // Conditionally move val1 to val2 if the comparison is
        // not zero.
        self.masm.cmov(
            writable!(val2.into()),
            val1.into(),
            IntCmpKind::Ne,
            val1.ty.try_into()?,
        )?;
        self.context.stack.push(val2.into());
        self.context.free_reg(val1.reg);
        self.context.free_reg(cond);

        Ok(())
    }

    fn visit_i32_load(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::I32,
            LoadKind::Operand(OperandSize::S32),
        )
    }

    fn visit_i32_load8_s(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::I32,
            LoadKind::ScalarExtend(Extend::<Signed>::I32Extend8.into()),
        )
    }

    fn visit_i32_load8_u(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::I32,
            LoadKind::ScalarExtend(Extend::<Zero>::I32Extend8.into()),
        )
    }

    fn visit_i32_load16_s(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::I32,
            LoadKind::ScalarExtend(Extend::<Signed>::I32Extend16.into()),
        )
    }

    fn visit_i32_load16_u(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::I32,
            LoadKind::ScalarExtend(Extend::<Zero>::I32Extend16.into()),
        )
    }

    fn visit_i32_store(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_store(&memarg, StoreKind::Operand(OperandSize::S32))
    }

    fn visit_i32_store8(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_store(&memarg, StoreKind::Operand(OperandSize::S8))
    }

    fn visit_i32_store16(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_store(&memarg, StoreKind::Operand(OperandSize::S16))
    }

    fn visit_i64_load8_s(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::I64,
            LoadKind::ScalarExtend(Extend::<Signed>::I64Extend8.into()),
        )
    }

    fn visit_i64_load8_u(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::I64,
            LoadKind::ScalarExtend(Extend::<Zero>::I64Extend8.into()),
        )
    }

    fn visit_i64_load16_u(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::I64,
            LoadKind::ScalarExtend(Extend::<Zero>::I64Extend16.into()),
        )
    }

    fn visit_i64_load16_s(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::I64,
            LoadKind::ScalarExtend(Extend::<Signed>::I64Extend16.into()),
        )
    }

    fn visit_i64_load32_u(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::I64,
            LoadKind::ScalarExtend(Extend::<Zero>::I64Extend32.into()),
        )
    }

    fn visit_i64_load32_s(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::I64,
            LoadKind::ScalarExtend(Extend::<Signed>::I64Extend32.into()),
        )
    }

    fn visit_i64_load(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::I64,
            LoadKind::Operand(OperandSize::S64),
        )
    }

    fn visit_i64_store(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_store(&memarg, StoreKind::Operand(OperandSize::S64))
    }

    fn visit_i64_store8(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_store(&memarg, StoreKind::Operand(OperandSize::S8))
    }

    fn visit_i64_store16(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_store(&memarg, StoreKind::Operand(OperandSize::S16))
    }

    fn visit_i64_store32(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_store(&memarg, StoreKind::Operand(OperandSize::S32))
    }

    fn visit_f32_load(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::F32,
            LoadKind::Operand(OperandSize::S32),
        )
    }

    fn visit_f32_store(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_store(&memarg, StoreKind::Operand(OperandSize::S32))
    }

    fn visit_f64_load(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::F64,
            LoadKind::Operand(OperandSize::S64),
        )
    }

    fn visit_f64_store(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_store(&memarg, StoreKind::Operand(OperandSize::S64))
    }
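
    // All of the `*_trunc_sat_*` visitors below pass `TruncKind::Checked`,
    // the kind used for the saturating variants: per the Wasm spec these
    // conversions never trap; out-of-range inputs clamp to the destination
    // type's minimum or maximum and NaN becomes zero.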
    fn visit_i32_trunc_sat_f32_s(&mut self) -> Self::Output {
        use OperandSize::*;

        self.context
            .convert_op(self.masm, WasmValType::I32, |masm, dst, src, dst_size| {
                masm.signed_truncate(writable!(dst), src, S32, dst_size, TruncKind::Checked)
            })
    }

    fn visit_i32_trunc_sat_f32_u(&mut self) -> Self::Output {
        use OperandSize::*;

        self.masm
            .unsigned_truncate(&mut self.context, S32, S32, TruncKind::Checked)
    }

    fn visit_i32_trunc_sat_f64_s(&mut self) -> Self::Output {
        use OperandSize::*;

        self.context
            .convert_op(self.masm, WasmValType::I32, |masm, dst, src, dst_size| {
                masm.signed_truncate(writable!(dst), src, S64, dst_size, TruncKind::Checked)
            })
    }

    fn visit_i32_trunc_sat_f64_u(&mut self) -> Self::Output {
        use OperandSize::*;

        self.masm
            .unsigned_truncate(&mut self.context, S64, S32, TruncKind::Checked)
    }

    fn visit_i64_trunc_sat_f32_s(&mut self) -> Self::Output {
        use OperandSize::*;

        self.context
            .convert_op(self.masm, WasmValType::I64, |masm, dst, src, dst_size| {
                masm.signed_truncate(writable!(dst), src, S32, dst_size, TruncKind::Checked)
            })
    }

    fn visit_i64_trunc_sat_f32_u(&mut self) -> Self::Output {
        use OperandSize::*;

        self.masm
            .unsigned_truncate(&mut self.context, S32, S64, TruncKind::Checked)
    }

    fn visit_i64_trunc_sat_f64_s(&mut self) -> Self::Output {
        use OperandSize::*;

        self.context
            .convert_op(self.masm, WasmValType::I64, |masm, dst, src, dst_size| {
                masm.signed_truncate(writable!(dst), src, S64, dst_size, TruncKind::Checked)
            })
    }

    fn visit_i64_trunc_sat_f64_u(&mut self) -> Self::Output {
        use OperandSize::*;

        self.masm
            .unsigned_truncate(&mut self.context, S64, S64, TruncKind::Checked)
    }
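
    // `i64.add128` and `i64.sub128` come from the wide-arithmetic proposal:
    // each 128-bit operand is represented as a lo/hi pair of i64 values and
    // the result is written back into the lhs register pair.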
    fn visit_i64_add128(&mut self) -> Self::Output {
        self.context
            .binop128(self.masm, |masm, lhs_lo, lhs_hi, rhs_lo, rhs_hi| {
                masm.add128(
                    writable!(lhs_lo),
                    writable!(lhs_hi),
                    lhs_lo,
                    lhs_hi,
                    rhs_lo,
                    rhs_hi,
                )?;
                Ok((TypedReg::i64(lhs_lo), TypedReg::i64(lhs_hi)))
            })
    }

    fn visit_i64_sub128(&mut self) -> Self::Output {
        self.context
            .binop128(self.masm, |masm, lhs_lo, lhs_hi, rhs_lo, rhs_hi| {
                masm.sub128(
                    writable!(lhs_lo),
                    writable!(lhs_hi),
                    lhs_lo,
                    lhs_hi,
                    rhs_lo,
                    rhs_hi,
                )?;
                Ok((TypedReg::i64(lhs_lo), TypedReg::i64(lhs_hi)))
            })
    }
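
    // `i64.mul_wide_{s,u}` (also wide-arithmetic) compute the full 128-bit
    // product of two i64 operands, returned as a lo/hi pair of i64 values.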
    fn visit_i64_mul_wide_s(&mut self) -> Self::Output {
        self.masm.mul_wide(&mut self.context, MulWideKind::Signed)
    }

    fn visit_i64_mul_wide_u(&mut self) -> Self::Output {
        self.masm.mul_wide(&mut self.context, MulWideKind::Unsigned)
    }
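
    // Narrow atomic loads are always zero-extending (`_u`): the threads
    // proposal defines no sign-extending variants, so each narrow load pairs
    // its access width with a zero-extension to the result type.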
    fn visit_i32_atomic_load8_u(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::I32,
            LoadKind::Atomic(OperandSize::S8, Some(Extend::<Zero>::I32Extend8.into())),
        )
    }

    fn visit_i32_atomic_load16_u(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::I32,
            LoadKind::Atomic(OperandSize::S16, Some(Extend::<Zero>::I32Extend16.into())),
        )
    }

    fn visit_i32_atomic_load(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::I32,
            LoadKind::Atomic(OperandSize::S32, None),
        )
    }

    fn visit_i64_atomic_load8_u(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::I64,
            LoadKind::Atomic(OperandSize::S8, Some(Extend::<Zero>::I64Extend8.into())),
        )
    }

    fn visit_i64_atomic_load16_u(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::I64,
            LoadKind::Atomic(OperandSize::S16, Some(Extend::<Zero>::I64Extend16.into())),
        )
    }

    fn visit_i64_atomic_load32_u(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::I64,
            LoadKind::Atomic(OperandSize::S32, Some(Extend::<Zero>::I64Extend32.into())),
        )
    }

    fn visit_i64_atomic_load(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::I64,
            LoadKind::Atomic(OperandSize::S64, None),
        )
    }

    fn visit_i32_atomic_store(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_store(&memarg, StoreKind::Atomic(OperandSize::S32))
    }

    fn visit_i64_atomic_store(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_store(&memarg, StoreKind::Atomic(OperandSize::S64))
    }

    fn visit_i32_atomic_store8(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_store(&memarg, StoreKind::Atomic(OperandSize::S8))
    }

    fn visit_i32_atomic_store16(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_store(&memarg, StoreKind::Atomic(OperandSize::S16))
    }

    fn visit_i64_atomic_store8(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_store(&memarg, StoreKind::Atomic(OperandSize::S8))
    }

    fn visit_i64_atomic_store16(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_store(&memarg, StoreKind::Atomic(OperandSize::S16))
    }

    fn visit_i64_atomic_store32(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_store(&memarg, StoreKind::Atomic(OperandSize::S32))
    }
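
    // Narrow atomic RMW operations likewise return the old value
    // zero-extended to the full result width; e.g. `i32.atomic.rmw8.add_u`
    // yields the previous 8-bit value zero-extended to 32 bits.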
    fn visit_i32_atomic_rmw8_add_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(
            &arg,
            RmwOp::Add,
            OperandSize::S8,
            Some(Extend::<Zero>::I32Extend8),
        )
    }

    fn visit_i32_atomic_rmw16_add_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(
            &arg,
            RmwOp::Add,
            OperandSize::S16,
            Some(Extend::<Zero>::I32Extend16),
        )
    }

    fn visit_i32_atomic_rmw_add(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(&arg, RmwOp::Add, OperandSize::S32, None)
    }

    fn visit_i64_atomic_rmw8_add_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(
            &arg,
            RmwOp::Add,
            OperandSize::S8,
            Some(Extend::<Zero>::I64Extend8),
        )
    }

    fn visit_i64_atomic_rmw16_add_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(
            &arg,
            RmwOp::Add,
            OperandSize::S16,
            Some(Extend::<Zero>::I64Extend16),
        )
    }

    fn visit_i64_atomic_rmw32_add_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(
            &arg,
            RmwOp::Add,
            OperandSize::S32,
            Some(Extend::<Zero>::I64Extend32),
        )
    }

    fn visit_i64_atomic_rmw_add(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(&arg, RmwOp::Add, OperandSize::S64, None)
    }

    fn visit_i32_atomic_rmw8_sub_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(
            &arg,
            RmwOp::Sub,
            OperandSize::S8,
            Some(Extend::<Zero>::I32Extend8),
        )
    }

    fn visit_i32_atomic_rmw16_sub_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(
            &arg,
            RmwOp::Sub,
            OperandSize::S16,
            Some(Extend::<Zero>::I32Extend16),
        )
    }

    fn visit_i32_atomic_rmw_sub(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(&arg, RmwOp::Sub, OperandSize::S32, None)
    }

    fn visit_i64_atomic_rmw8_sub_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(
            &arg,
            RmwOp::Sub,
            OperandSize::S8,
            Some(Extend::<Zero>::I64Extend8),
        )
    }

    fn visit_i64_atomic_rmw16_sub_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(
            &arg,
            RmwOp::Sub,
            OperandSize::S16,
            Some(Extend::<Zero>::I64Extend16),
        )
    }

    fn visit_i64_atomic_rmw32_sub_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(
            &arg,
            RmwOp::Sub,
            OperandSize::S32,
            Some(Extend::<Zero>::I64Extend32),
        )
    }

    fn visit_i64_atomic_rmw_sub(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(&arg, RmwOp::Sub, OperandSize::S64, None)
    }

    fn visit_i32_atomic_rmw8_xchg_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(
            &arg,
            RmwOp::Xchg,
            OperandSize::S8,
            Some(Extend::<Zero>::I32Extend8),
        )
    }

    fn visit_i32_atomic_rmw16_xchg_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(
            &arg,
            RmwOp::Xchg,
            OperandSize::S16,
            Some(Extend::<Zero>::I32Extend16),
        )
    }

    fn visit_i32_atomic_rmw_xchg(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(&arg, RmwOp::Xchg, OperandSize::S32, None)
    }

    fn visit_i64_atomic_rmw8_xchg_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(
            &arg,
            RmwOp::Xchg,
            OperandSize::S8,
            Some(Extend::<Zero>::I64Extend8),
        )
    }

    fn visit_i64_atomic_rmw16_xchg_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(
            &arg,
            RmwOp::Xchg,
            OperandSize::S16,
            Some(Extend::<Zero>::I64Extend16),
        )
    }

    fn visit_i64_atomic_rmw32_xchg_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(
            &arg,
            RmwOp::Xchg,
            OperandSize::S32,
            Some(Extend::<Zero>::I64Extend32),
        )
    }

    fn visit_i64_atomic_rmw_xchg(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(&arg, RmwOp::Xchg, OperandSize::S64, None)
    }

    fn visit_i32_atomic_rmw8_and_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(
            &arg,
            RmwOp::And,
            OperandSize::S8,
            Some(Extend::<Zero>::I32Extend8),
        )
    }

    fn visit_i32_atomic_rmw16_and_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(
            &arg,
            RmwOp::And,
            OperandSize::S16,
            Some(Extend::<Zero>::I32Extend16),
        )
    }

    fn visit_i32_atomic_rmw_and(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(&arg, RmwOp::And, OperandSize::S32, None)
    }

    fn visit_i64_atomic_rmw8_and_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(
            &arg,
            RmwOp::And,
            OperandSize::S8,
            Some(Extend::<Zero>::I64Extend8),
        )
    }

    fn visit_i64_atomic_rmw16_and_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(
            &arg,
            RmwOp::And,
            OperandSize::S16,
            Some(Extend::<Zero>::I64Extend16),
        )
    }

    fn visit_i64_atomic_rmw32_and_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(
            &arg,
            RmwOp::And,
            OperandSize::S32,
            Some(Extend::<Zero>::I64Extend32),
        )
    }

    fn visit_i64_atomic_rmw_and(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(&arg, RmwOp::And, OperandSize::S64, None)
    }

    fn visit_i32_atomic_rmw8_or_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(
            &arg,
            RmwOp::Or,
            OperandSize::S8,
            Some(Extend::<Zero>::I32Extend8),
        )
    }

    fn visit_i32_atomic_rmw16_or_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(
            &arg,
            RmwOp::Or,
            OperandSize::S16,
            Some(Extend::<Zero>::I32Extend16),
        )
    }

    fn visit_i32_atomic_rmw_or(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(&arg, RmwOp::Or, OperandSize::S32, None)
    }

    fn visit_i64_atomic_rmw8_or_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(
            &arg,
            RmwOp::Or,
            OperandSize::S8,
            Some(Extend::<Zero>::I64Extend8),
        )
    }

    fn visit_i64_atomic_rmw16_or_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(
            &arg,
            RmwOp::Or,
            OperandSize::S16,
            Some(Extend::<Zero>::I64Extend16),
        )
    }

    fn visit_i64_atomic_rmw32_or_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(
            &arg,
            RmwOp::Or,
            OperandSize::S32,
            Some(Extend::<Zero>::I64Extend32),
        )
    }

    fn visit_i64_atomic_rmw_or(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(&arg, RmwOp::Or, OperandSize::S64, None)
    }

    fn visit_i32_atomic_rmw8_xor_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(
            &arg,
            RmwOp::Xor,
            OperandSize::S8,
            Some(Extend::<Zero>::I32Extend8),
        )
    }

    fn visit_i32_atomic_rmw16_xor_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(
            &arg,
            RmwOp::Xor,
            OperandSize::S16,
            Some(Extend::<Zero>::I32Extend16),
        )
    }

    fn visit_i32_atomic_rmw_xor(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(&arg, RmwOp::Xor, OperandSize::S32, None)
    }

    fn visit_i64_atomic_rmw8_xor_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(
            &arg,
            RmwOp::Xor,
            OperandSize::S8,
            Some(Extend::<Zero>::I64Extend8),
        )
    }

    fn visit_i64_atomic_rmw16_xor_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(
            &arg,
            RmwOp::Xor,
            OperandSize::S16,
            Some(Extend::<Zero>::I64Extend16),
        )
    }

    fn visit_i64_atomic_rmw32_xor_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(
            &arg,
            RmwOp::Xor,
            OperandSize::S32,
            Some(Extend::<Zero>::I64Extend32),
        )
    }

    fn visit_i64_atomic_rmw_xor(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_rmw(&arg, RmwOp::Xor, OperandSize::S64, None)
    }
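
    // Atomic compare-exchange follows the same convention: the narrow
    // variants zero-extend the loaded (old) value into the full result type.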
    fn visit_i32_atomic_rmw8_cmpxchg_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_cmpxchg(&arg, OperandSize::S8, Some(Extend::I32Extend8))
    }

    fn visit_i32_atomic_rmw16_cmpxchg_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_cmpxchg(&arg, OperandSize::S16, Some(Extend::I32Extend16))
    }

    fn visit_i32_atomic_rmw_cmpxchg(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_cmpxchg(&arg, OperandSize::S32, None)
    }

    fn visit_i64_atomic_rmw8_cmpxchg_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_cmpxchg(&arg, OperandSize::S8, Some(Extend::I64Extend8))
    }

    fn visit_i64_atomic_rmw16_cmpxchg_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_cmpxchg(&arg, OperandSize::S16, Some(Extend::I64Extend16))
    }

    fn visit_i64_atomic_rmw32_cmpxchg_u(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_cmpxchg(&arg, OperandSize::S32, Some(Extend::I64Extend32))
    }

    fn visit_i64_atomic_rmw_cmpxchg(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_cmpxchg(&arg, OperandSize::S64, None)
    }

    fn visit_memory_atomic_wait32(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_wait(&arg, AtomicWaitKind::Wait32)
    }

    fn visit_memory_atomic_wait64(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_wait(&arg, AtomicWaitKind::Wait64)
    }

    fn visit_memory_atomic_notify(&mut self, arg: MemArg) -> Self::Output {
        self.emit_atomic_notify(&arg)
    }

    fn visit_atomic_fence(&mut self) -> Self::Output {
        self.masm.fence()
    }

    wasmparser::for_each_visit_operator!(def_unsupported);
}

impl<'a, 'translation, 'data, M> VisitSimdOperator<'a>
    for CodeGen<'a, 'translation, 'data, M, Emission>
where
    M: MacroAssembler,
{
    fn visit_v128_const(&mut self, val: V128) -> Self::Output {
        self.context.stack.push(Val::v128(val.i128()));
        Ok(())
    }

    fn visit_v128_load(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::V128,
            LoadKind::Operand(OperandSize::S128),
        )
    }

    fn visit_v128_store(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_store(&memarg, StoreKind::Operand(OperandSize::S128))
    }

    fn visit_v128_load8x8_s(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::V128,
            LoadKind::VectorExtend(V128LoadExtendKind::E8x8S),
        )
    }

    fn visit_v128_load8x8_u(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::V128,
            LoadKind::VectorExtend(V128LoadExtendKind::E8x8U),
        )
    }

    fn visit_v128_load16x4_s(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::V128,
            LoadKind::VectorExtend(V128LoadExtendKind::E16x4S),
        )
    }

    fn visit_v128_load16x4_u(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::V128,
            LoadKind::VectorExtend(V128LoadExtendKind::E16x4U),
        )
    }

    fn visit_v128_load32x2_s(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::V128,
            LoadKind::VectorExtend(V128LoadExtendKind::E32x2S),
        )
    }

    fn visit_v128_load32x2_u(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::V128,
            LoadKind::VectorExtend(V128LoadExtendKind::E32x2U),
        )
    }

    fn visit_v128_load8_splat(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::V128,
            LoadKind::Splat(SplatLoadKind::S8),
        )
    }

    fn visit_v128_load16_splat(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::V128,
            LoadKind::Splat(SplatLoadKind::S16),
        )
    }

    fn visit_v128_load32_splat(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::V128,
            LoadKind::Splat(SplatLoadKind::S32),
        )
    }

    fn visit_v128_load64_splat(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::V128,
            LoadKind::Splat(SplatLoadKind::S64),
        )
    }

    fn visit_i8x16_splat(&mut self) -> Self::Output {
        self.masm.splat(&mut self.context, SplatKind::I8x16)
    }

    fn visit_i16x8_splat(&mut self) -> Self::Output {
        self.masm.splat(&mut self.context, SplatKind::I16x8)
    }

    fn visit_i32x4_splat(&mut self) -> Self::Output {
        self.masm.splat(&mut self.context, SplatKind::I32x4)
    }

    fn visit_i64x2_splat(&mut self) -> Self::Output {
        self.masm.splat(&mut self.context, SplatKind::I64x2)
    }

    fn visit_f32x4_splat(&mut self) -> Self::Output {
        self.masm.splat(&mut self.context, SplatKind::F32x4)
    }

    fn visit_f64x2_splat(&mut self) -> Self::Output {
        self.masm.splat(&mut self.context, SplatKind::F64x2)
    }

    fn visit_i8x16_shuffle(&mut self, lanes: [u8; 16]) -> Self::Output {
        let rhs = self.context.pop_to_reg(self.masm, None)?;
        let lhs = self.context.pop_to_reg(self.masm, None)?;
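        // Each of the 16 lane indices selects one byte from the
        // concatenation of lhs and rhs: indices 0..=15 pick from lhs,
        // 16..=31 from rhs.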
        self.masm
            .shuffle(writable!(lhs.into()), lhs.into(), rhs.into(), lanes)?;
        self.context.stack.push(TypedReg::v128(lhs.into()).into());
        self.context.free_reg(rhs);
        Ok(())
    }

    fn visit_i8x16_swizzle(&mut self) -> Self::Output {
        let rhs = self.context.pop_to_reg(self.masm, None)?;
        let lhs = self.context.pop_to_reg(self.masm, None)?;
        self.masm
            .swizzle(writable!(lhs.into()), lhs.into(), rhs.into())?;
        self.context.stack.push(TypedReg::v128(lhs.into()).into());
        self.context.free_reg(rhs);
        Ok(())
    }

    fn visit_i8x16_extract_lane_s(&mut self, lane: u8) -> Self::Output {
        self.context.extract_lane_op(
            self.masm,
            ExtractLaneKind::I8x16S,
            |masm, src, dst, kind| masm.extract_lane(src, dst, lane, kind),
        )
    }

    fn visit_i8x16_extract_lane_u(&mut self, lane: u8) -> Self::Output {
        self.context.extract_lane_op(
            self.masm,
            ExtractLaneKind::I8x16U,
            |masm, src, dst, kind| masm.extract_lane(src, dst, lane, kind),
        )
    }

    fn visit_i16x8_extract_lane_s(&mut self, lane: u8) -> Self::Output {
        self.context.extract_lane_op(
            self.masm,
            ExtractLaneKind::I16x8S,
            |masm, src, dst, kind| masm.extract_lane(src, dst, lane, kind),
        )
    }

    fn visit_i16x8_extract_lane_u(&mut self, lane: u8) -> Self::Output {
        self.context.extract_lane_op(
            self.masm,
            ExtractLaneKind::I16x8U,
            |masm, src, dst, kind| masm.extract_lane(src, dst, lane, kind),
        )
    }

    fn visit_i32x4_extract_lane(&mut self, lane: u8) -> Self::Output {
        self.context
            .extract_lane_op(self.masm, ExtractLaneKind::I32x4, |masm, src, dst, kind| {
                masm.extract_lane(src, dst, lane, kind)
            })
    }

    fn visit_i64x2_extract_lane(&mut self, lane: u8) -> Self::Output {
        self.context
            .extract_lane_op(self.masm, ExtractLaneKind::I64x2, |masm, src, dst, kind| {
                masm.extract_lane(src, dst, lane, kind)
            })
    }

    fn visit_f32x4_extract_lane(&mut self, lane: u8) -> Self::Output {
        self.context
            .extract_lane_op(self.masm, ExtractLaneKind::F32x4, |masm, src, dst, kind| {
                masm.extract_lane(src, dst, lane, kind)
            })
    }

    fn visit_f64x2_extract_lane(&mut self, lane: u8) -> Self::Output {
        self.context
            .extract_lane_op(self.masm, ExtractLaneKind::F64x2, |masm, src, dst, kind| {
                masm.extract_lane(src, dst, lane, kind)
            })
    }

    fn visit_i8x16_eq(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
                masm.v128_eq(writable!(dst), dst, src, VectorEqualityKind::I8x16)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i16x8_eq(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
                masm.v128_eq(writable!(dst), dst, src, VectorEqualityKind::I16x8)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i32x4_eq(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
                masm.v128_eq(writable!(dst), dst, src, VectorEqualityKind::I32x4)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i64x2_eq(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
                masm.v128_eq(writable!(dst), dst, src, VectorEqualityKind::I64x2)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_f32x4_eq(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
                masm.v128_eq(writable!(dst), dst, src, VectorEqualityKind::F32x4)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_f64x2_eq(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
                masm.v128_eq(writable!(dst), dst, src, VectorEqualityKind::F64x2)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i8x16_ne(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
                masm.v128_ne(writable!(dst), dst, src, VectorEqualityKind::I8x16)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i16x8_ne(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
                masm.v128_ne(writable!(dst), dst, src, VectorEqualityKind::I16x8)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i32x4_ne(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
                masm.v128_ne(writable!(dst), dst, src, VectorEqualityKind::I32x4)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i64x2_ne(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
                masm.v128_ne(writable!(dst), dst, src, VectorEqualityKind::I64x2)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_f32x4_ne(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
                masm.v128_ne(writable!(dst), dst, src, VectorEqualityKind::F32x4)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_f64x2_ne(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
                masm.v128_ne(writable!(dst), dst, src, VectorEqualityKind::F64x2)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i8x16_lt_s(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
                masm.v128_lt(writable!(dst), dst, src, VectorCompareKind::I8x16S)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i8x16_lt_u(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
                masm.v128_lt(writable!(dst), dst, src, VectorCompareKind::I8x16U)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i16x8_lt_s(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
                masm.v128_lt(writable!(dst), dst, src, VectorCompareKind::I16x8S)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i16x8_lt_u(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
                masm.v128_lt(writable!(dst), dst, src, VectorCompareKind::I16x8U)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i32x4_lt_s(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
                masm.v128_lt(writable!(dst), dst, src, VectorCompareKind::I32x4S)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i32x4_lt_u(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
                masm.v128_lt(writable!(dst), dst, src, VectorCompareKind::I32x4U)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i64x2_lt_s(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
                masm.v128_lt(writable!(dst), dst, src, VectorCompareKind::I64x2S)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_f32x4_lt(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
                masm.v128_lt(writable!(dst), dst, src, VectorCompareKind::F32x4)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_f64x2_lt(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
                masm.v128_lt(writable!(dst), dst, src, VectorCompareKind::F64x2)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i8x16_le_s(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
                masm.v128_le(writable!(dst), dst, src, VectorCompareKind::I8x16S)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i8x16_le_u(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
                masm.v128_le(writable!(dst), dst, src, VectorCompareKind::I8x16U)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i16x8_le_s(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
                masm.v128_le(writable!(dst), dst, src, VectorCompareKind::I16x8S)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i16x8_le_u(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
                masm.v128_le(writable!(dst), dst, src, VectorCompareKind::I16x8U)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i32x4_le_s(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
                masm.v128_le(writable!(dst), dst, src, VectorCompareKind::I32x4S)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i32x4_le_u(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
                masm.v128_le(writable!(dst), dst, src, VectorCompareKind::I32x4U)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i64x2_le_s(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
                masm.v128_le(writable!(dst), dst, src, VectorCompareKind::I64x2S)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_f32x4_le(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
                masm.v128_le(writable!(dst), dst, src, VectorCompareKind::F32x4)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_f64x2_le(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
                masm.v128_le(writable!(dst), dst, src, VectorCompareKind::F64x2)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i8x16_gt_s(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
                masm.v128_gt(writable!(dst), dst, src, VectorCompareKind::I8x16S)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i8x16_gt_u(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
                masm.v128_gt(writable!(dst), dst, src, VectorCompareKind::I8x16U)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i16x8_gt_s(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
                masm.v128_gt(writable!(dst), dst, src, VectorCompareKind::I16x8S)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i16x8_gt_u(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
                masm.v128_gt(writable!(dst), dst, src, VectorCompareKind::I16x8U)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i32x4_gt_s(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
                masm.v128_gt(writable!(dst), dst, src, VectorCompareKind::I32x4S)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i32x4_gt_u(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
                masm.v128_gt(writable!(dst), dst, src, VectorCompareKind::I32x4U)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i64x2_gt_s(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
                masm.v128_gt(writable!(dst), dst, src, VectorCompareKind::I64x2S)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_f32x4_gt(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
                masm.v128_gt(writable!(dst), dst, src, VectorCompareKind::F32x4)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_f64x2_gt(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
                masm.v128_gt(writable!(dst), dst, src, VectorCompareKind::F64x2)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i8x16_ge_s(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
                masm.v128_ge(writable!(dst), dst, src, VectorCompareKind::I8x16S)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i8x16_ge_u(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
                masm.v128_ge(writable!(dst), dst, src, VectorCompareKind::I8x16U)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i16x8_ge_s(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
                masm.v128_ge(writable!(dst), dst, src, VectorCompareKind::I16x8S)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i16x8_ge_u(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
                masm.v128_ge(writable!(dst), dst, src, VectorCompareKind::I16x8U)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i32x4_ge_s(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
                masm.v128_ge(writable!(dst), dst, src, VectorCompareKind::I32x4S)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i32x4_ge_u(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
                masm.v128_ge(writable!(dst), dst, src, VectorCompareKind::I32x4U)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i64x2_ge_s(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
                masm.v128_ge(writable!(dst), dst, src, VectorCompareKind::I64x2S)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_f32x4_ge(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
                masm.v128_ge(writable!(dst), dst, src, VectorCompareKind::F32x4)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_f64x2_ge(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
                masm.v128_ge(writable!(dst), dst, src, VectorCompareKind::F64x2)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i8x16_replace_lane(&mut self, lane: u8) -> Self::Output {
        self.context
            .replace_lane_op(self.masm, ReplaceLaneKind::I8x16, |masm, src, dst, kind| {
                masm.replace_lane(src, dst, lane, kind)
            })
    }

    fn visit_i16x8_replace_lane(&mut self, lane: u8) -> Self::Output {
        self.context
            .replace_lane_op(self.masm, ReplaceLaneKind::I16x8, |masm, src, dst, kind| {
                masm.replace_lane(src, dst, lane, kind)
            })
    }

    fn visit_i32x4_replace_lane(&mut self, lane: u8) -> Self::Output {
        self.context
            .replace_lane_op(self.masm, ReplaceLaneKind::I32x4, |masm, src, dst, kind| {
                masm.replace_lane(src, dst, lane, kind)
            })
    }

    fn visit_i64x2_replace_lane(&mut self, lane: u8) -> Self::Output {
        self.context
            .replace_lane_op(self.masm, ReplaceLaneKind::I64x2, |masm, src, dst, kind| {
                masm.replace_lane(src, dst, lane, kind)
            })
    }

    fn visit_f32x4_replace_lane(&mut self, lane: u8) -> Self::Output {
        self.context
            .replace_lane_op(self.masm, ReplaceLaneKind::F32x4, |masm, src, dst, kind| {
                masm.replace_lane(src, dst, lane, kind)
            })
    }

    fn visit_f64x2_replace_lane(&mut self, lane: u8) -> Self::Output {
        self.context
            .replace_lane_op(self.masm, ReplaceLaneKind::F64x2, |masm, src, dst, kind| {
                masm.replace_lane(src, dst, lane, kind)
            })
    }
3539
3540
fn visit_v128_not(&mut self) -> Self::Output {
3541
self.context.unop(self.masm, |masm, reg| {
3542
masm.v128_not(writable!(reg))?;
3543
Ok(TypedReg::new(WasmValType::V128, reg))
3544
})
3545
}
3546
3547
fn visit_v128_and(&mut self) -> Self::Output {
3548
self.context
3549
.binop(self.masm, OperandSize::S128, |masm, dst, src, _size| {
3550
masm.v128_and(dst, src, writable!(dst))?;
3551
Ok(TypedReg::new(WasmValType::V128, dst))
3552
})
3553
}
3554
3555
fn visit_v128_andnot(&mut self) -> Self::Output {
3556
self.context
3557
.binop(self.masm, OperandSize::S128, |masm, dst, src, _size| {
3558
// careful here: and_not is *not* commutative: dst = !src1 & src2
3559
masm.v128_and_not(src, dst, writable!(dst))?;
3560
Ok(TypedReg::new(WasmValType::V128, dst))
3561
})
3562
}
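
    // Per the Wasm spec, `v128.andnot` computes `a & !b` for operands
    // `(a, b)`; since `v128_and_not(x, y, out)` produces `out = !x & y`
    // (as the comment above notes), the operands are passed as
    // `(src, dst)` so that the complement lands on the second Wasm
    // operand.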

    fn visit_v128_or(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S128, |masm, dst, src, _size| {
                masm.v128_or(src, dst, writable!(dst))?;
                Ok(TypedReg::new(WasmValType::V128, dst))
            })
    }

    fn visit_v128_xor(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S128, |masm, dst, src, _size| {
                masm.v128_xor(src, dst, writable!(dst))?;
                Ok(TypedReg::new(WasmValType::V128, dst))
            })
    }

    fn visit_v128_bitselect(&mut self) -> Self::Output {
        let mask = self.context.pop_to_reg(self.masm, None)?;
        let op2 = self.context.pop_to_reg(self.masm, None)?;
        let op1 = self.context.pop_to_reg(self.masm, None)?;
        let dst = self.context.any_fpr(self.masm)?;

        // careful here: bitselect is *not* commutative.
        self.masm
            .v128_bitselect(op1.reg, op2.reg, mask.reg, writable!(dst))?;

        self.context
            .stack
            .push(TypedReg::new(WasmValType::V128, dst).into());
        self.context.free_reg(op1);
        self.context.free_reg(op2);
        self.context.free_reg(mask);

        Ok(())
    }
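
    // Per the Wasm spec, `v128.bitselect` takes bits from the first
    // operand where the corresponding mask bit is 1 and from the second
    // operand where it is 0, i.e. conceptually:
    //
    //     dst = (op1 & mask) | (op2 & !mask)
    //
    // hence the pop order above (mask, then op2, then op1) and the fixed
    // operand order in the masm call.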

    fn visit_v128_any_true(&mut self) -> Self::Output {
        let src = self.context.pop_to_reg(self.masm, None)?;
        let dst = self.context.any_gpr(self.masm)?;

        self.masm.v128_any_true(src.reg, writable!(dst))?;

        self.context
            .stack
            .push(TypedReg::new(WasmValType::I32, dst).into());
        self.context.free_reg(src);

        Ok(())
    }
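
    // `v128.any_true` yields an `i32` (1 if any bit of the input is set,
    // 0 otherwise), which is why the result is allocated in a general
    // purpose register and pushed as `WasmValType::I32` rather than
    // `V128`.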

    fn visit_v128_load8_lane(&mut self, arg: MemArg, lane: u8) -> Self::Output {
        self.emit_wasm_load(
            &arg,
            WasmValType::V128,
            LoadKind::vector_lane(lane, OperandSize::S8),
        )
    }

    fn visit_v128_load16_lane(&mut self, arg: MemArg, lane: u8) -> Self::Output {
        self.emit_wasm_load(
            &arg,
            WasmValType::V128,
            LoadKind::vector_lane(lane, OperandSize::S16),
        )
    }

    fn visit_v128_load32_lane(&mut self, arg: MemArg, lane: u8) -> Self::Output {
        self.emit_wasm_load(
            &arg,
            WasmValType::V128,
            LoadKind::vector_lane(lane, OperandSize::S32),
        )
    }

    fn visit_v128_load64_lane(&mut self, arg: MemArg, lane: u8) -> Self::Output {
        self.emit_wasm_load(
            &arg,
            WasmValType::V128,
            LoadKind::vector_lane(lane, OperandSize::S64),
        )
    }
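
    // The `v128.loadN_lane` instructions also take a v128 operand: they
    // replace only the lane at index `lane` with the value loaded from
    // memory, leaving all other lanes of that operand unchanged.
    // `LoadKind::vector_lane` carries both the lane index and the lane
    // width down to the masm implementation.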

    fn visit_v128_store8_lane(&mut self, arg: MemArg, lane: u8) -> Self::Output {
        self.emit_wasm_store(&arg, StoreKind::vector_lane(lane, OperandSize::S8))
    }

    fn visit_v128_store16_lane(&mut self, arg: MemArg, lane: u8) -> Self::Output {
        self.emit_wasm_store(&arg, StoreKind::vector_lane(lane, OperandSize::S16))
    }

    fn visit_v128_store32_lane(&mut self, arg: MemArg, lane: u8) -> Self::Output {
        self.emit_wasm_store(&arg, StoreKind::vector_lane(lane, OperandSize::S32))
    }

    fn visit_v128_store64_lane(&mut self, arg: MemArg, lane: u8) -> Self::Output {
        self.emit_wasm_store(&arg, StoreKind::vector_lane(lane, OperandSize::S64))
    }

    fn visit_f32x4_convert_i32x4_s(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_convert(reg, writable!(reg), V128ConvertKind::I32x4S)?;
            Ok(TypedReg::v128(reg))
        })
    }

    fn visit_f32x4_convert_i32x4_u(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_convert(reg, writable!(reg), V128ConvertKind::I32x4U)?;
            Ok(TypedReg::v128(reg))
        })
    }

    fn visit_f64x2_convert_low_i32x4_s(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_convert(reg, writable!(reg), V128ConvertKind::I32x4LowS)?;
            Ok(TypedReg::v128(reg))
        })
    }

    fn visit_f64x2_convert_low_i32x4_u(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_convert(reg, writable!(reg), V128ConvertKind::I32x4LowU)?;
            Ok(TypedReg::v128(reg))
        })
    }

    fn visit_i8x16_narrow_i16x8_s(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
                masm.v128_narrow(dst, src, writable!(dst), V128NarrowKind::I16x8S)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i8x16_narrow_i16x8_u(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
                masm.v128_narrow(dst, src, writable!(dst), V128NarrowKind::I16x8U)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i16x8_narrow_i32x4_s(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
                masm.v128_narrow(dst, src, writable!(dst), V128NarrowKind::I32x4S)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i16x8_narrow_i32x4_u(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
                masm.v128_narrow(dst, src, writable!(dst), V128NarrowKind::I32x4U)?;
                Ok(TypedReg::v128(dst))
            })
    }
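
    // The narrowing conversions saturate each lane of the two inputs to
    // half its width, packing the first operand into the low lanes of the
    // result and the second into the high lanes. Note that the
    // `OperandSize` passed to `binop` is the *source* lane width: S16 for
    // `i8x16.narrow_i16x8_*` and S32 for `i16x8.narrow_i32x4_*`.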

    fn visit_f32x4_demote_f64x2_zero(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_demote(reg, writable!(reg))?;
            Ok(TypedReg::v128(reg))
        })
    }

    fn visit_f64x2_promote_low_f32x4(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_promote(reg, writable!(reg))?;
            Ok(TypedReg::v128(reg))
        })
    }

    fn visit_i16x8_extend_low_i8x16_s(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_extend(reg, writable!(reg), V128ExtendKind::LowI8x16S)?;
            Ok(TypedReg::v128(reg))
        })
    }

    fn visit_i16x8_extend_high_i8x16_s(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_extend(reg, writable!(reg), V128ExtendKind::HighI8x16S)?;
            Ok(TypedReg::v128(reg))
        })
    }

    fn visit_i16x8_extend_low_i8x16_u(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_extend(reg, writable!(reg), V128ExtendKind::LowI8x16U)?;
            Ok(TypedReg::v128(reg))
        })
    }

    fn visit_i16x8_extend_high_i8x16_u(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_extend(reg, writable!(reg), V128ExtendKind::HighI8x16U)?;
            Ok(TypedReg::v128(reg))
        })
    }

    fn visit_i32x4_extend_low_i16x8_s(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_extend(reg, writable!(reg), V128ExtendKind::LowI16x8S)?;
            Ok(TypedReg::v128(reg))
        })
    }

    fn visit_i32x4_extend_high_i16x8_s(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_extend(reg, writable!(reg), V128ExtendKind::HighI16x8S)?;
            Ok(TypedReg::v128(reg))
        })
    }

    fn visit_i32x4_extend_low_i16x8_u(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_extend(reg, writable!(reg), V128ExtendKind::LowI16x8U)?;
            Ok(TypedReg::v128(reg))
        })
    }

    fn visit_i32x4_extend_high_i16x8_u(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_extend(reg, writable!(reg), V128ExtendKind::HighI16x8U)?;
            Ok(TypedReg::v128(reg))
        })
    }

    fn visit_i64x2_extend_low_i32x4_s(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_extend(reg, writable!(reg), V128ExtendKind::LowI32x4S)?;
            Ok(TypedReg::v128(reg))
        })
    }

    fn visit_i64x2_extend_high_i32x4_s(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_extend(reg, writable!(reg), V128ExtendKind::HighI32x4S)?;
            Ok(TypedReg::v128(reg))
        })
    }

    fn visit_i64x2_extend_low_i32x4_u(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_extend(reg, writable!(reg), V128ExtendKind::LowI32x4U)?;
            Ok(TypedReg::v128(reg))
        })
    }

    fn visit_i64x2_extend_high_i32x4_u(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_extend(reg, writable!(reg), V128ExtendKind::HighI32x4U)?;
            Ok(TypedReg::v128(reg))
        })
    }
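
    // The extension conversions widen either the low or the high half of
    // the input's lanes to twice their width, sign- or zero-extending
    // according to the `_s`/`_u` suffix; `V128ExtendKind` encodes the
    // half, the source lane width, and the signedness in a single value.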

    fn visit_i8x16_add(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
                masm.v128_add(dst, src, writable!(dst), V128AddKind::I8x16)?;
                Ok(TypedReg::new(WasmValType::V128, dst))
            })
    }

    fn visit_i16x8_add(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
                masm.v128_add(dst, src, writable!(dst), V128AddKind::I16x8)?;
                Ok(TypedReg::new(WasmValType::V128, dst))
            })
    }

    fn visit_i32x4_add(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
                masm.v128_add(dst, src, writable!(dst), V128AddKind::I32x4)?;
                Ok(TypedReg::new(WasmValType::V128, dst))
            })
    }

    fn visit_i64x2_add(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
                masm.v128_add(dst, src, writable!(dst), V128AddKind::I64x2)?;
                Ok(TypedReg::new(WasmValType::V128, dst))
            })
    }

    fn visit_i8x16_sub(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
                masm.v128_sub(dst, src, writable!(dst), V128SubKind::I8x16)?;
                Ok(TypedReg::new(WasmValType::V128, dst))
            })
    }

    fn visit_i16x8_sub(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
                masm.v128_sub(dst, src, writable!(dst), V128SubKind::I16x8)?;
                Ok(TypedReg::new(WasmValType::V128, dst))
            })
    }

    fn visit_i32x4_sub(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
                masm.v128_sub(dst, src, writable!(dst), V128SubKind::I32x4)?;
                Ok(TypedReg::new(WasmValType::V128, dst))
            })
    }

    fn visit_i64x2_sub(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
                masm.v128_sub(dst, src, writable!(dst), V128SubKind::I64x2)?;
                Ok(TypedReg::new(WasmValType::V128, dst))
            })
    }

    fn visit_i16x8_mul(&mut self) -> Self::Output {
        self.masm.v128_mul(&mut self.context, V128MulKind::I16x8)
    }

    fn visit_i32x4_mul(&mut self) -> Self::Output {
        self.masm.v128_mul(&mut self.context, V128MulKind::I32x4)
    }

    fn visit_i64x2_mul(&mut self) -> Self::Output {
        self.masm.v128_mul(&mut self.context, V128MulKind::I64x2)
    }

    fn visit_i8x16_add_sat_s(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
                masm.v128_add(dst, src, writable!(dst), V128AddKind::I8x16SatS)?;
                Ok(TypedReg::new(WasmValType::V128, dst))
            })
    }

    fn visit_i16x8_add_sat_s(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
                masm.v128_add(dst, src, writable!(dst), V128AddKind::I16x8SatS)?;
                Ok(TypedReg::new(WasmValType::V128, dst))
            })
    }

    fn visit_i8x16_add_sat_u(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
                masm.v128_add(dst, src, writable!(dst), V128AddKind::I8x16SatU)?;
                Ok(TypedReg::new(WasmValType::V128, dst))
            })
    }

    fn visit_i16x8_add_sat_u(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
                masm.v128_add(dst, src, writable!(dst), V128AddKind::I16x8SatU)?;
                Ok(TypedReg::new(WasmValType::V128, dst))
            })
    }

    fn visit_i8x16_sub_sat_s(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
                masm.v128_sub(dst, src, writable!(dst), V128SubKind::I8x16SatS)?;
                Ok(TypedReg::new(WasmValType::V128, dst))
            })
    }

    fn visit_i16x8_sub_sat_s(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
                masm.v128_sub(dst, src, writable!(dst), V128SubKind::I16x8SatS)?;
                Ok(TypedReg::new(WasmValType::V128, dst))
            })
    }

    fn visit_i8x16_sub_sat_u(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
                masm.v128_sub(dst, src, writable!(dst), V128SubKind::I8x16SatU)?;
                Ok(TypedReg::new(WasmValType::V128, dst))
            })
    }

    fn visit_i16x8_sub_sat_u(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
                masm.v128_sub(dst, src, writable!(dst), V128SubKind::I16x8SatU)?;
                Ok(TypedReg::new(WasmValType::V128, dst))
            })
    }

    fn visit_i8x16_abs(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_abs(reg, writable!(reg), V128AbsKind::I8x16)?;
            Ok(TypedReg::new(WasmValType::V128, reg))
        })
    }

    fn visit_i16x8_abs(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_abs(reg, writable!(reg), V128AbsKind::I16x8)?;
            Ok(TypedReg::new(WasmValType::V128, reg))
        })
    }

    fn visit_i32x4_abs(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_abs(reg, writable!(reg), V128AbsKind::I32x4)?;
            Ok(TypedReg::new(WasmValType::V128, reg))
        })
    }

    fn visit_i64x2_abs(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_abs(reg, writable!(reg), V128AbsKind::I64x2)?;
            Ok(TypedReg::new(WasmValType::V128, reg))
        })
    }

    fn visit_f32x4_abs(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_abs(reg, writable!(reg), V128AbsKind::F32x4)?;
            Ok(TypedReg::new(WasmValType::V128, reg))
        })
    }

    fn visit_f64x2_abs(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_abs(reg, writable!(reg), V128AbsKind::F64x2)?;
            Ok(TypedReg::new(WasmValType::V128, reg))
        })
    }

    fn visit_i8x16_neg(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, op| {
            masm.v128_neg(writable!(op), V128NegKind::I8x16)?;
            Ok(TypedReg::new(WasmValType::V128, op))
        })
    }

    fn visit_i16x8_neg(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, op| {
            masm.v128_neg(writable!(op), V128NegKind::I16x8)?;
            Ok(TypedReg::new(WasmValType::V128, op))
        })
    }

    fn visit_i32x4_neg(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, op| {
            masm.v128_neg(writable!(op), V128NegKind::I32x4)?;
            Ok(TypedReg::new(WasmValType::V128, op))
        })
    }

    fn visit_i64x2_neg(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, op| {
            masm.v128_neg(writable!(op), V128NegKind::I64x2)?;
            Ok(TypedReg::new(WasmValType::V128, op))
        })
    }

    fn visit_i8x16_shl(&mut self) -> Self::Output {
        self.masm
            .v128_shift(&mut self.context, OperandSize::S8, ShiftKind::Shl)
    }

    fn visit_i16x8_shl(&mut self) -> Self::Output {
        self.masm
            .v128_shift(&mut self.context, OperandSize::S16, ShiftKind::Shl)
    }

    fn visit_i32x4_shl(&mut self) -> Self::Output {
        self.masm
            .v128_shift(&mut self.context, OperandSize::S32, ShiftKind::Shl)
    }

    fn visit_i64x2_shl(&mut self) -> Self::Output {
        self.masm
            .v128_shift(&mut self.context, OperandSize::S64, ShiftKind::Shl)
    }

    fn visit_i8x16_shr_u(&mut self) -> Self::Output {
        self.masm
            .v128_shift(&mut self.context, OperandSize::S8, ShiftKind::ShrU)
    }

    fn visit_i16x8_shr_u(&mut self) -> Self::Output {
        self.masm
            .v128_shift(&mut self.context, OperandSize::S16, ShiftKind::ShrU)
    }

    fn visit_i32x4_shr_u(&mut self) -> Self::Output {
        self.masm
            .v128_shift(&mut self.context, OperandSize::S32, ShiftKind::ShrU)
    }

    fn visit_i64x2_shr_u(&mut self) -> Self::Output {
        self.masm
            .v128_shift(&mut self.context, OperandSize::S64, ShiftKind::ShrU)
    }

    fn visit_i8x16_shr_s(&mut self) -> Self::Output {
        self.masm
            .v128_shift(&mut self.context, OperandSize::S8, ShiftKind::ShrS)
    }

    fn visit_i16x8_shr_s(&mut self) -> Self::Output {
        self.masm
            .v128_shift(&mut self.context, OperandSize::S16, ShiftKind::ShrS)
    }

    fn visit_i32x4_shr_s(&mut self) -> Self::Output {
        self.masm
            .v128_shift(&mut self.context, OperandSize::S32, ShiftKind::ShrS)
    }

    fn visit_i64x2_shr_s(&mut self) -> Self::Output {
        self.masm
            .v128_shift(&mut self.context, OperandSize::S64, ShiftKind::ShrS)
    }
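
    // As with the scalar shifts, the Wasm spec defines the effective
    // vector shift amount as the operand taken modulo the lane width
    // (e.g. modulo 8 for `i8x16` lanes); that masking is expected to be
    // handled below this abstraction, by the `v128_shift` implementations.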

    fn visit_i16x8_q15mulr_sat_s(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S16, |masm, dst, src, size| {
                masm.v128_q15mulr_sat_s(dst, src, writable!(dst), size)?;
                Ok(TypedReg::v128(dst))
            })
    }
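
    // `i16x8.q15mulr_sat_s` is a fixed-point (Q15) rounding multiply: each
    // lane computes `sat_s((a * b + 0x4000) >> 15)`, where the only
    // overflowing case, `-0x8000 * -0x8000`, saturates to `0x7FFF`.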

    fn visit_i8x16_min_s(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
                masm.v128_min(src, dst, writable!(dst), V128MinKind::I8x16S)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i8x16_all_true(&mut self) -> Self::Output {
        self.context.v128_all_true_op(self.masm, |masm, src, dst| {
            masm.v128_all_true(src, writable!(dst), OperandSize::S8)
        })
    }

    fn visit_i16x8_all_true(&mut self) -> Self::Output {
        self.context.v128_all_true_op(self.masm, |masm, src, dst| {
            masm.v128_all_true(src, writable!(dst), OperandSize::S16)
        })
    }

    fn visit_i32x4_all_true(&mut self) -> Self::Output {
        self.context.v128_all_true_op(self.masm, |masm, src, dst| {
            masm.v128_all_true(src, writable!(dst), OperandSize::S32)
        })
    }

    fn visit_i64x2_all_true(&mut self) -> Self::Output {
        self.context.v128_all_true_op(self.masm, |masm, src, dst| {
            masm.v128_all_true(src, writable!(dst), OperandSize::S64)
        })
    }
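
    // Unlike `v128.any_true`, the `all_true` variants are lane-aware: they
    // yield 1 only if every lane of the given width is non-zero, so the
    // lane size must be threaded through to the masm.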

    fn visit_i8x16_bitmask(&mut self) -> Self::Output {
        self.context.v128_bitmask_op(self.masm, |masm, src, dst| {
            masm.v128_bitmask(src, writable!(dst), OperandSize::S8)
        })
    }

    fn visit_i16x8_bitmask(&mut self) -> Self::Output {
        self.context.v128_bitmask_op(self.masm, |masm, src, dst| {
            masm.v128_bitmask(src, writable!(dst), OperandSize::S16)
        })
    }

    fn visit_i32x4_bitmask(&mut self) -> Self::Output {
        self.context.v128_bitmask_op(self.masm, |masm, src, dst| {
            masm.v128_bitmask(src, writable!(dst), OperandSize::S32)
        })
    }

    fn visit_i64x2_bitmask(&mut self) -> Self::Output {
        self.context.v128_bitmask_op(self.masm, |masm, src, dst| {
            masm.v128_bitmask(src, writable!(dst), OperandSize::S64)
        })
    }
}
4155
4156
fn visit_i32x4_trunc_sat_f32x4_s(&mut self) -> Self::Output {
4157
self.masm
4158
.v128_trunc(&mut self.context, V128TruncKind::I32x4FromF32x4S)
4159
}
4160
4161
fn visit_i32x4_trunc_sat_f32x4_u(&mut self) -> Self::Output {
4162
self.masm
4163
.v128_trunc(&mut self.context, V128TruncKind::I32x4FromF32x4U)
4164
}
4165
4166
fn visit_i32x4_trunc_sat_f64x2_s_zero(&mut self) -> Self::Output {
4167
self.masm
4168
.v128_trunc(&mut self.context, V128TruncKind::I32x4FromF64x2SZero)
4169
}
4170
4171
fn visit_i32x4_trunc_sat_f64x2_u_zero(&mut self) -> Self::Output {
4172
self.masm
4173
.v128_trunc(&mut self.context, V128TruncKind::I32x4FromF64x2UZero)
4174
}
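
    // The saturating truncations convert floats to integers without
    // trapping: NaN lanes become 0 and out-of-range lanes saturate to the
    // integer minimum or maximum. The `_zero` variants consume `f64x2`,
    // produce `i32x4`, and set the two upper result lanes to zero.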

    fn visit_i16x8_min_s(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
                masm.v128_min(src, dst, writable!(dst), V128MinKind::I16x8S)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i32x4_dot_i16x8_s(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
                masm.v128_dot(dst, src, writable!(dst))?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i8x16_popcnt(&mut self) -> Self::Output {
        self.masm.v128_popcnt(&mut self.context)
    }

    fn visit_i8x16_avgr_u(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S8, |masm, dst, src, size| {
                masm.v128_avgr(dst, src, writable!(dst), size)?;
                Ok(TypedReg::v128(dst))
            })
    }
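
    // `avgr_u` is the rounding unsigned average, `(a + b + 1) / 2`,
    // computed without intermediate overflow; since only the lane width
    // varies between the `i8x16` and `i16x8` forms, it is forwarded via
    // `size` rather than a dedicated kind enum.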

    fn visit_i32x4_min_s(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
                masm.v128_min(src, dst, writable!(dst), V128MinKind::I32x4S)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i8x16_min_u(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
                masm.v128_min(src, dst, writable!(dst), V128MinKind::I8x16U)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i16x8_avgr_u(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S16, |masm, dst, src, size| {
                masm.v128_avgr(dst, src, writable!(dst), size)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i16x8_min_u(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
                masm.v128_min(src, dst, writable!(dst), V128MinKind::I16x8U)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i32x4_min_u(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
                masm.v128_min(src, dst, writable!(dst), V128MinKind::I32x4U)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i8x16_max_s(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
                masm.v128_max(src, dst, writable!(dst), V128MaxKind::I8x16S)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i16x8_max_s(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
                masm.v128_max(src, dst, writable!(dst), V128MaxKind::I16x8S)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i32x4_max_s(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
                masm.v128_max(src, dst, writable!(dst), V128MaxKind::I32x4S)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i8x16_max_u(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
                masm.v128_max(src, dst, writable!(dst), V128MaxKind::I8x16U)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i16x8_max_u(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
                masm.v128_max(src, dst, writable!(dst), V128MaxKind::I16x8U)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i32x4_max_u(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
                masm.v128_max(src, dst, writable!(dst), V128MaxKind::I32x4U)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_i16x8_extmul_low_i8x16_s(&mut self) -> Self::Output {
        self.masm
            .v128_extmul(&mut self.context, V128ExtMulKind::LowI8x16S)
    }

    fn visit_i32x4_extmul_low_i16x8_s(&mut self) -> Self::Output {
        self.masm
            .v128_extmul(&mut self.context, V128ExtMulKind::LowI16x8S)
    }

    fn visit_i64x2_extmul_low_i32x4_s(&mut self) -> Self::Output {
        self.masm
            .v128_extmul(&mut self.context, V128ExtMulKind::LowI32x4S)
    }

    fn visit_i16x8_extmul_low_i8x16_u(&mut self) -> Self::Output {
        self.masm
            .v128_extmul(&mut self.context, V128ExtMulKind::LowI8x16U)
    }

    fn visit_i32x4_extmul_low_i16x8_u(&mut self) -> Self::Output {
        self.masm
            .v128_extmul(&mut self.context, V128ExtMulKind::LowI16x8U)
    }

    fn visit_i64x2_extmul_low_i32x4_u(&mut self) -> Self::Output {
        self.masm
            .v128_extmul(&mut self.context, V128ExtMulKind::LowI32x4U)
    }

    fn visit_i16x8_extmul_high_i8x16_u(&mut self) -> Self::Output {
        self.masm
            .v128_extmul(&mut self.context, V128ExtMulKind::HighI8x16U)
    }

    fn visit_i32x4_extmul_high_i16x8_u(&mut self) -> Self::Output {
        self.masm
            .v128_extmul(&mut self.context, V128ExtMulKind::HighI16x8U)
    }

    fn visit_i64x2_extmul_high_i32x4_u(&mut self) -> Self::Output {
        self.masm
            .v128_extmul(&mut self.context, V128ExtMulKind::HighI32x4U)
    }

    fn visit_i16x8_extmul_high_i8x16_s(&mut self) -> Self::Output {
        self.masm
            .v128_extmul(&mut self.context, V128ExtMulKind::HighI8x16S)
    }

    fn visit_i32x4_extmul_high_i16x8_s(&mut self) -> Self::Output {
        self.masm
            .v128_extmul(&mut self.context, V128ExtMulKind::HighI16x8S)
    }

    fn visit_i64x2_extmul_high_i32x4_s(&mut self) -> Self::Output {
        self.masm
            .v128_extmul(&mut self.context, V128ExtMulKind::HighI32x4S)
    }
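
    // `extmul_{low,high}_*` is equivalent to extending the chosen half of
    // each operand to twice the lane width and then multiplying lane-wise;
    // e.g. `i16x8.extmul_low_i8x16_s` sign-extends the low eight `i8`
    // lanes of both operands to `i16` before the multiply.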

    fn visit_i16x8_extadd_pairwise_i8x16_s(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, op| {
            masm.v128_extadd_pairwise(op, writable!(op), V128ExtAddKind::I8x16S)?;
            Ok(TypedReg::v128(op))
        })
    }

    fn visit_i16x8_extadd_pairwise_i8x16_u(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, op| {
            masm.v128_extadd_pairwise(op, writable!(op), V128ExtAddKind::I8x16U)?;
            Ok(TypedReg::v128(op))
        })
    }

    fn visit_i32x4_extadd_pairwise_i16x8_s(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, op| {
            masm.v128_extadd_pairwise(op, writable!(op), V128ExtAddKind::I16x8S)?;
            Ok(TypedReg::v128(op))
        })
    }

    fn visit_i32x4_extadd_pairwise_i16x8_u(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, op| {
            masm.v128_extadd_pairwise(op, writable!(op), V128ExtAddKind::I16x8U)?;
            Ok(TypedReg::v128(op))
        })
    }

    fn visit_f32x4_add(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
                masm.v128_add(dst, src, writable!(dst), V128AddKind::F32x4)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_f64x2_add(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
                masm.v128_add(dst, src, writable!(dst), V128AddKind::F64x2)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_f32x4_sub(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
                masm.v128_sub(dst, src, writable!(dst), V128SubKind::F32x4)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_f64x2_sub(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
                masm.v128_sub(dst, src, writable!(dst), V128SubKind::F64x2)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_f32x4_mul(&mut self) -> Self::Output {
        self.masm.v128_mul(&mut self.context, V128MulKind::F32x4)
    }

    fn visit_f64x2_mul(&mut self) -> Self::Output {
        self.masm.v128_mul(&mut self.context, V128MulKind::F64x2)
    }

    fn visit_f32x4_div(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S32, |masm, dst, src, size| {
                masm.v128_div(dst, src, writable!(dst), size)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_f64x2_div(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S64, |masm, dst, src, size| {
                masm.v128_div(dst, src, writable!(dst), size)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_f32x4_neg(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_neg(writable!(reg), V128NegKind::F32x4)?;
            Ok(TypedReg::v128(reg))
        })
    }

    fn visit_f32x4_ceil(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_ceil(reg, writable!(reg), OperandSize::S32)?;
            Ok(TypedReg::v128(reg))
        })
    }

    fn visit_f64x2_neg(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_neg(writable!(reg), V128NegKind::F64x2)?;
            Ok(TypedReg::v128(reg))
        })
    }

    fn visit_f64x2_ceil(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_ceil(reg, writable!(reg), OperandSize::S64)?;
            Ok(TypedReg::v128(reg))
        })
    }

    fn visit_f32x4_sqrt(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_sqrt(reg, writable!(reg), OperandSize::S32)?;
            Ok(TypedReg::v128(reg))
        })
    }

    fn visit_f32x4_floor(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_floor(reg, writable!(reg), OperandSize::S32)?;
            Ok(TypedReg::v128(reg))
        })
    }

    fn visit_f64x2_sqrt(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_sqrt(reg, writable!(reg), OperandSize::S64)?;
            Ok(TypedReg::v128(reg))
        })
    }

    fn visit_f64x2_floor(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_floor(reg, writable!(reg), OperandSize::S64)?;
            Ok(TypedReg::v128(reg))
        })
    }

    fn visit_f32x4_nearest(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_nearest(reg, writable!(reg), OperandSize::S32)?;
            Ok(TypedReg::v128(reg))
        })
    }

    fn visit_f64x2_nearest(&mut self) -> Self::Output {
        self.context.unop(self.masm, |masm, reg| {
            masm.v128_nearest(reg, writable!(reg), OperandSize::S64)?;
            Ok(TypedReg::v128(reg))
        })
    }

    fn visit_f32x4_trunc(&mut self) -> Self::Output {
        self.masm
            .v128_trunc(&mut self.context, V128TruncKind::F32x4)
    }

    fn visit_f64x2_trunc(&mut self) -> Self::Output {
        self.masm
            .v128_trunc(&mut self.context, V128TruncKind::F64x2)
    }

    fn visit_v128_load32_zero(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::V128,
            LoadKind::VectorZero(OperandSize::S32),
        )
    }

    fn visit_v128_load64_zero(&mut self, memarg: MemArg) -> Self::Output {
        self.emit_wasm_load(
            &memarg,
            WasmValType::V128,
            LoadKind::VectorZero(OperandSize::S64),
        )
    }

    fn visit_f32x4_pmin(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S32, |masm, dst, src, size| {
                masm.v128_pmin(dst, src, writable!(dst), size)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_f64x2_pmin(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S64, |masm, dst, src, size| {
                masm.v128_pmin(dst, src, writable!(dst), size)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_f32x4_pmax(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S32, |masm, dst, src, size| {
                masm.v128_pmax(dst, src, writable!(dst), size)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_f64x2_pmax(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S64, |masm, dst, src, size| {
                masm.v128_pmax(dst, src, writable!(dst), size)?;
                Ok(TypedReg::v128(dst))
            })
    }
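
    // `pmin`/`pmax` are the "pseudo-minimum/maximum" operations, defined
    // as `pmin(a, b) = b < a ? b : a` and `pmax(a, b) = a < b ? b : a`.
    // Unlike `min`/`max` below, they do not canonicalize NaNs, which lets
    // them map directly onto x86-style vector min/max semantics.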

    fn visit_f32x4_min(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
                masm.v128_min(dst, src, writable!(dst), V128MinKind::F32x4)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_f64x2_min(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
                masm.v128_min(dst, src, writable!(dst), V128MinKind::F64x2)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_f32x4_max(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
                masm.v128_max(dst, src, writable!(dst), V128MaxKind::F32x4)?;
                Ok(TypedReg::v128(dst))
            })
    }

    fn visit_f64x2_max(&mut self) -> Self::Output {
        self.context
            .binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
                masm.v128_max(dst, src, writable!(dst), V128MaxKind::F64x2)?;
                Ok(TypedReg::v128(dst))
            })
    }

    wasmparser::for_each_visit_simd_operator!(def_unsupported);
}

impl<'a, 'translation, 'data, M> CodeGen<'a, 'translation, 'data, M, Emission>
where
    M: MacroAssembler,
{
    fn cmp_i32s(&mut self, kind: IntCmpKind) -> Result<()> {
        self.context.i32_binop(self.masm, |masm, dst, src, size| {
            masm.cmp_with_set(writable!(dst), src, kind, size)?;
            Ok(TypedReg::i32(dst))
        })
    }

    fn cmp_i64s(&mut self, kind: IntCmpKind) -> Result<()> {
        self.context
            .i64_binop(self.masm, move |masm, dst, src, size| {
                masm.cmp_with_set(writable!(dst), src, kind, size)?;
                Ok(TypedReg::i32(dst)) // Return value for comparisons is an `i32`.
            })
    }
}

impl TryFrom<WasmValType> for OperandSize {
    type Error = crate::Error;
    fn try_from(ty: WasmValType) -> Result<OperandSize> {
        let ty = match ty {
            WasmValType::I32 | WasmValType::F32 => OperandSize::S32,
            WasmValType::I64 | WasmValType::F64 => OperandSize::S64,
            WasmValType::V128 => OperandSize::S128,
            WasmValType::Ref(rt) => {
                match rt.heap_type {
                    // TODO: Hardcoded size, assuming 64-bit support only. Once
                    // Wasmtime supports 32-bit architectures, this will need
                    // to be updated in such a way that the calculation of the
                    // OperandSize will depend on the target's pointer size.
                    WasmHeapType::Func => OperandSize::S64,
                    WasmHeapType::Extern => OperandSize::S64,
                    _ => bail!(CodeGenError::unsupported_wasm_type()),
                }
            }
        };
        Ok(ty)
    }
}
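
// A quick illustration of the mapping above (a minimal sketch, not a test
// that necessarily ships with this crate):
//
//     assert!(matches!(
//         OperandSize::try_from(WasmValType::F64),
//         Ok(OperandSize::S64)
//     ));
//     assert!(matches!(
//         OperandSize::try_from(WasmValType::V128),
//         Ok(OperandSize::S128)
//     ));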