Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
bytecodealliance
GitHub Repository: bytecodealliance/wasmtime
Path: blob/main/winch/codegen/src/visitor.rs
1691 views
1
//! This module is the central place for machine code emission.
//! It defines an implementation of wasmparser's Visitor trait
//! for `CodeGen`; which defines a visitor per op-code,
//! which validates and dispatches to the corresponding
//! machine code emitter.
6
7
use crate::abi::RetArea;
use crate::codegen::{
    Callee, CodeGen, CodeGenError, ConditionalBranch, ControlStackFrame, Emission, FnCall,
    UnconditionalBranch, control_index,
};
use crate::masm::{
    AtomicWaitKind, DivKind, Extend, ExtractLaneKind, FloatCmpKind, IntCmpKind, LoadKind,
    MacroAssembler, MulWideKind, OperandSize, RegImm, RemKind, ReplaceLaneKind, RmwOp,
    RoundingMode, SPOffset, ShiftKind, Signed, SplatKind, SplatLoadKind, StoreKind, TruncKind,
    V128AbsKind, V128AddKind, V128ConvertKind, V128ExtAddKind, V128ExtMulKind, V128ExtendKind,
    V128LoadExtendKind, V128MaxKind, V128MinKind, V128MulKind, V128NarrowKind, V128NegKind,
    V128SubKind, V128TruncKind, VectorCompareKind, VectorEqualityKind, Zero,
};
use crate::reg::{Reg, writable};
use crate::stack::{TypedReg, Val};
use anyhow::{Result, anyhow, bail, ensure};
use regalloc2::RegClass;
use smallvec::{SmallVec, smallvec};
use wasmparser::{
    BlockType, BrTable, Ieee32, Ieee64, MemArg, V128, VisitOperator, VisitSimdOperator,
};
use wasmtime_cranelift::TRAP_INDIRECT_CALL_TO_NULL;
use wasmtime_environ::{
    FUNCREF_INIT_BIT, FuncIndex, GlobalIndex, MemoryIndex, TableIndex, TypeIndex, WasmHeapType,
    WasmValType,
};
34
35
/// A macro to define unsupported WebAssembly operators.
///
/// This macro calls itself recursively:
/// 1. It no-ops when matching a supported operator (one of the
///    `(emit <Op> ...)` arms below), since a hand-written visitor
///    exists for it.
/// 2. Otherwise it defines the visitor function, which discards its
///    arguments and returns an "unimplemented instruction" error at
///    compile (code-generation) time.
macro_rules! def_unsupported {
    ($( @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident $ann:tt)*) => {
        $(
            def_unsupported!(
                emit
                $op

                fn $visit(&mut self $($(,$arg: $argty)*)?) -> Self::Output {
                    // Silence unused-argument warnings for the stub.
                    $($(let _ = $arg;)*)?

                    Err(anyhow!(CodeGenError::unimplemented_wasm_instruction()))
                }
            );
        )*
    };

    // Every operator listed below has an explicit visitor implementation,
    // so the recursive `emit` step expands to nothing for it.
    (emit I32Const $($rest:tt)*) => {};
    (emit I64Const $($rest:tt)*) => {};
    (emit F32Const $($rest:tt)*) => {};
    (emit F64Const $($rest:tt)*) => {};
    (emit V128Const $($rest:tt)*) => {};
    (emit F32Add $($rest:tt)*) => {};
    (emit F64Add $($rest:tt)*) => {};
    (emit F32Sub $($rest:tt)*) => {};
    (emit F64Sub $($rest:tt)*) => {};
    (emit F32Mul $($rest:tt)*) => {};
    (emit F64Mul $($rest:tt)*) => {};
    (emit F32Div $($rest:tt)*) => {};
    (emit F64Div $($rest:tt)*) => {};
    (emit F32Min $($rest:tt)*) => {};
    (emit F64Min $($rest:tt)*) => {};
    (emit F32Max $($rest:tt)*) => {};
    (emit F64Max $($rest:tt)*) => {};
    (emit F32Copysign $($rest:tt)*) => {};
    (emit F64Copysign $($rest:tt)*) => {};
    (emit F32Abs $($rest:tt)*) => {};
    (emit F64Abs $($rest:tt)*) => {};
    (emit F32Neg $($rest:tt)*) => {};
    (emit F64Neg $($rest:tt)*) => {};
    (emit F32Floor $($rest:tt)*) => {};
    (emit F64Floor $($rest:tt)*) => {};
    (emit F32Ceil $($rest:tt)*) => {};
    (emit F64Ceil $($rest:tt)*) => {};
    (emit F32Nearest $($rest:tt)*) => {};
    (emit F64Nearest $($rest:tt)*) => {};
    (emit F32Trunc $($rest:tt)*) => {};
    (emit F64Trunc $($rest:tt)*) => {};
    (emit F32Sqrt $($rest:tt)*) => {};
    (emit F64Sqrt $($rest:tt)*) => {};
    (emit F32Eq $($rest:tt)*) => {};
    (emit F64Eq $($rest:tt)*) => {};
    (emit F32Ne $($rest:tt)*) => {};
    (emit F64Ne $($rest:tt)*) => {};
    (emit F32Lt $($rest:tt)*) => {};
    (emit F64Lt $($rest:tt)*) => {};
    (emit F32Gt $($rest:tt)*) => {};
    (emit F64Gt $($rest:tt)*) => {};
    (emit F32Le $($rest:tt)*) => {};
    (emit F64Le $($rest:tt)*) => {};
    (emit F32Ge $($rest:tt)*) => {};
    (emit F64Ge $($rest:tt)*) => {};
    (emit F32ConvertI32S $($rest:tt)*) => {};
    (emit F32ConvertI32U $($rest:tt)*) => {};
    (emit F32ConvertI64S $($rest:tt)*) => {};
    (emit F32ConvertI64U $($rest:tt)*) => {};
    (emit F64ConvertI32S $($rest:tt)*) => {};
    (emit F64ConvertI32U $($rest:tt)*) => {};
    (emit F64ConvertI64S $($rest:tt)*) => {};
    (emit F64ConvertI64U $($rest:tt)*) => {};
    (emit F32ReinterpretI32 $($rest:tt)*) => {};
    (emit F64ReinterpretI64 $($rest:tt)*) => {};
    (emit F32DemoteF64 $($rest:tt)*) => {};
    (emit F64PromoteF32 $($rest:tt)*) => {};
    (emit I32Add $($rest:tt)*) => {};
    (emit I64Add $($rest:tt)*) => {};
    (emit I32Sub $($rest:tt)*) => {};
    (emit I32Mul $($rest:tt)*) => {};
    (emit I32DivS $($rest:tt)*) => {};
    (emit I32DivU $($rest:tt)*) => {};
    (emit I64DivS $($rest:tt)*) => {};
    (emit I64DivU $($rest:tt)*) => {};
    (emit I64RemU $($rest:tt)*) => {};
    (emit I64RemS $($rest:tt)*) => {};
    (emit I32RemU $($rest:tt)*) => {};
    (emit I32RemS $($rest:tt)*) => {};
    (emit I64Mul $($rest:tt)*) => {};
    (emit I64Sub $($rest:tt)*) => {};
    (emit I32Eq $($rest:tt)*) => {};
    (emit I64Eq $($rest:tt)*) => {};
    (emit I32Ne $($rest:tt)*) => {};
    (emit I64Ne $($rest:tt)*) => {};
    (emit I32LtS $($rest:tt)*) => {};
    (emit I64LtS $($rest:tt)*) => {};
    (emit I32LtU $($rest:tt)*) => {};
    (emit I64LtU $($rest:tt)*) => {};
    (emit I32LeS $($rest:tt)*) => {};
    (emit I64LeS $($rest:tt)*) => {};
    (emit I32LeU $($rest:tt)*) => {};
    (emit I64LeU $($rest:tt)*) => {};
    (emit I32GtS $($rest:tt)*) => {};
    (emit I64GtS $($rest:tt)*) => {};
    (emit I32GtU $($rest:tt)*) => {};
    (emit I64GtU $($rest:tt)*) => {};
    (emit I32GeS $($rest:tt)*) => {};
    (emit I64GeS $($rest:tt)*) => {};
    (emit I32GeU $($rest:tt)*) => {};
    (emit I64GeU $($rest:tt)*) => {};
    (emit I32Eqz $($rest:tt)*) => {};
    (emit I64Eqz $($rest:tt)*) => {};
    (emit I32And $($rest:tt)*) => {};
    (emit I64And $($rest:tt)*) => {};
    (emit I32Or $($rest:tt)*) => {};
    (emit I64Or $($rest:tt)*) => {};
    (emit I32Xor $($rest:tt)*) => {};
    (emit I64Xor $($rest:tt)*) => {};
    (emit I32Shl $($rest:tt)*) => {};
    (emit I64Shl $($rest:tt)*) => {};
    (emit I32ShrS $($rest:tt)*) => {};
    (emit I64ShrS $($rest:tt)*) => {};
    (emit I32ShrU $($rest:tt)*) => {};
    (emit I64ShrU $($rest:tt)*) => {};
    (emit I32Rotl $($rest:tt)*) => {};
    (emit I64Rotl $($rest:tt)*) => {};
    (emit I32Rotr $($rest:tt)*) => {};
    (emit I64Rotr $($rest:tt)*) => {};
    (emit I32Clz $($rest:tt)*) => {};
    (emit I64Clz $($rest:tt)*) => {};
    (emit I32Ctz $($rest:tt)*) => {};
    (emit I64Ctz $($rest:tt)*) => {};
    (emit I32Popcnt $($rest:tt)*) => {};
    (emit I64Popcnt $($rest:tt)*) => {};
    (emit I32WrapI64 $($rest:tt)*) => {};
    (emit I64ExtendI32S $($rest:tt)*) => {};
    (emit I64ExtendI32U $($rest:tt)*) => {};
    (emit I32Extend8S $($rest:tt)*) => {};
    (emit I32Extend16S $($rest:tt)*) => {};
    (emit I64Extend8S $($rest:tt)*) => {};
    (emit I64Extend16S $($rest:tt)*) => {};
    (emit I64Extend32S $($rest:tt)*) => {};
    (emit I32TruncF32S $($rest:tt)*) => {};
    (emit I32TruncF32U $($rest:tt)*) => {};
    (emit I32TruncF64S $($rest:tt)*) => {};
    (emit I32TruncF64U $($rest:tt)*) => {};
    (emit I64TruncF32S $($rest:tt)*) => {};
    (emit I64TruncF32U $($rest:tt)*) => {};
    (emit I64TruncF64S $($rest:tt)*) => {};
    (emit I64TruncF64U $($rest:tt)*) => {};
    (emit I32ReinterpretF32 $($rest:tt)*) => {};
    (emit I64ReinterpretF64 $($rest:tt)*) => {};
    (emit LocalGet $($rest:tt)*) => {};
    (emit LocalSet $($rest:tt)*) => {};
    (emit Call $($rest:tt)*) => {};
    (emit End $($rest:tt)*) => {};
    (emit Nop $($rest:tt)*) => {};
    (emit If $($rest:tt)*) => {};
    (emit Else $($rest:tt)*) => {};
    (emit Block $($rest:tt)*) => {};
    (emit Loop $($rest:tt)*) => {};
    (emit Br $($rest:tt)*) => {};
    (emit BrIf $($rest:tt)*) => {};
    (emit Return $($rest:tt)*) => {};
    (emit Unreachable $($rest:tt)*) => {};
    (emit LocalTee $($rest:tt)*) => {};
    (emit GlobalGet $($rest:tt)*) => {};
    (emit GlobalSet $($rest:tt)*) => {};
    (emit Select $($rest:tt)*) => {};
    (emit Drop $($rest:tt)*) => {};
    (emit BrTable $($rest:tt)*) => {};
    (emit CallIndirect $($rest:tt)*) => {};
    (emit TableInit $($rest:tt)*) => {};
    (emit TableCopy $($rest:tt)*) => {};
    (emit TableGet $($rest:tt)*) => {};
    (emit TableSet $($rest:tt)*) => {};
    (emit TableGrow $($rest:tt)*) => {};
    (emit TableSize $($rest:tt)*) => {};
    (emit TableFill $($rest:tt)*) => {};
    (emit ElemDrop $($rest:tt)*) => {};
    (emit MemoryInit $($rest:tt)*) => {};
    (emit MemoryCopy $($rest:tt)*) => {};
    (emit DataDrop $($rest:tt)*) => {};
    (emit MemoryFill $($rest:tt)*) => {};
    (emit MemorySize $($rest:tt)*) => {};
    (emit MemoryGrow $($rest:tt)*) => {};
    (emit I32Load $($rest:tt)*) => {};
    (emit I32Load8S $($rest:tt)*) => {};
    (emit I32Load8U $($rest:tt)*) => {};
    (emit I32Load16S $($rest:tt)*) => {};
    (emit I32Load16U $($rest:tt)*) => {};
    (emit I64Load8S $($rest:tt)*) => {};
    (emit I64Load8U $($rest:tt)*) => {};
    (emit I64Load16S $($rest:tt)*) => {};
    (emit I64Load16U $($rest:tt)*) => {};
    (emit I64Load32S $($rest:tt)*) => {};
    (emit I64Load32U $($rest:tt)*) => {};
    (emit I64Load $($rest:tt)*) => {};
    (emit I32Store $($rest:tt)*) => {};
    (emit I32Store8 $($rest:tt)*) => {};
    (emit I32Store16 $($rest:tt)*) => {};
    (emit I64Store $($rest:tt)*) => {};
    (emit I64Store8 $($rest:tt)*) => {};
    (emit I64Store16 $($rest:tt)*) => {};
    (emit I64Store32 $($rest:tt)*) => {};
    (emit F32Load $($rest:tt)*) => {};
    (emit F32Store $($rest:tt)*) => {};
    (emit F64Load $($rest:tt)*) => {};
    (emit F64Store $($rest:tt)*) => {};
    (emit I32TruncSatF32S $($rest:tt)*) => {};
    (emit I32TruncSatF32U $($rest:tt)*) => {};
    (emit I32TruncSatF64S $($rest:tt)*) => {};
    (emit I32TruncSatF64U $($rest:tt)*) => {};
    (emit I64TruncSatF32S $($rest:tt)*) => {};
    (emit I64TruncSatF32U $($rest:tt)*) => {};
    (emit I64TruncSatF64S $($rest:tt)*) => {};
    (emit I64TruncSatF64U $($rest:tt)*) => {};
    (emit V128Load $($rest:tt)*) => {};
    (emit V128Store $($rest:tt)*) => {};
    (emit I64Add128 $($rest:tt)*) => {};
    (emit I64Sub128 $($rest:tt)*) => {};
    (emit I64MulWideS $($rest:tt)*) => {};
    (emit I64MulWideU $($rest:tt)*) => {};
    (emit I32AtomicLoad8U $($rest:tt)*) => {};
    (emit I32AtomicLoad16U $($rest:tt)*) => {};
    (emit I32AtomicLoad $($rest:tt)*) => {};
    (emit I64AtomicLoad8U $($rest:tt)*) => {};
    (emit I64AtomicLoad16U $($rest:tt)*) => {};
    (emit I64AtomicLoad32U $($rest:tt)*) => {};
    (emit I64AtomicLoad $($rest:tt)*) => {};
    (emit V128Load8x8S $($rest:tt)*) => {};
    (emit V128Load8x8U $($rest:tt)*) => {};
    (emit V128Load16x4S $($rest:tt)*) => {};
    (emit V128Load16x4U $($rest:tt)*) => {};
    (emit V128Load32x2S $($rest:tt)*) => {};
    (emit V128Load32x2U $($rest:tt)*) => {};
    (emit V128Load8Splat $($rest:tt)*) => {};
    (emit V128Load16Splat $($rest:tt)*) => {};
    (emit V128Load32Splat $($rest:tt)*) => {};
    (emit V128Load64Splat $($rest:tt)*) => {};
    (emit I8x16Splat $($rest:tt)*) => {};
    (emit I16x8Splat $($rest:tt)*) => {};
    (emit I32x4Splat $($rest:tt)*) => {};
    (emit I64x2Splat $($rest:tt)*) => {};
    (emit F32x4Splat $($rest:tt)*) => {};
    (emit F64x2Splat $($rest:tt)*) => {};
    (emit I32AtomicStore8 $($rest:tt)*) => {};
    (emit I32AtomicStore16 $($rest:tt)*) => {};
    (emit I32AtomicStore $($rest:tt)*) => {};
    (emit I64AtomicStore8 $($rest:tt)*) => {};
    (emit I64AtomicStore16 $($rest:tt)*) => {};
    (emit I64AtomicStore32 $($rest:tt)*) => {};
    (emit I64AtomicStore $($rest:tt)*) => {};
    (emit I32AtomicRmw8AddU $($rest:tt)*) => {};
    (emit I32AtomicRmw16AddU $($rest:tt)*) => {};
    (emit I32AtomicRmwAdd $($rest:tt)*) => {};
    (emit I64AtomicRmw8AddU $($rest:tt)*) => {};
    (emit I64AtomicRmw16AddU $($rest:tt)*) => {};
    (emit I64AtomicRmw32AddU $($rest:tt)*) => {};
    (emit I64AtomicRmwAdd $($rest:tt)*) => {};
    (emit I8x16Shuffle $($rest:tt)*) => {};
    (emit I8x16Swizzle $($rest:tt)*) => {};
    (emit I32AtomicRmw8SubU $($rest:tt)*) => {};
    (emit I32AtomicRmw16SubU $($rest:tt)*) => {};
    (emit I32AtomicRmwSub $($rest:tt)*) => {};
    (emit I64AtomicRmw8SubU $($rest:tt)*) => {};
    (emit I64AtomicRmw16SubU $($rest:tt)*) => {};
    (emit I64AtomicRmw32SubU $($rest:tt)*) => {};
    (emit I64AtomicRmwSub $($rest:tt)*) => {};
    (emit I32AtomicRmw8XchgU $($rest:tt)*) => {};
    (emit I32AtomicRmw16XchgU $($rest:tt)*) => {};
    (emit I32AtomicRmwXchg $($rest:tt)*) => {};
    (emit I64AtomicRmw8XchgU $($rest:tt)*) => {};
    (emit I64AtomicRmw16XchgU $($rest:tt)*) => {};
    (emit I64AtomicRmw32XchgU $($rest:tt)*) => {};
    (emit I64AtomicRmwXchg $($rest:tt)*) => {};
    (emit I8x16ExtractLaneS $($rest:tt)*) => {};
    (emit I8x16ExtractLaneU $($rest:tt)*) => {};
    (emit I16x8ExtractLaneS $($rest:tt)*) => {};
    (emit I16x8ExtractLaneU $($rest:tt)*) => {};
    (emit I32x4ExtractLane $($rest:tt)*) => {};
    (emit I64x2ExtractLane $($rest:tt)*) => {};
    (emit F32x4ExtractLane $($rest:tt)*) => {};
    (emit F64x2ExtractLane $($rest:tt)*) => {};
    (emit I32AtomicRmw8AndU $($rest:tt)*) => {};
    (emit I32AtomicRmw16AndU $($rest:tt)*) => {};
    (emit I32AtomicRmwAnd $($rest:tt)*) => {};
    (emit I64AtomicRmw8AndU $($rest:tt)*) => {};
    (emit I64AtomicRmw16AndU $($rest:tt)*) => {};
    (emit I64AtomicRmw32AndU $($rest:tt)*) => {};
    (emit I64AtomicRmwAnd $($rest:tt)*) => {};
    (emit I32AtomicRmw8OrU $($rest:tt)*) => {};
    (emit I32AtomicRmw16OrU $($rest:tt)*) => {};
    (emit I32AtomicRmwOr $($rest:tt)*) => {};
    (emit I64AtomicRmw8OrU $($rest:tt)*) => {};
    (emit I64AtomicRmw16OrU $($rest:tt)*) => {};
    (emit I64AtomicRmw32OrU $($rest:tt)*) => {};
    (emit I64AtomicRmwOr $($rest:tt)*) => {};
    (emit I32AtomicRmw8XorU $($rest:tt)*) => {};
    (emit I32AtomicRmw16XorU $($rest:tt)*) => {};
    (emit I32AtomicRmwXor $($rest:tt)*) => {};
    (emit I64AtomicRmw8XorU $($rest:tt)*) => {};
    (emit I64AtomicRmw16XorU $($rest:tt)*) => {};
    (emit I64AtomicRmw32XorU $($rest:tt)*) => {};
    (emit I64AtomicRmwXor $($rest:tt)*) => {};
    (emit I8x16ReplaceLane $($rest:tt)*) => {};
    (emit I16x8ReplaceLane $($rest:tt)*) => {};
    (emit I32x4ReplaceLane $($rest:tt)*) => {};
    (emit I64x2ReplaceLane $($rest:tt)*) => {};
    (emit F32x4ReplaceLane $($rest:tt)*) => {};
    (emit F64x2ReplaceLane $($rest:tt)*) => {};
    (emit I32AtomicRmw8CmpxchgU $($rest:tt)*) => {};
    (emit I32AtomicRmw16CmpxchgU $($rest:tt)*) => {};
    (emit I32AtomicRmwCmpxchg $($rest:tt)*) => {};
    (emit I64AtomicRmw8CmpxchgU $($rest:tt)*) => {};
    (emit I64AtomicRmw16CmpxchgU $($rest:tt)*) => {};
    (emit I64AtomicRmw32CmpxchgU $($rest:tt)*) => {};
    (emit I64AtomicRmwCmpxchg $($rest:tt)*) => {};
    (emit I8x16Eq $($rest:tt)*) => {};
    (emit I16x8Eq $($rest:tt)*) => {};
    (emit I32x4Eq $($rest:tt)*) => {};
    (emit I64x2Eq $($rest:tt)*) => {};
    (emit F32x4Eq $($rest:tt)*) => {};
    (emit F64x2Eq $($rest:tt)*) => {};
    (emit I8x16Ne $($rest:tt)*) => {};
    (emit I16x8Ne $($rest:tt)*) => {};
    (emit I32x4Ne $($rest:tt)*) => {};
    (emit I64x2Ne $($rest:tt)*) => {};
    (emit F32x4Ne $($rest:tt)*) => {};
    (emit F64x2Ne $($rest:tt)*) => {};
    (emit I8x16LtS $($rest:tt)*) => {};
    (emit I8x16LtU $($rest:tt)*) => {};
    (emit I16x8LtS $($rest:tt)*) => {};
    (emit I16x8LtU $($rest:tt)*) => {};
    (emit I32x4LtS $($rest:tt)*) => {};
    (emit I32x4LtU $($rest:tt)*) => {};
    (emit I64x2LtS $($rest:tt)*) => {};
    (emit F32x4Lt $($rest:tt)*) => {};
    (emit F64x2Lt $($rest:tt)*) => {};
    (emit I8x16LeS $($rest:tt)*) => {};
    (emit I8x16LeU $($rest:tt)*) => {};
    (emit I16x8LeS $($rest:tt)*) => {};
    (emit I16x8LeU $($rest:tt)*) => {};
    (emit I32x4LeS $($rest:tt)*) => {};
    (emit I32x4LeU $($rest:tt)*) => {};
    (emit I64x2LeS $($rest:tt)*) => {};
    (emit F32x4Le $($rest:tt)*) => {};
    (emit F64x2Le $($rest:tt)*) => {};
    (emit I8x16GtS $($rest:tt)*) => {};
    (emit I8x16GtU $($rest:tt)*) => {};
    (emit I16x8GtS $($rest:tt)*) => {};
    (emit I16x8GtU $($rest:tt)*) => {};
    (emit I32x4GtS $($rest:tt)*) => {};
    (emit I32x4GtU $($rest:tt)*) => {};
    (emit I64x2GtS $($rest:tt)*) => {};
    (emit F32x4Gt $($rest:tt)*) => {};
    (emit F64x2Gt $($rest:tt)*) => {};
    (emit I8x16GeS $($rest:tt)*) => {};
    (emit I8x16GeU $($rest:tt)*) => {};
    (emit I16x8GeS $($rest:tt)*) => {};
    (emit I16x8GeU $($rest:tt)*) => {};
    (emit I32x4GeS $($rest:tt)*) => {};
    (emit I32x4GeU $($rest:tt)*) => {};
    (emit I64x2GeS $($rest:tt)*) => {};
    (emit F32x4Ge $($rest:tt)*) => {};
    (emit F64x2Ge $($rest:tt)*) => {};
    (emit MemoryAtomicWait32 $($rest:tt)*) => {};
    (emit MemoryAtomicWait64 $($rest:tt)*) => {};
    (emit MemoryAtomicNotify $($rest:tt)*) => {};
    (emit AtomicFence $($rest:tt)*) => {};
    (emit V128Not $($rest:tt)*) => {};
    (emit V128And $($rest:tt)*) => {};
    (emit V128AndNot $($rest:tt)*) => {};
    (emit V128Or $($rest:tt)*) => {};
    (emit V128Xor $($rest:tt)*) => {};
    (emit V128Bitselect $($rest:tt)*) => {};
    (emit V128AnyTrue $($rest:tt)*) => {};
    (emit V128Load8Lane $($rest:tt)*) => {};
    (emit V128Load16Lane $($rest:tt)*) => {};
    (emit V128Load32Lane $($rest:tt)*) => {};
    (emit V128Load64Lane $($rest:tt)*) => {};
    (emit V128Store8Lane $($rest:tt)*) => {};
    (emit V128Store16Lane $($rest:tt)*) => {};
    (emit V128Store32Lane $($rest:tt)*) => {};
    (emit V128Store64Lane $($rest:tt)*) => {};
    (emit F32x4ConvertI32x4S $($rest:tt)*) => {};
    (emit F32x4ConvertI32x4U $($rest:tt)*) => {};
    (emit F64x2ConvertLowI32x4S $($rest:tt)*) => {};
    (emit F64x2ConvertLowI32x4U $($rest:tt)*) => {};
    (emit I8x16NarrowI16x8S $($rest:tt)*) => {};
    (emit I8x16NarrowI16x8U $($rest:tt)*) => {};
    (emit I16x8NarrowI32x4S $($rest:tt)*) => {};
    (emit I16x8NarrowI32x4U $($rest:tt)*) => {};
    (emit F32x4DemoteF64x2Zero $($rest:tt)*) => {};
    (emit F64x2PromoteLowF32x4 $($rest:tt)*) => {};
    (emit I16x8ExtendLowI8x16S $($rest:tt)*) => {};
    (emit I16x8ExtendHighI8x16S $($rest:tt)*) => {};
    (emit I16x8ExtendLowI8x16U $($rest:tt)*) => {};
    (emit I16x8ExtendHighI8x16U $($rest:tt)*) => {};
    (emit I32x4ExtendLowI16x8S $($rest:tt)*) => {};
    (emit I32x4ExtendHighI16x8S $($rest:tt)*) => {};
    (emit I32x4ExtendLowI16x8U $($rest:tt)*) => {};
    (emit I32x4ExtendHighI16x8U $($rest:tt)*) => {};
    (emit I64x2ExtendLowI32x4S $($rest:tt)*) => {};
    (emit I64x2ExtendHighI32x4S $($rest:tt)*) => {};
    (emit I64x2ExtendLowI32x4U $($rest:tt)*) => {};
    (emit I64x2ExtendHighI32x4U $($rest:tt)*) => {};
    (emit I8x16Add $($rest:tt)*) => {};
    (emit I16x8Add $($rest:tt)*) => {};
    (emit I32x4Add $($rest:tt)*) => {};
    (emit I64x2Add $($rest:tt)*) => {};
    (emit I8x16Sub $($rest:tt)*) => {};
    (emit I16x8Sub $($rest:tt)*) => {};
    (emit I32x4Sub $($rest:tt)*) => {};
    (emit I64x2Sub $($rest:tt)*) => {};
    (emit I16x8Mul $($rest:tt)*) => {};
    (emit I32x4Mul $($rest:tt)*) => {};
    (emit I64x2Mul $($rest:tt)*) => {};
    (emit I8x16AddSatS $($rest:tt)*) => {};
    (emit I16x8AddSatS $($rest:tt)*) => {};
    (emit I8x16AddSatU $($rest:tt)*) => {};
    (emit I16x8AddSatU $($rest:tt)*) => {};
    (emit I8x16SubSatS $($rest:tt)*) => {};
    (emit I16x8SubSatS $($rest:tt)*) => {};
    (emit I8x16SubSatU $($rest:tt)*) => {};
    (emit I16x8SubSatU $($rest:tt)*) => {};
    (emit I8x16Abs $($rest:tt)*) => {};
    (emit I16x8Abs $($rest:tt)*) => {};
    (emit I32x4Abs $($rest:tt)*) => {};
    (emit I64x2Abs $($rest:tt)*) => {};
    (emit F32x4Abs $($rest:tt)*) => {};
    (emit F64x2Abs $($rest:tt)*) => {};
    (emit I8x16Neg $($rest:tt)*) => {};
    (emit I16x8Neg $($rest:tt)*) => {};
    (emit I32x4Neg $($rest:tt)*) => {};
    (emit I64x2Neg $($rest:tt)*) => {};
    (emit I8x16Shl $($rest:tt)*) => {};
    (emit I16x8Shl $($rest:tt)*) => {};
    (emit I32x4Shl $($rest:tt)*) => {};
    (emit I64x2Shl $($rest:tt)*) => {};
    (emit I8x16ShrU $($rest:tt)*) => {};
    (emit I16x8ShrU $($rest:tt)*) => {};
    (emit I32x4ShrU $($rest:tt)*) => {};
    (emit I64x2ShrU $($rest:tt)*) => {};
    (emit I8x16ShrS $($rest:tt)*) => {};
    (emit I16x8ShrS $($rest:tt)*) => {};
    (emit I32x4ShrS $($rest:tt)*) => {};
    (emit I64x2ShrS $($rest:tt)*) => {};
    (emit I16x8Q15MulrSatS $($rest:tt)*) => {};
    (emit I8x16AllTrue $($rest:tt)*) => {};
    (emit I16x8AllTrue $($rest:tt)*) => {};
    (emit I32x4AllTrue $($rest:tt)*) => {};
    (emit I64x2AllTrue $($rest:tt)*) => {};
    (emit I8x16Bitmask $($rest:tt)*) => {};
    (emit I16x8Bitmask $($rest:tt)*) => {};
    (emit I32x4Bitmask $($rest:tt)*) => {};
    (emit I64x2Bitmask $($rest:tt)*) => {};
    (emit I32x4TruncSatF32x4S $($rest:tt)*) => {};
    (emit I32x4TruncSatF32x4U $($rest:tt)*) => {};
    (emit I32x4TruncSatF64x2SZero $($rest:tt)*) => {};
    (emit I32x4TruncSatF64x2UZero $($rest:tt)*) => {};
    (emit I8x16MinU $($rest:tt)*) => {};
    (emit I16x8MinU $($rest:tt)*) => {};
    (emit I32x4MinU $($rest:tt)*) => {};
    (emit I8x16MinS $($rest:tt)*) => {};
    (emit I16x8MinS $($rest:tt)*) => {};
    (emit I32x4MinS $($rest:tt)*) => {};
    (emit I8x16MaxU $($rest:tt)*) => {};
    (emit I16x8MaxU $($rest:tt)*) => {};
    (emit I32x4MaxU $($rest:tt)*) => {};
    (emit I8x16MaxS $($rest:tt)*) => {};
    (emit I16x8MaxS $($rest:tt)*) => {};
    (emit I32x4MaxS $($rest:tt)*) => {};
    (emit I16x8ExtMulLowI8x16S $($rest:tt)*) => {};
    (emit I32x4ExtMulLowI16x8S $($rest:tt)*) => {};
    (emit I64x2ExtMulLowI32x4S $($rest:tt)*) => {};
    (emit I16x8ExtMulHighI8x16S $($rest:tt)*) => {};
    (emit I32x4ExtMulHighI16x8S $($rest:tt)*) => {};
    (emit I64x2ExtMulHighI32x4S $($rest:tt)*) => {};
    (emit I16x8ExtMulLowI8x16U $($rest:tt)*) => {};
    (emit I32x4ExtMulLowI16x8U $($rest:tt)*) => {};
    (emit I64x2ExtMulLowI32x4U $($rest:tt)*) => {};
    (emit I16x8ExtMulHighI8x16U $($rest:tt)*) => {};
    (emit I32x4ExtMulHighI16x8U $($rest:tt)*) => {};
    (emit I64x2ExtMulHighI32x4U $($rest:tt)*) => {};
    (emit I16x8ExtAddPairwiseI8x16U $($rest:tt)*) => {};
    (emit I16x8ExtAddPairwiseI8x16S $($rest:tt)*) => {};
    (emit I32x4ExtAddPairwiseI16x8U $($rest:tt)*) => {};
    (emit I32x4ExtAddPairwiseI16x8S $($rest:tt)*) => {};
    (emit I32x4DotI16x8S $($rest:tt)*) => {};
    (emit I8x16Popcnt $($rest:tt)*) => {};
    (emit I8x16AvgrU $($rest:tt)*) => {};
    (emit I16x8AvgrU $($rest:tt)*) => {};
    (emit F32x4Add $($rest:tt)*) => {};
    (emit F64x2Add $($rest:tt)*) => {};
    (emit F32x4Sub $($rest:tt)*) => {};
    (emit F64x2Sub $($rest:tt)*) => {};
    (emit F32x4Mul $($rest:tt)*) => {};
    (emit F64x2Mul $($rest:tt)*) => {};
    (emit F32x4Div $($rest:tt)*) => {};
    (emit F64x2Div $($rest:tt)*) => {};
    (emit F32x4Neg $($rest:tt)*) => {};
    (emit F64x2Neg $($rest:tt)*) => {};
    (emit F32x4Sqrt $($rest:tt)*) => {};
    (emit F64x2Sqrt $($rest:tt)*) => {};
    (emit F32x4Ceil $($rest:tt)*) => {};
    (emit F64x2Ceil $($rest:tt)*) => {};
    (emit F32x4Floor $($rest:tt)*) => {};
    (emit F64x2Floor $($rest:tt)*) => {};
    (emit F32x4Nearest $($rest:tt)*) => {};
    (emit F64x2Nearest $($rest:tt)*) => {};
    (emit F32x4Trunc $($rest:tt)*) => {};
    (emit F64x2Trunc $($rest:tt)*) => {};
    (emit V128Load32Zero $($rest:tt)*) => {};
    (emit V128Load64Zero $($rest:tt)*) => {};
    (emit F32x4PMin $($rest:tt)*) => {};
    (emit F64x2PMin $($rest:tt)*) => {};
    (emit F32x4PMax $($rest:tt)*) => {};
    (emit F64x2PMax $($rest:tt)*) => {};
    (emit F32x4Min $($rest:tt)*) => {};
    (emit F64x2Min $($rest:tt)*) => {};
    (emit F32x4Max $($rest:tt)*) => {};
    (emit F64x2Max $($rest:tt)*) => {};

    // Fallback: any operator not listed above gets the stub visitor
    // generated by the first arm's recursive invocation.
    (emit $unsupported:tt $($rest:tt)*) => {$($rest)*};
}
564
565
impl<'a, 'translation, 'data, M> VisitOperator<'a> for CodeGen<'a, 'translation, 'data, M, Emission>
566
where
567
M: MacroAssembler,
568
{
569
type Output = Result<()>;
570
571
fn visit_i32_const(&mut self, val: i32) -> Self::Output {
572
self.context.stack.push(Val::i32(val));
573
574
Ok(())
575
}
576
577
fn visit_i64_const(&mut self, val: i64) -> Self::Output {
578
self.context.stack.push(Val::i64(val));
579
Ok(())
580
}
581
582
fn visit_f32_const(&mut self, val: Ieee32) -> Self::Output {
583
self.context.stack.push(Val::f32(val));
584
Ok(())
585
}
586
587
fn visit_f64_const(&mut self, val: Ieee64) -> Self::Output {
588
self.context.stack.push(Val::f64(val));
589
Ok(())
590
}
591
592
fn visit_f32_add(&mut self) -> Self::Output {
593
self.context.binop(
594
self.masm,
595
OperandSize::S32,
596
&mut |masm: &mut M, dst, src, size| {
597
masm.float_add(writable!(dst), dst, src, size)?;
598
Ok(TypedReg::f32(dst))
599
},
600
)
601
}
602
603
fn visit_f64_add(&mut self) -> Self::Output {
604
self.context.binop(
605
self.masm,
606
OperandSize::S64,
607
&mut |masm: &mut M, dst, src, size| {
608
masm.float_add(writable!(dst), dst, src, size)?;
609
Ok(TypedReg::f64(dst))
610
},
611
)
612
}
613
614
fn visit_f32_sub(&mut self) -> Self::Output {
615
self.context.binop(
616
self.masm,
617
OperandSize::S32,
618
&mut |masm: &mut M, dst, src, size| {
619
masm.float_sub(writable!(dst), dst, src, size)?;
620
Ok(TypedReg::f32(dst))
621
},
622
)
623
}
624
625
fn visit_f64_sub(&mut self) -> Self::Output {
626
self.context.binop(
627
self.masm,
628
OperandSize::S64,
629
&mut |masm: &mut M, dst, src, size| {
630
masm.float_sub(writable!(dst), dst, src, size)?;
631
Ok(TypedReg::f64(dst))
632
},
633
)
634
}
635
636
fn visit_f32_mul(&mut self) -> Self::Output {
637
self.context.binop(
638
self.masm,
639
OperandSize::S32,
640
&mut |masm: &mut M, dst, src, size| {
641
masm.float_mul(writable!(dst), dst, src, size)?;
642
Ok(TypedReg::f32(dst))
643
},
644
)
645
}
646
647
fn visit_f64_mul(&mut self) -> Self::Output {
648
self.context.binop(
649
self.masm,
650
OperandSize::S64,
651
&mut |masm: &mut M, dst, src, size| {
652
masm.float_mul(writable!(dst), dst, src, size)?;
653
Ok(TypedReg::f64(dst))
654
},
655
)
656
}
657
658
fn visit_f32_div(&mut self) -> Self::Output {
659
self.context.binop(
660
self.masm,
661
OperandSize::S32,
662
&mut |masm: &mut M, dst, src, size| {
663
masm.float_div(writable!(dst), dst, src, size)?;
664
Ok(TypedReg::f32(dst))
665
},
666
)
667
}
668
669
fn visit_f64_div(&mut self) -> Self::Output {
670
self.context.binop(
671
self.masm,
672
OperandSize::S64,
673
&mut |masm: &mut M, dst, src, size| {
674
masm.float_div(writable!(dst), dst, src, size)?;
675
Ok(TypedReg::f64(dst))
676
},
677
)
678
}
679
680
fn visit_f32_min(&mut self) -> Self::Output {
681
self.context.binop(
682
self.masm,
683
OperandSize::S32,
684
&mut |masm: &mut M, dst, src, size| {
685
masm.float_min(writable!(dst), dst, src, size)?;
686
Ok(TypedReg::f32(dst))
687
},
688
)
689
}
690
691
fn visit_f64_min(&mut self) -> Self::Output {
692
self.context.binop(
693
self.masm,
694
OperandSize::S64,
695
&mut |masm: &mut M, dst, src, size| {
696
masm.float_min(writable!(dst), dst, src, size)?;
697
Ok(TypedReg::f64(dst))
698
},
699
)
700
}
701
702
fn visit_f32_max(&mut self) -> Self::Output {
703
self.context.binop(
704
self.masm,
705
OperandSize::S32,
706
&mut |masm: &mut M, dst, src, size| {
707
masm.float_max(writable!(dst), dst, src, size)?;
708
Ok(TypedReg::f32(dst))
709
},
710
)
711
}
712
713
fn visit_f64_max(&mut self) -> Self::Output {
714
self.context.binop(
715
self.masm,
716
OperandSize::S64,
717
&mut |masm: &mut M, dst, src, size| {
718
masm.float_max(writable!(dst), dst, src, size)?;
719
Ok(TypedReg::f64(dst))
720
},
721
)
722
}
723
724
fn visit_f32_copysign(&mut self) -> Self::Output {
725
self.context.binop(
726
self.masm,
727
OperandSize::S32,
728
&mut |masm: &mut M, dst, src, size| {
729
masm.float_copysign(writable!(dst), dst, src, size)?;
730
Ok(TypedReg::f32(dst))
731
},
732
)
733
}
734
735
fn visit_f64_copysign(&mut self) -> Self::Output {
736
self.context.binop(
737
self.masm,
738
OperandSize::S64,
739
&mut |masm: &mut M, dst, src, size| {
740
masm.float_copysign(writable!(dst), dst, src, size)?;
741
Ok(TypedReg::f64(dst))
742
},
743
)
744
}
745
746
fn visit_f32_abs(&mut self) -> Self::Output {
747
self.context.unop(self.masm, |masm, reg| {
748
masm.float_abs(writable!(reg), OperandSize::S32)?;
749
Ok(TypedReg::f32(reg))
750
})
751
}
752
753
fn visit_f64_abs(&mut self) -> Self::Output {
754
self.context.unop(self.masm, |masm, reg| {
755
masm.float_abs(writable!(reg), OperandSize::S64)?;
756
Ok(TypedReg::f64(reg))
757
})
758
}
759
760
fn visit_f32_neg(&mut self) -> Self::Output {
761
self.context.unop(self.masm, |masm, reg| {
762
masm.float_neg(writable!(reg), OperandSize::S32)?;
763
Ok(TypedReg::f32(reg))
764
})
765
}
766
767
fn visit_f64_neg(&mut self) -> Self::Output {
768
self.context.unop(self.masm, |masm, reg| {
769
masm.float_neg(writable!(reg), OperandSize::S64)?;
770
Ok(TypedReg::f64(reg))
771
})
772
}
773
774
fn visit_f32_floor(&mut self) -> Self::Output {
775
self.masm.float_round(
776
RoundingMode::Down,
777
&mut self.env,
778
&mut self.context,
779
OperandSize::S32,
780
|env, cx, masm| {
781
let builtin = env.builtins.floor_f32::<M::ABI, M::Ptr>()?;
782
FnCall::emit::<M>(env, masm, cx, Callee::Builtin(builtin))
783
},
784
)
785
}
786
787
fn visit_f64_floor(&mut self) -> Self::Output {
788
self.masm.float_round(
789
RoundingMode::Down,
790
&mut self.env,
791
&mut self.context,
792
OperandSize::S64,
793
|env, cx, masm| {
794
let builtin = env.builtins.floor_f64::<M::ABI, M::Ptr>()?;
795
FnCall::emit::<M>(env, masm, cx, Callee::Builtin(builtin))
796
},
797
)
798
}
799
800
fn visit_f32_ceil(&mut self) -> Self::Output {
801
self.masm.float_round(
802
RoundingMode::Up,
803
&mut self.env,
804
&mut self.context,
805
OperandSize::S32,
806
|env, cx, masm| {
807
let builtin = env.builtins.ceil_f32::<M::ABI, M::Ptr>()?;
808
FnCall::emit::<M>(env, masm, cx, Callee::Builtin(builtin))
809
},
810
)
811
}
812
813
fn visit_f64_ceil(&mut self) -> Self::Output {
814
self.masm.float_round(
815
RoundingMode::Up,
816
&mut self.env,
817
&mut self.context,
818
OperandSize::S64,
819
|env, cx, masm| {
820
let builtin = env.builtins.ceil_f64::<M::ABI, M::Ptr>()?;
821
FnCall::emit::<M>(env, masm, cx, Callee::Builtin(builtin))
822
},
823
)
824
}
825
826
fn visit_f32_nearest(&mut self) -> Self::Output {
827
self.masm.float_round(
828
RoundingMode::Nearest,
829
&mut self.env,
830
&mut self.context,
831
OperandSize::S32,
832
|env, cx, masm| {
833
let builtin = env.builtins.nearest_f32::<M::ABI, M::Ptr>()?;
834
FnCall::emit::<M>(env, masm, cx, Callee::Builtin(builtin))
835
},
836
)
837
}
838
839
fn visit_f64_nearest(&mut self) -> Self::Output {
840
self.masm.float_round(
841
RoundingMode::Nearest,
842
&mut self.env,
843
&mut self.context,
844
OperandSize::S64,
845
|env, cx, masm| {
846
let builtin = env.builtins.nearest_f64::<M::ABI, M::Ptr>()?;
847
FnCall::emit::<M>(env, masm, cx, Callee::Builtin(builtin))
848
},
849
)
850
}
851
852
fn visit_f32_trunc(&mut self) -> Self::Output {
853
self.masm.float_round(
854
RoundingMode::Zero,
855
&mut self.env,
856
&mut self.context,
857
OperandSize::S32,
858
|env, cx, masm| {
859
let builtin = env.builtins.trunc_f32::<M::ABI, M::Ptr>()?;
860
FnCall::emit::<M>(env, masm, cx, Callee::Builtin(builtin))
861
},
862
)
863
}
864
865
fn visit_f64_trunc(&mut self) -> Self::Output {
866
self.masm.float_round(
867
RoundingMode::Zero,
868
&mut self.env,
869
&mut self.context,
870
OperandSize::S64,
871
|env, cx, masm| {
872
let builtin = env.builtins.trunc_f64::<M::ABI, M::Ptr>()?;
873
FnCall::emit::<M>(env, masm, cx, Callee::Builtin(builtin))
874
},
875
)
876
}
877
878
fn visit_f32_sqrt(&mut self) -> Self::Output {
879
self.context.unop(self.masm, |masm, reg| {
880
masm.float_sqrt(writable!(reg), reg, OperandSize::S32)?;
881
Ok(TypedReg::f32(reg))
882
})
883
}
884
885
fn visit_f64_sqrt(&mut self) -> Self::Output {
886
self.context.unop(self.masm, |masm, reg| {
887
masm.float_sqrt(writable!(reg), reg, OperandSize::S64)?;
888
Ok(TypedReg::f64(reg))
889
})
890
}
891
892
fn visit_f32_eq(&mut self) -> Self::Output {
893
self.context.float_cmp_op(
894
self.masm,
895
OperandSize::S32,
896
&mut |masm: &mut M, dst, src1, src2, size| {
897
masm.float_cmp_with_set(writable!(dst), src1, src2, FloatCmpKind::Eq, size)
898
},
899
)
900
}
901
902
fn visit_f64_eq(&mut self) -> Self::Output {
903
self.context.float_cmp_op(
904
self.masm,
905
OperandSize::S64,
906
&mut |masm: &mut M, dst, src1, src2, size| {
907
masm.float_cmp_with_set(writable!(dst), src1, src2, FloatCmpKind::Eq, size)
908
},
909
)
910
}
911
912
fn visit_f32_ne(&mut self) -> Self::Output {
913
self.context.float_cmp_op(
914
self.masm,
915
OperandSize::S32,
916
&mut |masm: &mut M, dst, src1, src2, size| {
917
masm.float_cmp_with_set(writable!(dst), src1, src2, FloatCmpKind::Ne, size)
918
},
919
)
920
}
921
922
fn visit_f64_ne(&mut self) -> Self::Output {
923
self.context.float_cmp_op(
924
self.masm,
925
OperandSize::S64,
926
&mut |masm: &mut M, dst, src1, src2, size| {
927
masm.float_cmp_with_set(writable!(dst), src1, src2, FloatCmpKind::Ne, size)
928
},
929
)
930
}
931
932
fn visit_f32_lt(&mut self) -> Self::Output {
933
self.context.float_cmp_op(
934
self.masm,
935
OperandSize::S32,
936
&mut |masm: &mut M, dst, src1, src2, size| {
937
masm.float_cmp_with_set(writable!(dst), src1, src2, FloatCmpKind::Lt, size)
938
},
939
)
940
}
941
942
fn visit_f64_lt(&mut self) -> Self::Output {
943
self.context.float_cmp_op(
944
self.masm,
945
OperandSize::S64,
946
&mut |masm: &mut M, dst, src1, src2, size| {
947
masm.float_cmp_with_set(writable!(dst), src1, src2, FloatCmpKind::Lt, size)
948
},
949
)
950
}
951
952
fn visit_f32_gt(&mut self) -> Self::Output {
953
self.context.float_cmp_op(
954
self.masm,
955
OperandSize::S32,
956
&mut |masm: &mut M, dst, src1, src2, size| {
957
masm.float_cmp_with_set(writable!(dst), src1, src2, FloatCmpKind::Gt, size)
958
},
959
)
960
}
961
962
fn visit_f64_gt(&mut self) -> Self::Output {
963
self.context.float_cmp_op(
964
self.masm,
965
OperandSize::S64,
966
&mut |masm: &mut M, dst, src1, src2, size| {
967
masm.float_cmp_with_set(writable!(dst), src1, src2, FloatCmpKind::Gt, size)
968
},
969
)
970
}
971
972
fn visit_f32_le(&mut self) -> Self::Output {
973
self.context.float_cmp_op(
974
self.masm,
975
OperandSize::S32,
976
&mut |masm: &mut M, dst, src1, src2, size| {
977
masm.float_cmp_with_set(writable!(dst), src1, src2, FloatCmpKind::Le, size)
978
},
979
)
980
}
981
982
fn visit_f64_le(&mut self) -> Self::Output {
983
self.context.float_cmp_op(
984
self.masm,
985
OperandSize::S64,
986
&mut |masm: &mut M, dst, src1, src2, size| {
987
masm.float_cmp_with_set(writable!(dst), src1, src2, FloatCmpKind::Le, size)
988
},
989
)
990
}
991
992
fn visit_f32_ge(&mut self) -> Self::Output {
993
self.context.float_cmp_op(
994
self.masm,
995
OperandSize::S32,
996
&mut |masm: &mut M, dst, src1, src2, size| {
997
masm.float_cmp_with_set(writable!(dst), src1, src2, FloatCmpKind::Ge, size)
998
},
999
)
1000
}
1001
1002
fn visit_f64_ge(&mut self) -> Self::Output {
1003
self.context.float_cmp_op(
1004
self.masm,
1005
OperandSize::S64,
1006
&mut |masm: &mut M, dst, src1, src2, size| {
1007
masm.float_cmp_with_set(writable!(dst), src1, src2, FloatCmpKind::Ge, size)
1008
},
1009
)
1010
}
1011
1012
fn visit_f32_convert_i32_s(&mut self) -> Self::Output {
1013
self.context
1014
.convert_op(self.masm, WasmValType::F32, |masm, dst, src, dst_size| {
1015
masm.signed_convert(writable!(dst), src, OperandSize::S32, dst_size)
1016
})
1017
}
1018
1019
fn visit_f32_convert_i32_u(&mut self) -> Self::Output {
1020
self.context.convert_op_with_tmp_reg(
1021
self.masm,
1022
WasmValType::F32,
1023
RegClass::Int,
1024
|masm, dst, src, tmp_gpr, dst_size| {
1025
masm.unsigned_convert(writable!(dst), src, tmp_gpr, OperandSize::S32, dst_size)
1026
},
1027
)
1028
}
1029
1030
fn visit_f32_convert_i64_s(&mut self) -> Self::Output {
1031
self.context
1032
.convert_op(self.masm, WasmValType::F32, |masm, dst, src, dst_size| {
1033
masm.signed_convert(writable!(dst), src, OperandSize::S64, dst_size)
1034
})
1035
}
1036
1037
fn visit_f32_convert_i64_u(&mut self) -> Self::Output {
1038
self.context.convert_op_with_tmp_reg(
1039
self.masm,
1040
WasmValType::F32,
1041
RegClass::Int,
1042
|masm, dst, src, tmp_gpr, dst_size| {
1043
masm.unsigned_convert(writable!(dst), src, tmp_gpr, OperandSize::S64, dst_size)
1044
},
1045
)
1046
}
1047
1048
fn visit_f64_convert_i32_s(&mut self) -> Self::Output {
1049
self.context
1050
.convert_op(self.masm, WasmValType::F64, |masm, dst, src, dst_size| {
1051
masm.signed_convert(writable!(dst), src, OperandSize::S32, dst_size)
1052
})
1053
}
1054
1055
fn visit_f64_convert_i32_u(&mut self) -> Self::Output {
1056
self.context.convert_op_with_tmp_reg(
1057
self.masm,
1058
WasmValType::F64,
1059
RegClass::Int,
1060
|masm, dst, src, tmp_gpr, dst_size| {
1061
masm.unsigned_convert(writable!(dst), src, tmp_gpr, OperandSize::S32, dst_size)
1062
},
1063
)
1064
}
1065
1066
fn visit_f64_convert_i64_s(&mut self) -> Self::Output {
1067
self.context
1068
.convert_op(self.masm, WasmValType::F64, |masm, dst, src, dst_size| {
1069
masm.signed_convert(writable!(dst), src, OperandSize::S64, dst_size)
1070
})
1071
}
1072
1073
fn visit_f64_convert_i64_u(&mut self) -> Self::Output {
1074
self.context.convert_op_with_tmp_reg(
1075
self.masm,
1076
WasmValType::F64,
1077
RegClass::Int,
1078
|masm, dst, src, tmp_gpr, dst_size| {
1079
masm.unsigned_convert(writable!(dst), src, tmp_gpr, OperandSize::S64, dst_size)
1080
},
1081
)
1082
}
1083
1084
fn visit_f32_reinterpret_i32(&mut self) -> Self::Output {
1085
self.context
1086
.convert_op(self.masm, WasmValType::F32, |masm, dst, src, size| {
1087
masm.reinterpret_int_as_float(writable!(dst), src, size)
1088
})
1089
}
1090
1091
fn visit_f64_reinterpret_i64(&mut self) -> Self::Output {
1092
self.context
1093
.convert_op(self.masm, WasmValType::F64, |masm, dst, src, size| {
1094
masm.reinterpret_int_as_float(writable!(dst), src, size)
1095
})
1096
}
1097
1098
fn visit_f32_demote_f64(&mut self) -> Self::Output {
1099
self.context.unop(self.masm, |masm, reg| {
1100
masm.demote(writable!(reg), reg)?;
1101
Ok(TypedReg::f32(reg))
1102
})
1103
}
1104
1105
fn visit_f64_promote_f32(&mut self) -> Self::Output {
1106
self.context.unop(self.masm, |masm, reg| {
1107
masm.promote(writable!(reg), reg)?;
1108
Ok(TypedReg::f64(reg))
1109
})
1110
}
1111
1112
fn visit_i32_add(&mut self) -> Self::Output {
1113
self.context.i32_binop(self.masm, |masm, dst, src, size| {
1114
masm.add(writable!(dst), dst, src, size)?;
1115
Ok(TypedReg::i32(dst))
1116
})
1117
}
1118
1119
fn visit_i64_add(&mut self) -> Self::Output {
1120
self.context.i64_binop(self.masm, |masm, dst, src, size| {
1121
masm.add(writable!(dst), dst, src, size)?;
1122
Ok(TypedReg::i64(dst))
1123
})
1124
}
1125
1126
fn visit_i32_sub(&mut self) -> Self::Output {
1127
self.context.i32_binop(self.masm, |masm, dst, src, size| {
1128
masm.sub(writable!(dst), dst, src, size)?;
1129
Ok(TypedReg::i32(dst))
1130
})
1131
}
1132
1133
fn visit_i64_sub(&mut self) -> Self::Output {
1134
self.context.i64_binop(self.masm, |masm, dst, src, size| {
1135
masm.sub(writable!(dst), dst, src, size)?;
1136
Ok(TypedReg::i64(dst))
1137
})
1138
}
1139
1140
fn visit_i32_mul(&mut self) -> Self::Output {
1141
self.context.i32_binop(self.masm, |masm, dst, src, size| {
1142
masm.mul(writable!(dst), dst, src, size)?;
1143
Ok(TypedReg::i32(dst))
1144
})
1145
}
1146
1147
fn visit_i64_mul(&mut self) -> Self::Output {
1148
self.context.i64_binop(self.masm, |masm, dst, src, size| {
1149
masm.mul(writable!(dst), dst, src, size)?;
1150
Ok(TypedReg::i64(dst))
1151
})
1152
}
1153
1154
fn visit_i32_div_s(&mut self) -> Self::Output {
1155
use DivKind::*;
1156
use OperandSize::*;
1157
1158
self.masm.div(&mut self.context, Signed, S32)
1159
}
1160
1161
fn visit_i32_div_u(&mut self) -> Self::Output {
1162
use DivKind::*;
1163
use OperandSize::*;
1164
1165
self.masm.div(&mut self.context, Unsigned, S32)
1166
}
1167
1168
fn visit_i64_div_s(&mut self) -> Self::Output {
1169
use DivKind::*;
1170
use OperandSize::*;
1171
1172
self.masm.div(&mut self.context, Signed, S64)
1173
}
1174
1175
fn visit_i64_div_u(&mut self) -> Self::Output {
1176
use DivKind::*;
1177
use OperandSize::*;
1178
1179
self.masm.div(&mut self.context, Unsigned, S64)
1180
}
1181
1182
fn visit_i32_rem_s(&mut self) -> Self::Output {
1183
use OperandSize::*;
1184
use RemKind::*;
1185
1186
self.masm.rem(&mut self.context, Signed, S32)
1187
}
1188
1189
fn visit_i32_rem_u(&mut self) -> Self::Output {
1190
use OperandSize::*;
1191
use RemKind::*;
1192
1193
self.masm.rem(&mut self.context, Unsigned, S32)
1194
}
1195
1196
fn visit_i64_rem_s(&mut self) -> Self::Output {
1197
use OperandSize::*;
1198
use RemKind::*;
1199
1200
self.masm.rem(&mut self.context, Signed, S64)
1201
}
1202
1203
fn visit_i64_rem_u(&mut self) -> Self::Output {
1204
use OperandSize::*;
1205
use RemKind::*;
1206
1207
self.masm.rem(&mut self.context, Unsigned, S64)
1208
}
1209
1210
fn visit_i32_eq(&mut self) -> Self::Output {
1211
self.cmp_i32s(IntCmpKind::Eq)
1212
}
1213
1214
fn visit_i64_eq(&mut self) -> Self::Output {
1215
self.cmp_i64s(IntCmpKind::Eq)
1216
}
1217
1218
fn visit_i32_ne(&mut self) -> Self::Output {
1219
self.cmp_i32s(IntCmpKind::Ne)
1220
}
1221
1222
fn visit_i64_ne(&mut self) -> Self::Output {
1223
self.cmp_i64s(IntCmpKind::Ne)
1224
}
1225
1226
fn visit_i32_lt_s(&mut self) -> Self::Output {
1227
self.cmp_i32s(IntCmpKind::LtS)
1228
}
1229
1230
fn visit_i64_lt_s(&mut self) -> Self::Output {
1231
self.cmp_i64s(IntCmpKind::LtS)
1232
}
1233
1234
fn visit_i32_lt_u(&mut self) -> Self::Output {
1235
self.cmp_i32s(IntCmpKind::LtU)
1236
}
1237
1238
fn visit_i64_lt_u(&mut self) -> Self::Output {
1239
self.cmp_i64s(IntCmpKind::LtU)
1240
}
1241
1242
fn visit_i32_le_s(&mut self) -> Self::Output {
1243
self.cmp_i32s(IntCmpKind::LeS)
1244
}
1245
1246
fn visit_i64_le_s(&mut self) -> Self::Output {
1247
self.cmp_i64s(IntCmpKind::LeS)
1248
}
1249
1250
fn visit_i32_le_u(&mut self) -> Self::Output {
1251
self.cmp_i32s(IntCmpKind::LeU)
1252
}
1253
1254
fn visit_i64_le_u(&mut self) -> Self::Output {
1255
self.cmp_i64s(IntCmpKind::LeU)
1256
}
1257
1258
fn visit_i32_gt_s(&mut self) -> Self::Output {
1259
self.cmp_i32s(IntCmpKind::GtS)
1260
}
1261
1262
fn visit_i64_gt_s(&mut self) -> Self::Output {
1263
self.cmp_i64s(IntCmpKind::GtS)
1264
}
1265
1266
fn visit_i32_gt_u(&mut self) -> Self::Output {
1267
self.cmp_i32s(IntCmpKind::GtU)
1268
}
1269
1270
fn visit_i64_gt_u(&mut self) -> Self::Output {
1271
self.cmp_i64s(IntCmpKind::GtU)
1272
}
1273
1274
fn visit_i32_ge_s(&mut self) -> Self::Output {
1275
self.cmp_i32s(IntCmpKind::GeS)
1276
}
1277
1278
fn visit_i64_ge_s(&mut self) -> Self::Output {
1279
self.cmp_i64s(IntCmpKind::GeS)
1280
}
1281
1282
fn visit_i32_ge_u(&mut self) -> Self::Output {
1283
self.cmp_i32s(IntCmpKind::GeU)
1284
}
1285
1286
fn visit_i64_ge_u(&mut self) -> Self::Output {
1287
self.cmp_i64s(IntCmpKind::GeU)
1288
}
1289
1290
fn visit_i32_eqz(&mut self) -> Self::Output {
1291
use OperandSize::*;
1292
1293
self.context.unop(self.masm, |masm, reg| {
1294
masm.cmp_with_set(writable!(reg), RegImm::i32(0), IntCmpKind::Eq, S32)?;
1295
Ok(TypedReg::i32(reg))
1296
})
1297
}
1298
1299
fn visit_i64_eqz(&mut self) -> Self::Output {
1300
use OperandSize::*;
1301
1302
self.context.unop(self.masm, |masm, reg| {
1303
masm.cmp_with_set(writable!(reg), RegImm::i64(0), IntCmpKind::Eq, S64)?;
1304
Ok(TypedReg::i32(reg)) // Return value for `i64.eqz` is an `i32`.
1305
})
1306
}
1307
1308
fn visit_i32_clz(&mut self) -> Self::Output {
1309
use OperandSize::*;
1310
1311
self.context.unop(self.masm, |masm, reg| {
1312
masm.clz(writable!(reg), reg, S32)?;
1313
Ok(TypedReg::i32(reg))
1314
})
1315
}
1316
1317
fn visit_i64_clz(&mut self) -> Self::Output {
1318
use OperandSize::*;
1319
1320
self.context.unop(self.masm, |masm, reg| {
1321
masm.clz(writable!(reg), reg, S64)?;
1322
Ok(TypedReg::i64(reg))
1323
})
1324
}
1325
1326
fn visit_i32_ctz(&mut self) -> Self::Output {
1327
use OperandSize::*;
1328
1329
self.context.unop(self.masm, |masm, reg| {
1330
masm.ctz(writable!(reg), reg, S32)?;
1331
Ok(TypedReg::i32(reg))
1332
})
1333
}
1334
1335
fn visit_i64_ctz(&mut self) -> Self::Output {
1336
use OperandSize::*;
1337
1338
self.context.unop(self.masm, |masm, reg| {
1339
masm.ctz(writable!(reg), reg, S64)?;
1340
Ok(TypedReg::i64(reg))
1341
})
1342
}
1343
1344
fn visit_i32_and(&mut self) -> Self::Output {
1345
self.context.i32_binop(self.masm, |masm, dst, src, size| {
1346
masm.and(writable!(dst), dst, src, size)?;
1347
Ok(TypedReg::i32(dst))
1348
})
1349
}
1350
1351
fn visit_i64_and(&mut self) -> Self::Output {
1352
self.context.i64_binop(self.masm, |masm, dst, src, size| {
1353
masm.and(writable!(dst), dst, src, size)?;
1354
Ok(TypedReg::i64(dst))
1355
})
1356
}
1357
1358
fn visit_i32_or(&mut self) -> Self::Output {
1359
self.context.i32_binop(self.masm, |masm, dst, src, size| {
1360
masm.or(writable!(dst), dst, src, size)?;
1361
Ok(TypedReg::i32(dst))
1362
})
1363
}
1364
1365
fn visit_i64_or(&mut self) -> Self::Output {
1366
self.context.i64_binop(self.masm, |masm, dst, src, size| {
1367
masm.or(writable!(dst), dst, src, size)?;
1368
Ok(TypedReg::i64(dst))
1369
})
1370
}
1371
1372
fn visit_i32_xor(&mut self) -> Self::Output {
1373
self.context.i32_binop(self.masm, |masm, dst, src, size| {
1374
masm.xor(writable!(dst), dst, src, size)?;
1375
Ok(TypedReg::i32(dst))
1376
})
1377
}
1378
1379
fn visit_i64_xor(&mut self) -> Self::Output {
1380
self.context.i64_binop(self.masm, |masm, dst, src, size| {
1381
masm.xor(writable!(dst), dst, src, size)?;
1382
Ok(TypedReg::i64(dst))
1383
})
1384
}
1385
1386
fn visit_i32_shl(&mut self) -> Self::Output {
1387
use ShiftKind::*;
1388
1389
self.context.i32_shift(self.masm, Shl)
1390
}
1391
1392
fn visit_i64_shl(&mut self) -> Self::Output {
1393
use ShiftKind::*;
1394
1395
self.context.i64_shift(self.masm, Shl)
1396
}
1397
1398
fn visit_i32_shr_s(&mut self) -> Self::Output {
1399
use ShiftKind::*;
1400
1401
self.context.i32_shift(self.masm, ShrS)
1402
}
1403
1404
fn visit_i64_shr_s(&mut self) -> Self::Output {
1405
use ShiftKind::*;
1406
1407
self.context.i64_shift(self.masm, ShrS)
1408
}
1409
1410
fn visit_i32_shr_u(&mut self) -> Self::Output {
1411
use ShiftKind::*;
1412
1413
self.context.i32_shift(self.masm, ShrU)
1414
}
1415
1416
fn visit_i64_shr_u(&mut self) -> Self::Output {
1417
use ShiftKind::*;
1418
1419
self.context.i64_shift(self.masm, ShrU)
1420
}
1421
1422
fn visit_i32_rotl(&mut self) -> Self::Output {
1423
use ShiftKind::*;
1424
1425
self.context.i32_shift(self.masm, Rotl)
1426
}
1427
1428
fn visit_i64_rotl(&mut self) -> Self::Output {
1429
use ShiftKind::*;
1430
1431
self.context.i64_shift(self.masm, Rotl)
1432
}
1433
1434
fn visit_i32_rotr(&mut self) -> Self::Output {
1435
use ShiftKind::*;
1436
1437
self.context.i32_shift(self.masm, Rotr)
1438
}
1439
1440
fn visit_i64_rotr(&mut self) -> Self::Output {
1441
use ShiftKind::*;
1442
1443
self.context.i64_shift(self.masm, Rotr)
1444
}
1445
1446
fn visit_end(&mut self) -> Self::Output {
1447
if !self.context.reachable {
1448
self.handle_unreachable_end()
1449
} else {
1450
let mut control = self.pop_control_frame()?;
1451
control.emit_end(self.masm, &mut self.context)
1452
}
1453
}
1454
1455
fn visit_i32_popcnt(&mut self) -> Self::Output {
1456
use OperandSize::*;
1457
self.masm.popcnt(&mut self.context, S32)
1458
}
1459
1460
fn visit_i64_popcnt(&mut self) -> Self::Output {
1461
use OperandSize::*;
1462
1463
self.masm.popcnt(&mut self.context, S64)
1464
}
1465
1466
fn visit_i32_wrap_i64(&mut self) -> Self::Output {
1467
self.context.unop(self.masm, |masm, reg| {
1468
masm.wrap(writable!(reg), reg)?;
1469
Ok(TypedReg::i32(reg))
1470
})
1471
}
1472
1473
fn visit_i64_extend_i32_s(&mut self) -> Self::Output {
1474
self.context.unop(self.masm, |masm, reg| {
1475
masm.extend(writable!(reg), reg, Extend::<Signed>::I64Extend32.into())?;
1476
Ok(TypedReg::i64(reg))
1477
})
1478
}
1479
1480
fn visit_i64_extend_i32_u(&mut self) -> Self::Output {
1481
self.context.unop(self.masm, |masm, reg| {
1482
masm.extend(writable!(reg), reg, Extend::<Zero>::I64Extend32.into())?;
1483
Ok(TypedReg::i64(reg))
1484
})
1485
}
1486
1487
fn visit_i32_extend8_s(&mut self) -> Self::Output {
1488
self.context.unop(self.masm, |masm, reg| {
1489
masm.extend(writable!(reg), reg, Extend::<Signed>::I32Extend8.into())?;
1490
Ok(TypedReg::i32(reg))
1491
})
1492
}
1493
1494
fn visit_i32_extend16_s(&mut self) -> Self::Output {
1495
self.context.unop(self.masm, |masm, reg| {
1496
masm.extend(writable!(reg), reg, Extend::<Signed>::I32Extend16.into())?;
1497
Ok(TypedReg::i32(reg))
1498
})
1499
}
1500
1501
fn visit_i64_extend8_s(&mut self) -> Self::Output {
1502
self.context.unop(self.masm, |masm, reg| {
1503
masm.extend(writable!(reg), reg, Extend::<Signed>::I64Extend8.into())?;
1504
Ok(TypedReg::i64(reg))
1505
})
1506
}
1507
1508
fn visit_i64_extend16_s(&mut self) -> Self::Output {
1509
self.context.unop(self.masm, |masm, reg| {
1510
masm.extend(writable!(reg), reg, Extend::<Signed>::I64Extend16.into())?;
1511
Ok(TypedReg::i64(reg))
1512
})
1513
}
1514
1515
fn visit_i64_extend32_s(&mut self) -> Self::Output {
1516
self.context.unop(self.masm, |masm, reg| {
1517
masm.extend(writable!(reg), reg, Extend::<Signed>::I64Extend32.into())?;
1518
Ok(TypedReg::i64(reg))
1519
})
1520
}
1521
1522
fn visit_i32_trunc_f32_s(&mut self) -> Self::Output {
1523
use OperandSize::*;
1524
1525
self.context
1526
.convert_op(self.masm, WasmValType::I32, |masm, dst, src, dst_size| {
1527
masm.signed_truncate(writable!(dst), src, S32, dst_size, TruncKind::Unchecked)
1528
})
1529
}
1530
1531
fn visit_i32_trunc_f32_u(&mut self) -> Self::Output {
1532
use OperandSize::*;
1533
1534
self.masm
1535
.unsigned_truncate(&mut self.context, S32, S32, TruncKind::Unchecked)
1536
}
1537
1538
fn visit_i32_trunc_f64_s(&mut self) -> Self::Output {
1539
use OperandSize::*;
1540
1541
self.context
1542
.convert_op(self.masm, WasmValType::I32, |masm, dst, src, dst_size| {
1543
masm.signed_truncate(writable!(dst), src, S64, dst_size, TruncKind::Unchecked)
1544
})
1545
}
1546
1547
fn visit_i32_trunc_f64_u(&mut self) -> Self::Output {
1548
use OperandSize::*;
1549
self.masm
1550
.unsigned_truncate(&mut self.context, S64, S32, TruncKind::Unchecked)
1551
}
1552
1553
fn visit_i64_trunc_f32_s(&mut self) -> Self::Output {
1554
use OperandSize::*;
1555
1556
self.context
1557
.convert_op(self.masm, WasmValType::I64, |masm, dst, src, dst_size| {
1558
masm.signed_truncate(writable!(dst), src, S32, dst_size, TruncKind::Unchecked)
1559
})
1560
}
1561
1562
fn visit_i64_trunc_f32_u(&mut self) -> Self::Output {
1563
use OperandSize::*;
1564
1565
self.masm
1566
.unsigned_truncate(&mut self.context, S32, S64, TruncKind::Unchecked)
1567
}
1568
1569
fn visit_i64_trunc_f64_s(&mut self) -> Self::Output {
1570
use OperandSize::*;
1571
1572
self.context
1573
.convert_op(self.masm, WasmValType::I64, |masm, dst, src, dst_size| {
1574
masm.signed_truncate(writable!(dst), src, S64, dst_size, TruncKind::Unchecked)
1575
})
1576
}
1577
1578
fn visit_i64_trunc_f64_u(&mut self) -> Self::Output {
1579
use OperandSize::*;
1580
1581
self.masm
1582
.unsigned_truncate(&mut self.context, S64, S64, TruncKind::Unchecked)
1583
}
1584
1585
fn visit_i32_reinterpret_f32(&mut self) -> Self::Output {
1586
self.context
1587
.convert_op(self.masm, WasmValType::I32, |masm, dst, src, size| {
1588
masm.reinterpret_float_as_int(writable!(dst), src, size)
1589
})
1590
}
1591
1592
fn visit_i64_reinterpret_f64(&mut self) -> Self::Output {
1593
self.context
1594
.convert_op(self.masm, WasmValType::I64, |masm, dst, src, size| {
1595
masm.reinterpret_float_as_int(writable!(dst), src, size)
1596
})
1597
}
1598
1599
fn visit_local_get(&mut self, index: u32) -> Self::Output {
1600
use WasmValType::*;
1601
let context = &mut self.context;
1602
let slot = context.frame.get_wasm_local(index);
1603
match slot.ty {
1604
I32 | I64 | F32 | F64 | V128 => context.stack.push(Val::local(index, slot.ty)),
1605
Ref(rt) => match rt.heap_type {
1606
WasmHeapType::Func => context.stack.push(Val::local(index, slot.ty)),
1607
_ => bail!(CodeGenError::unsupported_wasm_type()),
1608
},
1609
}
1610
1611
Ok(())
1612
}
1613
1614
fn visit_local_set(&mut self, index: u32) -> Self::Output {
1615
let src = self.emit_set_local(index)?;
1616
self.context.free_reg(src);
1617
Ok(())
1618
}
1619
1620
fn visit_call(&mut self, index: u32) -> Self::Output {
1621
let callee = self.env.callee_from_index(FuncIndex::from_u32(index));
1622
FnCall::emit::<M>(&mut self.env, self.masm, &mut self.context, callee)?;
1623
Ok(())
1624
}
1625
1626
fn visit_call_indirect(&mut self, type_index: u32, table_index: u32) -> Self::Output {
1627
// Spill now because `emit_lazy_init_funcref` and the `FnCall::emit`
1628
// invocations will both trigger spills since they both call functions.
1629
// However, the machine instructions for the spill emitted by
1630
// `emit_lazy_funcref` will be jumped over if the funcref was previously
1631
// initialized which may result in the machine stack becoming
1632
// unbalanced.
1633
self.context.spill(self.masm)?;
1634
1635
let type_index = TypeIndex::from_u32(type_index);
1636
let table_index = TableIndex::from_u32(table_index);
1637
1638
self.emit_lazy_init_funcref(table_index)?;
1639
1640
// Perform the indirect call.
1641
// This code assumes that [`Self::emit_lazy_init_funcref`] will
1642
// push the funcref to the value stack.
1643
let funcref_ptr = self
1644
.context
1645
.stack
1646
.peek()
1647
.map(|v| v.unwrap_reg())
1648
.ok_or_else(|| CodeGenError::missing_values_in_stack())?;
1649
self.masm
1650
.trapz(funcref_ptr.into(), TRAP_INDIRECT_CALL_TO_NULL)?;
1651
self.emit_typecheck_funcref(funcref_ptr.into(), type_index)?;
1652
1653
let callee = self.env.funcref(type_index);
1654
FnCall::emit::<M>(&mut self.env, self.masm, &mut self.context, callee)?;
1655
Ok(())
1656
}
1657
1658
fn visit_table_init(&mut self, elem: u32, table: u32) -> Self::Output {
1659
let at = self.context.stack.ensure_index_at(3)?;
1660
1661
self.context
1662
.stack
1663
.insert_many(at, &[table.try_into()?, elem.try_into()?]);
1664
1665
let builtin = self.env.builtins.table_init::<M::ABI, M::Ptr>()?;
1666
FnCall::emit::<M>(
1667
&mut self.env,
1668
self.masm,
1669
&mut self.context,
1670
Callee::Builtin(builtin.clone()),
1671
)?;
1672
self.context.pop_and_free(self.masm)
1673
}
1674
1675
fn visit_table_copy(&mut self, dst: u32, src: u32) -> Self::Output {
1676
let at = self.context.stack.ensure_index_at(3)?;
1677
self.context
1678
.stack
1679
.insert_many(at, &[dst.try_into()?, src.try_into()?]);
1680
1681
let builtin = self.env.builtins.table_copy::<M::ABI, M::Ptr>()?;
1682
FnCall::emit::<M>(
1683
&mut self.env,
1684
self.masm,
1685
&mut self.context,
1686
Callee::Builtin(builtin),
1687
)?;
1688
self.context.pop_and_free(self.masm)
1689
}
1690
1691
fn visit_table_get(&mut self, table: u32) -> Self::Output {
1692
let table_index = TableIndex::from_u32(table);
1693
let table = self.env.table(table_index);
1694
let heap_type = table.ref_type.heap_type;
1695
1696
match heap_type {
1697
WasmHeapType::Func => self.emit_lazy_init_funcref(table_index),
1698
_ => Err(anyhow!(CodeGenError::unsupported_wasm_type())),
1699
}
1700
}
1701
1702
fn visit_table_grow(&mut self, table: u32) -> Self::Output {
1703
let table_index = TableIndex::from_u32(table);
1704
let table_ty = self.env.table(table_index);
1705
let builtin = match table_ty.ref_type.heap_type {
1706
WasmHeapType::Func => self.env.builtins.table_grow_func_ref::<M::ABI, M::Ptr>()?,
1707
_ => bail!(CodeGenError::unsupported_wasm_type()),
1708
};
1709
1710
let len = self.context.stack.len();
1711
// table.grow` requires at least 2 elements on the value stack.
1712
let at = self.context.stack.ensure_index_at(2)?;
1713
1714
// The table_grow builtin expects the parameters in a different
1715
// order.
1716
// The value stack at this point should contain:
1717
// [ init_value | delta ] (stack top)
1718
// but the builtin function expects the init value as the last
1719
// argument.
1720
self.context.stack.inner_mut().swap(len - 1, len - 2);
1721
1722
let builtin = self.prepare_builtin_defined_table_arg(table_index, at, builtin)?;
1723
1724
FnCall::emit::<M>(&mut self.env, self.masm, &mut self.context, builtin)?;
1725
1726
Ok(())
1727
}
1728
1729
fn visit_table_size(&mut self, table: u32) -> Self::Output {
1730
let table_index = TableIndex::from_u32(table);
1731
let table_data = self.env.resolve_table_data(table_index);
1732
self.emit_compute_table_size(&table_data)
1733
}
1734
1735
fn visit_table_fill(&mut self, table: u32) -> Self::Output {
1736
let table_index = TableIndex::from_u32(table);
1737
let table_ty = self.env.table(table_index);
1738
1739
ensure!(
1740
table_ty.ref_type.heap_type == WasmHeapType::Func,
1741
CodeGenError::unsupported_wasm_type()
1742
);
1743
1744
let builtin = self.env.builtins.table_fill_func_ref::<M::ABI, M::Ptr>()?;
1745
1746
let at = self.context.stack.ensure_index_at(3)?;
1747
1748
self.context.stack.insert_many(at, &[table.try_into()?]);
1749
FnCall::emit::<M>(
1750
&mut self.env,
1751
self.masm,
1752
&mut self.context,
1753
Callee::Builtin(builtin.clone()),
1754
)?;
1755
self.context.pop_and_free(self.masm)
1756
}
1757
1758
fn visit_table_set(&mut self, table: u32) -> Self::Output {
1759
let ptr_type = self.env.ptr_type();
1760
let table_index = TableIndex::from_u32(table);
1761
let table_data = self.env.resolve_table_data(table_index);
1762
let table = self.env.table(table_index);
1763
match table.ref_type.heap_type {
1764
WasmHeapType::Func => {
1765
ensure!(
1766
self.tunables.table_lazy_init,
1767
CodeGenError::unsupported_table_eager_init()
1768
);
1769
let value = self.context.pop_to_reg(self.masm, None)?;
1770
let index = self.context.pop_to_reg(self.masm, None)?;
1771
let base = self.context.any_gpr(self.masm)?;
1772
let elem_addr =
1773
self.emit_compute_table_elem_addr(index.into(), base, &table_data)?;
1774
// Set the initialized bit.
1775
self.masm.or(
1776
writable!(value.into()),
1777
value.into(),
1778
RegImm::i64(FUNCREF_INIT_BIT as i64),
1779
ptr_type.try_into()?,
1780
)?;
1781
1782
self.masm.store_ptr(value.into(), elem_addr)?;
1783
1784
self.context.free_reg(value);
1785
self.context.free_reg(index);
1786
self.context.free_reg(base);
1787
Ok(())
1788
}
1789
_ => Err(anyhow!(CodeGenError::unsupported_wasm_type())),
1790
}
1791
}
1792
1793
fn visit_elem_drop(&mut self, index: u32) -> Self::Output {
1794
let elem_drop = self.env.builtins.elem_drop::<M::ABI, M::Ptr>()?;
1795
self.context.stack.extend([index.try_into()?]);
1796
FnCall::emit::<M>(
1797
&mut self.env,
1798
self.masm,
1799
&mut self.context,
1800
Callee::Builtin(elem_drop),
1801
)?;
1802
Ok(())
1803
}
1804
1805
fn visit_memory_init(&mut self, data_index: u32, mem: u32) -> Self::Output {
1806
let at = self.context.stack.ensure_index_at(3)?;
1807
self.context
1808
.stack
1809
.insert_many(at, &[mem.try_into()?, data_index.try_into()?]);
1810
let builtin = self.env.builtins.memory_init::<M::ABI, M::Ptr>()?;
1811
FnCall::emit::<M>(
1812
&mut self.env,
1813
self.masm,
1814
&mut self.context,
1815
Callee::Builtin(builtin),
1816
)?;
1817
self.context.pop_and_free(self.masm)
1818
}
1819
1820
fn visit_memory_copy(&mut self, dst_mem: u32, src_mem: u32) -> Self::Output {
1821
// At this point, the stack is expected to contain:
1822
// [ dst_offset, src_offset, len ]
1823
// The following code inserts the missing params, so that stack contains:
1824
// [ vmctx, dst_mem, dst_offset, src_mem, src_offset, len ]
1825
// Which is the order expected by the builtin function.
1826
let _ = self.context.stack.ensure_index_at(3)?;
1827
let at = self.context.stack.ensure_index_at(2)?;
1828
self.context.stack.insert_many(at, &[src_mem.try_into()?]);
1829
1830
// One element was inserted above, so instead of 3, we use 4.
1831
let at = self.context.stack.ensure_index_at(4)?;
1832
self.context.stack.insert_many(at, &[dst_mem.try_into()?]);
1833
1834
let builtin = self.env.builtins.memory_copy::<M::ABI, M::Ptr>()?;
1835
1836
FnCall::emit::<M>(
1837
&mut self.env,
1838
self.masm,
1839
&mut self.context,
1840
Callee::Builtin(builtin),
1841
)?;
1842
self.context.pop_and_free(self.masm)
1843
}
1844
1845
fn visit_memory_fill(&mut self, mem: u32) -> Self::Output {
1846
let at = self.context.stack.ensure_index_at(3)?;
1847
let mem = MemoryIndex::from_u32(mem);
1848
1849
let builtin = self.env.builtins.memory_fill::<M::ABI, M::Ptr>()?;
1850
let builtin = self.prepare_builtin_defined_memory_arg(mem, at, builtin)?;
1851
1852
FnCall::emit::<M>(&mut self.env, self.masm, &mut self.context, builtin)?;
1853
self.context.pop_and_free(self.masm)
1854
}
1855
1856
fn visit_memory_size(&mut self, mem: u32) -> Self::Output {
1857
let heap = self.env.resolve_heap(MemoryIndex::from_u32(mem));
1858
self.emit_compute_memory_size(&heap)
1859
}
1860
1861
fn visit_memory_grow(&mut self, mem: u32) -> Self::Output {
1862
let at = self.context.stack.ensure_index_at(1)?;
1863
let mem = MemoryIndex::from_u32(mem);
1864
// The stack at this point contains: [ delta ]
1865
// The desired state is
1866
// [ vmctx, delta, index ]
1867
let builtin = self.env.builtins.memory_grow::<M::ABI, M::Ptr>()?;
1868
let builtin = self.prepare_builtin_defined_memory_arg(mem, at + 1, builtin)?;
1869
1870
let heap = self.env.resolve_heap(mem);
1871
FnCall::emit::<M>(&mut self.env, self.masm, &mut self.context, builtin)?;
1872
1873
// The memory32_grow builtin returns a pointer type, therefore we must
1874
// ensure that the return type is representative of the address space of
1875
// the heap type.
1876
match (self.env.ptr_type(), heap.index_type()) {
1877
(WasmValType::I64, WasmValType::I64) => Ok(()),
1878
// When the heap type is smaller than the pointer type, we adjust
1879
// the result of the memory32_grow builtin.
1880
(WasmValType::I64, WasmValType::I32) => {
1881
let top: Reg = self.context.pop_to_reg(self.masm, None)?.into();
1882
self.masm.wrap(writable!(top), top)?;
1883
self.context.stack.push(TypedReg::i32(top).into());
1884
Ok(())
1885
}
1886
_ => Err(anyhow!(CodeGenError::unsupported_32_bit_platform())),
1887
}
1888
}
1889
1890
fn visit_data_drop(&mut self, data_index: u32) -> Self::Output {
1891
self.context.stack.extend([data_index.try_into()?]);
1892
1893
let builtin = self.env.builtins.data_drop::<M::ABI, M::Ptr>()?;
1894
FnCall::emit::<M>(
1895
&mut self.env,
1896
self.masm,
1897
&mut self.context,
1898
Callee::Builtin(builtin),
1899
)
1900
}
1901
1902
fn visit_nop(&mut self) -> Self::Output {
1903
Ok(())
1904
}
1905
1906
fn visit_if(&mut self, blockty: BlockType) -> Self::Output {
1907
self.control_frames.push(ControlStackFrame::r#if(
1908
self.env.resolve_block_sig(blockty)?,
1909
self.masm,
1910
&mut self.context,
1911
)?);
1912
1913
Ok(())
1914
}
1915
1916
fn visit_else(&mut self) -> Self::Output {
1917
if !self.context.reachable {
1918
self.handle_unreachable_else()
1919
} else {
1920
let control = self
1921
.control_frames
1922
.last_mut()
1923
.ok_or_else(|| CodeGenError::control_frame_expected())?;
1924
control.emit_else(self.masm, &mut self.context)
1925
}
1926
}
1927
1928
fn visit_block(&mut self, blockty: BlockType) -> Self::Output {
1929
self.control_frames.push(ControlStackFrame::block(
1930
self.env.resolve_block_sig(blockty)?,
1931
self.masm,
1932
&mut self.context,
1933
)?);
1934
1935
Ok(())
1936
}
1937
1938
fn visit_loop(&mut self, blockty: BlockType) -> Self::Output {
1939
self.control_frames.push(ControlStackFrame::r#loop(
1940
self.env.resolve_block_sig(blockty)?,
1941
self.masm,
1942
&mut self.context,
1943
)?);
1944
1945
self.maybe_emit_epoch_check()?;
1946
self.maybe_emit_fuel_check()
1947
}
1948
1949
fn visit_br(&mut self, depth: u32) -> Self::Output {
    // Wasm `br`: unconditional branch to the frame `depth` levels up.
    // The target frame's expected results are popped/placed via
    // `pop_abi_results`, using the frame's precomputed return area, if any.
    let index = control_index(depth, self.control_frames.len())?;
    let frame = &mut self.control_frames[index];
    self.context
        .br::<_, _, UnconditionalBranch>(frame, self.masm, |masm, cx, frame| {
            frame.pop_abi_results::<M, _>(cx, masm, |results, _, _| {
                Ok(results.ret_area().copied())
            })
        })
}
1959
1960
// Wasm `br_if`: conditional branch to the frame `depth` levels up.
// The condition is popped first (excluding the frame's result registers),
// then live state is spilled and the frame's top-of-stack results are
// placed, before emitting the compare-and-branch.
fn visit_br_if(&mut self, depth: u32) -> Self::Output {
    let index = control_index(depth, self.control_frames.len())?;
    let frame = &mut self.control_frames[index];
    frame.set_as_target();

    let top = {
        // Pop the branch condition while excluding the frame's result
        // registers from allocation, so result placement below can use them.
        let top = self.context.without::<Result<TypedReg>, M, _>(
            frame.results::<M>()?.regs(),
            self.masm,
            |ctx, masm| ctx.pop_to_reg(masm, None),
        )??;
        // Explicitly save any live registers and locals before setting up
        // the branch state.
        // In some cases, calculating the `top` value above, will result in
        // a spill, thus the following one will result in a no-op.
        self.context.spill(self.masm)?;
        frame.top_abi_results::<M, _>(
            &mut self.context,
            self.masm,
            |results, context, masm| {
                // In the case of `br_if` there's a possibility that we'll
                // exit early from the block or fallthrough, for
                // a fallthrough, we cannot rely on the pre-computed return area;
                // it must be recalculated so that any values that are
                // generated are correctly placed near the current stack
                // pointer.
                if results.on_stack() {
                    let stack_consumed = context.stack.sizeof(results.stack_operands_len());
                    let base = masm.sp_offset()?.as_u32() - stack_consumed;
                    let offs = base + results.size();
                    Ok(Some(RetArea::sp(SPOffset::from_u32(offs))))
                } else {
                    Ok(None)
                }
            },
        )?;
        top
    };

    // Emit instructions to balance the machine stack.
    let current_sp_offset = self.masm.sp_offset()?;
    let unbalanced = frame.unbalanced(self.masm)?;
    // When unbalanced, branch *around* the stack-adjustment code on a
    // false condition (Eq to zero); otherwise branch directly to the
    // frame's label on a true condition (Ne to zero).
    let (label, cmp) = if unbalanced {
        (self.masm.get_label()?, IntCmpKind::Eq)
    } else {
        (*frame.label(), IntCmpKind::Ne)
    };

    self.masm
        .branch(cmp, top.reg, top.reg.into(), label, OperandSize::S32)?;
    self.context.free_reg(top);

    if unbalanced {
        self.context
            .br::<_, _, ConditionalBranch>(frame, self.masm, |_, _, _| Ok(()))?;

        // Restore sp_offset to what it was for falling through and emit
        // fallthrough label.
        self.masm.reset_stack_pointer(current_sp_offset)?;
        self.masm.bind(label)?;
    }

    Ok(())
}
2024
2025
// Wasm `br_table`: indirect branch through a jump table; the default
// target is bundled as the last label. Result handling is performed once,
// relative to the innermost target frame, before entering the table.
fn visit_br_table(&mut self, targets: BrTable<'a>) -> Self::Output {
    // +1 to account for the default target.
    let len = targets.len() + 1;
    // SmallVec<[_; 5]> to match the binary emission layer (e.g
    // see `JmpTableSeq'), but here we use 5 instead since we
    // bundle the default target as the last element in the array.
    let mut labels: SmallVec<[_; 5]> = smallvec![];
    for _ in 0..len {
        labels.push(self.masm.get_label()?);
    }

    // Find the innermost target and use it as the relative frame
    // for result handling below.
    //
    // This approach ensures that
    // 1. The stack pointer offset is correctly positioned
    // according to the expectations of the innermost block end
    // sequence.
    // 2. We meet the jump site invariants introduced by
    // `CodegenContext::br`, which take advantage of Wasm
    // semantics given that all jumps are "outward".
    let mut innermost = targets.default();
    for target in targets.targets() {
        let target = target?;
        if target < innermost {
            innermost = target;
        }
    }

    let innermost_index = control_index(innermost, self.control_frames.len())?;
    let innermost_frame = &mut self.control_frames[innermost_index];
    let innermost_result = innermost_frame.results::<M>()?;

    let (index, tmp) = {
        // Pop the table index and reserve a scratch register, keeping the
        // innermost frame's result registers out of the allocator's reach.
        let index_and_tmp = self.context.without::<Result<(TypedReg, _)>, M, _>(
            innermost_result.regs(),
            self.masm,
            |cx, masm| Ok((cx.pop_to_reg(masm, None)?, cx.any_gpr(masm)?)),
        )??;

        // Materialize any constants or locals into their result
        // representation, so that when reachability is restored,
        // they are correctly located. NB: the results are popped
        // in function of the innermost branch specified for
        // `br_table`, which implies that the machine stack will
        // be correctly balanced, by virtue of calling
        // `pop_abi_results`.

        // It's possible that we need to balance the stack for the
        // rest of the targets, which will be done before emitting
        // the unconditional jump below.
        innermost_frame.pop_abi_results::<M, _>(
            &mut self.context,
            self.masm,
            |results, _, _| Ok(results.ret_area().copied()),
        )?;
        index_and_tmp
    };

    self.masm.jmp_table(&labels, index.into(), tmp)?;
    // Save the original stack pointer offset; we will reset the stack
    // pointer to this offset after jumping to each of the targets. Each
    // jump might adjust the stack according to the base offset of the
    // target.
    let current_sp = self.masm.sp_offset()?;

    for (t, l) in targets
        .targets()
        .chain(std::iter::once(Ok(targets.default())))
        .zip(labels.iter())
    {
        let control_index = control_index(t?, self.control_frames.len())?;
        let frame = &mut self.control_frames[control_index];
        // Reset the stack pointer to its original offset. This is needed
        // because each jump will potentially adjust the stack pointer
        // according to the base offset of the target.
        self.masm.reset_stack_pointer(current_sp)?;

        // NB: We don't perform any result handling as it was
        // already taken care of above before jumping to the
        // jump table.
        self.masm.bind(*l)?;
        // Ensure that the stack pointer is correctly positioned before
        // jumping to the jump table code.
        self.context
            .br::<_, _, UnconditionalBranch>(frame, self.masm, |_, _, _| Ok(()))?;
    }
    // Finally reset the stack pointer to the original location.
    // The reachability analysis, will ensure it's correctly located
    // once reachability is restored.
    self.masm.reset_stack_pointer(current_sp)?;
    self.context.reachable = false;
    self.context.free_reg(index.reg);
    self.context.free_reg(tmp);

    Ok(())
}
2122
2123
// Wasm `return`: an unconditional branch to the implicit function-body
// frame, placing the function's results per its ABI return area.
fn visit_return(&mut self) -> Self::Output {
    // Grab the outermost frame, which is the function's body
    // frame. We don't rely on [`codegen::control_index`] since
    // this frame is implicit and we know that it should exist at
    // index 0.
    let outermost = &mut self.control_frames[0];
    self.context
        .br::<_, _, UnconditionalBranch>(outermost, self.masm, |masm, cx, frame| {
            frame.pop_abi_results::<M, _>(cx, masm, |results, _, _| {
                Ok(results.ret_area().copied())
            })
        })
}
2136
2137
fn visit_unreachable(&mut self) -> Self::Output {
    // Wasm `unreachable`: emit the trap instruction and mark subsequent
    // code as unreachable.
    self.masm.unreachable()?;
    self.context.reachable = false;
    // Set the implicit outermost frame as target to perform the necessary
    // stack clean up.
    self.control_frames[0].set_as_target();
    Ok(())
}
2147
2148
fn visit_local_tee(&mut self, index: u32) -> Self::Output {
    // Wasm `local.tee`: like `local.set`, but the value is pushed back
    // onto the value stack.
    let value = self.emit_set_local(index)?;
    self.context.stack.push(value.into());
    Ok(())
}
2154
2155
fn visit_global_get(&mut self, global_index: u32) -> Self::Output {
    // Wasm `global.get`: resolve the global's address, load its value into
    // a freshly allocated register and push it onto the value stack.
    let index = GlobalIndex::from_u32(global_index);
    let (ty, base, offset) = self.emit_get_global_addr(index)?;
    let addr = self.masm.address_at_reg(base, offset)?;
    let dst = self.context.reg_for_type(ty, self.masm)?;
    self.masm.load(addr, writable!(dst), ty.try_into()?)?;
    self.context.stack.push(Val::reg(dst, ty));
    // The base register only held the global's address; release it.
    self.context.free_reg(base);
    Ok(())
}
2167
2168
fn visit_global_set(&mut self, global_index: u32) -> Self::Output {
2169
let index = GlobalIndex::from_u32(global_index);
2170
let (ty, base, offset) = self.emit_get_global_addr(index)?;
2171
let addr = self.masm.address_at_reg(base, offset)?;
2172
2173
let typed_reg = self.context.pop_to_reg(self.masm, None)?;
2174
self.masm
2175
.store(typed_reg.reg.into(), addr, ty.try_into()?)?;
2176
self.context.free_reg(typed_reg.reg);
2177
self.context.free_reg(base);
2178
2179
Ok(())
2180
}
2181
2182
fn visit_drop(&mut self) -> Self::Output {
    // Wasm `drop`: discard the top value. Free its register if it lives in
    // one, release its stack slot if it was spilled; other value kinds
    // need no cleanup.
    self.context.drop_last(1, |regalloc, val| match val {
        Val::Reg(tr) => Ok(regalloc.free(tr.reg)),
        Val::Memory(m) => self.masm.free_stack(m.slot.size),
        _ => Ok(()),
    })
}
2189
2190
fn visit_select(&mut self) -> Self::Output {
    // Wasm `select`: pops cond, val2, val1; the result is val1 when cond is
    // non-zero, val2 otherwise. Implemented by conditionally moving val1
    // into val2's register, which then carries the result.
    let cond = self.context.pop_to_reg(self.masm, None)?;
    let val2 = self.context.pop_to_reg(self.masm, None)?;
    let val1 = self.context.pop_to_reg(self.masm, None)?;
    self.masm.cmp(cond.reg, RegImm::i32(0), OperandSize::S32)?;
    // Conditionally move val1 to val2 if the comparison is
    // not zero.
    self.masm.cmov(
        writable!(val2.into()),
        val1.into(),
        IntCmpKind::Ne,
        val1.ty.try_into()?,
    )?;
    self.context.stack.push(val2.into());
    self.context.free_reg(val1.reg);
    self.context.free_reg(cond);

    Ok(())
}
2209
2210
fn visit_i32_load(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `i32.load`: plain 32-bit load.
    self.emit_wasm_load(
        &memarg,
        WasmValType::I32,
        LoadKind::Operand(OperandSize::S32),
    )
}
2217
2218
fn visit_i32_load8_s(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `i32.load8_s`: load one byte, sign-extended to 32 bits.
    self.emit_wasm_load(
        &memarg,
        WasmValType::I32,
        LoadKind::ScalarExtend(Extend::<Signed>::I32Extend8.into()),
    )
}
2225
2226
fn visit_i32_load8_u(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `i32.load8_u`: load one byte, zero-extended to 32 bits.
    self.emit_wasm_load(
        &memarg,
        WasmValType::I32,
        LoadKind::ScalarExtend(Extend::<Zero>::I32Extend8.into()),
    )
}
2233
2234
fn visit_i32_load16_s(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `i32.load16_s`: load 16 bits, sign-extended to 32 bits.
    self.emit_wasm_load(
        &memarg,
        WasmValType::I32,
        LoadKind::ScalarExtend(Extend::<Signed>::I32Extend16.into()),
    )
}
2241
2242
fn visit_i32_load16_u(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `i32.load16_u`: load 16 bits, zero-extended to 32 bits.
    self.emit_wasm_load(
        &memarg,
        WasmValType::I32,
        LoadKind::ScalarExtend(Extend::<Zero>::I32Extend16.into()),
    )
}
2249
2250
fn visit_i32_store(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `i32.store`: plain 32-bit store.
    self.emit_wasm_store(&memarg, StoreKind::Operand(OperandSize::S32))
}
2253
2254
fn visit_i32_store8(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `i32.store8`: store the low 8 bits.
    self.emit_wasm_store(&memarg, StoreKind::Operand(OperandSize::S8))
}
2257
2258
fn visit_i32_store16(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `i32.store16`: store the low 16 bits.
    self.emit_wasm_store(&memarg, StoreKind::Operand(OperandSize::S16))
}
2261
2262
fn visit_i64_load8_s(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `i64.load8_s`: load one byte, sign-extended to 64 bits.
    self.emit_wasm_load(
        &memarg,
        WasmValType::I64,
        LoadKind::ScalarExtend(Extend::<Signed>::I64Extend8.into()),
    )
}
2269
2270
fn visit_i64_load8_u(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `i64.load8_u`: load one byte, zero-extended to 64 bits.
    self.emit_wasm_load(
        &memarg,
        WasmValType::I64,
        LoadKind::ScalarExtend(Extend::<Zero>::I64Extend8.into()),
    )
}
2277
2278
fn visit_i64_load16_u(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `i64.load16_u`: load 16 bits, zero-extended to 64 bits.
    self.emit_wasm_load(
        &memarg,
        WasmValType::I64,
        LoadKind::ScalarExtend(Extend::<Zero>::I64Extend16.into()),
    )
}
2285
2286
fn visit_i64_load16_s(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `i64.load16_s`: load 16 bits, sign-extended to 64 bits.
    self.emit_wasm_load(
        &memarg,
        WasmValType::I64,
        LoadKind::ScalarExtend(Extend::<Signed>::I64Extend16.into()),
    )
}
2293
2294
fn visit_i64_load32_u(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `i64.load32_u`: load 32 bits, zero-extended to 64 bits.
    self.emit_wasm_load(
        &memarg,
        WasmValType::I64,
        LoadKind::ScalarExtend(Extend::<Zero>::I64Extend32.into()),
    )
}
2301
2302
fn visit_i64_load32_s(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `i64.load32_s`: load 32 bits, sign-extended to 64 bits.
    self.emit_wasm_load(
        &memarg,
        WasmValType::I64,
        LoadKind::ScalarExtend(Extend::<Signed>::I64Extend32.into()),
    )
}
2309
2310
fn visit_i64_load(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `i64.load`: plain 64-bit load.
    self.emit_wasm_load(
        &memarg,
        WasmValType::I64,
        LoadKind::Operand(OperandSize::S64),
    )
}
2317
2318
fn visit_i64_store(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `i64.store`: plain 64-bit store.
    self.emit_wasm_store(&memarg, StoreKind::Operand(OperandSize::S64))
}
2321
2322
fn visit_i64_store8(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `i64.store8`: store the low 8 bits.
    self.emit_wasm_store(&memarg, StoreKind::Operand(OperandSize::S8))
}
2325
2326
fn visit_i64_store16(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `i64.store16`: store the low 16 bits.
    self.emit_wasm_store(&memarg, StoreKind::Operand(OperandSize::S16))
}
2329
2330
fn visit_i64_store32(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `i64.store32`: store the low 32 bits.
    self.emit_wasm_store(&memarg, StoreKind::Operand(OperandSize::S32))
}
2333
2334
fn visit_f32_load(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `f32.load`: 32-bit float load.
    self.emit_wasm_load(
        &memarg,
        WasmValType::F32,
        LoadKind::Operand(OperandSize::S32),
    )
}
2341
2342
fn visit_f32_store(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `f32.store`: 32-bit float store.
    self.emit_wasm_store(&memarg, StoreKind::Operand(OperandSize::S32))
}
2345
2346
fn visit_f64_load(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `f64.load`: 64-bit float load.
    self.emit_wasm_load(
        &memarg,
        WasmValType::F64,
        LoadKind::Operand(OperandSize::S64),
    )
}
2353
2354
fn visit_f64_store(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `f64.store`: 64-bit float store.
    self.emit_wasm_store(&memarg, StoreKind::Operand(OperandSize::S64))
}
2357
2358
fn visit_i32_trunc_sat_f32_s(&mut self) -> Self::Output {
    use OperandSize::*;

    // Wasm `i32.trunc_sat_f32_s`: saturating f32 -> signed i32 conversion;
    // `TruncKind::Checked` selects the saturating (non-trapping) lowering.
    self.context
        .convert_op(self.masm, WasmValType::I32, |masm, dst, src, dst_size| {
            masm.signed_truncate(writable!(dst), src, S32, dst_size, TruncKind::Checked)
        })
}
2366
2367
fn visit_i32_trunc_sat_f32_u(&mut self) -> Self::Output {
    use OperandSize::*;

    // Wasm `i32.trunc_sat_f32_u`: saturating f32 -> unsigned i32 conversion.
    self.masm
        .unsigned_truncate(&mut self.context, S32, S32, TruncKind::Checked)
}
2373
2374
fn visit_i32_trunc_sat_f64_s(&mut self) -> Self::Output {
    use OperandSize::*;

    // Wasm `i32.trunc_sat_f64_s`: saturating f64 -> signed i32 conversion.
    self.context
        .convert_op(self.masm, WasmValType::I32, |masm, dst, src, dst_size| {
            masm.signed_truncate(writable!(dst), src, S64, dst_size, TruncKind::Checked)
        })
}
2382
2383
fn visit_i32_trunc_sat_f64_u(&mut self) -> Self::Output {
    use OperandSize::*;

    // Wasm `i32.trunc_sat_f64_u`: saturating f64 -> unsigned i32 conversion.
    self.masm
        .unsigned_truncate(&mut self.context, S64, S32, TruncKind::Checked)
}
2389
2390
fn visit_i64_trunc_sat_f32_s(&mut self) -> Self::Output {
    use OperandSize::*;

    // Wasm `i64.trunc_sat_f32_s`: saturating f32 -> signed i64 conversion.
    self.context
        .convert_op(self.masm, WasmValType::I64, |masm, dst, src, dst_size| {
            masm.signed_truncate(writable!(dst), src, S32, dst_size, TruncKind::Checked)
        })
}
2398
2399
fn visit_i64_trunc_sat_f32_u(&mut self) -> Self::Output {
    use OperandSize::*;

    // Wasm `i64.trunc_sat_f32_u`: saturating f32 -> unsigned i64 conversion.
    self.masm
        .unsigned_truncate(&mut self.context, S32, S64, TruncKind::Checked)
}
2405
2406
fn visit_i64_trunc_sat_f64_s(&mut self) -> Self::Output {
    use OperandSize::*;

    // Wasm `i64.trunc_sat_f64_s`: saturating f64 -> signed i64 conversion.
    self.context
        .convert_op(self.masm, WasmValType::I64, |masm, dst, src, dst_size| {
            masm.signed_truncate(writable!(dst), src, S64, dst_size, TruncKind::Checked)
        })
}
2414
2415
fn visit_i64_trunc_sat_f64_u(&mut self) -> Self::Output {
    use OperandSize::*;

    // Wasm `i64.trunc_sat_f64_u`: saturating f64 -> unsigned i64 conversion.
    self.masm
        .unsigned_truncate(&mut self.context, S64, S64, TruncKind::Checked)
}
2421
2422
fn visit_i64_add128(&mut self) -> Self::Output {
    // `i64.add128` (wide-arithmetic proposal): 128-bit addition over two
    // (lo, hi) pairs of i64s; the result reuses the LHS register pair.
    self.context
        .binop128(self.masm, |masm, lhs_lo, lhs_hi, rhs_lo, rhs_hi| {
            masm.add128(
                writable!(lhs_lo),
                writable!(lhs_hi),
                lhs_lo,
                lhs_hi,
                rhs_lo,
                rhs_hi,
            )?;
            Ok((TypedReg::i64(lhs_lo), TypedReg::i64(lhs_hi)))
        })
}
2436
2437
fn visit_i64_sub128(&mut self) -> Self::Output {
    // `i64.sub128` (wide-arithmetic proposal): 128-bit subtraction over two
    // (lo, hi) pairs of i64s; the result reuses the LHS register pair.
    self.context
        .binop128(self.masm, |masm, lhs_lo, lhs_hi, rhs_lo, rhs_hi| {
            masm.sub128(
                writable!(lhs_lo),
                writable!(lhs_hi),
                lhs_lo,
                lhs_hi,
                rhs_lo,
                rhs_hi,
            )?;
            Ok((TypedReg::i64(lhs_lo), TypedReg::i64(lhs_hi)))
        })
}
2451
2452
fn visit_i64_mul_wide_s(&mut self) -> Self::Output {
    // `i64.mul_wide_s`: signed full 128-bit product of two i64 operands.
    self.masm.mul_wide(&mut self.context, MulWideKind::Signed)
}
2455
2456
fn visit_i64_mul_wide_u(&mut self) -> Self::Output {
    // `i64.mul_wide_u`: unsigned full 128-bit product of two i64 operands.
    self.masm.mul_wide(&mut self.context, MulWideKind::Unsigned)
}
2459
2460
fn visit_i32_atomic_load8_u(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `i32.atomic.load8_u`: atomic byte load, zero-extended to i32.
    self.emit_wasm_load(
        &memarg,
        WasmValType::I32,
        LoadKind::Atomic(OperandSize::S8, Some(Extend::<Zero>::I32Extend8.into())),
    )
}
2467
2468
fn visit_i32_atomic_load16_u(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `i32.atomic.load16_u`: atomic 16-bit load, zero-extended to i32.
    self.emit_wasm_load(
        &memarg,
        WasmValType::I32,
        LoadKind::Atomic(OperandSize::S16, Some(Extend::<Zero>::I32Extend16.into())),
    )
}
2475
2476
fn visit_i32_atomic_load(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `i32.atomic.load`: full-width 32-bit atomic load, no extension.
    self.emit_wasm_load(
        &memarg,
        WasmValType::I32,
        LoadKind::Atomic(OperandSize::S32, None),
    )
}
2483
2484
fn visit_i64_atomic_load8_u(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.load8_u`: atomic byte load, zero-extended to i64.
    self.emit_wasm_load(
        &memarg,
        WasmValType::I64,
        LoadKind::Atomic(OperandSize::S8, Some(Extend::<Zero>::I64Extend8.into())),
    )
}
2491
2492
fn visit_i64_atomic_load16_u(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.load16_u`: atomic 16-bit load, zero-extended to i64.
    self.emit_wasm_load(
        &memarg,
        WasmValType::I64,
        LoadKind::Atomic(OperandSize::S16, Some(Extend::<Zero>::I64Extend16.into())),
    )
}
2499
2500
fn visit_i64_atomic_load32_u(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.load32_u`: atomic 32-bit load, zero-extended to i64.
    self.emit_wasm_load(
        &memarg,
        WasmValType::I64,
        LoadKind::Atomic(OperandSize::S32, Some(Extend::<Zero>::I64Extend32.into())),
    )
}
2507
2508
fn visit_i64_atomic_load(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.load`: full-width 64-bit atomic load, no extension.
    self.emit_wasm_load(
        &memarg,
        WasmValType::I64,
        LoadKind::Atomic(OperandSize::S64, None),
    )
}
2515
2516
fn visit_i32_atomic_store(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `i32.atomic.store`: 32-bit atomic store.
    self.emit_wasm_store(&memarg, StoreKind::Atomic(OperandSize::S32))
}
2519
2520
fn visit_i64_atomic_store(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.store`: 64-bit atomic store.
    self.emit_wasm_store(&memarg, StoreKind::Atomic(OperandSize::S64))
}
2523
2524
fn visit_i32_atomic_store8(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `i32.atomic.store8`: atomic store of the low 8 bits.
    self.emit_wasm_store(&memarg, StoreKind::Atomic(OperandSize::S8))
}
2527
2528
fn visit_i32_atomic_store16(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `i32.atomic.store16`: atomic store of the low 16 bits.
    self.emit_wasm_store(&memarg, StoreKind::Atomic(OperandSize::S16))
}
2531
2532
fn visit_i64_atomic_store8(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.store8`: atomic store of the low 8 bits.
    self.emit_wasm_store(&memarg, StoreKind::Atomic(OperandSize::S8))
}
2535
2536
fn visit_i64_atomic_store16(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.store16`: atomic store of the low 16 bits.
    self.emit_wasm_store(&memarg, StoreKind::Atomic(OperandSize::S16))
}
2539
2540
fn visit_i64_atomic_store32(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.store32`: atomic store of the low 32 bits.
    self.emit_wasm_store(&memarg, StoreKind::Atomic(OperandSize::S32))
}
2543
2544
fn visit_i32_atomic_rmw8_add_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i32.atomic.rmw8.add_u`: 8-bit atomic add, result zero-extended.
    self.emit_atomic_rmw(
        &arg,
        RmwOp::Add,
        OperandSize::S8,
        Some(Extend::<Zero>::I32Extend8),
    )
}
2552
2553
fn visit_i32_atomic_rmw16_add_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i32.atomic.rmw16.add_u`: 16-bit atomic add, result zero-extended.
    self.emit_atomic_rmw(
        &arg,
        RmwOp::Add,
        OperandSize::S16,
        Some(Extend::<Zero>::I32Extend16),
    )
}
2561
2562
fn visit_i32_atomic_rmw_add(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i32.atomic.rmw.add`: full-width 32-bit atomic add.
    self.emit_atomic_rmw(&arg, RmwOp::Add, OperandSize::S32, None)
}
2565
2566
fn visit_i64_atomic_rmw8_add_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.rmw8.add_u`: 8-bit atomic add, result zero-extended.
    self.emit_atomic_rmw(
        &arg,
        RmwOp::Add,
        OperandSize::S8,
        Some(Extend::<Zero>::I64Extend8),
    )
}
2574
2575
fn visit_i64_atomic_rmw16_add_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.rmw16.add_u`: 16-bit atomic add, result zero-extended.
    self.emit_atomic_rmw(
        &arg,
        RmwOp::Add,
        OperandSize::S16,
        Some(Extend::<Zero>::I64Extend16),
    )
}
2583
2584
fn visit_i64_atomic_rmw32_add_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.rmw32.add_u`: 32-bit atomic add, result zero-extended.
    self.emit_atomic_rmw(
        &arg,
        RmwOp::Add,
        OperandSize::S32,
        Some(Extend::<Zero>::I64Extend32),
    )
}
2592
2593
fn visit_i64_atomic_rmw_add(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.rmw.add`: full-width 64-bit atomic add.
    self.emit_atomic_rmw(&arg, RmwOp::Add, OperandSize::S64, None)
}
2596
2597
fn visit_i32_atomic_rmw8_sub_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i32.atomic.rmw8.sub_u`: 8-bit atomic sub, result zero-extended.
    self.emit_atomic_rmw(
        &arg,
        RmwOp::Sub,
        OperandSize::S8,
        Some(Extend::<Zero>::I32Extend8),
    )
}
2605
fn visit_i32_atomic_rmw16_sub_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i32.atomic.rmw16.sub_u`: 16-bit atomic sub, result zero-extended.
    self.emit_atomic_rmw(
        &arg,
        RmwOp::Sub,
        OperandSize::S16,
        Some(Extend::<Zero>::I32Extend16),
    )
}
2613
2614
fn visit_i32_atomic_rmw_sub(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i32.atomic.rmw.sub`: full-width 32-bit atomic sub.
    self.emit_atomic_rmw(&arg, RmwOp::Sub, OperandSize::S32, None)
}
2617
2618
fn visit_i64_atomic_rmw8_sub_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.rmw8.sub_u`: 8-bit atomic sub, result zero-extended.
    self.emit_atomic_rmw(
        &arg,
        RmwOp::Sub,
        OperandSize::S8,
        Some(Extend::<Zero>::I64Extend8),
    )
}
2626
2627
fn visit_i64_atomic_rmw16_sub_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.rmw16.sub_u`: 16-bit atomic sub, result zero-extended.
    self.emit_atomic_rmw(
        &arg,
        RmwOp::Sub,
        OperandSize::S16,
        Some(Extend::<Zero>::I64Extend16),
    )
}
2635
2636
fn visit_i64_atomic_rmw32_sub_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.rmw32.sub_u`: 32-bit atomic sub, result zero-extended.
    self.emit_atomic_rmw(
        &arg,
        RmwOp::Sub,
        OperandSize::S32,
        Some(Extend::<Zero>::I64Extend32),
    )
}
2644
2645
fn visit_i64_atomic_rmw_sub(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.rmw.sub`: full-width 64-bit atomic sub.
    self.emit_atomic_rmw(&arg, RmwOp::Sub, OperandSize::S64, None)
}
2648
2649
fn visit_i32_atomic_rmw8_xchg_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i32.atomic.rmw8.xchg_u`: 8-bit atomic exchange, zero-extended.
    self.emit_atomic_rmw(
        &arg,
        RmwOp::Xchg,
        OperandSize::S8,
        Some(Extend::<Zero>::I32Extend8),
    )
}
2657
2658
fn visit_i32_atomic_rmw16_xchg_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i32.atomic.rmw16.xchg_u`: 16-bit atomic exchange, zero-extended.
    self.emit_atomic_rmw(
        &arg,
        RmwOp::Xchg,
        OperandSize::S16,
        Some(Extend::<Zero>::I32Extend16),
    )
}
2666
2667
fn visit_i32_atomic_rmw_xchg(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i32.atomic.rmw.xchg`: full-width 32-bit atomic exchange.
    self.emit_atomic_rmw(&arg, RmwOp::Xchg, OperandSize::S32, None)
}
2670
2671
fn visit_i64_atomic_rmw8_xchg_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.rmw8.xchg_u`: 8-bit atomic exchange, zero-extended.
    self.emit_atomic_rmw(
        &arg,
        RmwOp::Xchg,
        OperandSize::S8,
        Some(Extend::<Zero>::I64Extend8),
    )
}
2679
2680
fn visit_i64_atomic_rmw16_xchg_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.rmw16.xchg_u`: 16-bit atomic exchange, zero-extended.
    self.emit_atomic_rmw(
        &arg,
        RmwOp::Xchg,
        OperandSize::S16,
        Some(Extend::<Zero>::I64Extend16),
    )
}
2688
2689
fn visit_i64_atomic_rmw32_xchg_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.rmw32.xchg_u`: 32-bit atomic exchange, zero-extended.
    self.emit_atomic_rmw(
        &arg,
        RmwOp::Xchg,
        OperandSize::S32,
        Some(Extend::<Zero>::I64Extend32),
    )
}
2697
2698
fn visit_i64_atomic_rmw_xchg(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.rmw.xchg`: full-width 64-bit atomic exchange.
    self.emit_atomic_rmw(&arg, RmwOp::Xchg, OperandSize::S64, None)
}
2701
2702
fn visit_i32_atomic_rmw8_and_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i32.atomic.rmw8.and_u`: 8-bit atomic AND, result zero-extended.
    self.emit_atomic_rmw(
        &arg,
        RmwOp::And,
        OperandSize::S8,
        Some(Extend::<Zero>::I32Extend8),
    )
}
2710
2711
fn visit_i32_atomic_rmw16_and_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i32.atomic.rmw16.and_u`: 16-bit atomic AND, result zero-extended.
    self.emit_atomic_rmw(
        &arg,
        RmwOp::And,
        OperandSize::S16,
        Some(Extend::<Zero>::I32Extend16),
    )
}
2719
2720
fn visit_i32_atomic_rmw_and(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i32.atomic.rmw.and`: full-width 32-bit atomic AND.
    self.emit_atomic_rmw(&arg, RmwOp::And, OperandSize::S32, None)
}
2723
2724
fn visit_i64_atomic_rmw8_and_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.rmw8.and_u`: 8-bit atomic AND, result zero-extended.
    self.emit_atomic_rmw(
        &arg,
        RmwOp::And,
        OperandSize::S8,
        Some(Extend::<Zero>::I64Extend8),
    )
}
2732
2733
fn visit_i64_atomic_rmw16_and_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.rmw16.and_u`: 16-bit atomic AND, result zero-extended.
    self.emit_atomic_rmw(
        &arg,
        RmwOp::And,
        OperandSize::S16,
        Some(Extend::<Zero>::I64Extend16),
    )
}
2741
2742
fn visit_i64_atomic_rmw32_and_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.rmw32.and_u`: 32-bit atomic AND, result zero-extended.
    self.emit_atomic_rmw(
        &arg,
        RmwOp::And,
        OperandSize::S32,
        Some(Extend::<Zero>::I64Extend32),
    )
}
2750
2751
fn visit_i64_atomic_rmw_and(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.rmw.and`: full-width 64-bit atomic AND.
    self.emit_atomic_rmw(&arg, RmwOp::And, OperandSize::S64, None)
}
2754
2755
fn visit_i32_atomic_rmw8_or_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i32.atomic.rmw8.or_u`: 8-bit atomic OR, result zero-extended.
    self.emit_atomic_rmw(
        &arg,
        RmwOp::Or,
        OperandSize::S8,
        Some(Extend::<Zero>::I32Extend8),
    )
}
2763
2764
fn visit_i32_atomic_rmw16_or_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i32.atomic.rmw16.or_u`: 16-bit atomic OR, result zero-extended.
    self.emit_atomic_rmw(
        &arg,
        RmwOp::Or,
        OperandSize::S16,
        Some(Extend::<Zero>::I32Extend16),
    )
}
2772
2773
fn visit_i32_atomic_rmw_or(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i32.atomic.rmw.or`: full-width 32-bit atomic OR.
    self.emit_atomic_rmw(&arg, RmwOp::Or, OperandSize::S32, None)
}
2776
2777
fn visit_i64_atomic_rmw8_or_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.rmw8.or_u`: 8-bit atomic OR, result zero-extended.
    self.emit_atomic_rmw(
        &arg,
        RmwOp::Or,
        OperandSize::S8,
        Some(Extend::<Zero>::I64Extend8),
    )
}
2785
2786
fn visit_i64_atomic_rmw16_or_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.rmw16.or_u`: 16-bit atomic OR, result zero-extended.
    self.emit_atomic_rmw(
        &arg,
        RmwOp::Or,
        OperandSize::S16,
        Some(Extend::<Zero>::I64Extend16),
    )
}
2794
2795
fn visit_i64_atomic_rmw32_or_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.rmw32.or_u`: 32-bit atomic OR, result zero-extended.
    self.emit_atomic_rmw(
        &arg,
        RmwOp::Or,
        OperandSize::S32,
        Some(Extend::<Zero>::I64Extend32),
    )
}
2803
2804
fn visit_i64_atomic_rmw_or(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.rmw.or`: full-width 64-bit atomic OR.
    self.emit_atomic_rmw(&arg, RmwOp::Or, OperandSize::S64, None)
}
2807
2808
fn visit_i32_atomic_rmw8_xor_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i32.atomic.rmw8.xor_u`: 8-bit atomic XOR, result zero-extended.
    self.emit_atomic_rmw(
        &arg,
        RmwOp::Xor,
        OperandSize::S8,
        Some(Extend::<Zero>::I32Extend8),
    )
}
2816
2817
fn visit_i32_atomic_rmw16_xor_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i32.atomic.rmw16.xor_u`: 16-bit atomic XOR, result zero-extended.
    self.emit_atomic_rmw(
        &arg,
        RmwOp::Xor,
        OperandSize::S16,
        Some(Extend::<Zero>::I32Extend16),
    )
}
2825
2826
fn visit_i32_atomic_rmw_xor(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i32.atomic.rmw.xor`: full-width 32-bit atomic XOR.
    self.emit_atomic_rmw(&arg, RmwOp::Xor, OperandSize::S32, None)
}
2829
2830
fn visit_i64_atomic_rmw8_xor_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.rmw8.xor_u`: 8-bit atomic XOR, result zero-extended.
    self.emit_atomic_rmw(
        &arg,
        RmwOp::Xor,
        OperandSize::S8,
        Some(Extend::<Zero>::I64Extend8),
    )
}
2838
2839
fn visit_i64_atomic_rmw16_xor_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.rmw16.xor_u`: 16-bit atomic XOR, result zero-extended.
    self.emit_atomic_rmw(
        &arg,
        RmwOp::Xor,
        OperandSize::S16,
        Some(Extend::<Zero>::I64Extend16),
    )
}
2847
2848
fn visit_i64_atomic_rmw32_xor_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.rmw32.xor_u`: 32-bit atomic XOR, result zero-extended.
    self.emit_atomic_rmw(
        &arg,
        RmwOp::Xor,
        OperandSize::S32,
        Some(Extend::<Zero>::I64Extend32),
    )
}
2856
2857
fn visit_i64_atomic_rmw_xor(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.rmw.xor`: full-width 64-bit atomic XOR.
    self.emit_atomic_rmw(&arg, RmwOp::Xor, OperandSize::S64, None)
}
2860
2861
fn visit_i32_atomic_rmw8_cmpxchg_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i32.atomic.rmw8.cmpxchg_u`: 8-bit compare-exchange, zero-extended.
    self.emit_atomic_cmpxchg(&arg, OperandSize::S8, Some(Extend::I32Extend8))
}
2864
2865
fn visit_i32_atomic_rmw16_cmpxchg_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i32.atomic.rmw16.cmpxchg_u`: 16-bit compare-exchange, zero-extended.
    self.emit_atomic_cmpxchg(&arg, OperandSize::S16, Some(Extend::I32Extend16))
}
2868
2869
fn visit_i32_atomic_rmw_cmpxchg(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i32.atomic.rmw.cmpxchg`: full-width 32-bit compare-exchange.
    self.emit_atomic_cmpxchg(&arg, OperandSize::S32, None)
}
2872
2873
fn visit_i64_atomic_rmw8_cmpxchg_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.rmw8.cmpxchg_u`: 8-bit compare-exchange, zero-extended.
    self.emit_atomic_cmpxchg(&arg, OperandSize::S8, Some(Extend::I64Extend8))
}
2876
2877
fn visit_i64_atomic_rmw16_cmpxchg_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.rmw16.cmpxchg_u`: 16-bit compare-exchange, zero-extended.
    self.emit_atomic_cmpxchg(&arg, OperandSize::S16, Some(Extend::I64Extend16))
}
2880
2881
fn visit_i64_atomic_rmw32_cmpxchg_u(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.rmw32.cmpxchg_u`: 32-bit compare-exchange, zero-extended.
    self.emit_atomic_cmpxchg(&arg, OperandSize::S32, Some(Extend::I64Extend32))
}
2884
2885
fn visit_i64_atomic_rmw_cmpxchg(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `i64.atomic.rmw.cmpxchg`: full-width 64-bit compare-exchange.
    self.emit_atomic_cmpxchg(&arg, OperandSize::S64, None)
}
2888
2889
fn visit_memory_atomic_wait32(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `memory.atomic.wait32`: block until notified or the 32-bit value
    // changes (delegated to the shared atomic-wait emission path).
    self.emit_atomic_wait(&arg, AtomicWaitKind::Wait32)
}
2892
2893
fn visit_memory_atomic_wait64(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `memory.atomic.wait64`: 64-bit variant of the atomic wait.
    self.emit_atomic_wait(&arg, AtomicWaitKind::Wait64)
}
2896
2897
fn visit_memory_atomic_notify(&mut self, arg: MemArg) -> Self::Output {
    // Wasm `memory.atomic.notify`: wake waiters on the given address.
    self.emit_atomic_notify(&arg)
}
2900
2901
fn visit_atomic_fence(&mut self) -> Self::Output {
    // Wasm `atomic.fence`: emit a full memory fence.
    self.masm.fence()
}
2904
2905
wasmparser::for_each_visit_operator!(def_unsupported);
2906
}
2907
2908
impl<'a, 'translation, 'data, M> VisitSimdOperator<'a>
2909
for CodeGen<'a, 'translation, 'data, M, Emission>
2910
where
2911
M: MacroAssembler,
2912
{
2913
fn visit_v128_const(&mut self, val: V128) -> Self::Output {
    // Wasm `v128.const`: push the 128-bit immediate onto the value stack.
    self.context.stack.push(Val::v128(val.i128()));
    Ok(())
}
2917
2918
fn visit_v128_load(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `v128.load`: plain 128-bit vector load.
    self.emit_wasm_load(
        &memarg,
        WasmValType::V128,
        LoadKind::Operand(OperandSize::S128),
    )
}
2925
2926
fn visit_v128_store(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `v128.store`: plain 128-bit vector store.
    self.emit_wasm_store(&memarg, StoreKind::Operand(OperandSize::S128))
}
2929
2930
fn visit_v128_load8x8_s(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `v128.load8x8_s`: load 8 bytes, sign-extend each to 16-bit lanes.
    self.emit_wasm_load(
        &memarg,
        WasmValType::V128,
        LoadKind::VectorExtend(V128LoadExtendKind::E8x8S),
    )
}
2937
2938
fn visit_v128_load8x8_u(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `v128.load8x8_u`: load 8 bytes, zero-extend each to 16-bit lanes.
    self.emit_wasm_load(
        &memarg,
        WasmValType::V128,
        LoadKind::VectorExtend(V128LoadExtendKind::E8x8U),
    )
}
2945
2946
fn visit_v128_load16x4_s(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `v128.load16x4_s`: load 4 x 16-bit, sign-extend to 32-bit lanes.
    self.emit_wasm_load(
        &memarg,
        WasmValType::V128,
        LoadKind::VectorExtend(V128LoadExtendKind::E16x4S),
    )
}
2953
2954
fn visit_v128_load16x4_u(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `v128.load16x4_u`: load 4 x 16-bit, zero-extend to 32-bit lanes.
    self.emit_wasm_load(
        &memarg,
        WasmValType::V128,
        LoadKind::VectorExtend(V128LoadExtendKind::E16x4U),
    )
}
2961
2962
fn visit_v128_load32x2_s(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `v128.load32x2_s`: load 2 x 32-bit, sign-extend to 64-bit lanes.
    self.emit_wasm_load(
        &memarg,
        WasmValType::V128,
        LoadKind::VectorExtend(V128LoadExtendKind::E32x2S),
    )
}
2969
2970
fn visit_v128_load32x2_u(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `v128.load32x2_u`: load 2 x 32-bit, zero-extend to 64-bit lanes.
    self.emit_wasm_load(
        &memarg,
        WasmValType::V128,
        LoadKind::VectorExtend(V128LoadExtendKind::E32x2U),
    )
}
2977
2978
fn visit_v128_load8_splat(&mut self, memarg: MemArg) -> Self::Output {
    // Wasm `v128.load8_splat`: load one byte and broadcast it to all lanes.
    self.emit_wasm_load(
        &memarg,
        WasmValType::V128,
        LoadKind::Splat(SplatLoadKind::S8),
    )
}
2985
2986
fn visit_v128_load16_splat(&mut self, memarg: MemArg) -> Self::Output {
2987
self.emit_wasm_load(
2988
&memarg,
2989
WasmValType::V128,
2990
LoadKind::Splat(SplatLoadKind::S16),
2991
)
2992
}
2993
2994
fn visit_v128_load32_splat(&mut self, memarg: MemArg) -> Self::Output {
2995
self.emit_wasm_load(
2996
&memarg,
2997
WasmValType::V128,
2998
LoadKind::Splat(SplatLoadKind::S32),
2999
)
3000
}
3001
3002
fn visit_v128_load64_splat(&mut self, memarg: MemArg) -> Self::Output {
3003
self.emit_wasm_load(
3004
&memarg,
3005
WasmValType::V128,
3006
LoadKind::Splat(SplatLoadKind::S64),
3007
)
3008
}
3009
3010
/// Splats the scalar on top of the stack into all sixteen 8-bit lanes
/// of a new `v128` value.
fn visit_i8x16_splat(&mut self) -> Self::Output {
    self.masm.splat(&mut self.context, SplatKind::I8x16)
}
3013
3014
fn visit_i16x8_splat(&mut self) -> Self::Output {
3015
self.masm.splat(&mut self.context, SplatKind::I16x8)
3016
}
3017
3018
fn visit_i32x4_splat(&mut self) -> Self::Output {
3019
self.masm.splat(&mut self.context, SplatKind::I32x4)
3020
}
3021
3022
fn visit_i64x2_splat(&mut self) -> Self::Output {
3023
self.masm.splat(&mut self.context, SplatKind::I64x2)
3024
}
3025
3026
fn visit_f32x4_splat(&mut self) -> Self::Output {
3027
self.masm.splat(&mut self.context, SplatKind::F32x4)
3028
}
3029
3030
fn visit_f64x2_splat(&mut self) -> Self::Output {
3031
self.masm.splat(&mut self.context, SplatKind::F64x2)
3032
}
3033
3034
fn visit_i8x16_shuffle(&mut self, lanes: [u8; 16]) -> Self::Output {
3035
let rhs = self.context.pop_to_reg(self.masm, None)?;
3036
let lhs = self.context.pop_to_reg(self.masm, None)?;
3037
self.masm
3038
.shuffle(writable!(lhs.into()), lhs.into(), rhs.into(), lanes)?;
3039
self.context.stack.push(TypedReg::v128(lhs.into()).into());
3040
self.context.free_reg(rhs);
3041
Ok(())
3042
}
3043
3044
fn visit_i8x16_swizzle(&mut self) -> Self::Output {
3045
let rhs = self.context.pop_to_reg(self.masm, None)?;
3046
let lhs = self.context.pop_to_reg(self.masm, None)?;
3047
self.masm
3048
.swizzle(writable!(lhs.into()), lhs.into(), rhs.into())?;
3049
self.context.stack.push(TypedReg::v128(lhs.into()).into());
3050
self.context.free_reg(rhs);
3051
Ok(())
3052
}
3053
3054
/// Extracts lane `lane` from an `i8x16` vector with sign extension,
/// pushing the scalar result.
fn visit_i8x16_extract_lane_s(&mut self) -> Self::Output {
    self.context.extract_lane_op(
        self.masm,
        ExtractLaneKind::I8x16S,
        // `extract_lane_op` handles the stack/register plumbing; the closure
        // only supplies the lane-indexed extraction.
        |masm, src, dst, kind| masm.extract_lane(src, dst, lane, kind),
    )
}
3061
3062
fn visit_i8x16_extract_lane_u(&mut self, lane: u8) -> Self::Output {
3063
self.context.extract_lane_op(
3064
self.masm,
3065
ExtractLaneKind::I8x16U,
3066
|masm, src, dst, kind| masm.extract_lane(src, dst, lane, kind),
3067
)
3068
}
3069
3070
fn visit_i16x8_extract_lane_s(&mut self, lane: u8) -> Self::Output {
3071
self.context.extract_lane_op(
3072
self.masm,
3073
ExtractLaneKind::I16x8S,
3074
|masm, src, dst, kind| masm.extract_lane(src, dst, lane, kind),
3075
)
3076
}
3077
3078
fn visit_i16x8_extract_lane_u(&mut self, lane: u8) -> Self::Output {
3079
self.context.extract_lane_op(
3080
self.masm,
3081
ExtractLaneKind::I16x8U,
3082
|masm, src, dst, kind| masm.extract_lane(src, dst, lane, kind),
3083
)
3084
}
3085
3086
fn visit_i32x4_extract_lane(&mut self, lane: u8) -> Self::Output {
3087
self.context
3088
.extract_lane_op(self.masm, ExtractLaneKind::I32x4, |masm, src, dst, kind| {
3089
masm.extract_lane(src, dst, lane, kind)
3090
})
3091
}
3092
3093
fn visit_i64x2_extract_lane(&mut self, lane: u8) -> Self::Output {
3094
self.context
3095
.extract_lane_op(self.masm, ExtractLaneKind::I64x2, |masm, src, dst, kind| {
3096
masm.extract_lane(src, dst, lane, kind)
3097
})
3098
}
3099
3100
fn visit_f32x4_extract_lane(&mut self, lane: u8) -> Self::Output {
3101
self.context
3102
.extract_lane_op(self.masm, ExtractLaneKind::F32x4, |masm, src, dst, kind| {
3103
masm.extract_lane(src, dst, lane, kind)
3104
})
3105
}
3106
3107
fn visit_f64x2_extract_lane(&mut self, lane: u8) -> Self::Output {
3108
self.context
3109
.extract_lane_op(self.masm, ExtractLaneKind::F64x2, |masm, src, dst, kind| {
3110
masm.extract_lane(src, dst, lane, kind)
3111
})
3112
}
3113
3114
fn visit_i8x16_eq(&mut self) -> Self::Output {
3115
self.context
3116
.binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
3117
masm.v128_eq(writable!(dst), dst, src, VectorEqualityKind::I8x16)?;
3118
Ok(TypedReg::v128(dst))
3119
})
3120
}
3121
3122
fn visit_i16x8_eq(&mut self) -> Self::Output {
3123
self.context
3124
.binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
3125
masm.v128_eq(writable!(dst), dst, src, VectorEqualityKind::I16x8)?;
3126
Ok(TypedReg::v128(dst))
3127
})
3128
}
3129
3130
fn visit_i32x4_eq(&mut self) -> Self::Output {
3131
self.context
3132
.binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
3133
masm.v128_eq(writable!(dst), dst, src, VectorEqualityKind::I32x4)?;
3134
Ok(TypedReg::v128(dst))
3135
})
3136
}
3137
3138
fn visit_i64x2_eq(&mut self) -> Self::Output {
3139
self.context
3140
.binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
3141
masm.v128_eq(writable!(dst), dst, src, VectorEqualityKind::I64x2)?;
3142
Ok(TypedReg::v128(dst))
3143
})
3144
}
3145
3146
fn visit_f32x4_eq(&mut self) -> Self::Output {
3147
self.context
3148
.binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
3149
masm.v128_eq(writable!(dst), dst, src, VectorEqualityKind::F32x4)?;
3150
Ok(TypedReg::v128(dst))
3151
})
3152
}
3153
3154
fn visit_f64x2_eq(&mut self) -> Self::Output {
3155
self.context
3156
.binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
3157
masm.v128_eq(writable!(dst), dst, src, VectorEqualityKind::F64x2)?;
3158
Ok(TypedReg::v128(dst))
3159
})
3160
}
3161
3162
fn visit_i8x16_ne(&mut self) -> Self::Output {
3163
self.context
3164
.binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
3165
masm.v128_ne(writable!(dst), dst, src, VectorEqualityKind::I8x16)?;
3166
Ok(TypedReg::v128(dst))
3167
})
3168
}
3169
3170
fn visit_i16x8_ne(&mut self) -> Self::Output {
3171
self.context
3172
.binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
3173
masm.v128_ne(writable!(dst), dst, src, VectorEqualityKind::I16x8)?;
3174
Ok(TypedReg::v128(dst))
3175
})
3176
}
3177
3178
fn visit_i32x4_ne(&mut self) -> Self::Output {
3179
self.context
3180
.binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
3181
masm.v128_ne(writable!(dst), dst, src, VectorEqualityKind::I32x4)?;
3182
Ok(TypedReg::v128(dst))
3183
})
3184
}
3185
3186
fn visit_i64x2_ne(&mut self) -> Self::Output {
3187
self.context
3188
.binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
3189
masm.v128_ne(writable!(dst), dst, src, VectorEqualityKind::I64x2)?;
3190
Ok(TypedReg::v128(dst))
3191
})
3192
}
3193
3194
fn visit_f32x4_ne(&mut self) -> Self::Output {
3195
self.context
3196
.binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
3197
masm.v128_ne(writable!(dst), dst, src, VectorEqualityKind::F32x4)?;
3198
Ok(TypedReg::v128(dst))
3199
})
3200
}
3201
3202
fn visit_f64x2_ne(&mut self) -> Self::Output {
3203
self.context
3204
.binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
3205
masm.v128_ne(writable!(dst), dst, src, VectorEqualityKind::F64x2)?;
3206
Ok(TypedReg::v128(dst))
3207
})
3208
}
3209
3210
fn visit_i8x16_lt_s(&mut self) -> Self::Output {
3211
self.context
3212
.binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
3213
masm.v128_lt(writable!(dst), dst, src, VectorCompareKind::I8x16S)?;
3214
Ok(TypedReg::v128(dst))
3215
})
3216
}
3217
3218
fn visit_i8x16_lt_u(&mut self) -> Self::Output {
3219
self.context
3220
.binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
3221
masm.v128_lt(writable!(dst), dst, src, VectorCompareKind::I8x16U)?;
3222
Ok(TypedReg::v128(dst))
3223
})
3224
}
3225
3226
fn visit_i16x8_lt_s(&mut self) -> Self::Output {
3227
self.context
3228
.binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
3229
masm.v128_lt(writable!(dst), dst, src, VectorCompareKind::I16x8S)?;
3230
Ok(TypedReg::v128(dst))
3231
})
3232
}
3233
3234
fn visit_i16x8_lt_u(&mut self) -> Self::Output {
3235
self.context
3236
.binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
3237
masm.v128_lt(writable!(dst), dst, src, VectorCompareKind::I16x8U)?;
3238
Ok(TypedReg::v128(dst))
3239
})
3240
}
3241
3242
fn visit_i32x4_lt_s(&mut self) -> Self::Output {
3243
self.context
3244
.binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
3245
masm.v128_lt(writable!(dst), dst, src, VectorCompareKind::I32x4S)?;
3246
Ok(TypedReg::v128(dst))
3247
})
3248
}
3249
3250
fn visit_i32x4_lt_u(&mut self) -> Self::Output {
3251
self.context
3252
.binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
3253
masm.v128_lt(writable!(dst), dst, src, VectorCompareKind::I32x4U)?;
3254
Ok(TypedReg::v128(dst))
3255
})
3256
}
3257
3258
fn visit_i64x2_lt_s(&mut self) -> Self::Output {
3259
self.context
3260
.binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
3261
masm.v128_lt(writable!(dst), dst, src, VectorCompareKind::I64x2S)?;
3262
Ok(TypedReg::v128(dst))
3263
})
3264
}
3265
3266
fn visit_f32x4_lt(&mut self) -> Self::Output {
3267
self.context
3268
.binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
3269
masm.v128_lt(writable!(dst), dst, src, VectorCompareKind::F32x4)?;
3270
Ok(TypedReg::v128(dst))
3271
})
3272
}
3273
3274
fn visit_f64x2_lt(&mut self) -> Self::Output {
3275
self.context
3276
.binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
3277
masm.v128_lt(writable!(dst), dst, src, VectorCompareKind::F64x2)?;
3278
Ok(TypedReg::v128(dst))
3279
})
3280
}
3281
3282
fn visit_i8x16_le_s(&mut self) -> Self::Output {
3283
self.context
3284
.binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
3285
masm.v128_le(writable!(dst), dst, src, VectorCompareKind::I8x16S)?;
3286
Ok(TypedReg::v128(dst))
3287
})
3288
}
3289
3290
fn visit_i8x16_le_u(&mut self) -> Self::Output {
3291
self.context
3292
.binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
3293
masm.v128_le(writable!(dst), dst, src, VectorCompareKind::I8x16U)?;
3294
Ok(TypedReg::v128(dst))
3295
})
3296
}
3297
3298
fn visit_i16x8_le_s(&mut self) -> Self::Output {
3299
self.context
3300
.binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
3301
masm.v128_le(writable!(dst), dst, src, VectorCompareKind::I16x8S)?;
3302
Ok(TypedReg::v128(dst))
3303
})
3304
}
3305
3306
fn visit_i16x8_le_u(&mut self) -> Self::Output {
3307
self.context
3308
.binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
3309
masm.v128_le(writable!(dst), dst, src, VectorCompareKind::I16x8U)?;
3310
Ok(TypedReg::v128(dst))
3311
})
3312
}
3313
3314
fn visit_i32x4_le_s(&mut self) -> Self::Output {
3315
self.context
3316
.binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
3317
masm.v128_le(writable!(dst), dst, src, VectorCompareKind::I32x4S)?;
3318
Ok(TypedReg::v128(dst))
3319
})
3320
}
3321
3322
fn visit_i32x4_le_u(&mut self) -> Self::Output {
3323
self.context
3324
.binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
3325
masm.v128_le(writable!(dst), dst, src, VectorCompareKind::I32x4U)?;
3326
Ok(TypedReg::v128(dst))
3327
})
3328
}
3329
3330
fn visit_i64x2_le_s(&mut self) -> Self::Output {
3331
self.context
3332
.binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
3333
masm.v128_le(writable!(dst), dst, src, VectorCompareKind::I64x2S)?;
3334
Ok(TypedReg::v128(dst))
3335
})
3336
}
3337
3338
fn visit_f32x4_le(&mut self) -> Self::Output {
3339
self.context
3340
.binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
3341
masm.v128_le(writable!(dst), dst, src, VectorCompareKind::F32x4)?;
3342
Ok(TypedReg::v128(dst))
3343
})
3344
}
3345
3346
fn visit_f64x2_le(&mut self) -> Self::Output {
3347
self.context
3348
.binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
3349
masm.v128_le(writable!(dst), dst, src, VectorCompareKind::F64x2)?;
3350
Ok(TypedReg::v128(dst))
3351
})
3352
}
3353
3354
fn visit_i8x16_gt_s(&mut self) -> Self::Output {
3355
self.context
3356
.binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
3357
masm.v128_gt(writable!(dst), dst, src, VectorCompareKind::I8x16S)?;
3358
Ok(TypedReg::v128(dst))
3359
})
3360
}
3361
3362
fn visit_i8x16_gt_u(&mut self) -> Self::Output {
3363
self.context
3364
.binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
3365
masm.v128_gt(writable!(dst), dst, src, VectorCompareKind::I8x16U)?;
3366
Ok(TypedReg::v128(dst))
3367
})
3368
}
3369
3370
fn visit_i16x8_gt_s(&mut self) -> Self::Output {
3371
self.context
3372
.binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
3373
masm.v128_gt(writable!(dst), dst, src, VectorCompareKind::I16x8S)?;
3374
Ok(TypedReg::v128(dst))
3375
})
3376
}
3377
3378
fn visit_i16x8_gt_u(&mut self) -> Self::Output {
3379
self.context
3380
.binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
3381
masm.v128_gt(writable!(dst), dst, src, VectorCompareKind::I16x8U)?;
3382
Ok(TypedReg::v128(dst))
3383
})
3384
}
3385
3386
fn visit_i32x4_gt_s(&mut self) -> Self::Output {
3387
self.context
3388
.binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
3389
masm.v128_gt(writable!(dst), dst, src, VectorCompareKind::I32x4S)?;
3390
Ok(TypedReg::v128(dst))
3391
})
3392
}
3393
3394
fn visit_i32x4_gt_u(&mut self) -> Self::Output {
3395
self.context
3396
.binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
3397
masm.v128_gt(writable!(dst), dst, src, VectorCompareKind::I32x4U)?;
3398
Ok(TypedReg::v128(dst))
3399
})
3400
}
3401
3402
fn visit_i64x2_gt_s(&mut self) -> Self::Output {
3403
self.context
3404
.binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
3405
masm.v128_gt(writable!(dst), dst, src, VectorCompareKind::I64x2S)?;
3406
Ok(TypedReg::v128(dst))
3407
})
3408
}
3409
3410
fn visit_f32x4_gt(&mut self) -> Self::Output {
3411
self.context
3412
.binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
3413
masm.v128_gt(writable!(dst), dst, src, VectorCompareKind::F32x4)?;
3414
Ok(TypedReg::v128(dst))
3415
})
3416
}
3417
3418
fn visit_f64x2_gt(&mut self) -> Self::Output {
3419
self.context
3420
.binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
3421
masm.v128_gt(writable!(dst), dst, src, VectorCompareKind::F64x2)?;
3422
Ok(TypedReg::v128(dst))
3423
})
3424
}
3425
3426
fn visit_i8x16_ge_s(&mut self) -> Self::Output {
3427
self.context
3428
.binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
3429
masm.v128_ge(writable!(dst), dst, src, VectorCompareKind::I8x16S)?;
3430
Ok(TypedReg::v128(dst))
3431
})
3432
}
3433
3434
fn visit_i8x16_ge_u(&mut self) -> Self::Output {
3435
self.context
3436
.binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
3437
masm.v128_ge(writable!(dst), dst, src, VectorCompareKind::I8x16U)?;
3438
Ok(TypedReg::v128(dst))
3439
})
3440
}
3441
3442
fn visit_i16x8_ge_s(&mut self) -> Self::Output {
3443
self.context
3444
.binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
3445
masm.v128_ge(writable!(dst), dst, src, VectorCompareKind::I16x8S)?;
3446
Ok(TypedReg::v128(dst))
3447
})
3448
}
3449
3450
fn visit_i16x8_ge_u(&mut self) -> Self::Output {
3451
self.context
3452
.binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
3453
masm.v128_ge(writable!(dst), dst, src, VectorCompareKind::I16x8U)?;
3454
Ok(TypedReg::v128(dst))
3455
})
3456
}
3457
3458
fn visit_i32x4_ge_s(&mut self) -> Self::Output {
3459
self.context
3460
.binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
3461
masm.v128_ge(writable!(dst), dst, src, VectorCompareKind::I32x4S)?;
3462
Ok(TypedReg::v128(dst))
3463
})
3464
}
3465
3466
fn visit_i32x4_ge_u(&mut self) -> Self::Output {
3467
self.context
3468
.binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
3469
masm.v128_ge(writable!(dst), dst, src, VectorCompareKind::I32x4U)?;
3470
Ok(TypedReg::v128(dst))
3471
})
3472
}
3473
3474
fn visit_i64x2_ge_s(&mut self) -> Self::Output {
3475
self.context
3476
.binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
3477
masm.v128_ge(writable!(dst), dst, src, VectorCompareKind::I64x2S)?;
3478
Ok(TypedReg::v128(dst))
3479
})
3480
}
3481
3482
fn visit_f32x4_ge(&mut self) -> Self::Output {
3483
self.context
3484
.binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
3485
masm.v128_ge(writable!(dst), dst, src, VectorCompareKind::F32x4)?;
3486
Ok(TypedReg::v128(dst))
3487
})
3488
}
3489
3490
fn visit_f64x2_ge(&mut self) -> Self::Output {
3491
self.context
3492
.binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
3493
masm.v128_ge(writable!(dst), dst, src, VectorCompareKind::F64x2)?;
3494
Ok(TypedReg::v128(dst))
3495
})
3496
}
3497
3498
/// Replaces lane `lane` of an `i8x16` vector with the scalar beneath it
/// on the stack, pushing the updated vector.
fn visit_i8x16_replace_lane(&mut self, lane: u8) -> Self::Output {
    self.context
        // `replace_lane_op` handles the stack/register plumbing; the closure
        // only supplies the lane-indexed replacement.
        .replace_lane_op(self.masm, ReplaceLaneKind::I8x16, |masm, src, dst, kind| {
            masm.replace_lane(src, dst, lane, kind)
        })
}
3504
3505
fn visit_i16x8_replace_lane(&mut self, lane: u8) -> Self::Output {
3506
self.context
3507
.replace_lane_op(self.masm, ReplaceLaneKind::I16x8, |masm, src, dst, kind| {
3508
masm.replace_lane(src, dst, lane, kind)
3509
})
3510
}
3511
3512
fn visit_i32x4_replace_lane(&mut self, lane: u8) -> Self::Output {
3513
self.context
3514
.replace_lane_op(self.masm, ReplaceLaneKind::I32x4, |masm, src, dst, kind| {
3515
masm.replace_lane(src, dst, lane, kind)
3516
})
3517
}
3518
3519
fn visit_i64x2_replace_lane(&mut self, lane: u8) -> Self::Output {
3520
self.context
3521
.replace_lane_op(self.masm, ReplaceLaneKind::I64x2, |masm, src, dst, kind| {
3522
masm.replace_lane(src, dst, lane, kind)
3523
})
3524
}
3525
3526
fn visit_f32x4_replace_lane(&mut self, lane: u8) -> Self::Output {
3527
self.context
3528
.replace_lane_op(self.masm, ReplaceLaneKind::F32x4, |masm, src, dst, kind| {
3529
masm.replace_lane(src, dst, lane, kind)
3530
})
3531
}
3532
3533
fn visit_f64x2_replace_lane(&mut self, lane: u8) -> Self::Output {
3534
self.context
3535
.replace_lane_op(self.masm, ReplaceLaneKind::F64x2, |masm, src, dst, kind| {
3536
masm.replace_lane(src, dst, lane, kind)
3537
})
3538
}
3539
3540
/// Emits a bitwise NOT of the `v128` on top of the stack, in place.
fn visit_v128_not(&mut self) -> Self::Output {
    self.context.unop(self.masm, |masm, reg| {
        // The operand register is negated in place and pushed back.
        masm.v128_not(writable!(reg))?;
        Ok(TypedReg::new(WasmValType::V128, reg))
    })
}
3546
3547
/// Emits a bitwise AND of the two `v128` operands on top of the stack.
/// AND is commutative, so the operand ordering passed to the masm is
/// immaterial.
fn visit_v128_and(&mut self) -> Self::Output {
    self.context
        .binop(self.masm, OperandSize::S128, |masm, dst, src, _size| {
            masm.v128_and(dst, src, writable!(dst))?;
            Ok(TypedReg::new(WasmValType::V128, dst))
        })
}
3554
3555
/// Emits `v128.andnot` of the two operands on top of the stack.
fn visit_v128_andnot(&mut self) -> Self::Output {
    self.context
        .binop(self.masm, OperandSize::S128, |masm, dst, src, _size| {
            // careful here: and_not is *not* commutative: dst = !src1 & src2,
            // hence the deliberate (src, dst) argument order below — do not
            // "normalize" it to match the commutative ops in this file.
            masm.v128_and_not(src, dst, writable!(dst))?;
            Ok(TypedReg::new(WasmValType::V128, dst))
        })
}
3563
3564
/// Emits a bitwise OR of the two `v128` operands on top of the stack.
fn visit_v128_or(&mut self) -> Self::Output {
    self.context
        .binop(self.masm, OperandSize::S128, |masm, dst, src, _size| {
            // OR is commutative, so the (src, dst) operand order is
            // immaterial. (A previous comment here, copy-pasted from
            // `visit_v128_andnot`, wrongly claimed non-commutativity.)
            masm.v128_or(src, dst, writable!(dst))?;
            Ok(TypedReg::new(WasmValType::V128, dst))
        })
}
3572
3573
/// Emits a bitwise XOR of the two `v128` operands on top of the stack.
fn visit_v128_xor(&mut self) -> Self::Output {
    self.context
        .binop(self.masm, OperandSize::S128, |masm, dst, src, _size| {
            // XOR is commutative, so the (src, dst) operand order is
            // immaterial. (A previous comment here, copy-pasted from
            // `visit_v128_andnot`, wrongly claimed non-commutativity.)
            masm.v128_xor(src, dst, writable!(dst))?;
            Ok(TypedReg::new(WasmValType::V128, dst))
        })
}
3581
3582
fn visit_v128_bitselect(&mut self) -> Self::Output {
3583
let mask = self.context.pop_to_reg(self.masm, None)?;
3584
let op2 = self.context.pop_to_reg(self.masm, None)?;
3585
let op1 = self.context.pop_to_reg(self.masm, None)?;
3586
let dst = self.context.any_fpr(self.masm)?;
3587
3588
// careful here: bitselect is *not* commutative.
3589
self.masm
3590
.v128_bitselect(op1.reg, op2.reg, mask.reg, writable!(dst))?;
3591
3592
self.context
3593
.stack
3594
.push(TypedReg::new(WasmValType::V128, dst).into());
3595
self.context.free_reg(op1);
3596
self.context.free_reg(op2);
3597
self.context.free_reg(mask);
3598
3599
Ok(())
3600
}
3601
3602
fn visit_v128_any_true(&mut self) -> Self::Output {
3603
let src = self.context.pop_to_reg(self.masm, None)?;
3604
let dst = self.context.any_gpr(self.masm)?;
3605
3606
self.masm.v128_any_true(src.reg, writable!(dst))?;
3607
3608
self.context
3609
.stack
3610
.push(TypedReg::new(WasmValType::I32, dst).into());
3611
self.context.free_reg(src);
3612
3613
Ok(())
3614
}
3615
3616
/// Loads a single byte from memory into lane `lane` of the `v128` on
/// the stack, leaving the other lanes unchanged.
fn visit_v128_load8_lane(&mut self, arg: MemArg, lane: u8) -> Self::Output {
    self.emit_wasm_load(
        &arg,
        WasmValType::V128,
        LoadKind::vector_lane(lane, OperandSize::S8),
    )
}
3623
3624
fn visit_v128_load16_lane(&mut self, arg: MemArg, lane: u8) -> Self::Output {
3625
self.emit_wasm_load(
3626
&arg,
3627
WasmValType::V128,
3628
LoadKind::vector_lane(lane, OperandSize::S16),
3629
)
3630
}
3631
3632
fn visit_v128_load32_lane(&mut self, arg: MemArg, lane: u8) -> Self::Output {
3633
self.emit_wasm_load(
3634
&arg,
3635
WasmValType::V128,
3636
LoadKind::vector_lane(lane, OperandSize::S32),
3637
)
3638
}
3639
3640
fn visit_v128_load64_lane(&mut self, arg: MemArg, lane: u8) -> Self::Output {
3641
self.emit_wasm_load(
3642
&arg,
3643
WasmValType::V128,
3644
LoadKind::vector_lane(lane, OperandSize::S64),
3645
)
3646
}
3647
3648
/// Stores the byte held in lane `lane` of the `v128` on top of the
/// stack to memory.
fn visit_v128_store8_lane(&mut self, arg: MemArg, lane: u8) -> Self::Output {
    self.emit_wasm_store(&arg, StoreKind::vector_lane(lane, OperandSize::S8))
}
3651
3652
fn visit_v128_store16_lane(&mut self, arg: MemArg, lane: u8) -> Self::Output {
3653
self.emit_wasm_store(&arg, StoreKind::vector_lane(lane, OperandSize::S16))
3654
}
3655
3656
fn visit_v128_store32_lane(&mut self, arg: MemArg, lane: u8) -> Self::Output {
3657
self.emit_wasm_store(&arg, StoreKind::vector_lane(lane, OperandSize::S32))
3658
}
3659
3660
fn visit_v128_store64_lane(&mut self, arg: MemArg, lane: u8) -> Self::Output {
3661
self.emit_wasm_store(&arg, StoreKind::vector_lane(lane, OperandSize::S64))
3662
}
3663
3664
/// Converts four signed 32-bit integer lanes to 32-bit floats, in place.
fn visit_f32x4_convert_i32x4_s(&mut self) -> Self::Output {
    self.context.unop(self.masm, |masm, reg| {
        masm.v128_convert(reg, writable!(reg), V128ConvertKind::I32x4S)?;
        Ok(TypedReg::v128(reg))
    })
}
3670
3671
fn visit_f32x4_convert_i32x4_u(&mut self) -> Self::Output {
3672
self.context.unop(self.masm, |masm, reg| {
3673
masm.v128_convert(reg, writable!(reg), V128ConvertKind::I32x4U)?;
3674
Ok(TypedReg::v128(reg))
3675
})
3676
}
3677
3678
fn visit_f64x2_convert_low_i32x4_s(&mut self) -> Self::Output {
3679
self.context.unop(self.masm, |masm, reg| {
3680
masm.v128_convert(reg, writable!(reg), V128ConvertKind::I32x4LowS)?;
3681
Ok(TypedReg::v128(reg))
3682
})
3683
}
3684
3685
fn visit_f64x2_convert_low_i32x4_u(&mut self) -> Self::Output {
3686
self.context.unop(self.masm, |masm, reg| {
3687
masm.v128_convert(reg, writable!(reg), V128ConvertKind::I32x4LowU)?;
3688
Ok(TypedReg::v128(reg))
3689
})
3690
}
3691
3692
fn visit_i8x16_narrow_i16x8_s(&mut self) -> Self::Output {
3693
self.context
3694
.binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
3695
masm.v128_narrow(dst, src, writable!(dst), V128NarrowKind::I16x8S)?;
3696
Ok(TypedReg::v128(dst))
3697
})
3698
}
3699
3700
fn visit_i8x16_narrow_i16x8_u(&mut self) -> Self::Output {
3701
self.context
3702
.binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
3703
masm.v128_narrow(dst, src, writable!(dst), V128NarrowKind::I16x8U)?;
3704
Ok(TypedReg::v128(dst))
3705
})
3706
}
3707
3708
fn visit_i16x8_narrow_i32x4_s(&mut self) -> Self::Output {
3709
self.context
3710
.binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
3711
masm.v128_narrow(dst, src, writable!(dst), V128NarrowKind::I32x4S)?;
3712
Ok(TypedReg::v128(dst))
3713
})
3714
}
3715
3716
fn visit_i16x8_narrow_i32x4_u(&mut self) -> Self::Output {
3717
self.context
3718
.binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
3719
masm.v128_narrow(dst, src, writable!(dst), V128NarrowKind::I32x4U)?;
3720
Ok(TypedReg::v128(dst))
3721
})
3722
}
3723
3724
fn visit_f32x4_demote_f64x2_zero(&mut self) -> Self::Output {
3725
self.context.unop(self.masm, |masm, reg| {
3726
masm.v128_demote(reg, writable!(reg))?;
3727
Ok(TypedReg::v128(reg))
3728
})
3729
}
3730
3731
fn visit_f64x2_promote_low_f32x4(&mut self) -> Self::Output {
3732
self.context.unop(self.masm, |masm, reg| {
3733
masm.v128_promote(reg, writable!(reg))?;
3734
Ok(TypedReg::v128(reg))
3735
})
3736
}
3737
3738
fn visit_i16x8_extend_low_i8x16_s(&mut self) -> Self::Output {
3739
self.context.unop(self.masm, |masm, reg| {
3740
masm.v128_extend(reg, writable!(reg), V128ExtendKind::LowI8x16S)?;
3741
Ok(TypedReg::v128(reg))
3742
})
3743
}
3744
3745
fn visit_i16x8_extend_high_i8x16_s(&mut self) -> Self::Output {
3746
self.context.unop(self.masm, |masm, reg| {
3747
masm.v128_extend(reg, writable!(reg), V128ExtendKind::HighI8x16S)?;
3748
Ok(TypedReg::v128(reg))
3749
})
3750
}
3751
3752
fn visit_i16x8_extend_low_i8x16_u(&mut self) -> Self::Output {
3753
self.context.unop(self.masm, |masm, reg| {
3754
masm.v128_extend(reg, writable!(reg), V128ExtendKind::LowI8x16U)?;
3755
Ok(TypedReg::v128(reg))
3756
})
3757
}
3758
3759
fn visit_i16x8_extend_high_i8x16_u(&mut self) -> Self::Output {
3760
self.context.unop(self.masm, |masm, reg| {
3761
masm.v128_extend(reg, writable!(reg), V128ExtendKind::HighI8x16U)?;
3762
Ok(TypedReg::v128(reg))
3763
})
3764
}
3765
3766
fn visit_i32x4_extend_low_i16x8_s(&mut self) -> Self::Output {
3767
self.context.unop(self.masm, |masm, reg| {
3768
masm.v128_extend(reg, writable!(reg), V128ExtendKind::LowI16x8S)?;
3769
Ok(TypedReg::v128(reg))
3770
})
3771
}
3772
3773
fn visit_i32x4_extend_high_i16x8_s(&mut self) -> Self::Output {
3774
self.context.unop(self.masm, |masm, reg| {
3775
masm.v128_extend(reg, writable!(reg), V128ExtendKind::HighI16x8S)?;
3776
Ok(TypedReg::v128(reg))
3777
})
3778
}
3779
3780
fn visit_i32x4_extend_low_i16x8_u(&mut self) -> Self::Output {
3781
self.context.unop(self.masm, |masm, reg| {
3782
masm.v128_extend(reg, writable!(reg), V128ExtendKind::LowI16x8U)?;
3783
Ok(TypedReg::v128(reg))
3784
})
3785
}
3786
3787
fn visit_i32x4_extend_high_i16x8_u(&mut self) -> Self::Output {
3788
self.context.unop(self.masm, |masm, reg| {
3789
masm.v128_extend(reg, writable!(reg), V128ExtendKind::HighI16x8U)?;
3790
Ok(TypedReg::v128(reg))
3791
})
3792
}
3793
3794
fn visit_i64x2_extend_low_i32x4_s(&mut self) -> Self::Output {
3795
self.context.unop(self.masm, |masm, reg| {
3796
masm.v128_extend(reg, writable!(reg), V128ExtendKind::LowI32x4S)?;
3797
Ok(TypedReg::v128(reg))
3798
})
3799
}
3800
3801
fn visit_i64x2_extend_high_i32x4_s(&mut self) -> Self::Output {
3802
self.context.unop(self.masm, |masm, reg| {
3803
masm.v128_extend(reg, writable!(reg), V128ExtendKind::HighI32x4S)?;
3804
Ok(TypedReg::v128(reg))
3805
})
3806
}
3807
3808
fn visit_i64x2_extend_low_i32x4_u(&mut self) -> Self::Output {
3809
self.context.unop(self.masm, |masm, reg| {
3810
masm.v128_extend(reg, writable!(reg), V128ExtendKind::LowI32x4U)?;
3811
Ok(TypedReg::v128(reg))
3812
})
3813
}
3814
3815
fn visit_i64x2_extend_high_i32x4_u(&mut self) -> Self::Output {
3816
self.context.unop(self.masm, |masm, reg| {
3817
masm.v128_extend(reg, writable!(reg), V128ExtendKind::HighI32x4U)?;
3818
Ok(TypedReg::v128(reg))
3819
})
3820
}
3821
3822
fn visit_i8x16_add(&mut self) -> Self::Output {
3823
self.context
3824
.binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
3825
masm.v128_add(dst, src, writable!(dst), V128AddKind::I8x16)?;
3826
Ok(TypedReg::new(WasmValType::V128, dst))
3827
})
3828
}
3829
3830
fn visit_i16x8_add(&mut self) -> Self::Output {
3831
self.context
3832
.binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
3833
masm.v128_add(dst, src, writable!(dst), V128AddKind::I16x8)?;
3834
Ok(TypedReg::new(WasmValType::V128, dst))
3835
})
3836
}
3837
3838
fn visit_i32x4_add(&mut self) -> Self::Output {
3839
self.context
3840
.binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
3841
masm.v128_add(dst, src, writable!(dst), V128AddKind::I32x4)?;
3842
Ok(TypedReg::new(WasmValType::V128, dst))
3843
})
3844
}
3845
3846
fn visit_i64x2_add(&mut self) -> Self::Output {
3847
self.context
3848
.binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
3849
masm.v128_add(dst, src, writable!(dst), V128AddKind::I64x2)?;
3850
Ok(TypedReg::new(WasmValType::V128, dst))
3851
})
3852
}
3853
3854
fn visit_i8x16_sub(&mut self) -> Self::Output {
3855
self.context
3856
.binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
3857
masm.v128_sub(dst, src, writable!(dst), V128SubKind::I8x16)?;
3858
Ok(TypedReg::new(WasmValType::V128, dst))
3859
})
3860
}
3861
3862
fn visit_i16x8_sub(&mut self) -> Self::Output {
3863
self.context
3864
.binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
3865
masm.v128_sub(dst, src, writable!(dst), V128SubKind::I16x8)?;
3866
Ok(TypedReg::new(WasmValType::V128, dst))
3867
})
3868
}
3869
3870
fn visit_i32x4_sub(&mut self) -> Self::Output {
3871
self.context
3872
.binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
3873
masm.v128_sub(dst, src, writable!(dst), V128SubKind::I32x4)?;
3874
Ok(TypedReg::new(WasmValType::V128, dst))
3875
})
3876
}
3877
3878
fn visit_i64x2_sub(&mut self) -> Self::Output {
3879
self.context
3880
.binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
3881
masm.v128_sub(dst, src, writable!(dst), V128SubKind::I64x2)?;
3882
Ok(TypedReg::new(WasmValType::V128, dst))
3883
})
3884
}
3885
3886
/// Emits a lane-wise `i16x8.mul`; operand handling is delegated to the
/// masm since some backends need scratch registers for vector multiply.
fn visit_i16x8_mul(&mut self) -> Self::Output {
    self.masm.v128_mul(&mut self.context, V128MulKind::I16x8)
}
3889
3890
fn visit_i32x4_mul(&mut self) -> Self::Output {
3891
self.masm.v128_mul(&mut self.context, V128MulKind::I32x4)
3892
}
3893
3894
fn visit_i64x2_mul(&mut self) -> Self::Output {
3895
self.masm.v128_mul(&mut self.context, V128MulKind::I64x2)
3896
}
3897
3898
fn visit_i8x16_add_sat_s(&mut self) -> Self::Output {
3899
self.context
3900
.binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
3901
masm.v128_add(dst, src, writable!(dst), V128AddKind::I8x16SatS)?;
3902
Ok(TypedReg::new(WasmValType::V128, dst))
3903
})
3904
}
3905
3906
fn visit_i16x8_add_sat_s(&mut self) -> Self::Output {
3907
self.context
3908
.binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
3909
masm.v128_add(dst, src, writable!(dst), V128AddKind::I16x8SatS)?;
3910
Ok(TypedReg::new(WasmValType::V128, dst))
3911
})
3912
}
3913
3914
fn visit_i8x16_add_sat_u(&mut self) -> Self::Output {
3915
self.context
3916
.binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
3917
masm.v128_add(dst, src, writable!(dst), V128AddKind::I8x16SatU)?;
3918
Ok(TypedReg::new(WasmValType::V128, dst))
3919
})
3920
}
3921
3922
fn visit_i16x8_add_sat_u(&mut self) -> Self::Output {
3923
self.context
3924
.binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
3925
masm.v128_add(dst, src, writable!(dst), V128AddKind::I16x8SatU)?;
3926
Ok(TypedReg::new(WasmValType::V128, dst))
3927
})
3928
}
3929
3930
fn visit_i8x16_sub_sat_s(&mut self) -> Self::Output {
3931
self.context
3932
.binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
3933
masm.v128_sub(dst, src, writable!(dst), V128SubKind::I8x16SatS)?;
3934
Ok(TypedReg::new(WasmValType::V128, dst))
3935
})
3936
}
3937
3938
fn visit_i16x8_sub_sat_s(&mut self) -> Self::Output {
3939
self.context
3940
.binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
3941
masm.v128_sub(dst, src, writable!(dst), V128SubKind::I16x8SatS)?;
3942
Ok(TypedReg::new(WasmValType::V128, dst))
3943
})
3944
}
3945
3946
fn visit_i8x16_sub_sat_u(&mut self) -> Self::Output {
3947
self.context
3948
.binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
3949
masm.v128_sub(dst, src, writable!(dst), V128SubKind::I8x16SatU)?;
3950
Ok(TypedReg::new(WasmValType::V128, dst))
3951
})
3952
}
3953
3954
fn visit_i16x8_sub_sat_u(&mut self) -> Self::Output {
3955
self.context
3956
.binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
3957
masm.v128_sub(dst, src, writable!(dst), V128SubKind::I16x8SatU)?;
3958
Ok(TypedReg::new(WasmValType::V128, dst))
3959
})
3960
}
3961
3962
fn visit_i8x16_abs(&mut self) -> Self::Output {
3963
self.context.unop(self.masm, |masm, reg| {
3964
masm.v128_abs(reg, writable!(reg), V128AbsKind::I8x16)?;
3965
Ok(TypedReg::new(WasmValType::V128, reg))
3966
})
3967
}
3968
3969
fn visit_i16x8_abs(&mut self) -> Self::Output {
3970
self.context.unop(self.masm, |masm, reg| {
3971
masm.v128_abs(reg, writable!(reg), V128AbsKind::I16x8)?;
3972
Ok(TypedReg::new(WasmValType::V128, reg))
3973
})
3974
}
3975
3976
fn visit_i32x4_abs(&mut self) -> Self::Output {
3977
self.context.unop(self.masm, |masm, reg| {
3978
masm.v128_abs(reg, writable!(reg), V128AbsKind::I32x4)?;
3979
Ok(TypedReg::new(WasmValType::V128, reg))
3980
})
3981
}
3982
3983
fn visit_i64x2_abs(&mut self) -> Self::Output {
3984
self.context.unop(self.masm, |masm, reg| {
3985
masm.v128_abs(reg, writable!(reg), V128AbsKind::I64x2)?;
3986
Ok(TypedReg::new(WasmValType::V128, reg))
3987
})
3988
}
3989
3990
fn visit_f32x4_abs(&mut self) -> Self::Output {
3991
self.context.unop(self.masm, |masm, reg| {
3992
masm.v128_abs(reg, writable!(reg), V128AbsKind::F32x4)?;
3993
Ok(TypedReg::new(WasmValType::V128, reg))
3994
})
3995
}
3996
3997
fn visit_f64x2_abs(&mut self) -> Self::Output {
3998
self.context.unop(self.masm, |masm, reg| {
3999
masm.v128_abs(reg, writable!(reg), V128AbsKind::F64x2)?;
4000
Ok(TypedReg::new(WasmValType::V128, reg))
4001
})
4002
}
4003
4004
fn visit_i8x16_neg(&mut self) -> Self::Output {
4005
self.context.unop(self.masm, |masm, op| {
4006
masm.v128_neg(writable!(op), V128NegKind::I8x16)?;
4007
Ok(TypedReg::new(WasmValType::V128, op))
4008
})
4009
}
4010
4011
fn visit_i16x8_neg(&mut self) -> Self::Output {
4012
self.context.unop(self.masm, |masm, op| {
4013
masm.v128_neg(writable!(op), V128NegKind::I16x8)?;
4014
Ok(TypedReg::new(WasmValType::V128, op))
4015
})
4016
}
4017
4018
fn visit_i32x4_neg(&mut self) -> Self::Output {
4019
self.context.unop(self.masm, |masm, op| {
4020
masm.v128_neg(writable!(op), V128NegKind::I32x4)?;
4021
Ok(TypedReg::new(WasmValType::V128, op))
4022
})
4023
}
4024
4025
fn visit_i64x2_neg(&mut self) -> Self::Output {
4026
self.context.unop(self.masm, |masm, op| {
4027
masm.v128_neg(writable!(op), V128NegKind::I64x2)?;
4028
Ok(TypedReg::new(WasmValType::V128, op))
4029
})
4030
}
4031
4032
fn visit_i8x16_shl(&mut self) -> Self::Output {
4033
self.masm
4034
.v128_shift(&mut self.context, OperandSize::S8, ShiftKind::Shl)
4035
}
4036
4037
fn visit_i16x8_shl(&mut self) -> Self::Output {
4038
self.masm
4039
.v128_shift(&mut self.context, OperandSize::S16, ShiftKind::Shl)
4040
}
4041
4042
fn visit_i32x4_shl(&mut self) -> Self::Output {
4043
self.masm
4044
.v128_shift(&mut self.context, OperandSize::S32, ShiftKind::Shl)
4045
}
4046
4047
fn visit_i64x2_shl(&mut self) -> Self::Output {
4048
self.masm
4049
.v128_shift(&mut self.context, OperandSize::S64, ShiftKind::Shl)
4050
}
4051
4052
fn visit_i8x16_shr_u(&mut self) -> Self::Output {
4053
self.masm
4054
.v128_shift(&mut self.context, OperandSize::S8, ShiftKind::ShrU)
4055
}
4056
4057
fn visit_i16x8_shr_u(&mut self) -> Self::Output {
4058
self.masm
4059
.v128_shift(&mut self.context, OperandSize::S16, ShiftKind::ShrU)
4060
}
4061
4062
fn visit_i32x4_shr_u(&mut self) -> Self::Output {
4063
self.masm
4064
.v128_shift(&mut self.context, OperandSize::S32, ShiftKind::ShrU)
4065
}
4066
4067
fn visit_i64x2_shr_u(&mut self) -> Self::Output {
4068
self.masm
4069
.v128_shift(&mut self.context, OperandSize::S64, ShiftKind::ShrU)
4070
}
4071
4072
fn visit_i8x16_shr_s(&mut self) -> Self::Output {
4073
self.masm
4074
.v128_shift(&mut self.context, OperandSize::S8, ShiftKind::ShrS)
4075
}
4076
4077
fn visit_i16x8_shr_s(&mut self) -> Self::Output {
4078
self.masm
4079
.v128_shift(&mut self.context, OperandSize::S16, ShiftKind::ShrS)
4080
}
4081
4082
fn visit_i32x4_shr_s(&mut self) -> Self::Output {
4083
self.masm
4084
.v128_shift(&mut self.context, OperandSize::S32, ShiftKind::ShrS)
4085
}
4086
4087
fn visit_i64x2_shr_s(&mut self) -> Self::Output {
4088
self.masm
4089
.v128_shift(&mut self.context, OperandSize::S64, ShiftKind::ShrS)
4090
}
4091
4092
fn visit_i16x8_q15mulr_sat_s(&mut self) -> Self::Output {
4093
self.context
4094
.binop(self.masm, OperandSize::S16, |masm, dst, src, size| {
4095
masm.v128_q15mulr_sat_s(dst, src, writable!(dst), size)?;
4096
Ok(TypedReg::v128(dst))
4097
})
4098
}
4099
4100
fn visit_i8x16_min_s(&mut self) -> Self::Output {
4101
self.context
4102
.binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
4103
masm.v128_min(src, dst, writable!(dst), V128MinKind::I8x16S)?;
4104
Ok(TypedReg::v128(dst))
4105
})
4106
}
4107
4108
fn visit_i8x16_all_true(&mut self) -> Self::Output {
4109
self.context.v128_all_true_op(self.masm, |masm, src, dst| {
4110
masm.v128_all_true(src, writable!(dst), OperandSize::S8)
4111
})
4112
}
4113
4114
fn visit_i16x8_all_true(&mut self) -> Self::Output {
4115
self.context.v128_all_true_op(self.masm, |masm, src, dst| {
4116
masm.v128_all_true(src, writable!(dst), OperandSize::S16)
4117
})
4118
}
4119
4120
fn visit_i32x4_all_true(&mut self) -> Self::Output {
4121
self.context.v128_all_true_op(self.masm, |masm, src, dst| {
4122
masm.v128_all_true(src, writable!(dst), OperandSize::S32)
4123
})
4124
}
4125
4126
fn visit_i64x2_all_true(&mut self) -> Self::Output {
4127
self.context.v128_all_true_op(self.masm, |masm, src, dst| {
4128
masm.v128_all_true(src, writable!(dst), OperandSize::S64)
4129
})
4130
}
4131
4132
fn visit_i8x16_bitmask(&mut self) -> Self::Output {
4133
self.context.v128_bitmask_op(self.masm, |masm, src, dst| {
4134
masm.v128_bitmask(src, writable!(dst), OperandSize::S8)
4135
})
4136
}
4137
4138
fn visit_i16x8_bitmask(&mut self) -> Self::Output {
4139
self.context.v128_bitmask_op(self.masm, |masm, src, dst| {
4140
masm.v128_bitmask(src, writable!(dst), OperandSize::S16)
4141
})
4142
}
4143
4144
fn visit_i32x4_bitmask(&mut self) -> Self::Output {
4145
self.context.v128_bitmask_op(self.masm, |masm, src, dst| {
4146
masm.v128_bitmask(src, writable!(dst), OperandSize::S32)
4147
})
4148
}
4149
4150
fn visit_i64x2_bitmask(&mut self) -> Self::Output {
4151
self.context.v128_bitmask_op(self.masm, |masm, src, dst| {
4152
masm.v128_bitmask(src, writable!(dst), OperandSize::S64)
4153
})
4154
}
4155
4156
fn visit_i32x4_trunc_sat_f32x4_s(&mut self) -> Self::Output {
4157
self.masm
4158
.v128_trunc(&mut self.context, V128TruncKind::I32x4FromF32x4S)
4159
}
4160
4161
fn visit_i32x4_trunc_sat_f32x4_u(&mut self) -> Self::Output {
4162
self.masm
4163
.v128_trunc(&mut self.context, V128TruncKind::I32x4FromF32x4U)
4164
}
4165
4166
fn visit_i32x4_trunc_sat_f64x2_s_zero(&mut self) -> Self::Output {
4167
self.masm
4168
.v128_trunc(&mut self.context, V128TruncKind::I32x4FromF64x2SZero)
4169
}
4170
4171
fn visit_i32x4_trunc_sat_f64x2_u_zero(&mut self) -> Self::Output {
4172
self.masm
4173
.v128_trunc(&mut self.context, V128TruncKind::I32x4FromF64x2UZero)
4174
}
4175
4176
fn visit_i16x8_min_s(&mut self) -> Self::Output {
4177
self.context
4178
.binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
4179
masm.v128_min(src, dst, writable!(dst), V128MinKind::I16x8S)?;
4180
Ok(TypedReg::v128(dst))
4181
})
4182
}
4183
4184
fn visit_i32x4_dot_i16x8_s(&mut self) -> Self::Output {
4185
self.context
4186
.binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
4187
masm.v128_dot(dst, src, writable!(dst))?;
4188
Ok(TypedReg::v128(dst))
4189
})
4190
}
4191
4192
fn visit_i8x16_popcnt(&mut self) -> Self::Output {
4193
self.masm.v128_popcnt(&mut self.context)
4194
}
4195
4196
fn visit_i8x16_avgr_u(&mut self) -> Self::Output {
4197
self.context
4198
.binop(self.masm, OperandSize::S8, |masm, dst, src, size| {
4199
masm.v128_avgr(dst, src, writable!(dst), size)?;
4200
Ok(TypedReg::v128(dst))
4201
})
4202
}
4203
4204
fn visit_i32x4_min_s(&mut self) -> Self::Output {
4205
self.context
4206
.binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
4207
masm.v128_min(src, dst, writable!(dst), V128MinKind::I32x4S)?;
4208
Ok(TypedReg::v128(dst))
4209
})
4210
}
4211
4212
fn visit_i8x16_min_u(&mut self) -> Self::Output {
4213
self.context
4214
.binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
4215
masm.v128_min(src, dst, writable!(dst), V128MinKind::I8x16U)?;
4216
Ok(TypedReg::v128(dst))
4217
})
4218
}
4219
4220
fn visit_i16x8_avgr_u(&mut self) -> Self::Output {
4221
self.context
4222
.binop(self.masm, OperandSize::S16, |masm, dst, src, size| {
4223
masm.v128_avgr(dst, src, writable!(dst), size)?;
4224
Ok(TypedReg::v128(dst))
4225
})
4226
}
4227
4228
fn visit_i16x8_min_u(&mut self) -> Self::Output {
4229
self.context
4230
.binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
4231
masm.v128_min(src, dst, writable!(dst), V128MinKind::I16x8U)?;
4232
Ok(TypedReg::v128(dst))
4233
})
4234
}
4235
4236
fn visit_i32x4_min_u(&mut self) -> Self::Output {
4237
self.context
4238
.binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
4239
masm.v128_min(src, dst, writable!(dst), V128MinKind::I32x4U)?;
4240
Ok(TypedReg::v128(dst))
4241
})
4242
}
4243
4244
fn visit_i8x16_max_s(&mut self) -> Self::Output {
4245
self.context
4246
.binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
4247
masm.v128_max(src, dst, writable!(dst), V128MaxKind::I8x16S)?;
4248
Ok(TypedReg::v128(dst))
4249
})
4250
}
4251
4252
fn visit_i16x8_max_s(&mut self) -> Self::Output {
4253
self.context
4254
.binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
4255
masm.v128_max(src, dst, writable!(dst), V128MaxKind::I16x8S)?;
4256
Ok(TypedReg::v128(dst))
4257
})
4258
}
4259
4260
fn visit_i32x4_max_s(&mut self) -> Self::Output {
4261
self.context
4262
.binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
4263
masm.v128_max(src, dst, writable!(dst), V128MaxKind::I32x4S)?;
4264
Ok(TypedReg::v128(dst))
4265
})
4266
}
4267
4268
fn visit_i8x16_max_u(&mut self) -> Self::Output {
4269
self.context
4270
.binop(self.masm, OperandSize::S8, |masm, dst, src, _size| {
4271
masm.v128_max(src, dst, writable!(dst), V128MaxKind::I8x16U)?;
4272
Ok(TypedReg::v128(dst))
4273
})
4274
}
4275
4276
fn visit_i16x8_max_u(&mut self) -> Self::Output {
4277
self.context
4278
.binop(self.masm, OperandSize::S16, |masm, dst, src, _size| {
4279
masm.v128_max(src, dst, writable!(dst), V128MaxKind::I16x8U)?;
4280
Ok(TypedReg::v128(dst))
4281
})
4282
}
4283
4284
fn visit_i32x4_max_u(&mut self) -> Self::Output {
4285
self.context
4286
.binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
4287
masm.v128_max(src, dst, writable!(dst), V128MaxKind::I32x4U)?;
4288
Ok(TypedReg::v128(dst))
4289
})
4290
}
4291
4292
fn visit_i16x8_extmul_low_i8x16_s(&mut self) -> Self::Output {
4293
self.masm
4294
.v128_extmul(&mut self.context, V128ExtMulKind::LowI8x16S)
4295
}
4296
4297
fn visit_i32x4_extmul_low_i16x8_s(&mut self) -> Self::Output {
4298
self.masm
4299
.v128_extmul(&mut self.context, V128ExtMulKind::LowI16x8S)
4300
}
4301
4302
fn visit_i64x2_extmul_low_i32x4_s(&mut self) -> Self::Output {
4303
self.masm
4304
.v128_extmul(&mut self.context, V128ExtMulKind::LowI32x4S)
4305
}
4306
4307
fn visit_i16x8_extmul_low_i8x16_u(&mut self) -> Self::Output {
4308
self.masm
4309
.v128_extmul(&mut self.context, V128ExtMulKind::LowI8x16U)
4310
}
4311
4312
fn visit_i32x4_extmul_low_i16x8_u(&mut self) -> Self::Output {
4313
self.masm
4314
.v128_extmul(&mut self.context, V128ExtMulKind::LowI16x8U)
4315
}
4316
4317
fn visit_i64x2_extmul_low_i32x4_u(&mut self) -> Self::Output {
4318
self.masm
4319
.v128_extmul(&mut self.context, V128ExtMulKind::LowI32x4U)
4320
}
4321
4322
fn visit_i16x8_extmul_high_i8x16_u(&mut self) -> Self::Output {
4323
self.masm
4324
.v128_extmul(&mut self.context, V128ExtMulKind::HighI8x16U)
4325
}
4326
4327
fn visit_i32x4_extmul_high_i16x8_u(&mut self) -> Self::Output {
4328
self.masm
4329
.v128_extmul(&mut self.context, V128ExtMulKind::HighI16x8U)
4330
}
4331
4332
fn visit_i64x2_extmul_high_i32x4_u(&mut self) -> Self::Output {
4333
self.masm
4334
.v128_extmul(&mut self.context, V128ExtMulKind::HighI32x4U)
4335
}
4336
4337
fn visit_i16x8_extmul_high_i8x16_s(&mut self) -> Self::Output {
4338
self.masm
4339
.v128_extmul(&mut self.context, V128ExtMulKind::HighI8x16S)
4340
}
4341
4342
fn visit_i32x4_extmul_high_i16x8_s(&mut self) -> Self::Output {
4343
self.masm
4344
.v128_extmul(&mut self.context, V128ExtMulKind::HighI16x8S)
4345
}
4346
4347
fn visit_i64x2_extmul_high_i32x4_s(&mut self) -> Self::Output {
4348
self.masm
4349
.v128_extmul(&mut self.context, V128ExtMulKind::HighI32x4S)
4350
}
4351
4352
fn visit_i16x8_extadd_pairwise_i8x16_s(&mut self) -> Self::Output {
4353
self.context.unop(self.masm, |masm, op| {
4354
masm.v128_extadd_pairwise(op, writable!(op), V128ExtAddKind::I8x16S)?;
4355
Ok(TypedReg::v128(op))
4356
})
4357
}
4358
4359
fn visit_i16x8_extadd_pairwise_i8x16_u(&mut self) -> Self::Output {
4360
self.context.unop(self.masm, |masm, op| {
4361
masm.v128_extadd_pairwise(op, writable!(op), V128ExtAddKind::I8x16U)?;
4362
Ok(TypedReg::v128(op))
4363
})
4364
}
4365
4366
fn visit_i32x4_extadd_pairwise_i16x8_s(&mut self) -> Self::Output {
4367
self.context.unop(self.masm, |masm, op| {
4368
masm.v128_extadd_pairwise(op, writable!(op), V128ExtAddKind::I16x8S)?;
4369
Ok(TypedReg::v128(op))
4370
})
4371
}
4372
4373
fn visit_i32x4_extadd_pairwise_i16x8_u(&mut self) -> Self::Output {
4374
self.context.unop(self.masm, |masm, op| {
4375
masm.v128_extadd_pairwise(op, writable!(op), V128ExtAddKind::I16x8U)?;
4376
Ok(TypedReg::v128(op))
4377
})
4378
}
4379
4380
fn visit_f32x4_add(&mut self) -> Self::Output {
4381
self.context
4382
.binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
4383
masm.v128_add(dst, src, writable!(dst), V128AddKind::F32x4)?;
4384
Ok(TypedReg::v128(dst))
4385
})
4386
}
4387
4388
fn visit_f64x2_add(&mut self) -> Self::Output {
4389
self.context
4390
.binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
4391
masm.v128_add(dst, src, writable!(dst), V128AddKind::F64x2)?;
4392
Ok(TypedReg::v128(dst))
4393
})
4394
}
4395
4396
fn visit_f32x4_sub(&mut self) -> Self::Output {
4397
self.context
4398
.binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
4399
masm.v128_sub(dst, src, writable!(dst), V128SubKind::F32x4)?;
4400
Ok(TypedReg::v128(dst))
4401
})
4402
}
4403
4404
fn visit_f64x2_sub(&mut self) -> Self::Output {
4405
self.context
4406
.binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
4407
masm.v128_sub(dst, src, writable!(dst), V128SubKind::F64x2)?;
4408
Ok(TypedReg::v128(dst))
4409
})
4410
}
4411
4412
fn visit_f32x4_mul(&mut self) -> Self::Output {
4413
self.masm.v128_mul(&mut self.context, V128MulKind::F32x4)
4414
}
4415
4416
fn visit_f64x2_mul(&mut self) -> Self::Output {
4417
self.masm.v128_mul(&mut self.context, V128MulKind::F64x2)
4418
}
4419
4420
fn visit_f32x4_div(&mut self) -> Self::Output {
4421
self.context
4422
.binop(self.masm, OperandSize::S32, |masm, dst, src, size| {
4423
masm.v128_div(dst, src, writable!(dst), size)?;
4424
Ok(TypedReg::v128(dst))
4425
})
4426
}
4427
4428
fn visit_f64x2_div(&mut self) -> Self::Output {
4429
self.context
4430
.binop(self.masm, OperandSize::S64, |masm, dst, src, size| {
4431
masm.v128_div(dst, src, writable!(dst), size)?;
4432
Ok(TypedReg::v128(dst))
4433
})
4434
}
4435
4436
fn visit_f32x4_neg(&mut self) -> Self::Output {
4437
self.context.unop(self.masm, |masm, reg| {
4438
masm.v128_neg(writable!(reg), V128NegKind::F32x4)?;
4439
Ok(TypedReg::v128(reg))
4440
})
4441
}
4442
4443
fn visit_f32x4_ceil(&mut self) -> Self::Output {
4444
self.context.unop(self.masm, |masm, reg| {
4445
masm.v128_ceil(reg, writable!(reg), OperandSize::S32)?;
4446
Ok(TypedReg::v128(reg))
4447
})
4448
}
4449
4450
fn visit_f64x2_neg(&mut self) -> Self::Output {
4451
self.context.unop(self.masm, |masm, reg| {
4452
masm.v128_neg(writable!(reg), V128NegKind::F64x2)?;
4453
Ok(TypedReg::v128(reg))
4454
})
4455
}
4456
4457
fn visit_f64x2_ceil(&mut self) -> Self::Output {
4458
self.context.unop(self.masm, |masm, reg| {
4459
masm.v128_ceil(reg, writable!(reg), OperandSize::S64)?;
4460
Ok(TypedReg::v128(reg))
4461
})
4462
}
4463
4464
fn visit_f32x4_sqrt(&mut self) -> Self::Output {
4465
self.context.unop(self.masm, |masm, reg| {
4466
masm.v128_sqrt(reg, writable!(reg), OperandSize::S32)?;
4467
Ok(TypedReg::v128(reg))
4468
})
4469
}
4470
4471
fn visit_f32x4_floor(&mut self) -> Self::Output {
4472
self.context.unop(self.masm, |masm, reg| {
4473
masm.v128_floor(reg, writable!(reg), OperandSize::S32)?;
4474
Ok(TypedReg::v128(reg))
4475
})
4476
}
4477
4478
fn visit_f64x2_sqrt(&mut self) -> Self::Output {
4479
self.context.unop(self.masm, |masm, reg| {
4480
masm.v128_sqrt(reg, writable!(reg), OperandSize::S64)?;
4481
Ok(TypedReg::v128(reg))
4482
})
4483
}
4484
4485
fn visit_f64x2_floor(&mut self) -> Self::Output {
4486
self.context.unop(self.masm, |masm, reg| {
4487
masm.v128_floor(reg, writable!(reg), OperandSize::S64)?;
4488
Ok(TypedReg::v128(reg))
4489
})
4490
}
4491
4492
fn visit_f32x4_nearest(&mut self) -> Self::Output {
4493
self.context.unop(self.masm, |masm, reg| {
4494
masm.v128_nearest(reg, writable!(reg), OperandSize::S32)?;
4495
Ok(TypedReg::v128(reg))
4496
})
4497
}
4498
4499
fn visit_f64x2_nearest(&mut self) -> Self::Output {
4500
self.context.unop(self.masm, |masm, reg| {
4501
masm.v128_nearest(reg, writable!(reg), OperandSize::S64)?;
4502
Ok(TypedReg::v128(reg))
4503
})
4504
}
4505
4506
fn visit_f32x4_trunc(&mut self) -> Self::Output {
4507
self.masm
4508
.v128_trunc(&mut self.context, V128TruncKind::F32x4)
4509
}
4510
4511
fn visit_f64x2_trunc(&mut self) -> Self::Output {
4512
self.masm
4513
.v128_trunc(&mut self.context, V128TruncKind::F64x2)
4514
}
4515
4516
fn visit_v128_load32_zero(&mut self, memarg: MemArg) -> Self::Output {
4517
self.emit_wasm_load(
4518
&memarg,
4519
WasmValType::V128,
4520
LoadKind::VectorZero(OperandSize::S32),
4521
)
4522
}
4523
4524
fn visit_v128_load64_zero(&mut self, memarg: MemArg) -> Self::Output {
4525
self.emit_wasm_load(
4526
&memarg,
4527
WasmValType::V128,
4528
LoadKind::VectorZero(OperandSize::S64),
4529
)
4530
}
4531
4532
fn visit_f32x4_pmin(&mut self) -> Self::Output {
4533
self.context
4534
.binop(self.masm, OperandSize::S32, |masm, dst, src, size| {
4535
masm.v128_pmin(dst, src, writable!(dst), size)?;
4536
Ok(TypedReg::v128(dst))
4537
})
4538
}
4539
4540
fn visit_f64x2_pmin(&mut self) -> Self::Output {
4541
self.context
4542
.binop(self.masm, OperandSize::S64, |masm, dst, src, size| {
4543
masm.v128_pmin(dst, src, writable!(dst), size)?;
4544
Ok(TypedReg::v128(dst))
4545
})
4546
}
4547
4548
fn visit_f32x4_pmax(&mut self) -> Self::Output {
4549
self.context
4550
.binop(self.masm, OperandSize::S32, |masm, dst, src, size| {
4551
masm.v128_pmax(dst, src, writable!(dst), size)?;
4552
Ok(TypedReg::v128(dst))
4553
})
4554
}
4555
4556
fn visit_f64x2_pmax(&mut self) -> Self::Output {
4557
self.context
4558
.binop(self.masm, OperandSize::S64, |masm, dst, src, size| {
4559
masm.v128_pmax(dst, src, writable!(dst), size)?;
4560
Ok(TypedReg::v128(dst))
4561
})
4562
}
4563
4564
fn visit_f32x4_min(&mut self) -> Self::Output {
4565
self.context
4566
.binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
4567
masm.v128_min(dst, src, writable!(dst), V128MinKind::F32x4)?;
4568
Ok(TypedReg::v128(dst))
4569
})
4570
}
4571
4572
fn visit_f64x2_min(&mut self) -> Self::Output {
4573
self.context
4574
.binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
4575
masm.v128_min(dst, src, writable!(dst), V128MinKind::F64x2)?;
4576
Ok(TypedReg::v128(dst))
4577
})
4578
}
4579
4580
fn visit_f32x4_max(&mut self) -> Self::Output {
4581
self.context
4582
.binop(self.masm, OperandSize::S32, |masm, dst, src, _size| {
4583
masm.v128_max(dst, src, writable!(dst), V128MaxKind::F32x4)?;
4584
Ok(TypedReg::v128(dst))
4585
})
4586
}
4587
4588
fn visit_f64x2_max(&mut self) -> Self::Output {
4589
self.context
4590
.binop(self.masm, OperandSize::S64, |masm, dst, src, _size| {
4591
masm.v128_max(dst, src, writable!(dst), V128MaxKind::F64x2)?;
4592
Ok(TypedReg::v128(dst))
4593
})
4594
}
4595
4596
wasmparser::for_each_visit_simd_operator!(def_unsupported);
4597
}
4598
4599
impl<'a, 'translation, 'data, M> CodeGen<'a, 'translation, 'data, M, Emission>
4600
where
4601
M: MacroAssembler,
4602
{
4603
fn cmp_i32s(&mut self, kind: IntCmpKind) -> Result<()> {
4604
self.context.i32_binop(self.masm, |masm, dst, src, size| {
4605
masm.cmp_with_set(writable!(dst), src, kind, size)?;
4606
Ok(TypedReg::i32(dst))
4607
})
4608
}
4609
4610
fn cmp_i64s(&mut self, kind: IntCmpKind) -> Result<()> {
4611
self.context
4612
.i64_binop(self.masm, move |masm, dst, src, size| {
4613
masm.cmp_with_set(writable!(dst), src, kind, size)?;
4614
Ok(TypedReg::i32(dst)) // Return value for comparisons is an `i32`.
4615
})
4616
}
4617
}
4618
4619
impl TryFrom<WasmValType> for OperandSize {
4620
type Error = anyhow::Error;
4621
fn try_from(ty: WasmValType) -> Result<OperandSize> {
4622
let ty = match ty {
4623
WasmValType::I32 | WasmValType::F32 => OperandSize::S32,
4624
WasmValType::I64 | WasmValType::F64 => OperandSize::S64,
4625
WasmValType::V128 => OperandSize::S128,
4626
WasmValType::Ref(rt) => {
4627
match rt.heap_type {
4628
// TODO: Hardcoded size, assuming 64-bit support only. Once
4629
// Wasmtime supports 32-bit architectures, this will need
4630
// to be updated in such a way that the calculation of the
4631
// OperandSize will depend on the target's pointer size.
4632
WasmHeapType::Func => OperandSize::S64,
4633
WasmHeapType::Extern => OperandSize::S64,
4634
_ => bail!(CodeGenError::unsupported_wasm_type()),
4635
}
4636
}
4637
};
4638
Ok(ty)
4639
}
4640
}
4641
4642