Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
bytecodealliance
GitHub Repository: bytecodealliance/wasmtime
Path: blob/main/cranelift/interpreter/src/step.rs
1692 views
1
//! The [step] function interprets a single Cranelift instruction given its [State] and
2
//! [InstructionContext].
3
use crate::address::{Address, AddressSize};
4
use crate::frame::Frame;
5
use crate::instruction::InstructionContext;
6
use crate::state::{InterpreterFunctionRef, MemoryError, State};
7
use crate::value::{DataValueExt, ValueConversionKind, ValueError, ValueResult};
8
use cranelift_codegen::data_value::DataValue;
9
use cranelift_codegen::ir::condcodes::{FloatCC, IntCC};
10
use cranelift_codegen::ir::{
11
AbiParam, AtomicRmwOp, Block, BlockArg, BlockCall, Endianness, ExternalName, FuncRef, Function,
12
InstructionData, MemFlags, Opcode, TrapCode, Type, Value as ValueRef, types,
13
};
14
use log::trace;
15
use smallvec::{SmallVec, smallvec};
16
use std::fmt::Debug;
17
use std::ops::RangeFrom;
18
use thiserror::Error;
19
20
/// Ensures that all types in args are the same as expected by the signature
21
fn validate_signature_params(sig: &[AbiParam], args: &[DataValue]) -> bool {
22
args.iter()
23
.map(|r| r.ty())
24
.zip(sig.iter().map(|r| r.value_type))
25
.all(|(a, b)| match (a, b) {
26
// For these two cases we don't have precise type information for `a`.
27
// We don't distinguish between different bool types, or different vector types
28
// The actual error is in `Value::ty` that returns default types for some values
29
// but we don't have enough information there either.
30
//
31
// Ideally the user has run the verifier and caught this properly...
32
(a, b) if a.is_vector() && b.is_vector() => true,
33
(a, b) => a == b,
34
})
35
}
36
37
// Helper for summing a sequence of values.
38
fn sum_unsigned(head: DataValue, tail: SmallVec<[DataValue; 1]>) -> ValueResult<u128> {
39
let mut acc = head;
40
for t in tail {
41
acc = DataValueExt::add(acc, t)?;
42
}
43
acc.into_int_unsigned()
44
}
45
46
/// Collect a list of block arguments.
47
fn collect_block_args(
48
frame: &Frame,
49
args: impl Iterator<Item = BlockArg>,
50
) -> SmallVec<[DataValue; 1]> {
51
args.into_iter()
52
.map(|n| match n {
53
BlockArg::Value(n) => frame.get(n).clone(),
54
_ => panic!("exceptions not supported"),
55
})
56
.collect()
57
}
58
59
/// Interpret a single Cranelift instruction. Note that program traps and interpreter errors are
60
/// distinct: a program trap results in `Ok(Flow::Trap(...))` whereas an interpretation error (e.g.
61
/// the types of two values are incompatible) results in `Err(...)`.
62
pub fn step<'a, I>(state: &mut dyn State<'a>, inst_context: I) -> Result<ControlFlow<'a>, StepError>
63
where
64
I: InstructionContext,
65
{
66
let inst = inst_context.data();
67
let ctrl_ty = inst_context.controlling_type().unwrap();
68
trace!(
69
"Step: {}{}",
70
inst.opcode(),
71
if ctrl_ty.is_invalid() {
72
String::new()
73
} else {
74
format!(".{ctrl_ty}")
75
}
76
);
77
78
// The following closures make the `step` implementation much easier to express. Note that they
79
// frequently close over the `state` or `inst_context` for brevity.
80
81
// Retrieve the current value for an instruction argument.
82
let arg = |index: usize| -> DataValue {
83
let value_ref = inst_context.args()[index];
84
state.current_frame().get(value_ref).clone()
85
};
86
87
// Retrieve the current values for all of an instruction's arguments.
88
let args = || -> SmallVec<[DataValue; 1]> { state.collect_values(inst_context.args()) };
89
90
// Retrieve the current values for a range of an instruction's arguments.
91
let args_range = |indexes: RangeFrom<usize>| -> Result<SmallVec<[DataValue; 1]>, StepError> {
92
Ok(SmallVec::<[DataValue; 1]>::from(&args()[indexes]))
93
};
94
95
// Retrieve the immediate value for an instruction, expecting it to exist.
96
let imm = || -> DataValue {
97
match inst {
98
InstructionData::UnaryConst {
99
constant_handle,
100
opcode,
101
} => {
102
let buffer = state
103
.get_current_function()
104
.dfg
105
.constants
106
.get(constant_handle);
107
match (ctrl_ty.bytes(), opcode) {
108
(_, Opcode::F128const) => {
109
DataValue::F128(buffer.try_into().expect("a 16-byte data buffer"))
110
}
111
(16, Opcode::Vconst) => DataValue::V128(
112
buffer.as_slice().try_into().expect("a 16-byte data buffer"),
113
),
114
(8, Opcode::Vconst) => {
115
DataValue::V64(buffer.as_slice().try_into().expect("an 8-byte data buffer"))
116
}
117
(4, Opcode::Vconst) => {
118
DataValue::V32(buffer.as_slice().try_into().expect("a 4-byte data buffer"))
119
}
120
(2, Opcode::Vconst) => {
121
DataValue::V16(buffer.as_slice().try_into().expect("a 2-byte data buffer"))
122
}
123
(length, opcode) => panic!(
124
"unexpected UnaryConst controlling type size {length} for opcode {opcode:?}"
125
),
126
}
127
}
128
InstructionData::Shuffle { imm, .. } => {
129
let mask = state
130
.get_current_function()
131
.dfg
132
.immediates
133
.get(imm)
134
.unwrap()
135
.as_slice();
136
match mask.len() {
137
16 => DataValue::V128(mask.try_into().expect("a 16-byte vector mask")),
138
8 => DataValue::V64(mask.try_into().expect("an 8-byte vector mask")),
139
4 => DataValue::V32(mask.try_into().expect("a 4-byte vector mask")),
140
2 => DataValue::V16(mask.try_into().expect("a 2-byte vector mask")),
141
length => panic!("unexpected Shuffle mask length {length}"),
142
}
143
}
144
// 8-bit.
145
InstructionData::BinaryImm8 { imm, .. } | InstructionData::TernaryImm8 { imm, .. } => {
146
DataValue::from(imm as i8) // Note the switch from unsigned to signed.
147
}
148
// 16-bit
149
InstructionData::UnaryIeee16 { imm, .. } => DataValue::from(imm),
150
// 32-bit
151
InstructionData::UnaryIeee32 { imm, .. } => DataValue::from(imm),
152
InstructionData::Load { offset, .. }
153
| InstructionData::Store { offset, .. }
154
| InstructionData::StackLoad { offset, .. }
155
| InstructionData::StackStore { offset, .. } => DataValue::from(offset),
156
// 64-bit.
157
InstructionData::UnaryImm { imm, .. }
158
| InstructionData::BinaryImm64 { imm, .. }
159
| InstructionData::IntCompareImm { imm, .. } => DataValue::from(imm.bits()),
160
InstructionData::UnaryIeee64 { imm, .. } => DataValue::from(imm),
161
_ => unreachable!(),
162
}
163
};
164
165
// Retrieve the immediate value for an instruction and convert it to the controlling type of the
166
// instruction. For example, since `InstructionData` stores all integer immediates in a 64-bit
167
// size, this will attempt to convert `iconst.i8 ...` to an 8-bit size.
168
let imm_as_ctrl_ty = || -> Result<DataValue, ValueError> {
169
DataValue::convert(imm(), ValueConversionKind::Exact(ctrl_ty))
170
};
171
172
// Indicate that the result of a step is to assign a single value to an instruction's results.
173
let assign = |value: DataValue| ControlFlow::Assign(smallvec![value]);
174
175
// Indicate that the result of a step is to assign multiple values to an instruction's results.
176
let assign_multiple = |values: &[DataValue]| ControlFlow::Assign(SmallVec::from(values));
177
178
// Similar to `assign` but converts some errors into traps
179
let assign_or_trap = |value: ValueResult<DataValue>| match value {
180
Ok(v) => Ok(assign(v)),
181
Err(ValueError::IntegerDivisionByZero) => Ok(ControlFlow::Trap(CraneliftTrap::User(
182
TrapCode::INTEGER_DIVISION_BY_ZERO,
183
))),
184
Err(ValueError::IntegerOverflow) => Ok(ControlFlow::Trap(CraneliftTrap::User(
185
TrapCode::INTEGER_OVERFLOW,
186
))),
187
Err(e) => Err(e),
188
};
189
190
let memerror_to_trap = |e: MemoryError| match e {
191
MemoryError::InvalidAddress(_)
192
| MemoryError::InvalidAddressType(_)
193
| MemoryError::InvalidOffset { .. }
194
| MemoryError::InvalidEntry { .. } => CraneliftTrap::User(TrapCode::HEAP_OUT_OF_BOUNDS),
195
MemoryError::OutOfBoundsStore { mem_flags, .. }
196
| MemoryError::OutOfBoundsLoad { mem_flags, .. } => CraneliftTrap::User(
197
mem_flags
198
.trap_code()
199
.expect("op with notrap flag should not trap"),
200
),
201
MemoryError::MisalignedLoad { .. } => CraneliftTrap::HeapMisaligned,
202
MemoryError::MisalignedStore { .. } => CraneliftTrap::HeapMisaligned,
203
};
204
205
// Assigns or traps depending on the value of the result
206
let assign_or_memtrap = |res| match res {
207
Ok(v) => assign(v),
208
Err(e) => ControlFlow::Trap(memerror_to_trap(e)),
209
};
210
211
// Continues or traps depending on the value of the result
212
let continue_or_memtrap = |res| match res {
213
Ok(_) => ControlFlow::Continue,
214
Err(e) => ControlFlow::Trap(memerror_to_trap(e)),
215
};
216
217
let calculate_addr =
218
|addr_ty: Type, imm: DataValue, args: SmallVec<[DataValue; 1]>| -> ValueResult<u64> {
219
let imm = imm.convert(ValueConversionKind::ZeroExtend(addr_ty))?;
220
let args = args
221
.into_iter()
222
.map(|v| v.convert(ValueConversionKind::ZeroExtend(addr_ty)))
223
.collect::<ValueResult<SmallVec<[DataValue; 1]>>>()?;
224
225
Ok(sum_unsigned(imm, args)? as u64)
226
};
227
228
// Interpret a unary instruction with the given `op`, assigning the resulting value to the
229
// instruction's results.
230
let unary =
231
|op: fn(DataValue) -> ValueResult<DataValue>, arg: DataValue| -> ValueResult<ControlFlow> {
232
let ctrl_ty = inst_context.controlling_type().unwrap();
233
let res = unary_arith(arg, ctrl_ty, op)?;
234
Ok(assign(res))
235
};
236
237
// Interpret a binary instruction with the given `op`, assigning the resulting value to the
238
// instruction's results.
239
let binary = |op: fn(DataValue, DataValue) -> ValueResult<DataValue>,
240
left: DataValue,
241
right: DataValue|
242
-> ValueResult<ControlFlow> {
243
let ctrl_ty = inst_context.controlling_type().unwrap();
244
let res = binary_arith(left, right, ctrl_ty, op)?;
245
Ok(assign(res))
246
};
247
248
// Similar to `binary` but converts select `ValueError`'s into trap `ControlFlow`'s
249
let binary_can_trap = |op: fn(DataValue, DataValue) -> ValueResult<DataValue>,
250
left: DataValue,
251
right: DataValue|
252
-> ValueResult<ControlFlow> {
253
let ctrl_ty = inst_context.controlling_type().unwrap();
254
let res = binary_arith(left, right, ctrl_ty, op);
255
assign_or_trap(res)
256
};
257
258
// Choose whether to assign `left` or `right` to the instruction's result based on a `condition`.
259
let choose = |condition: bool, left: DataValue, right: DataValue| -> ControlFlow {
260
assign(if condition { left } else { right })
261
};
262
263
// Retrieve an instruction's branch destination; expects the instruction to be a branch.
264
265
let continue_at = |block: BlockCall| {
266
let branch_args = collect_block_args(
267
state.current_frame(),
268
block.args(&state.get_current_function().dfg.value_lists),
269
);
270
Ok(ControlFlow::ContinueAt(
271
block.block(&state.get_current_function().dfg.value_lists),
272
branch_args,
273
))
274
};
275
276
// Based on `condition`, indicate where to continue the control flow.
277
#[expect(unused_variables, reason = "here in case it's needed in the future")]
278
let branch_when = |condition: bool, block| -> Result<ControlFlow, StepError> {
279
if condition {
280
continue_at(block)
281
} else {
282
Ok(ControlFlow::Continue)
283
}
284
};
285
286
// Retrieve an instruction's trap code; expects the instruction to be a trap.
287
let trap_code = || -> TrapCode { inst.trap_code().unwrap() };
288
289
// Based on `condition`, either trap or not.
290
let trap_when = |condition: bool, trap: CraneliftTrap| -> ControlFlow {
291
if condition {
292
ControlFlow::Trap(trap)
293
} else {
294
ControlFlow::Continue
295
}
296
};
297
298
// Calls a function reference with the given arguments.
299
let call_func =
300
|func_ref: InterpreterFunctionRef<'a>,
301
args: SmallVec<[DataValue; 1]>,
302
make_ctrl_flow: fn(&'a Function, SmallVec<[DataValue; 1]>) -> ControlFlow<'a>|
303
-> Result<ControlFlow<'a>, StepError> {
304
let signature = func_ref.signature();
305
306
// Check the types of the arguments. This is usually done by the verifier, but nothing
307
// guarantees that the user has ran that.
308
let args_match = validate_signature_params(&signature.params[..], &args[..]);
309
if !args_match {
310
return Ok(ControlFlow::Trap(CraneliftTrap::BadSignature));
311
}
312
313
Ok(match func_ref {
314
InterpreterFunctionRef::Function(func) => make_ctrl_flow(func, args),
315
InterpreterFunctionRef::LibCall(libcall) => {
316
debug_assert!(
317
!matches!(
318
inst.opcode(),
319
Opcode::ReturnCall | Opcode::ReturnCallIndirect,
320
),
321
"Cannot tail call to libcalls"
322
);
323
let libcall_handler = state.get_libcall_handler();
324
325
// We don't transfer control to a libcall, we just execute it and return the results
326
let res = libcall_handler(libcall, args);
327
let res = match res {
328
Err(trap) => return Ok(ControlFlow::Trap(trap)),
329
Ok(rets) => rets,
330
};
331
332
// Check that what the handler returned is what we expect.
333
if validate_signature_params(&signature.returns[..], &res[..]) {
334
ControlFlow::Assign(res)
335
} else {
336
ControlFlow::Trap(CraneliftTrap::BadSignature)
337
}
338
}
339
})
340
};
341
342
// Interpret a Cranelift instruction.
343
Ok(match inst.opcode() {
344
Opcode::Jump => {
345
if let InstructionData::Jump { destination, .. } = inst {
346
continue_at(destination)?
347
} else {
348
unreachable!()
349
}
350
}
351
Opcode::Brif => {
352
if let InstructionData::Brif {
353
arg,
354
blocks: [block_then, block_else],
355
..
356
} = inst
357
{
358
let arg = state.current_frame().get(arg).clone();
359
360
let condition = arg.convert(ValueConversionKind::ToBoolean)?.into_bool()?;
361
362
if condition {
363
continue_at(block_then)?
364
} else {
365
continue_at(block_else)?
366
}
367
} else {
368
unreachable!()
369
}
370
}
371
Opcode::BrTable => {
372
if let InstructionData::BranchTable { table, .. } = inst {
373
let jt_data = &state.get_current_function().stencil.dfg.jump_tables[table];
374
375
// Convert to usize to remove negative indexes from the following operations
376
let jump_target = usize::try_from(arg(0).into_int_unsigned()?)
377
.ok()
378
.and_then(|i| jt_data.as_slice().get(i))
379
.copied()
380
.unwrap_or(jt_data.default_block());
381
382
continue_at(jump_target)?
383
} else {
384
unreachable!()
385
}
386
}
387
Opcode::Trap => ControlFlow::Trap(CraneliftTrap::User(trap_code())),
388
Opcode::Debugtrap => ControlFlow::Trap(CraneliftTrap::Debug),
389
Opcode::Trapz => trap_when(!arg(0).into_bool()?, CraneliftTrap::User(trap_code())),
390
Opcode::Trapnz => trap_when(arg(0).into_bool()?, CraneliftTrap::User(trap_code())),
391
Opcode::Return => ControlFlow::Return(args()),
392
Opcode::Call | Opcode::ReturnCall => {
393
let func_ref = if let InstructionData::Call { func_ref, .. } = inst {
394
func_ref
395
} else {
396
unreachable!()
397
};
398
399
let curr_func = state.get_current_function();
400
let ext_data = curr_func
401
.dfg
402
.ext_funcs
403
.get(func_ref)
404
.ok_or(StepError::UnknownFunction(func_ref))?;
405
406
let args = args();
407
let func = match ext_data.name {
408
// These functions should be registered in the regular function store
409
ExternalName::User(_) | ExternalName::TestCase(_) => {
410
let function = state
411
.get_function(func_ref)
412
.ok_or(StepError::UnknownFunction(func_ref))?;
413
InterpreterFunctionRef::Function(function)
414
}
415
ExternalName::LibCall(libcall) => InterpreterFunctionRef::LibCall(libcall),
416
ExternalName::KnownSymbol(_) => unimplemented!(),
417
};
418
419
let make_control_flow = match inst.opcode() {
420
Opcode::Call => ControlFlow::Call,
421
Opcode::ReturnCall => ControlFlow::ReturnCall,
422
_ => unreachable!(),
423
};
424
425
call_func(func, args, make_control_flow)?
426
}
427
Opcode::CallIndirect | Opcode::ReturnCallIndirect => {
428
let args = args();
429
let addr_dv = DataValue::I64(arg(0).into_int_unsigned()? as i64);
430
let addr = Address::try_from(addr_dv.clone()).map_err(StepError::MemoryError)?;
431
432
let func = state
433
.get_function_from_address(addr)
434
.ok_or_else(|| StepError::MemoryError(MemoryError::InvalidAddress(addr_dv)))?;
435
436
let call_args: SmallVec<[DataValue; 1]> = SmallVec::from(&args[1..]);
437
438
let make_control_flow = match inst.opcode() {
439
Opcode::CallIndirect => ControlFlow::Call,
440
Opcode::ReturnCallIndirect => ControlFlow::ReturnCall,
441
_ => unreachable!(),
442
};
443
444
call_func(func, call_args, make_control_flow)?
445
}
446
Opcode::FuncAddr => {
447
let func_ref = if let InstructionData::FuncAddr { func_ref, .. } = inst {
448
func_ref
449
} else {
450
unreachable!()
451
};
452
453
let ext_data = state
454
.get_current_function()
455
.dfg
456
.ext_funcs
457
.get(func_ref)
458
.ok_or(StepError::UnknownFunction(func_ref))?;
459
460
let addr_ty = inst_context.controlling_type().unwrap();
461
assign_or_memtrap({
462
AddressSize::try_from(addr_ty).and_then(|addr_size| {
463
let addr = state.function_address(addr_size, &ext_data.name)?;
464
let dv = DataValue::try_from(addr)?;
465
Ok(dv)
466
})
467
})
468
}
469
Opcode::Load
470
| Opcode::Uload8
471
| Opcode::Sload8
472
| Opcode::Uload16
473
| Opcode::Sload16
474
| Opcode::Uload32
475
| Opcode::Sload32
476
| Opcode::Uload8x8
477
| Opcode::Sload8x8
478
| Opcode::Uload16x4
479
| Opcode::Sload16x4
480
| Opcode::Uload32x2
481
| Opcode::Sload32x2 => {
482
let ctrl_ty = inst_context.controlling_type().unwrap();
483
let (load_ty, kind) = match inst.opcode() {
484
Opcode::Load => (ctrl_ty, None),
485
Opcode::Uload8 => (types::I8, Some(ValueConversionKind::ZeroExtend(ctrl_ty))),
486
Opcode::Sload8 => (types::I8, Some(ValueConversionKind::SignExtend(ctrl_ty))),
487
Opcode::Uload16 => (types::I16, Some(ValueConversionKind::ZeroExtend(ctrl_ty))),
488
Opcode::Sload16 => (types::I16, Some(ValueConversionKind::SignExtend(ctrl_ty))),
489
Opcode::Uload32 => (types::I32, Some(ValueConversionKind::ZeroExtend(ctrl_ty))),
490
Opcode::Sload32 => (types::I32, Some(ValueConversionKind::SignExtend(ctrl_ty))),
491
Opcode::Uload8x8
492
| Opcode::Sload8x8
493
| Opcode::Uload16x4
494
| Opcode::Sload16x4
495
| Opcode::Uload32x2
496
| Opcode::Sload32x2 => unimplemented!(),
497
_ => unreachable!(),
498
};
499
500
let addr_value = calculate_addr(types::I64, imm(), args())?;
501
let mem_flags = inst.memflags().expect("instruction to have memory flags");
502
let loaded = assign_or_memtrap(
503
Address::try_from(addr_value)
504
.and_then(|addr| state.checked_load(addr, load_ty, mem_flags)),
505
);
506
507
match (loaded, kind) {
508
(ControlFlow::Assign(ret), Some(c)) => ControlFlow::Assign(
509
ret.into_iter()
510
.map(|loaded| loaded.convert(c.clone()))
511
.collect::<ValueResult<SmallVec<[DataValue; 1]>>>()?,
512
),
513
(cf, _) => cf,
514
}
515
}
516
Opcode::Store | Opcode::Istore8 | Opcode::Istore16 | Opcode::Istore32 => {
517
let kind = match inst.opcode() {
518
Opcode::Store => None,
519
Opcode::Istore8 => Some(ValueConversionKind::Truncate(types::I8)),
520
Opcode::Istore16 => Some(ValueConversionKind::Truncate(types::I16)),
521
Opcode::Istore32 => Some(ValueConversionKind::Truncate(types::I32)),
522
_ => unreachable!(),
523
};
524
525
let addr_value = calculate_addr(types::I64, imm(), args_range(1..)?)?;
526
let mem_flags = inst.memflags().expect("instruction to have memory flags");
527
let reduced = if let Some(c) = kind {
528
arg(0).convert(c)?
529
} else {
530
arg(0)
531
};
532
continue_or_memtrap(
533
Address::try_from(addr_value)
534
.and_then(|addr| state.checked_store(addr, reduced, mem_flags)),
535
)
536
}
537
Opcode::StackLoad => {
538
let load_ty = inst_context.controlling_type().unwrap();
539
let slot = inst.stack_slot().unwrap();
540
let offset = sum_unsigned(imm(), args())? as u64;
541
let mem_flags = MemFlags::new();
542
assign_or_memtrap({
543
state
544
.stack_address(AddressSize::_64, slot, offset)
545
.and_then(|addr| state.checked_load(addr, load_ty, mem_flags))
546
})
547
}
548
Opcode::StackStore => {
549
let arg = arg(0);
550
let slot = inst.stack_slot().unwrap();
551
let offset = sum_unsigned(imm(), args_range(1..)?)? as u64;
552
let mem_flags = MemFlags::new();
553
continue_or_memtrap({
554
state
555
.stack_address(AddressSize::_64, slot, offset)
556
.and_then(|addr| state.checked_store(addr, arg, mem_flags))
557
})
558
}
559
Opcode::StackAddr => {
560
let load_ty = inst_context.controlling_type().unwrap();
561
let slot = inst.stack_slot().unwrap();
562
let offset = sum_unsigned(imm(), args())? as u64;
563
assign_or_memtrap({
564
AddressSize::try_from(load_ty).and_then(|addr_size| {
565
let addr = state.stack_address(addr_size, slot, offset)?;
566
let dv = DataValue::try_from(addr)?;
567
Ok(dv)
568
})
569
})
570
}
571
Opcode::DynamicStackAddr => unimplemented!("DynamicStackSlot"),
572
Opcode::DynamicStackLoad => unimplemented!("DynamicStackLoad"),
573
Opcode::DynamicStackStore => unimplemented!("DynamicStackStore"),
574
Opcode::GlobalValue | Opcode::SymbolValue | Opcode::TlsValue => {
575
if let InstructionData::UnaryGlobalValue { global_value, .. } = inst {
576
assign_or_memtrap(state.resolve_global_value(global_value))
577
} else {
578
unreachable!()
579
}
580
}
581
Opcode::GetPinnedReg => assign(state.get_pinned_reg()),
582
Opcode::SetPinnedReg => {
583
let arg0 = arg(0);
584
state.set_pinned_reg(arg0);
585
ControlFlow::Continue
586
}
587
Opcode::Iconst => assign(DataValueExt::int(imm().into_int_signed()?, ctrl_ty)?),
588
Opcode::F16const => assign(imm()),
589
Opcode::F32const => assign(imm()),
590
Opcode::F64const => assign(imm()),
591
Opcode::F128const => assign(imm()),
592
Opcode::Vconst => assign(imm()),
593
Opcode::Nop => ControlFlow::Continue,
594
Opcode::Select | Opcode::SelectSpectreGuard => choose(arg(0).into_bool()?, arg(1), arg(2)),
595
Opcode::Bitselect => assign(bitselect(arg(0), arg(1), arg(2))?),
596
Opcode::Icmp => assign(icmp(ctrl_ty, inst.cond_code().unwrap(), &arg(0), &arg(1))?),
597
Opcode::IcmpImm => assign(icmp(
598
ctrl_ty,
599
inst.cond_code().unwrap(),
600
&arg(0),
601
&imm_as_ctrl_ty()?,
602
)?),
603
Opcode::Smin => {
604
if ctrl_ty.is_vector() {
605
let icmp = icmp(ctrl_ty, IntCC::SignedGreaterThan, &arg(1), &arg(0))?;
606
assign(bitselect(icmp, arg(0), arg(1))?)
607
} else {
608
assign(arg(0).smin(arg(1))?)
609
}
610
}
611
Opcode::Umin => {
612
if ctrl_ty.is_vector() {
613
let icmp = icmp(ctrl_ty, IntCC::UnsignedGreaterThan, &arg(1), &arg(0))?;
614
assign(bitselect(icmp, arg(0), arg(1))?)
615
} else {
616
assign(arg(0).umin(arg(1))?)
617
}
618
}
619
Opcode::Smax => {
620
if ctrl_ty.is_vector() {
621
let icmp = icmp(ctrl_ty, IntCC::SignedGreaterThan, &arg(0), &arg(1))?;
622
assign(bitselect(icmp, arg(0), arg(1))?)
623
} else {
624
assign(arg(0).smax(arg(1))?)
625
}
626
}
627
Opcode::Umax => {
628
if ctrl_ty.is_vector() {
629
let icmp = icmp(ctrl_ty, IntCC::UnsignedGreaterThan, &arg(0), &arg(1))?;
630
assign(bitselect(icmp, arg(0), arg(1))?)
631
} else {
632
assign(arg(0).umax(arg(1))?)
633
}
634
}
635
Opcode::AvgRound => {
636
let sum = DataValueExt::add(arg(0), arg(1))?;
637
let one = DataValueExt::int(1, arg(0).ty())?;
638
let inc = DataValueExt::add(sum, one)?;
639
let two = DataValueExt::int(2, arg(0).ty())?;
640
binary(DataValueExt::udiv, inc, two)?
641
}
642
Opcode::Iadd => binary(DataValueExt::add, arg(0), arg(1))?,
643
Opcode::UaddSat => assign(binary_arith(
644
arg(0),
645
arg(1),
646
ctrl_ty,
647
DataValueExt::uadd_sat,
648
)?),
649
Opcode::SaddSat => assign(binary_arith(
650
arg(0),
651
arg(1),
652
ctrl_ty,
653
DataValueExt::sadd_sat,
654
)?),
655
Opcode::Isub => binary(DataValueExt::sub, arg(0), arg(1))?,
656
Opcode::UsubSat => assign(binary_arith(
657
arg(0),
658
arg(1),
659
ctrl_ty,
660
DataValueExt::usub_sat,
661
)?),
662
Opcode::SsubSat => assign(binary_arith(
663
arg(0),
664
arg(1),
665
ctrl_ty,
666
DataValueExt::ssub_sat,
667
)?),
668
Opcode::Ineg => binary(DataValueExt::sub, DataValueExt::int(0, ctrl_ty)?, arg(0))?,
669
Opcode::Iabs => {
670
let (min_val, _) = ctrl_ty.lane_type().bounds(true);
671
let min_val: DataValue = DataValueExt::int(min_val as i128, ctrl_ty.lane_type())?;
672
let arg0 = extractlanes(&arg(0), ctrl_ty)?;
673
let new_vec = arg0
674
.into_iter()
675
.map(|lane| {
676
if lane == min_val {
677
Ok(min_val.clone())
678
} else {
679
DataValueExt::int(lane.into_int_signed()?.abs(), ctrl_ty.lane_type())
680
}
681
})
682
.collect::<ValueResult<SimdVec<DataValue>>>()?;
683
assign(vectorizelanes(&new_vec, ctrl_ty)?)
684
}
685
Opcode::Imul => binary(DataValueExt::mul, arg(0), arg(1))?,
686
Opcode::Umulhi | Opcode::Smulhi => {
687
let double_length = match ctrl_ty.lane_bits() {
688
8 => types::I16,
689
16 => types::I32,
690
32 => types::I64,
691
64 => types::I128,
692
_ => unimplemented!("Unsupported integer length {}", ctrl_ty.bits()),
693
};
694
let conv_type = if inst.opcode() == Opcode::Umulhi {
695
ValueConversionKind::ZeroExtend(double_length)
696
} else {
697
ValueConversionKind::SignExtend(double_length)
698
};
699
let arg0 = extractlanes(&arg(0), ctrl_ty)?;
700
let arg1 = extractlanes(&arg(1), ctrl_ty)?;
701
702
let res = arg0
703
.into_iter()
704
.zip(arg1)
705
.map(|(x, y)| {
706
let x = x.convert(conv_type.clone())?;
707
let y = y.convert(conv_type.clone())?;
708
709
Ok(DataValueExt::mul(x, y)?
710
.convert(ValueConversionKind::ExtractUpper(ctrl_ty.lane_type()))?)
711
})
712
.collect::<ValueResult<SimdVec<DataValue>>>()?;
713
714
assign(vectorizelanes(&res, ctrl_ty)?)
715
}
716
Opcode::Udiv => binary_can_trap(DataValueExt::udiv, arg(0), arg(1))?,
717
Opcode::Sdiv => binary_can_trap(DataValueExt::sdiv, arg(0), arg(1))?,
718
Opcode::Urem => binary_can_trap(DataValueExt::urem, arg(0), arg(1))?,
719
Opcode::Srem => binary_can_trap(DataValueExt::srem, arg(0), arg(1))?,
720
Opcode::IaddImm => binary(DataValueExt::add, arg(0), imm_as_ctrl_ty()?)?,
721
Opcode::ImulImm => binary(DataValueExt::mul, arg(0), imm_as_ctrl_ty()?)?,
722
Opcode::UdivImm => binary_can_trap(DataValueExt::udiv, arg(0), imm_as_ctrl_ty()?)?,
723
Opcode::SdivImm => binary_can_trap(DataValueExt::sdiv, arg(0), imm_as_ctrl_ty()?)?,
724
Opcode::UremImm => binary_can_trap(DataValueExt::urem, arg(0), imm_as_ctrl_ty()?)?,
725
Opcode::SremImm => binary_can_trap(DataValueExt::srem, arg(0), imm_as_ctrl_ty()?)?,
726
Opcode::IrsubImm => binary(DataValueExt::sub, imm_as_ctrl_ty()?, arg(0))?,
727
Opcode::UaddOverflow => {
728
let (sum, carry) = arg(0).uadd_overflow(arg(1))?;
729
assign_multiple(&[sum, DataValueExt::bool(carry, false, types::I8)?])
730
}
731
Opcode::SaddOverflow => {
732
let (sum, carry) = arg(0).sadd_overflow(arg(1))?;
733
assign_multiple(&[sum, DataValueExt::bool(carry, false, types::I8)?])
734
}
735
Opcode::UsubOverflow => {
736
let (sum, carry) = arg(0).usub_overflow(arg(1))?;
737
assign_multiple(&[sum, DataValueExt::bool(carry, false, types::I8)?])
738
}
739
Opcode::SsubOverflow => {
740
let (sum, carry) = arg(0).ssub_overflow(arg(1))?;
741
assign_multiple(&[sum, DataValueExt::bool(carry, false, types::I8)?])
742
}
743
Opcode::UmulOverflow => {
744
let (sum, carry) = arg(0).umul_overflow(arg(1))?;
745
assign_multiple(&[sum, DataValueExt::bool(carry, false, types::I8)?])
746
}
747
Opcode::SmulOverflow => {
748
let (sum, carry) = arg(0).smul_overflow(arg(1))?;
749
assign_multiple(&[sum, DataValueExt::bool(carry, false, types::I8)?])
750
}
751
Opcode::SaddOverflowCin => {
752
let (mut sum, mut carry) = arg(0).sadd_overflow(arg(1))?;
753
754
if DataValueExt::into_bool(arg(2))? {
755
let (sum2, carry2) = sum.sadd_overflow(DataValueExt::int(1, ctrl_ty)?)?;
756
carry |= carry2;
757
sum = sum2;
758
}
759
760
assign_multiple(&[sum, DataValueExt::bool(carry, false, types::I8)?])
761
}
762
Opcode::UaddOverflowCin => {
763
let (mut sum, mut carry) = arg(0).uadd_overflow(arg(1))?;
764
765
if DataValueExt::into_bool(arg(2))? {
766
let (sum2, carry2) = sum.uadd_overflow(DataValueExt::int(1, ctrl_ty)?)?;
767
carry |= carry2;
768
sum = sum2;
769
}
770
771
assign_multiple(&[sum, DataValueExt::bool(carry, false, types::I8)?])
772
}
773
Opcode::UaddOverflowTrap => {
774
if let Some(sum) = DataValueExt::uadd_checked(arg(0), arg(1))? {
775
assign(sum)
776
} else {
777
ControlFlow::Trap(CraneliftTrap::User(trap_code()))
778
}
779
}
780
Opcode::SsubOverflowBin => {
781
let (mut sub, mut carry) = arg(0).ssub_overflow(arg(1))?;
782
783
if DataValueExt::into_bool(arg(2))? {
784
let (sub2, carry2) = sub.ssub_overflow(DataValueExt::int(1, ctrl_ty)?)?;
785
carry |= carry2;
786
sub = sub2;
787
}
788
789
assign_multiple(&[sub, DataValueExt::bool(carry, false, types::I8)?])
790
}
791
Opcode::UsubOverflowBin => {
792
let (mut sub, mut carry) = arg(0).usub_overflow(arg(1))?;
793
794
if DataValueExt::into_bool(arg(2))? {
795
let (sub2, carry2) = sub.usub_overflow(DataValueExt::int(1, ctrl_ty)?)?;
796
carry |= carry2;
797
sub = sub2;
798
}
799
800
assign_multiple(&[sub, DataValueExt::bool(carry, false, types::I8)?])
801
}
802
Opcode::Band => binary(DataValueExt::and, arg(0), arg(1))?,
803
Opcode::Bor => binary(DataValueExt::or, arg(0), arg(1))?,
804
Opcode::Bxor => binary(DataValueExt::xor, arg(0), arg(1))?,
805
Opcode::Bnot => unary(DataValueExt::not, arg(0))?,
806
Opcode::BandNot => binary(DataValueExt::and, arg(0), DataValueExt::not(arg(1))?)?,
807
Opcode::BorNot => binary(DataValueExt::or, arg(0), DataValueExt::not(arg(1))?)?,
808
Opcode::BxorNot => binary(DataValueExt::xor, arg(0), DataValueExt::not(arg(1))?)?,
809
Opcode::BandImm => binary(DataValueExt::and, arg(0), imm_as_ctrl_ty()?)?,
810
Opcode::BorImm => binary(DataValueExt::or, arg(0), imm_as_ctrl_ty()?)?,
811
Opcode::BxorImm => binary(DataValueExt::xor, arg(0), imm_as_ctrl_ty()?)?,
812
Opcode::Rotl => binary(DataValueExt::rotl, arg(0), shift_amt(ctrl_ty, arg(1))?)?,
813
Opcode::Rotr => binary(DataValueExt::rotr, arg(0), shift_amt(ctrl_ty, arg(1))?)?,
814
Opcode::RotlImm => binary(DataValueExt::rotl, arg(0), shift_amt(ctrl_ty, imm())?)?,
815
Opcode::RotrImm => binary(DataValueExt::rotr, arg(0), shift_amt(ctrl_ty, imm())?)?,
816
Opcode::Ishl => binary(DataValueExt::shl, arg(0), shift_amt(ctrl_ty, arg(1))?)?,
817
Opcode::Ushr => binary(DataValueExt::ushr, arg(0), shift_amt(ctrl_ty, arg(1))?)?,
818
Opcode::Sshr => binary(DataValueExt::sshr, arg(0), shift_amt(ctrl_ty, arg(1))?)?,
819
Opcode::IshlImm => binary(DataValueExt::shl, arg(0), shift_amt(ctrl_ty, imm())?)?,
820
Opcode::UshrImm => binary(DataValueExt::ushr, arg(0), shift_amt(ctrl_ty, imm())?)?,
821
Opcode::SshrImm => binary(DataValueExt::sshr, arg(0), shift_amt(ctrl_ty, imm())?)?,
822
Opcode::Bitrev => unary(DataValueExt::reverse_bits, arg(0))?,
823
Opcode::Bswap => unary(DataValueExt::swap_bytes, arg(0))?,
824
Opcode::Clz => unary(DataValueExt::leading_zeros, arg(0))?,
825
Opcode::Cls => {
826
let count = if arg(0) < DataValueExt::int(0, ctrl_ty)? {
827
arg(0).leading_ones()?
828
} else {
829
arg(0).leading_zeros()?
830
};
831
assign(DataValueExt::sub(count, DataValueExt::int(1, ctrl_ty)?)?)
832
}
833
Opcode::Ctz => unary(DataValueExt::trailing_zeros, arg(0))?,
834
Opcode::Popcnt => {
835
let count = if arg(0).ty().is_int() {
836
arg(0).count_ones()?
837
} else {
838
let lanes = extractlanes(&arg(0), ctrl_ty)?
839
.into_iter()
840
.map(|lane| lane.count_ones())
841
.collect::<ValueResult<SimdVec<DataValue>>>()?;
842
vectorizelanes(&lanes, ctrl_ty)?
843
};
844
assign(count)
845
}
846
847
Opcode::Fcmp => {
848
let arg0 = extractlanes(&arg(0), ctrl_ty)?;
849
let arg1 = extractlanes(&arg(1), ctrl_ty)?;
850
851
assign(vectorizelanes(
852
&(arg0
853
.into_iter()
854
.zip(arg1.into_iter())
855
.map(|(x, y)| {
856
DataValue::bool(
857
fcmp(inst.fp_cond_code().unwrap(), &x, &y).unwrap(),
858
ctrl_ty.is_vector(),
859
ctrl_ty.lane_type().as_truthy(),
860
)
861
})
862
.collect::<ValueResult<SimdVec<DataValue>>>()?),
863
ctrl_ty,
864
)?)
865
}
866
Opcode::Fadd => binary(DataValueExt::add, arg(0), arg(1))?,
867
Opcode::Fsub => binary(DataValueExt::sub, arg(0), arg(1))?,
868
Opcode::Fmul => binary(DataValueExt::mul, arg(0), arg(1))?,
869
Opcode::Fdiv => binary(DataValueExt::sdiv, arg(0), arg(1))?,
870
Opcode::Sqrt => unary(DataValueExt::sqrt, arg(0))?,
871
Opcode::Fma => {
872
let arg0 = extractlanes(&arg(0), ctrl_ty)?;
873
let arg1 = extractlanes(&arg(1), ctrl_ty)?;
874
let arg2 = extractlanes(&arg(2), ctrl_ty)?;
875
876
assign(vectorizelanes(
877
&(arg0
878
.into_iter()
879
.zip(arg1.into_iter())
880
.zip(arg2.into_iter())
881
.map(|((x, y), z)| DataValueExt::fma(x, y, z))
882
.collect::<ValueResult<SimdVec<DataValue>>>()?),
883
ctrl_ty,
884
)?)
885
}
886
Opcode::Fneg => unary(DataValueExt::neg, arg(0))?,
887
Opcode::Fabs => unary(DataValueExt::abs, arg(0))?,
888
Opcode::Fcopysign => binary(DataValueExt::copysign, arg(0), arg(1))?,
889
Opcode::Fmin => assign(match (arg(0), arg(1)) {
890
(a, _) if a.is_nan()? => a,
891
(_, b) if b.is_nan()? => b,
892
(a, b) if a.is_zero()? && b.is_zero()? && a.is_negative()? => a,
893
(a, b) if a.is_zero()? && b.is_zero()? && b.is_negative()? => b,
894
(a, b) => a.smin(b)?,
895
}),
896
Opcode::Fmax => assign(match (arg(0), arg(1)) {
897
(a, _) if a.is_nan()? => a,
898
(_, b) if b.is_nan()? => b,
899
(a, b) if a.is_zero()? && b.is_zero()? && a.is_negative()? => b,
900
(a, b) if a.is_zero()? && b.is_zero()? && b.is_negative()? => a,
901
(a, b) => a.smax(b)?,
902
}),
903
Opcode::Ceil => unary(DataValueExt::ceil, arg(0))?,
904
Opcode::Floor => unary(DataValueExt::floor, arg(0))?,
905
Opcode::Trunc => unary(DataValueExt::trunc, arg(0))?,
906
Opcode::Nearest => unary(DataValueExt::nearest, arg(0))?,
907
Opcode::Bitcast | Opcode::ScalarToVector => {
908
let input_ty = inst_context.type_of(inst_context.args()[0]).unwrap();
909
let lanes = &if input_ty.is_vector() {
910
assert_eq!(
911
inst.memflags()
912
.expect("byte order flag to be set")
913
.endianness(Endianness::Little),
914
Endianness::Little,
915
"Only little endian bitcasts on vectors are supported"
916
);
917
extractlanes(&arg(0), ctrl_ty)?
918
} else {
919
extractlanes(&arg(0), input_ty)?
920
.into_iter()
921
.map(|x| DataValue::convert(x, ValueConversionKind::Exact(ctrl_ty.lane_type())))
922
.collect::<ValueResult<SimdVec<DataValue>>>()?
923
};
924
assign(match inst.opcode() {
925
Opcode::Bitcast => vectorizelanes(lanes, ctrl_ty)?,
926
Opcode::ScalarToVector => vectorizelanes_all(lanes, ctrl_ty)?,
927
_ => unreachable!(),
928
})
929
}
930
Opcode::Ireduce => assign(DataValueExt::convert(
931
arg(0),
932
ValueConversionKind::Truncate(ctrl_ty),
933
)?),
934
Opcode::Snarrow | Opcode::Unarrow | Opcode::Uunarrow => {
935
let arg0 = extractlanes(&arg(0), ctrl_ty)?;
936
let arg1 = extractlanes(&arg(1), ctrl_ty)?;
937
let new_type = ctrl_ty.split_lanes().unwrap();
938
let (min, max) = new_type.bounds(inst.opcode() == Opcode::Snarrow);
939
let min: DataValue = DataValueExt::int(min as i128, ctrl_ty.lane_type())?;
940
let max: DataValue = DataValueExt::int(max as i128, ctrl_ty.lane_type())?;
941
let narrow = |mut lane: DataValue| -> ValueResult<DataValue> {
942
if inst.opcode() == Opcode::Uunarrow {
943
lane = DataValueExt::umax(lane, min.clone())?;
944
lane = DataValueExt::umin(lane, max.clone())?;
945
} else {
946
lane = DataValueExt::smax(lane, min.clone())?;
947
lane = DataValueExt::smin(lane, max.clone())?;
948
}
949
lane = lane.convert(ValueConversionKind::Truncate(new_type.lane_type()))?;
950
Ok(lane)
951
};
952
let new_vec = arg0
953
.into_iter()
954
.chain(arg1)
955
.map(|lane| narrow(lane))
956
.collect::<ValueResult<Vec<_>>>()?;
957
assign(vectorizelanes(&new_vec, new_type)?)
958
}
959
Opcode::Bmask => assign({
960
let bool = arg(0);
961
let bool_ty = ctrl_ty.as_truthy_pedantic();
962
let lanes = extractlanes(&bool, bool_ty)?
963
.into_iter()
964
.map(|lane| lane.convert(ValueConversionKind::Mask(ctrl_ty.lane_type())))
965
.collect::<ValueResult<SimdVec<DataValue>>>()?;
966
vectorizelanes(&lanes, ctrl_ty)?
967
}),
968
Opcode::Sextend => assign(DataValueExt::convert(
969
arg(0),
970
ValueConversionKind::SignExtend(ctrl_ty),
971
)?),
972
Opcode::Uextend => assign(DataValueExt::convert(
973
arg(0),
974
ValueConversionKind::ZeroExtend(ctrl_ty),
975
)?),
976
Opcode::Fpromote => assign(DataValueExt::convert(
977
arg(0),
978
ValueConversionKind::Exact(ctrl_ty),
979
)?),
980
Opcode::Fdemote => assign(DataValueExt::convert(
981
arg(0),
982
ValueConversionKind::RoundNearestEven(ctrl_ty),
983
)?),
984
Opcode::Shuffle => {
985
let mask = imm().into_array()?;
986
let a = DataValueExt::into_array(&arg(0))?;
987
let b = DataValueExt::into_array(&arg(1))?;
988
let mut new = [0u8; 16];
989
for i in 0..mask.len() {
990
if (mask[i] as usize) < a.len() {
991
new[i] = a[mask[i] as usize];
992
} else if (mask[i] as usize - a.len()) < b.len() {
993
new[i] = b[mask[i] as usize - a.len()];
994
} // else leave as 0.
995
}
996
assign(DataValueExt::vector(new, types::I8X16)?)
997
}
998
Opcode::Swizzle => {
999
let x = DataValueExt::into_array(&arg(0))?;
1000
let s = DataValueExt::into_array(&arg(1))?;
1001
let mut new = [0u8; 16];
1002
for i in 0..new.len() {
1003
if (s[i] as usize) < new.len() {
1004
new[i] = x[s[i] as usize];
1005
} // else leave as 0
1006
}
1007
assign(DataValueExt::vector(new, types::I8X16)?)
1008
}
1009
Opcode::Splat => assign(splat(ctrl_ty, arg(0))?),
1010
Opcode::Insertlane => {
1011
let idx = imm().into_int_unsigned()? as usize;
1012
let mut vector = extractlanes(&arg(0), ctrl_ty)?;
1013
vector[idx] = arg(1);
1014
assign(vectorizelanes(&vector, ctrl_ty)?)
1015
}
1016
Opcode::Extractlane => {
1017
let idx = imm().into_int_unsigned()? as usize;
1018
let lanes = extractlanes(&arg(0), ctrl_ty)?;
1019
assign(lanes[idx].clone())
1020
}
1021
Opcode::VhighBits => {
1022
// `ctrl_ty` controls the return type for this, so the input type
1023
// must be retrieved via `inst_context`.
1024
let vector_type = inst_context
1025
.type_of(inst_context.args()[0])
1026
.unwrap()
1027
.as_int();
1028
let a = extractlanes(&arg(0), vector_type)?;
1029
let mut result: u128 = 0;
1030
for (i, val) in a.into_iter().enumerate() {
1031
let val = val.reverse_bits()?.into_int_unsigned()?; // MSB -> LSB
1032
result |= (val & 1) << i;
1033
}
1034
assign(DataValueExt::int(result as i128, ctrl_ty)?)
1035
}
1036
Opcode::VanyTrue => {
1037
let simd_ty = ctrl_ty.as_int();
1038
let lane_ty = simd_ty.lane_type();
1039
let init = DataValue::bool(false, true, lane_ty)?;
1040
let any = fold_vector(arg(0), simd_ty, init.clone(), |acc, lane| acc.or(lane))?;
1041
assign(DataValue::bool(any != init, false, types::I8)?)
1042
}
1043
Opcode::VallTrue => assign(DataValue::bool(
1044
!(arg(0)
1045
.iter_lanes(ctrl_ty.as_int())?
1046
.try_fold(false, |acc, lane| {
1047
Ok::<bool, ValueError>(acc | lane.is_zero()?)
1048
})?),
1049
false,
1050
types::I8,
1051
)?),
1052
Opcode::SwidenLow | Opcode::SwidenHigh | Opcode::UwidenLow | Opcode::UwidenHigh => {
1053
let new_type = ctrl_ty.merge_lanes().unwrap();
1054
let conv_type = match inst.opcode() {
1055
Opcode::SwidenLow | Opcode::SwidenHigh => {
1056
ValueConversionKind::SignExtend(new_type.lane_type())
1057
}
1058
Opcode::UwidenLow | Opcode::UwidenHigh => {
1059
ValueConversionKind::ZeroExtend(new_type.lane_type())
1060
}
1061
_ => unreachable!(),
1062
};
1063
let vec_iter = extractlanes(&arg(0), ctrl_ty)?.into_iter();
1064
let new_vec = match inst.opcode() {
1065
Opcode::SwidenLow | Opcode::UwidenLow => vec_iter
1066
.take(new_type.lane_count() as usize)
1067
.map(|lane| lane.convert(conv_type.clone()))
1068
.collect::<ValueResult<Vec<_>>>()?,
1069
Opcode::SwidenHigh | Opcode::UwidenHigh => vec_iter
1070
.skip(new_type.lane_count() as usize)
1071
.map(|lane| lane.convert(conv_type.clone()))
1072
.collect::<ValueResult<Vec<_>>>()?,
1073
_ => unreachable!(),
1074
};
1075
assign(vectorizelanes(&new_vec, new_type)?)
1076
}
1077
Opcode::FcvtToUint | Opcode::FcvtToSint => {
1078
// NaN check
1079
if arg(0).is_nan()? {
1080
return Ok(ControlFlow::Trap(CraneliftTrap::User(
1081
TrapCode::BAD_CONVERSION_TO_INTEGER,
1082
)));
1083
}
1084
let x = arg(0).into_float()? as i128;
1085
let is_signed = inst.opcode() == Opcode::FcvtToSint;
1086
let (min, max) = ctrl_ty.bounds(is_signed);
1087
let overflow = if is_signed {
1088
x < (min as i128) || x > (max as i128)
1089
} else {
1090
x < 0 || (x as u128) > max
1091
};
1092
// bounds check
1093
if overflow {
1094
return Ok(ControlFlow::Trap(CraneliftTrap::User(
1095
TrapCode::INTEGER_OVERFLOW,
1096
)));
1097
}
1098
// perform the conversion.
1099
assign(DataValueExt::int(x, ctrl_ty)?)
1100
}
1101
Opcode::FcvtToUintSat | Opcode::FcvtToSintSat => {
1102
let in_ty = inst_context.type_of(inst_context.args()[0]).unwrap();
1103
let cvt = |x: DataValue| -> ValueResult<DataValue> {
1104
// NaN check
1105
if x.is_nan()? {
1106
DataValue::int(0, ctrl_ty.lane_type())
1107
} else {
1108
let is_signed = inst.opcode() == Opcode::FcvtToSintSat;
1109
let (min, max) = ctrl_ty.bounds(is_signed);
1110
let x = x.into_float()? as i128;
1111
let x = if is_signed {
1112
let x = i128::max(x, min as i128);
1113
let x = i128::min(x, max as i128);
1114
x
1115
} else {
1116
let x = if x < 0 { 0 } else { x };
1117
let x = u128::min(x as u128, max);
1118
x as i128
1119
};
1120
1121
DataValue::int(x, ctrl_ty.lane_type())
1122
}
1123
};
1124
1125
let x = extractlanes(&arg(0), in_ty)?;
1126
1127
assign(vectorizelanes(
1128
&x.into_iter()
1129
.map(cvt)
1130
.collect::<ValueResult<SimdVec<DataValue>>>()?,
1131
ctrl_ty,
1132
)?)
1133
}
1134
Opcode::FcvtFromUint | Opcode::FcvtFromSint => {
1135
let x = extractlanes(
1136
&arg(0),
1137
inst_context.type_of(inst_context.args()[0]).unwrap(),
1138
)?;
1139
let bits = |x: DataValue| -> ValueResult<u64> {
1140
Ok(match ctrl_ty.lane_type() {
1141
types::F32 => (if inst.opcode() == Opcode::FcvtFromUint {
1142
x.into_int_unsigned()? as f32
1143
} else {
1144
x.into_int_signed()? as f32
1145
})
1146
.to_bits() as u64,
1147
types::F64 => (if inst.opcode() == Opcode::FcvtFromUint {
1148
x.into_int_unsigned()? as f64
1149
} else {
1150
x.into_int_signed()? as f64
1151
})
1152
.to_bits(),
1153
_ => unimplemented!("unexpected conversion to {:?}", ctrl_ty.lane_type()),
1154
})
1155
};
1156
assign(vectorizelanes(
1157
&x.into_iter()
1158
.map(|x| DataValue::float(bits(x)?, ctrl_ty.lane_type()))
1159
.collect::<ValueResult<SimdVec<DataValue>>>()?,
1160
ctrl_ty,
1161
)?)
1162
}
1163
Opcode::FvpromoteLow => {
1164
let in_ty = inst_context.type_of(inst_context.args()[0]).unwrap();
1165
assert_eq!(in_ty, types::F32X4);
1166
let out_ty = types::F64X2;
1167
let x = extractlanes(&arg(0), in_ty)?;
1168
assign(vectorizelanes(
1169
&x[..(out_ty.lane_count() as usize)]
1170
.into_iter()
1171
.map(|x| {
1172
DataValue::convert(
1173
x.to_owned(),
1174
ValueConversionKind::Exact(out_ty.lane_type()),
1175
)
1176
})
1177
.collect::<ValueResult<SimdVec<DataValue>>>()?,
1178
out_ty,
1179
)?)
1180
}
1181
Opcode::Fvdemote => {
1182
let in_ty = inst_context.type_of(inst_context.args()[0]).unwrap();
1183
assert_eq!(in_ty, types::F64X2);
1184
let out_ty = types::F32X4;
1185
let x = extractlanes(&arg(0), in_ty)?;
1186
let x = &mut x
1187
.into_iter()
1188
.map(|x| {
1189
DataValue::convert(x, ValueConversionKind::RoundNearestEven(out_ty.lane_type()))
1190
})
1191
.collect::<ValueResult<SimdVec<DataValue>>>()?;
1192
// zero the high bits.
1193
for _ in 0..(out_ty.lane_count() as usize - x.len()) {
1194
x.push(DataValue::float(0, out_ty.lane_type())?);
1195
}
1196
assign(vectorizelanes(x, out_ty)?)
1197
}
1198
Opcode::Isplit => assign_multiple(&[
1199
DataValueExt::convert(arg(0), ValueConversionKind::Truncate(types::I64))?,
1200
DataValueExt::convert(arg(0), ValueConversionKind::ExtractUpper(types::I64))?,
1201
]),
1202
Opcode::Iconcat => assign(DataValueExt::concat(arg(0), arg(1))?),
1203
Opcode::AtomicRmw => {
1204
let op = inst.atomic_rmw_op().unwrap();
1205
let val = arg(1);
1206
let addr = arg(0).into_int_unsigned()? as u64;
1207
let mem_flags = inst.memflags().expect("instruction to have memory flags");
1208
let loaded = Address::try_from(addr)
1209
.and_then(|addr| state.checked_load(addr, ctrl_ty, mem_flags));
1210
let prev_val = match loaded {
1211
Ok(v) => v,
1212
Err(e) => return Ok(ControlFlow::Trap(memerror_to_trap(e))),
1213
};
1214
let prev_val_to_assign = prev_val.clone();
1215
let replace = match op {
1216
AtomicRmwOp::Xchg => Ok(val),
1217
AtomicRmwOp::Add => DataValueExt::add(prev_val, val),
1218
AtomicRmwOp::Sub => DataValueExt::sub(prev_val, val),
1219
AtomicRmwOp::And => DataValueExt::and(prev_val, val),
1220
AtomicRmwOp::Or => DataValueExt::or(prev_val, val),
1221
AtomicRmwOp::Xor => DataValueExt::xor(prev_val, val),
1222
AtomicRmwOp::Nand => DataValueExt::and(prev_val, val).and_then(DataValue::not),
1223
AtomicRmwOp::Smax => DataValueExt::smax(prev_val, val),
1224
AtomicRmwOp::Smin => DataValueExt::smin(prev_val, val),
1225
AtomicRmwOp::Umax => DataValueExt::umax(val, prev_val),
1226
AtomicRmwOp::Umin => DataValueExt::umin(val, prev_val),
1227
}?;
1228
let stored = Address::try_from(addr)
1229
.and_then(|addr| state.checked_store(addr, replace, mem_flags));
1230
assign_or_memtrap(stored.map(|_| prev_val_to_assign))
1231
}
1232
Opcode::AtomicCas => {
1233
let addr = arg(0).into_int_unsigned()? as u64;
1234
let mem_flags = inst.memflags().expect("instruction to have memory flags");
1235
let loaded = Address::try_from(addr)
1236
.and_then(|addr| state.checked_load(addr, ctrl_ty, mem_flags));
1237
let loaded_val = match loaded {
1238
Ok(v) => v,
1239
Err(e) => return Ok(ControlFlow::Trap(memerror_to_trap(e))),
1240
};
1241
let expected_val = arg(1);
1242
let val_to_assign = if loaded_val == expected_val {
1243
let val_to_store = arg(2);
1244
Address::try_from(addr)
1245
.and_then(|addr| state.checked_store(addr, val_to_store, mem_flags))
1246
.map(|_| loaded_val)
1247
} else {
1248
Ok(loaded_val)
1249
};
1250
assign_or_memtrap(val_to_assign)
1251
}
1252
Opcode::AtomicLoad => {
1253
let load_ty = inst_context.controlling_type().unwrap();
1254
let addr = arg(0).into_int_unsigned()? as u64;
1255
let mem_flags = inst.memflags().expect("instruction to have memory flags");
1256
// We are doing a regular load here, this isn't actually thread safe.
1257
assign_or_memtrap(
1258
Address::try_from(addr)
1259
.and_then(|addr| state.checked_load(addr, load_ty, mem_flags)),
1260
)
1261
}
1262
Opcode::AtomicStore => {
1263
let val = arg(0);
1264
let addr = arg(1).into_int_unsigned()? as u64;
1265
let mem_flags = inst.memflags().expect("instruction to have memory flags");
1266
// We are doing a regular store here, this isn't actually thread safe.
1267
continue_or_memtrap(
1268
Address::try_from(addr).and_then(|addr| state.checked_store(addr, val, mem_flags)),
1269
)
1270
}
1271
Opcode::Fence => {
1272
// The interpreter always runs in a single threaded context, so we don't
1273
// actually need to emit a fence here.
1274
ControlFlow::Continue
1275
}
1276
Opcode::SqmulRoundSat => {
1277
let lane_type = ctrl_ty.lane_type();
1278
let double_width = ctrl_ty.double_width().unwrap().lane_type();
1279
let arg0 = extractlanes(&arg(0), ctrl_ty)?;
1280
let arg1 = extractlanes(&arg(1), ctrl_ty)?;
1281
let (min, max) = lane_type.bounds(true);
1282
let min: DataValue = DataValueExt::int(min as i128, double_width)?;
1283
let max: DataValue = DataValueExt::int(max as i128, double_width)?;
1284
let new_vec = arg0
1285
.into_iter()
1286
.zip(arg1.into_iter())
1287
.map(|(x, y)| {
1288
let x = x.into_int_signed()?;
1289
let y = y.into_int_signed()?;
1290
// temporarily double width of the value to avoid overflow.
1291
let z: DataValue = DataValueExt::int(
1292
(x * y + (1 << (lane_type.bits() - 2))) >> (lane_type.bits() - 1),
1293
double_width,
1294
)?;
1295
// check bounds, saturate, and truncate to correct width.
1296
let z = DataValueExt::smin(z, max.clone())?;
1297
let z = DataValueExt::smax(z, min.clone())?;
1298
let z = z.convert(ValueConversionKind::Truncate(lane_type))?;
1299
Ok(z)
1300
})
1301
.collect::<ValueResult<SimdVec<_>>>()?;
1302
assign(vectorizelanes(&new_vec, ctrl_ty)?)
1303
}
1304
Opcode::IaddPairwise => {
1305
assign(binary_pairwise(arg(0), arg(1), ctrl_ty, DataValueExt::add)?)
1306
}
1307
Opcode::ExtractVector => {
1308
unimplemented!("ExtractVector not supported");
1309
}
1310
Opcode::GetFramePointer => unimplemented!("GetFramePointer"),
1311
Opcode::GetStackPointer => unimplemented!("GetStackPointer"),
1312
Opcode::GetReturnAddress => unimplemented!("GetReturnAddress"),
1313
Opcode::X86Pshufb => unimplemented!("X86Pshufb"),
1314
Opcode::X86Blendv => unimplemented!("X86Blendv"),
1315
Opcode::X86Pmulhrsw => unimplemented!("X86Pmulhrsw"),
1316
Opcode::X86Pmaddubsw => unimplemented!("X86Pmaddubsw"),
1317
Opcode::X86Cvtt2dq => unimplemented!("X86Cvtt2dq"),
1318
Opcode::StackSwitch => unimplemented!("StackSwitch"),
1319
1320
Opcode::TryCall => unimplemented!("TryCall"),
1321
Opcode::TryCallIndirect => unimplemented!("TryCallIndirect"),
1322
})
1323
}
1324
1325
/// Errors encountered while executing a single interpreter step.
///
/// These are failures of the interpreter machinery itself, as opposed to
/// traps raised by the interpreted program (see [CraneliftTrap]).
#[derive(Error, Debug)]
pub enum StepError {
    #[error("unable to retrieve value from SSA reference: {0}")]
    UnknownValue(ValueRef),
    #[error("unable to find the following function: {0}")]
    UnknownFunction(FuncRef),
    #[error("cannot step with these values")]
    ValueError(#[from] ValueError),
    #[error("failed to access memory")]
    MemoryError(#[from] MemoryError),
}
1336
1337
/// Enumerate the ways in which the control flow can change based on a single step in a Cranelift
/// interpreter.
///
/// The `'a` lifetime ties the `Call`/`ReturnCall` variants to the [Function] they borrow.
#[derive(Debug, PartialEq)]
pub enum ControlFlow<'a> {
    /// Return one or more values from an instruction to be assigned to a left-hand side, e.g.:
    /// in `v0 = iadd v1, v2`, the sum of `v1` and `v2` is assigned to `v0`.
    Assign(SmallVec<[DataValue; 1]>),
    /// Continue to the next available instruction, e.g.: in `nop`, we expect to resume execution
    /// at the instruction after it.
    Continue,
    /// Jump to another block with the given parameters, e.g.: in
    /// `brif v0, block42(v1, v2), block97`, if the condition is true, we continue execution at the
    /// first instruction of `block42` with the values in `v1` and `v2` filling in the block
    /// parameters.
    ContinueAt(Block, SmallVec<[DataValue; 1]>),
    /// Indicates a call the given [Function] with the supplied arguments.
    Call(&'a Function, SmallVec<[DataValue; 1]>),
    /// Indicates a tail call to the given [Function] with the supplied arguments.
    ReturnCall(&'a Function, SmallVec<[DataValue; 1]>),
    /// Return from the current function with the given parameters, e.g.: `return [v1, v2]`.
    Return(SmallVec<[DataValue; 1]>),
    /// Stop with a program-generated trap; note that these are distinct from errors that may occur
    /// during interpretation.
    Trap(CraneliftTrap),
}
1362
1363
/// Traps that can be raised while interpreting a Cranelift function; each
/// variant's `Display` text comes from its `#[error]` attribute.
#[derive(Error, Debug, PartialEq, Eq, Hash)]
pub enum CraneliftTrap {
    #[error("user code: {0}")]
    User(TrapCode),
    #[error("bad signature")]
    BadSignature,
    #[error("unreachable code has been reached")]
    UnreachableCodeReached,
    #[error("heap is misaligned")]
    HeapMisaligned,
    #[error("user debug")]
    Debug,
}
1376
1377
/// Compare two values using the given integer condition `code`.
///
/// Scalars are treated as single-lane vectors: both operands are split into
/// lanes via `extractlanes`, compared lane-wise, and reassembled into a value
/// of the "truthy" counterpart of `ctrl_ty`.
fn icmp(
    ctrl_ty: types::Type,
    code: IntCC,
    left: &DataValue,
    right: &DataValue,
) -> ValueResult<DataValue> {
    // Compare one pair of lanes, producing a boolean of type `bool_ty`.
    // Signed orderings use `DataValue`'s comparison operators directly;
    // unsigned orderings first reinterpret both sides as unsigned integers.
    let cmp = |bool_ty: types::Type,
               code: IntCC,
               left: &DataValue,
               right: &DataValue|
     -> ValueResult<DataValue> {
        Ok(DataValueExt::bool(
            match code {
                IntCC::Equal => left == right,
                IntCC::NotEqual => left != right,
                IntCC::SignedGreaterThan => left > right,
                IntCC::SignedGreaterThanOrEqual => left >= right,
                IntCC::SignedLessThan => left < right,
                IntCC::SignedLessThanOrEqual => left <= right,
                IntCC::UnsignedGreaterThan => {
                    left.clone().into_int_unsigned()? > right.clone().into_int_unsigned()?
                }
                IntCC::UnsignedGreaterThanOrEqual => {
                    left.clone().into_int_unsigned()? >= right.clone().into_int_unsigned()?
                }
                IntCC::UnsignedLessThan => {
                    left.clone().into_int_unsigned()? < right.clone().into_int_unsigned()?
                }
                IntCC::UnsignedLessThanOrEqual => {
                    left.clone().into_int_unsigned()? <= right.clone().into_int_unsigned()?
                }
            },
            ctrl_ty.is_vector(),
            bool_ty,
        )?)
    };

    // The result lanes have the truthy type corresponding to the input type.
    let dst_ty = ctrl_ty.as_truthy();
    let left = extractlanes(left, ctrl_ty)?;
    let right = extractlanes(right, ctrl_ty)?;

    let res = left
        .into_iter()
        .zip(right.into_iter())
        .map(|(l, r)| cmp(dst_ty.lane_type(), code, &l, &r))
        .collect::<ValueResult<SimdVec<DataValue>>>()?;

    Ok(vectorizelanes(&res, dst_ty)?)
}
1427
1428
/// Compare two values using the given floating point condition `code`.
///
/// `DataValueExt::uno` reports the "unordered" relation (see the `FloatCC`
/// documentation); the composite conditions below are built from the basic
/// `PartialEq`/`PartialOrd` operators combined with `uno`.
fn fcmp(code: FloatCC, left: &DataValue, right: &DataValue) -> ValueResult<bool> {
    Ok(match code {
        // Ordered: some ordering relation holds between the operands.
        FloatCC::Ordered => left == right || left < right || left > right,
        FloatCC::Unordered => DataValueExt::uno(left, right)?,
        FloatCC::Equal => left == right,
        // NotEqual includes the unordered case, unlike OrderedNotEqual.
        FloatCC::NotEqual => left < right || left > right || DataValueExt::uno(left, right)?,
        FloatCC::OrderedNotEqual => left < right || left > right,
        FloatCC::UnorderedOrEqual => left == right || DataValueExt::uno(left, right)?,
        FloatCC::LessThan => left < right,
        FloatCC::LessThanOrEqual => left <= right,
        FloatCC::GreaterThan => left > right,
        FloatCC::GreaterThanOrEqual => left >= right,
        FloatCC::UnorderedOrLessThan => DataValueExt::uno(left, right)? || left < right,
        FloatCC::UnorderedOrLessThanOrEqual => DataValueExt::uno(left, right)? || left <= right,
        FloatCC::UnorderedOrGreaterThan => DataValueExt::uno(left, right)? || left > right,
        FloatCC::UnorderedOrGreaterThanOrEqual => DataValueExt::uno(left, right)? || left >= right,
    })
}
1447
1448
/// Lane storage used when scattering/gathering SIMD lanes; the inline
/// capacity of 4 avoids heap allocation for small lane counts.
pub type SimdVec<DataValue> = SmallVec<[DataValue; 4]>;
1449
1450
/// Converts a SIMD vector value into a Rust array of [Value] for processing.
1451
/// If `x` is a scalar, it will be returned as a single-element array.
1452
pub(crate) fn extractlanes(
1453
x: &DataValue,
1454
vector_type: types::Type,
1455
) -> ValueResult<SimdVec<DataValue>> {
1456
let lane_type = vector_type.lane_type();
1457
let mut lanes = SimdVec::new();
1458
// Wrap scalar values as a single-element vector and return.
1459
if !x.ty().is_vector() {
1460
lanes.push(x.clone());
1461
return Ok(lanes);
1462
}
1463
1464
let iterations = match lane_type {
1465
types::I8 => 1,
1466
types::I16 | types::F16 => 2,
1467
types::I32 | types::F32 => 4,
1468
types::I64 | types::F64 => 8,
1469
_ => unimplemented!("vectors with lanes wider than 64-bits are currently unsupported."),
1470
};
1471
1472
let x = x.into_array()?;
1473
for i in 0..vector_type.lane_count() {
1474
let mut lane: i128 = 0;
1475
for j in 0..iterations {
1476
lane += (x[((i * iterations) + j) as usize] as i128) << (8 * j);
1477
}
1478
1479
let lane_val: DataValue = if lane_type.is_float() {
1480
DataValueExt::float(lane as u64, lane_type)?
1481
} else {
1482
DataValueExt::int(lane, lane_type)?
1483
};
1484
lanes.push(lane_val);
1485
}
1486
return Ok(lanes);
1487
}
1488
1489
/// Convert a Rust array of [Value] back into a `Value::vector`.
1490
/// Supplying a single-element array will simply return its contained value.
1491
fn vectorizelanes(x: &[DataValue], vector_type: types::Type) -> ValueResult<DataValue> {
1492
// If the array is only one element, return it as a scalar.
1493
if x.len() == 1 {
1494
Ok(x[0].clone())
1495
} else {
1496
vectorizelanes_all(x, vector_type)
1497
}
1498
}
1499
1500
/// Convert a Rust array of [Value] back into a `Value::vector`.
fn vectorizelanes_all(x: &[DataValue], vector_type: types::Type) -> ValueResult<DataValue> {
    let lane_type = vector_type.lane_type();
    // Number of bytes occupied by a single lane of this vector.
    let iterations = match lane_type {
        types::I8 => 1,
        types::I16 | types::F16 => 2,
        types::I32 | types::F32 => 4,
        types::I64 | types::F64 => 8,
        _ => unimplemented!("vectors with lanes wider than 64-bits are currently unsupported."),
    };
    // Pack the lanes little-endian into a 16-byte buffer; unused high
    // bytes remain zero.
    let mut result: [u8; 16] = [0; 16];
    for (i, val) in x.iter().enumerate() {
        // Reinterpret each lane as its integer bit pattern so float lanes
        // can be serialized byte-by-byte as well.
        let lane_val: i128 = val
            .clone()
            .convert(ValueConversionKind::Exact(lane_type.as_int()))?
            .into_int_unsigned()? as i128;

        for j in 0..iterations {
            result[(i * iterations) + j] = (lane_val >> (8 * j)) as u8;
        }
    }
    DataValueExt::vector(result, vector_type)
}
1523
1524
/// Performs a lanewise fold on a vector type
1525
fn fold_vector<F>(v: DataValue, ty: types::Type, init: DataValue, op: F) -> ValueResult<DataValue>
1526
where
1527
F: FnMut(DataValue, DataValue) -> ValueResult<DataValue>,
1528
{
1529
extractlanes(&v, ty)?.into_iter().try_fold(init, op)
1530
}
1531
1532
/// Performs the supplied unary arithmetic `op` on a Value, either Vector or Scalar.
1533
fn unary_arith<F>(x: DataValue, vector_type: types::Type, op: F) -> ValueResult<DataValue>
1534
where
1535
F: Fn(DataValue) -> ValueResult<DataValue>,
1536
{
1537
let arg = extractlanes(&x, vector_type)?;
1538
1539
let result = arg
1540
.into_iter()
1541
.map(|arg| Ok(op(arg)?))
1542
.collect::<ValueResult<SimdVec<DataValue>>>()?;
1543
1544
vectorizelanes(&result, vector_type)
1545
}
1546
1547
/// Performs the supplied binary arithmetic `op` on two values, either vector or scalar.
1548
fn binary_arith<F>(
1549
x: DataValue,
1550
y: DataValue,
1551
vector_type: types::Type,
1552
op: F,
1553
) -> ValueResult<DataValue>
1554
where
1555
F: Fn(DataValue, DataValue) -> ValueResult<DataValue>,
1556
{
1557
let arg0 = extractlanes(&x, vector_type)?;
1558
let arg1 = extractlanes(&y, vector_type)?;
1559
1560
let result = arg0
1561
.into_iter()
1562
.zip(arg1)
1563
.map(|(lhs, rhs)| Ok(op(lhs, rhs)?))
1564
.collect::<ValueResult<SimdVec<DataValue>>>()?;
1565
1566
vectorizelanes(&result, vector_type)
1567
}
1568
1569
/// Performs the supplied pairwise arithmetic `op` on two SIMD vectors, where
1570
/// pairs are formed from adjacent vector elements and the vectors are
1571
/// concatenated at the end.
1572
fn binary_pairwise<F>(
1573
x: DataValue,
1574
y: DataValue,
1575
vector_type: types::Type,
1576
op: F,
1577
) -> ValueResult<DataValue>
1578
where
1579
F: Fn(DataValue, DataValue) -> ValueResult<DataValue>,
1580
{
1581
let arg0 = extractlanes(&x, vector_type)?;
1582
let arg1 = extractlanes(&y, vector_type)?;
1583
1584
let result = arg0
1585
.chunks(2)
1586
.chain(arg1.chunks(2))
1587
.map(|pair| op(pair[0].clone(), pair[1].clone()))
1588
.collect::<ValueResult<SimdVec<DataValue>>>()?;
1589
1590
vectorizelanes(&result, vector_type)
1591
}
1592
1593
/// Bitwise select: bits set in `c` choose the corresponding bit from `x`,
/// cleared bits choose the bit from `y`.
fn bitselect(c: DataValue, x: DataValue, y: DataValue) -> ValueResult<DataValue> {
    let inverted_mask = DataValueExt::not(c.clone())?;
    let from_x = DataValueExt::and(c, x)?;
    let from_y = DataValueExt::and(inverted_mask, y)?;
    DataValueExt::or(from_x, from_y)
}
1598
1599
fn splat(ty: Type, val: DataValue) -> ValueResult<DataValue> {
1600
let mut new_vector = SimdVec::new();
1601
for _ in 0..ty.lane_count() {
1602
new_vector.push(val.clone());
1603
}
1604
vectorizelanes(&new_vector, ty)
1605
}
1606
1607
// Prepares the shift amount for a shift/rotate operation.
1608
// The shift amount must be the same type and have the same number of lanes as the vector.
1609
fn shift_amt(ty: Type, val: DataValue) -> ValueResult<DataValue> {
1610
splat(ty, val.convert(ValueConversionKind::Exact(ty.lane_type()))?)
1611
}
1612
1613