//! The pulley bytecode for fast interpreters.

#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(pulley_tail_calls, feature(explicit_tail_calls))]
#![cfg_attr(pulley_tail_calls, allow(incomplete_features, unstable_features))]
#![deny(missing_docs)]
#![no_std]

#[cfg(feature = "std")]
#[macro_use]
extern crate std;

#[cfg(feature = "decode")]
extern crate alloc;

/// Calls the given macro with each opcode.
///
/// # Instruction Guidelines
///
/// We're inventing an instruction set here which naturally brings a whole set
/// of design questions. Note that this is explicitly intended to be only ever
/// used for Pulley where there is a different set of design constraints than
/// for other instruction sets (e.g. general-purpose CPU ISAs). Some examples
/// of constraints for Pulley are:
///
/// * Instructions must be portable to many architectures.
/// * The Pulley ISA is mostly target-independent as the compilation target is
///   currently only parameterized on pointer width and endianness.
/// * Pulley instructions should strike a balance between time-to-decode and
///   code size. For example super fancy bit-packing tricks might be tough to
///   decode in software but might be worthwhile if a trick is quite common
///   and greatly reduces the size of the bytecode. There's not a
///   hard-and-fast answer here, but a balance to be made.
/// * Many "macro ops" are present to reduce the size of compiled bytecode so
///   there is a wide set of duplicate functionality between opcodes (and this
///   is expected).
///
/// Given all this it's also useful to have a set of guidelines used to name
/// and develop Pulley instructions. As of the time of this writing it's still
/// pretty early days for Pulley so some of these guidelines may change over
/// time. Additionally instructions don't necessarily all follow these
/// conventions and that may also change over time. With that in mind, here's
/// a rough set of guidelines:
///
/// * Most instructions are prefixed with `x`, `f`, or `v`, indicating which
///   type of register they're operating on (e.g. `xadd32` operates on the
///   `x` integer registers and `fadd32` operates on the `f` float registers).
///
/// * Most instructions are suffixed or otherwise contain the bit width
///   they're operating on. For example `xadd32` is a 32-bit addition.
///
/// * If an instruction operates on signed or unsigned data (such as division
///   and remainder), then the instruction is suffixed with `_s` or `_u`.
///
/// * Instructions operate on either 32 or 64-bit parts of a register.
///   Instructions modifying only 32-bits of a register always modify the
///   "low" part of a register and leave the upper part unmodified. This is
///   intended to help 32-bit platforms where if most operations are 32-bit
///   there's no need for extra instructions to sign or zero extend and modify
///   the upper half of the register.
///
/// * Binops use `BinaryOperands<T>` for the destination and argument
///   registers.
///
/// * Instructions operating on memory contain a few pieces of information:
///
///   ```text
///   xload16le_u32_o32
///   │└─┬┘└┤└┤ └┬┘ └┬┘
///   │  │  │ │  │   ▼
///   │  │  │ │  │   addressing mode
///   │  │  │ │  ▼
///   │  │  │ │  width of register modified + sign-extension (optional)
///   │  │  │ ▼
///   │  │  │ endianness of the operation (le/be)
///   │  │  ▼
///   │  │  bit-width of the operation
///   │  ▼
///   │  what's happening (load/store)
///   ▼
///   register being operated on (x/f/v)
///   ```
///
/// More guidelines might get added here over time, and if you have any
/// questions feel free to raise them and we can try to add them here as well!
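///
/// # Example
///
/// A minimal sketch of consuming this macro (the `count_ops` callback below
/// is hypothetical and not part of this crate). The callback receives every
/// opcode as a `snake_name = CamelName { field: ty, .. };` entry along with
/// its doc attributes:
///
/// ```ignore
/// macro_rules! count_ops {
///     ($(
///         $( #[$attr:meta] )*
///         $snake_name:ident = $CamelName:ident $( {
///             $( $field:ident : $field_ty:ty ),*
///         } )? ;
///     )*) => {
///         // Expands to the total number of opcodes listed in `for_each_op!`.
///         [ $( stringify!($snake_name) ),* ].len()
///     };
/// }
///
/// const NUM_OPS: usize = for_each_op!(count_ops);
/// ```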
#[macro_export]
macro_rules! for_each_op {
    ( $macro:ident ) => {
        $macro! {
            /// No-operation.
            nop = Nop;

            /// Transfer control to the address in the `lr` register.
            ret = Ret;

            /// Transfer control to the PC at the given offset and set the
            /// `lr` register to the PC just after this instruction.
            ///
            /// This instruction generally assumes that the Pulley ABI is
            /// being respected where arguments are in argument registers
            /// (starting at x0 for integer arguments) and results are in
            /// result registers. This instruction itself assumes that all
            /// arguments are already in their registers. Subsequent
            /// instructions below enable moving arguments into the correct
            /// registers as part of the same call instruction.
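            ///
            /// For example, the `call1` variant below behaves roughly like
            /// this (operands are illustrative):
            ///
            /// ```text
            /// call1 x7, <offset>   ;; x0 = x7; lr = pc of next instruction;
            ///                      ;; then jump to the pc at <offset>
            /// ```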
            call = Call { offset: PcRelOffset };
            /// Like `call`, but also `x0 = arg1`.
            call1 = Call1 { arg1: XReg, offset: PcRelOffset };
            /// Like `call`, but also `x0, x1 = arg1, arg2`.
            call2 = Call2 { arg1: XReg, arg2: XReg, offset: PcRelOffset };
            /// Like `call`, but also `x0, x1, x2 = arg1, arg2, arg3`.
            call3 = Call3 { arg1: XReg, arg2: XReg, arg3: XReg, offset: PcRelOffset };
            /// Like `call`, but also `x0, x1, x2, x3 = arg1, arg2, arg3, arg4`.
            call4 = Call4 { arg1: XReg, arg2: XReg, arg3: XReg, arg4: XReg, offset: PcRelOffset };

            /// Transfer control to the PC in `reg` and set `lr` to the PC
            /// just after this instruction.
            call_indirect = CallIndirect { reg: XReg };

            /// Unconditionally transfer control to the PC at the given
            /// offset.
            jump = Jump { offset: PcRelOffset };

            /// Unconditionally transfer control to the PC in the specified
            /// register.
            xjump = XJump { reg: XReg };

            /// Conditionally transfer control to the given PC offset if
            /// `low32(cond)` contains a non-zero value.
            br_if32 = BrIf { cond: XReg, offset: PcRelOffset };

            /// Conditionally transfer control to the given PC offset if
            /// `low32(cond)` contains a zero value.
            br_if_not32 = BrIfNot { cond: XReg, offset: PcRelOffset };

            /// Branch if `a == b`.
            br_if_xeq32 = BrIfXeq32 { a: XReg, b: XReg, offset: PcRelOffset };
            /// Branch if `a != b`.
            br_if_xneq32 = BrIfXneq32 { a: XReg, b: XReg, offset: PcRelOffset };
            /// Branch if signed `a < b`.
            br_if_xslt32 = BrIfXslt32 { a: XReg, b: XReg, offset: PcRelOffset };
            /// Branch if signed `a <= b`.
            br_if_xslteq32 = BrIfXslteq32 { a: XReg, b: XReg, offset: PcRelOffset };
            /// Branch if unsigned `a < b`.
            br_if_xult32 = BrIfXult32 { a: XReg, b: XReg, offset: PcRelOffset };
            /// Branch if unsigned `a <= b`.
            br_if_xulteq32 = BrIfXulteq32 { a: XReg, b: XReg, offset: PcRelOffset };
            /// Branch if `a == b`.
            br_if_xeq64 = BrIfXeq64 { a: XReg, b: XReg, offset: PcRelOffset };
            /// Branch if `a != b`.
            br_if_xneq64 = BrIfXneq64 { a: XReg, b: XReg, offset: PcRelOffset };
            /// Branch if signed `a < b`.
            br_if_xslt64 = BrIfXslt64 { a: XReg, b: XReg, offset: PcRelOffset };
            /// Branch if signed `a <= b`.
            br_if_xslteq64 = BrIfXslteq64 { a: XReg, b: XReg, offset: PcRelOffset };
            /// Branch if unsigned `a < b`.
            br_if_xult64 = BrIfXult64 { a: XReg, b: XReg, offset: PcRelOffset };
            /// Branch if unsigned `a <= b`.
            br_if_xulteq64 = BrIfXulteq64 { a: XReg, b: XReg, offset: PcRelOffset };

            /// Branch if `a == b`.
            br_if_xeq32_i8 = BrIfXeq32I8 { a: XReg, b: i8, offset: PcRelOffset };
            /// Branch if `a == b`.
            br_if_xeq32_i32 = BrIfXeq32I32 { a: XReg, b: i32, offset: PcRelOffset };
            /// Branch if `a != b`.
            br_if_xneq32_i8 = BrIfXneq32I8 { a: XReg, b: i8, offset: PcRelOffset };
            /// Branch if `a != b`.
            br_if_xneq32_i32 = BrIfXneq32I32 { a: XReg, b: i32, offset: PcRelOffset };
            /// Branch if signed `a < b`.
            br_if_xslt32_i8 = BrIfXslt32I8 { a: XReg, b: i8, offset: PcRelOffset };
            /// Branch if signed `a < b`.
            br_if_xslt32_i32 = BrIfXslt32I32 { a: XReg, b: i32, offset: PcRelOffset };
            /// Branch if signed `a > b`.
            br_if_xsgt32_i8 = BrIfXsgt32I8 { a: XReg, b: i8, offset: PcRelOffset };
            /// Branch if signed `a > b`.
            br_if_xsgt32_i32 = BrIfXsgt32I32 { a: XReg, b: i32, offset: PcRelOffset };
            /// Branch if signed `a <= b`.
            br_if_xslteq32_i8 = BrIfXslteq32I8 { a: XReg, b: i8, offset: PcRelOffset };
            /// Branch if signed `a <= b`.
            br_if_xslteq32_i32 = BrIfXslteq32I32 { a: XReg, b: i32, offset: PcRelOffset };
            /// Branch if signed `a >= b`.
            br_if_xsgteq32_i8 = BrIfXsgteq32I8 { a: XReg, b: i8, offset: PcRelOffset };
            /// Branch if signed `a >= b`.
            br_if_xsgteq32_i32 = BrIfXsgteq32I32 { a: XReg, b: i32, offset: PcRelOffset };
            /// Branch if unsigned `a < b`.
            br_if_xult32_u8 = BrIfXult32U8 { a: XReg, b: u8, offset: PcRelOffset };
            /// Branch if unsigned `a < b`.
            br_if_xult32_u32 = BrIfXult32U32 { a: XReg, b: u32, offset: PcRelOffset };
            /// Branch if unsigned `a <= b`.
            br_if_xulteq32_u8 = BrIfXulteq32U8 { a: XReg, b: u8, offset: PcRelOffset };
            /// Branch if unsigned `a <= b`.
            br_if_xulteq32_u32 = BrIfXulteq32U32 { a: XReg, b: u32, offset: PcRelOffset };
            /// Branch if unsigned `a > b`.
            br_if_xugt32_u8 = BrIfXugt32U8 { a: XReg, b: u8, offset: PcRelOffset };
            /// Branch if unsigned `a > b`.
            br_if_xugt32_u32 = BrIfXugt32U32 { a: XReg, b: u32, offset: PcRelOffset };
            /// Branch if unsigned `a >= b`.
            br_if_xugteq32_u8 = BrIfXugteq32U8 { a: XReg, b: u8, offset: PcRelOffset };
            /// Branch if unsigned `a >= b`.
            br_if_xugteq32_u32 = BrIfXugteq32U32 { a: XReg, b: u32, offset: PcRelOffset };

            /// Branch if `a == b`.
            br_if_xeq64_i8 = BrIfXeq64I8 { a: XReg, b: i8, offset: PcRelOffset };
            /// Branch if `a == b`.
            br_if_xeq64_i32 = BrIfXeq64I32 { a: XReg, b: i32, offset: PcRelOffset };
            /// Branch if `a != b`.
            br_if_xneq64_i8 = BrIfXneq64I8 { a: XReg, b: i8, offset: PcRelOffset };
            /// Branch if `a != b`.
            br_if_xneq64_i32 = BrIfXneq64I32 { a: XReg, b: i32, offset: PcRelOffset };
            /// Branch if signed `a < b`.
            br_if_xslt64_i8 = BrIfXslt64I8 { a: XReg, b: i8, offset: PcRelOffset };
            /// Branch if signed `a < b`.
            br_if_xslt64_i32 = BrIfXslt64I32 { a: XReg, b: i32, offset: PcRelOffset };
            /// Branch if signed `a > b`.
            br_if_xsgt64_i8 = BrIfXsgt64I8 { a: XReg, b: i8, offset: PcRelOffset };
            /// Branch if signed `a > b`.
            br_if_xsgt64_i32 = BrIfXsgt64I32 { a: XReg, b: i32, offset: PcRelOffset };
            /// Branch if signed `a <= b`.
            br_if_xslteq64_i8 = BrIfXslteq64I8 { a: XReg, b: i8, offset: PcRelOffset };
            /// Branch if signed `a <= b`.
            br_if_xslteq64_i32 = BrIfXslteq64I32 { a: XReg, b: i32, offset: PcRelOffset };
            /// Branch if signed `a >= b`.
            br_if_xsgteq64_i8 = BrIfXsgteq64I8 { a: XReg, b: i8, offset: PcRelOffset };
            /// Branch if signed `a >= b`.
            br_if_xsgteq64_i32 = BrIfXsgteq64I32 { a: XReg, b: i32, offset: PcRelOffset };
            /// Branch if unsigned `a < b`.
            br_if_xult64_u8 = BrIfXult64U8 { a: XReg, b: u8, offset: PcRelOffset };
            /// Branch if unsigned `a < b`.
            br_if_xult64_u32 = BrIfXult64U32 { a: XReg, b: u32, offset: PcRelOffset };
            /// Branch if unsigned `a <= b`.
            br_if_xulteq64_u8 = BrIfXulteq64U8 { a: XReg, b: u8, offset: PcRelOffset };
            /// Branch if unsigned `a <= b`.
            br_if_xulteq64_u32 = BrIfXulteq64U32 { a: XReg, b: u32, offset: PcRelOffset };
            /// Branch if unsigned `a > b`.
            br_if_xugt64_u8 = BrIfXugt64U8 { a: XReg, b: u8, offset: PcRelOffset };
            /// Branch if unsigned `a > b`.
            br_if_xugt64_u32 = BrIfXugt64U32 { a: XReg, b: u32, offset: PcRelOffset };
            /// Branch if unsigned `a >= b`.
            br_if_xugteq64_u8 = BrIfXugteq64U8 { a: XReg, b: u8, offset: PcRelOffset };
            /// Branch if unsigned `a >= b`.
            br_if_xugteq64_u32 = BrIfXugteq64U32 { a: XReg, b: u32, offset: PcRelOffset };

            /// Branch to the label indicated by `low32(idx)`.
            ///
            /// After this instruction are `amt` instances of `PcRelOffset`
            /// and the `idx` selects which one will be branched to. The value
            /// of `idx` is clamped to `amt - 1` (i.e. the last offset is the
            /// "default" one).
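            ///
            /// For example, a hypothetical three-entry table is encoded as
            /// the instruction followed by three offsets:
            ///
            /// ```text
            /// br_table32 x0, 3
            /// <PcRelOffset>   ;; taken when low32(x0) == 0
            /// <PcRelOffset>   ;; taken when low32(x0) == 1
            /// <PcRelOffset>   ;; taken otherwise (the "default")
            /// ```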
            br_table32 = BrTable32 { idx: XReg, amt: u32 };

            /// Move between `x` registers.
            xmov = Xmov { dst: XReg, src: XReg };

            /// Set `dst = 0`.
            xzero = Xzero { dst: XReg };
            /// Set `dst = 1`.
            xone = Xone { dst: XReg };
            /// Set `dst = sign_extend(imm8)`.
            xconst8 = Xconst8 { dst: XReg, imm: i8 };
            /// Set `dst = sign_extend(imm16)`.
            xconst16 = Xconst16 { dst: XReg, imm: i16 };
            /// Set `dst = sign_extend(imm32)`.
            xconst32 = Xconst32 { dst: XReg, imm: i32 };
            /// Set `dst = imm64`.
            xconst64 = Xconst64 { dst: XReg, imm: i64 };

            /// 32-bit wrapping addition: `low32(dst) = low32(src1) + low32(src2)`.
            ///
            /// The upper 32-bits of `dst` are unmodified.
            xadd32 = Xadd32 { operands: BinaryOperands<XReg> };
            /// Same as `xadd32` but `src2` is a zero-extended 8-bit immediate.
            xadd32_u8 = Xadd32U8 { dst: XReg, src1: XReg, src2: u8 };
            /// Same as `xadd32` but `src2` is a 32-bit immediate.
            xadd32_u32 = Xadd32U32 { dst: XReg, src1: XReg, src2: u32 };

            /// 64-bit wrapping addition: `dst = src1 + src2`.
            xadd64 = Xadd64 { operands: BinaryOperands<XReg> };
            /// Same as `xadd64` but `src2` is a zero-extended 8-bit immediate.
            xadd64_u8 = Xadd64U8 { dst: XReg, src1: XReg, src2: u8 };
            /// Same as `xadd64` but `src2` is a zero-extended 32-bit immediate.
            xadd64_u32 = Xadd64U32 { dst: XReg, src1: XReg, src2: u32 };

            /// `low32(dst) = low32(src1) * low32(src2) + low32(src3)`
            xmadd32 = Xmadd32 { dst: XReg, src1: XReg, src2: XReg, src3: XReg };
            /// `dst = src1 * src2 + src3`
            xmadd64 = Xmadd64 { dst: XReg, src1: XReg, src2: XReg, src3: XReg };

            /// 32-bit wrapping subtraction: `low32(dst) = low32(src1) - low32(src2)`.
            ///
            /// The upper 32-bits of `dst` are unmodified.
            xsub32 = Xsub32 { operands: BinaryOperands<XReg> };
            /// Same as `xsub32` but `src2` is a zero-extended 8-bit immediate.
            xsub32_u8 = Xsub32U8 { dst: XReg, src1: XReg, src2: u8 };
            /// Same as `xsub32` but `src2` is a 32-bit immediate.
            xsub32_u32 = Xsub32U32 { dst: XReg, src1: XReg, src2: u32 };

            /// 64-bit wrapping subtraction: `dst = src1 - src2`.
            xsub64 = Xsub64 { operands: BinaryOperands<XReg> };
            /// Same as `xsub64` but `src2` is a zero-extended 8-bit immediate.
            xsub64_u8 = Xsub64U8 { dst: XReg, src1: XReg, src2: u8 };
            /// Same as `xsub64` but `src2` is a zero-extended 32-bit immediate.
            xsub64_u32 = Xsub64U32 { dst: XReg, src1: XReg, src2: u32 };

            /// `low32(dst) = low32(src1) * low32(src2)`
            xmul32 = XMul32 { operands: BinaryOperands<XReg> };
            /// Same as `xmul32` but `src2` is a sign-extended 8-bit immediate.
            xmul32_s8 = Xmul32S8 { dst: XReg, src1: XReg, src2: i8 };
            /// Same as `xmul32` but `src2` is a sign-extended 32-bit immediate.
            xmul32_s32 = Xmul32S32 { dst: XReg, src1: XReg, src2: i32 };

            /// `dst = src1 * src2`
            xmul64 = XMul64 { operands: BinaryOperands<XReg> };
            /// Same as `xmul64` but `src2` is a sign-extended 8-bit immediate.
            xmul64_s8 = Xmul64S8 { dst: XReg, src1: XReg, src2: i8 };
            /// Same as `xmul64` but `src2` is a sign-extended 32-bit immediate.
            xmul64_s32 = Xmul64S32 { dst: XReg, src1: XReg, src2: i32 };

            /// `low32(dst) = trailing_zeros(low32(src))`
            xctz32 = Xctz32 { dst: XReg, src: XReg };
            /// `dst = trailing_zeros(src)`
            xctz64 = Xctz64 { dst: XReg, src: XReg };

            /// `low32(dst) = leading_zeros(low32(src))`
            xclz32 = Xclz32 { dst: XReg, src: XReg };
            /// `dst = leading_zeros(src)`
            xclz64 = Xclz64 { dst: XReg, src: XReg };

            /// `low32(dst) = count_ones(low32(src))`
            xpopcnt32 = Xpopcnt32 { dst: XReg, src: XReg };
            /// `dst = count_ones(src)`
            xpopcnt64 = Xpopcnt64 { dst: XReg, src: XReg };

            /// `low32(dst) = rotate_left(low32(src1), low32(src2))`
            xrotl32 = Xrotl32 { operands: BinaryOperands<XReg> };
            /// `dst = rotate_left(src1, src2)`
            xrotl64 = Xrotl64 { operands: BinaryOperands<XReg> };

            /// `low32(dst) = rotate_right(low32(src1), low32(src2))`
            xrotr32 = Xrotr32 { operands: BinaryOperands<XReg> };
            /// `dst = rotate_right(src1, src2)`
            xrotr64 = Xrotr64 { operands: BinaryOperands<XReg> };

            /// `low32(dst) = low32(src1) << low5(src2)`
            xshl32 = Xshl32 { operands: BinaryOperands<XReg> };
            /// `low32(dst) = low32(src1) >> low5(src2)` (signed)
            xshr32_s = Xshr32S { operands: BinaryOperands<XReg> };
            /// `low32(dst) = low32(src1) >> low5(src2)` (unsigned)
            xshr32_u = Xshr32U { operands: BinaryOperands<XReg> };
            /// `dst = src1 << low6(src2)`
            xshl64 = Xshl64 { operands: BinaryOperands<XReg> };
            /// `dst = src1 >> low6(src2)` (signed)
            xshr64_s = Xshr64S { operands: BinaryOperands<XReg> };
            /// `dst = src1 >> low6(src2)` (unsigned)
            xshr64_u = Xshr64U { operands: BinaryOperands<XReg> };

            /// `low32(dst) = low32(src1) << low5(src2)`
            xshl32_u6 = Xshl32U6 { operands: BinaryOperands<XReg, XReg, U6> };
            /// `low32(dst) = low32(src1) >> low5(src2)` (signed)
            xshr32_s_u6 = Xshr32SU6 { operands: BinaryOperands<XReg, XReg, U6> };
            /// `low32(dst) = low32(src1) >> low5(src2)` (unsigned)
            xshr32_u_u6 = Xshr32UU6 { operands: BinaryOperands<XReg, XReg, U6> };
            /// `dst = src1 << low6(src2)`
            xshl64_u6 = Xshl64U6 { operands: BinaryOperands<XReg, XReg, U6> };
            /// `dst = src1 >> low6(src2)` (signed)
            xshr64_s_u6 = Xshr64SU6 { operands: BinaryOperands<XReg, XReg, U6> };
            /// `dst = src1 >> low6(src2)` (unsigned)
            xshr64_u_u6 = Xshr64UU6 { operands: BinaryOperands<XReg, XReg, U6> };

            /// `low32(dst) = -low32(src)`
            xneg32 = Xneg32 { dst: XReg, src: XReg };
            /// `dst = -src`
            xneg64 = Xneg64 { dst: XReg, src: XReg };

            /// `low32(dst) = src1 == src2`
            xeq64 = Xeq64 { operands: BinaryOperands<XReg> };
            /// `low32(dst) = src1 != src2`
            xneq64 = Xneq64 { operands: BinaryOperands<XReg> };
            /// `low32(dst) = src1 < src2` (signed)
            xslt64 = Xslt64 { operands: BinaryOperands<XReg> };
            /// `low32(dst) = src1 <= src2` (signed)
            xslteq64 = Xslteq64 { operands: BinaryOperands<XReg> };
            /// `low32(dst) = src1 < src2` (unsigned)
            xult64 = Xult64 { operands: BinaryOperands<XReg> };
            /// `low32(dst) = src1 <= src2` (unsigned)
            xulteq64 = Xulteq64 { operands: BinaryOperands<XReg> };
            /// `low32(dst) = low32(src1) == low32(src2)`
            xeq32 = Xeq32 { operands: BinaryOperands<XReg> };
            /// `low32(dst) = low32(src1) != low32(src2)`
            xneq32 = Xneq32 { operands: BinaryOperands<XReg> };
            /// `low32(dst) = low32(src1) < low32(src2)` (signed)
            xslt32 = Xslt32 { operands: BinaryOperands<XReg> };
            /// `low32(dst) = low32(src1) <= low32(src2)` (signed)
            xslteq32 = Xslteq32 { operands: BinaryOperands<XReg> };
            /// `low32(dst) = low32(src1) < low32(src2)` (unsigned)
            xult32 = Xult32 { operands: BinaryOperands<XReg> };
            /// `low32(dst) = low32(src1) <= low32(src2)` (unsigned)
            xulteq32 = Xulteq32 { operands: BinaryOperands<XReg> };

            // Loads/stores with various addressing modes. Note that each
            // style of addressing mode is split to its own suite of
            // instructions to simplify the implementation of each opcode and
            // avoid internal branching when using one addressing mode vs
            // another.
            //
            // Note that big-endian, float, and vector loads are deferred to
            // the "extended" opcode set below.

            /// `low32(dst) = zext_8_32(*addr)`
            xload8_u32_o32 = XLoad8U32O32 { dst: XReg, addr: AddrO32 };
            /// `low32(dst) = sext_8_32(*addr)`
            xload8_s32_o32 = XLoad8S32O32 { dst: XReg, addr: AddrO32 };
            /// `low32(dst) = zext_16_32(*addr)`
            xload16le_u32_o32 = XLoad16LeU32O32 { dst: XReg, addr: AddrO32 };
            /// `low32(dst) = sext_16_32(*addr)`
            xload16le_s32_o32 = XLoad16LeS32O32 { dst: XReg, addr: AddrO32 };
            /// `low32(dst) = *addr`
            xload32le_o32 = XLoad32LeO32 { dst: XReg, addr: AddrO32 };
            /// `dst = *addr`
            xload64le_o32 = XLoad64LeO32 { dst: XReg, addr: AddrO32 };
            /// `*addr = low8(src)`
            xstore8_o32 = XStore8O32 { addr: AddrO32, src: XReg };
            /// `*addr = low16(src)`
            xstore16le_o32 = XStore16LeO32 { addr: AddrO32, src: XReg };
            /// `*addr = low32(src)`
            xstore32le_o32 = XStore32LeO32 { addr: AddrO32, src: XReg };
            /// `*addr = src`
            xstore64le_o32 = XStore64LeO32 { addr: AddrO32, src: XReg };

            /// `low32(dst) = zext_8_32(*addr)`
            xload8_u32_z = XLoad8U32Z { dst: XReg, addr: AddrZ };
            /// `low32(dst) = sext_8_32(*addr)`
            xload8_s32_z = XLoad8S32Z { dst: XReg, addr: AddrZ };
            /// `low32(dst) = zext_16_32(*addr)`
            xload16le_u32_z = XLoad16LeU32Z { dst: XReg, addr: AddrZ };
            /// `low32(dst) = sext_16_32(*addr)`
            xload16le_s32_z = XLoad16LeS32Z { dst: XReg, addr: AddrZ };
            /// `low32(dst) = *addr`
            xload32le_z = XLoad32LeZ { dst: XReg, addr: AddrZ };
            /// `dst = *addr`
            xload64le_z = XLoad64LeZ { dst: XReg, addr: AddrZ };
            /// `*addr = low8(src)`
            xstore8_z = XStore8Z { addr: AddrZ, src: XReg };
            /// `*addr = low16(src)`
            xstore16le_z = XStore16LeZ { addr: AddrZ, src: XReg };
            /// `*addr = low32(src)`
            xstore32le_z = XStore32LeZ { addr: AddrZ, src: XReg };
            /// `*addr = src`
            xstore64le_z = XStore64LeZ { addr: AddrZ, src: XReg };

            /// `low32(dst) = zext_8_32(*addr)`
            xload8_u32_g32 = XLoad8U32G32 { dst: XReg, addr: AddrG32 };
            /// `low32(dst) = sext_8_32(*addr)`
            xload8_s32_g32 = XLoad8S32G32 { dst: XReg, addr: AddrG32 };
            /// `low32(dst) = zext_16_32(*addr)`
            xload16le_u32_g32 = XLoad16LeU32G32 { dst: XReg, addr: AddrG32 };
            /// `low32(dst) = sext_16_32(*addr)`
            xload16le_s32_g32 = XLoad16LeS32G32 { dst: XReg, addr: AddrG32 };
            /// `low32(dst) = *addr`
            xload32le_g32 = XLoad32LeG32 { dst: XReg, addr: AddrG32 };
            /// `dst = *addr`
            xload64le_g32 = XLoad64LeG32 { dst: XReg, addr: AddrG32 };
            /// `*addr = low8(src)`
            xstore8_g32 = XStore8G32 { addr: AddrG32, src: XReg };
            /// `*addr = low16(src)`
            xstore16le_g32 = XStore16LeG32 { addr: AddrG32, src: XReg };
            /// `*addr = low32(src)`
            xstore32le_g32 = XStore32LeG32 { addr: AddrG32, src: XReg };
            /// `*addr = src`
            xstore64le_g32 = XStore64LeG32 { addr: AddrG32, src: XReg };

            /// `low32(dst) = zext_8_32(*addr)`
            xload8_u32_g32bne = XLoad8U32G32Bne { dst: XReg, addr: AddrG32Bne };
            /// `low32(dst) = sext_8_32(*addr)`
            xload8_s32_g32bne = XLoad8S32G32Bne { dst: XReg, addr: AddrG32Bne };
            /// `low32(dst) = zext_16_32(*addr)`
            xload16le_u32_g32bne = XLoad16LeU32G32Bne { dst: XReg, addr: AddrG32Bne };
            /// `low32(dst) = sext_16_32(*addr)`
            xload16le_s32_g32bne = XLoad16LeS32G32Bne { dst: XReg, addr: AddrG32Bne };
            /// `low32(dst) = *addr`
            xload32le_g32bne = XLoad32LeG32Bne { dst: XReg, addr: AddrG32Bne };
            /// `dst = *addr`
            xload64le_g32bne = XLoad64LeG32Bne { dst: XReg, addr: AddrG32Bne };
            /// `*addr = low8(src)`
            xstore8_g32bne = XStore8G32Bne { addr: AddrG32Bne, src: XReg };
            /// `*addr = low16(src)`
            xstore16le_g32bne = XStore16LeG32Bne { addr: AddrG32Bne, src: XReg };
            /// `*addr = low32(src)`
            xstore32le_g32bne = XStore32LeG32Bne { addr: AddrG32Bne, src: XReg };
            /// `*addr = src`
            xstore64le_g32bne = XStore64LeG32Bne { addr: AddrG32Bne, src: XReg };

            /// `push lr; push fp; fp = sp`
            push_frame = PushFrame;
            /// `sp = fp; pop fp; pop lr`
            pop_frame = PopFrame;

            /// Macro-instruction to enter a function, allocate some stack,
            /// and then save some registers.
            ///
            /// This is equivalent to `push_frame`, `stack_alloc32 amt`, then
            /// saving all of `regs` to the top of the stack just allocated.
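            ///
            /// As a rough illustration (operand values are hypothetical and
            /// the exact stack layout is defined by the interpreter):
            ///
            /// ```text
            /// push_frame_save 16, {x16, x17}
            /// ;; ≈ push_frame
            /// ;;   stack_alloc32 16
            /// ;;   ... store x16 and x17 to the newly allocated stack ...
            /// ```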
            push_frame_save = PushFrameSave { amt: u16, regs: UpperRegSet<XReg> };
            /// Inverse of `push_frame_save`. Restores `regs` from the top of
            /// the stack, then runs `stack_free32 amt`, then runs
            /// `pop_frame`.
            pop_frame_restore = PopFrameRestore { amt: u16, regs: UpperRegSet<XReg> };

            /// `sp = sp.checked_sub(amt)`
            stack_alloc32 = StackAlloc32 { amt: u32 };

            /// `sp = sp + amt`
            stack_free32 = StackFree32 { amt: u32 };

            /// `dst = zext(low8(src))`
            zext8 = Zext8 { dst: XReg, src: XReg };
            /// `dst = zext(low16(src))`
            zext16 = Zext16 { dst: XReg, src: XReg };
            /// `dst = zext(low32(src))`
            zext32 = Zext32 { dst: XReg, src: XReg };
            /// `dst = sext(low8(src))`
            sext8 = Sext8 { dst: XReg, src: XReg };
            /// `dst = sext(low16(src))`
            sext16 = Sext16 { dst: XReg, src: XReg };
            /// `dst = sext(low32(src))`
            sext32 = Sext32 { dst: XReg, src: XReg };

            /// `low32(dst) = |low32(src)|`
            xabs32 = XAbs32 { dst: XReg, src: XReg };
            /// `dst = |src|`
            xabs64 = XAbs64 { dst: XReg, src: XReg };

            /// `low32(dst) = low32(src1) / low32(src2)` (signed)
            xdiv32_s = XDiv32S { operands: BinaryOperands<XReg> };

            /// `dst = src1 / src2` (signed)
            xdiv64_s = XDiv64S { operands: BinaryOperands<XReg> };

            /// `low32(dst) = low32(src1) / low32(src2)` (unsigned)
            xdiv32_u = XDiv32U { operands: BinaryOperands<XReg> };

            /// `dst = src1 / src2` (unsigned)
            xdiv64_u = XDiv64U { operands: BinaryOperands<XReg> };

            /// `low32(dst) = low32(src1) % low32(src2)` (signed)
            xrem32_s = XRem32S { operands: BinaryOperands<XReg> };

            /// `dst = src1 % src2` (signed)
            xrem64_s = XRem64S { operands: BinaryOperands<XReg> };

            /// `low32(dst) = low32(src1) % low32(src2)` (unsigned)
            xrem32_u = XRem32U { operands: BinaryOperands<XReg> };

            /// `dst = src1 % src2` (unsigned)
            xrem64_u = XRem64U { operands: BinaryOperands<XReg> };

            /// `low32(dst) = low32(src1) & low32(src2)`
            xband32 = XBand32 { operands: BinaryOperands<XReg> };
            /// Same as `xband32` but `src2` is a sign-extended 8-bit immediate.
            xband32_s8 = Xband32S8 { dst: XReg, src1: XReg, src2: i8 };
            /// Same as `xband32` but `src2` is a sign-extended 32-bit immediate.
            xband32_s32 = Xband32S32 { dst: XReg, src1: XReg, src2: i32 };
            /// `dst = src1 & src2`
            xband64 = XBand64 { operands: BinaryOperands<XReg> };
            /// Same as `xband64` but `src2` is a sign-extended 8-bit immediate.
            xband64_s8 = Xband64S8 { dst: XReg, src1: XReg, src2: i8 };
            /// Same as `xband64` but `src2` is a sign-extended 32-bit immediate.
            xband64_s32 = Xband64S32 { dst: XReg, src1: XReg, src2: i32 };
            /// `low32(dst) = low32(src1) | low32(src2)`
            xbor32 = XBor32 { operands: BinaryOperands<XReg> };
            /// Same as `xbor32` but `src2` is a sign-extended 8-bit immediate.
            xbor32_s8 = Xbor32S8 { dst: XReg, src1: XReg, src2: i8 };
            /// Same as `xbor32` but `src2` is a sign-extended 32-bit immediate.
            xbor32_s32 = Xbor32S32 { dst: XReg, src1: XReg, src2: i32 };
            /// `dst = src1 | src2`
            xbor64 = XBor64 { operands: BinaryOperands<XReg> };
            /// Same as `xbor64` but `src2` is a sign-extended 8-bit immediate.
            xbor64_s8 = Xbor64S8 { dst: XReg, src1: XReg, src2: i8 };
            /// Same as `xbor64` but `src2` is a sign-extended 32-bit immediate.
            xbor64_s32 = Xbor64S32 { dst: XReg, src1: XReg, src2: i32 };

            /// `low32(dst) = low32(src1) ^ low32(src2)`
            xbxor32 = XBxor32 { operands: BinaryOperands<XReg> };
            /// Same as `xbxor32` but `src2` is a sign-extended 8-bit immediate.
            xbxor32_s8 = Xbxor32S8 { dst: XReg, src1: XReg, src2: i8 };
            /// Same as `xbxor32` but `src2` is a sign-extended 32-bit immediate.
            xbxor32_s32 = Xbxor32S32 { dst: XReg, src1: XReg, src2: i32 };
            /// `dst = src1 ^ src2`
            xbxor64 = XBxor64 { operands: BinaryOperands<XReg> };
            /// Same as `xbxor64` but `src2` is a sign-extended 8-bit immediate.
            xbxor64_s8 = Xbxor64S8 { dst: XReg, src1: XReg, src2: i8 };
            /// Same as `xbxor64` but `src2` is a sign-extended 32-bit immediate.
            xbxor64_s32 = Xbxor64S32 { dst: XReg, src1: XReg, src2: i32 };

            /// `low32(dst) = !low32(src)`
            xbnot32 = XBnot32 { dst: XReg, src: XReg };
            /// `dst = !src`
            xbnot64 = XBnot64 { dst: XReg, src: XReg };

            /// `low32(dst) = min(low32(src1), low32(src2))` (unsigned)
            xmin32_u = Xmin32U { operands: BinaryOperands<XReg> };
            /// `low32(dst) = min(low32(src1), low32(src2))` (signed)
            xmin32_s = Xmin32S { operands: BinaryOperands<XReg> };
            /// `low32(dst) = max(low32(src1), low32(src2))` (unsigned)
            xmax32_u = Xmax32U { operands: BinaryOperands<XReg> };
            /// `low32(dst) = max(low32(src1), low32(src2))` (signed)
            xmax32_s = Xmax32S { operands: BinaryOperands<XReg> };
            /// `dst = min(src1, src2)` (unsigned)
            xmin64_u = Xmin64U { operands: BinaryOperands<XReg> };
            /// `dst = min(src1, src2)` (signed)
            xmin64_s = Xmin64S { operands: BinaryOperands<XReg> };
            /// `dst = max(src1, src2)` (unsigned)
            xmax64_u = Xmax64U { operands: BinaryOperands<XReg> };
            /// `dst = max(src1, src2)` (signed)
            xmax64_s = Xmax64S { operands: BinaryOperands<XReg> };

            /// `low32(dst) = low32(cond) ? low32(if_nonzero) : low32(if_zero)`
            xselect32 = XSelect32 { dst: XReg, cond: XReg, if_nonzero: XReg, if_zero: XReg };
            /// `dst = low32(cond) ? if_nonzero : if_zero`
            xselect64 = XSelect64 { dst: XReg, cond: XReg, if_nonzero: XReg, if_zero: XReg };
        }
    };
}

/// Calls the given macro with each extended opcode.
///
/// The entries here use the same token format as `for_each_op!` above.
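///
/// For example, a single hypothetical callback macro (`define_ops` below is
/// not part of this crate) can be driven over both opcode sets:
///
/// ```ignore
/// for_each_op!(define_ops);
/// for_each_extended_op!(define_ops);
/// ```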
#[macro_export]
macro_rules! for_each_extended_op {
    ( $macro:ident ) => {
        $macro! {
            /// Raise a trap.
            trap = Trap;

            /// A special opcode to halt interpreter execution and yield
            /// control back to the host.
            ///
            /// This opcode results in `DoneReason::CallIndirectHost` where
            /// the `id` here is shepherded along to the embedder. It's up to
            /// the embedder to determine what to do with the `id` and the
            /// current state of registers and the stack.
            ///
            /// In Wasmtime this is used to implement interpreter-to-host
            /// calls. This is modeled as a `call` instruction where the first
            /// parameter is the native function pointer to invoke and all
            /// remaining parameters for the native function are in following
            /// parameter positions (e.g. `x1`, `x2`, ...). The results of the
            /// host call are then stored in `x0`.
            ///
            /// Handling this in Wasmtime is done through a "relocation" which
            /// is resolved at link-time when raw bytecode from Cranelift is
            /// assembled into the final object that Wasmtime will interpret.
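            ///
            /// Illustratively, under the convention described above (register
            /// assignments here are examples only):
            ///
            /// ```text
            /// ;; x0 = native function pointer, x1/x2/... = its arguments
            /// call_indirect_host 3
            /// ;; interpreter halts with DoneReason::CallIndirectHost { id: 3 };
            /// ;; after the embedder performs the call, x0 holds the result
            /// ```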
            call_indirect_host = CallIndirectHost { id: u8 };

            /// Adds `offset` to the pc of this instruction and stores the
            /// result in `dst`.
            xpcadd = Xpcadd { dst: XReg, offset: PcRelOffset };

            /// Gets the special "fp" register and moves it into `dst`.
            xmov_fp = XmovFp { dst: XReg };

            /// Gets the special "lr" register and moves it into `dst`.
            xmov_lr = XmovLr { dst: XReg };

            /// `dst = byteswap(low32(src))`
            bswap32 = Bswap32 { dst: XReg, src: XReg };
            /// `dst = byteswap(src)`
            bswap64 = Bswap64 { dst: XReg, src: XReg };

            /// 32-bit checked unsigned addition: `low32(dst) = low32(src1) +
            /// low32(src2)`.
            ///
            /// The upper 32-bits of `dst` are unmodified. Traps if the
            /// addition overflows.
            xadd32_uoverflow_trap = Xadd32UoverflowTrap { operands: BinaryOperands<XReg> };

            /// 64-bit checked unsigned addition: `dst = src1 + src2`.
            ///
            /// Traps if the addition overflows.
            xadd64_uoverflow_trap = Xadd64UoverflowTrap { operands: BinaryOperands<XReg> };

            /// `dst = high64(src1 * src2)` (signed)
            xmulhi64_s = XMulHi64S { operands: BinaryOperands<XReg> };
            /// `dst = high64(src1 * src2)` (unsigned)
            xmulhi64_u = XMulHi64U { operands: BinaryOperands<XReg> };

            /// `low32(dst) = if low32(src) == 0 { 0 } else { -1 }`
            xbmask32 = Xbmask32 { dst: XReg, src: XReg };
            /// `dst = if src == 0 { 0 } else { -1 }`
            xbmask64 = Xbmask64 { dst: XReg, src: XReg };

            // Big-endian loads/stores of X-registers using the "o32"
            // addressing mode.

            /// `low32(dst) = zext(*addr)`
            xload16be_u32_o32 = XLoad16BeU32O32 { dst: XReg, addr: AddrO32 };
            /// `low32(dst) = sext(*addr)`
            xload16be_s32_o32 = XLoad16BeS32O32 { dst: XReg, addr: AddrO32 };
            /// `low32(dst) = *addr`
            xload32be_o32 = XLoad32BeO32 { dst: XReg, addr: AddrO32 };
            /// `dst = *addr`
            xload64be_o32 = XLoad64BeO32 { dst: XReg, addr: AddrO32 };
            /// `*addr = low16(src)`
            xstore16be_o32 = XStore16BeO32 { addr: AddrO32, src: XReg };
            /// `*addr = low32(src)`
            xstore32be_o32 = XStore32BeO32 { addr: AddrO32, src: XReg };
            /// `*addr = src`
            xstore64be_o32 = XStore64BeO32 { addr: AddrO32, src: XReg };

            // Big- and little-endian float loads/stores. Note that the "z"
            // addressing mode only has little-endian variants.

            /// `low32(dst) = *addr`
            fload32be_o32 = Fload32BeO32 { dst: FReg, addr: AddrO32 };
            /// `dst = *addr`
            fload64be_o32 = Fload64BeO32 { dst: FReg, addr: AddrO32 };
            /// `*addr = low32(src)`
            fstore32be_o32 = Fstore32BeO32 { addr: AddrO32, src: FReg };
            /// `*addr = src`
            fstore64be_o32 = Fstore64BeO32 { addr: AddrO32, src: FReg };

            /// `low32(dst) = *addr`
            fload32le_o32 = Fload32LeO32 { dst: FReg, addr: AddrO32 };
            /// `dst = *addr`
            fload64le_o32 = Fload64LeO32 { dst: FReg, addr: AddrO32 };
            /// `*addr = low32(src)`
            fstore32le_o32 = Fstore32LeO32 { addr: AddrO32, src: FReg };
            /// `*addr = src`
            fstore64le_o32 = Fstore64LeO32 { addr: AddrO32, src: FReg };

            /// `low32(dst) = *addr`
            fload32le_z = Fload32LeZ { dst: FReg, addr: AddrZ };
            /// `dst = *addr`
            fload64le_z = Fload64LeZ { dst: FReg, addr: AddrZ };
            /// `*addr = low32(src)`
            fstore32le_z = Fstore32LeZ { addr: AddrZ, src: FReg };
            /// `*addr = src`
            fstore64le_z = Fstore64LeZ { addr: AddrZ, src: FReg };

            /// `low32(dst) = *addr`
            fload32le_g32 = Fload32LeG32 { dst: FReg, addr: AddrG32 };
            /// `dst = *addr`
            fload64le_g32 = Fload64LeG32 { dst: FReg, addr: AddrG32 };
            /// `*addr = low32(src)`
            fstore32le_g32 = Fstore32LeG32 { addr: AddrG32, src: FReg };
            /// `*addr = src`
            fstore64le_g32 = Fstore64LeG32 { addr: AddrG32, src: FReg };

            // Vector loads/stores. Note that big-endian variants are all
            // omitted.

            /// `dst = *addr`
            vload128le_o32 = VLoad128O32 { dst: VReg, addr: AddrO32 };
            /// `*addr = src`
            vstore128le_o32 = Vstore128LeO32 { addr: AddrO32, src: VReg };
            /// `dst = *addr`
            vload128le_z = VLoad128Z { dst: VReg, addr: AddrZ };
            /// `*addr = src`
            vstore128le_z = Vstore128LeZ { addr: AddrZ, src: VReg };
            /// `dst = *addr`
            vload128le_g32 = VLoad128G32 { dst: VReg, addr: AddrG32 };
            /// `*addr = src`
            vstore128le_g32 = Vstore128LeG32 { addr: AddrG32, src: VReg };

            /// Move between `f` registers.
            fmov = Fmov { dst: FReg, src: FReg };
            /// Move between `v` registers.
            vmov = Vmov { dst: VReg, src: VReg };

            /// `low32(dst) = bitcast low32(src) as i32`
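            ///
            /// For example, `1.0_f32` bitcasts to `0x3f80_0000`.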
            bitcast_int_from_float_32 = BitcastIntFromFloat32 { dst: XReg, src: FReg };
            /// `dst = bitcast src as i64`
            bitcast_int_from_float_64 = BitcastIntFromFloat64 { dst: XReg, src: FReg };
            /// `low32(dst) = bitcast low32(src) as f32`
            bitcast_float_from_int_32 = BitcastFloatFromInt32 { dst: FReg, src: XReg };
            /// `dst = bitcast src as f64`
            bitcast_float_from_int_64 = BitcastFloatFromInt64 { dst: FReg, src: XReg };

            /// `low32(dst) = bits`
            fconst32 = FConst32 { dst: FReg, bits: u32 };
            /// `dst = bits`
            fconst64 = FConst64 { dst: FReg, bits: u64 };

            /// `low32(dst) = zext(src1 == src2)`
            feq32 = Feq32 { dst: XReg, src1: FReg, src2: FReg };
            /// `low32(dst) = zext(src1 != src2)`
            fneq32 = Fneq32 { dst: XReg, src1: FReg, src2: FReg };
            /// `low32(dst) = zext(src1 < src2)`
            flt32 = Flt32 { dst: XReg, src1: FReg, src2: FReg };
            /// `low32(dst) = zext(src1 <= src2)`
            flteq32 = Flteq32 { dst: XReg, src1: FReg, src2: FReg };
            /// `low32(dst) = zext(src1 == src2)`
            feq64 = Feq64 { dst: XReg, src1: FReg, src2: FReg };
            /// `low32(dst) = zext(src1 != src2)`
            fneq64 = Fneq64 { dst: XReg, src1: FReg, src2: FReg };
            /// `low32(dst) = zext(src1 < src2)`
            flt64 = Flt64 { dst: XReg, src1: FReg, src2: FReg };
            /// `low32(dst) = zext(src1 <= src2)`
            flteq64 = Flteq64 { dst: XReg, src1: FReg, src2: FReg };

            /// `low32(dst) = low32(cond) ? low32(if_nonzero) : low32(if_zero)`
            fselect32 = FSelect32 { dst: FReg, cond: XReg, if_nonzero: FReg, if_zero: FReg };
            /// `dst = low32(cond) ? if_nonzero : if_zero`
            fselect64 = FSelect64 { dst: FReg, cond: XReg, if_nonzero: FReg, if_zero: FReg };

            /// `low32(dst) = demote(src)`
            f32_from_f64 = F32FromF64 { dst: FReg, src: FReg };
            /// `dst = promote(low32(src))`
            f64_from_f32 = F64FromF32 { dst: FReg, src: FReg };

            /// `low32(dst) = checked_f32_from_signed(low32(src))`
            f32_from_x32_s = F32FromX32S { dst: FReg, src: XReg };
            /// `low32(dst) = checked_f32_from_unsigned(low32(src))`
            f32_from_x32_u = F32FromX32U { dst: FReg, src: XReg };
            /// `low32(dst) = checked_f32_from_signed(src)`
            f32_from_x64_s = F32FromX64S { dst: FReg, src: XReg };
            /// `low32(dst) = checked_f32_from_unsigned(src)`
            f32_from_x64_u = F32FromX64U { dst: FReg, src: XReg };
            /// `dst = checked_f64_from_signed(low32(src))`
            f64_from_x32_s = F64FromX32S { dst: FReg, src: XReg };
            /// `dst = checked_f64_from_unsigned(low32(src))`
            f64_from_x32_u = F64FromX32U { dst: FReg, src: XReg };
            /// `dst = checked_f64_from_signed(src)`
            f64_from_x64_s = F64FromX64S { dst: FReg, src: XReg };
            /// `dst = checked_f64_from_unsigned(src)`
            f64_from_x64_u = F64FromX64U { dst: FReg, src: XReg };

            /// `low32(dst) = checked_signed_from_f32(low32(src))`
            x32_from_f32_s = X32FromF32S { dst: XReg, src: FReg };
            /// `low32(dst) = checked_unsigned_from_f32(low32(src))`
            x32_from_f32_u = X32FromF32U { dst: XReg, src: FReg };
            /// `low32(dst) = checked_signed_from_f64(src)`
            x32_from_f64_s = X32FromF64S { dst: XReg, src: FReg };
            /// `low32(dst) = checked_unsigned_from_f64(src)`
            x32_from_f64_u = X32FromF64U { dst: XReg, src: FReg };
            /// `dst = checked_signed_from_f32(low32(src))`
            x64_from_f32_s = X64FromF32S { dst: XReg, src: FReg };
            /// `dst = checked_unsigned_from_f32(low32(src))`
            x64_from_f32_u = X64FromF32U { dst: XReg, src: FReg };
            /// `dst = checked_signed_from_f64(src)`
            x64_from_f64_s = X64FromF64S { dst: XReg, src: FReg };
            /// `dst = checked_unsigned_from_f64(src)`
            x64_from_f64_u = X64FromF64U { dst: XReg, src: FReg };

            /// `low32(dst) = saturating_signed_from_f32(low32(src))`
            x32_from_f32_s_sat = X32FromF32SSat { dst: XReg, src: FReg };
            /// `low32(dst) = saturating_unsigned_from_f32(low32(src))`
            x32_from_f32_u_sat = X32FromF32USat { dst: XReg, src: FReg };
            /// `low32(dst) = saturating_signed_from_f64(src)`
            x32_from_f64_s_sat = X32FromF64SSat { dst: XReg, src: FReg };
            /// `low32(dst) = saturating_unsigned_from_f64(src)`
            x32_from_f64_u_sat = X32FromF64USat { dst: XReg, src: FReg };
            /// `dst = saturating_signed_from_f32(low32(src))`
            x64_from_f32_s_sat = X64FromF32SSat { dst: XReg, src: FReg };
            /// `dst = saturating_unsigned_from_f32(low32(src))`
            x64_from_f32_u_sat = X64FromF32USat { dst: XReg, src: FReg };
            /// `dst = saturating_signed_from_f64(src)`
            x64_from_f64_s_sat = X64FromF64SSat { dst: XReg, src: FReg };
            /// `dst = saturating_unsigned_from_f64(src)`
            x64_from_f64_u_sat = X64FromF64USat { dst: XReg, src: FReg };

            /// `low32(dst) = copysign(low32(src1), low32(src2))`
            fcopysign32 = FCopySign32 { operands: BinaryOperands<FReg> };
            /// `dst = copysign(src1, src2)`
            fcopysign64 = FCopySign64 { operands: BinaryOperands<FReg> };

            /// `low32(dst) = low32(src1) + low32(src2)`
            fadd32 = Fadd32 { operands: BinaryOperands<FReg> };
            /// `low32(dst) = low32(src1) - low32(src2)`
            fsub32 = Fsub32 { operands: BinaryOperands<FReg> };
            /// `low128(dst) = low128(src1) - low128(src2)`
            vsubf32x4 = Vsubf32x4 { operands: BinaryOperands<VReg> };
            /// `low32(dst) = low32(src1) * low32(src2)`
            fmul32 = Fmul32 { operands: BinaryOperands<FReg> };
            /// `low128(dst) = low128(src1) * low128(src2)`
            vmulf32x4 = Vmulf32x4 { operands: BinaryOperands<VReg> };
            /// `low32(dst) = low32(src1) / low32(src2)`
            fdiv32 = Fdiv32 { operands: BinaryOperands<FReg> };
            /// `low128(dst) = low128(src1) / low128(src2)`
            vdivf32x4 = Vdivf32x4 { operands: BinaryOperands<VReg> };
            /// `low32(dst) = ieee_maximum(low32(src1), low32(src2))`
            fmaximum32 = Fmaximum32 { operands: BinaryOperands<FReg> };
            /// `low32(dst) = ieee_minimum(low32(src1), low32(src2))`
            fminimum32 = Fminimum32 { operands: BinaryOperands<FReg> };
            /// `low32(dst) = ieee_trunc(low32(src))`
            ftrunc32 = Ftrunc32 { dst: FReg, src: FReg };
            /// `low128(dst) = ieee_trunc(low128(src))`
            vtrunc32x4 = Vtrunc32x4 { dst: VReg, src: VReg };
            /// `low128(dst) = ieee_trunc(low128(src))`
            vtrunc64x2 = Vtrunc64x2 { dst: VReg, src: VReg };
            /// `low32(dst) = ieee_floor(low32(src))`
            ffloor32 = Ffloor32 { dst: FReg, src: FReg };
            /// `low128(dst) = ieee_floor(low128(src))`
            vfloor32x4 = Vfloor32x4 { dst: VReg, src: VReg };
            /// `low128(dst) = ieee_floor(low128(src))`
            vfloor64x2 = Vfloor64x2 { dst: VReg, src: VReg };
            /// `low32(dst) = ieee_ceil(low32(src))`
            fceil32 = Fceil32 { dst: FReg, src: FReg };
            /// `low128(dst) = ieee_ceil(low128(src))`
            vceil32x4 = Vceil32x4 { dst: VReg, src: VReg };
            /// `low128(dst) = ieee_ceil(low128(src))`
            vceil64x2 = Vceil64x2 { dst: VReg, src: VReg };
            /// `low32(dst) = ieee_nearest(low32(src))`
            fnearest32 = Fnearest32 { dst: FReg, src: FReg };
            /// `low32(dst) = ieee_sqrt(low32(src))`
            fsqrt32 = Fsqrt32 { dst: FReg, src: FReg };
            /// `low128(dst) = ieee_sqrt(low128(src))`
            vsqrt32x4 = Vsqrt32x4 { dst: VReg, src: VReg };
            /// `low128(dst) = ieee_sqrt(low128(src))`
            vsqrt64x2 = Vsqrt64x2 { dst: VReg, src: VReg };
            /// `low32(dst) = -low32(src)`
            fneg32 = Fneg32 { dst: FReg, src: FReg };
            /// `low128(dst) = -low128(src)`
            vnegf32x4 = Vnegf32x4 { dst: VReg, src: VReg };
            /// `low32(dst) = |low32(src)|`
            fabs32 = Fabs32 { dst: FReg, src: FReg };

            /// `dst = src1 + src2`
            fadd64 = Fadd64 { operands: BinaryOperands<FReg> };
            /// `dst = src1 - src2`
            fsub64 = Fsub64 { operands: BinaryOperands<FReg> };
            /// `dst = src1 * src2`
            fmul64 = Fmul64 { operands: BinaryOperands<FReg> };
            /// `dst = src1 / src2`
            fdiv64 = Fdiv64 { operands: BinaryOperands<FReg> };
            /// `dst = src1 / src2`
            vdivf64x2 = VDivF64x2 { operands: BinaryOperands<VReg> };
            /// `dst = ieee_maximum(src1, src2)`
            fmaximum64 = Fmaximum64 { operands: BinaryOperands<FReg> };
            /// `dst = ieee_minimum(src1, src2)`
            fminimum64 = Fminimum64 { operands: BinaryOperands<FReg> };
            /// `dst = ieee_trunc(src)`
            ftrunc64 = Ftrunc64 { dst: FReg, src: FReg };
            /// `dst = ieee_floor(src)`
            ffloor64 = Ffloor64 { dst: FReg, src: FReg };
            /// `dst = ieee_ceil(src)`
            fceil64 = Fceil64 { dst: FReg, src: FReg };
            /// `dst = ieee_nearest(src)`
            fnearest64 = Fnearest64 { dst: FReg, src: FReg };
            /// `low128(dst) = ieee_nearest(low128(src))`
            vnearest32x4 = Vnearest32x4 { dst: VReg, src: VReg };
            /// `low128(dst) = ieee_nearest(low128(src))`
            vnearest64x2 = Vnearest64x2 { dst: VReg, src: VReg };
            /// `dst = ieee_sqrt(src)`
            fsqrt64 = Fsqrt64 { dst: FReg, src: FReg };
            /// `dst = -src`
            fneg64 = Fneg64 { dst: FReg, src: FReg };
            /// `dst = |src|`
            fabs64 = Fabs64 { dst: FReg, src: FReg };

            /// `dst = imm`
            vconst128 = Vconst128 { dst: VReg, imm: u128 };

            /// `dst = src1 + src2`
            vaddi8x16 = VAddI8x16 { operands: BinaryOperands<VReg> };
            /// `dst = src1 + src2`
            vaddi16x8 = VAddI16x8 { operands: BinaryOperands<VReg> };
            /// `dst = src1 + src2`
            vaddi32x4 = VAddI32x4 { operands: BinaryOperands<VReg> };
            /// `dst = src1 + src2`
            vaddi64x2 = VAddI64x2 { operands: BinaryOperands<VReg> };
            /// `dst = src1 + src2`
            vaddf32x4 = VAddF32x4 { operands: BinaryOperands<VReg> };
            /// `dst = src1 + src2`
            vaddf64x2 = VAddF64x2 { operands: BinaryOperands<VReg> };

            /// `dst = saturating_add(src1, src2)` (signed)
            vaddi8x16_sat = VAddI8x16Sat { operands: BinaryOperands<VReg> };
            /// `dst = saturating_add(src1, src2)` (unsigned)
            vaddu8x16_sat = VAddU8x16Sat { operands: BinaryOperands<VReg> };
            /// `dst = saturating_add(src1, src2)` (signed)
            vaddi16x8_sat = VAddI16x8Sat { operands: BinaryOperands<VReg> };
            /// `dst = saturating_add(src1, src2)` (unsigned)
            vaddu16x8_sat = VAddU16x8Sat { operands: BinaryOperands<VReg> };

            /// `dst = [src1[0] + src1[1], ..., src2[6] + src2[7]]`
            vaddpairwisei16x8_s = VAddpairwiseI16x8S { operands: BinaryOperands<VReg> };
            /// `dst = [src1[0] + src1[1], ..., src2[2] + src2[3]]`
            vaddpairwisei32x4_s = VAddpairwiseI32x4S { operands: BinaryOperands<VReg> };

            /// `dst = src1 << src2`
            vshli8x16 = VShlI8x16 { operands: BinaryOperands<VReg, VReg, XReg> };
            /// `dst = src1 << src2`
            vshli16x8 = VShlI16x8 { operands: BinaryOperands<VReg, VReg, XReg> };
            /// `dst = src1 << src2`
            vshli32x4 = VShlI32x4 { operands: BinaryOperands<VReg, VReg, XReg> };
            /// `dst = src1 << src2`
            vshli64x2 = VShlI64x2 { operands: BinaryOperands<VReg, VReg, XReg> };
            /// `dst = src1 >> src2` (signed)
            vshri8x16_s = VShrI8x16S { operands: BinaryOperands<VReg, VReg, XReg> };
            /// `dst = src1 >> src2` (signed)
            vshri16x8_s = VShrI16x8S { operands: BinaryOperands<VReg, VReg, XReg> };
            /// `dst = src1 >> src2` (signed)
            vshri32x4_s = VShrI32x4S { operands: BinaryOperands<VReg, VReg, XReg> };
            /// `dst = src1 >> src2` (signed)
            vshri64x2_s = VShrI64x2S { operands: BinaryOperands<VReg, VReg, XReg> };
            /// `dst = src1 >> src2` (unsigned)
            vshri8x16_u = VShrI8x16U { operands: BinaryOperands<VReg, VReg, XReg> };
            /// `dst = src1 >> src2` (unsigned)
            vshri16x8_u = VShrI16x8U { operands: BinaryOperands<VReg, VReg, XReg> };
            /// `dst = src1 >> src2` (unsigned)
            vshri32x4_u = VShrI32x4U { operands: BinaryOperands<VReg, VReg, XReg> };
            /// `dst = src1 >> src2` (unsigned)
            vshri64x2_u = VShrI64x2U { operands: BinaryOperands<VReg, VReg, XReg> };

            /// `dst = splat(low8(src))`
            vsplatx8 = VSplatX8 { dst: VReg, src: XReg };
            /// `dst = splat(low16(src))`
            vsplatx16 = VSplatX16 { dst: VReg, src: XReg };
            /// `dst = splat(low32(src))`
            vsplatx32 = VSplatX32 { dst: VReg, src: XReg };
            /// `dst = splat(src)`
            vsplatx64 = VSplatX64 { dst: VReg, src: XReg };
            /// `dst = splat(low32(src))`
            vsplatf32 = VSplatF32 { dst: VReg, src: FReg };
            /// `dst = splat(src)`
            vsplatf64 = VSplatF64 { dst: VReg, src: FReg };

            /// Load the 64-bit source as i8x8 and sign-extend to i16x8.
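            ///
            /// For example (illustrative memory contents, in lane order):
            ///
            /// ```text
            /// memory: 01 ff 80 7f 00 02 fe 40
            /// dst:    [1, -1, -128, 127, 0, 2, -2, 64]   ;; i16 lanes
            /// ```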
            vload8x8_s_z = VLoad8x8SZ { dst: VReg, addr: AddrZ };
            /// Load the 64-bit source as u8x8 and zero-extend to i16x8.
            vload8x8_u_z = VLoad8x8UZ { dst: VReg, addr: AddrZ };
            /// Load the 64-bit source as i16x4 and sign-extend to i32x4.
            vload16x4le_s_z = VLoad16x4LeSZ { dst: VReg, addr: AddrZ };
            /// Load the 64-bit source as u16x4 and zero-extend to i32x4.
            vload16x4le_u_z = VLoad16x4LeUZ { dst: VReg, addr: AddrZ };
            /// Load the 64-bit source as i32x2 and sign-extend to i64x2.
            vload32x2le_s_z = VLoad32x2LeSZ { dst: VReg, addr: AddrZ };
            /// Load the 64-bit source as u32x2 and zero-extend to i64x2.
            vload32x2le_u_z = VLoad32x2LeUZ { dst: VReg, addr: AddrZ };

            /// `dst = src1 & src2`
            vband128 = VBand128 { operands: BinaryOperands<VReg> };
            /// `dst = src1 | src2`
            vbor128 = VBor128 { operands: BinaryOperands<VReg> };
            /// `dst = src1 ^ src2`
            vbxor128 = VBxor128 { operands: BinaryOperands<VReg> };
            /// `dst = !src`
            vbnot128 = VBnot128 { dst: VReg, src: VReg };
            /// `dst = (c & x) | (!c & y)`
            vbitselect128 = VBitselect128 { dst: VReg, c: VReg, x: VReg, y: VReg };
            /// Collect the high bit of each lane into the low 32-bits of the
            /// destination.
            vbitmask8x16 = Vbitmask8x16 { dst: XReg, src: VReg };
            /// Collect the high bit of each lane into the low 32-bits of the
            /// destination.
            vbitmask16x8 = Vbitmask16x8 { dst: XReg, src: VReg };
            /// Collect the high bit of each lane into the low 32-bits of the
            /// destination.
            vbitmask32x4 = Vbitmask32x4 { dst: XReg, src: VReg };
            /// Collect the high bit of each lane into the low 32-bits of the
            /// destination.
            vbitmask64x2 = Vbitmask64x2 { dst: XReg, src: VReg };
            /// Store whether all lanes are nonzero in `dst`.
            valltrue8x16 = Valltrue8x16 { dst: XReg, src: VReg };
            /// Store whether all lanes are nonzero in `dst`.
            valltrue16x8 = Valltrue16x8 { dst: XReg, src: VReg };
            /// Store whether all lanes are nonzero in `dst`.
            valltrue32x4 = Valltrue32x4 { dst: XReg, src: VReg };
            /// Store whether all lanes are nonzero in `dst`.
            valltrue64x2 = Valltrue64x2 { dst: XReg, src: VReg };
            /// Store whether any lanes are nonzero in `dst`.
            vanytrue8x16 = Vanytrue8x16 { dst: XReg, src: VReg };
            /// Store whether any lanes are nonzero in `dst`.
            vanytrue16x8 = Vanytrue16x8 { dst: XReg, src: VReg };
            /// Store whether any lanes are nonzero in `dst`.
            vanytrue32x4 = Vanytrue32x4 { dst: XReg, src: VReg };
            /// Store whether any lanes are nonzero in `dst`.
            vanytrue64x2 = Vanytrue64x2 { dst: XReg, src: VReg };

            /// Int-to-float conversion (same as `f32_from_x32_s`).
            vf32x4_from_i32x4_s = VF32x4FromI32x4S { dst: VReg, src: VReg };
            /// Int-to-float conversion (same as `f32_from_x32_u`).
            vf32x4_from_i32x4_u = VF32x4FromI32x4U { dst: VReg, src: VReg };
            /// Int-to-float conversion (same as `f64_from_x64_s`).
            vf64x2_from_i64x2_s = VF64x2FromI64x2S { dst: VReg, src: VReg };
            /// Int-to-float conversion (same as `f64_from_x64_u`).
            vf64x2_from_i64x2_u = VF64x2FromI64x2U { dst: VReg, src: VReg };
            /// Float-to-int conversion (same as `x32_from_f32_s`).
            vi32x4_from_f32x4_s = VI32x4FromF32x4S { dst: VReg, src: VReg };
            /// Float-to-int conversion (same as `x32_from_f32_u`).
            vi32x4_from_f32x4_u = VI32x4FromF32x4U { dst: VReg, src: VReg };
            /// Float-to-int conversion (same as `x64_from_f64_s`).
            vi64x2_from_f64x2_s = VI64x2FromF64x2S { dst: VReg, src: VReg };
            /// Float-to-int conversion (same as `x64_from_f64_u`).
            vi64x2_from_f64x2_u = VI64x2FromF64x2U { dst: VReg, src: VReg };

            /// Widens the low lanes of the input vector, as signed, to twice
            /// the width.
            vwidenlow8x16_s = VWidenLow8x16S { dst: VReg, src: VReg };
            /// Widens the low lanes of the input vector, as unsigned, to
            /// twice the width.
            vwidenlow8x16_u = VWidenLow8x16U { dst: VReg, src: VReg };
            /// Widens the low lanes of the input vector, as signed, to twice
            /// the width.
            vwidenlow16x8_s = VWidenLow16x8S { dst: VReg, src: VReg };
            /// Widens the low lanes of the input vector, as unsigned, to
            /// twice the width.
            vwidenlow16x8_u = VWidenLow16x8U { dst: VReg, src: VReg };
            /// Widens the low lanes of the input vector, as signed, to twice
            /// the width.
            vwidenlow32x4_s = VWidenLow32x4S { dst: VReg, src: VReg };
            /// Widens the low lanes of the input vector, as unsigned, to
            /// twice the width.
            vwidenlow32x4_u = VWidenLow32x4U { dst: VReg, src: VReg };
            /// Widens the high lanes of the input vector, as signed, to twice
            /// the width.
            vwidenhigh8x16_s = VWidenHigh8x16S { dst: VReg, src: VReg };
            /// Widens the high lanes of the input vector, as unsigned, to
            /// twice the width.
            vwidenhigh8x16_u = VWidenHigh8x16U { dst: VReg, src: VReg };
            /// Widens the high lanes of the input vector, as signed, to twice
            /// the width.
            vwidenhigh16x8_s = VWidenHigh16x8S { dst: VReg, src: VReg };
            /// Widens the high lanes of the input vector, as unsigned, to
            /// twice the width.
            vwidenhigh16x8_u = VWidenHigh16x8U { dst: VReg, src: VReg };
            /// Widens the high lanes of the input vector, as signed, to twice
            /// the width.
            vwidenhigh32x4_s = VWidenHigh32x4S { dst: VReg, src: VReg };
            /// Widens the high lanes of the input vector, as unsigned, to
            /// twice the width.
            vwidenhigh32x4_u = VWidenHigh32x4U { dst: VReg, src: VReg };

            /// Narrows the two 16x8 vectors, assuming all input lanes are
            /// signed, to half the width. Narrowing is signed and saturating.
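            ///
            /// For example, an input i16 lane of `300` narrows to `127`, and
            /// `-300` narrows to `-128`.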
            vnarrow16x8_s = Vnarrow16x8S { operands: BinaryOperands<VReg> };
            /// Narrows the two 16x8 vectors, assuming all input lanes are
            /// signed, to half the width. Narrowing is unsigned and
            /// saturating.
            vnarrow16x8_u = Vnarrow16x8U { operands: BinaryOperands<VReg> };
            /// Narrows the two 32x4 vectors, assuming all input lanes are
            /// signed, to half the width. Narrowing is signed and saturating.
            vnarrow32x4_s = Vnarrow32x4S { operands: BinaryOperands<VReg> };
            /// Narrows the two 32x4 vectors, assuming all input lanes are
            /// signed, to half the width. Narrowing is unsigned and
            /// saturating.
            vnarrow32x4_u = Vnarrow32x4U { operands: BinaryOperands<VReg> };
            /// Narrows the two 64x2 vectors, assuming all input lanes are
            /// signed, to half the width. Narrowing is signed and saturating.
            vnarrow64x2_s = Vnarrow64x2S { operands: BinaryOperands<VReg> };
            /// Narrows the two 64x2 vectors, assuming all input lanes are
            /// signed, to half the width. Narrowing is unsigned and
            /// saturating.
            vnarrow64x2_u = Vnarrow64x2U { operands: BinaryOperands<VReg> };
            /// Narrows the two 64x2 vectors, assuming all input lanes are
            /// unsigned, to half the width. Narrowing is unsigned and
            /// saturating.
            vunarrow64x2_u = Vunarrow64x2U { operands: BinaryOperands<VReg> };
            /// Promotes the low two lanes of the f32x4 input to f64x2.
            vfpromotelow = VFpromoteLow { dst: VReg, src: VReg };
            /// Demotes the two f64x2 lanes to f32x2 and then extends with two
            /// more zero lanes.
            vfdemote = VFdemote { dst: VReg, src: VReg };

            /// `dst = src1 - src2`
            vsubi8x16 = VSubI8x16 { operands: BinaryOperands<VReg> };
            /// `dst = src1 - src2`
            vsubi16x8 = VSubI16x8 { operands: BinaryOperands<VReg> };
            /// `dst = src1 - src2`
            vsubi32x4 = VSubI32x4 { operands: BinaryOperands<VReg> };
            /// `dst = src1 - src2`
            vsubi64x2 = VSubI64x2 { operands: BinaryOperands<VReg> };
            /// `dst = src1 - src2`
            vsubf64x2 = VSubF64x2 { operands: BinaryOperands<VReg> };

            /// `dst = saturating_sub(src1, src2)` (signed)
            vsubi8x16_sat = VSubI8x16Sat { operands: BinaryOperands<VReg> };
            /// `dst = saturating_sub(src1, src2)` (unsigned)
            vsubu8x16_sat = VSubU8x16Sat { operands: BinaryOperands<VReg> };
            /// `dst = saturating_sub(src1, src2)` (signed)
            vsubi16x8_sat = VSubI16x8Sat { operands: BinaryOperands<VReg> };
            /// `dst = saturating_sub(src1, src2)` (unsigned)
            vsubu16x8_sat = VSubU16x8Sat { operands: BinaryOperands<VReg> };

            /// `dst = src1 * src2`
            vmuli8x16 = VMulI8x16 { operands: BinaryOperands<VReg> };
            /// `dst = src1 * src2`
            vmuli16x8 = VMulI16x8 { operands: BinaryOperands<VReg> };
            /// `dst = src1 * src2`
            vmuli32x4 = VMulI32x4 { operands: BinaryOperands<VReg> };
            /// `dst = src1 * src2`
            vmuli64x2 = VMulI64x2 { operands: BinaryOperands<VReg> };
            /// `dst = src1 * src2`
            vmulf64x2 = VMulF64x2 { operands: BinaryOperands<VReg> };

            /// `dst = signed_saturate((src1 * src2 + (1 << (Q - 1))) >> Q)`
            ///
            /// Here `Q` is the number of fractional bits: 15 for the 16-bit
            /// lanes of this instruction.
            vqmulrsi16x8 = VQmulrsI16x8 { operands: BinaryOperands<VReg> };

            /// `dst = count_ones(src)`
            vpopcnt8x16 = VPopcnt8x16 { dst: VReg, src: VReg };

            /// `low32(dst) = zext(src[lane])`
            xextractv8x16 = XExtractV8x16 { dst: XReg, src: VReg, lane: u8 };
            /// `low32(dst) = zext(src[lane])`
            xextractv16x8 = XExtractV16x8 { dst: XReg, src: VReg, lane: u8 };
            /// `low32(dst) = src[lane]`
            xextractv32x4 = XExtractV32x4 { dst: XReg, src: VReg, lane: u8 };
            /// `dst = src[lane]`
            xextractv64x2 = XExtractV64x2 { dst: XReg, src: VReg, lane: u8 };
            /// `low32(dst) = src[lane]`
            fextractv32x4 = FExtractV32x4 { dst: FReg, src: VReg, lane: u8 };
            /// `dst = src[lane]`
            fextractv64x2 = FExtractV64x2 { dst: FReg, src: VReg, lane: u8 };

            /// `dst = src1; dst[lane] = src2`
            vinsertx8 = VInsertX8 { operands: BinaryOperands<VReg, VReg, XReg>, lane: u8 };
            /// `dst = src1; dst[lane] = src2`
            vinsertx16 = VInsertX16 { operands: BinaryOperands<VReg, VReg, XReg>, lane: u8 };
            /// `dst = src1; dst[lane] = src2`
            vinsertx32 = VInsertX32 { operands: BinaryOperands<VReg, VReg, XReg>, lane: u8 };
            /// `dst = src1; dst[lane] = src2`
            vinsertx64 = VInsertX64 { operands: BinaryOperands<VReg, VReg, XReg>, lane: u8 };
            /// `dst = src1; dst[lane] = src2`
            vinsertf32 = VInsertF32 { operands: BinaryOperands<VReg, VReg, FReg>, lane: u8 };
            /// `dst = src1; dst[lane] = src2`
            vinsertf64 = VInsertF64 { operands: BinaryOperands<VReg, VReg, FReg>, lane: u8 };

            /// `dst = src1 == src2`
            veq8x16 = Veq8x16 { operands: BinaryOperands<VReg> };
            /// `dst = src1 != src2`
            vneq8x16 = Vneq8x16 { operands: BinaryOperands<VReg> };
            /// `dst = src1 < src2` (signed)
            vslt8x16 = Vslt8x16 { operands: BinaryOperands<VReg> };
            /// `dst = src1 <= src2` (signed)
            vslteq8x16 = Vslteq8x16 { operands: BinaryOperands<VReg> };
            /// `dst = src1 < src2` (unsigned)
            vult8x16 = Vult8x16 { operands: BinaryOperands<VReg> };
            /// `dst = src1 <= src2` (unsigned)
            vulteq8x16 = Vulteq8x16 { operands: BinaryOperands<VReg> };
            /// `dst = src1 == src2`
            veq16x8 = Veq16x8 { operands: BinaryOperands<VReg> };
            /// `dst = src1 != src2`
            vneq16x8 = Vneq16x8 { operands: BinaryOperands<VReg> };
            /// `dst = src1 < src2` (signed)
            vslt16x8 = Vslt16x8 { operands: BinaryOperands<VReg> };
            /// `dst = src1 <= src2` (signed)
            vslteq16x8 = Vslteq16x8 { operands: BinaryOperands<VReg> };
            /// `dst = src1 < src2` (unsigned)
            vult16x8 = Vult16x8 { operands: BinaryOperands<VReg> };
            /// `dst = src1 <= src2` (unsigned)
            vulteq16x8 = Vulteq16x8 { operands: BinaryOperands<VReg> };
            /// `dst = src1 == src2`
            veq32x4 = Veq32x4 { operands: BinaryOperands<VReg> };
            /// `dst = src1 != src2`
            vneq32x4 = Vneq32x4 { operands: BinaryOperands<VReg> };
            /// `dst = src1 < src2` (signed)
            vslt32x4 = Vslt32x4 { operands: BinaryOperands<VReg> };
            /// `dst = src1 <= src2` (signed)
            vslteq32x4 = Vslteq32x4 { operands: BinaryOperands<VReg> };
            /// `dst = src1 < src2` (unsigned)
            vult32x4 = Vult32x4 { operands: BinaryOperands<VReg> };
            /// `dst = src1 <= src2` (unsigned)
            vulteq32x4 = Vulteq32x4 { operands: BinaryOperands<VReg> };
            /// `dst = src1 == src2`
            veq64x2 = Veq64x2 { operands: BinaryOperands<VReg> };
            /// `dst = src1 != src2`
            vneq64x2 = Vneq64x2 { operands: BinaryOperands<VReg> };
            /// `dst = src1 < src2` (signed)
            vslt64x2 = Vslt64x2 { operands: BinaryOperands<VReg> };
            /// `dst = src1 <= src2` (signed)
            vslteq64x2 = Vslteq64x2 { operands: BinaryOperands<VReg> };
            /// `dst = src1 < src2` (unsigned)
            vult64x2 = Vult64x2 { operands: BinaryOperands<VReg> };
            /// `dst = src1 <= src2` (unsigned)
            vulteq64x2 = Vulteq64x2 { operands: BinaryOperands<VReg> };

            /// `dst = -src`
            vneg8x16 = Vneg8x16 { dst: VReg, src: VReg };
            /// `dst = -src`
            vneg16x8 = Vneg16x8 { dst: VReg, src: VReg };
            /// `dst = -src`
            vneg32x4 = Vneg32x4 { dst: VReg, src: VReg };
            /// `dst = -src`
            vneg64x2 = Vneg64x2 { dst: VReg, src: VReg };
            /// `dst = -src`
            vnegf64x2 = VnegF64x2 { dst: VReg, src: VReg };

            /// `dst = min(src1, src2)` (signed)
            vmin8x16_s = Vmin8x16S { operands: BinaryOperands<VReg> };
            /// `dst = min(src1, src2)` (unsigned)
            vmin8x16_u = Vmin8x16U { operands: BinaryOperands<VReg> };
            /// `dst = min(src1, src2)` (signed)
            vmin16x8_s = Vmin16x8S { operands: BinaryOperands<VReg> };
            /// `dst = min(src1, src2)` (unsigned)
            vmin16x8_u = Vmin16x8U { operands: BinaryOperands<VReg> };
            /// `dst = max(src1, src2)` (signed)
            vmax8x16_s = Vmax8x16S { operands: BinaryOperands<VReg> };
            /// `dst = max(src1, src2)` (unsigned)
            vmax8x16_u = Vmax8x16U { operands: BinaryOperands<VReg> };
            /// `dst = max(src1, src2)` (signed)
            vmax16x8_s = Vmax16x8S { operands: BinaryOperands<VReg> };
            /// `dst = max(src1, src2)` (unsigned)
            vmax16x8_u = Vmax16x8U { operands: BinaryOperands<VReg> };

            /// `dst = min(src1, src2)` (signed)
            vmin32x4_s = Vmin32x4S { operands: BinaryOperands<VReg> };
            /// `dst = min(src1, src2)` (unsigned)
            vmin32x4_u = Vmin32x4U { operands: BinaryOperands<VReg> };
            /// `dst = max(src1, src2)` (signed)
            vmax32x4_s = Vmax32x4S { operands: BinaryOperands<VReg> };
            /// `dst = max(src1, src2)` (unsigned)
            vmax32x4_u = Vmax32x4U { operands: BinaryOperands<VReg> };

            /// `dst = |src|`
            vabs8x16 = Vabs8x16 { dst: VReg, src: VReg };
            /// `dst = |src|`
            vabs16x8 = Vabs16x8 { dst: VReg, src: VReg };
            /// `dst = |src|`
            vabs32x4 = Vabs32x4 { dst: VReg, src: VReg };
            /// `dst = |src|`
            vabs64x2 = Vabs64x2 { dst: VReg, src: VReg };

            /// `dst = |src|`
            vabsf32x4 = Vabsf32x4 { dst: VReg, src: VReg };
            /// `dst = |src|`
            vabsf64x2 = Vabsf64x2 { dst: VReg, src: VReg };
            /// `dst = ieee_maximum(src1, src2)`
            vmaximumf32x4 = Vmaximumf32x4 { operands: BinaryOperands<VReg> };
            /// `dst = ieee_maximum(src1, src2)`
            vmaximumf64x2 = Vmaximumf64x2 { operands: BinaryOperands<VReg> };
            /// `dst = ieee_minimum(src1, src2)`
            vminimumf32x4 = Vminimumf32x4 { operands: BinaryOperands<VReg> };
            /// `dst = ieee_minimum(src1, src2)`
            vminimumf64x2 = Vminimumf64x2 { operands: BinaryOperands<VReg> };

            /// `dst = shuffle(src1, src2, mask)`
            vshuffle = VShuffle { dst: VReg, src1: VReg, src2: VReg, mask: u128 };

            /// `dst = swizzle(src1, src2)`
            vswizzlei8x16 = Vswizzlei8x16 { operands: BinaryOperands<VReg> };

            /// `dst = (src1 + src2 + 1) // 2`
            vavground8x16 = Vavground8x16 { operands: BinaryOperands<VReg> };
            /// `dst = (src1 + src2 + 1) // 2`
            vavground16x8 = Vavground16x8 { operands: BinaryOperands<VReg> };

            /// `dst = src1 == src2`
            veqf32x4 = VeqF32x4 { operands: BinaryOperands<VReg> };
            /// `dst = src1 != src2`
            vneqf32x4 = VneqF32x4 { operands: BinaryOperands<VReg> };
            /// `dst = src1 < src2`
            vltf32x4 = VltF32x4 { operands: BinaryOperands<VReg> };
            /// `dst = src1 <= src2`
            vlteqf32x4 = VlteqF32x4 { operands: BinaryOperands<VReg> };
            /// `dst = src1 == src2`
            veqf64x2 = VeqF64x2 { operands: BinaryOperands<VReg> };
            /// `dst = src1 != src2`
            vneqf64x2 = VneqF64x2 { operands: BinaryOperands<VReg> };
            /// `dst = src1 < src2`
            vltf64x2 = VltF64x2 { operands: BinaryOperands<VReg> };
            /// `dst = src1 <= src2`
            vlteqf64x2 = VlteqF64x2 { operands: BinaryOperands<VReg> };

            /// `dst = ieee_fma(a, b, c)`
            vfma32x4 = Vfma32x4 { dst: VReg, a: VReg, b: VReg, c: VReg };
            /// `dst = ieee_fma(a, b, c)`
            vfma64x2 = Vfma64x2 { dst: VReg, a: VReg, b: VReg, c: VReg };

            /// `dst = low32(cond) ? if_nonzero : if_zero`
            vselect = Vselect { dst: VReg, cond: XReg, if_nonzero: VReg, if_zero: VReg };

            /// `dst_hi:dst_lo = lhs_hi:lhs_lo + rhs_hi:rhs_lo`
            xadd128 = Xadd128 {
                dst_lo: XReg,
                dst_hi: XReg,
                lhs_lo: XReg,
                lhs_hi: XReg,
                rhs_lo: XReg,
                rhs_hi: XReg
            };
            /// `dst_hi:dst_lo = lhs_hi:lhs_lo - rhs_hi:rhs_lo`
            xsub128 = Xsub128 {
                dst_lo: XReg,
                dst_hi: XReg,
                lhs_lo: XReg,
                lhs_hi: XReg,
                rhs_lo: XReg,
                rhs_hi: XReg
            };
            /// `dst_hi:dst_lo = sext(lhs) * sext(rhs)`
            xwidemul64_s = Xwidemul64S {
                dst_lo: XReg,
                dst_hi: XReg,
                lhs: XReg,
                rhs: XReg
            };
            /// `dst_hi:dst_lo = zext(lhs) * zext(rhs)`
            xwidemul64_u = Xwidemul64U {
                dst_lo: XReg,
                dst_hi: XReg,
                lhs: XReg,
                rhs: XReg
            };
        }
    };
}

#[cfg(feature = "decode")]
pub mod decode;
#[cfg(feature = "disas")]
pub mod disas;
#[cfg(feature = "encode")]
pub mod encode;
#[cfg(feature = "interp")]
pub mod interp;
#[cfg(feature = "profile")]
pub mod profile;
#[cfg(all(not(feature = "profile"), feature = "interp"))]
mod profile_disabled;
#[cfg(all(not(feature = "profile"), feature = "interp"))]
use profile_disabled as profile;

pub mod regs;
pub use regs::*;

pub mod imms;
pub use imms::*;

pub mod op;
pub use op::*;

pub mod opcode;
pub use opcode::*;
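// A small helper shared by the encoder and decoder: in debug builds this
// panics so that bugs surface loudly, while in release builds it lowers to
// `core::hint::unreachable_unchecked` for performance. Callers must guarantee
// that this point is truly unreachable, as with the standard intrinsic.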
#[cfg(any(feature = "encode", feature = "decode"))]
pub(crate) unsafe fn unreachable_unchecked() -> ! {
    #[cfg(debug_assertions)]
    unreachable!();

    #[cfg(not(debug_assertions))]
    unsafe {
        core::hint::unreachable_unchecked()
    }
}