GitHub Repository: bytecodealliance/wasmtime
Path: blob/main/cranelift/assembler-x64/meta/src/instructions/add.rs
use crate::dsl::{Customization::*, Feature::*, Inst, Length::*, Location::*, TupleType::*};
use crate::dsl::{align, evex, fmt, inst, r, rex, rw, sxl, sxq, vex, w};

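/// `add`-family instruction definitions: plain `add`, add-with-carry (`adc`),
/// their `LOCK`-prefixed memory forms, `lock xadd`, and the SSE/AVX/AVX-512
/// vector adds. Each entry pairs an assembly format with its encoding and the
/// CPU feature sets (`_64b`, `compat`, `sse`, `avx`, ...) that gate it.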
#[rustfmt::skip] // Keeps instructions on a single line.
pub fn list() -> Vec<Inst> {
    vec![
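        // Format names follow the Intel SDM operand-encoding tables: "I" pairs
        // the accumulator (al/ax/eax/rax) with an immediate, "MI" a ModRM r/m
        // destination with an immediate, "MR" an r/m destination with a
        // register source, and "RM" the reverse. `digit(n)` is the SDM's `/n`
        // ModRM opcode extension; `ib`/`iw`/`id` append an 8/16/32-bit
        // immediate; `sxl`/`sxq` mark immediates the CPU sign-extends to
        // 32/64 bits (e.g. the single-byte `0x83` forms).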
7
inst("addb", fmt("I", [rw(al), r(imm8)]), rex(0x4).ib(), _64b | compat),
8
inst("addw", fmt("I", [rw(ax), r(imm16)]), rex([0x66, 0x5]).iw(), _64b | compat),
9
inst("addl", fmt("I", [rw(eax), r(imm32)]), rex(0x5).id(), _64b | compat),
10
inst("addq", fmt("I_SXL", [rw(rax), sxq(imm32)]), rex(0x5).w().id(), _64b),
11
inst("addb", fmt("MI", [rw(rm8), r(imm8)]), rex(0x80).digit(0).ib(), _64b | compat),
12
inst("addw", fmt("MI", [rw(rm16), r(imm16)]), rex([0x66, 0x81]).digit(0).iw(), _64b | compat),
13
inst("addl", fmt("MI", [rw(rm32), r(imm32)]), rex(0x81).digit(0).id(), _64b | compat),
14
inst("addq", fmt("MI_SXL", [rw(rm64), sxq(imm32)]), rex(0x81).w().digit(0).id(), _64b),
15
inst("addl", fmt("MI_SXB", [rw(rm32), sxl(imm8)]), rex(0x83).digit(0).ib(), _64b | compat),
16
inst("addq", fmt("MI_SXB", [rw(rm64), sxq(imm8)]), rex(0x83).w().digit(0).ib(), _64b),
17
inst("addb", fmt("MR", [rw(rm8), r(r8)]), rex(0x0).r(), _64b | compat),
18
inst("addw", fmt("MR", [rw(rm16), r(r16)]), rex([0x66, 0x1]).r(), _64b | compat),
19
inst("addl", fmt("MR", [rw(rm32), r(r32)]), rex(0x1).r(), _64b | compat),
20
inst("addq", fmt("MR", [rw(rm64), r(r64)]), rex(0x1).w().r(), _64b),
21
inst("addb", fmt("RM", [rw(r8), r(rm8)]), rex(0x2).r(), _64b | compat),
22
inst("addw", fmt("RM", [rw(r16), r(rm16)]), rex([0x66, 0x3]).r(), _64b | compat),
23
inst("addl", fmt("RM", [rw(r32), r(rm32)]), rex(0x3).r(), _64b | compat),
24
inst("addq", fmt("RM", [rw(r64), r(rm64)]), rex(0x3).w().r(), _64b),
25
// Add with carry.
26
inst("adcb", fmt("I", [rw(al), r(imm8)]), rex(0x14).ib(), _64b | compat),
27
inst("adcw", fmt("I", [rw(ax), r(imm16)]), rex([0x66, 0x15]).iw(), _64b | compat),
28
inst("adcl", fmt("I", [rw(eax), r(imm32)]), rex(0x15).id(), _64b | compat),
29
inst("adcq", fmt("I_SXL", [rw(rax), sxq(imm32)]), rex(0x15).w().id(), _64b),
30
inst("adcb", fmt("MI", [rw(rm8), r(imm8)]), rex(0x80).digit(2).ib(), _64b | compat),
31
inst("adcw", fmt("MI", [rw(rm16), r(imm16)]), rex([0x66, 0x81]).digit(2).iw(), _64b | compat),
32
inst("adcl", fmt("MI", [rw(rm32), r(imm32)]), rex(0x81).digit(2).id(), _64b | compat),
33
inst("adcq", fmt("MI_SXL", [rw(rm64), sxq(imm32)]), rex(0x81).w().digit(2).id(), _64b),
34
inst("adcl", fmt("MI_SXB", [rw(rm32), sxl(imm8)]), rex(0x83).digit(2).ib(), _64b | compat),
35
inst("adcq", fmt("MI_SXB", [rw(rm64), sxq(imm8)]), rex(0x83).w().digit(2).ib(), _64b),
36
inst("adcb", fmt("MR", [rw(rm8), r(r8)]), rex(0x10).r(), _64b | compat),
37
inst("adcw", fmt("MR", [rw(rm16), r(r16)]), rex([0x66, 0x11]).r(), _64b | compat),
38
inst("adcl", fmt("MR", [rw(rm32), r(r32)]), rex(0x11).r(), _64b | compat),
39
inst("adcq", fmt("MR", [rw(rm64), r(r64)]), rex(0x11).w().r(), _64b),
40
inst("adcb", fmt("RM", [rw(r8), r(rm8)]), rex(0x12).r(), _64b | compat),
41
inst("adcw", fmt("RM", [rw(r16), r(rm16)]), rex([0x66, 0x13]).r(), _64b | compat),
42
inst("adcl", fmt("RM", [rw(r32), r(rm32)]), rex(0x13).r(), _64b | compat),
43
inst("adcq", fmt("RM", [rw(r64), r(rm64)]), rex(0x13).w().r(), _64b),
44
// `LOCK`-prefixed memory-writing instructions.
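        // 0xf0 is the `LOCK` prefix byte; these forms only accept memory
        // destinations (`m8`..`m64`), so the read-modify-write is atomic.
        // `custom(Mnemonic)` defers mnemonic printing to hand-written code,
        // presumably so these display as `lock add`/`lock adc` rather than
        // `lock_add`/`lock_adc`.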
        inst("lock_addb", fmt("MI", [rw(m8), r(imm8)]), rex([0xf0, 0x80]).digit(0).ib(), _64b | compat).custom(Mnemonic),
        inst("lock_addw", fmt("MI", [rw(m16), r(imm16)]), rex([0xf0, 0x66, 0x81]).digit(0).iw(), _64b | compat).custom(Mnemonic),
        inst("lock_addl", fmt("MI", [rw(m32), r(imm32)]), rex([0xf0, 0x81]).digit(0).id(), _64b | compat).custom(Mnemonic),
        inst("lock_addq", fmt("MI_SXL", [rw(m64), sxq(imm32)]), rex([0xf0, 0x81]).w().digit(0).id(), _64b).custom(Mnemonic),
        inst("lock_addl", fmt("MI_SXB", [rw(m32), sxl(imm8)]), rex([0xf0, 0x83]).digit(0).ib(), _64b | compat).custom(Mnemonic),
        inst("lock_addq", fmt("MI_SXB", [rw(m64), sxq(imm8)]), rex([0xf0, 0x83]).w().digit(0).ib(), _64b).custom(Mnemonic),
        inst("lock_addb", fmt("MR", [rw(m8), r(r8)]), rex([0xf0, 0x0]).r(), _64b | compat).custom(Mnemonic),
        inst("lock_addw", fmt("MR", [rw(m16), r(r16)]), rex([0xf0, 0x66, 0x1]).r(), _64b | compat).custom(Mnemonic),
        inst("lock_addl", fmt("MR", [rw(m32), r(r32)]), rex([0xf0, 0x1]).r(), _64b | compat).custom(Mnemonic),
        inst("lock_addq", fmt("MR", [rw(m64), r(r64)]), rex([0xf0, 0x1]).w().r(), _64b).custom(Mnemonic),
        inst("lock_adcb", fmt("MI", [rw(m8), r(imm8)]), rex([0xf0, 0x80]).digit(2).ib(), _64b | compat).custom(Mnemonic),
        inst("lock_adcw", fmt("MI", [rw(m16), r(imm16)]), rex([0xf0, 0x66, 0x81]).digit(2).iw(), _64b | compat).custom(Mnemonic),
        inst("lock_adcl", fmt("MI", [rw(m32), r(imm32)]), rex([0xf0, 0x81]).digit(2).id(), _64b | compat).custom(Mnemonic),
        inst("lock_adcq", fmt("MI_SXL", [rw(m64), sxq(imm32)]), rex([0xf0, 0x81]).w().digit(2).id(), _64b).custom(Mnemonic),
        inst("lock_adcl", fmt("MI_SXB", [rw(m32), sxl(imm8)]), rex([0xf0, 0x83]).digit(2).ib(), _64b | compat).custom(Mnemonic),
        inst("lock_adcq", fmt("MI_SXB", [rw(m64), sxq(imm8)]), rex([0xf0, 0x83]).w().digit(2).ib(), _64b).custom(Mnemonic),
        inst("lock_adcb", fmt("MR", [rw(m8), r(r8)]), rex([0xf0, 0x10]).r(), _64b | compat).custom(Mnemonic),
        inst("lock_adcw", fmt("MR", [rw(m16), r(r16)]), rex([0xf0, 0x66, 0x11]).r(), _64b | compat).custom(Mnemonic),
        inst("lock_adcl", fmt("MR", [rw(m32), r(r32)]), rex([0xf0, 0x11]).r(), _64b | compat).custom(Mnemonic),
        inst("lock_adcq", fmt("MR", [rw(m64), r(r64)]), rex([0xf0, 0x11]).w().r(), _64b).custom(Mnemonic),
        // `LOCK`-prefixed xadd
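        // `XADD` writes the sum to memory and the original memory value back
        // into the source register, which is why both operands are `rw`;
        // `Visit` presumably marks that operand visiting (register
        // allocation) is also hand-written for these entries.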
        inst("lock_xaddb", fmt("MR", [rw(m8), rw(r8)]), rex([0xf0, 0x0f, 0xc0]).r(), _64b | compat).custom(Mnemonic | Visit),
        inst("lock_xaddw", fmt("MR", [rw(m16), rw(r16)]), rex([0xf0, 0x66, 0x0f, 0xc1]).r(), _64b | compat).custom(Mnemonic | Visit),
        inst("lock_xaddl", fmt("MR", [rw(m32), rw(r32)]), rex([0xf0, 0x0f, 0xc1]).r(), _64b | compat).custom(Mnemonic | Visit),
        inst("lock_xaddq", fmt("MR", [rw(m64), rw(r64)]), rex([0xf0, 0x0f, 0xc1]).w().r(), _64b).custom(Mnemonic | Visit),
        // Vector instructions.
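        // Legacy SSE forms: a 128-bit memory operand must be 16-byte aligned
        // (hence `align(xmm_m128)`), and each entry names an AVX alternative
        // via `.alt(avx, ...)` so that the VEX encoding can be used when AVX
        // is available. The VEX forms below drop the alignment requirement
        // and take a third, non-destructive source operand.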
        inst("addss", fmt("A", [rw(xmm1), r(xmm_m32)]), rex([0xF3, 0x0F, 0x58]).r(), (_64b | compat) & sse).alt(avx, "vaddss_b"),
        inst("addsd", fmt("A", [rw(xmm1), r(xmm_m64)]), rex([0xF2, 0x0F, 0x58]).r(), (_64b | compat) & sse2).alt(avx, "vaddsd_b"),
        inst("addps", fmt("A", [rw(xmm1), r(align(xmm_m128))]), rex([0x0F, 0x58]).r(), (_64b | compat) & sse).alt(avx, "vaddps_b"),
        inst("addpd", fmt("A", [rw(xmm1), r(align(xmm_m128))]), rex([0x66, 0x0F, 0x58]).r(), (_64b | compat) & sse2).alt(avx, "vaddpd_b"),
        inst("paddb", fmt("A", [rw(xmm1), r(align(xmm_m128))]), rex([0x66, 0x0F, 0xFC]).r(), (_64b | compat) & sse2).alt(avx, "vpaddb_b"),
        inst("paddw", fmt("A", [rw(xmm1), r(align(xmm_m128))]), rex([0x66, 0x0F, 0xFD]).r(), (_64b | compat) & sse2).alt(avx, "vpaddw_b"),
        inst("paddd", fmt("A", [rw(xmm1), r(align(xmm_m128))]), rex([0x66, 0x0F, 0xFE]).r(), (_64b | compat) & sse2).alt(avx, "vpaddd_b"),
        inst("paddq", fmt("A", [rw(xmm1), r(align(xmm_m128))]), rex([0x66, 0x0F, 0xD4]).r(), (_64b | compat) & sse2).alt(avx, "vpaddq_b"),
        inst("paddsb", fmt("A", [rw(xmm1), r(align(xmm_m128))]), rex([0x66, 0x0F, 0xEC]).r(), (_64b | compat) & sse2).alt(avx, "vpaddsb_b"),
        inst("paddsw", fmt("A", [rw(xmm1), r(align(xmm_m128))]), rex([0x66, 0x0F, 0xED]).r(), (_64b | compat) & sse2).alt(avx, "vpaddsw_b"),
        inst("paddusb", fmt("A", [rw(xmm1), r(align(xmm_m128))]), rex([0x66, 0x0F, 0xDC]).r(), (_64b | compat) & sse2).alt(avx, "vpaddusb_b"),
        inst("paddusw", fmt("A", [rw(xmm1), r(align(xmm_m128))]), rex([0x66, 0x0F, 0xDD]).r(), (_64b | compat) & sse2).alt(avx, "vpaddusw_b"),
        inst("phaddw", fmt("A", [rw(xmm1), r(align(xmm_m128))]), rex([0x66, 0x0F, 0x38, 0x01]).r(), (_64b | compat) & ssse3).alt(avx, "vphaddw_b"),
        inst("phaddd", fmt("A", [rw(xmm1), r(align(xmm_m128))]), rex([0x66, 0x0F, 0x38, 0x02]).r(), (_64b | compat) & ssse3).alt(avx, "vphaddd_b"),
        inst("vaddss", fmt("B", [w(xmm1), r(xmm2), r(xmm_m32)]), vex(L128)._f3()._0f().op(0x58).r(), (_64b | compat) & avx),
        inst("vaddsd", fmt("B", [w(xmm1), r(xmm2), r(xmm_m64)]), vex(L128)._f2()._0f().op(0x58).r(), (_64b | compat) & avx),
        inst("vaddps", fmt("B", [w(xmm1), r(xmm2), r(xmm_m128)]), vex(L128)._0f().op(0x58).r(), (_64b | compat) & avx),
        inst("vaddpd", fmt("B", [w(xmm1), r(xmm2), r(xmm_m128)]), vex(L128)._66()._0f().op(0x58).r(), (_64b | compat) & avx),
        inst("vpaddb", fmt("B", [w(xmm1), r(xmm2), r(xmm_m128)]), vex(L128)._66()._0f().op(0xFC).r(), (_64b | compat) & avx),
        inst("vpaddw", fmt("B", [w(xmm1), r(xmm2), r(xmm_m128)]), vex(L128)._66()._0f().op(0xFD).r(), (_64b | compat) & avx),
        inst("vpaddd", fmt("B", [w(xmm1), r(xmm2), r(xmm_m128)]), vex(L128)._66()._0f().op(0xFE).r(), (_64b | compat) & avx),
        inst("vpaddq", fmt("B", [w(xmm1), r(xmm2), r(xmm_m128)]), vex(L128)._66()._0f().op(0xD4).r(), (_64b | compat) & avx),
        inst("vpaddsb", fmt("B", [w(xmm1), r(xmm2), r(xmm_m128)]), vex(L128)._66()._0f().op(0xEC).r(), (_64b | compat) & avx),
        inst("vpaddsw", fmt("B", [w(xmm1), r(xmm2), r(xmm_m128)]), vex(L128)._66()._0f().op(0xED).r(), (_64b | compat) & avx),
        inst("vpaddusb", fmt("B", [w(xmm1), r(xmm2), r(xmm_m128)]), vex(L128)._66()._0f().op(0xDC).r(), (_64b | compat) & avx),
        inst("vpaddusw", fmt("B", [w(xmm1), r(xmm2), r(xmm_m128)]), vex(L128)._66()._0f().op(0xDD).r(), (_64b | compat) & avx),
        inst("vphaddw", fmt("B", [w(xmm1), r(xmm2), r(xmm_m128)]), vex(L128)._66()._0f38().op(0x01).r(), (_64b | compat) & avx),
        inst("vphaddd", fmt("B", [w(xmm1), r(xmm2), r(xmm_m128)]), vex(L128)._66()._0f38().op(0x02).r(), (_64b | compat) & avx),
        inst("vaddpd", fmt("C", [w(xmm1), r(xmm2), r(xmm_m128)]), evex(L128, Full)._66()._0f().w1().op(0x58).r(), (_64b | compat) & avx512vl),
    ]
}