GitHub Repository: bytecodealliance/wasmtime
Path: blob/main/cranelift/assembler-x64/meta/src/instructions/mov.rs
use crate::dsl::{Feature::*, Inst, Length::*, Location::*};
use crate::dsl::{align, fmt, inst, r, rex, rw, sxl, sxq, sxw, vex, w};

#[rustfmt::skip] // Keeps instructions on a single line.
pub fn list() -> Vec<Inst> {
    vec![
        // Move integers to and from GPR and memory locations. Note that, in
        // 64-bit mode, `r/m8` cannot be encoded to access the following byte
        // registers if a REX prefix is used: AH, BH, CH, DH. Only moves that
        // overwrite all 64 bits are considered "write-only"; smaller-width
        // moves indicate that upper bits are preserved by being "read-write."
        inst("movb", fmt("MR", [w(rm8), r(r8)]), rex(0x88).r(), _64b | compat),
        inst("movw", fmt("MR", [w(rm16), r(r16)]), rex([0x66, 0x89]).r(), _64b | compat),
        inst("movl", fmt("MR", [w(rm32), r(r32)]), rex(0x89).r(), _64b | compat),
        inst("movq", fmt("MR", [w(rm64), r(r64)]), rex(0x89).w().r(), _64b),
        inst("movb", fmt("RM", [w(r8), r(rm8)]), rex(0x8A).r(), _64b | compat),
        inst("movw", fmt("RM", [w(r16), r(rm16)]), rex([0x66, 0x8B]).r(), _64b | compat),
        inst("movl", fmt("RM", [w(r32), r(rm32)]), rex(0x8B).r(), _64b | compat),
        inst("movq", fmt("RM", [w(r64), r(rm64)]), rex(0x8B).w().r(), _64b),
        inst("movb", fmt("OI", [w(r8), r(imm8)]), rex(0xB0).rb().ib(), _64b | compat),
        inst("movw", fmt("OI", [w(r16), r(imm16)]), rex([0x66, 0xB8]).rw().iw(), _64b | compat),
        inst("movl", fmt("OI", [w(r32), r(imm32)]), rex(0xB8).rd().id(), _64b | compat),
        // Capstone disassembles this (and only this) slightly differently.
        inst("movabsq", fmt("OI", [w(r64), r(imm64)]), rex(0xB8).w().ro().io(), _64b),
        inst("movb", fmt("MI", [w(rm8), r(imm8)]), rex(0xC6).digit(0).ib(), _64b | compat),
        inst("movw", fmt("MI", [w(rm16), r(imm16)]), rex([0x66, 0xC7]).digit(0).iw(), _64b | compat),
        inst("movl", fmt("MI", [w(rm32), r(imm32)]), rex(0xC7).digit(0).id(), _64b | compat),
        inst("movq", fmt("MI_SXL", [w(rm64), sxq(imm32)]), rex(0xC7).w().digit(0).id(), _64b),
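
        // As an aside, a minimal sketch (not part of this DSL) of the width
        // behavior described above, in Rust terms: a full-width `movq` replaces
        // the entire 64-bit destination, while a narrower move such as `movw`
        // only touches its low bits and leaves the rest as it was.
        //
        //     let dst: u64 = 0xAAAA_AAAA_AAAA_AAAA;
        //     let movq_result: u64 = 0x1111_2222_3333_4444;       // all 64 bits overwritten
        //     let movw_result: u64 = (dst & !0xFFFFu64) | 0x4444; // bits 63:16 preserved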

        // Move integers with sign extension. These are defined as `movsx` in
        // the x64 reference manual but Capstone (and likely other tools)
        // disassembles them as `movs{from}{to}`.
        inst("movsbw", fmt("RM", [w(r16), sxw(rm8)]), rex([0x66, 0x0F, 0xBE]).r(), _64b | compat),
        inst("movsbl", fmt("RM", [w(r32), sxl(rm8)]), rex([0x0F, 0xBE]).r(), _64b | compat),
        inst("movsbq", fmt("RM", [w(r64), sxq(rm8)]), rex([0x0F, 0xBE]).w().r(), _64b),
        inst("movsww", fmt("RM", [w(r16), sxl(rm16)]), rex([0x66, 0x0F, 0xBF]).r(), _64b | compat),
        inst("movswl", fmt("RM", [w(r32), sxl(rm16)]), rex([0x0F, 0xBF]).r(), _64b | compat),
        inst("movswq", fmt("RM", [w(r64), sxq(rm16)]), rex([0x0F, 0xBF]).w().r(), _64b),
        inst("movslq", fmt("RM", [w(r64), sxl(rm32)]), rex(0x63).w().r(), _64b),
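
        // A small illustration (not part of this DSL) of the sign extension these
        // instructions perform, e.g. `movsbl` widening a byte to a doubleword:
        //
        //     let src: i8 = -1;          // 0xFF
        //     let dst: i32 = src as i32; // 0xFFFF_FFFF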

        // Move integers with zero extension. These are defined as `movzx` in
        // the x64 reference manual but Capstone (and likely other tools)
        // disassembles them as `movz{from}{to}`.
        inst("movzbw", fmt("RM", [w(r16), sxw(rm8)]), rex([0x66, 0x0F, 0xB6]).r(), _64b | compat),
        inst("movzbl", fmt("RM", [w(r32), sxl(rm8)]), rex([0x0F, 0xB6]).r(), _64b | compat),
        inst("movzbq", fmt("RM", [w(r64), sxq(rm8)]), rex([0x0F, 0xB6]).w().r(), _64b),
        inst("movzww", fmt("RM", [w(r16), sxl(rm16)]), rex([0x66, 0x0F, 0xB7]).r(), _64b | compat),
        inst("movzwl", fmt("RM", [w(r32), sxl(rm16)]), rex([0x0F, 0xB7]).r(), _64b | compat),
        inst("movzwq", fmt("RM", [w(r64), sxq(rm16)]), rex([0x0F, 0xB7]).w().r(), _64b),
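
        // The zero-extending counterpart of the sketch above, e.g. `movzbl`:
        //
        //     let src: u8 = 0xFF;
        //     let dst: u32 = src as u32; // 0x0000_00FF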

        // Move integers between GPR and XMM locations. From the reference
        // manual: "when the destination operand is an XMM register, the source
        // operand is written to the low doubleword of the register, and the
        // register is zero-extended to 128 bits."
        inst("movd", fmt("A", [w(xmm1), r(rm32)]), rex([0x66, 0x0F, 0x6E]).r(), (_64b | compat) & sse2),
        inst("movq", fmt("A", [w(xmm1), r(rm64)]), rex([0x66, 0x0F, 0x6E]).r().w(), _64b & sse2),
        inst("movd", fmt("B", [w(rm32), r(xmm2)]), rex([0x66, 0x0F, 0x7E]).r(), (_64b | compat) & sse2),
        inst("movq", fmt("B", [w(rm64), r(xmm2)]), rex([0x66, 0x0F, 0x7E]).r().w(), _64b & sse2),
        inst("vmovd", fmt("A", [w(xmm1), r(rm32)]), vex(L128)._66()._0f().w0().op(0x6E).r(), (_64b | compat) & avx),
        inst("vmovq", fmt("A", [w(xmm1), r(rm64)]), vex(L128)._66()._0f().w1().op(0x6E).r(), _64b & avx),
        inst("vmovd", fmt("B", [w(rm32), r(xmm2)]), vex(L128)._66()._0f().w0().op(0x7E).r(), (_64b | compat) & avx),
        inst("vmovq", fmt("B", [w(rm64), r(xmm2)]), vex(L128)._66()._0f().w1().op(0x7E).r(), _64b & avx),
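
        // A small sketch (not part of this DSL) of the zero extension quoted
        // above: loading a 32-bit value into an XMM register with `movd` clears
        // the remaining 96 bits of the destination.
        //
        //     let src: u32 = 0xDEAD_BEEF;
        //     let xmm0: u128 = src as u128; // 0x00000000_00000000_00000000_DEADBEEF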

        // Move floating-point values to and from XMM locations. Some
        // memory-loading versions of `movs*` clear the upper bits of the XMM
        // destination.
        //
        // Note that `movss` and `movsd` have only "A" and "C" modes listed in
        // the Intel manual, but here they're split into "*_M" and "*_R" to
        // model the different regalloc behavior each one has. Notably, the
        // memory-using variant does the usual read or write of memory,
        // depending on the instruction, but the "*_R" variant both reads and
        // writes the destination register because the upper bits are preserved.
        //
        // Additionally, "C_R" is not specified here since it's not needed
        // beyond the "A_R" variant, and it would not be encoded correctly: the
        // destination must be modeled in the ModRM:r/m byte, not the ModRM:reg
        // byte. Currently our encoding based on format doesn't account for
        // this special case, so it's simply dropped here.
        inst("movss", fmt("A_M", [w(xmm1), r(m32)]), rex([0xF3, 0x0F, 0x10]).r(), compat | _64b | sse).alt(avx, "vmovss_d"),
        inst("movss", fmt("A_R", [rw(xmm1), r(xmm2)]), rex([0xF3, 0x0F, 0x10]).r(), compat | _64b | sse).alt(avx, "vmovss_b"),
        inst("movss", fmt("C_M", [w(m32), r(xmm1)]), rex([0xF3, 0x0F, 0x11]).r(), compat | _64b | sse).alt(avx, "vmovss_c_m"),
        inst("movsd", fmt("A_M", [w(xmm1), r(m64)]), rex([0xF2, 0x0F, 0x10]).r(), compat | _64b | sse2).alt(avx, "vmovsd_d"),
        inst("movsd", fmt("A_R", [rw(xmm1), r(xmm2)]), rex([0xF2, 0x0F, 0x10]).r(), compat | _64b | sse2).alt(avx, "vmovsd_b"),
        inst("movsd", fmt("C_M", [w(m64), r(xmm1)]), rex([0xF2, 0x0F, 0x11]).r(), compat | _64b | sse2).alt(avx, "vmovsd_c_m"),
        inst("vmovss", fmt("D", [w(xmm1), r(m32)]), vex(LIG)._f3()._0f().op(0x10).r(), compat | _64b | avx),
        inst("vmovss", fmt("B", [w(xmm1), r(xmm2), r(xmm3)]), vex(LIG)._f3()._0f().op(0x10).r(), compat | _64b | avx),
        inst("vmovss", fmt("C_M", [w(m32), r(xmm1)]), vex(LIG)._f3()._0f().op(0x11).r(), compat | _64b | avx),
        inst("vmovsd", fmt("D", [w(xmm1), r(m64)]), vex(LIG)._f2()._0f().op(0x10).r(), compat | _64b | avx),
        inst("vmovsd", fmt("B", [w(xmm1), r(xmm2), r(xmm3)]), vex(LIG)._f2()._0f().op(0x10).r(), compat | _64b | avx),
        inst("vmovsd", fmt("C_M", [w(m64), r(xmm1)]), vex(LIG)._f2()._0f().op(0x11).r(), compat | _64b | avx),
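
        // For example, the register-to-register "A_R" form of `movss` copies only
        // bits 31:0 and leaves bits 127:32 of the destination untouched, which is
        // why its destination is marked read-write. A rough sketch in Rust terms:
        //
        //     let dst: u128 = 0xAAAA_AAAA_AAAA_AAAA_AAAA_AAAA_AAAA_AAAA;
        //     let src: u128 = 0x1111_1111;
        //     let result = (dst & !0xFFFF_FFFFu128) | (src & 0xFFFF_FFFF);
        //     // result = 0xAAAA_AAAA_AAAA_AAAA_AAAA_AAAA_1111_1111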

        // Move aligned 128-bit values to and from XMM locations.
        inst("movapd", fmt("A", [w(xmm1), r(align(xmm_m128))]), rex([0x66, 0x0F, 0x28]).r(), compat | _64b | sse2).alt(avx, "vmovapd_a"),
        inst("movapd", fmt("B", [w(align(xmm_m128)), r(xmm1)]), rex([0x66, 0x0F, 0x29]).r(), compat | _64b | sse2).alt(avx, "vmovapd_b"),
        inst("movaps", fmt("A", [w(xmm1), r(align(xmm_m128))]), rex([0x0F, 0x28]).r(), compat | _64b | sse).alt(avx, "vmovaps_a"),
        inst("movaps", fmt("B", [w(align(xmm_m128)), r(xmm1)]), rex([0x0F, 0x29]).r(), compat | _64b | sse).alt(avx, "vmovaps_b"),
        inst("movdqa", fmt("A", [w(xmm1), r(align(xmm_m128))]), rex([0x66, 0x0F, 0x6F]).r(), compat | _64b | sse2).alt(avx, "vmovdqa_a"),
        inst("movdqa", fmt("B", [w(align(xmm_m128)), r(xmm1)]), rex([0x66, 0x0F, 0x7F]).r(), compat | _64b | sse2).alt(avx, "vmovdqa_b"),
        inst("vmovapd", fmt("A", [w(xmm1), r(align(xmm_m128))]), vex(L128)._66()._0f().op(0x28).r(), compat | _64b | avx),
        inst("vmovapd", fmt("B", [w(align(xmm_m128)), r(xmm1)]), vex(L128)._66()._0f().op(0x29).r(), compat | _64b | avx),
        inst("vmovaps", fmt("A", [w(xmm1), r(align(xmm_m128))]), vex(L128)._0f().op(0x28).r(), compat | _64b | avx),
        inst("vmovaps", fmt("B", [w(align(xmm_m128)), r(xmm1)]), vex(L128)._0f().op(0x29).r(), compat | _64b | avx),
        inst("vmovdqa", fmt("A", [w(xmm1), r(align(xmm_m128))]), vex(L128)._66()._0f().op(0x6F).r(), compat | _64b | avx),
        inst("vmovdqa", fmt("B", [w(align(xmm_m128)), r(xmm1)]), vex(L128)._66()._0f().op(0x7F).r(), compat | _64b | avx),
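
        // These aligned forms fault (#GP) when the memory operand is not 16-byte
        // aligned, which is presumably what the `align(...)` wrapper models. A
        // trivial sketch of the alignment condition, for illustration only:
        //
        //     fn movaps_addr_ok(addr: u64) -> bool { addr % 16 == 0 }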

        // Move unaligned 128-bit values to and from XMM locations.
        inst("movupd", fmt("A", [w(xmm1), r(xmm_m128)]), rex([0x66, 0x0F, 0x10]).r(), compat | _64b | sse2).alt(avx, "vmovupd_a"),
        inst("movupd", fmt("B", [w(xmm_m128), r(xmm1)]), rex([0x66, 0x0F, 0x11]).r(), compat | _64b | sse2).alt(avx, "vmovupd_b"),
        inst("movups", fmt("A", [w(xmm1), r(xmm_m128)]), rex([0x0F, 0x10]).r(), compat | _64b | sse).alt(avx, "vmovups_a"),
        inst("movups", fmt("B", [w(xmm_m128), r(xmm1)]), rex([0x0F, 0x11]).r(), compat | _64b | sse).alt(avx, "vmovups_b"),
        inst("movdqu", fmt("A", [w(xmm1), r(xmm_m128)]), rex([0xF3, 0x0F, 0x6F]).r(), compat | _64b | sse2).alt(avx, "vmovdqu_a"),
        inst("movdqu", fmt("B", [w(xmm_m128), r(xmm1)]), rex([0xF3, 0x0F, 0x7F]).r(), compat | _64b | sse2).alt(avx, "vmovdqu_b"),
        inst("vmovupd", fmt("A", [w(xmm1), r(xmm_m128)]), vex(L128)._66()._0f().op(0x10).r(), compat | _64b | avx),
        inst("vmovupd", fmt("B", [w(xmm_m128), r(xmm1)]), vex(L128)._66()._0f().op(0x11).r(), compat | _64b | avx),
        inst("vmovups", fmt("A", [w(xmm1), r(xmm_m128)]), vex(L128)._0f().op(0x10).r(), compat | _64b | avx),
        inst("vmovups", fmt("B", [w(xmm_m128), r(xmm1)]), vex(L128)._0f().op(0x11).r(), compat | _64b | avx),
        inst("vmovdqu", fmt("A", [w(xmm1), r(xmm_m128)]), vex(L128)._f3()._0f().op(0x6F).r(), compat | _64b | avx),
        inst("vmovdqu", fmt("B", [w(xmm_m128), r(xmm1)]), vex(L128)._f3()._0f().op(0x7F).r(), compat | _64b | avx),

        // Move and extend packed integers to and from XMM locations with sign extension.
        inst("pmovsxbw", fmt("A", [w(xmm1), r(xmm_m64)]), rex([0x66, 0x0F, 0x38, 0x20]).r(), (_64b | compat) & sse41).alt(avx, "vpmovsxbw_a"),
        inst("pmovsxbd", fmt("A", [w(xmm1), r(xmm_m32)]), rex([0x66, 0x0F, 0x38, 0x21]).r(), (_64b | compat) & sse41).alt(avx, "vpmovsxbd_a"),
        inst("pmovsxbq", fmt("A", [w(xmm1), r(xmm_m16)]), rex([0x66, 0x0F, 0x38, 0x22]).r(), (_64b | compat) & sse41).alt(avx, "vpmovsxbq_a"),
        inst("pmovsxwd", fmt("A", [w(xmm1), r(xmm_m64)]), rex([0x66, 0x0F, 0x38, 0x23]).r(), (_64b | compat) & sse41).alt(avx, "vpmovsxwd_a"),
        inst("pmovsxwq", fmt("A", [w(xmm1), r(xmm_m32)]), rex([0x66, 0x0F, 0x38, 0x24]).r(), (_64b | compat) & sse41).alt(avx, "vpmovsxwq_a"),
        inst("pmovsxdq", fmt("A", [w(xmm1), r(xmm_m64)]), rex([0x66, 0x0F, 0x38, 0x25]).r(), (_64b | compat) & sse41).alt(avx, "vpmovsxdq_a"),
        inst("vpmovsxbw", fmt("A", [w(xmm1), r(xmm_m64)]), vex(L128)._66()._0f38().op(0x20).r(), (_64b | compat) & avx),
        inst("vpmovsxbd", fmt("A", [w(xmm1), r(xmm_m32)]), vex(L128)._66()._0f38().op(0x21).r(), (_64b | compat) & avx),
        inst("vpmovsxbq", fmt("A", [w(xmm1), r(xmm_m16)]), vex(L128)._66()._0f38().op(0x22).r(), (_64b | compat) & avx),
        inst("vpmovsxwd", fmt("A", [w(xmm1), r(xmm_m64)]), vex(L128)._66()._0f38().op(0x23).r(), (_64b | compat) & avx),
        inst("vpmovsxwq", fmt("A", [w(xmm1), r(xmm_m32)]), vex(L128)._66()._0f38().op(0x24).r(), (_64b | compat) & avx),
        inst("vpmovsxdq", fmt("A", [w(xmm1), r(xmm_m64)]), vex(L128)._66()._0f38().op(0x25).r(), (_64b | compat) & avx),
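
        // For example, `pmovsxbw` sign-extends each of the low 8 bytes of the
        // source into a 16-bit lane, so a byte of 0x80 (-128) becomes 0xFF80;
        // per lane, in Rust terms:
        //
        //     let lane: i8 = -128;            // 0x80
        //     let widened: i16 = lane as i16; // 0xFF80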

        // Move and extend packed integers to and from XMM locations with zero extension.
        inst("pmovzxbw", fmt("A", [w(xmm1), r(xmm_m64)]), rex([0x66, 0x0F, 0x38, 0x30]).r(), (_64b | compat) & sse41).alt(avx, "vpmovzxbw_a"),
        inst("pmovzxbd", fmt("A", [w(xmm1), r(xmm_m32)]), rex([0x66, 0x0F, 0x38, 0x31]).r(), (_64b | compat) & sse41).alt(avx, "vpmovzxbd_a"),
        inst("pmovzxbq", fmt("A", [w(xmm1), r(xmm_m16)]), rex([0x66, 0x0F, 0x38, 0x32]).r(), (_64b | compat) & sse41).alt(avx, "vpmovzxbq_a"),
        inst("pmovzxwd", fmt("A", [w(xmm1), r(xmm_m64)]), rex([0x66, 0x0F, 0x38, 0x33]).r(), (_64b | compat) & sse41).alt(avx, "vpmovzxwd_a"),
        inst("pmovzxwq", fmt("A", [w(xmm1), r(xmm_m32)]), rex([0x66, 0x0F, 0x38, 0x34]).r(), (_64b | compat) & sse41).alt(avx, "vpmovzxwq_a"),
        inst("pmovzxdq", fmt("A", [w(xmm1), r(xmm_m64)]), rex([0x66, 0x0F, 0x38, 0x35]).r(), (_64b | compat) & sse41).alt(avx, "vpmovzxdq_a"),
        inst("vpmovzxbw", fmt("A", [w(xmm1), r(xmm_m64)]), vex(L128)._66()._0f38().op(0x30).r(), (_64b | compat) & avx),
        inst("vpmovzxbd", fmt("A", [w(xmm1), r(xmm_m32)]), vex(L128)._66()._0f38().op(0x31).r(), (_64b | compat) & avx),
        inst("vpmovzxbq", fmt("A", [w(xmm1), r(xmm_m16)]), vex(L128)._66()._0f38().op(0x32).r(), (_64b | compat) & avx),
        inst("vpmovzxwd", fmt("A", [w(xmm1), r(xmm_m64)]), vex(L128)._66()._0f38().op(0x33).r(), (_64b | compat) & avx),
        inst("vpmovzxwq", fmt("A", [w(xmm1), r(xmm_m32)]), vex(L128)._66()._0f38().op(0x34).r(), (_64b | compat) & avx),
        inst("vpmovzxdq", fmt("A", [w(xmm1), r(xmm_m64)]), vex(L128)._66()._0f38().op(0x35).r(), (_64b | compat) & avx),
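
        // The zero-extending counterpart: `pmovzxbw` widens a byte of 0x80 to
        // 0x0080 instead.
        //
        //     let lane: u8 = 0x80;
        //     let widened: u16 = lane as u16; // 0x0080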
    ]
}