Path: blob/main/cranelift/assembler-x64/meta/src/instructions/mov.rs
1693 views
use crate::dsl::{Feature::*, Inst, Length::*, Location::*};
use crate::dsl::{align, fmt, inst, r, rex, rw, sxl, sxq, sxw, vex, w};

/// Returns the definitions for the `mov`-family instructions: GPR moves,
/// sign/zero-extending moves, GPR<->XMM moves, and the SSE/AVX vector moves.
#[rustfmt::skip] // Keeps instructions on a single line.
pub fn list() -> Vec<Inst> {
    vec![
        // Move integers to and from GPR and memory locations. Note that, in
        // 64-bit mode, `r/m8` can not be encoded to access the following byte
        // registers if a REX prefix is used: AH, BH, CH, DH. Only moves that
        // overwrite all 64 bits are considered "write-only"; smaller-width
        // moves indicate that upper bits are preserved by being "read-write."
        inst("movb", fmt("MR", [w(rm8), r(r8)]), rex(0x88).r(), _64b | compat),
        inst("movw", fmt("MR", [w(rm16), r(r16)]), rex([0x66, 0x89]).r(), _64b | compat),
        inst("movl", fmt("MR", [w(rm32), r(r32)]), rex(0x89).r(), _64b | compat),
        inst("movq", fmt("MR", [w(rm64), r(r64)]), rex(0x89).w().r(), _64b),
        inst("movb", fmt("RM", [w(r8), r(rm8)]), rex(0x8A).r(), _64b | compat),
        inst("movw", fmt("RM", [w(r16), r(rm16)]), rex([0x66, 0x8B]).r(), _64b | compat),
        inst("movl", fmt("RM", [w(r32), r(rm32)]), rex(0x8B).r(), _64b | compat),
        inst("movq", fmt("RM", [w(r64), r(rm64)]), rex(0x8B).w().r(), _64b),
        inst("movb", fmt("OI", [w(r8), r(imm8)]), rex(0xB0).rb().ib(), _64b | compat),
        inst("movw", fmt("OI", [w(r16), r(imm16)]), rex([0x66, 0xB8]).rw().iw(), _64b | compat),
        inst("movl", fmt("OI", [w(r32), r(imm32)]), rex(0xB8).rd().id(), _64b | compat),
        // Capstone disassembles this (and only this) slightly differently.
        inst("movabsq", fmt("OI", [w(r64), r(imm64)]), rex(0xB8).w().ro().io(), _64b),
        inst("movb", fmt("MI", [w(rm8), r(imm8)]), rex(0xC6).digit(0).ib(), _64b | compat),
        inst("movw", fmt("MI", [w(rm16), r(imm16)]), rex([0x66, 0xC7]).digit(0).iw(), _64b | compat),
        inst("movl", fmt("MI", [w(rm32), r(imm32)]), rex(0xC7).digit(0).id(), _64b | compat),
        inst("movq", fmt("MI_SXL", [w(rm64), sxq(imm32)]), rex(0xC7).w().digit(0).id(), _64b),

        // Move integers with sign extension. These are defined as `movsx` in
        // the x64 reference manual but Capstone (and likely other tools)
        // disassemble this as `movs{from}{to}`.
        //
        // NOTE(review): `movsww` and `movzww` below mark a 16-bit destination
        // with `sxl(rm16)` rather than `sxw(rm16)` — confirm this matches the
        // DSL's intended extension-kind convention.
        inst("movsbw", fmt("RM", [w(r16), sxw(rm8)]), rex([0x66, 0x0F, 0xBE]).r(), _64b | compat),
        inst("movsbl", fmt("RM", [w(r32), sxl(rm8)]), rex([0x0F, 0xBE]).r(), _64b | compat),
        inst("movsbq", fmt("RM", [w(r64), sxq(rm8)]), rex([0x0F, 0xBE]).w().r(), _64b),
        inst("movsww", fmt("RM", [w(r16), sxl(rm16)]), rex([0x66, 0x0F, 0xBF]).r(), _64b | compat),
        inst("movswl", fmt("RM", [w(r32), sxl(rm16)]), rex([0x0F, 0xBF]).r(), _64b | compat),
        inst("movswq", fmt("RM", [w(r64), sxq(rm16)]), rex([0x0F, 0xBF]).w().r(), _64b),
        inst("movslq", fmt("RM", [w(r64), sxl(rm32)]), rex(0x63).w().r(), _64b),

        // Move integers with zero extension. These are defined as `movzx` in
        // the x64 reference manual but Capstone (and likely other tools)
        // disassemble this as `movz{from}{to}`.
        inst("movzbw", fmt("RM", [w(r16), sxw(rm8)]), rex([0x66, 0x0F, 0xB6]).r(), _64b | compat),
        inst("movzbl", fmt("RM", [w(r32), sxl(rm8)]), rex([0x0F, 0xB6]).r(), _64b | compat),
        inst("movzbq", fmt("RM", [w(r64), sxq(rm8)]), rex([0x0F, 0xB6]).w().r(), _64b),
        inst("movzww", fmt("RM", [w(r16), sxl(rm16)]), rex([0x66, 0x0F, 0xB7]).r(), _64b | compat),
        inst("movzwl", fmt("RM", [w(r32), sxl(rm16)]), rex([0x0F, 0xB7]).r(), _64b | compat),
        inst("movzwq", fmt("RM", [w(r64), sxq(rm16)]), rex([0x0F, 0xB7]).w().r(), _64b),

        // Move integers between GPR and XMM locations. From the reference
        // manual: "when the destination operand is an XMM register, the source
        // operand is written to the low doubleword of the register, and the
        // register is zero-extended to 128 bits."
        inst("movd", fmt("A", [w(xmm1), r(rm32)]), rex([0x66, 0x0F, 0x6E]).r(), (_64b | compat) & sse2),
        inst("movq", fmt("A", [w(xmm1), r(rm64)]), rex([0x66, 0x0F, 0x6E]).r().w(), _64b & sse2),
        inst("movd", fmt("B", [w(rm32), r(xmm2)]), rex([0x66, 0x0F, 0x7E]).r(), (_64b | compat) & sse2),
        inst("movq", fmt("B", [w(rm64), r(xmm2)]), rex([0x66, 0x0F, 0x7E]).r().w(), _64b & sse2),
        inst("vmovd", fmt("A", [w(xmm1), r(rm32)]), vex(L128)._66()._0f().w0().op(0x6E).r(), (_64b | compat) & avx),
        inst("vmovq", fmt("A", [w(xmm1), r(rm64)]), vex(L128)._66()._0f().w1().op(0x6E).r(), _64b & avx),
        inst("vmovd", fmt("B", [w(rm32), r(xmm2)]), vex(L128)._66()._0f().w0().op(0x7E).r(), (_64b | compat) & avx),
        inst("vmovq", fmt("B", [w(rm64), r(xmm2)]), vex(L128)._66()._0f().w1().op(0x7E).r(), _64b & avx),

        // Move floating-point values to and from XMM locations. Some
        // memory-loading versions of `movs*` clear the upper bits of the XMM
        // destination.
        //
        // Note that `movss` and `movsd` only have an "A" and "C" modes listed
        // in the Intel manual but here they're split into "*_M" and "*_R" to
        // model the different regalloc behavior each one has. Notably the
        // memory-using variant does the usual read or write the memory
        // depending on the instruction, but the "*_R" variant both reads and
        // writes the destination register because the upper bits are preserved.
        //
        // Additionally "C_R" is not specified here since it's not needed over
        // the "A_R" variant and it's additionally not encoded correctly as the
        // destination must be modeled in the ModRM:r/m byte, not the ModRM:reg
        // byte. Currently our encoding based on format doesn't account for this
        // special case, so it's just dropped here.
        inst("movss", fmt("A_M", [w(xmm1), r(m32)]), rex([0xF3, 0x0F, 0x10]).r(), compat | _64b | sse).alt(avx, "vmovss_d"),
        inst("movss", fmt("A_R", [rw(xmm1), r(xmm2)]), rex([0xF3, 0x0F, 0x10]).r(), compat | _64b | sse).alt(avx, "vmovss_b"),
        inst("movss", fmt("C_M", [w(m32), r(xmm1)]), rex([0xF3, 0x0F, 0x11]).r(), compat | _64b | sse).alt(avx, "vmovss_c_m"),
        inst("movsd", fmt("A_M", [w(xmm1), r(m64)]), rex([0xF2, 0x0F, 0x10]).r(), compat | _64b | sse2).alt(avx, "vmovsd_d"),
        inst("movsd", fmt("A_R", [rw(xmm1), r(xmm2)]), rex([0xF2, 0x0F, 0x10]).r(), compat | _64b | sse2).alt(avx, "vmovsd_b"),
        inst("movsd", fmt("C_M", [w(m64), r(xmm1)]), rex([0xF2, 0x0F, 0x11]).r(), compat | _64b | sse2).alt(avx, "vmovsd_c_m"),
        inst("vmovss", fmt("D", [w(xmm1), r(m32)]), vex(LIG)._f3()._0f().op(0x10).r(), compat | _64b | avx),
        inst("vmovss", fmt("B", [w(xmm1), r(xmm2), r(xmm3)]), vex(LIG)._f3()._0f().op(0x10).r(), compat | _64b | avx),
        inst("vmovss", fmt("C_M", [w(m32), r(xmm1)]), vex(LIG)._f3()._0f().op(0x11).r(), compat | _64b | avx),
        inst("vmovsd", fmt("D", [w(xmm1), r(m64)]), vex(LIG)._f2()._0f().op(0x10).r(), compat | _64b | avx),
        inst("vmovsd", fmt("B", [w(xmm1), r(xmm2), r(xmm3)]), vex(LIG)._f2()._0f().op(0x10).r(), compat | _64b | avx),
        inst("vmovsd", fmt("C_M", [w(m64), r(xmm1)]), vex(LIG)._f2()._0f().op(0x11).r(), compat | _64b | avx),

        // Move aligned 128-bit values to and from XMM locations.
        inst("movapd", fmt("A", [w(xmm1), r(align(xmm_m128))]), rex([0x66, 0x0F, 0x28]).r(), compat | _64b | sse2).alt(avx, "vmovapd_a"),
        inst("movapd", fmt("B", [w(align(xmm_m128)), r(xmm1)]), rex([0x66, 0x0F, 0x29]).r(), compat | _64b | sse2).alt(avx, "vmovapd_b"),
        inst("movaps", fmt("A", [w(xmm1), r(align(xmm_m128))]), rex([0x0F, 0x28]).r(), compat | _64b | sse).alt(avx, "vmovaps_a"),
        inst("movaps", fmt("B", [w(align(xmm_m128)), r(xmm1)]), rex([0x0F, 0x29]).r(), compat | _64b | sse).alt(avx, "vmovaps_b"),
        inst("movdqa", fmt("A", [w(xmm1), r(align(xmm_m128))]), rex([0x66, 0x0F, 0x6F]).r(), compat | _64b | sse2).alt(avx, "vmovdqa_a"),
        inst("movdqa", fmt("B", [w(align(xmm_m128)), r(xmm1)]), rex([0x66, 0x0F, 0x7F]).r(), compat | _64b | sse2).alt(avx, "vmovdqa_b"),
        inst("vmovapd", fmt("A", [w(xmm1), r(align(xmm_m128))]), vex(L128)._66()._0f().op(0x28).r(), compat | _64b | avx),
        inst("vmovapd", fmt("B", [w(align(xmm_m128)), r(xmm1)]), vex(L128)._66()._0f().op(0x29).r(), compat | _64b | avx),
        inst("vmovaps", fmt("A", [w(xmm1), r(align(xmm_m128))]), vex(L128)._0f().op(0x28).r(), compat | _64b | avx),
        inst("vmovaps", fmt("B", [w(align(xmm_m128)), r(xmm1)]), vex(L128)._0f().op(0x29).r(), compat | _64b | avx),
        inst("vmovdqa", fmt("A", [w(xmm1), r(align(xmm_m128))]), vex(L128)._66()._0f().op(0x6F).r(), compat | _64b | avx),
        inst("vmovdqa", fmt("B", [w(align(xmm_m128)), r(xmm1)]), vex(L128)._66()._0f().op(0x7F).r(), compat | _64b | avx),

        // Move unaligned 128-bit values to and from XMM locations.
        inst("movupd", fmt("A", [w(xmm1), r(xmm_m128)]), rex([0x66, 0x0F, 0x10]).r(), compat | _64b | sse2).alt(avx, "vmovupd_a"),
        inst("movupd", fmt("B", [w(xmm_m128), r(xmm1)]), rex([0x66, 0x0F, 0x11]).r(), compat | _64b | sse2).alt(avx, "vmovupd_b"),
        inst("movups", fmt("A", [w(xmm1), r(xmm_m128)]), rex([0x0F, 0x10]).r(), compat | _64b | sse).alt(avx, "vmovups_a"),
        inst("movups", fmt("B", [w(xmm_m128), r(xmm1)]), rex([0x0F, 0x11]).r(), compat | _64b | sse).alt(avx, "vmovups_b"),
        inst("movdqu", fmt("A", [w(xmm1), r(xmm_m128)]), rex([0xF3, 0x0F, 0x6F]).r(), compat | _64b | sse2).alt(avx, "vmovdqu_a"),
        inst("movdqu", fmt("B", [w(xmm_m128), r(xmm1)]), rex([0xF3, 0x0F, 0x7F]).r(), compat | _64b | sse2).alt(avx, "vmovdqu_b"),
        inst("vmovupd", fmt("A", [w(xmm1), r(xmm_m128)]), vex(L128)._66()._0f().op(0x10).r(), compat | _64b | avx),
        inst("vmovupd", fmt("B", [w(xmm_m128), r(xmm1)]), vex(L128)._66()._0f().op(0x11).r(), compat | _64b | avx),
        inst("vmovups", fmt("A", [w(xmm1), r(xmm_m128)]), vex(L128)._0f().op(0x10).r(), compat | _64b | avx),
        inst("vmovups", fmt("B", [w(xmm_m128), r(xmm1)]), vex(L128)._0f().op(0x11).r(), compat | _64b | avx),
        inst("vmovdqu", fmt("A", [w(xmm1), r(xmm_m128)]), vex(L128)._f3()._0f().op(0x6F).r(), compat | _64b | avx),
        inst("vmovdqu", fmt("B", [w(xmm_m128), r(xmm1)]), vex(L128)._f3()._0f().op(0x7F).r(), compat | _64b | avx),

        // Move and extend packed integers to and from XMM locations with sign extension.
        inst("pmovsxbw", fmt("A", [w(xmm1), r(xmm_m64)]), rex([0x66, 0x0F, 0x38, 0x20]).r(), (_64b | compat) & sse41).alt(avx, "vpmovsxbw_a"),
        inst("pmovsxbd", fmt("A", [w(xmm1), r(xmm_m32)]), rex([0x66, 0x0F, 0x38, 0x21]).r(), (_64b | compat) & sse41).alt(avx, "vpmovsxbd_a"),
        inst("pmovsxbq", fmt("A", [w(xmm1), r(xmm_m16)]), rex([0x66, 0x0F, 0x38, 0x22]).r(), (_64b | compat) & sse41).alt(avx, "vpmovsxbq_a"),
        inst("pmovsxwd", fmt("A", [w(xmm1), r(xmm_m64)]), rex([0x66, 0x0F, 0x38, 0x23]).r(), (_64b | compat) & sse41).alt(avx, "vpmovsxwd_a"),
        inst("pmovsxwq", fmt("A", [w(xmm1), r(xmm_m32)]), rex([0x66, 0x0F, 0x38, 0x24]).r(), (_64b | compat) & sse41).alt(avx, "vpmovsxwq_a"),
        inst("pmovsxdq", fmt("A", [w(xmm1), r(xmm_m64)]), rex([0x66, 0x0F, 0x38, 0x25]).r(), (_64b | compat) & sse41).alt(avx, "vpmovsxdq_a"),
        inst("vpmovsxbw", fmt("A", [w(xmm1), r(xmm_m64)]), vex(L128)._66()._0f38().op(0x20).r(), (_64b | compat) & avx),
        inst("vpmovsxbd", fmt("A", [w(xmm1), r(xmm_m32)]), vex(L128)._66()._0f38().op(0x21).r(), (_64b | compat) & avx),
        inst("vpmovsxbq", fmt("A", [w(xmm1), r(xmm_m16)]), vex(L128)._66()._0f38().op(0x22).r(), (_64b | compat) & avx),
        inst("vpmovsxwd", fmt("A", [w(xmm1), r(xmm_m64)]), vex(L128)._66()._0f38().op(0x23).r(), (_64b | compat) & avx),
        inst("vpmovsxwq", fmt("A", [w(xmm1), r(xmm_m32)]), vex(L128)._66()._0f38().op(0x24).r(), (_64b | compat) & avx),
        inst("vpmovsxdq", fmt("A", [w(xmm1), r(xmm_m64)]), vex(L128)._66()._0f38().op(0x25).r(), (_64b | compat) & avx),

        // Move and extend packed integers to and from XMM locations with zero extension.
        inst("pmovzxbw", fmt("A", [w(xmm1), r(xmm_m64)]), rex([0x66, 0x0F, 0x38, 0x30]).r(), (_64b | compat) & sse41).alt(avx, "vpmovzxbw_a"),
        inst("pmovzxbd", fmt("A", [w(xmm1), r(xmm_m32)]), rex([0x66, 0x0F, 0x38, 0x31]).r(), (_64b | compat) & sse41).alt(avx, "vpmovzxbd_a"),
        inst("pmovzxbq", fmt("A", [w(xmm1), r(xmm_m16)]), rex([0x66, 0x0F, 0x38, 0x32]).r(), (_64b | compat) & sse41).alt(avx, "vpmovzxbq_a"),
        inst("pmovzxwd", fmt("A", [w(xmm1), r(xmm_m64)]), rex([0x66, 0x0F, 0x38, 0x33]).r(), (_64b | compat) & sse41).alt(avx, "vpmovzxwd_a"),
        inst("pmovzxwq", fmt("A", [w(xmm1), r(xmm_m32)]), rex([0x66, 0x0F, 0x38, 0x34]).r(), (_64b | compat) & sse41).alt(avx, "vpmovzxwq_a"),
        inst("pmovzxdq", fmt("A", [w(xmm1), r(xmm_m64)]), rex([0x66, 0x0F, 0x38, 0x35]).r(), (_64b | compat) & sse41).alt(avx, "vpmovzxdq_a"),
        inst("vpmovzxbw", fmt("A", [w(xmm1), r(xmm_m64)]), vex(L128)._66()._0f38().op(0x30).r(), (_64b | compat) & avx),
        inst("vpmovzxbd", fmt("A", [w(xmm1), r(xmm_m32)]), vex(L128)._66()._0f38().op(0x31).r(), (_64b | compat) & avx),
        inst("vpmovzxbq", fmt("A", [w(xmm1), r(xmm_m16)]), vex(L128)._66()._0f38().op(0x32).r(), (_64b | compat) & avx),
        inst("vpmovzxwd", fmt("A", [w(xmm1), r(xmm_m64)]), vex(L128)._66()._0f38().op(0x33).r(), (_64b | compat) & avx),
        inst("vpmovzxwq", fmt("A", [w(xmm1), r(xmm_m32)]), vex(L128)._66()._0f38().op(0x34).r(), (_64b | compat) & avx),
        inst("vpmovzxdq", fmt("A", [w(xmm1), r(xmm_m64)]), vex(L128)._66()._0f38().op(0x35).r(), (_64b | compat) & avx),
    ]
}