Path: blob/main/cranelift/codegen/src/isa/aarch64/lower/isle.rs
//! ISLE integration glue code for aarch64 lowering.

// Pull in the ISLE generated code.
pub mod generated_code;
use generated_code::{Context, ImmExtend};

// Types that the generated ISLE code uses via `use super::*`.
use super::{
    ASIMDFPModImm, ASIMDMovModImm, BranchTarget, CallInfo, Cond, CondBrKind, ExtendOp, FPUOpRI,
    FPUOpRIMod, FloatCC, Imm12, ImmLogic, ImmShift, Inst as MInst, IntCC, MachLabel, MemLabel,
    MoveWideConst, MoveWideOp, NZCV, Opcode, OperandSize, Reg, SImm9, ScalarSize, ShiftOpAndAmt,
    UImm5, UImm12Scaled, VecMisc2, VectorSize, fp_reg, lower_condcode, lower_fp_condcode,
    stack_reg, writable_link_reg, writable_zero_reg, zero_reg,
};
use crate::ir::{ArgumentExtension, condcodes};
use crate::isa;
use crate::isa::aarch64::AArch64Backend;
use crate::isa::aarch64::inst::{FPULeftShiftImm, FPURightShiftImm, ReturnCallInfo};
use crate::machinst::isle::*;
use crate::{
    binemit::CodeOffset,
    ir::{
        AtomicRmwOp, BlockCall, ExternalName, Inst, InstructionData, MemFlags, TrapCode, Value,
        ValueList, immediates::*, types::*,
    },
    isa::aarch64::abi::AArch64MachineDeps,
    isa::aarch64::inst::SImm7Scaled,
    isa::aarch64::inst::args::{ShiftOp, ShiftOpShiftImm},
    machinst::{
        CallArgList, CallRetList, InstOutput, MachInst, VCodeConstant, VCodeConstantData,
        abi::ArgPair, ty_bits,
    },
};
use alloc::boxed::Box;
use alloc::vec::Vec;
use core::u32;
use regalloc2::PReg;

type BoxCallInfo = Box<CallInfo<ExternalName>>;
type BoxCallIndInfo = Box<CallInfo<Reg>>;
type BoxReturnCallInfo = Box<ReturnCallInfo<ExternalName>>;
type BoxReturnCallIndInfo = Box<ReturnCallInfo<Reg>>;
type VecMachLabel = Vec<MachLabel>;
type BoxExternalName = Box<ExternalName>;
type VecArgPair = Vec<ArgPair>;

/// The main entry point for lowering with ISLE.
pub(crate) fn lower(
    lower_ctx: &mut Lower<MInst>,
    backend: &AArch64Backend,
    inst: Inst,
) -> Option<InstOutput> {
    // TODO: reuse the ISLE context across lowerings so we can reuse its
    // internal heap allocations.
    let mut isle_ctx = IsleContext { lower_ctx, backend };
    generated_code::constructor_lower(&mut isle_ctx, inst)
}

pub(crate) fn lower_branch(
    lower_ctx: &mut Lower<MInst>,
    backend: &AArch64Backend,
    branch: Inst,
    targets: &[MachLabel],
) -> Option<()> {
    // TODO: reuse the ISLE context across lowerings so we can reuse its
    // internal heap allocations.
    let mut isle_ctx = IsleContext { lower_ctx, backend };
    generated_code::constructor_lower_branch(&mut isle_ctx, branch, targets)
}

pub struct ExtendedValue {
    val: Value,
    extend: ExtendOp,
}

impl Context for IsleContext<'_, '_, MInst, AArch64Backend> {
    isle_lower_prelude_methods!();

    fn gen_call_info(
        &mut self,
        sig: Sig,
        dest: ExternalName,
        uses: CallArgList,
        defs: CallRetList,
        try_call_info: Option<TryCallInfo>,
        patchable: bool,
    ) -> BoxCallInfo {
        let stack_ret_space = self.lower_ctx.sigs()[sig].sized_stack_ret_space();
        let stack_arg_space = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_outgoing_args_size(stack_ret_space + stack_arg_space);

        Box::new(
            self.lower_ctx
                .gen_call_info(sig, dest, uses, defs, try_call_info, patchable),
        )
    }
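    // Note on the accumulation above (and in the indirect-call variant below;
    // an explanatory sketch, not new logic): `accumulate_outgoing_args_size`
    // effectively tracks a running maximum over every callsite in the
    // function rather than a sum. For example, if one call needs 24 bytes of
    // stack arguments plus 8 bytes of stack return space (32 total) and
    // another needs 48, the frame layout reserves max(32, 48) = 48 bytes for
    // the outgoing-argument area.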
    fn gen_call_ind_info(
        &mut self,
        sig: Sig,
        dest: Reg,
        uses: CallArgList,
        defs: CallRetList,
        try_call_info: Option<TryCallInfo>,
    ) -> BoxCallIndInfo {
        let stack_ret_space = self.lower_ctx.sigs()[sig].sized_stack_ret_space();
        let stack_arg_space = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_outgoing_args_size(stack_ret_space + stack_arg_space);

        Box::new(
            self.lower_ctx
                .gen_call_info(sig, dest, uses, defs, try_call_info, false),
        )
    }

    fn gen_return_call_info(
        &mut self,
        sig: Sig,
        dest: ExternalName,
        uses: CallArgList,
    ) -> BoxReturnCallInfo {
        let new_stack_arg_size = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_tail_args_size(new_stack_arg_size);

        let key =
            AArch64MachineDeps::select_api_key(&self.backend.isa_flags, isa::CallConv::Tail, true);

        Box::new(ReturnCallInfo {
            dest,
            uses,
            key,
            new_stack_arg_size,
        })
    }

    fn gen_return_call_ind_info(
        &mut self,
        sig: Sig,
        dest: Reg,
        uses: CallArgList,
    ) -> BoxReturnCallIndInfo {
        let new_stack_arg_size = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_tail_args_size(new_stack_arg_size);

        let key =
            AArch64MachineDeps::select_api_key(&self.backend.isa_flags, isa::CallConv::Tail, true);

        Box::new(ReturnCallInfo {
            dest,
            uses,
            key,
            new_stack_arg_size,
        })
    }

    fn sign_return_address_disabled(&mut self) -> Option<()> {
        if self.backend.isa_flags.sign_return_address() {
            None
        } else {
            Some(())
        }
    }

    fn use_lse(&mut self, _: Inst) -> Option<()> {
        if self.backend.isa_flags.has_lse() {
            Some(())
        } else {
            None
        }
    }

    fn use_fp16(&mut self) -> bool {
        self.backend.isa_flags.has_fp16()
    }

    fn move_wide_const_from_u64(&mut self, ty: Type, n: u64) -> Option<MoveWideConst> {
        let bits = ty.bits();
        let n = if bits < 64 {
            n & !(u64::MAX << bits)
        } else {
            n
        };
        MoveWideConst::maybe_from_u64(n)
    }

    fn move_wide_const_from_inverted_u64(&mut self, ty: Type, n: u64) -> Option<MoveWideConst> {
        self.move_wide_const_from_u64(ty, !n)
    }
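    // Illustrative examples for the two helpers above (values chosen here,
    // not taken from a test suite): `move_wide_const_from_u64(I64,
    // 0x0000_1234_0000_0000)` succeeds because the constant is a single
    // 16-bit chunk at a 16-bit-aligned position (`movz #0x1234, LSL #32`),
    // and `move_wide_const_from_inverted_u64(I64, 0xffff_ffff_ffff_edcb)`
    // succeeds because the bitwise inverse is 0x1234 (`movn`). The masking to
    // the type width means that, say, the I16 constant 0xffff is accepted
    // even though the sign-extended u64 0xffff_ffff_ffff_ffff would not be a
    // single chunk.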
    fn imm_logic_from_u64(&mut self, ty: Type, n: u64) -> Option<ImmLogic> {
        ImmLogic::maybe_from_u64(n, ty)
    }

    fn imm_size_from_type(&mut self, ty: Type) -> Option<u16> {
        match ty {
            I32 => Some(32),
            I64 => Some(64),
            _ => None,
        }
    }

    fn imm_logic_from_imm64(&mut self, ty: Type, n: Imm64) -> Option<ImmLogic> {
        let ty = if ty.bits() < 32 { I32 } else { ty };
        self.imm_logic_from_u64(ty, n.bits() as u64)
    }

    fn imm12_from_u64(&mut self, n: u64) -> Option<Imm12> {
        Imm12::maybe_from_u64(n)
    }

    fn imm_shift_from_u8(&mut self, n: u8) -> ImmShift {
        ImmShift::maybe_from_u64(n.into()).unwrap()
    }

    fn lshr_from_u64(&mut self, ty: Type, n: u64) -> Option<ShiftOpAndAmt> {
        let shiftimm = ShiftOpShiftImm::maybe_from_shift(n)?;
        if let Ok(bits) = u8::try_from(ty_bits(ty)) {
            let shiftimm = shiftimm.mask(bits);
            Some(ShiftOpAndAmt::new(ShiftOp::LSR, shiftimm))
        } else {
            None
        }
    }

    fn lshl_from_imm64(&mut self, ty: Type, n: Imm64) -> Option<ShiftOpAndAmt> {
        self.lshl_from_u64(ty, n.bits() as u64)
    }

    fn lshl_from_u64(&mut self, ty: Type, n: u64) -> Option<ShiftOpAndAmt> {
        let shiftimm = ShiftOpShiftImm::maybe_from_shift(n)?;
        let shiftee_bits = ty_bits(ty);
        if shiftee_bits <= core::u8::MAX as usize {
            let shiftimm = shiftimm.mask(shiftee_bits as u8);
            Some(ShiftOpAndAmt::new(ShiftOp::LSL, shiftimm))
        } else {
            None
        }
    }

    fn ashr_from_u64(&mut self, ty: Type, n: u64) -> Option<ShiftOpAndAmt> {
        let shiftimm = ShiftOpShiftImm::maybe_from_shift(n)?;
        let shiftee_bits = ty_bits(ty);
        if shiftee_bits <= core::u8::MAX as usize {
            let shiftimm = shiftimm.mask(shiftee_bits as u8);
            Some(ShiftOpAndAmt::new(ShiftOp::ASR, shiftimm))
        } else {
            None
        }
    }

    fn integral_ty(&mut self, ty: Type) -> Option<Type> {
        match ty {
            I8 | I16 | I32 | I64 => Some(ty),
            _ => None,
        }
    }

    fn is_zero_simm9(&mut self, imm: &SImm9) -> Option<()> {
        if imm.value() == 0 { Some(()) } else { None }
    }

    fn is_zero_uimm12(&mut self, imm: &UImm12Scaled) -> Option<()> {
        if imm.value() == 0 { Some(()) } else { None }
    }

    /// This is target-word-size dependent, and it excludes booleans and reftypes.
    fn valid_atomic_transaction(&mut self, ty: Type) -> Option<Type> {
        match ty {
            I8 | I16 | I32 | I64 => Some(ty),
            _ => None,
        }
    }

    /// This is the fallback case for loading a 64-bit integral constant into a
    /// register.
    ///
    /// The logic here is nontrivial enough that it's not really worth porting
    /// this over to ISLE.
    fn load_constant_full(
        &mut self,
        ty: Type,
        extend: &ImmExtend,
        extend_to: &OperandSize,
        value: u64,
    ) -> Reg {
        let bits = ty.bits();

        let value = match (extend_to, *extend) {
            (OperandSize::Size32, ImmExtend::Sign) if bits < 32 => {
                let shift = 32 - bits;
                let value = value as i32;

                // We cast first to a u32 and then to a u64 to ensure that we
                // are representing an i32 in a u64, not an i64. This is
                // important: otherwise the value will not fit in 32 bits.
                ((value << shift) >> shift) as u32 as u64
            }
            (OperandSize::Size32, ImmExtend::Zero) if bits < 32 => {
                value & !((u32::MAX as u64) << bits)
            }
            (OperandSize::Size64, ImmExtend::Sign) if bits < 64 => {
                let shift = 64 - bits;
                let value = value as i64;

                ((value << shift) >> shift) as u64
            }
            (OperandSize::Size64, ImmExtend::Zero) if bits < 64 => value & !(u64::MAX << bits),
            _ => value,
        };

        // Divide the value into 16-bit slices that we can manipulate using
        // `movz`, `movn`, and `movk`.
        fn get(value: u64, shift: u8) -> u16 {
            (value >> (shift * 16)) as u16
        }
        fn replace(mut old: u64, new: u16, shift: u8) -> u64 {
            let offset = shift * 16;
            old &= !(0xffff << offset);
            old |= u64::from(new) << offset;
            old
        }

        // The 32-bit versions of `movz`/`movn`/`movk` will clear the upper 32
        // bits, so if that's the outcome we want we might as well use them. For
        // simplicity and ease of reading the disassembly, we use the same size
        // for all instructions in the sequence.
        let size = if value >> 32 == 0 {
            OperandSize::Size32
        } else {
            OperandSize::Size64
        };

        // The `movz` instruction initially sets all bits to zero, while `movn`
        // initially sets all bits to one. A good choice of initial value can
        // reduce the number of `movk` instructions we need afterward, so we
        // check both variants to determine which is closest to the constant
        // we actually wanted. In case they're equally good, we prefer `movz`
        // because the assembly listings are generally harder to read when the
        // operands are negated.
        let (mut running_value, op, first) =
            [(MoveWideOp::MovZ, 0), (MoveWideOp::MovN, size.max_value())]
                .into_iter()
                .map(|(op, base)| {
                    // Both `movz` and `movn` can overwrite one slice after
                    // setting the initial value; we get to pick which one.
                    // 32-bit variants can only modify the lower two slices.
                    let first = (0..(size.bits() / 16))
                        // Pick one slice that's different from the initial value
                        .find(|&i| get(base ^ value, i) != 0)
                        // If none are different, we still have to pick one
                        .unwrap_or(0);
                    // Compute the value we'll get from this `movz`/`movn`
                    (replace(base, get(value, first), first), op, first)
                })
                // Count how many `movk` instructions we'll need.
                .min_by_key(|(base, ..)| (0..4).filter(|&i| get(base ^ value, i) != 0).count())
                // The candidate array isn't empty, so `min_by_key` always
                // returns something.
                .unwrap();

        // Build the initial instruction we chose above, putting the result
        // into a new temporary virtual register. Note that the encoding for the
        // immediate operand is bitwise-inverted for `movn`.
        let mut rd = self.temp_writable_reg(I64);
        self.lower_ctx.emit(MInst::MovWide {
            op,
            rd,
            imm: MoveWideConst {
                bits: match op {
                    MoveWideOp::MovZ => get(value, first),
                    MoveWideOp::MovN => !get(value, first),
                },
                shift: first,
            },
            size,
        });
        if self.backend.flags.enable_pcc() {
            self.lower_ctx
                .add_range_fact(rd.to_reg(), 64, running_value, running_value);
        }

        // Emit a `movk` instruction for each remaining slice of the desired
        // constant that does not match the initial value constructed above.
        for shift in (first + 1)..(size.bits() / 16) {
            let bits = get(value, shift);
            if bits != get(running_value, shift) {
                let rn = rd.to_reg();
                rd = self.temp_writable_reg(I64);
                self.lower_ctx.emit(MInst::MovK {
                    rd,
                    rn,
                    imm: MoveWideConst { bits, shift },
                    size,
                });
                running_value = replace(running_value, bits, shift);
                if self.backend.flags.enable_pcc() {
                    self.lower_ctx
                        .add_range_fact(rd.to_reg(), 64, running_value, running_value);
                }
            }
        }

        debug_assert_eq!(value, running_value);
        return rd.to_reg();
    }
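    // A worked example of the `movz`/`movn`/`movk` selection above
    // (illustrative only): for `value = 0xffff_ffff_ffff_1234`, the `movz`
    // candidate sets slice 0 and leaves three 0xffff slices to patch, so it
    // costs one `movz` plus three `movk`s; the `movn` candidate starts from
    // all-ones, overwrites slice 0, and needs no `movk` at all. `movn` wins,
    // and the single emitted instruction is `movn x<rd>, #0xedcb`, since the
    // `movn` encoding holds the bitwise inverse of the slice 0x1234.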
    fn zero_reg(&mut self) -> Reg {
        zero_reg()
    }

    fn stack_reg(&mut self) -> Reg {
        stack_reg()
    }

    fn fp_reg(&mut self) -> Reg {
        fp_reg()
    }

    fn writable_link_reg(&mut self) -> WritableReg {
        writable_link_reg()
    }

    fn extended_value_from_value(&mut self, val: Value) -> Option<ExtendedValue> {
        let (val, extend) = super::get_as_extended_value(self.lower_ctx, val)?;
        Some(ExtendedValue { val, extend })
    }

    fn put_extended_in_reg(&mut self, reg: &ExtendedValue) -> Reg {
        self.put_in_reg(reg.val)
    }

    fn get_extended_op(&mut self, reg: &ExtendedValue) -> ExtendOp {
        reg.extend
    }

    fn emit(&mut self, inst: &MInst) -> Unit {
        self.lower_ctx.emit(inst.clone());
    }

    fn cond_br_zero(&mut self, reg: Reg, size: &OperandSize) -> CondBrKind {
        CondBrKind::Zero(reg, *size)
    }

    fn cond_br_not_zero(&mut self, reg: Reg, size: &OperandSize) -> CondBrKind {
        CondBrKind::NotZero(reg, *size)
    }

    fn cond_br_cond(&mut self, cond: &Cond) -> CondBrKind {
        CondBrKind::Cond(*cond)
    }

    fn nzcv(&mut self, n: bool, z: bool, c: bool, v: bool) -> NZCV {
        NZCV::new(n, z, c, v)
    }

    fn u8_into_uimm5(&mut self, x: u8) -> UImm5 {
        UImm5::maybe_from_u8(x).unwrap()
    }

    fn u8_into_imm12(&mut self, x: u8) -> Imm12 {
        Imm12::maybe_from_u64(x.into()).unwrap()
    }

    fn writable_zero_reg(&mut self) -> WritableReg {
        writable_zero_reg()
    }

    fn shift_mask(&mut self, ty: Type) -> ImmLogic {
        debug_assert!(ty.lane_bits().is_power_of_two());

        let mask = (ty.lane_bits() - 1) as u64;
        ImmLogic::maybe_from_u64(mask, I32).unwrap()
    }

    fn imm_shift_from_imm64(&mut self, ty: Type, val: Imm64) -> Option<ImmShift> {
        let imm_value = (val.bits() as u64) & ((ty.bits() - 1) as u64);
        ImmShift::maybe_from_u64(imm_value)
    }

    fn u64_into_imm_logic(&mut self, ty: Type, val: u64) -> ImmLogic {
        ImmLogic::maybe_from_u64(val, ty).unwrap()
    }

    fn negate_imm_shift(&mut self, ty: Type, mut imm: ImmShift) -> ImmShift {
        let size = u8::try_from(ty.bits()).unwrap();
        imm.imm = size.wrapping_sub(imm.value());
        imm.imm &= size - 1;
        imm
    }
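    // Example for `negate_imm_shift` (a sketch with assumed values): negating
    // an I32 shift amount of 12 yields `32 - 12 = 20`, masked by 31, while an
    // amount of 0 wraps to 32 and masks back to 0. Shift and rotate amounts
    // on AArch64 are taken modulo the type width, which is what allows, e.g.,
    // a rotate-left by `n` to be lowered as a rotate-right by `width - n`.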
    fn rotr_mask(&mut self, ty: Type) -> ImmLogic {
        ImmLogic::maybe_from_u64((ty.bits() - 1) as u64, I32).unwrap()
    }

    fn rotr_opposite_amount(&mut self, ty: Type, val: ImmShift) -> ImmShift {
        let amount = val.value() & u8::try_from(ty.bits() - 1).unwrap();
        ImmShift::maybe_from_u64(u64::from(ty.bits()) - u64::from(amount)).unwrap()
    }

    fn icmp_zero_cond(&mut self, cond: &IntCC) -> Option<IntCC> {
        match cond {
            &IntCC::Equal
            | &IntCC::SignedGreaterThanOrEqual
            | &IntCC::SignedGreaterThan
            | &IntCC::SignedLessThanOrEqual
            | &IntCC::SignedLessThan => Some(*cond),
            _ => None,
        }
    }

    fn fcmp_zero_cond(&mut self, cond: &FloatCC) -> Option<FloatCC> {
        match cond {
            &FloatCC::Equal
            | &FloatCC::GreaterThanOrEqual
            | &FloatCC::GreaterThan
            | &FloatCC::LessThanOrEqual
            | &FloatCC::LessThan => Some(*cond),
            _ => None,
        }
    }

    fn fcmp_zero_cond_not_eq(&mut self, cond: &FloatCC) -> Option<FloatCC> {
        match cond {
            &FloatCC::NotEqual => Some(FloatCC::NotEqual),
            _ => None,
        }
    }

    fn icmp_zero_cond_not_eq(&mut self, cond: &IntCC) -> Option<IntCC> {
        match cond {
            &IntCC::NotEqual => Some(IntCC::NotEqual),
            _ => None,
        }
    }

    fn float_cc_cmp_zero_to_vec_misc_op(&mut self, cond: &FloatCC) -> VecMisc2 {
        match cond {
            &FloatCC::Equal => VecMisc2::Fcmeq0,
            &FloatCC::GreaterThanOrEqual => VecMisc2::Fcmge0,
            &FloatCC::LessThanOrEqual => VecMisc2::Fcmle0,
            &FloatCC::GreaterThan => VecMisc2::Fcmgt0,
            &FloatCC::LessThan => VecMisc2::Fcmlt0,
            _ => panic!(),
        }
    }

    fn int_cc_cmp_zero_to_vec_misc_op(&mut self, cond: &IntCC) -> VecMisc2 {
        match cond {
            &IntCC::Equal => VecMisc2::Cmeq0,
            &IntCC::SignedGreaterThanOrEqual => VecMisc2::Cmge0,
            &IntCC::SignedLessThanOrEqual => VecMisc2::Cmle0,
            &IntCC::SignedGreaterThan => VecMisc2::Cmgt0,
            &IntCC::SignedLessThan => VecMisc2::Cmlt0,
            _ => panic!(),
        }
    }

    fn float_cc_cmp_zero_to_vec_misc_op_swap(&mut self, cond: &FloatCC) -> VecMisc2 {
        match cond {
            &FloatCC::Equal => VecMisc2::Fcmeq0,
            &FloatCC::GreaterThanOrEqual => VecMisc2::Fcmle0,
            &FloatCC::LessThanOrEqual => VecMisc2::Fcmge0,
            &FloatCC::GreaterThan => VecMisc2::Fcmlt0,
            &FloatCC::LessThan => VecMisc2::Fcmgt0,
            _ => panic!(),
        }
    }

    fn int_cc_cmp_zero_to_vec_misc_op_swap(&mut self, cond: &IntCC) -> VecMisc2 {
        match cond {
            &IntCC::Equal => VecMisc2::Cmeq0,
            &IntCC::SignedGreaterThanOrEqual => VecMisc2::Cmle0,
            &IntCC::SignedLessThanOrEqual => VecMisc2::Cmge0,
            &IntCC::SignedGreaterThan => VecMisc2::Cmlt0,
            &IntCC::SignedLessThan => VecMisc2::Cmgt0,
            _ => panic!(),
        }
    }

    fn fp_cond_code(&mut self, cc: &condcodes::FloatCC) -> Cond {
        lower_fp_condcode(*cc)
    }

    fn cond_code(&mut self, cc: &condcodes::IntCC) -> Cond {
        lower_condcode(*cc)
    }

    fn invert_cond(&mut self, cond: &Cond) -> Cond {
        (*cond).invert()
    }

    fn preg_sp(&mut self) -> PReg {
        super::regs::stack_reg().to_real_reg().unwrap().into()
    }

    fn preg_fp(&mut self) -> PReg {
        super::regs::fp_reg().to_real_reg().unwrap().into()
    }

    fn preg_link(&mut self) -> PReg {
        super::regs::link_reg().to_real_reg().unwrap().into()
    }

    fn preg_pinned(&mut self) -> PReg {
        super::regs::pinned_reg().to_real_reg().unwrap().into()
    }

    fn branch_target(&mut self, label: MachLabel) -> BranchTarget {
        BranchTarget::Label(label)
    }

    fn targets_jt_space(&mut self, elements: &BoxVecMachLabel) -> CodeOffset {
        // Calculate the number of bytes needed for the jump table sequence:
        // four bytes per instruction, for an eight-instruction base sequence
        // plus one instruction per jump table entry.
        (4 * (8 + elements.len())).try_into().unwrap()
    }
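    // For instance, a jump table with 4 entries is budgeted
    // `4 * (8 + 4) = 48` bytes of code space by this formula.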
    fn min_fp_value(&mut self, signed: bool, in_bits: u8, out_bits: u8) -> Reg {
        if in_bits == 32 {
            // From float32.
            let min = match (signed, out_bits) {
                (true, 8) => i8::MIN as f32 - 1.,
                (true, 16) => i16::MIN as f32 - 1.,
                (true, 32) => i32::MIN as f32, // I32_MIN - 1 isn't precisely representable as a f32.
                (true, 64) => i64::MIN as f32, // I64_MIN - 1 isn't precisely representable as a f32.

                (false, _) => -1.,
                _ => unimplemented!(
                    "unexpected {} output size of {} bits for 32-bit input",
                    if signed { "signed" } else { "unsigned" },
                    out_bits
                ),
            };

            generated_code::constructor_constant_f32(self, min.to_bits())
        } else if in_bits == 64 {
            // From float64.
            let min = match (signed, out_bits) {
                (true, 8) => i8::MIN as f64 - 1.,
                (true, 16) => i16::MIN as f64 - 1.,
                (true, 32) => i32::MIN as f64 - 1.,
                (true, 64) => i64::MIN as f64,

                (false, _) => -1.,
                _ => unimplemented!(
                    "unexpected {} output size of {} bits for 64-bit input",
                    if signed { "signed" } else { "unsigned" },
                    out_bits
                ),
            };

            generated_code::constructor_constant_f64(self, min.to_bits())
        } else {
            unimplemented!(
                "unexpected input size for min_fp_value: {} (signed: {}, output size: {})",
                in_bits,
                signed,
                out_bits
            );
        }
    }

    fn max_fp_value(&mut self, signed: bool, in_bits: u8, out_bits: u8) -> Reg {
        if in_bits == 32 {
            // From float32.
            let max = match (signed, out_bits) {
                (true, 8) => i8::MAX as f32 + 1.,
                (true, 16) => i16::MAX as f32 + 1.,
                (true, 32) => (i32::MAX as u64 + 1) as f32,
                (true, 64) => (i64::MAX as u64 + 1) as f32,

                (false, 8) => u8::MAX as f32 + 1.,
                (false, 16) => u16::MAX as f32 + 1.,
                (false, 32) => (u32::MAX as u64 + 1) as f32,
                (false, 64) => (u64::MAX as u128 + 1) as f32,
                _ => unimplemented!(
                    "unexpected {} output size of {} bits for 32-bit input",
                    if signed { "signed" } else { "unsigned" },
                    out_bits
                ),
            };

            generated_code::constructor_constant_f32(self, max.to_bits())
        } else if in_bits == 64 {
            // From float64.
            let max = match (signed, out_bits) {
                (true, 8) => i8::MAX as f64 + 1.,
                (true, 16) => i16::MAX as f64 + 1.,
                (true, 32) => i32::MAX as f64 + 1.,
                (true, 64) => (i64::MAX as u64 + 1) as f64,

                (false, 8) => u8::MAX as f64 + 1.,
                (false, 16) => u16::MAX as f64 + 1.,
                (false, 32) => u32::MAX as f64 + 1.,
                (false, 64) => (u64::MAX as u128 + 1) as f64,
                _ => unimplemented!(
                    "unexpected {} output size of {} bits for 64-bit input",
                    if signed { "signed" } else { "unsigned" },
                    out_bits
                ),
            };

            generated_code::constructor_constant_f64(self, max.to_bits())
        } else {
            unimplemented!(
                "unexpected input size for max_fp_value: {} (signed: {}, output size: {})",
                in_bits,
                signed,
                out_bits
            );
        }
    }
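    // The +/-1 adjustments above make these *exclusive* bounds. As a sketch,
    // a saturating signed f32 -> i8 conversion can treat an input `x` as
    // in-range exactly when `-129.0 < x && x < 128.0`. Where `MIN - 1` is not
    // exactly representable in the source float type (i32::MIN and i64::MIN
    // for f32), the nearest exactly-representable constant is used instead,
    // as the inline comments note.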
    fn fpu_op_ri_ushr(&mut self, ty_bits: u8, shift: u8) -> FPUOpRI {
        if ty_bits == 32 {
            FPUOpRI::UShr32(FPURightShiftImm::maybe_from_u8(shift, ty_bits).unwrap())
        } else if ty_bits == 64 {
            FPUOpRI::UShr64(FPURightShiftImm::maybe_from_u8(shift, ty_bits).unwrap())
        } else {
            unimplemented!(
                "unexpected input size for fpu_op_ri_ushr: {} (shift: {})",
                ty_bits,
                shift
            );
        }
    }

    fn fpu_op_ri_sli(&mut self, ty_bits: u8, shift: u8) -> FPUOpRIMod {
        if ty_bits == 32 {
            FPUOpRIMod::Sli32(FPULeftShiftImm::maybe_from_u8(shift, ty_bits).unwrap())
        } else if ty_bits == 64 {
            FPUOpRIMod::Sli64(FPULeftShiftImm::maybe_from_u8(shift, ty_bits).unwrap())
        } else {
            unimplemented!(
                "unexpected input size for fpu_op_ri_sli: {} (shift: {})",
                ty_bits,
                shift
            );
        }
    }

    fn vec_extract_imm4_from_immediate(&mut self, imm: Immediate) -> Option<u8> {
        let bytes = self.lower_ctx.get_immediate_data(imm).as_slice();

        if bytes.windows(2).all(|a| a[0] + 1 == a[1]) && bytes[0] < 16 {
            Some(bytes[0])
        } else {
            None
        }
    }

    fn shuffle_dup8_from_imm(&mut self, imm: Immediate) -> Option<u8> {
        let bytes = self.lower_ctx.get_immediate_data(imm).as_slice();
        if bytes.iter().all(|b| *b == bytes[0]) && bytes[0] < 16 {
            Some(bytes[0])
        } else {
            None
        }
    }

    fn shuffle_dup16_from_imm(&mut self, imm: Immediate) -> Option<u8> {
        let (a, b, c, d, e, f, g, h) = self.shuffle16_from_imm(imm)?;
        if a == b && b == c && c == d && d == e && e == f && f == g && g == h && a < 8 {
            Some(a)
        } else {
            None
        }
    }

    fn shuffle_dup32_from_imm(&mut self, imm: Immediate) -> Option<u8> {
        let (a, b, c, d) = self.shuffle32_from_imm(imm)?;
        if a == b && b == c && c == d && a < 4 {
            Some(a)
        } else {
            None
        }
    }

    fn shuffle_dup64_from_imm(&mut self, imm: Immediate) -> Option<u8> {
        let (a, b) = self.shuffle64_from_imm(imm)?;
        if a == b && a < 2 { Some(a) } else { None }
    }

    fn asimd_mov_mod_imm_zero(&mut self, size: &ScalarSize) -> ASIMDMovModImm {
        ASIMDMovModImm::zero(*size)
    }

    fn asimd_mov_mod_imm_from_u64(
        &mut self,
        val: u64,
        size: &ScalarSize,
    ) -> Option<ASIMDMovModImm> {
        ASIMDMovModImm::maybe_from_u64(val, *size)
    }

    fn asimd_fp_mod_imm_from_u64(&mut self, val: u64, size: &ScalarSize) -> Option<ASIMDFPModImm> {
        ASIMDFPModImm::maybe_from_u64(val, *size)
    }

    fn u64_low32_bits_unset(&mut self, val: u64) -> Option<u64> {
        if val & 0xffffffff == 0 {
            Some(val)
        } else {
            None
        }
    }

    fn shift_masked_imm(&mut self, ty: Type, imm: u64) -> u8 {
        (imm as u8) & ((ty.lane_bits() - 1) as u8)
    }

    fn simm7_scaled_from_i64(&mut self, val: i64, ty: Type) -> Option<SImm7Scaled> {
        SImm7Scaled::maybe_from_i64(val, ty)
    }

    fn simm9_from_i64(&mut self, val: i64) -> Option<SImm9> {
        SImm9::maybe_from_i64(val)
    }

    fn uimm12_scaled_from_i64(&mut self, val: i64, ty: Type) -> Option<UImm12Scaled> {
        UImm12Scaled::maybe_from_i64(val, ty)
    }
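    // The three immediate forms above mirror AArch64 addressing-mode
    // encodings. Roughly: `UImm12Scaled` is an unsigned 12-bit offset scaled
    // by the access size (for an I64 access, byte offsets 0..=32760 in steps
    // of 8); `SImm9` is a signed 9-bit unscaled offset (-256..=255); and
    // `SImm7Scaled` is the signed 7-bit scaled offset used by the
    // load/store-pair instructions (for I64 pairs, -512..=504 in steps of 8).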
    fn test_and_compare_bit_const(&mut self, ty: Type, n: u64) -> Option<u8> {
        if n.count_ones() != 1 {
            return None;
        }
        let bit = n.trailing_zeros();
        if bit >= ty.bits() {
            return None;
        }
        Some(bit as u8)
    }

    /// Use as a helper when generating `AluRRRShift` for `extr` instructions.
    fn a64_extr_imm(&mut self, ty: Type, shift: ImmShift) -> ShiftOpAndAmt {
        // The `extr` instruction is emitted via the `AluRRRShift` shape, which
        // requires a `ShiftOpAndAmt`, so `ty` and `shift` are translated into
        // one here. The `ShiftOp` value is only used for its encoding, not
        // its logical meaning.
        let (op, expected) = match ty {
            types::I32 => (ShiftOp::LSL, 0b00),
            types::I64 => (ShiftOp::LSR, 0b01),
            _ => unreachable!(),
        };
        assert_eq!(op.bits(), expected);
        ShiftOpAndAmt::new(
            op,
            ShiftOpShiftImm::maybe_from_shift(shift.value().into()).unwrap(),
        )
    }

    fn is_pic(&mut self) -> bool {
        self.backend.flags.is_pic()
    }
}