Path: blob/main/cranelift/codegen/src/isa/aarch64/lower/isle.rs
//! ISLE integration glue code for aarch64 lowering.

// Pull in the ISLE generated code.
pub mod generated_code;
use generated_code::{Context, ImmExtend};

// Types that the generated ISLE code uses via `use super::*`.
use super::{
    ASIMDFPModImm, ASIMDMovModImm, BranchTarget, CallInfo, Cond, CondBrKind, ExtendOp, FPUOpRI,
    FPUOpRIMod, FloatCC, Imm12, ImmLogic, ImmShift, Inst as MInst, IntCC, MachLabel, MemLabel,
    MoveWideConst, MoveWideOp, NZCV, Opcode, OperandSize, Reg, SImm9, ScalarSize, ShiftOpAndAmt,
    UImm5, UImm12Scaled, VecMisc2, VectorSize, fp_reg, lower_condcode, lower_fp_condcode,
    stack_reg, writable_link_reg, writable_zero_reg, zero_reg,
};
use crate::ir::{ArgumentExtension, condcodes};
use crate::isa;
use crate::isa::aarch64::AArch64Backend;
use crate::isa::aarch64::inst::{FPULeftShiftImm, FPURightShiftImm, ReturnCallInfo};
use crate::machinst::isle::*;
use crate::{
    binemit::CodeOffset,
    ir::{
        AtomicRmwOp, BlockCall, ExternalName, Inst, InstructionData, MemFlags, TrapCode, Value,
        ValueList, immediates::*, types::*,
    },
    isa::aarch64::abi::AArch64MachineDeps,
    isa::aarch64::inst::SImm7Scaled,
    isa::aarch64::inst::args::{ShiftOp, ShiftOpShiftImm},
    machinst::{
        CallArgList, CallRetList, InstOutput, MachInst, VCodeConstant, VCodeConstantData,
        abi::ArgPair, ty_bits,
    },
};
use core::u32;
use regalloc2::PReg;
use std::boxed::Box;
use std::vec::Vec;

type BoxCallInfo = Box<CallInfo<ExternalName>>;
type BoxCallIndInfo = Box<CallInfo<Reg>>;
type BoxReturnCallInfo = Box<ReturnCallInfo<ExternalName>>;
type BoxReturnCallIndInfo = Box<ReturnCallInfo<Reg>>;
type VecMachLabel = Vec<MachLabel>;
type BoxExternalName = Box<ExternalName>;
type VecArgPair = Vec<ArgPair>;

/// The main entry point for lowering with ISLE.
pub(crate) fn lower(
    lower_ctx: &mut Lower<MInst>,
    backend: &AArch64Backend,
    inst: Inst,
) -> Option<InstOutput> {
    // TODO: reuse the ISLE context across lowerings so we can reuse its
    // internal heap allocations.
    let mut isle_ctx = IsleContext { lower_ctx, backend };
    generated_code::constructor_lower(&mut isle_ctx, inst)
}

pub(crate) fn lower_branch(
    lower_ctx: &mut Lower<MInst>,
    backend: &AArch64Backend,
    branch: Inst,
    targets: &[MachLabel],
) -> Option<()> {
    // TODO: reuse the ISLE context across lowerings so we can reuse its
    // internal heap allocations.
    let mut isle_ctx = IsleContext { lower_ctx, backend };
    generated_code::constructor_lower_branch(&mut isle_ctx, branch, targets)
}

pub struct ExtendedValue {
    val: Value,
    extend: ExtendOp,
}

impl Context for IsleContext<'_, '_, MInst, AArch64Backend> {
    isle_lower_prelude_methods!();

    fn gen_call_info(
        &mut self,
        sig: Sig,
        dest: ExternalName,
        uses: CallArgList,
        defs: CallRetList,
        try_call_info: Option<TryCallInfo>,
    ) -> BoxCallInfo {
        let stack_ret_space = self.lower_ctx.sigs()[sig].sized_stack_ret_space();
        let stack_arg_space = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_outgoing_args_size(stack_ret_space + stack_arg_space);

        Box::new(
            self.lower_ctx
                .gen_call_info(sig, dest, uses, defs, try_call_info),
        )
    }
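
    // Illustrative: for a signature with, say, 24 bytes of stack arguments
    // and 8 bytes of stack return space, the call above asks the ABI layer to
    // reserve at least 32 bytes of outgoing-argument space in the caller's
    // frame.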

    fn gen_call_ind_info(
        &mut self,
        sig: Sig,
        dest: Reg,
        uses: CallArgList,
        defs: CallRetList,
        try_call_info: Option<TryCallInfo>,
    ) -> BoxCallIndInfo {
        let stack_ret_space = self.lower_ctx.sigs()[sig].sized_stack_ret_space();
        let stack_arg_space = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_outgoing_args_size(stack_ret_space + stack_arg_space);

        Box::new(
            self.lower_ctx
                .gen_call_info(sig, dest, uses, defs, try_call_info),
        )
    }

    fn gen_return_call_info(
        &mut self,
        sig: Sig,
        dest: ExternalName,
        uses: CallArgList,
    ) -> BoxReturnCallInfo {
        let new_stack_arg_size = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_tail_args_size(new_stack_arg_size);

        let key =
            AArch64MachineDeps::select_api_key(&self.backend.isa_flags, isa::CallConv::Tail, true);

        Box::new(ReturnCallInfo {
            dest,
            uses,
            key,
            new_stack_arg_size,
        })
    }

    fn gen_return_call_ind_info(
        &mut self,
        sig: Sig,
        dest: Reg,
        uses: CallArgList,
    ) -> BoxReturnCallIndInfo {
        let new_stack_arg_size = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_tail_args_size(new_stack_arg_size);

        let key =
            AArch64MachineDeps::select_api_key(&self.backend.isa_flags, isa::CallConv::Tail, true);

        Box::new(ReturnCallInfo {
            dest,
            uses,
            key,
            new_stack_arg_size,
        })
    }

    fn sign_return_address_disabled(&mut self) -> Option<()> {
        if self.backend.isa_flags.sign_return_address() {
            None
        } else {
            Some(())
        }
    }

    fn use_lse(&mut self, _: Inst) -> Option<()> {
        if self.backend.isa_flags.has_lse() {
            Some(())
        } else {
            None
        }
    }

    fn use_fp16(&mut self) -> bool {
        self.backend.isa_flags.has_fp16()
    }

    fn move_wide_const_from_u64(&mut self, ty: Type, n: u64) -> Option<MoveWideConst> {
        let bits = ty.bits();
        let n = if bits < 64 {
            n & !(u64::MAX << bits)
        } else {
            n
        };
        MoveWideConst::maybe_from_u64(n)
    }

    fn move_wide_const_from_inverted_u64(&mut self, ty: Type, n: u64) -> Option<MoveWideConst> {
        self.move_wide_const_from_u64(ty, !n)
    }
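
    // Illustrative example for the pair above: for I32, the constant
    // 0xffff_fff0 has no single-`movz` encoding, but its inversion, masked to
    // 32 bits, is 0xf, which does. `move_wide_const_from_inverted_u64` finds
    // that encoding so the lowering can use `movn` instead.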

    fn imm_logic_from_u64(&mut self, ty: Type, n: u64) -> Option<ImmLogic> {
        ImmLogic::maybe_from_u64(n, ty)
    }

    fn imm_size_from_type(&mut self, ty: Type) -> Option<u16> {
        match ty {
            I32 => Some(32),
            I64 => Some(64),
            _ => None,
        }
    }

    fn imm_logic_from_imm64(&mut self, ty: Type, n: Imm64) -> Option<ImmLogic> {
        let ty = if ty.bits() < 32 { I32 } else { ty };
        self.imm_logic_from_u64(ty, n.bits() as u64)
    }

    fn imm12_from_u64(&mut self, n: u64) -> Option<Imm12> {
        Imm12::maybe_from_u64(n)
    }

    fn imm_shift_from_u8(&mut self, n: u8) -> ImmShift {
        ImmShift::maybe_from_u64(n.into()).unwrap()
    }

    fn lshr_from_u64(&mut self, ty: Type, n: u64) -> Option<ShiftOpAndAmt> {
        let shiftimm = ShiftOpShiftImm::maybe_from_shift(n)?;
        if let Ok(bits) = u8::try_from(ty_bits(ty)) {
            let shiftimm = shiftimm.mask(bits);
            Some(ShiftOpAndAmt::new(ShiftOp::LSR, shiftimm))
        } else {
            None
        }
    }

    fn lshl_from_imm64(&mut self, ty: Type, n: Imm64) -> Option<ShiftOpAndAmt> {
        self.lshl_from_u64(ty, n.bits() as u64)
    }

    fn lshl_from_u64(&mut self, ty: Type, n: u64) -> Option<ShiftOpAndAmt> {
        let shiftimm = ShiftOpShiftImm::maybe_from_shift(n)?;
        let shiftee_bits = ty_bits(ty);
        if shiftee_bits <= std::u8::MAX as usize {
            let shiftimm = shiftimm.mask(shiftee_bits as u8);
            Some(ShiftOpAndAmt::new(ShiftOp::LSL, shiftimm))
        } else {
            None
        }
    }

    fn ashr_from_u64(&mut self, ty: Type, n: u64) -> Option<ShiftOpAndAmt> {
        let shiftimm = ShiftOpShiftImm::maybe_from_shift(n)?;
        let shiftee_bits = ty_bits(ty);
        if shiftee_bits <= std::u8::MAX as usize {
            let shiftimm = shiftimm.mask(shiftee_bits as u8);
            Some(ShiftOpAndAmt::new(ShiftOp::ASR, shiftimm))
        } else {
            None
        }
    }
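
    // Note on the three shift helpers above (illustrative): the amount is
    // masked to the shiftee's width, matching CLIF's modulo semantics, so an
    // i32 shifted left by 40 lowers to a shift by 40 & 31 = 8.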

    fn integral_ty(&mut self, ty: Type) -> Option<Type> {
        match ty {
            I8 | I16 | I32 | I64 => Some(ty),
            _ => None,
        }
    }

    fn is_zero_simm9(&mut self, imm: &SImm9) -> Option<()> {
        if imm.value() == 0 { Some(()) } else { None }
    }

    fn is_zero_uimm12(&mut self, imm: &UImm12Scaled) -> Option<()> {
        if imm.value() == 0 { Some(()) } else { None }
    }

    /// This is target-word-size dependent, and it excludes booleans and reftypes.
    fn valid_atomic_transaction(&mut self, ty: Type) -> Option<Type> {
        match ty {
            I8 | I16 | I32 | I64 => Some(ty),
            _ => None,
        }
    }
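
    // Worked example (illustrative) for the fallback below: materializing
    // 0xffff_ffff_1234_ffff starting from `movz` (all-zeros) would take four
    // instructions, since all four 16-bit slices differ from zero. Starting
    // from `movn` (all-ones) leaves only slice 1 wrong, so the whole constant
    // lowers to a single `movn rd, #0xedcb, lsl #16` (`rd` stands in for the
    // temporary register; !0xedcb == 0x1234).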

    /// This is the fallback case for loading a 64-bit integral constant into a
    /// register.
    ///
    /// The logic here is nontrivial enough that it's not really worth porting
    /// this over to ISLE.
    fn load_constant_full(
        &mut self,
        ty: Type,
        extend: &ImmExtend,
        extend_to: &OperandSize,
        value: u64,
    ) -> Reg {
        let bits = ty.bits();

        let value = match (extend_to, *extend) {
            (OperandSize::Size32, ImmExtend::Sign) if bits < 32 => {
                let shift = 32 - bits;
                let value = value as i32;

                // We cast first to a u32 and then to a u64 to ensure that we
                // are representing an i32 in a u64, not an i64. This is
                // important: otherwise the value will not fit in 32 bits.
                ((value << shift) >> shift) as u32 as u64
            }
            (OperandSize::Size32, ImmExtend::Zero) if bits < 32 => {
                value & !((u32::MAX as u64) << bits)
            }
            (OperandSize::Size64, ImmExtend::Sign) if bits < 64 => {
                let shift = 64 - bits;
                let value = value as i64;

                ((value << shift) >> shift) as u64
            }
            (OperandSize::Size64, ImmExtend::Zero) if bits < 64 => value & !(u64::MAX << bits),
            _ => value,
        };

        // Divide the value into 16-bit slices that we can manipulate using
        // `movz`, `movn`, and `movk`.
        fn get(value: u64, shift: u8) -> u16 {
            (value >> (shift * 16)) as u16
        }
        fn replace(mut old: u64, new: u16, shift: u8) -> u64 {
            let offset = shift * 16;
            old &= !(0xffff << offset);
            old |= u64::from(new) << offset;
            old
        }

        // The 32-bit versions of `movz`/`movn`/`movk` will clear the upper 32
        // bits, so if that's the outcome we want we might as well use them. For
        // simplicity and ease of reading the disassembly, we use the same size
        // for all instructions in the sequence.
        let size = if value >> 32 == 0 {
            OperandSize::Size32
        } else {
            OperandSize::Size64
        };

        // The `movz` instruction initially sets all bits to zero, while `movn`
        // initially sets all bits to one. A good choice of initial value can
        // reduce the number of `movk` instructions we need afterward, so we
        // check both variants to determine which is closest to the constant
        // we actually wanted. In case they're equally good, we prefer `movz`
        // because the assembly listings are generally harder to read when the
        // operands are negated.
        let (mut running_value, op, first) =
            [(MoveWideOp::MovZ, 0), (MoveWideOp::MovN, size.max_value())]
                .into_iter()
                .map(|(op, base)| {
                    // Both `movz` and `movn` can overwrite one slice after setting
                    // the initial value; we get to pick which one. 32-bit variants
                    // can only modify the lower two slices.
                    let first = (0..(size.bits() / 16))
                        // Pick one slice that's different from the initial value
                        .find(|&i| get(base ^ value, i) != 0)
                        // If none are different, we still have to pick one
                        .unwrap_or(0);
                    // Compute the value we'll get from this `movz`/`movn`
                    (replace(base, get(value, first), first), op, first)
                })
                // Count how many `movk` instructions we'll need.
                .min_by_key(|(base, ..)| (0..4).filter(|&i| get(base ^ value, i) != 0).count())
                // The iterator isn't empty, so `min_by_key` always returns something.
                .unwrap();

        // Build the initial instruction we chose above, putting the result
        // into a new temporary virtual register. Note that the encoding for the
        // immediate operand is bitwise-inverted for `movn`.
        let mut rd = self.temp_writable_reg(I64);
        self.lower_ctx.emit(MInst::MovWide {
            op,
            rd,
            imm: MoveWideConst {
                bits: match op {
                    MoveWideOp::MovZ => get(value, first),
                    MoveWideOp::MovN => !get(value, first),
                },
                shift: first,
            },
            size,
        });
        if self.backend.flags.enable_pcc() {
            self.lower_ctx
                .add_range_fact(rd.to_reg(), 64, running_value, running_value);
        }

        // Emit a `movk` instruction for each remaining slice of the desired
        // constant that does not match the initial value constructed above.
        for shift in (first + 1)..(size.bits() / 16) {
            let bits = get(value, shift);
            if bits != get(running_value, shift) {
                let rn = rd.to_reg();
                rd = self.temp_writable_reg(I64);
                self.lower_ctx.emit(MInst::MovK {
                    rd,
                    rn,
                    imm: MoveWideConst { bits, shift },
                    size,
                });
                running_value = replace(running_value, bits, shift);
                if self.backend.flags.enable_pcc() {
                    self.lower_ctx
                        .add_range_fact(rd.to_reg(), 64, running_value, running_value);
                }
            }
        }

        debug_assert_eq!(value, running_value);
        rd.to_reg()
    }

    fn zero_reg(&mut self) -> Reg {
        zero_reg()
    }

    fn stack_reg(&mut self) -> Reg {
        stack_reg()
    }

    fn fp_reg(&mut self) -> Reg {
        fp_reg()
    }

    fn writable_link_reg(&mut self) -> WritableReg {
        writable_link_reg()
    }

    fn extended_value_from_value(&mut self, val: Value) -> Option<ExtendedValue> {
        let (val, extend) = super::get_as_extended_value(self.lower_ctx, val)?;
        Some(ExtendedValue { val, extend })
    }

    fn put_extended_in_reg(&mut self, reg: &ExtendedValue) -> Reg {
        self.put_in_reg(reg.val)
    }

    fn get_extended_op(&mut self, reg: &ExtendedValue) -> ExtendOp {
        reg.extend
    }

    fn emit(&mut self, inst: &MInst) -> Unit {
        self.lower_ctx.emit(inst.clone());
    }

    fn cond_br_zero(&mut self, reg: Reg, size: &OperandSize) -> CondBrKind {
        CondBrKind::Zero(reg, *size)
    }

    fn cond_br_not_zero(&mut self, reg: Reg, size: &OperandSize) -> CondBrKind {
        CondBrKind::NotZero(reg, *size)
    }

    fn cond_br_cond(&mut self, cond: &Cond) -> CondBrKind {
        CondBrKind::Cond(*cond)
    }

    fn nzcv(&mut self, n: bool, z: bool, c: bool, v: bool) -> NZCV {
        NZCV::new(n, z, c, v)
    }

    fn u8_into_uimm5(&mut self, x: u8) -> UImm5 {
        UImm5::maybe_from_u8(x).unwrap()
    }

    fn u8_into_imm12(&mut self, x: u8) -> Imm12 {
        Imm12::maybe_from_u64(x.into()).unwrap()
    }

    fn writable_zero_reg(&mut self) -> WritableReg {
        writable_zero_reg()
    }

    fn shift_mask(&mut self, ty: Type) -> ImmLogic {
        debug_assert!(ty.lane_bits().is_power_of_two());

        let mask = (ty.lane_bits() - 1) as u64;
        ImmLogic::maybe_from_u64(mask, I32).unwrap()
    }

    fn imm_shift_from_imm64(&mut self, ty: Type, val: Imm64) -> Option<ImmShift> {
        let imm_value = (val.bits() as u64) & ((ty.bits() - 1) as u64);
        ImmShift::maybe_from_u64(imm_value)
    }

    fn u64_into_imm_logic(&mut self, ty: Type, val: u64) -> ImmLogic {
        ImmLogic::maybe_from_u64(val, ty).unwrap()
    }

    fn negate_imm_shift(&mut self, ty: Type, mut imm: ImmShift) -> ImmShift {
        let size = u8::try_from(ty.bits()).unwrap();
        imm.imm = size.wrapping_sub(imm.value());
        imm.imm &= size - 1;
        imm
    }
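
    // Illustrative: `negate_imm_shift` maps a shift amount k to
    // (width - k) mod width, so for I32 a shift of 8 becomes 24. The rotate
    // helpers below rely on the same identity: a rotate-right by k is a
    // rotate-left by width - k.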

    fn rotr_mask(&mut self, ty: Type) -> ImmLogic {
        ImmLogic::maybe_from_u64((ty.bits() - 1) as u64, I32).unwrap()
    }

    fn rotr_opposite_amount(&mut self, ty: Type, val: ImmShift) -> ImmShift {
        let amount = val.value() & u8::try_from(ty.bits() - 1).unwrap();
        ImmShift::maybe_from_u64(u64::from(ty.bits()) - u64::from(amount)).unwrap()
    }

    fn icmp_zero_cond(&mut self, cond: &IntCC) -> Option<IntCC> {
        match cond {
            &IntCC::Equal
            | &IntCC::SignedGreaterThanOrEqual
            | &IntCC::SignedGreaterThan
            | &IntCC::SignedLessThanOrEqual
            | &IntCC::SignedLessThan => Some(*cond),
            _ => None,
        }
    }

    fn fcmp_zero_cond(&mut self, cond: &FloatCC) -> Option<FloatCC> {
        match cond {
            &FloatCC::Equal
            | &FloatCC::GreaterThanOrEqual
            | &FloatCC::GreaterThan
            | &FloatCC::LessThanOrEqual
            | &FloatCC::LessThan => Some(*cond),
            _ => None,
        }
    }

    fn fcmp_zero_cond_not_eq(&mut self, cond: &FloatCC) -> Option<FloatCC> {
        match cond {
            &FloatCC::NotEqual => Some(FloatCC::NotEqual),
            _ => None,
        }
    }

    fn icmp_zero_cond_not_eq(&mut self, cond: &IntCC) -> Option<IntCC> {
        match cond {
            &IntCC::NotEqual => Some(IntCC::NotEqual),
            _ => None,
        }
    }

    fn float_cc_cmp_zero_to_vec_misc_op(&mut self, cond: &FloatCC) -> VecMisc2 {
        match cond {
            &FloatCC::Equal => VecMisc2::Fcmeq0,
            &FloatCC::GreaterThanOrEqual => VecMisc2::Fcmge0,
            &FloatCC::LessThanOrEqual => VecMisc2::Fcmle0,
            &FloatCC::GreaterThan => VecMisc2::Fcmgt0,
            &FloatCC::LessThan => VecMisc2::Fcmlt0,
            _ => panic!(),
        }
    }

    fn int_cc_cmp_zero_to_vec_misc_op(&mut self, cond: &IntCC) -> VecMisc2 {
        match cond {
            &IntCC::Equal => VecMisc2::Cmeq0,
            &IntCC::SignedGreaterThanOrEqual => VecMisc2::Cmge0,
            &IntCC::SignedLessThanOrEqual => VecMisc2::Cmle0,
            &IntCC::SignedGreaterThan => VecMisc2::Cmgt0,
            &IntCC::SignedLessThan => VecMisc2::Cmlt0,
            _ => panic!(),
        }
    }

    fn float_cc_cmp_zero_to_vec_misc_op_swap(&mut self, cond: &FloatCC) -> VecMisc2 {
        match cond {
            &FloatCC::Equal => VecMisc2::Fcmeq0,
            &FloatCC::GreaterThanOrEqual => VecMisc2::Fcmle0,
            &FloatCC::LessThanOrEqual => VecMisc2::Fcmge0,
            &FloatCC::GreaterThan => VecMisc2::Fcmlt0,
            &FloatCC::LessThan => VecMisc2::Fcmgt0,
            _ => panic!(),
        }
    }

    fn int_cc_cmp_zero_to_vec_misc_op_swap(&mut self, cond: &IntCC) -> VecMisc2 {
        match cond {
            &IntCC::Equal => VecMisc2::Cmeq0,
            &IntCC::SignedGreaterThanOrEqual => VecMisc2::Cmle0,
            &IntCC::SignedLessThanOrEqual => VecMisc2::Cmge0,
            &IntCC::SignedGreaterThan => VecMisc2::Cmlt0,
            &IntCC::SignedLessThan => VecMisc2::Cmgt0,
            _ => panic!(),
        }
    }

    fn fp_cond_code(&mut self, cc: &condcodes::FloatCC) -> Cond {
        lower_fp_condcode(*cc)
    }

    fn cond_code(&mut self, cc: &condcodes::IntCC) -> Cond {
        lower_condcode(*cc)
    }

    fn invert_cond(&mut self, cond: &Cond) -> Cond {
        (*cond).invert()
    }

    fn preg_sp(&mut self) -> PReg {
        super::regs::stack_reg().to_real_reg().unwrap().into()
    }

    fn preg_fp(&mut self) -> PReg {
        super::regs::fp_reg().to_real_reg().unwrap().into()
    }

    fn preg_link(&mut self) -> PReg {
        super::regs::link_reg().to_real_reg().unwrap().into()
    }

    fn preg_pinned(&mut self) -> PReg {
        super::regs::pinned_reg().to_real_reg().unwrap().into()
    }

    fn branch_target(&mut self, label: MachLabel) -> BranchTarget {
        BranchTarget::Label(label)
    }

    fn targets_jt_space(&mut self, elements: &BoxVecMachLabel) -> CodeOffset {
        // Calculate the number of bytes needed for the jump-table sequence:
        // 4 bytes per instruction, with an 8-instruction base sequence plus
        // one instruction slot per jump-table entry.
        (4 * (8 + elements.len())).try_into().unwrap()
    }
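
    // Illustrative: under the formula above, a jump table with 4 entries
    // reserves 4 * (8 + 4) = 48 bytes of code space.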

    fn min_fp_value(&mut self, signed: bool, in_bits: u8, out_bits: u8) -> Reg {
        if in_bits == 32 {
            // From float32.
            let min = match (signed, out_bits) {
                (true, 8) => i8::MIN as f32 - 1.,
                (true, 16) => i16::MIN as f32 - 1.,
                (true, 32) => i32::MIN as f32, // I32_MIN - 1 isn't precisely representable as an f32.
                (true, 64) => i64::MIN as f32, // I64_MIN - 1 isn't precisely representable as an f32.

                (false, _) => -1.,
                _ => unimplemented!(
                    "unexpected {} output size of {} bits for 32-bit input",
                    if signed { "signed" } else { "unsigned" },
                    out_bits
                ),
            };

            generated_code::constructor_constant_f32(self, min.to_bits())
        } else if in_bits == 64 {
            // From float64.
            let min = match (signed, out_bits) {
                (true, 8) => i8::MIN as f64 - 1.,
                (true, 16) => i16::MIN as f64 - 1.,
                (true, 32) => i32::MIN as f64 - 1.,
                (true, 64) => i64::MIN as f64,

                (false, _) => -1.,
                _ => unimplemented!(
                    "unexpected {} output size of {} bits for 64-bit input",
                    if signed { "signed" } else { "unsigned" },
                    out_bits
                ),
            };

            generated_code::constructor_constant_f64(self, min.to_bits())
        } else {
            unimplemented!(
                "unexpected input size for min_fp_value: {} (signed: {}, output size: {})",
                in_bits,
                signed,
                out_bits
            );
        }
    }

    fn max_fp_value(&mut self, signed: bool, in_bits: u8, out_bits: u8) -> Reg {
        if in_bits == 32 {
            // From float32.
            let max = match (signed, out_bits) {
                (true, 8) => i8::MAX as f32 + 1.,
                (true, 16) => i16::MAX as f32 + 1.,
                (true, 32) => (i32::MAX as u64 + 1) as f32,
                (true, 64) => (i64::MAX as u64 + 1) as f32,

                (false, 8) => u8::MAX as f32 + 1.,
                (false, 16) => u16::MAX as f32 + 1.,
                (false, 32) => (u32::MAX as u64 + 1) as f32,
                (false, 64) => (u64::MAX as u128 + 1) as f32,
                _ => unimplemented!(
                    "unexpected {} output size of {} bits for 32-bit input",
                    if signed { "signed" } else { "unsigned" },
                    out_bits
                ),
            };

            generated_code::constructor_constant_f32(self, max.to_bits())
        } else if in_bits == 64 {
            // From float64.
            let max = match (signed, out_bits) {
                (true, 8) => i8::MAX as f64 + 1.,
                (true, 16) => i16::MAX as f64 + 1.,
                (true, 32) => i32::MAX as f64 + 1.,
                (true, 64) => (i64::MAX as u64 + 1) as f64,

                (false, 8) => u8::MAX as f64 + 1.,
                (false, 16) => u16::MAX as f64 + 1.,
                (false, 32) => u32::MAX as f64 + 1.,
                (false, 64) => (u64::MAX as u128 + 1) as f64,
                _ => unimplemented!(
                    "unexpected {} output size of {} bits for 64-bit input",
                    if signed { "signed" } else { "unsigned" },
                    out_bits
                ),
            };

            generated_code::constructor_constant_f64(self, max.to_bits())
        } else {
            unimplemented!(
                "unexpected input size for max_fp_value: {} (signed: {}, output size: {})",
                in_bits,
                signed,
                out_bits
            );
        }
    }
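
    // Note (illustrative): the bound asymmetry above is a precision artifact.
    // -2^31 is exactly representable as an f32, but -2^31 - 1 (which needs 32
    // significand bits) is not, so the f32 path uses i32::MIN itself, while
    // the f64 path has enough precision to use i32::MIN - 1.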

    fn fpu_op_ri_ushr(&mut self, ty_bits: u8, shift: u8) -> FPUOpRI {
        if ty_bits == 32 {
            FPUOpRI::UShr32(FPURightShiftImm::maybe_from_u8(shift, ty_bits).unwrap())
        } else if ty_bits == 64 {
            FPUOpRI::UShr64(FPURightShiftImm::maybe_from_u8(shift, ty_bits).unwrap())
        } else {
            unimplemented!(
                "unexpected input size for fpu_op_ri_ushr: {} (shift: {})",
                ty_bits,
                shift
            );
        }
    }

    fn fpu_op_ri_sli(&mut self, ty_bits: u8, shift: u8) -> FPUOpRIMod {
        if ty_bits == 32 {
            FPUOpRIMod::Sli32(FPULeftShiftImm::maybe_from_u8(shift, ty_bits).unwrap())
        } else if ty_bits == 64 {
            FPUOpRIMod::Sli64(FPULeftShiftImm::maybe_from_u8(shift, ty_bits).unwrap())
        } else {
            unimplemented!(
                "unexpected input size for fpu_op_ri_sli: {} (shift: {})",
                ty_bits,
                shift
            );
        }
    }

    fn vec_extract_imm4_from_immediate(&mut self, imm: Immediate) -> Option<u8> {
        let bytes = self.lower_ctx.get_immediate_data(imm).as_slice();

        if bytes.windows(2).all(|a| a[0] + 1 == a[1]) && bytes[0] < 16 {
            Some(bytes[0])
        } else {
            None
        }
    }

    fn shuffle_dup8_from_imm(&mut self, imm: Immediate) -> Option<u8> {
        let bytes = self.lower_ctx.get_immediate_data(imm).as_slice();
        if bytes.iter().all(|b| *b == bytes[0]) && bytes[0] < 16 {
            Some(bytes[0])
        } else {
            None
        }
    }

    fn shuffle_dup16_from_imm(&mut self, imm: Immediate) -> Option<u8> {
        let (a, b, c, d, e, f, g, h) = self.shuffle16_from_imm(imm)?;
        if a == b && b == c && c == d && d == e && e == f && f == g && g == h && a < 8 {
            Some(a)
        } else {
            None
        }
    }

    fn shuffle_dup32_from_imm(&mut self, imm: Immediate) -> Option<u8> {
        let (a, b, c, d) = self.shuffle32_from_imm(imm)?;
        if a == b && b == c && c == d && a < 4 {
            Some(a)
        } else {
            None
        }
    }

    fn shuffle_dup64_from_imm(&mut self, imm: Immediate) -> Option<u8> {
        let (a, b) = self.shuffle64_from_imm(imm)?;
        if a == b && a < 2 { Some(a) } else { None }
    }

    fn asimd_mov_mod_imm_zero(&mut self, size: &ScalarSize) -> ASIMDMovModImm {
        ASIMDMovModImm::zero(*size)
    }

    fn asimd_mov_mod_imm_from_u64(
        &mut self,
        val: u64,
        size: &ScalarSize,
    ) -> Option<ASIMDMovModImm> {
        ASIMDMovModImm::maybe_from_u64(val, *size)
    }

    fn asimd_fp_mod_imm_from_u64(&mut self, val: u64, size: &ScalarSize) -> Option<ASIMDFPModImm> {
        ASIMDFPModImm::maybe_from_u64(val, *size)
    }

    fn u64_low32_bits_unset(&mut self, val: u64) -> Option<u64> {
        if val & 0xffffffff == 0 {
            Some(val)
        } else {
            None
        }
    }

    fn shift_masked_imm(&mut self, ty: Type, imm: u64) -> u8 {
        (imm as u8) & ((ty.lane_bits() - 1) as u8)
    }

    fn simm7_scaled_from_i64(&mut self, val: i64, ty: Type) -> Option<SImm7Scaled> {
        SImm7Scaled::maybe_from_i64(val, ty)
    }

    fn simm9_from_i64(&mut self, val: i64) -> Option<SImm9> {
        SImm9::maybe_from_i64(val)
    }

    fn uimm12_scaled_from_i64(&mut self, val: i64, ty: Type) -> Option<UImm12Scaled> {
        UImm12Scaled::maybe_from_i64(val, ty)
    }

    fn test_and_compare_bit_const(&mut self, ty: Type, n: u64) -> Option<u8> {
        if n.count_ones() != 1 {
            return None;
        }
        let bit = n.trailing_zeros();
        if bit >= ty.bits() {
            return None;
        }
        Some(bit as u8)
    }

    /// Use as a helper when generating `AluRRRShift` for `extr` instructions.
    fn a64_extr_imm(&mut self, ty: Type, shift: ImmShift) -> ShiftOpAndAmt {
        // The `AluRRRShift` shape requires a `ShiftOpAndAmt`, so `ty` and
        // `shift` are translated into one here. The `ShiftOp` value is only
        // used for its encoding, not its logical meaning.
        let (op, expected) = match ty {
            types::I32 => (ShiftOp::LSL, 0b00),
            types::I64 => (ShiftOp::LSR, 0b01),
            _ => unreachable!(),
        };
        assert_eq!(op.bits(), expected);
        ShiftOpAndAmt::new(
            op,
            ShiftOpShiftImm::maybe_from_shift(shift.value().into()).unwrap(),
        )
    }

    fn is_pic(&mut self) -> bool {
        self.backend.flags.is_pic()
    }
}