Path: blob/main/cranelift/codegen/src/isa/s390x/lower/isle.rs
//! ISLE integration glue code for s390x lowering.

// Pull in the ISLE generated code.
pub mod generated_code;

// Types that the generated ISLE code uses via `use super::*`.
use crate::ir::ExternalName;
use crate::isa::s390x::S390xBackend;
use crate::isa::s390x::abi::REG_SAVE_AREA_SIZE;
use crate::isa::s390x::inst::{
    CallInstDest, Cond, Inst as MInst, LaneOrder, MemArg, RegPair, ReturnCallInfo, SImm20,
    SymbolReloc, UImm12, UImm16Shifted, UImm32Shifted, WritableRegPair, gpr, stack_reg,
    writable_gpr, zero_reg,
};
use crate::machinst::isle::*;
use crate::machinst::{CallInfo, MachLabel, Reg, TryCallInfo, non_writable_value_regs};
use crate::{
    ir::{
        AtomicRmwOp, BlockCall, Endianness, Inst, InstructionData, KnownSymbol, MemFlags, Opcode,
        TrapCode, Value, ValueList, condcodes::*, immediates::*, types::*,
    },
    isa::CallConv,
    machinst::{
        ArgPair, CallArgList, CallRetList, InstOutput, MachInst, VCodeConstant, VCodeConstantData,
    },
};
use alloc::boxed::Box;
use alloc::vec::Vec;
use core::cell::Cell;
use regalloc2::PReg;

type BoxCallInfo = Box<CallInfo<CallInstDest>>;
type BoxReturnCallInfo = Box<ReturnCallInfo<CallInstDest>>;
type VecMachLabel = Vec<MachLabel>;
type BoxExternalName = Box<ExternalName>;
type BoxSymbolReloc = Box<SymbolReloc>;
type VecMInst = Vec<MInst>;
type VecMInstBuilder = Cell<Vec<MInst>>;
type VecArgPair = Vec<ArgPair>;

/// The main entry point for lowering with ISLE.
pub(crate) fn lower(
    lower_ctx: &mut Lower<MInst>,
    backend: &S390xBackend,
    inst: Inst,
) -> Option<InstOutput> {
    // TODO: reuse the ISLE context across lowerings so we can reuse its
    // internal heap allocations.
    let mut isle_ctx = IsleContext { lower_ctx, backend };
    generated_code::constructor_lower(&mut isle_ctx, inst)
}

/// The main entry point for branch lowering with ISLE.
pub(crate) fn lower_branch(
    lower_ctx: &mut Lower<MInst>,
    backend: &S390xBackend,
    branch: Inst,
    targets: &[MachLabel],
) -> Option<()> {
    // TODO: reuse the ISLE context across lowerings so we can reuse its
    // internal heap allocations.
    let mut isle_ctx = IsleContext { lower_ctx, backend };
    generated_code::constructor_lower_branch(&mut isle_ctx, branch, targets)
}

impl generated_code::Context for IsleContext<'_, '_, MInst, S390xBackend> {
    isle_lower_prelude_methods!();

    #[inline]
    fn call_inst_dest_direct(&mut self, name: ExternalName) -> CallInstDest {
        CallInstDest::Direct { name }
    }

    #[inline]
    fn call_inst_dest_indirect(&mut self, reg: Reg) -> CallInstDest {
        CallInstDest::Indirect { reg }
    }

    // Adjust the stack before performing a (regular) call to a function
    // using the tail-call ABI. We need to allocate the part of the callee's
    // frame holding the incoming argument area. If necessary for unwinding,
    // we also create a (temporary) copy of the backchain.
    fn abi_emit_call_adjust_stack(&mut self, abi: Sig) -> Unit {
        let sig_data = &self.lower_ctx.sigs()[abi];
        if sig_data.call_conv() == CallConv::Tail {
            let arg_space = sig_data.sized_stack_arg_space();
            if arg_space > 0 {
                if self.backend.flags.preserve_frame_pointers() {
                    let tmp = self.lower_ctx.alloc_tmp(I64).only_reg().unwrap();
                    let src_mem = MemArg::reg(stack_reg(), MemFlags::trusted());
                    let dst_mem = MemArg::reg(stack_reg(), MemFlags::trusted());
                    self.emit(&MInst::Load64 {
                        rd: tmp,
                        mem: src_mem,
                    });
                    self.emit(&MInst::AllocateArgs { size: arg_space });
                    self.emit(&MInst::Store64 {
                        rd: tmp.to_reg(),
                        mem: dst_mem,
                    });
                } else {
                    self.emit(&MInst::AllocateArgs { size: arg_space });
                }
            }
        }
    }

    // Adjust the stack before performing a tail call. The actual stack
    // adjustment is deferred to the call instruction itself, but we create
    // a temporary backchain copy in the proper place here, if necessary
    // for unwinding.
    fn abi_emit_return_call_adjust_stack(&mut self, abi: Sig) -> Unit {
        let sig_data = &self.lower_ctx.sigs()[abi];
        let arg_space = sig_data.sized_stack_arg_space();
        if arg_space > 0 && self.backend.flags.preserve_frame_pointers() {
            let tmp = self.lower_ctx.alloc_tmp(I64).only_reg().unwrap();
            let src_mem = MemArg::InitialSPOffset { off: 0 };
            let dst_mem = MemArg::InitialSPOffset {
                off: -(arg_space as i64),
            };
            self.emit(&MInst::Load64 {
                rd: tmp,
                mem: src_mem,
            });
            self.emit(&MInst::Store64 {
                rd: tmp.to_reg(),
                mem: dst_mem,
            });
        }
    }

    // Load call arguments into a vector of ValueRegs. This is the same as
    // the common-code put_in_regs_vec routine, except that we also handle
    // vector lane swaps if caller and callee differ in lane order.
    fn abi_prepare_args(&mut self, abi: Sig, (list, off): ValueSlice) -> ValueRegsVec {
        let lane_order = LaneOrder::from(self.lower_ctx.sigs()[abi].call_conv());
        let lane_swap_needed = self.lane_order() != lane_order;

        (off..list.len(&self.lower_ctx.dfg().value_lists))
            .map(|ix| {
                let val = list.get(ix, &self.lower_ctx.dfg().value_lists).unwrap();
                let ty = self.lower_ctx.dfg().value_type(val);
                let regs = self.put_in_regs(val);

                if lane_swap_needed && ty.is_vector() && ty.lane_count() >= 2 {
                    let tmp_regs = self.lower_ctx.alloc_tmp(ty);
                    self.emit(&MInst::VecEltRev {
                        lane_count: ty.lane_count(),
                        rd: tmp_regs.only_reg().unwrap(),
                        rn: regs.only_reg().unwrap(),
                    });
                    non_writable_value_regs(tmp_regs)
                } else {
                    regs
                }
            })
            .collect()
    }

    fn gen_call_info(
        &mut self,
        sig: Sig,
        dest: CallInstDest,
        uses: CallArgList,
        defs: CallRetList,
        try_call_info: Option<TryCallInfo>,
        patchable: bool,
    ) -> BoxCallInfo {
        let stack_ret_space = self.lower_ctx.sigs()[sig].sized_stack_ret_space();
        let stack_arg_space = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        let total_space = if self.lower_ctx.sigs()[sig].call_conv() != CallConv::Tail {
            REG_SAVE_AREA_SIZE + stack_arg_space + stack_ret_space
        } else {
            REG_SAVE_AREA_SIZE + stack_ret_space
        };
        self.lower_ctx
            .abi_mut()
            .accumulate_outgoing_args_size(total_space);

        Box::new(
            self.lower_ctx
                .gen_call_info(sig, dest, uses, defs, try_call_info, patchable),
        )
    }

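    // Build the `ReturnCallInfo` for a tail call. The callee pops the
    // incoming argument area, so its size is accounted for here as
    // tail-call argument space.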
    fn gen_return_call_info(
        &mut self,
        sig: Sig,
        dest: CallInstDest,
        uses: CallArgList,
    ) -> BoxReturnCallInfo {
        let callee_pop_size = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_tail_args_size(callee_pop_size);

        Box::new(ReturnCallInfo {
            dest,
            uses,
            callee_pop_size,
        })
    }

    fn abi_for_elf_tls_get_offset(&mut self) {
        self.lower_ctx
            .abi_mut()
            .accumulate_outgoing_args_size(REG_SAVE_AREA_SIZE);
    }

    #[inline]
    fn box_symbol_reloc(&mut self, symbol_reloc: &SymbolReloc) -> BoxSymbolReloc {
        Box::new(symbol_reloc.clone())
    }

    #[inline]
    fn mie3_enabled(&mut self, _: Type) -> Option<()> {
        if self.backend.isa_flags.has_mie3() {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn mie3_disabled(&mut self, _: Type) -> Option<()> {
        if !self.backend.isa_flags.has_mie3() {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn mie4_enabled(&mut self, _: Type) -> Option<()> {
        if self.backend.isa_flags.has_mie4() {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn mie4_disabled(&mut self, _: Type) -> Option<()> {
        if !self.backend.isa_flags.has_mie4() {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn vxrs_ext2_enabled(&mut self, _: Type) -> Option<()> {
        if self.backend.isa_flags.has_vxrs_ext2() {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn vxrs_ext2_disabled(&mut self, _: Type) -> Option<()> {
        if !self.backend.isa_flags.has_vxrs_ext2() {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn vxrs_ext3_enabled(&mut self, _: Type) -> Option<()> {
        if self.backend.isa_flags.has_vxrs_ext3() {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn vxrs_ext3_disabled(&mut self, _: Type) -> Option<()> {
        if !self.backend.isa_flags.has_vxrs_ext3() {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn writable_gpr(&mut self, regno: u8) -> WritableReg {
        writable_gpr(regno)
    }

    #[inline]
    fn zero_reg(&mut self) -> Reg {
        zero_reg()
    }

    #[inline]
    fn gpr32_ty(&mut self, ty: Type) -> Option<Type> {
        match ty {
            I8 | I16 | I32 => Some(ty),
            _ => None,
        }
    }

    #[inline]
    fn gpr64_ty(&mut self, ty: Type) -> Option<Type> {
        match ty {
            I64 => Some(ty),
            _ => None,
        }
    }

    #[inline]
    fn vr128_ty(&mut self, ty: Type) -> Option<Type> {
        match ty {
            I128 | F128 => Some(ty),
            _ if ty.is_vector() && ty.bits() == 128 => Some(ty),
            _ => None,
        }
    }

    #[inline]
    fn uimm32shifted(&mut self, n: u32, shift: u8) -> UImm32Shifted {
        UImm32Shifted::maybe_with_shift(n, shift).unwrap()
    }

    #[inline]
    fn uimm16shifted(&mut self, n: u16, shift: u8) -> UImm16Shifted {
        UImm16Shifted::maybe_with_shift(n, shift).unwrap()
    }

    #[inline]
    fn i64_nonequal(&mut self, val: i64, cmp: i64) -> Option<i64> {
        if val != cmp { Some(val) } else { None }
    }

    #[inline]
    fn u64_pair_split(&mut self, n: u128) -> (u64, u64) {
        ((n >> 64) as u64, n as u64)
    }

    #[inline]
    fn u64_pair_concat(&mut self, hi: u64, lo: u64) -> u128 {
        (hi as u128) << 64 | (lo as u128)
    }

    #[inline]
    fn u32_pair_split(&mut self, n: u64) -> (u32, u32) {
        ((n >> 32) as u32, n as u32)
    }

    #[inline]
    fn u32_pair_concat(&mut self, hi: u32, lo: u32) -> u64 {
        (hi as u64) << 32 | (lo as u64)
    }

    #[inline]
    fn u16_pair_split(&mut self, n: u32) -> (u16, u16) {
        ((n >> 16) as u16, n as u16)
    }

    #[inline]
    fn u16_pair_concat(&mut self, hi: u16, lo: u16) -> u32 {
        (hi as u32) << 16 | (lo as u32)
    }

    #[inline]
    fn u8_pair_split(&mut self, n: u16) -> (u8, u8) {
        ((n >> 8) as u8, n as u8)
    }

    #[inline]
    fn u8_pair_concat(&mut self, hi: u8, lo: u8) -> u16 {
        (hi as u16) << 8 | (lo as u16)
    }

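    // Helpers returning the high or low 32-bit half of a 64-bit constant,
    // masked in place, if that half is nonzero.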
    #[inline]
    fn u64_nonzero_hipart(&mut self, n: u64) -> Option<u64> {
        let part = n & 0xffff_ffff_0000_0000;
        if part != 0 { Some(part) } else { None }
    }

    #[inline]
    fn u64_nonzero_lopart(&mut self, n: u64) -> Option<u64> {
        let part = n & 0x0000_0000_ffff_ffff;
        if part != 0 { Some(part) } else { None }
    }

    #[inline]
    fn uimm32shifted_from_u64(&mut self, n: u64) -> Option<UImm32Shifted> {
        UImm32Shifted::maybe_from_u64(n)
    }

    #[inline]
    fn uimm16shifted_from_u64(&mut self, n: u64) -> Option<UImm16Shifted> {
        UImm16Shifted::maybe_from_u64(n)
    }

    #[inline]
    fn lane_order(&mut self) -> LaneOrder {
        LaneOrder::from(self.lower_ctx.abi().call_conv())
    }

    #[inline]
    fn be_lane_idx(&mut self, ty: Type, idx: u8) -> u8 {
        match self.lane_order() {
            LaneOrder::LittleEndian => ty.lane_count() as u8 - 1 - idx,
            LaneOrder::BigEndian => idx,
        }
    }

    #[inline]
    fn be_vec_const(&mut self, ty: Type, n: u128) -> u128 {
        match self.lane_order() {
            LaneOrder::LittleEndian => n,
            LaneOrder::BigEndian if ty.lane_count() == 1 => n,
            LaneOrder::BigEndian => {
                let lane_count = ty.lane_count();
                let lane_bits = ty.lane_bits();
                let lane_mask = (1u128 << lane_bits) - 1;
                let mut n_le = n;
                let mut n_be = 0u128;
                for _ in 0..lane_count {
                    n_be = (n_be << lane_bits) | (n_le & lane_mask);
                    n_le = n_le >> lane_bits;
                }
                n_be
            }
        }
    }

    #[inline]
    fn lane_byte_mask(&mut self, ty: Type, idx: u8) -> u16 {
        let lane_bytes = (ty.lane_bits() / 8) as u8;
        let lane_mask = (1u16 << lane_bytes) - 1;
        lane_mask << (16 - ((idx + 1) * lane_bytes))
    }

    #[inline]
    fn shuffle_mask_from_u128(&mut self, idx: u128) -> (u128, u16) {
        let bytes = match self.lane_order() {
            LaneOrder::LittleEndian => idx.to_be_bytes().map(|x| {
                if x < 16 {
                    15 - x
                } else if x < 32 {
                    47 - x
                } else {
                    128
                }
            }),
            LaneOrder::BigEndian => idx.to_le_bytes().map(|x| if x < 32 { x } else { 128 }),
        };
        let and_mask = bytes.iter().fold(0, |acc, &x| (acc << 1) | (x < 32) as u16);
        let permute_mask = u128::from_be_bytes(bytes);
        (permute_mask, and_mask)
    }

    #[inline]
    fn u64_from_value(&mut self, val: Value) -> Option<u64> {
        let inst = self.lower_ctx.dfg().value_def(val).inst()?;
        let constant = self.lower_ctx.get_constant(inst)?;
        let ty = self.lower_ctx.output_ty(inst, 0);
        Some(zero_extend_to_u64(constant, self.ty_bits(ty)))
    }

    #[inline]
    fn u64_from_inverted_value(&mut self, val: Value) -> Option<u64> {
        let inst = self.lower_ctx.dfg().value_def(val).inst()?;
        let constant = self.lower_ctx.get_constant(inst)?;
        let ty = self.lower_ctx.output_ty(inst, 0);
        Some(zero_extend_to_u64(!constant, self.ty_bits(ty)))
    }

    #[inline]
    fn u32_from_value(&mut self, val: Value) -> Option<u32> {
        let constant = self.u64_from_value(val)?;
        let imm = u32::try_from(constant).ok()?;
        Some(imm)
    }

    #[inline]
    fn u8_from_value(&mut self, val: Value) -> Option<u8> {
        let constant = self.u64_from_value(val)?;
        let imm = u8::try_from(constant).ok()?;
        Some(imm)
    }

    #[inline]
    fn u64_from_signed_value(&mut self, val: Value) -> Option<u64> {
        let inst = self.lower_ctx.dfg().value_def(val).inst()?;
        let constant = self.lower_ctx.get_constant(inst)?;
        let ty = self.lower_ctx.output_ty(inst, 0);
        Some(sign_extend_to_u64(constant, self.ty_bits(ty)))
    }

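    // Signed constant extraction helpers: match a value defined by a
    // constant and return it as a narrower (possibly negated or
    // byte-swapped) immediate, failing if it does not fit.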
    #[inline]
    fn i64_from_value(&mut self, val: Value) -> Option<i64> {
        let constant = self.u64_from_signed_value(val)? as i64;
        Some(constant)
    }

    #[inline]
    fn i32_from_value(&mut self, val: Value) -> Option<i32> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = i32::try_from(constant).ok()?;
        Some(imm)
    }

    #[inline]
    fn i16_from_value(&mut self, val: Value) -> Option<i16> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = i16::try_from(constant).ok()?;
        Some(imm)
    }

    #[inline]
    fn i16_from_swapped_value(&mut self, val: Value) -> Option<i16> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = i16::try_from(constant).ok()?;
        Some(imm.swap_bytes())
    }

    #[inline]
    fn i64_from_negated_value(&mut self, val: Value) -> Option<i64> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = constant.wrapping_neg();
        Some(imm)
    }

    #[inline]
    fn i32_from_negated_value(&mut self, val: Value) -> Option<i32> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = i32::try_from(constant.wrapping_neg()).ok()?;
        Some(imm)
    }

    #[inline]
    fn i16_from_negated_value(&mut self, val: Value) -> Option<i16> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = i16::try_from(constant.wrapping_neg()).ok()?;
        Some(imm)
    }

    #[inline]
    fn uimm16shifted_from_value(&mut self, val: Value) -> Option<UImm16Shifted> {
        let constant = self.u64_from_value(val)?;
        UImm16Shifted::maybe_from_u64(constant)
    }

    #[inline]
    fn uimm32shifted_from_value(&mut self, val: Value) -> Option<UImm32Shifted> {
        let constant = self.u64_from_value(val)?;
        UImm32Shifted::maybe_from_u64(constant)
    }

    #[inline]
    fn uimm16shifted_from_inverted_value(&mut self, val: Value) -> Option<UImm16Shifted> {
        let constant = self.u64_from_inverted_value(val)?;
        let imm = UImm16Shifted::maybe_from_u64(constant)?;
        Some(imm.negate_bits())
    }

    #[inline]
    fn uimm32shifted_from_inverted_value(&mut self, val: Value) -> Option<UImm32Shifted> {
        let constant = self.u64_from_inverted_value(val)?;
        let imm = UImm32Shifted::maybe_from_u64(constant)?;
        Some(imm.negate_bits())
    }

    #[inline]
    fn len_minus_one(&mut self, len: u64) -> Option<u8> {
        if len > 0 && len <= 256 {
            Some((len - 1) as u8)
        } else {
            None
        }
    }

    #[inline]
    fn mask_amt_imm(&mut self, ty: Type, amt: i64) -> u8 {
        let mask = ty.lane_bits() - 1;
        (amt as u8) & (mask as u8)
    }

    #[inline]
    fn mask_as_cond(&mut self, mask: u8) -> Cond {
        Cond::from_mask(mask)
    }

    #[inline]
    fn intcc_as_cond(&mut self, cc: &IntCC) -> Cond {
        Cond::from_intcc(*cc)
    }

    #[inline]
    fn floatcc_as_cond(&mut self, cc: &FloatCC) -> Cond {
        Cond::from_floatcc(*cc)
    }

    #[inline]
    fn invert_cond(&mut self, cond: &Cond) -> Cond {
        Cond::invert(*cond)
    }

    #[inline]
    fn signed(&mut self, cc: &IntCC) -> Option<()> {
        if condcode_is_signed(*cc) {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn unsigned(&mut self, cc: &IntCC) -> Option<()> {
        if !condcode_is_signed(*cc) {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn zero_offset(&mut self) -> Offset32 {
        Offset32::new(0)
    }

    #[inline]
    fn i64_from_offset(&mut self, off: Offset32) -> i64 {
        i64::from(off)
    }

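    // Bit patterns for the bounds used by the `fcvt_to_uint` / `fcvt_to_sint`
    // lowerings; valid inputs lie strictly between the lower and upper bound
    // (e.g. `fcvt_to_uint_ub32(8)` returns the bits of 256.0_f32).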
    #[inline]
    fn fcvt_to_uint_ub32(&mut self, size: u8) -> u64 {
        (2.0_f32).powi(size.into()).to_bits() as u64
    }

    #[inline]
    fn fcvt_to_uint_lb32(&mut self) -> u64 {
        (-1.0_f32).to_bits() as u64
    }

    #[inline]
    fn fcvt_to_uint_ub64(&mut self, size: u8) -> u64 {
        (2.0_f64).powi(size.into()).to_bits()
    }

    #[inline]
    fn fcvt_to_uint_lb64(&mut self) -> u64 {
        (-1.0_f64).to_bits()
    }

    #[inline]
    fn fcvt_to_uint_ub128(&mut self, size: u8) -> u128 {
        Ieee128::pow2(size).bits()
    }

    #[inline]
    fn fcvt_to_uint_lb128(&mut self) -> u128 {
        (-Ieee128::pow2(0)).bits()
    }

    #[inline]
    fn fcvt_to_sint_ub32(&mut self, size: u8) -> u64 {
        (2.0_f32).powi((size - 1).into()).to_bits() as u64
    }

    #[inline]
    fn fcvt_to_sint_lb32(&mut self, size: u8) -> u64 {
        let lb = (-2.0_f32).powi((size - 1).into());
        core::cmp::max(lb.to_bits() + 1, (lb - 1.0).to_bits()) as u64
    }

    #[inline]
    fn fcvt_to_sint_ub64(&mut self, size: u8) -> u64 {
        (2.0_f64).powi((size - 1).into()).to_bits()
    }

    #[inline]
    fn fcvt_to_sint_lb64(&mut self, size: u8) -> u64 {
        let lb = (-2.0_f64).powi((size - 1).into());
        core::cmp::max(lb.to_bits() + 1, (lb - 1.0).to_bits())
    }

    #[inline]
    fn fcvt_to_sint_ub128(&mut self, size: u8) -> u128 {
        Ieee128::pow2(size - 1).bits()
    }

    #[inline]
    fn fcvt_to_sint_lb128(&mut self, size: u8) -> u128 {
        Ieee128::fcvt_to_sint_negative_overflow(size).bits()
    }

    #[inline]
    fn littleendian(&mut self, flags: MemFlags) -> Option<()> {
        let endianness = flags.endianness(Endianness::Big);
        if endianness == Endianness::Little {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn bigendian(&mut self, flags: MemFlags) -> Option<()> {
        let endianness = flags.endianness(Endianness::Big);
        if endianness == Endianness::Big {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn memflags_trusted(&mut self) -> MemFlags {
        MemFlags::trusted()
    }

    #[inline]
    fn memarg_imm_from_offset(&mut self, imm: Offset32) -> Option<SImm20> {
        SImm20::maybe_from_i64(i64::from(imm))
    }

    #[inline]
    fn memarg_imm_from_offset_plus_bias(&mut self, imm: Offset32, bias: u8) -> Option<SImm20> {
        let final_offset = i64::from(imm) + bias as i64;
        SImm20::maybe_from_i64(final_offset)
    }

    #[inline]
    fn memarg_reg_plus_reg(&mut self, x: Reg, y: Reg, bias: u8, flags: MemFlags) -> MemArg {
        MemArg::BXD12 {
            base: x,
            index: y,
            disp: UImm12::maybe_from_u64(bias as u64).unwrap(),
            flags,
        }
    }

    #[inline]
    fn memarg_reg_plus_reg_plus_off(
        &mut self,
        x: Reg,
        y: Reg,
        offset: &SImm20,
        flags: MemFlags,
    ) -> MemArg {
        if let Some(imm) = UImm12::maybe_from_simm20(*offset) {
            MemArg::BXD12 {
                base: x,
                index: y,
                disp: imm,
                flags,
            }
        } else {
            MemArg::BXD20 {
                base: x,
                index: y,
                disp: *offset,
                flags,
            }
        }
    }

    #[inline]
    fn memarg_reg_plus_off(&mut self, reg: Reg, off: i64, bias: u8, flags: MemFlags) -> MemArg {
        MemArg::reg_plus_off(reg, off + (bias as i64), flags)
    }

    #[inline]
    fn memarg_symbol(&mut self, name: ExternalName, offset: i32, flags: MemFlags) -> MemArg {
        MemArg::Symbol {
            name: Box::new(name),
            offset,
            flags,
        }
    }

    #[inline]
    fn memarg_got(&mut self) -> MemArg {
        MemArg::Symbol {
            name: Box::new(ExternalName::KnownSymbol(KnownSymbol::ElfGlobalOffsetTable)),
            offset: 0,
            flags: MemFlags::trusted(),
        }
    }

    #[inline]
    fn memarg_const(&mut self, constant: VCodeConstant) -> MemArg {
        MemArg::Constant { constant }
    }

at SP.802MemArg::reg(stack_reg(), MemFlags::trusted())803}804805#[inline]806fn memarg_return_address_offset(&mut self) -> MemArg {807// The return address is stored 14 pointer-sized slots above the initial SP.808MemArg::InitialSPOffset { off: 14 * 8 }809}810811#[inline]812fn inst_builder_new(&mut self) -> VecMInstBuilder {813Cell::new(Vec::<MInst>::new())814}815816#[inline]817fn inst_builder_push(&mut self, builder: &VecMInstBuilder, inst: &MInst) -> Unit {818let mut vec = builder.take();819vec.push(inst.clone());820builder.set(vec);821}822823#[inline]824fn inst_builder_finish(&mut self, builder: &VecMInstBuilder) -> Vec<MInst> {825builder.take()826}827828#[inline]829fn real_reg(&mut self, reg: WritableReg) -> Option<WritableReg> {830if reg.to_reg().is_real() {831Some(reg)832} else {833None834}835}836837#[inline]838fn same_reg(&mut self, dst: WritableReg, src: Reg) -> Option<Reg> {839if dst.to_reg() == src { Some(src) } else { None }840}841842#[inline]843fn sinkable_inst(&mut self, val: Value) -> Option<Inst> {844self.is_sinkable_inst(val)845}846847#[inline]848fn emit(&mut self, inst: &MInst) -> Unit {849self.lower_ctx.emit(inst.clone());850}851852#[inline]853fn preg_stack(&mut self) -> PReg {854stack_reg().to_real_reg().unwrap().into()855}856857#[inline]858fn preg_gpr_0(&mut self) -> PReg {859gpr(0).to_real_reg().unwrap().into()860}861862#[inline]863fn writable_regpair(&mut self, hi: WritableReg, lo: WritableReg) -> WritableRegPair {864WritableRegPair { hi, lo }865}866867#[inline]868fn writable_regpair_hi(&mut self, w: WritableRegPair) -> WritableReg {869w.hi870}871872#[inline]873fn writable_regpair_lo(&mut self, w: WritableRegPair) -> WritableReg {874w.lo875}876877#[inline]878fn regpair(&mut self, hi: Reg, lo: Reg) -> RegPair {879RegPair { hi, lo }880}881882#[inline]883fn regpair_hi(&mut self, w: RegPair) -> Reg {884w.hi885}886887#[inline]888fn regpair_lo(&mut self, w: RegPair) -> Reg {889w.lo890}891}892893/// Zero-extend the low `from_bits` bits of `value` to a full u64.894#[inline]895fn zero_extend_to_u64(value: u64, from_bits: u8) -> u64 {896assert!(from_bits <= 64);897if from_bits >= 64 {898value899} else {900value & ((1u64 << from_bits) - 1)901}902}903904/// Sign-extend the low `from_bits` bits of `value` to a full u64.905#[inline]906fn sign_extend_to_u64(value: u64, from_bits: u8) -> u64 {907assert!(from_bits <= 64);908if from_bits >= 64 {909value910} else {911(((value << (64 - from_bits)) as i64) >> (64 - from_bits)) as u64912}913}914915/// Determines whether this condcode interprets inputs as signed or916/// unsigned. See the documentation for the `icmp` instruction in917/// cranelift-codegen/meta/src/shared/instructions.rs for further insights918/// into this.919#[inline]920fn condcode_is_signed(cc: IntCC) -> bool {921match cc {922IntCC::Equal => false,923IntCC::NotEqual => false,924IntCC::SignedGreaterThanOrEqual => true,925IntCC::SignedGreaterThan => true,926IntCC::SignedLessThanOrEqual => true,927IntCC::SignedLessThan => true,928IntCC::UnsignedGreaterThanOrEqual => false,929IntCC::UnsignedGreaterThan => false,930IntCC::UnsignedLessThanOrEqual => false,931IntCC::UnsignedLessThan => false,932}933}934935936