Path: blob/main/cranelift/codegen/src/isa/s390x/lower/isle.rs
//! ISLE integration glue code for s390x lowering.

// Pull in the ISLE generated code.
pub mod generated_code;

// Types that the generated ISLE code uses via `use super::*`.
use crate::ir::ExternalName;
use crate::isa::s390x::S390xBackend;
use crate::isa::s390x::abi::REG_SAVE_AREA_SIZE;
use crate::isa::s390x::inst::{
    CallInstDest, Cond, Inst as MInst, LaneOrder, MemArg, RegPair, ReturnCallInfo, SymbolReloc,
    UImm12, UImm16Shifted, UImm32Shifted, WritableRegPair, gpr, stack_reg, writable_gpr, zero_reg,
};
use crate::machinst::isle::*;
use crate::machinst::{CallInfo, MachLabel, Reg, TryCallInfo, non_writable_value_regs};
use crate::{
    ir::{
        AtomicRmwOp, BlockCall, Endianness, Inst, InstructionData, KnownSymbol, MemFlags, Opcode,
        TrapCode, Value, ValueList, condcodes::*, immediates::*, types::*,
    },
    isa::CallConv,
    machinst::{
        ArgPair, CallArgList, CallRetList, InstOutput, MachInst, VCodeConstant, VCodeConstantData,
    },
};
use regalloc2::PReg;
use std::boxed::Box;
use std::cell::Cell;
use std::vec::Vec;

type BoxCallInfo = Box<CallInfo<CallInstDest>>;
type BoxReturnCallInfo = Box<ReturnCallInfo<CallInstDest>>;
type VecMachLabel = Vec<MachLabel>;
type BoxExternalName = Box<ExternalName>;
type BoxSymbolReloc = Box<SymbolReloc>;
type VecMInst = Vec<MInst>;
type VecMInstBuilder = Cell<Vec<MInst>>;
type VecArgPair = Vec<ArgPair>;

/// The main entry point for lowering with ISLE.
pub(crate) fn lower(
    lower_ctx: &mut Lower<MInst>,
    backend: &S390xBackend,
    inst: Inst,
) -> Option<InstOutput> {
    // TODO: reuse the ISLE context across lowerings so we can reuse its
    // internal heap allocations.
    let mut isle_ctx = IsleContext { lower_ctx, backend };
    generated_code::constructor_lower(&mut isle_ctx, inst)
}

/// The main entry point for branch lowering with ISLE.
pub(crate) fn lower_branch(
    lower_ctx: &mut Lower<MInst>,
    backend: &S390xBackend,
    branch: Inst,
    targets: &[MachLabel],
) -> Option<()> {
    // TODO: reuse the ISLE context across lowerings so we can reuse its
    // internal heap allocations.
    let mut isle_ctx = IsleContext { lower_ctx, backend };
    generated_code::constructor_lower_branch(&mut isle_ctx, branch, targets)
}

impl generated_code::Context for IsleContext<'_, '_, MInst, S390xBackend> {
    isle_lower_prelude_methods!();

    #[inline]
    fn call_inst_dest_direct(&mut self, name: ExternalName) -> CallInstDest {
        CallInstDest::Direct { name }
    }

    #[inline]
    fn call_inst_dest_indirect(&mut self, reg: Reg) -> CallInstDest {
        CallInstDest::Indirect { reg }
    }

    // Adjust the stack before performing a (regular) call to a function
    // using the tail-call ABI. We need to allocate the part of the callee's
    // frame holding the incoming argument area. If necessary for unwinding,
    // we also create a (temporary) copy of the backchain.
    fn abi_emit_call_adjust_stack(&mut self, abi: Sig) -> Unit {
        let sig_data = &self.lower_ctx.sigs()[abi];
        if sig_data.call_conv() == CallConv::Tail {
            let arg_space = sig_data.sized_stack_arg_space();
            if arg_space > 0 {
                if self.backend.flags.preserve_frame_pointers() {
                    let tmp = self.lower_ctx.alloc_tmp(I64).only_reg().unwrap();
                    let src_mem = MemArg::reg(stack_reg(), MemFlags::trusted());
                    let dst_mem = MemArg::reg(stack_reg(), MemFlags::trusted());
                    self.emit(&MInst::Load64 {
                        rd: tmp,
                        mem: src_mem,
                    });
                    self.emit(&MInst::AllocateArgs { size: arg_space });
                    self.emit(&MInst::Store64 {
                        rd: tmp.to_reg(),
                        mem: dst_mem,
                    });
                } else {
                    self.emit(&MInst::AllocateArgs { size: arg_space });
                }
            }
        }
    }
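
    // Illustrative only: when `preserve_frame_pointers` is set and the
    // argument area is nonzero, the sequence emitted above amounts to the
    // following pseudo-assembly (assuming the backchain is stored at
    // offset 0 from the stack pointer, %r15):
    //
    //   lg   %tmp, 0(%r15)      # load old backchain from the current SP
    //   (AllocateArgs)          # SP -= arg_space
    //   stg  %tmp, 0(%r15)      # re-store the backchain at the new SP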

    // Adjust the stack before performing a tail call. The actual stack
    // adjustment is deferred to the call instruction itself, but we create
    // a temporary backchain copy in the proper place here, if necessary
    // for unwinding.
    fn abi_emit_return_call_adjust_stack(&mut self, abi: Sig) -> Unit {
        let sig_data = &self.lower_ctx.sigs()[abi];
        let arg_space = sig_data.sized_stack_arg_space();
        if arg_space > 0 && self.backend.flags.preserve_frame_pointers() {
            let tmp = self.lower_ctx.alloc_tmp(I64).only_reg().unwrap();
            let src_mem = MemArg::InitialSPOffset { off: 0 };
            let dst_mem = MemArg::InitialSPOffset {
                off: -(arg_space as i64),
            };
            self.emit(&MInst::Load64 {
                rd: tmp,
                mem: src_mem,
            });
            self.emit(&MInst::Store64 {
                rd: tmp.to_reg(),
                mem: dst_mem,
            });
        }
    }

    // Load call arguments into a vector of ValueRegs. This is the same as
    // the common-code put_in_regs_vec routine, except that we also handle
    // vector lane swaps if caller and callee differ in lane order.
    fn abi_prepare_args(&mut self, abi: Sig, (list, off): ValueSlice) -> ValueRegsVec {
        let lane_order = LaneOrder::from(self.lower_ctx.sigs()[abi].call_conv());
        let lane_swap_needed = self.lane_order() != lane_order;

        (off..list.len(&self.lower_ctx.dfg().value_lists))
            .map(|ix| {
                let val = list.get(ix, &self.lower_ctx.dfg().value_lists).unwrap();
                let ty = self.lower_ctx.dfg().value_type(val);
                let regs = self.put_in_regs(val);

                if lane_swap_needed && ty.is_vector() && ty.lane_count() >= 2 {
                    let tmp_regs = self.lower_ctx.alloc_tmp(ty);
                    self.emit(&MInst::VecEltRev {
                        lane_count: ty.lane_count(),
                        rd: tmp_regs.only_reg().unwrap(),
                        rn: regs.only_reg().unwrap(),
                    });
                    non_writable_value_regs(tmp_regs)
                } else {
                    regs
                }
            })
            .collect()
    }

    fn gen_call_info(
        &mut self,
        sig: Sig,
        dest: CallInstDest,
        uses: CallArgList,
        defs: CallRetList,
        try_call_info: Option<TryCallInfo>,
    ) -> BoxCallInfo {
        let stack_ret_space = self.lower_ctx.sigs()[sig].sized_stack_ret_space();
        let stack_arg_space = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        let total_space = if self.lower_ctx.sigs()[sig].call_conv() != CallConv::Tail {
            REG_SAVE_AREA_SIZE + stack_arg_space + stack_ret_space
        } else {
            REG_SAVE_AREA_SIZE + stack_ret_space
        };
        self.lower_ctx
            .abi_mut()
            .accumulate_outgoing_args_size(total_space);

        Box::new(
            self.lower_ctx
                .gen_call_info(sig, dest, uses, defs, try_call_info),
        )
    }
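
    // A worked example of the accounting above, assuming the 160-byte
    // register save area of the s390x ELF ABI: a call to a non-tail-ABI
    // callee with a 32-byte argument area and a 16-byte return area
    // accumulates 160 + 32 + 16 = 208 bytes of outgoing-argument space,
    // while a tail-ABI callee allocates its own argument area (see
    // abi_emit_call_adjust_stack above), so only 160 + 16 = 176 bytes
    // are accumulated here.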

    fn gen_return_call_info(
        &mut self,
        sig: Sig,
        dest: CallInstDest,
        uses: CallArgList,
    ) -> BoxReturnCallInfo {
        let callee_pop_size = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_tail_args_size(callee_pop_size);

        Box::new(ReturnCallInfo {
            dest,
            uses,
            callee_pop_size,
        })
    }

    fn abi_for_elf_tls_get_offset(&mut self) {
        self.lower_ctx
            .abi_mut()
            .accumulate_outgoing_args_size(REG_SAVE_AREA_SIZE);
    }

    #[inline]
    fn box_symbol_reloc(&mut self, symbol_reloc: &SymbolReloc) -> BoxSymbolReloc {
        Box::new(symbol_reloc.clone())
    }

    #[inline]
    fn mie3_enabled(&mut self, _: Type) -> Option<()> {
        if self.backend.isa_flags.has_mie3() {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn mie3_disabled(&mut self, _: Type) -> Option<()> {
        if !self.backend.isa_flags.has_mie3() {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn vxrs_ext2_enabled(&mut self, _: Type) -> Option<()> {
        if self.backend.isa_flags.has_vxrs_ext2() {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn vxrs_ext2_disabled(&mut self, _: Type) -> Option<()> {
        if !self.backend.isa_flags.has_vxrs_ext2() {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn writable_gpr(&mut self, regno: u8) -> WritableReg {
        writable_gpr(regno)
    }

    #[inline]
    fn zero_reg(&mut self) -> Reg {
        zero_reg()
    }

    #[inline]
    fn gpr32_ty(&mut self, ty: Type) -> Option<Type> {
        match ty {
            I8 | I16 | I32 => Some(ty),
            _ => None,
        }
    }

    #[inline]
    fn gpr64_ty(&mut self, ty: Type) -> Option<Type> {
        match ty {
            I64 => Some(ty),
            _ => None,
        }
    }

    #[inline]
    fn vr128_ty(&mut self, ty: Type) -> Option<Type> {
        match ty {
            I128 | F128 => Some(ty),
            _ if ty.is_vector() && ty.bits() == 128 => Some(ty),
            _ => None,
        }
    }

    #[inline]
    fn uimm32shifted(&mut self, n: u32, shift: u8) -> UImm32Shifted {
        UImm32Shifted::maybe_with_shift(n, shift).unwrap()
    }

    #[inline]
    fn uimm16shifted(&mut self, n: u16, shift: u8) -> UImm16Shifted {
        UImm16Shifted::maybe_with_shift(n, shift).unwrap()
    }

    #[inline]
    fn i64_nonequal(&mut self, val: i64, cmp: i64) -> Option<i64> {
        if val != cmp { Some(val) } else { None }
    }

    #[inline]
    fn u64_pair_split(&mut self, n: u128) -> (u64, u64) {
        ((n >> 64) as u64, n as u64)
    }

    #[inline]
    fn u64_pair_concat(&mut self, hi: u64, lo: u64) -> u128 {
        (hi as u128) << 64 | (lo as u128)
    }

    #[inline]
    fn u32_pair_split(&mut self, n: u64) -> (u32, u32) {
        ((n >> 32) as u32, n as u32)
    }

    #[inline]
    fn u32_pair_concat(&mut self, hi: u32, lo: u32) -> u64 {
        (hi as u64) << 32 | (lo as u64)
    }

    #[inline]
    fn u16_pair_split(&mut self, n: u32) -> (u16, u16) {
        ((n >> 16) as u16, n as u16)
    }

    #[inline]
    fn u16_pair_concat(&mut self, hi: u16, lo: u16) -> u32 {
        (hi as u32) << 16 | (lo as u32)
    }

    #[inline]
    fn u8_pair_split(&mut self, n: u16) -> (u8, u8) {
        ((n >> 8) as u8, n as u8)
    }

    #[inline]
    fn u8_pair_concat(&mut self, hi: u8, lo: u8) -> u16 {
        (hi as u16) << 8 | (lo as u16)
    }

    #[inline]
    fn u64_nonzero_hipart(&mut self, n: u64) -> Option<u64> {
        let part = n & 0xffff_ffff_0000_0000;
        if part != 0 { Some(part) } else { None }
    }

    #[inline]
    fn u64_nonzero_lopart(&mut self, n: u64) -> Option<u64> {
        let part = n & 0x0000_0000_ffff_ffff;
        if part != 0 { Some(part) } else { None }
    }

    #[inline]
    fn uimm32shifted_from_u64(&mut self, n: u64) -> Option<UImm32Shifted> {
        UImm32Shifted::maybe_from_u64(n)
    }

    #[inline]
    fn uimm16shifted_from_u64(&mut self, n: u64) -> Option<UImm16Shifted> {
        UImm16Shifted::maybe_from_u64(n)
    }
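
    // For illustration, assuming `UImm16Shifted` and `UImm32Shifted`
    // encode a 16-bit (resp. 32-bit) value shifted left by a multiple of
    // its width: 0x0000_1234_0000_0000 matches as the 16-bit immediate
    // 0x1234 with shift 32, whereas 0x0001_0001 spans two 16-bit groups
    // and is rejected by `uimm16shifted_from_u64`, but still matches as
    // a 32-bit immediate with shift 0.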

    #[inline]
    fn lane_order(&mut self) -> LaneOrder {
        LaneOrder::from(self.lower_ctx.abi().call_conv())
    }

    #[inline]
    fn be_lane_idx(&mut self, ty: Type, idx: u8) -> u8 {
        match self.lane_order() {
            LaneOrder::LittleEndian => ty.lane_count() as u8 - 1 - idx,
            LaneOrder::BigEndian => idx,
        }
    }

    #[inline]
    fn be_vec_const(&mut self, ty: Type, n: u128) -> u128 {
        match self.lane_order() {
            LaneOrder::LittleEndian => n,
            LaneOrder::BigEndian if ty.lane_count() == 1 => n,
            LaneOrder::BigEndian => {
                let lane_count = ty.lane_count();
                let lane_bits = ty.lane_bits();
                let lane_mask = (1u128 << lane_bits) - 1;
                let mut n_le = n;
                let mut n_be = 0u128;
                for _ in 0..lane_count {
                    n_be = (n_be << lane_bits) | (n_le & lane_mask);
                    n_le = n_le >> lane_bits;
                }
                n_be
            }
        }
    }

    #[inline]
    fn lane_byte_mask(&mut self, ty: Type, idx: u8) -> u16 {
        let lane_bytes = (ty.lane_bits() / 8) as u8;
        let lane_mask = (1u16 << lane_bytes) - 1;
        lane_mask << (16 - ((idx + 1) * lane_bytes))
    }

    #[inline]
    fn shuffle_mask_from_u128(&mut self, idx: u128) -> (u128, u16) {
        let bytes = match self.lane_order() {
            LaneOrder::LittleEndian => idx.to_be_bytes().map(|x| {
                if x < 16 {
                    15 - x
                } else if x < 32 {
                    47 - x
                } else {
                    128
                }
            }),
            LaneOrder::BigEndian => idx.to_le_bytes().map(|x| if x < 32 { x } else { 128 }),
        };
        let and_mask = bytes.iter().fold(0, |acc, &x| (acc << 1) | (x < 32) as u16);
        let permute_mask = u128::from_be_bytes(bytes);
        (permute_mask, and_mask)
    }
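
    // Worked examples for the lane-order helpers above: for I32X4 under
    // little-endian lane order, `be_lane_idx(I32X4, 0)` returns 3, since
    // CLIF lane 0 is the highest-numbered lane from the big-endian
    // perspective; under big-endian lane order it returns 0 unchanged.
    // Independently, `lane_byte_mask(I32X4, 0)` returns 0xf000: lane 0
    // covers the four most-significant bytes of the 16-bit byte-select
    // mask.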

    #[inline]
    fn u64_from_value(&mut self, val: Value) -> Option<u64> {
        let inst = self.lower_ctx.dfg().value_def(val).inst()?;
        let constant = self.lower_ctx.get_constant(inst)?;
        let ty = self.lower_ctx.output_ty(inst, 0);
        Some(zero_extend_to_u64(constant, self.ty_bits(ty)))
    }

    #[inline]
    fn u64_from_inverted_value(&mut self, val: Value) -> Option<u64> {
        let inst = self.lower_ctx.dfg().value_def(val).inst()?;
        let constant = self.lower_ctx.get_constant(inst)?;
        let ty = self.lower_ctx.output_ty(inst, 0);
        Some(zero_extend_to_u64(!constant, self.ty_bits(ty)))
    }

    #[inline]
    fn u32_from_value(&mut self, val: Value) -> Option<u32> {
        let constant = self.u64_from_value(val)?;
        let imm = u32::try_from(constant).ok()?;
        Some(imm)
    }

    #[inline]
    fn u8_from_value(&mut self, val: Value) -> Option<u8> {
        let constant = self.u64_from_value(val)?;
        let imm = u8::try_from(constant).ok()?;
        Some(imm)
    }

    #[inline]
    fn u64_from_signed_value(&mut self, val: Value) -> Option<u64> {
        let inst = self.lower_ctx.dfg().value_def(val).inst()?;
        let constant = self.lower_ctx.get_constant(inst)?;
        let ty = self.lower_ctx.output_ty(inst, 0);
        Some(sign_extend_to_u64(constant, self.ty_bits(ty)))
    }

    #[inline]
    fn i64_from_value(&mut self, val: Value) -> Option<i64> {
        let constant = self.u64_from_signed_value(val)? as i64;
        Some(constant)
    }

    #[inline]
    fn i32_from_value(&mut self, val: Value) -> Option<i32> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = i32::try_from(constant).ok()?;
        Some(imm)
    }

    #[inline]
    fn i16_from_value(&mut self, val: Value) -> Option<i16> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = i16::try_from(constant).ok()?;
        Some(imm)
    }

    #[inline]
    fn i16_from_swapped_value(&mut self, val: Value) -> Option<i16> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = i16::try_from(constant).ok()?;
        Some(imm.swap_bytes())
    }

    #[inline]
    fn i64_from_negated_value(&mut self, val: Value) -> Option<i64> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = constant.wrapping_neg();
        Some(imm)
    }

    #[inline]
    fn i32_from_negated_value(&mut self, val: Value) -> Option<i32> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = i32::try_from(constant.wrapping_neg()).ok()?;
        Some(imm)
    }

    #[inline]
    fn i16_from_negated_value(&mut self, val: Value) -> Option<i16> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = i16::try_from(constant.wrapping_neg()).ok()?;
        Some(imm)
    }

    #[inline]
    fn uimm16shifted_from_value(&mut self, val: Value) -> Option<UImm16Shifted> {
        let constant = self.u64_from_value(val)?;
        UImm16Shifted::maybe_from_u64(constant)
    }

    #[inline]
    fn uimm32shifted_from_value(&mut self, val: Value) -> Option<UImm32Shifted> {
        let constant = self.u64_from_value(val)?;
        UImm32Shifted::maybe_from_u64(constant)
    }

    #[inline]
    fn uimm16shifted_from_inverted_value(&mut self, val: Value) -> Option<UImm16Shifted> {
        let constant = self.u64_from_inverted_value(val)?;
        let imm = UImm16Shifted::maybe_from_u64(constant)?;
        Some(imm.negate_bits())
    }

    #[inline]
    fn uimm32shifted_from_inverted_value(&mut self, val: Value) -> Option<UImm32Shifted> {
        let constant = self.u64_from_inverted_value(val)?;
        let imm = UImm32Shifted::maybe_from_u64(constant)?;
        Some(imm.negate_bits())
    }
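
    // For illustration: given an `iconst.i8 0x80`, `u64_from_value` sees
    // 0x80 (zero-extended) while `u64_from_signed_value` sees
    // 0xffff_ffff_ffff_ff80 (sign-extended). `i16_from_value` therefore
    // yields -128, and `i16_from_swapped_value` yields the same value
    // byte-swapped (bit pattern 0x80ff), for use by byte-reversed
    // memory accesses.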

    #[inline]
    fn len_minus_one(&mut self, len: u64) -> Option<u8> {
        if len > 0 && len <= 256 {
            Some((len - 1) as u8)
        } else {
            None
        }
    }

    #[inline]
    fn mask_amt_imm(&mut self, ty: Type, amt: i64) -> u8 {
        let mask = ty.lane_bits() - 1;
        (amt as u8) & (mask as u8)
    }

    #[inline]
    fn mask_as_cond(&mut self, mask: u8) -> Cond {
        Cond::from_mask(mask)
    }

    #[inline]
    fn intcc_as_cond(&mut self, cc: &IntCC) -> Cond {
        Cond::from_intcc(*cc)
    }

    #[inline]
    fn floatcc_as_cond(&mut self, cc: &FloatCC) -> Cond {
        Cond::from_floatcc(*cc)
    }

    #[inline]
    fn invert_cond(&mut self, cond: &Cond) -> Cond {
        Cond::invert(*cond)
    }

    #[inline]
    fn signed(&mut self, cc: &IntCC) -> Option<()> {
        if condcode_is_signed(*cc) {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn unsigned(&mut self, cc: &IntCC) -> Option<()> {
        if !condcode_is_signed(*cc) {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn zero_offset(&mut self) -> Offset32 {
        Offset32::new(0)
    }

    #[inline]
    fn i64_from_offset(&mut self, off: Offset32) -> i64 {
        i64::from(off)
    }

    #[inline]
    fn fcvt_to_uint_ub32(&mut self, size: u8) -> u64 {
        (2.0_f32).powi(size.into()).to_bits() as u64
    }

    #[inline]
    fn fcvt_to_uint_lb32(&mut self) -> u64 {
        (-1.0_f32).to_bits() as u64
    }

    #[inline]
    fn fcvt_to_uint_ub64(&mut self, size: u8) -> u64 {
        (2.0_f64).powi(size.into()).to_bits()
    }

    #[inline]
    fn fcvt_to_uint_lb64(&mut self) -> u64 {
        (-1.0_f64).to_bits()
    }

    #[inline]
    fn fcvt_to_uint_ub128(&mut self, size: u8) -> u128 {
        Ieee128::pow2(size).bits()
    }

    #[inline]
    fn fcvt_to_uint_lb128(&mut self) -> u128 {
        (-Ieee128::pow2(0)).bits()
    }

    #[inline]
    fn fcvt_to_sint_ub32(&mut self, size: u8) -> u64 {
        (2.0_f32).powi((size - 1).into()).to_bits() as u64
    }

    #[inline]
    fn fcvt_to_sint_lb32(&mut self, size: u8) -> u64 {
        let lb = (-2.0_f32).powi((size - 1).into());
        std::cmp::max(lb.to_bits() + 1, (lb - 1.0).to_bits()) as u64
    }

    #[inline]
    fn fcvt_to_sint_ub64(&mut self, size: u8) -> u64 {
        (2.0_f64).powi((size - 1).into()).to_bits()
    }

    #[inline]
    fn fcvt_to_sint_lb64(&mut self, size: u8) -> u64 {
        let lb = (-2.0_f64).powi((size - 1).into());
        std::cmp::max(lb.to_bits() + 1, (lb - 1.0).to_bits())
    }

    #[inline]
    fn fcvt_to_sint_ub128(&mut self, size: u8) -> u128 {
        Ieee128::pow2(size - 1).bits()
    }

    #[inline]
    fn fcvt_to_sint_lb128(&mut self, size: u8) -> u128 {
        Ieee128::fcvt_to_sint_negative_overflow(size).bits()
    }

    #[inline]
    fn littleendian(&mut self, flags: MemFlags) -> Option<()> {
        let endianness = flags.endianness(Endianness::Big);
        if endianness == Endianness::Little {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn bigendian(&mut self, flags: MemFlags) -> Option<()> {
        let endianness = flags.endianness(Endianness::Big);
        if endianness == Endianness::Big {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn memflags_trusted(&mut self) -> MemFlags {
        MemFlags::trusted()
    }

    #[inline]
    fn memarg_reg_plus_reg(&mut self, x: Reg, y: Reg, bias: u8, flags: MemFlags) -> MemArg {
        MemArg::BXD12 {
            base: x,
            index: y,
            disp: UImm12::maybe_from_u64(bias as u64).unwrap(),
            flags,
        }
    }

    #[inline]
    fn memarg_reg_plus_off(&mut self, reg: Reg, off: i64, bias: u8, flags: MemFlags) -> MemArg {
        MemArg::reg_plus_off(reg, off + (bias as i64), flags)
    }

    #[inline]
    fn memarg_symbol(&mut self, name: ExternalName, offset: i32, flags: MemFlags) -> MemArg {
        MemArg::Symbol {
            name: Box::new(name),
            offset,
            flags,
        }
    }

    #[inline]
    fn memarg_got(&mut self) -> MemArg {
        MemArg::Symbol {
            name: Box::new(ExternalName::KnownSymbol(KnownSymbol::ElfGlobalOffsetTable)),
            offset: 0,
            flags: MemFlags::trusted(),
        }
    }

    #[inline]
    fn memarg_const(&mut self, constant: VCodeConstant) -> MemArg {
        MemArg::Constant { constant }
    }

    #[inline]
    fn memarg_symbol_offset_sum(&mut self, off1: i64, off2: i64) -> Option<i32> {
        let off = i32::try_from(off1 + off2).ok()?;
        if off & 1 == 0 { Some(off) } else { None }
    }

    #[inline]
    fn memarg_frame_pointer_offset(&mut self) -> MemArg {
        // The frame pointer (back chain) is stored directly at SP.
        MemArg::reg(stack_reg(), MemFlags::trusted())
    }

    #[inline]
    fn memarg_return_address_offset(&mut self) -> MemArg {
        // The return address is stored 14 pointer-sized slots above the initial SP.
        MemArg::InitialSPOffset { off: 14 * 8 }
    }
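
    // For reference, the s390x ELF ABI frame layout assumed by the two
    // accessors above: each frame stores the backchain (the caller's SP
    // value) at offset 0 from the current stack pointer, and the return
    // address register %r14 is saved at its conventional slot in the
    // register save area, 14 pointer-sized slots (112 bytes) above the
    // incoming SP, matching `InitialSPOffset { off: 14 * 8 }`.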

    #[inline]
    fn inst_builder_new(&mut self) -> VecMInstBuilder {
        Cell::new(Vec::<MInst>::new())
    }

    #[inline]
    fn inst_builder_push(&mut self, builder: &VecMInstBuilder, inst: &MInst) -> Unit {
        let mut vec = builder.take();
        vec.push(inst.clone());
        builder.set(vec);
    }

    #[inline]
    fn inst_builder_finish(&mut self, builder: &VecMInstBuilder) -> Vec<MInst> {
        builder.take()
    }

    #[inline]
    fn real_reg(&mut self, reg: WritableReg) -> Option<WritableReg> {
        if reg.to_reg().is_real() {
            Some(reg)
        } else {
            None
        }
    }

    #[inline]
    fn same_reg(&mut self, dst: WritableReg, src: Reg) -> Option<Reg> {
        if dst.to_reg() == src { Some(src) } else { None }
    }

    #[inline]
    fn sinkable_inst(&mut self, val: Value) -> Option<Inst> {
        self.is_sinkable_inst(val)
    }

    #[inline]
    fn emit(&mut self, inst: &MInst) -> Unit {
        self.lower_ctx.emit(inst.clone());
    }

    #[inline]
    fn preg_stack(&mut self) -> PReg {
        stack_reg().to_real_reg().unwrap().into()
    }

    #[inline]
    fn preg_gpr_0(&mut self) -> PReg {
        gpr(0).to_real_reg().unwrap().into()
    }

    #[inline]
    fn writable_regpair(&mut self, hi: WritableReg, lo: WritableReg) -> WritableRegPair {
        WritableRegPair { hi, lo }
    }

    #[inline]
    fn writable_regpair_hi(&mut self, w: WritableRegPair) -> WritableReg {
        w.hi
    }

    #[inline]
    fn writable_regpair_lo(&mut self, w: WritableRegPair) -> WritableReg {
        w.lo
    }

    #[inline]
    fn regpair(&mut self, hi: Reg, lo: Reg) -> RegPair {
        RegPair { hi, lo }
    }

    #[inline]
    fn regpair_hi(&mut self, w: RegPair) -> Reg {
        w.hi
    }

    #[inline]
    fn regpair_lo(&mut self, w: RegPair) -> Reg {
        w.lo
    }
}

/// Zero-extend the low `from_bits` bits of `value` to a full u64.
#[inline]
fn zero_extend_to_u64(value: u64, from_bits: u8) -> u64 {
    assert!(from_bits <= 64);
    if from_bits >= 64 {
        value
    } else {
        value & ((1u64 << from_bits) - 1)
    }
}

/// Sign-extend the low `from_bits` bits of `value` to a full u64.
#[inline]
fn sign_extend_to_u64(value: u64, from_bits: u8) -> u64 {
    assert!(from_bits <= 64);
    if from_bits >= 64 {
        value
    } else {
        (((value << (64 - from_bits)) as i64) >> (64 - from_bits)) as u64
    }
}

/// Determines whether this condcode interprets inputs as signed or
/// unsigned. See the documentation for the `icmp` instruction in
/// cranelift-codegen/meta/src/shared/instructions.rs for further insights
/// into this.
#[inline]
fn condcode_is_signed(cc: IntCC) -> bool {
    match cc {
        IntCC::Equal => false,
        IntCC::NotEqual => false,
        IntCC::SignedGreaterThanOrEqual => true,
        IntCC::SignedGreaterThan => true,
        IntCC::SignedLessThanOrEqual => true,
        IntCC::SignedLessThan => true,
        IntCC::UnsignedGreaterThanOrEqual => false,
        IntCC::UnsignedGreaterThan => false,
        IntCC::UnsignedLessThanOrEqual => false,
        IntCC::UnsignedLessThan => false,
    }
}
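
// A minimal, illustrative sanity check of the pure helpers above; the
// constants are chosen by hand and are not derived from the ISA.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn extension_helpers() {
        // Zero extension keeps only the low `from_bits` bits.
        assert_eq!(zero_extend_to_u64(0xffff_ff80, 8), 0x80);
        assert_eq!(zero_extend_to_u64(u64::MAX, 64), u64::MAX);
        // Sign extension replicates the top bit of the low `from_bits` bits.
        assert_eq!(sign_extend_to_u64(0x80, 8), 0xffff_ffff_ffff_ff80);
        assert_eq!(sign_extend_to_u64(0x7f, 8), 0x7f);
    }

    #[test]
    fn condcode_signedness() {
        assert!(condcode_is_signed(IntCC::SignedLessThan));
        assert!(!condcode_is_signed(IntCC::UnsignedLessThan));
        // Equality comparisons are sign-agnostic.
        assert!(!condcode_is_signed(IntCC::Equal));
    }
}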