Path: blob/main/cranelift/codegen/src/isa/riscv64/lower/isle.rs
//! ISLE integration glue code for riscv64 lowering.

// Pull in the ISLE generated code.
pub mod generated_code;
use generated_code::MInst;

// Types that the generated ISLE code uses via `use super::*`.
use self::generated_code::{FpuOPWidth, VecAluOpRR, VecLmul};
use crate::isa::riscv64::Riscv64Backend;
use crate::isa::riscv64::lower::args::{
    FReg, VReg, WritableFReg, WritableVReg, WritableXReg, XReg,
};
use crate::machinst::Reg;
use crate::machinst::{CallInfo, MachInst, isle::*};
use crate::machinst::{VCodeConstant, VCodeConstantData};
use crate::{
    ir::{
        AtomicRmwOp, BlockCall, ExternalName, Inst, InstructionData, MemFlags, Opcode, TrapCode,
        Value, ValueList, immediates::*, types::*,
    },
    isa::riscv64::inst::*,
    machinst::{ArgPair, CallArgList, CallRetList, InstOutput},
};
use regalloc2::PReg;
use std::boxed::Box;
use std::vec::Vec;
use wasmtime_math::{f32_cvt_to_int_bounds, f64_cvt_to_int_bounds};

type BoxCallInfo = Box<CallInfo<ExternalName>>;
type BoxCallIndInfo = Box<CallInfo<Reg>>;
type BoxReturnCallInfo = Box<ReturnCallInfo<ExternalName>>;
type BoxReturnCallIndInfo = Box<ReturnCallInfo<Reg>>;
type BoxExternalName = Box<ExternalName>;
type VecMachLabel = Vec<MachLabel>;
type VecArgPair = Vec<ArgPair>;

pub(crate) struct RV64IsleContext<'a, 'b, I, B>
where
    I: VCodeInst,
    B: LowerBackend,
{
    pub lower_ctx: &'a mut Lower<'b, I>,
    pub backend: &'a B,
    /// Precalculated value for the minimum vector register size. Will be 0 if
    /// vectors are not supported.
    min_vec_reg_size: u64,
}

impl<'a, 'b> RV64IsleContext<'a, 'b, MInst, Riscv64Backend> {
    fn new(lower_ctx: &'a mut Lower<'b, MInst>, backend: &'a Riscv64Backend) -> Self {
        Self {
            lower_ctx,
            backend,
            min_vec_reg_size: backend.isa_flags.min_vec_reg_size(),
        }
    }

    pub(crate) fn dfg(&self) -> &crate::ir::DataFlowGraph {
        &self.lower_ctx.f.dfg
    }
}

impl generated_code::Context for RV64IsleContext<'_, '_, MInst, Riscv64Backend> {
    isle_lower_prelude_methods!();

    fn gen_call_info(
        &mut self,
        sig: Sig,
        dest: ExternalName,
        uses: CallArgList,
        defs: CallRetList,
        try_call_info: Option<TryCallInfo>,
    ) -> BoxCallInfo {
        let stack_ret_space = self.lower_ctx.sigs()[sig].sized_stack_ret_space();
        let stack_arg_space = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_outgoing_args_size(stack_ret_space + stack_arg_space);

        Box::new(
            self.lower_ctx
                .gen_call_info(sig, dest, uses, defs, try_call_info),
        )
    }

    fn gen_call_ind_info(
        &mut self,
        sig: Sig,
        dest: Reg,
        uses: CallArgList,
        defs: CallRetList,
        try_call_info: Option<TryCallInfo>,
    ) -> BoxCallIndInfo {
        let stack_ret_space = self.lower_ctx.sigs()[sig].sized_stack_ret_space();
        let stack_arg_space = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_outgoing_args_size(stack_ret_space + stack_arg_space);

        Box::new(
            self.lower_ctx
                .gen_call_info(sig, dest, uses, defs, try_call_info),
        )
    }

    fn gen_return_call_info(
        &mut self,
        sig: Sig,
        dest: ExternalName,
        uses: CallArgList,
    ) -> BoxReturnCallInfo {
        let new_stack_arg_size = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_tail_args_size(new_stack_arg_size);

        Box::new(ReturnCallInfo {
            dest,
            uses,
            new_stack_arg_size,
        })
    }

    fn gen_return_call_ind_info(
        &mut self,
        sig: Sig,
        dest: Reg,
        uses: CallArgList,
    ) -> BoxReturnCallIndInfo {
        let new_stack_arg_size = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_tail_args_size(new_stack_arg_size);

        Box::new(ReturnCallInfo {
            dest,
            uses,
            new_stack_arg_size,
        })
    }

    fn fpu_op_width_from_ty(&mut self, ty: Type) -> FpuOPWidth {
        match ty {
            F16 => FpuOPWidth::H,
            F32 => FpuOPWidth::S,
            F64 => FpuOPWidth::D,
            F128 => FpuOPWidth::Q,
            _ => unimplemented!("Unimplemented FPU Op Width: {ty}"),
        }
    }

    fn vreg_new(&mut self, r: Reg) -> VReg {
        VReg::new(r).unwrap()
    }
    fn writable_vreg_new(&mut self, r: WritableReg) -> WritableVReg {
        r.map(|wr| VReg::new(wr).unwrap())
    }
    fn writable_vreg_to_vreg(&mut self, arg0: WritableVReg) -> VReg {
        arg0.to_reg()
    }
    fn writable_vreg_to_writable_reg(&mut self, arg0: WritableVReg) -> WritableReg {
        arg0.map(|vr| vr.to_reg())
    }
    fn vreg_to_reg(&mut self, arg0: VReg) -> Reg {
        *arg0
    }
    fn xreg_new(&mut self, r: Reg) -> XReg {
        XReg::new(r).unwrap()
    }
    fn writable_xreg_new(&mut self, r: WritableReg) -> WritableXReg {
        r.map(|wr| XReg::new(wr).unwrap())
    }
    fn writable_xreg_to_xreg(&mut self, arg0: WritableXReg) -> XReg {
        arg0.to_reg()
    }
    fn writable_xreg_to_writable_reg(&mut self, arg0: WritableXReg) -> WritableReg {
        arg0.map(|xr| xr.to_reg())
    }
    fn xreg_to_reg(&mut self, arg0: XReg) -> Reg {
        *arg0
    }
    fn freg_new(&mut self, r: Reg) -> FReg {
        FReg::new(r).unwrap()
    }
    fn writable_freg_new(&mut self, r: WritableReg) -> WritableFReg {
        r.map(|wr| FReg::new(wr).unwrap())
    }
    fn writable_freg_to_freg(&mut self, arg0: WritableFReg) -> FReg {
        arg0.to_reg()
    }
    fn writable_freg_to_writable_reg(&mut self, arg0: WritableFReg) -> WritableReg {
        arg0.map(|fr| fr.to_reg())
    }
    fn freg_to_reg(&mut self, arg0: FReg) -> Reg {
        *arg0
    }

    fn min_vec_reg_size(&mut self) -> u64 {
        self.min_vec_reg_size
    }

    #[inline]
    fn ty_vec_fits_in_register(&mut self, ty: Type) -> Option<Type> {
        if ty.is_vector() && (ty.bits() as u64) <= self.min_vec_reg_size() {
            Some(ty)
        } else {
            None
        }
    }

    fn ty_supported(&mut self, ty: Type) -> Option<Type> {
        let lane_type = ty.lane_type();
        let supported = match ty {
            // Scalar integers are always supported
            ty if ty.is_int() => true,
            // Floating point types depend on certain extensions
            // F32 depends on the F extension
            // If F32 is supported, then the registers are also large enough for F16
            F16 | F32 => self.backend.isa_flags.has_f(),
            // F64 depends on the D extension
            F64 => self.backend.isa_flags.has_d(),
            // F128 is currently stored in a pair of integer registers
            F128 => true,

            // The base vector extension supports all integer types, up to 64 bits,
            // as long as they fit in a register
            ty if self.ty_vec_fits_in_register(ty).is_some()
                && lane_type.is_int()
                && lane_type.bits() <= 64 =>
            {
                true
            }

            // If the vector type has floating point lanes then the spec states:
            //
            // Vector instructions where any floating-point vector operand’s EEW is not a
            // supported floating-point type width (which includes when FLEN < SEW) are reserved.
            //
            // So we also have to check if we support the scalar version of the type.
            ty if self.ty_vec_fits_in_register(ty).is_some()
                && lane_type.is_float()
                && self.ty_supported(lane_type).is_some()
                // Additionally the base V spec only supports 32 and 64 bit floating point types.
                && (lane_type.bits() == 32 || lane_type.bits() == 64 || (lane_type.bits() == 16 && self.backend.isa_flags.has_zvfh())) =>
            {
                true
            }

            // Otherwise do not match
            _ => false,
        };

        if supported { Some(ty) } else { None }
    }
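
    // A worked reading of `ty_supported` above (illustrative only): `I64` is
    // always accepted; `F64` requires the D extension; `I8X16` is accepted
    // once `min_vec_reg_size` covers 128 bits; `F16X8` additionally requires
    // `Zvfh` (plus F for the scalar F16 lane type).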

    fn ty_supported_float_size(&mut self, ty: Type) -> Option<Type> {
        self.ty_supported(ty)
            .filter(|&ty| ty.is_float() && ty != F128)
    }

    fn ty_supported_float_min(&mut self, ty: Type) -> Option<Type> {
        self.ty_supported_float_size(ty)
            .filter(|&ty| ty != F16 || self.backend.isa_flags.has_zfhmin())
    }

    fn ty_supported_float_full(&mut self, ty: Type) -> Option<Type> {
        self.ty_supported_float_min(ty)
            .filter(|&ty| ty != F16 || self.backend.isa_flags.has_zfh())
    }

    fn ty_supported_vec(&mut self, ty: Type) -> Option<Type> {
        self.ty_supported(ty).filter(|ty| ty.is_vector())
    }

    fn ty_reg_pair(&mut self, ty: Type) -> Option<Type> {
        match ty {
            I128 | F128 => Some(ty),
            _ => None,
        }
    }

    fn load_ra(&mut self) -> Reg {
        if self.backend.flags.preserve_frame_pointers() {
            let tmp = self.temp_writable_reg(I64);
            self.emit(&MInst::Load {
                rd: tmp,
                op: LoadOP::Ld,
                flags: MemFlags::trusted(),
                from: AMode::FPOffset(8),
            });
            tmp.to_reg()
        } else {
            link_reg()
        }
    }

    fn label_to_br_target(&mut self, label: MachLabel) -> CondBrTarget {
        CondBrTarget::Label(label)
    }

    fn imm12_and(&mut self, imm: Imm12, x: u64) -> Imm12 {
        Imm12::from_i16(imm.as_i16() & (x as i16))
    }

    fn fli_constant_from_u64(&mut self, ty: Type, imm: u64) -> Option<FliConstant> {
        FliConstant::maybe_from_u64(ty, imm)
    }

    fn fli_constant_from_negated_u64(&mut self, ty: Type, imm: u64) -> Option<FliConstant> {
        let negated_imm = match ty {
            F64 => imm ^ 0x8000_0000_0000_0000,
            F32 => imm ^ 0x8000_0000,
            F16 => imm ^ 0x8000,
            _ => unimplemented!(),
        };

        FliConstant::maybe_from_u64(ty, negated_imm)
    }

    fn i64_generate_imm(&mut self, imm: i64) -> Option<(Imm20, Imm12)> {
        MInst::generate_imm(imm as u64)
    }

    fn i64_shift_for_lui(&mut self, imm: i64) -> Option<(u64, Imm12)> {
        let trailing = imm.trailing_zeros();
        if trailing < 12 {
            return None;
        }

        let shift = Imm12::from_i16(trailing as i16 - 12);
        let base = (imm as u64) >> trailing;
        Some((base, shift))
    }

    fn i64_shift(&mut self, imm: i64) -> Option<(i64, Imm12)> {
        let trailing = imm.trailing_zeros();
        // A shift amount of zero would leave `base` equal to `imm`, so the
        // decomposition gains nothing; bail out early.
        if trailing == 0 {
            return None;
        }

        let shift = Imm12::from_i16(trailing as i16);
        let base = imm >> trailing;
        Some((base, shift))
    }
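
    // Worked example for the two helpers above (illustrative): `imm = 0x1_0000`
    // has 16 trailing zeros, so `i64_shift_for_lui` yields
    // `(base, shift) = (0x1, 4)`; a `lui` materializes `0x1 << 12` and a
    // further `slli` by 4 recovers `0x1_0000`. Likewise `i64_shift`
    // decomposes `0x50` into `(0x5, 4)`.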

    #[inline]
    fn emit(&mut self, arg0: &MInst) -> Unit {
        self.lower_ctx.emit(arg0.clone());
    }
    #[inline]
    fn imm12_from_u64(&mut self, arg0: u64) -> Option<Imm12> {
        Imm12::maybe_from_u64(arg0)
    }
    #[inline]
    fn imm12_from_i64(&mut self, arg0: i64) -> Option<Imm12> {
        Imm12::maybe_from_i64(arg0)
    }
    #[inline]
    fn imm12_is_zero(&mut self, imm: Imm12) -> Option<()> {
        if imm.as_i16() == 0 { Some(()) } else { None }
    }

    #[inline]
    fn imm20_from_u64(&mut self, arg0: u64) -> Option<Imm20> {
        Imm20::maybe_from_u64(arg0)
    }
    #[inline]
    fn imm20_from_i64(&mut self, arg0: i64) -> Option<Imm20> {
        Imm20::maybe_from_i64(arg0)
    }
    #[inline]
    fn imm20_is_zero(&mut self, imm: Imm20) -> Option<()> {
        if imm.as_i32() == 0 { Some(()) } else { None }
    }

    #[inline]
    fn imm5_from_u64(&mut self, arg0: u64) -> Option<Imm5> {
        Imm5::maybe_from_i8(i8::try_from(arg0 as i64).ok()?)
    }
    #[inline]
    fn imm5_from_i64(&mut self, arg0: i64) -> Option<Imm5> {
        Imm5::maybe_from_i8(i8::try_from(arg0).ok()?)
    }
    #[inline]
    fn i8_to_imm5(&mut self, arg0: i8) -> Option<Imm5> {
        Imm5::maybe_from_i8(arg0)
    }
    #[inline]
    fn uimm5_bitcast_to_imm5(&mut self, arg0: UImm5) -> Imm5 {
        Imm5::from_bits(arg0.bits() as u8)
    }
    #[inline]
    fn uimm5_from_u8(&mut self, arg0: u8) -> Option<UImm5> {
        UImm5::maybe_from_u8(arg0)
    }
    #[inline]
    fn uimm5_from_u64(&mut self, arg0: u64) -> Option<UImm5> {
        arg0.try_into().ok().and_then(UImm5::maybe_from_u8)
    }
    #[inline]
    fn writable_zero_reg(&mut self) -> WritableReg {
        writable_zero_reg()
    }
    #[inline]
    fn zero_reg(&mut self) -> XReg {
        XReg::new(zero_reg()).unwrap()
    }
    fn is_non_zero_reg(&mut self, reg: XReg) -> Option<()> {
        if reg != self.zero_reg() {
            Some(())
        } else {
            None
        }
    }
    fn is_zero_reg(&mut self, reg: XReg) -> Option<()> {
        if reg == self.zero_reg() {
            Some(())
        } else {
            None
        }
    }
    #[inline]
    fn imm_from_bits(&mut self, val: u64) -> Imm12 {
        Imm12::maybe_from_u64(val).unwrap()
    }
    #[inline]
    fn imm_from_neg_bits(&mut self, val: i64) -> Imm12 {
        Imm12::maybe_from_i64(val).unwrap()
    }

    fn frm_bits(&mut self, frm: &FRM) -> UImm5 {
        UImm5::maybe_from_u8(frm.bits()).unwrap()
    }

    fn imm12_const(&mut self, val: i32) -> Imm12 {
        if let Some(res) = Imm12::maybe_from_i64(val as i64) {
            res
        } else {
            panic!("Unable to make an Imm12 value from {val}")
        }
    }
    fn imm12_const_add(&mut self, val: i32, add: i32) -> Imm12 {
        Imm12::maybe_from_i64((val + add) as i64).unwrap()
    }
    fn imm12_add(&mut self, val: Imm12, add: i32) -> Option<Imm12> {
        Imm12::maybe_from_i64((i32::from(val.as_i16()) + add).into())
    }

    // Computes the masked shift amount along with `ty_bits - shamt`, both of
    // which shift and rotate lowerings need.
    fn gen_shamt(&mut self, ty: Type, shamt: XReg) -> ValueRegs {
        let ty_bits = if ty.bits() > 64 { 64 } else { ty.bits() };
        let ty_bits = i16::try_from(ty_bits).unwrap();
        let shamt = {
            let tmp = self.temp_writable_reg(I64);
            self.emit(&MInst::AluRRImm12 {
                alu_op: AluOPRRI::Andi,
                rd: tmp,
                rs: shamt.to_reg(),
                imm12: Imm12::from_i16(ty_bits - 1),
            });
            tmp.to_reg()
        };
        let len_sub_shamt = {
            let tmp = self.temp_writable_reg(I64);
            self.emit(&MInst::load_imm12(tmp, Imm12::from_i16(ty_bits)));
            let len_sub_shamt = self.temp_writable_reg(I64);
            self.emit(&MInst::AluRRR {
                alu_op: AluOPRRR::Sub,
                rd: len_sub_shamt,
                rs1: tmp.to_reg(),
                rs2: shamt,
            });
            len_sub_shamt.to_reg()
        };
        ValueRegs::two(shamt, len_sub_shamt)
    }
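
    // Worked example for `gen_shamt` (illustrative): for `ty = I32` and a
    // runtime shift amount of 35, the emitted `andi` produces `35 & 31 = 3`
    // and the `sub` produces `32 - 3 = 29`; shift/rotate lowerings consume
    // both halves of the returned pair.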

    fn has_v(&mut self) -> bool {
        self.backend.isa_flags.has_v()
    }

    fn has_m(&mut self) -> bool {
        self.backend.isa_flags.has_m()
    }

    fn has_zfa(&mut self) -> bool {
        self.backend.isa_flags.has_zfa()
    }

    fn has_zfhmin(&mut self) -> bool {
        self.backend.isa_flags.has_zfhmin()
    }

    fn has_zfh(&mut self) -> bool {
        self.backend.isa_flags.has_zfh()
    }

    fn has_zvfh(&mut self) -> bool {
        self.backend.isa_flags.has_zvfh()
    }

    fn has_zbkb(&mut self) -> bool {
        self.backend.isa_flags.has_zbkb()
    }

    fn has_zba(&mut self) -> bool {
        self.backend.isa_flags.has_zba()
    }

    fn has_zbb(&mut self) -> bool {
        self.backend.isa_flags.has_zbb()
    }

    fn has_zbc(&mut self) -> bool {
        self.backend.isa_flags.has_zbc()
    }

    fn has_zbs(&mut self) -> bool {
        self.backend.isa_flags.has_zbs()
    }

    fn has_zicond(&mut self) -> bool {
        self.backend.isa_flags.has_zicond()
    }

    fn gen_reg_offset_amode(&mut self, base: Reg, offset: i64) -> AMode {
        AMode::RegOffset(base, offset)
    }

    fn gen_sp_offset_amode(&mut self, offset: i64) -> AMode {
        AMode::SPOffset(offset)
    }

    fn gen_fp_offset_amode(&mut self, offset: i64) -> AMode {
        AMode::FPOffset(offset)
    }

    fn gen_stack_slot_amode(&mut self, ss: StackSlot, offset: i64) -> AMode {
        // Offset from beginning of stackslot area.
        let stack_off = self.lower_ctx.abi().sized_stackslot_offsets()[ss] as i64;
        let sp_off: i64 = stack_off + offset;
        AMode::SlotOffset(sp_off)
    }

    fn gen_const_amode(&mut self, c: VCodeConstant) -> AMode {
        AMode::Const(c)
    }

    fn valid_atomic_transaction(&mut self, ty: Type) -> Option<Type> {
        if ty.is_int() && ty.bits() <= 64 {
            Some(ty)
        } else {
            None
        }
    }
    fn is_atomic_rmw_max_etc(&mut self, op: &AtomicRmwOp) -> Option<(AtomicRmwOp, bool)> {
        let op = *op;
        match op {
            crate::ir::AtomicRmwOp::Umin => Some((op, false)),
            crate::ir::AtomicRmwOp::Umax => Some((op, false)),
            crate::ir::AtomicRmwOp::Smin => Some((op, true)),
            crate::ir::AtomicRmwOp::Smax => Some((op, true)),
            _ => None,
        }
    }

    fn sinkable_inst(&mut self, val: Value) -> Option<Inst> {
        self.is_sinkable_inst(val)
    }

    fn load_op(&mut self, ty: Type) -> LoadOP {
        LoadOP::from_type(ty)
    }
    fn store_op(&mut self, ty: Type) -> StoreOP {
        StoreOP::from_type(ty)
    }

    fn gen_stack_addr(&mut self, slot: StackSlot, offset: Offset32) -> Reg {
        let result = self.temp_writable_reg(I64);
        let i = self
            .lower_ctx
            .abi()
            .sized_stackslot_addr(slot, i64::from(offset) as u32, result);
        self.emit(&i);
        result.to_reg()
    }
    fn atomic_amo(&mut self) -> AMO {
        AMO::SeqCst
    }

    fn lower_br_table(&mut self, index: Reg, targets: &[MachLabel]) -> Unit {
        let tmp1 = self.temp_writable_reg(I64);
        let tmp2 = self.temp_writable_reg(I64);
        self.emit(&MInst::BrTable {
            index,
            tmp1,
            tmp2,
            targets: targets.to_vec(),
        });
    }

    fn fp_reg(&mut self) -> PReg {
        px_reg(8)
    }

    fn sp_reg(&mut self) -> PReg {
        px_reg(2)
    }

    #[inline]
    fn int_compare(&mut self, kind: &IntCC, rs1: XReg, rs2: XReg) -> IntegerCompare {
        IntegerCompare {
            kind: *kind,
            rs1: rs1.to_reg(),
            rs2: rs2.to_reg(),
        }
    }

    #[inline]
    fn int_compare_decompose(&mut self, cmp: IntegerCompare) -> (IntCC, XReg, XReg) {
        (cmp.kind, self.xreg_new(cmp.rs1), self.xreg_new(cmp.rs2))
    }

    #[inline]
    fn vstate_from_type(&mut self, ty: Type) -> VState {
        VState::from_type(ty)
    }

    #[inline]
    fn vstate_mf2(&mut self, vs: VState) -> VState {
        VState {
            vtype: VType {
                lmul: VecLmul::LmulF2,
                ..vs.vtype
            },
            ..vs
        }
    }

    fn vec_alu_rr_dst_type(&mut self, op: &VecAluOpRR) -> Type {
        MInst::canonical_type_for_rc(op.dst_regclass())
    }

    fn bclr_imm(&mut self, ty: Type, i: u64) -> Option<Imm12> {
        // Only consider those bits in the immediate which are up to the width
        // of `ty`.
        let neg = !i & (u64::MAX >> (64 - ty.bits()));
        if neg.count_ones() != 1 {
            return None;
        }
        Imm12::maybe_from_u64(neg.trailing_zeros().into())
    }

    fn binvi_imm(&mut self, i: u64) -> Option<Imm12> {
        if i.count_ones() != 1 {
            return None;
        }
        Imm12::maybe_from_u64(i.trailing_zeros().into())
    }
    fn bseti_imm(&mut self, i: u64) -> Option<Imm12> {
        self.binvi_imm(i)
    }
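
    // Worked example for the Zbs helpers above (illustrative): masking an
    // `I32` value with `0xFFFF_FFF7` clears only bit 3, so `bclr_imm` returns
    // `Imm12(3)` and the `and` can be emitted as `bclri rd, rs, 3`; an
    // `xor`/`or` with `0x100` maps through `binvi_imm`/`bseti_imm` to
    // immediate 8.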

    fn fcvt_smin_bound(&mut self, float: Type, int: Type, saturating: bool) -> u64 {
        match (int, float) {
            // Saturating cases for larger integers are handled using the
            // `fcvt.{w,d}.{s,d}` instruction directly, which automatically
            // saturates up/down to the correct limit.
            //
            // NB: i32/i64 don't use this function because the native RISC-V
            // instruction does everything we already need, so only cases for
            // i8/i16 are listed here.
            (I8, F32) if saturating => f32::from(i8::MIN).to_bits().into(),
            (I8, F64) if saturating => f64::from(i8::MIN).to_bits(),
            (I16, F32) if saturating => f32::from(i16::MIN).to_bits().into(),
            (I16, F64) if saturating => f64::from(i16::MIN).to_bits(),

            (_, F32) if !saturating => f32_cvt_to_int_bounds(true, int.bits()).0.to_bits().into(),
            (_, F64) if !saturating => f64_cvt_to_int_bounds(true, int.bits()).0.to_bits(),
            _ => unimplemented!(),
        }
    }

    fn fcvt_smax_bound(&mut self, float: Type, int: Type, saturating: bool) -> u64 {
        // NB: see `fcvt_smin_bound` for some more comments
        match (int, float) {
            (I8, F32) if saturating => f32::from(i8::MAX).to_bits().into(),
            (I8, F64) if saturating => f64::from(i8::MAX).to_bits(),
            (I16, F32) if saturating => f32::from(i16::MAX).to_bits().into(),
            (I16, F64) if saturating => f64::from(i16::MAX).to_bits(),

            (_, F32) if !saturating => f32_cvt_to_int_bounds(true, int.bits()).1.to_bits().into(),
            (_, F64) if !saturating => f64_cvt_to_int_bounds(true, int.bits()).1.to_bits(),
            _ => unimplemented!(),
        }
    }

    fn fcvt_umax_bound(&mut self, float: Type, int: Type, saturating: bool) -> u64 {
        // NB: see `fcvt_smin_bound` for some more comments
        match (int, float) {
            (I8, F32) if saturating => f32::from(u8::MAX).to_bits().into(),
            (I8, F64) if saturating => f64::from(u8::MAX).to_bits(),
            (I16, F32) if saturating => f32::from(u16::MAX).to_bits().into(),
            (I16, F64) if saturating => f64::from(u16::MAX).to_bits(),

            (_, F32) if !saturating => f32_cvt_to_int_bounds(false, int.bits()).1.to_bits().into(),
            (_, F64) if !saturating => f64_cvt_to_int_bounds(false, int.bits()).1.to_bits(),
            _ => unimplemented!(),
        }
    }

    fn fcvt_umin_bound(&mut self, float: Type, saturating: bool) -> u64 {
        assert!(!saturating);
        match float {
            F32 => (-1.0f32).to_bits().into(),
            F64 => (-1.0f64).to_bits(),
            _ => unimplemented!(),
        }
    }
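
    // Worked example for the bound helpers above (illustrative): a saturating
    // `F32`-to-`I8` conversion clamps against the bit patterns of `-128.0f32`
    // and `127.0f32` returned by `fcvt_smin_bound`/`fcvt_smax_bound`; i32/i64
    // never reach these helpers because the `fcvt.w.s`-style instructions
    // already saturate in hardware.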

    fn is_pic(&mut self) -> bool {
        self.backend.flags.is_pic()
    }
}

/// The main entry point for lowering with ISLE.
pub(crate) fn lower(
    lower_ctx: &mut Lower<MInst>,
    backend: &Riscv64Backend,
    inst: Inst,
) -> Option<InstOutput> {
    // TODO: reuse the ISLE context across lowerings so we can reuse its
    // internal heap allocations.
    let mut isle_ctx = RV64IsleContext::new(lower_ctx, backend);
    generated_code::constructor_lower(&mut isle_ctx, inst)
}

/// The main entry point for branch lowering with ISLE.
pub(crate) fn lower_branch(
    lower_ctx: &mut Lower<MInst>,
    backend: &Riscv64Backend,
    branch: Inst,
    targets: &[MachLabel],
) -> Option<()> {
    // TODO: reuse the ISLE context across lowerings so we can reuse its
    // internal heap allocations.
    let mut isle_ctx = RV64IsleContext::new(lower_ctx, backend);
    generated_code::constructor_lower_branch(&mut isle_ctx, branch, targets)
}
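
// Illustrative call shape (a sketch, not the real driver): the shared
// machinst lowering pass invokes `lower` once per CLIF instruction and
// `lower_branch` for each block terminator; a `None` return means no ISLE
// rule covered the input.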