Path: blob/main/cranelift/codegen/src/isa/riscv64/lower/isle.rs
//! ISLE integration glue code for riscv64 lowering.

// Pull in the ISLE generated code.
pub mod generated_code;
use generated_code::MInst;

// Types that the generated ISLE code uses via `use super::*`.
use self::generated_code::{FpuOPWidth, VecAluOpRR, VecLmul};
use crate::isa::riscv64::Riscv64Backend;
use crate::isa::riscv64::lower::args::{
    FReg, VReg, WritableFReg, WritableVReg, WritableXReg, XReg,
};
use crate::machinst::Reg;
use crate::machinst::{CallInfo, MachInst, isle::*};
use crate::machinst::{VCodeConstant, VCodeConstantData};
use crate::{
    ir::{
        AtomicRmwOp, BlockCall, ExternalName, Inst, InstructionData, MemFlags, Opcode, TrapCode,
        Value, ValueList, immediates::*, types::*,
    },
    isa::riscv64::inst::*,
    machinst::{ArgPair, CallArgList, CallRetList, InstOutput},
};
use alloc::boxed::Box;
use alloc::vec::Vec;
use regalloc2::PReg;
use wasmtime_core::math::{f32_cvt_to_int_bounds, f64_cvt_to_int_bounds};

type BoxCallInfo = Box<CallInfo<ExternalName>>;
type BoxCallIndInfo = Box<CallInfo<Reg>>;
type BoxReturnCallInfo = Box<ReturnCallInfo<ExternalName>>;
type BoxReturnCallIndInfo = Box<ReturnCallInfo<Reg>>;
type BoxExternalName = Box<ExternalName>;
type VecMachLabel = Vec<MachLabel>;
type VecArgPair = Vec<ArgPair>;

pub(crate) struct RV64IsleContext<'a, 'b, I, B>
where
    I: VCodeInst,
    B: LowerBackend,
{
    pub lower_ctx: &'a mut Lower<'b, I>,
    pub backend: &'a B,
    /// Precalculated value for the minimum vector register size. Will be 0 if
    /// vectors are not supported.
    min_vec_reg_size: u64,
}

impl<'a, 'b> RV64IsleContext<'a, 'b, MInst, Riscv64Backend> {
    fn new(lower_ctx: &'a mut Lower<'b, MInst>, backend: &'a Riscv64Backend) -> Self {
        Self {
            lower_ctx,
            backend,
            min_vec_reg_size: backend.isa_flags.min_vec_reg_size(),
        }
    }

    pub(crate) fn dfg(&self) -> &crate::ir::DataFlowGraph {
        &self.lower_ctx.f.dfg
    }
}

impl generated_code::Context for RV64IsleContext<'_, '_, MInst, Riscv64Backend> {
    isle_lower_prelude_methods!();

    fn gen_call_info(
        &mut self,
        sig: Sig,
        dest: ExternalName,
        uses: CallArgList,
        defs: CallRetList,
        try_call_info: Option<TryCallInfo>,
        patchable: bool,
    ) -> BoxCallInfo {
        let stack_ret_space = self.lower_ctx.sigs()[sig].sized_stack_ret_space();
        let stack_arg_space = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_outgoing_args_size(stack_ret_space + stack_arg_space);

        Box::new(
            self.lower_ctx
                .gen_call_info(sig, dest, uses, defs, try_call_info, patchable),
        )
    }

    fn gen_call_ind_info(
        &mut self,
        sig: Sig,
        dest: Reg,
        uses: CallArgList,
        defs: CallRetList,
        try_call_info: Option<TryCallInfo>,
    ) -> BoxCallIndInfo {
        let stack_ret_space = self.lower_ctx.sigs()[sig].sized_stack_ret_space();
        let stack_arg_space = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_outgoing_args_size(stack_ret_space + stack_arg_space);

        Box::new(
            self.lower_ctx
                .gen_call_info(sig, dest, uses, defs, try_call_info, false),
        )
    }

    fn gen_return_call_info(
        &mut self,
        sig: Sig,
        dest: ExternalName,
        uses: CallArgList,
    ) -> BoxReturnCallInfo {
        let new_stack_arg_size = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_tail_args_size(new_stack_arg_size);

        Box::new(ReturnCallInfo {
            dest,
            uses,
            new_stack_arg_size,
        })
    }

    fn gen_return_call_ind_info(
        &mut self,
        sig: Sig,
        dest: Reg,
        uses: CallArgList,
    ) -> BoxReturnCallIndInfo {
        let new_stack_arg_size = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_tail_args_size(new_stack_arg_size);

        Box::new(ReturnCallInfo {
            dest,
            uses,
            new_stack_arg_size,
        })
    }

    fn fpu_op_width_from_ty(&mut self, ty: Type) -> FpuOPWidth {
        match ty {
            F16 => FpuOPWidth::H,
            F32 => FpuOPWidth::S,
            F64 => FpuOPWidth::D,
            F128 => FpuOPWidth::Q,
            _ => unimplemented!("Unimplemented FPU Op Width: {ty}"),
        }
    }

    fn vreg_new(&mut self, r: Reg) -> VReg {
        VReg::new(r).unwrap()
    }
    fn writable_vreg_new(&mut self, r: WritableReg) -> WritableVReg {
        r.map(|wr| VReg::new(wr).unwrap())
    }
    fn writable_vreg_to_vreg(&mut self, arg0: WritableVReg) -> VReg {
        arg0.to_reg()
    }
    fn writable_vreg_to_writable_reg(&mut self, arg0: WritableVReg) -> WritableReg {
        arg0.map(|vr| vr.to_reg())
    }
    fn vreg_to_reg(&mut self, arg0: VReg) -> Reg {
        *arg0
    }
    fn xreg_new(&mut self, r: Reg) -> XReg {
        XReg::new(r).unwrap()
    }
    fn writable_xreg_new(&mut self, r: WritableReg) -> WritableXReg {
        r.map(|wr| XReg::new(wr).unwrap())
    }
    fn writable_xreg_to_xreg(&mut self, arg0: WritableXReg) -> XReg {
        arg0.to_reg()
    }
    fn writable_xreg_to_writable_reg(&mut self, arg0: WritableXReg) -> WritableReg {
        arg0.map(|xr| xr.to_reg())
    }
    fn xreg_to_reg(&mut self, arg0: XReg) -> Reg {
        *arg0
    }
    fn freg_new(&mut self, r: Reg) -> FReg {
        FReg::new(r).unwrap()
    }
    fn writable_freg_new(&mut self, r: WritableReg) -> WritableFReg {
        r.map(|wr| FReg::new(wr).unwrap())
    }
    fn writable_freg_to_freg(&mut self, arg0: WritableFReg) -> FReg {
        arg0.to_reg()
    }
    fn writable_freg_to_writable_reg(&mut self, arg0: WritableFReg) -> WritableReg {
        arg0.map(|fr| fr.to_reg())
    }
    fn freg_to_reg(&mut self, arg0: FReg) -> Reg {
        *arg0
    }

    fn min_vec_reg_size(&mut self) -> u64 {
        self.min_vec_reg_size
    }

    #[inline]
    fn ty_vec_fits_in_register(&mut self, ty: Type) -> Option<Type> {
        if ty.is_vector() && (ty.bits() as u64) <= self.min_vec_reg_size() {
            Some(ty)
        } else {
            None
        }
    }

    fn ty_supported(&mut self, ty: Type) -> Option<Type> {
        let lane_type = ty.lane_type();
        let supported = match ty {
            // Scalar integers are always supported
            ty if ty.is_int() => true,
            // Floating point types depend on certain extensions
            // F32 depends on the F extension
            // If F32 is supported, then the registers are also large enough for F16
            F16 | F32 => self.backend.isa_flags.has_f(),
            // F64 depends on the D extension
            F64 => self.backend.isa_flags.has_d(),
            // F128 is currently stored in a pair of integer registers
            F128 => true,

            // The base vector extension supports all integer types, up to 64 bits
            // as long as they fit in a register
            ty if self.ty_vec_fits_in_register(ty).is_some()
                && lane_type.is_int()
                && lane_type.bits() <= 64 =>
            {
                true
            }

            // If the vector type has floating point lanes then the spec states:
            //
            // Vector instructions where any floating-point vector operand’s EEW is not a
            // supported floating-point type width (which includes when FLEN < SEW) are reserved.
            //
            // So we also have to check if we support the scalar version of the type.
            ty if self.ty_vec_fits_in_register(ty).is_some()
                && lane_type.is_float()
                && self.ty_supported(lane_type).is_some()
                // Additionally the base V spec only supports 32 and 64 bit floating point types.
                && (lane_type.bits() == 32 || lane_type.bits() == 64 || (lane_type.bits() == 16 && self.backend.isa_flags.has_zvfh())) =>
            {
                true
            }

            // Otherwise do not match
            _ => false,
        };

        if supported { Some(ty) } else { None }
    }

    fn ty_supported_float_size(&mut self, ty: Type) -> Option<Type> {
        self.ty_supported(ty)
            .filter(|&ty| ty.is_float() && ty != F128)
    }

    fn ty_supported_float_min(&mut self, ty: Type) -> Option<Type> {
        self.ty_supported_float_size(ty)
            .filter(|&ty| ty != F16 || self.backend.isa_flags.has_zfhmin())
    }

    fn ty_supported_float_full(&mut self, ty: Type) -> Option<Type> {
        self.ty_supported_float_min(ty)
            .filter(|&ty| ty != F16 || self.backend.isa_flags.has_zfh())
    }

    fn ty_supported_vec(&mut self, ty: Type) -> Option<Type> {
        self.ty_supported(ty).filter(|ty| ty.is_vector())
    }

    fn ty_reg_pair(&mut self, ty: Type) -> Option<Type> {
        match ty {
            I128 | F128 => Some(ty),
            _ => None,
        }
    }

    fn load_ra(&mut self) -> Reg {
        if self.backend.flags.preserve_frame_pointers() {
            let tmp = self.temp_writable_reg(I64);
            self.emit(&MInst::Load {
                rd: tmp,
                op: LoadOP::Ld,
                flags: MemFlags::trusted(),
                from: AMode::FPOffset(8),
            });
            tmp.to_reg()
        } else {
            link_reg()
        }
    }

    fn label_to_br_target(&mut self, label: MachLabel) -> CondBrTarget {
        CondBrTarget::Label(label)
    }

    fn imm12_and(&mut self, imm: Imm12, x: u64) -> Imm12 {
        Imm12::from_i16(imm.as_i16() & (x as i16))
    }

    fn fli_constant_from_u64(&mut self, ty: Type, imm: u64) -> Option<FliConstant> {
        FliConstant::maybe_from_u64(ty, imm)
    }

    fn fli_constant_from_negated_u64(&mut self, ty: Type, imm: u64) -> Option<FliConstant> {
        let negated_imm = match ty {
            F64 => imm ^ 0x8000_0000_0000_0000,
            F32 => imm ^ 0x8000_0000,
            F16 => imm ^ 0x8000,
            _ => unimplemented!(),
        };

        FliConstant::maybe_from_u64(ty, negated_imm)
    }

    fn i64_generate_imm(&mut self, imm: i64) -> Option<(Imm20, Imm12)> {
        MInst::generate_imm(imm as u64)
    }

    fn i64_shift_for_lui(&mut self, imm: i64) -> Option<(u64, Imm12)> {
        let trailing = imm.trailing_zeros();
        if trailing < 12 {
            return None;
        }

        let shift = Imm12::from_i16(trailing as i16 - 12);
        let base = (imm as u64) >> trailing;
        Some((base, shift))
    }

    fn i64_shift(&mut self, imm: i64) -> Option<(i64, Imm12)> {
        let trailing = imm.trailing_zeros();
        // Strictly speaking this early return is not required, but there is no
        // point in going further when the immediate has no trailing zeros.
        if trailing == 0 {
            return None;
        }

        let shift = Imm12::from_i16(trailing as i16);
        let base = imm >> trailing;
        Some((base, shift))
    }

    #[inline]
    fn emit(&mut self, arg0: &MInst) -> Unit {
        self.lower_ctx.emit(arg0.clone());
    }
    #[inline]
    fn imm12_from_u64(&mut self, arg0: u64) -> Option<Imm12> {
        Imm12::maybe_from_u64(arg0)
    }
    #[inline]
    fn imm12_from_i64(&mut self, arg0: i64) -> Option<Imm12> {
        Imm12::maybe_from_i64(arg0)
    }
    #[inline]
    fn imm12_is_zero(&mut self, imm: Imm12) -> Option<()> {
        if imm.as_i16() == 0 { Some(()) } else { None }
    }

    #[inline]
    fn imm20_from_u64(&mut self, arg0: u64) -> Option<Imm20> {
        Imm20::maybe_from_u64(arg0)
    }
    #[inline]
    fn imm20_from_i64(&mut self, arg0: i64) -> Option<Imm20> {
        Imm20::maybe_from_i64(arg0)
    }
    #[inline]
    fn imm20_is_zero(&mut self, imm: Imm20) -> Option<()> {
        if imm.as_i32() == 0 { Some(()) } else { None }
    }

    #[inline]
    fn imm5_from_u64(&mut self, arg0: u64) -> Option<Imm5> {
        Imm5::maybe_from_i8(i8::try_from(arg0 as i64).ok()?)
    }
    #[inline]
    fn imm5_from_i64(&mut self, arg0: i64) -> Option<Imm5> {
        Imm5::maybe_from_i8(i8::try_from(arg0).ok()?)
    }
    #[inline]
    fn i8_to_imm5(&mut self, arg0: i8) -> Option<Imm5> {
        Imm5::maybe_from_i8(arg0)
    }
    #[inline]
    fn uimm5_bitcast_to_imm5(&mut self, arg0: UImm5) -> Imm5 {
        Imm5::from_bits(arg0.bits() as u8)
    }
    #[inline]
    fn uimm5_from_u8(&mut self, arg0: u8) -> Option<UImm5> {
        UImm5::maybe_from_u8(arg0)
    }
    #[inline]
    fn uimm5_from_u64(&mut self, arg0: u64) -> Option<UImm5> {
        arg0.try_into().ok().and_then(UImm5::maybe_from_u8)
    }
    #[inline]
    fn writable_zero_reg(&mut self) -> WritableReg {
        writable_zero_reg()
    }
    #[inline]
    fn zero_reg(&mut self) -> XReg {
        XReg::new(zero_reg()).unwrap()
    }
    fn is_non_zero_reg(&mut self, reg: XReg) -> Option<()> {
        if reg != self.zero_reg() {
            Some(())
        } else {
            None
        }
    }
    fn is_zero_reg(&mut self, reg: XReg) -> Option<()> {
        if reg == self.zero_reg() {
            Some(())
        } else {
            None
        }
    }
    #[inline]
    fn imm_from_bits(&mut self, val: u64) -> Imm12 {
        Imm12::maybe_from_u64(val).unwrap()
    }
    #[inline]
    fn imm_from_neg_bits(&mut self, val: i64) -> Imm12 {
        Imm12::maybe_from_i64(val).unwrap()
    }

    fn frm_bits(&mut self, frm: &FRM) -> UImm5 {
        UImm5::maybe_from_u8(frm.bits()).unwrap()
    }

    fn imm12_const(&mut self, val: i32) -> Imm12 {
        if let Some(res) = Imm12::maybe_from_i64(val as i64) {
            res
        } else {
            panic!("Unable to make an Imm12 value from {val}")
        }
    }
    fn imm12_const_add(&mut self, val: i32, add: i32) -> Imm12 {
        Imm12::maybe_from_i64((val + add) as i64).unwrap()
    }
    fn imm12_add(&mut self, val: Imm12, add: i32) -> Option<Imm12> {
        Imm12::maybe_from_i64((i32::from(val.as_i16()) + add).into())
    }

    // Computes the shift amount for a shift on a `ty`-sized value: returns the
    // shift amount masked to the type width and `ty_bits - shamt`, each in a
    // freshly allocated register.
    fn gen_shamt(&mut self, ty: Type, shamt: XReg) -> ValueRegs {
        let ty_bits = if ty.bits() > 64 { 64 } else { ty.bits() };
        let ty_bits = i16::try_from(ty_bits).unwrap();
        let shamt = {
            let tmp = self.temp_writable_reg(I64);
            self.emit(&MInst::AluRRImm12 {
                alu_op: AluOPRRI::Andi,
                rd: tmp,
                rs: shamt.to_reg(),
                imm12: Imm12::from_i16(ty_bits - 1),
            });
            tmp.to_reg()
        };
        let len_sub_shamt = {
            let tmp = self.temp_writable_reg(I64);
            self.emit(&MInst::load_imm12(tmp, Imm12::from_i16(ty_bits)));
            let len_sub_shamt = self.temp_writable_reg(I64);
            self.emit(&MInst::AluRRR {
                alu_op: AluOPRRR::Sub,
                rd: len_sub_shamt,
                rs1: tmp.to_reg(),
                rs2: shamt,
            });
            len_sub_shamt.to_reg()
        };
        ValueRegs::two(shamt, len_sub_shamt)
    }

    fn has_v(&mut self) -> bool {
        self.backend.isa_flags.has_v()
    }

    fn has_m(&mut self) -> bool {
        self.backend.isa_flags.has_m()
    }

    fn has_zfa(&mut self) -> bool {
        self.backend.isa_flags.has_zfa()
    }

    fn has_zfhmin(&mut self) -> bool {
        self.backend.isa_flags.has_zfhmin()
    }

    fn has_zfh(&mut self) -> bool {
        self.backend.isa_flags.has_zfh()
    }

    fn has_zvfh(&mut self) -> bool {
        self.backend.isa_flags.has_zvfh()
    }

    fn has_zbkb(&mut self) -> bool {
        self.backend.isa_flags.has_zbkb()
    }

    fn has_zba(&mut self) -> bool {
        self.backend.isa_flags.has_zba()
    }

    fn has_zbb(&mut self) -> bool {
        self.backend.isa_flags.has_zbb()
    }

    fn has_zbc(&mut self) -> bool {
        self.backend.isa_flags.has_zbc()
    }

    fn has_zbs(&mut self) -> bool {
        self.backend.isa_flags.has_zbs()
    }

    fn has_zicond(&mut self) -> bool {
        self.backend.isa_flags.has_zicond()
    }

    fn gen_reg_offset_amode(&mut self, base: Reg, offset: i64) -> AMode {
        AMode::RegOffset(base, offset)
    }

    fn gen_sp_offset_amode(&mut self, offset: i64) -> AMode {
        AMode::SPOffset(offset)
    }

    fn gen_fp_offset_amode(&mut self, offset: i64) -> AMode {
        AMode::FPOffset(offset)
    }

    fn gen_stack_slot_amode(&mut self, ss: StackSlot, offset: i64) -> AMode {
        // Offset from beginning of stackslot area.
        let stack_off = self.lower_ctx.abi().sized_stackslot_offsets()[ss] as i64;
        let sp_off: i64 = stack_off + offset;
        AMode::SlotOffset(sp_off)
    }

    fn gen_const_amode(&mut self, c: VCodeConstant) -> AMode {
        AMode::Const(c)
    }

    fn valid_atomic_transaction(&mut self, ty: Type) -> Option<Type> {
        if ty.is_int() && ty.bits() <= 64 {
            Some(ty)
        } else {
            None
        }
    }
    fn is_atomic_rmw_max_etc(&mut self, op: &AtomicRmwOp) -> Option<(AtomicRmwOp, bool)> {
        let op = *op;
        match op {
            crate::ir::AtomicRmwOp::Umin => Some((op, false)),
            crate::ir::AtomicRmwOp::Umax => Some((op, false)),
            crate::ir::AtomicRmwOp::Smin => Some((op, true)),
            crate::ir::AtomicRmwOp::Smax => Some((op, true)),
            _ => None,
        }
    }

    fn sinkable_inst(&mut self, val: Value) -> Option<Inst> {
        self.is_sinkable_inst(val)
    }

    fn load_op(&mut self, ty: Type) -> LoadOP {
        LoadOP::from_type(ty)
    }
    fn store_op(&mut self, ty: Type) -> StoreOP {
        StoreOP::from_type(ty)
    }

    fn gen_stack_addr(&mut self, slot: StackSlot, offset: Offset32) -> Reg {
        let result = self.temp_writable_reg(I64);
        let i = self
            .lower_ctx
            .abi()
            .sized_stackslot_addr(slot, i64::from(offset) as u32, result);
        self.emit(&i);
        result.to_reg()
    }
    fn atomic_amo(&mut self) -> AMO {
        AMO::SeqCst
    }

    fn lower_br_table(&mut self, index: Reg, targets: &[MachLabel]) -> Unit {
        let tmp1 = self.temp_writable_reg(I64);
        let tmp2 = self.temp_writable_reg(I64);
        self.emit(&MInst::BrTable {
            index,
            tmp1,
            tmp2,
            targets: targets.to_vec(),
        });
    }

    fn fp_reg(&mut self) -> PReg {
        px_reg(8)
    }

    fn sp_reg(&mut self) -> PReg {
        px_reg(2)
    }

    #[inline]
    fn int_compare(&mut self, kind: &IntCC, rs1: XReg, rs2: XReg) -> IntegerCompare {
        IntegerCompare {
            kind: *kind,
            rs1: rs1.to_reg(),
            rs2: rs2.to_reg(),
        }
    }

    #[inline]
    fn int_compare_decompose(&mut self, cmp: IntegerCompare) -> (IntCC, XReg, XReg) {
        (cmp.kind, self.xreg_new(cmp.rs1), self.xreg_new(cmp.rs2))
    }

    #[inline]
    fn vstate_from_type(&mut self, ty: Type) -> VState {
        VState::from_type(ty)
    }

    #[inline]
    fn vstate_mf2(&mut self, vs: VState) -> VState {
        VState {
            vtype: VType {
                lmul: VecLmul::LmulF2,
                ..vs.vtype
            },
            ..vs
        }
    }

    fn vec_alu_rr_dst_type(&mut self, op: &VecAluOpRR) -> Type {
        MInst::canonical_type_for_rc(op.dst_regclass())
    }

    fn bclr_imm(&mut self, ty: Type, i: u64) -> Option<Imm12> {
        // Only consider those bits in the immediate which are up to the width
        // of `ty`.
        let neg = !i & (u64::MAX >> (64 - ty.bits()));
        if neg.count_ones() != 1 {
            return None;
        }
        Imm12::maybe_from_u64(neg.trailing_zeros().into())
    }

    fn binvi_imm(&mut self, i: u64) -> Option<Imm12> {
        if i.count_ones() != 1 {
            return None;
        }
        Imm12::maybe_from_u64(i.trailing_zeros().into())
    }
    fn bseti_imm(&mut self, i: u64) -> Option<Imm12> {
        self.binvi_imm(i)
    }

    fn fcvt_smin_bound(&mut self, float: Type, int: Type, saturating: bool) -> u64 {
        match (int, float) {
            // Saturating cases for larger integers are handled using the
            // `fcvt.{w,d}.{s,d}` instruction directly, that automatically
            // saturates up/down to the correct limit.
            //
            // NB: i32/i64 don't use this function because the native RISC-V
            // instruction does everything we already need, so only cases for
            // i8/i16 are listed here.
            (I8, F32) if saturating => f32::from(i8::MIN).to_bits().into(),
            (I8, F64) if saturating => f64::from(i8::MIN).to_bits(),
            (I16, F32) if saturating => f32::from(i16::MIN).to_bits().into(),
            (I16, F64) if saturating => f64::from(i16::MIN).to_bits(),

            (_, F32) if !saturating => f32_cvt_to_int_bounds(true, int.bits()).0.to_bits().into(),
            (_, F64) if !saturating => f64_cvt_to_int_bounds(true, int.bits()).0.to_bits(),
            _ => unimplemented!(),
        }
    }

    fn fcvt_smax_bound(&mut self, float: Type, int: Type, saturating: bool) -> u64 {
        // NB: see `fcvt_smin_bound` for some more comments
        match (int, float) {
            (I8, F32) if saturating => f32::from(i8::MAX).to_bits().into(),
            (I8, F64) if saturating => f64::from(i8::MAX).to_bits(),
            (I16, F32) if saturating => f32::from(i16::MAX).to_bits().into(),
            (I16, F64) if saturating => f64::from(i16::MAX).to_bits(),

            (_, F32) if !saturating => f32_cvt_to_int_bounds(true, int.bits()).1.to_bits().into(),
            (_, F64) if !saturating => f64_cvt_to_int_bounds(true, int.bits()).1.to_bits(),
            _ => unimplemented!(),
        }
    }

    fn fcvt_umax_bound(&mut self, float: Type, int: Type, saturating: bool) -> u64 {
        // NB: see `fcvt_smin_bound` for some more comments
        match (int, float) {
            (I8, F32) if saturating => f32::from(u8::MAX).to_bits().into(),
            (I8, F64) if saturating => f64::from(u8::MAX).to_bits(),
            (I16, F32) if saturating => f32::from(u16::MAX).to_bits().into(),
            (I16, F64) if saturating => f64::from(u16::MAX).to_bits(),

            (_, F32) if !saturating => f32_cvt_to_int_bounds(false, int.bits()).1.to_bits().into(),
            (_, F64) if !saturating => f64_cvt_to_int_bounds(false, int.bits()).1.to_bits(),
            _ => unimplemented!(),
        }
    }

    fn fcvt_umin_bound(&mut self, float: Type, saturating: bool) -> u64 {
        assert!(!saturating);
        match float {
            F32 => (-1.0f32).to_bits().into(),
            F64 => (-1.0f64).to_bits(),
            _ => unimplemented!(),
        }
    }

    fn is_pic(&mut self) -> bool {
        self.backend.flags.is_pic()
    }
}

/// The main entry point for lowering with ISLE.
pub(crate) fn lower(
    lower_ctx: &mut Lower<MInst>,
    backend: &Riscv64Backend,
    inst: Inst,
) -> Option<InstOutput> {
    // TODO: reuse the ISLE context across lowerings so we can reuse its
    // internal heap allocations.
    let mut isle_ctx = RV64IsleContext::new(lower_ctx, backend);
    generated_code::constructor_lower(&mut isle_ctx, inst)
}

/// The main entry point for branch lowering with ISLE.
pub(crate) fn lower_branch(
    lower_ctx: &mut Lower<MInst>,
    backend: &Riscv64Backend,
    branch: Inst,
    targets: &[MachLabel],
) -> Option<()> {
    // TODO: reuse the ISLE context across lowerings so we can reuse its
    // internal heap allocations.
    let mut isle_ctx = RV64IsleContext::new(lower_ctx, backend);
    generated_code::constructor_lower_branch(&mut isle_ctx, branch, targets)
}