Path: blob/main/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.h
35266 views
//===-- RISCVISelLowering.h - RISC-V DAG Lowering Interface -----*- C++ -*-===//1//2// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.3// See https://llvm.org/LICENSE.txt for license information.4// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception5//6//===----------------------------------------------------------------------===//7//8// This file defines the interfaces that RISC-V uses to lower LLVM code into a9// selection DAG.10//11//===----------------------------------------------------------------------===//1213#ifndef LLVM_LIB_TARGET_RISCV_RISCVISELLOWERING_H14#define LLVM_LIB_TARGET_RISCV_RISCVISELLOWERING_H1516#include "RISCV.h"17#include "llvm/CodeGen/CallingConvLower.h"18#include "llvm/CodeGen/SelectionDAG.h"19#include "llvm/CodeGen/TargetLowering.h"20#include <optional>2122namespace llvm {23class InstructionCost;24class RISCVSubtarget;25struct RISCVRegisterInfo;26class RVVArgDispatcher;2728namespace RISCVISD {29// clang-format off30enum NodeType : unsigned {31FIRST_NUMBER = ISD::BUILTIN_OP_END,32RET_GLUE,33SRET_GLUE,34MRET_GLUE,35CALL,36/// Select with condition operator - This selects between a true value and37/// a false value (ops #3 and #4) based on the boolean result of comparing38/// the lhs and rhs (ops #0 and #1) of a conditional expression with the39/// condition code in op #2, a XLenVT constant from the ISD::CondCode enum.40/// The lhs and rhs are XLenVT integers. The true and false values can be41/// integer or floating point.42SELECT_CC,43BR_CC,44BuildPairF64,45SplitF64,46TAIL,4748// Add the Lo 12 bits from an address. Selected to ADDI.49ADD_LO,50// Get the Hi 20 bits from an address. Selected to LUI.51HI,5253// Represents an AUIPC+ADDI pair. Selected to PseudoLLA.54LLA,5556// Selected as PseudoAddTPRel. Used to emit a TP-relative relocation.57ADD_TPREL,5859// Multiply high for signedxunsigned.60MULHSU,6162// Represents (ADD (SHL a, b), c) with the arguments appearing in the order63// a, b, c. 'b' must be a constant. Maps to sh1add/sh2add/sh3add with zba64// or addsl with XTheadBa.65SHL_ADD,6667// RV64I shifts, directly matching the semantics of the named RISC-V68// instructions.69SLLW,70SRAW,71SRLW,72// 32-bit operations from RV64M that can't be simply matched with a pattern73// at instruction selection time. These have undefined behavior for division74// by 0 or overflow (divw) like their target independent counterparts.75DIVW,76DIVUW,77REMUW,78// RV64IB rotates, directly matching the semantics of the named RISC-V79// instructions.80ROLW,81RORW,82// RV64IZbb bit counting instructions directly matching the semantics of the83// named RISC-V instructions.84CLZW,85CTZW,8687// RV64IZbb absolute value for i32. Expanded to (max (negw X), X) during isel.88ABSW,8990// FPR<->GPR transfer operations when the FPR is smaller than XLEN, needed as91// XLEN is the only legal integer width.92//93// FMV_H_X matches the semantics of the FMV.H.X.94// FMV_X_ANYEXTH is similar to FMV.X.H but has an any-extended result.95// FMV_X_SIGNEXTH is similar to FMV.X.H and has a sign-extended result.96// FMV_W_X_RV64 matches the semantics of the FMV.W.X.97// FMV_X_ANYEXTW_RV64 is similar to FMV.X.W but has an any-extended result.98//99// This is a more convenient semantic for producing dagcombines that remove100// unnecessary GPR->FPR->GPR moves.101FMV_H_X,102FMV_X_ANYEXTH,103FMV_X_SIGNEXTH,104FMV_W_X_RV64,105FMV_X_ANYEXTW_RV64,106// FP to XLen int conversions. Corresponds to fcvt.l(u).s/d/h on RV64 and107// fcvt.w(u).s/d/h on RV32. Unlike FP_TO_S/UINT these saturate out of108// range inputs. These are used for FP_TO_S/UINT_SAT lowering. Rounding mode109// is passed as a TargetConstant operand using the RISCVFPRndMode enum.110FCVT_X,111FCVT_XU,112// FP to 32 bit int conversions for RV64. These are used to keep track of the113// result being sign extended to 64 bit. These saturate out of range inputs.114// Used for FP_TO_S/UINT and FP_TO_S/UINT_SAT lowering. Rounding mode115// is passed as a TargetConstant operand using the RISCVFPRndMode enum.116FCVT_W_RV64,117FCVT_WU_RV64,118119FP_ROUND_BF16,120FP_EXTEND_BF16,121122// Rounds an FP value to its corresponding integer in the same FP format.123// First operand is the value to round, the second operand is the largest124// integer that can be represented exactly in the FP format. This will be125// expanded into multiple instructions and basic blocks with a custom126// inserter.127FROUND,128129FCLASS,130131// Floating point fmax and fmin matching the RISC-V instruction semantics.132FMAX, FMIN,133134// A read of the 64-bit counter CSR on a 32-bit target (returns (Lo, Hi)).135// It takes a chain operand and another two target constant operands (the136// CSR numbers of the low and high parts of the counter).137READ_COUNTER_WIDE,138139// brev8, orc.b, zip, and unzip from Zbb and Zbkb. All operands are i32 or140// XLenVT.141BREV8,142ORC_B,143ZIP,144UNZIP,145146// Scalar cryptography147CLMUL, CLMULH, CLMULR,148SHA256SIG0, SHA256SIG1, SHA256SUM0, SHA256SUM1,149SM4KS, SM4ED,150SM3P0, SM3P1,151152// May-Be-Operations153MOPR, MOPRR,154155// Vector Extension156FIRST_VL_VECTOR_OP,157// VMV_V_V_VL matches the semantics of vmv.v.v but includes an extra operand158// for the VL value to be used for the operation. The first operand is159// passthru operand.160VMV_V_V_VL = FIRST_VL_VECTOR_OP,161// VMV_V_X_VL matches the semantics of vmv.v.x but includes an extra operand162// for the VL value to be used for the operation. The first operand is163// passthru operand.164VMV_V_X_VL,165// VFMV_V_F_VL matches the semantics of vfmv.v.f but includes an extra operand166// for the VL value to be used for the operation. The first operand is167// passthru operand.168VFMV_V_F_VL,169// VMV_X_S matches the semantics of vmv.x.s. The result is always XLenVT sign170// extended from the vector element size.171VMV_X_S,172// VMV_S_X_VL matches the semantics of vmv.s.x. It carries a VL operand.173VMV_S_X_VL,174// VFMV_S_F_VL matches the semantics of vfmv.s.f. It carries a VL operand.175VFMV_S_F_VL,176// Splats an 64-bit value that has been split into two i32 parts. This is177// expanded late to two scalar stores and a stride 0 vector load.178// The first operand is passthru operand.179SPLAT_VECTOR_SPLIT_I64_VL,180// Truncates a RVV integer vector by one power-of-two. Carries both an extra181// mask and VL operand.182TRUNCATE_VECTOR_VL,183// Matches the semantics of vslideup/vslidedown. The first operand is the184// pass-thru operand, the second is the source vector, the third is the XLenVT185// index (either constant or non-constant), the fourth is the mask, the fifth186// is the VL and the sixth is the policy.187VSLIDEUP_VL,188VSLIDEDOWN_VL,189// Matches the semantics of vslide1up/slide1down. The first operand is190// passthru operand, the second is source vector, third is the XLenVT scalar191// value. The fourth and fifth operands are the mask and VL operands.192VSLIDE1UP_VL,193VSLIDE1DOWN_VL,194// Matches the semantics of vfslide1up/vfslide1down. The first operand is195// passthru operand, the second is source vector, third is a scalar value196// whose type matches the element type of the vectors. The fourth and fifth197// operands are the mask and VL operands.198VFSLIDE1UP_VL,199VFSLIDE1DOWN_VL,200// Matches the semantics of the vid.v instruction, with a mask and VL201// operand.202VID_VL,203// Matches the semantics of the vfcnvt.rod function (Convert double-width204// float to single-width float, rounding towards odd). Takes a double-width205// float vector and produces a single-width float vector. Also has a mask and206// VL operand.207VFNCVT_ROD_VL,208// These nodes match the semantics of the corresponding RVV vector reduction209// instructions. They produce a vector result which is the reduction210// performed over the second vector operand plus the first element of the211// third vector operand. The first operand is the pass-thru operand. The212// second operand is an unconstrained vector type, and the result, first, and213// third operand's types are expected to be the corresponding full-width214// LMUL=1 type for the second operand:215// nxv8i8 = vecreduce_add nxv8i8, nxv32i8, nxv8i8216// nxv2i32 = vecreduce_add nxv2i32, nxv8i32, nxv2i32217// The different in types does introduce extra vsetvli instructions but218// similarly it reduces the number of registers consumed per reduction.219// Also has a mask and VL operand.220VECREDUCE_ADD_VL,221VECREDUCE_UMAX_VL,222VECREDUCE_SMAX_VL,223VECREDUCE_UMIN_VL,224VECREDUCE_SMIN_VL,225VECREDUCE_AND_VL,226VECREDUCE_OR_VL,227VECREDUCE_XOR_VL,228VECREDUCE_FADD_VL,229VECREDUCE_SEQ_FADD_VL,230VECREDUCE_FMIN_VL,231VECREDUCE_FMAX_VL,232233// Vector binary ops with a merge as a third operand, a mask as a fourth234// operand, and VL as a fifth operand.235ADD_VL,236AND_VL,237MUL_VL,238OR_VL,239SDIV_VL,240SHL_VL,241SREM_VL,242SRA_VL,243SRL_VL,244ROTL_VL,245ROTR_VL,246SUB_VL,247UDIV_VL,248UREM_VL,249XOR_VL,250SMIN_VL,251SMAX_VL,252UMIN_VL,253UMAX_VL,254255BITREVERSE_VL,256BSWAP_VL,257CTLZ_VL,258CTTZ_VL,259CTPOP_VL,260261SADDSAT_VL,262UADDSAT_VL,263SSUBSAT_VL,264USUBSAT_VL,265266// Averaging adds of signed integers.267AVGFLOORS_VL,268// Averaging adds of unsigned integers.269AVGFLOORU_VL,270// Rounding averaging adds of signed integers.271AVGCEILS_VL,272// Rounding averaging adds of unsigned integers.273AVGCEILU_VL,274275// Operands are (source, shift, merge, mask, roundmode, vl)276VNCLIPU_VL,277VNCLIP_VL,278279MULHS_VL,280MULHU_VL,281FADD_VL,282FSUB_VL,283FMUL_VL,284FDIV_VL,285VFMIN_VL,286VFMAX_VL,287288// Vector unary ops with a mask as a second operand and VL as a third operand.289FNEG_VL,290FABS_VL,291FSQRT_VL,292FCLASS_VL,293FCOPYSIGN_VL, // Has a merge operand294VFCVT_RTZ_X_F_VL,295VFCVT_RTZ_XU_F_VL,296VFCVT_X_F_VL,297VFCVT_XU_F_VL,298VFROUND_NOEXCEPT_VL,299VFCVT_RM_X_F_VL, // Has a rounding mode operand.300VFCVT_RM_XU_F_VL, // Has a rounding mode operand.301SINT_TO_FP_VL,302UINT_TO_FP_VL,303VFCVT_RM_F_X_VL, // Has a rounding mode operand.304VFCVT_RM_F_XU_VL, // Has a rounding mode operand.305FP_ROUND_VL,306FP_EXTEND_VL,307308// Vector FMA ops with a mask as a fourth operand and VL as a fifth operand.309VFMADD_VL,310VFNMADD_VL,311VFMSUB_VL,312VFNMSUB_VL,313314// Vector widening FMA ops with a mask as a fourth operand and VL as a fifth315// operand.316VFWMADD_VL,317VFWNMADD_VL,318VFWMSUB_VL,319VFWNMSUB_VL,320321// Widening instructions with a merge value a third operand, a mask as a322// fourth operand, and VL as a fifth operand.323VWMUL_VL,324VWMULU_VL,325VWMULSU_VL,326VWADD_VL,327VWADDU_VL,328VWSUB_VL,329VWSUBU_VL,330VWADD_W_VL,331VWADDU_W_VL,332VWSUB_W_VL,333VWSUBU_W_VL,334VWSLL_VL,335336VFWMUL_VL,337VFWADD_VL,338VFWSUB_VL,339VFWADD_W_VL,340VFWSUB_W_VL,341342// Widening ternary operations with a mask as the fourth operand and VL as the343// fifth operand.344VWMACC_VL,345VWMACCU_VL,346VWMACCSU_VL,347348// Narrowing logical shift right.349// Operands are (source, shift, passthru, mask, vl)350VNSRL_VL,351352// Vector compare producing a mask. Fourth operand is input mask. Fifth353// operand is VL.354SETCC_VL,355356// General vmerge node with mask, true, false, passthru, and vl operands.357// Tail agnostic vselect can be implemented by setting passthru to undef.358VMERGE_VL,359360// Mask binary operators.361VMAND_VL,362VMOR_VL,363VMXOR_VL,364365// Set mask vector to all zeros or ones.366VMCLR_VL,367VMSET_VL,368369// Matches the semantics of vrgather.vx and vrgather.vv with extra operands370// for passthru and VL. Operands are (src, index, mask, passthru, vl).371VRGATHER_VX_VL,372VRGATHER_VV_VL,373VRGATHEREI16_VV_VL,374375// Vector sign/zero extend with additional mask & VL operands.376VSEXT_VL,377VZEXT_VL,378379// vcpop.m with additional mask and VL operands.380VCPOP_VL,381382// vfirst.m with additional mask and VL operands.383VFIRST_VL,384385LAST_VL_VECTOR_OP = VFIRST_VL,386387// Read VLENB CSR388READ_VLENB,389// Reads value of CSR.390// The first operand is a chain pointer. The second specifies address of the391// required CSR. Two results are produced, the read value and the new chain392// pointer.393READ_CSR,394// Write value to CSR.395// The first operand is a chain pointer, the second specifies address of the396// required CSR and the third is the value to write. The result is the new397// chain pointer.398WRITE_CSR,399// Read and write value of CSR.400// The first operand is a chain pointer, the second specifies address of the401// required CSR and the third is the value to write. Two results are produced,402// the value read before the modification and the new chain pointer.403SWAP_CSR,404405// Branchless select operations, matching the semantics of the instructions406// defined in Zicond or XVentanaCondOps.407CZERO_EQZ, // vt.maskc for XVentanaCondOps.408CZERO_NEZ, // vt.maskcn for XVentanaCondOps.409410/// Software guarded BRIND node. Operand 0 is the chain operand and411/// operand 1 is the target address.412SW_GUARDED_BRIND,413414// FP to 32 bit int conversions for RV64. These are used to keep track of the415// result being sign extended to 64 bit. These saturate out of range inputs.416STRICT_FCVT_W_RV64 = ISD::FIRST_TARGET_STRICTFP_OPCODE,417STRICT_FCVT_WU_RV64,418STRICT_FADD_VL,419STRICT_FSUB_VL,420STRICT_FMUL_VL,421STRICT_FDIV_VL,422STRICT_FSQRT_VL,423STRICT_VFMADD_VL,424STRICT_VFNMADD_VL,425STRICT_VFMSUB_VL,426STRICT_VFNMSUB_VL,427STRICT_FP_ROUND_VL,428STRICT_FP_EXTEND_VL,429STRICT_VFNCVT_ROD_VL,430STRICT_SINT_TO_FP_VL,431STRICT_UINT_TO_FP_VL,432STRICT_VFCVT_RM_X_F_VL,433STRICT_VFCVT_RTZ_X_F_VL,434STRICT_VFCVT_RTZ_XU_F_VL,435STRICT_FSETCC_VL,436STRICT_FSETCCS_VL,437STRICT_VFROUND_NOEXCEPT_VL,438LAST_RISCV_STRICTFP_OPCODE = STRICT_VFROUND_NOEXCEPT_VL,439440SF_VC_XV_SE,441SF_VC_IV_SE,442SF_VC_VV_SE,443SF_VC_FV_SE,444SF_VC_XVV_SE,445SF_VC_IVV_SE,446SF_VC_VVV_SE,447SF_VC_FVV_SE,448SF_VC_XVW_SE,449SF_VC_IVW_SE,450SF_VC_VVW_SE,451SF_VC_FVW_SE,452SF_VC_V_X_SE,453SF_VC_V_I_SE,454SF_VC_V_XV_SE,455SF_VC_V_IV_SE,456SF_VC_V_VV_SE,457SF_VC_V_FV_SE,458SF_VC_V_XVV_SE,459SF_VC_V_IVV_SE,460SF_VC_V_VVV_SE,461SF_VC_V_FVV_SE,462SF_VC_V_XVW_SE,463SF_VC_V_IVW_SE,464SF_VC_V_VVW_SE,465SF_VC_V_FVW_SE,466467// WARNING: Do not add anything in the end unless you want the node to468// have memop! In fact, starting from FIRST_TARGET_MEMORY_OPCODE all469// opcodes will be thought as target memory ops!470471TH_LWD = ISD::FIRST_TARGET_MEMORY_OPCODE,472TH_LWUD,473TH_LDD,474TH_SWD,475TH_SDD,476};477// clang-format on478} // namespace RISCVISD479480class RISCVTargetLowering : public TargetLowering {481const RISCVSubtarget &Subtarget;482483public:484explicit RISCVTargetLowering(const TargetMachine &TM,485const RISCVSubtarget &STI);486487const RISCVSubtarget &getSubtarget() const { return Subtarget; }488489bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,490MachineFunction &MF,491unsigned Intrinsic) const override;492bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,493unsigned AS,494Instruction *I = nullptr) const override;495bool isLegalICmpImmediate(int64_t Imm) const override;496bool isLegalAddImmediate(int64_t Imm) const override;497bool isTruncateFree(Type *SrcTy, Type *DstTy) const override;498bool isTruncateFree(EVT SrcVT, EVT DstVT) const override;499bool isTruncateFree(SDValue Val, EVT VT2) const override;500bool isZExtFree(SDValue Val, EVT VT2) const override;501bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override;502bool signExtendConstant(const ConstantInt *CI) const override;503bool isCheapToSpeculateCttz(Type *Ty) const override;504bool isCheapToSpeculateCtlz(Type *Ty) const override;505bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;506bool hasAndNotCompare(SDValue Y) const override;507bool hasBitTest(SDValue X, SDValue Y) const override;508bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(509SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,510unsigned OldShiftOpcode, unsigned NewShiftOpcode,511SelectionDAG &DAG) const override;512/// Return true if the (vector) instruction I will be lowered to an instruction513/// with a scalar splat operand for the given Operand number.514bool canSplatOperand(Instruction *I, int Operand) const;515/// Return true if a vector instruction will lower to a target instruction516/// able to splat the given operand.517bool canSplatOperand(unsigned Opcode, int Operand) const;518bool shouldSinkOperands(Instruction *I,519SmallVectorImpl<Use *> &Ops) const override;520bool shouldScalarizeBinop(SDValue VecOp) const override;521bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;522std::pair<int, bool> getLegalZfaFPImm(const APFloat &Imm, EVT VT) const;523bool isFPImmLegal(const APFloat &Imm, EVT VT,524bool ForCodeSize) const override;525bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,526unsigned Index) const override;527528bool isIntDivCheap(EVT VT, AttributeList Attr) const override;529530bool preferScalarizeSplat(SDNode *N) const override;531532bool softPromoteHalfType() const override { return true; }533534/// Return the register type for a given MVT, ensuring vectors are treated535/// as a series of gpr sized integers.536MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,537EVT VT) const override;538539/// Return the number of registers for a given MVT, ensuring vectors are540/// treated as a series of gpr sized integers.541unsigned getNumRegistersForCallingConv(LLVMContext &Context,542CallingConv::ID CC,543EVT VT) const override;544545unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context,546CallingConv::ID CC, EVT VT,547EVT &IntermediateVT,548unsigned &NumIntermediates,549MVT &RegisterVT) const override;550551bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode,552EVT VT) const override;553554/// Return true if the given shuffle mask can be codegen'd directly, or if it555/// should be stack expanded.556bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;557558bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const override {559// If the pair to store is a mixture of float and int values, we will560// save two bitwise instructions and one float-to-int instruction and561// increase one store instruction. There is potentially a more562// significant benefit because it avoids the float->int domain switch563// for input value. So It is more likely a win.564if ((LTy.isFloatingPoint() && HTy.isInteger()) ||565(LTy.isInteger() && HTy.isFloatingPoint()))566return true;567// If the pair only contains int values, we will save two bitwise568// instructions and increase one store instruction (costing one more569// store buffer). Since the benefit is more blurred we leave such a pair570// out until we get testcase to prove it is a win.571return false;572}573574bool575shouldExpandBuildVectorWithShuffles(EVT VT,576unsigned DefinedValues) const override;577578bool shouldExpandCttzElements(EVT VT) const override;579580/// Return the cost of LMUL for linear operations.581InstructionCost getLMULCost(MVT VT) const;582583InstructionCost getVRGatherVVCost(MVT VT) const;584InstructionCost getVRGatherVICost(MVT VT) const;585InstructionCost getVSlideVXCost(MVT VT) const;586InstructionCost getVSlideVICost(MVT VT) const;587588// Provide custom lowering hooks for some operations.589SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;590void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,591SelectionDAG &DAG) const override;592593SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;594595bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,596const APInt &DemandedElts,597TargetLoweringOpt &TLO) const override;598599void computeKnownBitsForTargetNode(const SDValue Op,600KnownBits &Known,601const APInt &DemandedElts,602const SelectionDAG &DAG,603unsigned Depth) const override;604unsigned ComputeNumSignBitsForTargetNode(SDValue Op,605const APInt &DemandedElts,606const SelectionDAG &DAG,607unsigned Depth) const override;608609bool canCreateUndefOrPoisonForTargetNode(SDValue Op,610const APInt &DemandedElts,611const SelectionDAG &DAG,612bool PoisonOnly, bool ConsiderFlags,613unsigned Depth) const override;614615const Constant *getTargetConstantFromLoad(LoadSDNode *LD) const override;616617// This method returns the name of a target specific DAG node.618const char *getTargetNodeName(unsigned Opcode) const override;619620MachineMemOperand::Flags621getTargetMMOFlags(const Instruction &I) const override;622623MachineMemOperand::Flags624getTargetMMOFlags(const MemSDNode &Node) const override;625626bool627areTwoSDNodeTargetMMOFlagsMergeable(const MemSDNode &NodeX,628const MemSDNode &NodeY) const override;629630ConstraintType getConstraintType(StringRef Constraint) const override;631632InlineAsm::ConstraintCode633getInlineAsmMemConstraint(StringRef ConstraintCode) const override;634635std::pair<unsigned, const TargetRegisterClass *>636getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,637StringRef Constraint, MVT VT) const override;638639void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint,640std::vector<SDValue> &Ops,641SelectionDAG &DAG) const override;642643MachineBasicBlock *644EmitInstrWithCustomInserter(MachineInstr &MI,645MachineBasicBlock *BB) const override;646647void AdjustInstrPostInstrSelection(MachineInstr &MI,648SDNode *Node) const override;649650EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,651EVT VT) const override;652653bool shouldFormOverflowOp(unsigned Opcode, EVT VT,654bool MathUsed) const override {655if (VT == MVT::i8 || VT == MVT::i16)656return false;657658return TargetLowering::shouldFormOverflowOp(Opcode, VT, MathUsed);659}660661bool storeOfVectorConstantIsCheap(bool IsZero, EVT MemVT, unsigned NumElem,662unsigned AddrSpace) const override {663// If we can replace 4 or more scalar stores, there will be a reduction664// in instructions even after we add a vector constant load.665return NumElem >= 4;666}667668bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {669return VT.isScalarInteger();670}671bool convertSelectOfConstantsToMath(EVT VT) const override { return true; }672673bool isCtpopFast(EVT VT) const override;674675unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const override;676677bool preferZeroCompareBranch() const override { return true; }678679bool shouldInsertFencesForAtomic(const Instruction *I) const override {680return isa<LoadInst>(I) || isa<StoreInst>(I);681}682Instruction *emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst,683AtomicOrdering Ord) const override;684Instruction *emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,685AtomicOrdering Ord) const override;686687bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,688EVT VT) const override;689690ISD::NodeType getExtendForAtomicOps() const override {691return ISD::SIGN_EXTEND;692}693694ISD::NodeType getExtendForAtomicCmpSwapArg() const override;695696bool shouldTransformSignedTruncationCheck(EVT XVT,697unsigned KeptBits) const override;698699TargetLowering::ShiftLegalizationStrategy700preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N,701unsigned ExpansionFactor) const override {702if (DAG.getMachineFunction().getFunction().hasMinSize())703return ShiftLegalizationStrategy::LowerToLibcall;704return TargetLowering::preferredShiftLegalizationStrategy(DAG, N,705ExpansionFactor);706}707708bool isDesirableToCommuteWithShift(const SDNode *N,709CombineLevel Level) const override;710711/// If a physical register, this returns the register that receives the712/// exception address on entry to an EH pad.713Register714getExceptionPointerRegister(const Constant *PersonalityFn) const override;715716/// If a physical register, this returns the register that receives the717/// exception typeid on entry to a landing pad.718Register719getExceptionSelectorRegister(const Constant *PersonalityFn) const override;720721bool shouldExtendTypeInLibCall(EVT Type) const override;722bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const override;723724/// Returns the register with the specified architectural or ABI name. This725/// method is necessary to lower the llvm.read_register.* and726/// llvm.write_register.* intrinsics. Allocatable registers must be reserved727/// with the clang -ffixed-xX flag for access to be allowed.728Register getRegisterByName(const char *RegName, LLT VT,729const MachineFunction &MF) const override;730731// Lower incoming arguments, copy physregs into vregs732SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,733bool IsVarArg,734const SmallVectorImpl<ISD::InputArg> &Ins,735const SDLoc &DL, SelectionDAG &DAG,736SmallVectorImpl<SDValue> &InVals) const override;737bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,738bool IsVarArg,739const SmallVectorImpl<ISD::OutputArg> &Outs,740LLVMContext &Context) const override;741SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,742const SmallVectorImpl<ISD::OutputArg> &Outs,743const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,744SelectionDAG &DAG) const override;745SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,746SmallVectorImpl<SDValue> &InVals) const override;747748bool shouldConvertConstantLoadToIntImm(const APInt &Imm,749Type *Ty) const override;750bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;751bool mayBeEmittedAsTailCall(const CallInst *CI) const override;752bool shouldConsiderGEPOffsetSplit() const override { return true; }753754bool decomposeMulByConstant(LLVMContext &Context, EVT VT,755SDValue C) const override;756757bool isMulAddWithConstProfitable(SDValue AddNode,758SDValue ConstNode) const override;759760TargetLowering::AtomicExpansionKind761shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;762Value *emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder, AtomicRMWInst *AI,763Value *AlignedAddr, Value *Incr,764Value *Mask, Value *ShiftAmt,765AtomicOrdering Ord) const override;766TargetLowering::AtomicExpansionKind767shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CI) const override;768Value *emitMaskedAtomicCmpXchgIntrinsic(IRBuilderBase &Builder,769AtomicCmpXchgInst *CI,770Value *AlignedAddr, Value *CmpVal,771Value *NewVal, Value *Mask,772AtomicOrdering Ord) const override;773774/// Returns true if the target allows unaligned memory accesses of the775/// specified type.776bool allowsMisalignedMemoryAccesses(777EVT VT, unsigned AddrSpace = 0, Align Alignment = Align(1),778MachineMemOperand::Flags Flags = MachineMemOperand::MONone,779unsigned *Fast = nullptr) const override;780781EVT getOptimalMemOpType(const MemOp &Op,782const AttributeList &FuncAttributes) const override;783784bool splitValueIntoRegisterParts(785SelectionDAG & DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,786unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC)787const override;788789SDValue joinRegisterPartsIntoValue(790SelectionDAG & DAG, const SDLoc &DL, const SDValue *Parts,791unsigned NumParts, MVT PartVT, EVT ValueVT,792std::optional<CallingConv::ID> CC) const override;793794// Return the value of VLMax for the given vector type (i.e. SEW and LMUL)795SDValue computeVLMax(MVT VecVT, const SDLoc &DL, SelectionDAG &DAG) const;796797static RISCVII::VLMUL getLMUL(MVT VT);798inline static unsigned computeVLMAX(unsigned VectorBits, unsigned EltSize,799unsigned MinSize) {800// Original equation:801// VLMAX = (VectorBits / EltSize) * LMUL802// where LMUL = MinSize / RISCV::RVVBitsPerBlock803// The following equations have been reordered to prevent loss of precision804// when calculating fractional LMUL.805return ((VectorBits / EltSize) * MinSize) / RISCV::RVVBitsPerBlock;806}807808// Return inclusive (low, high) bounds on the value of VLMAX for the809// given scalable container type given known bounds on VLEN.810static std::pair<unsigned, unsigned>811computeVLMAXBounds(MVT ContainerVT, const RISCVSubtarget &Subtarget);812813static unsigned getRegClassIDForLMUL(RISCVII::VLMUL LMul);814static unsigned getSubregIndexByMVT(MVT VT, unsigned Index);815static unsigned getRegClassIDForVecVT(MVT VT);816static std::pair<unsigned, unsigned>817decomposeSubvectorInsertExtractToSubRegs(MVT VecVT, MVT SubVecVT,818unsigned InsertExtractIdx,819const RISCVRegisterInfo *TRI);820MVT getContainerForFixedLengthVector(MVT VT) const;821822bool shouldRemoveExtendFromGSIndex(SDValue Extend, EVT DataVT) const override;823824bool isLegalElementTypeForRVV(EVT ScalarTy) const;825826bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override;827828unsigned getJumpTableEncoding() const override;829830const MCExpr *LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,831const MachineBasicBlock *MBB,832unsigned uid,833MCContext &Ctx) const override;834835bool isVScaleKnownToBeAPowerOfTwo() const override;836837bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,838ISD::MemIndexedMode &AM, SelectionDAG &DAG) const;839bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,840ISD::MemIndexedMode &AM,841SelectionDAG &DAG) const override;842bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,843SDValue &Offset, ISD::MemIndexedMode &AM,844SelectionDAG &DAG) const override;845846bool isLegalScaleForGatherScatter(uint64_t Scale,847uint64_t ElemSize) const override {848// Scaled addressing not supported on indexed load/stores849return Scale == 1;850}851852/// If the target has a standard location for the stack protector cookie,853/// returns the address of that location. Otherwise, returns nullptr.854Value *getIRStackGuard(IRBuilderBase &IRB) const override;855856/// Returns whether or not generating a interleaved load/store intrinsic for857/// this type will be legal.858bool isLegalInterleavedAccessType(VectorType *VTy, unsigned Factor,859Align Alignment, unsigned AddrSpace,860const DataLayout &) const;861862/// Return true if a stride load store of the given result type and863/// alignment is legal.864bool isLegalStridedLoadStore(EVT DataType, Align Alignment) const;865866unsigned getMaxSupportedInterleaveFactor() const override { return 8; }867868bool fallBackToDAGISel(const Instruction &Inst) const override;869870bool lowerInterleavedLoad(LoadInst *LI,871ArrayRef<ShuffleVectorInst *> Shuffles,872ArrayRef<unsigned> Indices,873unsigned Factor) const override;874875bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,876unsigned Factor) const override;877878bool lowerDeinterleaveIntrinsicToLoad(IntrinsicInst *II,879LoadInst *LI) const override;880881bool lowerInterleaveIntrinsicToStore(IntrinsicInst *II,882StoreInst *SI) const override;883884bool supportKCFIBundles() const override { return true; }885886SDValue expandIndirectJTBranch(const SDLoc &dl, SDValue Value, SDValue Addr,887int JTI, SelectionDAG &DAG) const override;888889MachineInstr *EmitKCFICheck(MachineBasicBlock &MBB,890MachineBasicBlock::instr_iterator &MBBI,891const TargetInstrInfo *TII) const override;892893/// RISCVCCAssignFn - This target-specific function extends the default894/// CCValAssign with additional information used to lower RISC-V calling895/// conventions.896typedef bool RISCVCCAssignFn(const DataLayout &DL, RISCVABI::ABI,897unsigned ValNo, MVT ValVT, MVT LocVT,898CCValAssign::LocInfo LocInfo,899ISD::ArgFlagsTy ArgFlags, CCState &State,900bool IsFixed, bool IsRet, Type *OrigTy,901const RISCVTargetLowering &TLI,902RVVArgDispatcher &RVVDispatcher);903904private:905void analyzeInputArgs(MachineFunction &MF, CCState &CCInfo,906const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,907RISCVCCAssignFn Fn) const;908void analyzeOutputArgs(MachineFunction &MF, CCState &CCInfo,909const SmallVectorImpl<ISD::OutputArg> &Outs,910bool IsRet, CallLoweringInfo *CLI,911RISCVCCAssignFn Fn) const;912913template <class NodeTy>914SDValue getAddr(NodeTy *N, SelectionDAG &DAG, bool IsLocal = true,915bool IsExternWeak = false) const;916SDValue getStaticTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG,917bool UseGOT) const;918SDValue getDynamicTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG) const;919SDValue getTLSDescAddr(GlobalAddressSDNode *N, SelectionDAG &DAG) const;920921SDValue lowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;922SDValue lowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;923SDValue lowerConstantPool(SDValue Op, SelectionDAG &DAG) const;924SDValue lowerJumpTable(SDValue Op, SelectionDAG &DAG) const;925SDValue lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;926SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG) const;927SDValue lowerBRCOND(SDValue Op, SelectionDAG &DAG) const;928SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const;929SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;930SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;931SDValue lowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;932SDValue lowerShiftRightParts(SDValue Op, SelectionDAG &DAG, bool IsSRA) const;933SDValue lowerSPLAT_VECTOR_PARTS(SDValue Op, SelectionDAG &DAG) const;934SDValue lowerVectorMaskSplat(SDValue Op, SelectionDAG &DAG) const;935SDValue lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,936int64_t ExtTrueVal) const;937SDValue lowerVectorMaskTruncLike(SDValue Op, SelectionDAG &DAG) const;938SDValue lowerVectorTruncLike(SDValue Op, SelectionDAG &DAG) const;939SDValue lowerVectorFPExtendOrRoundLike(SDValue Op, SelectionDAG &DAG) const;940SDValue lowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;941SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;942SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;943SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;944SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;945SDValue lowerVPREDUCE(SDValue Op, SelectionDAG &DAG) const;946SDValue lowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;947SDValue lowerVectorMaskVecReduction(SDValue Op, SelectionDAG &DAG,948bool IsVP) const;949SDValue lowerFPVECREDUCE(SDValue Op, SelectionDAG &DAG) const;950SDValue lowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;951SDValue lowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;952SDValue lowerVECTOR_DEINTERLEAVE(SDValue Op, SelectionDAG &DAG) const;953SDValue lowerVECTOR_INTERLEAVE(SDValue Op, SelectionDAG &DAG) const;954SDValue lowerSTEP_VECTOR(SDValue Op, SelectionDAG &DAG) const;955SDValue lowerVECTOR_REVERSE(SDValue Op, SelectionDAG &DAG) const;956SDValue lowerVECTOR_SPLICE(SDValue Op, SelectionDAG &DAG) const;957SDValue lowerABS(SDValue Op, SelectionDAG &DAG) const;958SDValue lowerMaskedLoad(SDValue Op, SelectionDAG &DAG) const;959SDValue lowerMaskedStore(SDValue Op, SelectionDAG &DAG) const;960SDValue lowerFixedLengthVectorFCOPYSIGNToRVV(SDValue Op,961SelectionDAG &DAG) const;962SDValue lowerMaskedGather(SDValue Op, SelectionDAG &DAG) const;963SDValue lowerMaskedScatter(SDValue Op, SelectionDAG &DAG) const;964SDValue lowerFixedLengthVectorLoadToRVV(SDValue Op, SelectionDAG &DAG) const;965SDValue lowerFixedLengthVectorStoreToRVV(SDValue Op, SelectionDAG &DAG) const;966SDValue lowerFixedLengthVectorSetccToRVV(SDValue Op, SelectionDAG &DAG) const;967SDValue lowerFixedLengthVectorSelectToRVV(SDValue Op,968SelectionDAG &DAG) const;969SDValue lowerToScalableOp(SDValue Op, SelectionDAG &DAG) const;970SDValue LowerIS_FPCLASS(SDValue Op, SelectionDAG &DAG) const;971SDValue lowerVPOp(SDValue Op, SelectionDAG &DAG) const;972SDValue lowerLogicVPOp(SDValue Op, SelectionDAG &DAG) const;973SDValue lowerVPExtMaskOp(SDValue Op, SelectionDAG &DAG) const;974SDValue lowerVPSetCCMaskOp(SDValue Op, SelectionDAG &DAG) const;975SDValue lowerVPSplatExperimental(SDValue Op, SelectionDAG &DAG) const;976SDValue lowerVPSpliceExperimental(SDValue Op, SelectionDAG &DAG) const;977SDValue lowerVPReverseExperimental(SDValue Op, SelectionDAG &DAG) const;978SDValue lowerVPFPIntConvOp(SDValue Op, SelectionDAG &DAG) const;979SDValue lowerVPStridedLoad(SDValue Op, SelectionDAG &DAG) const;980SDValue lowerVPStridedStore(SDValue Op, SelectionDAG &DAG) const;981SDValue lowerVPCttzElements(SDValue Op, SelectionDAG &DAG) const;982SDValue lowerFixedLengthVectorExtendToRVV(SDValue Op, SelectionDAG &DAG,983unsigned ExtendOpc) const;984SDValue lowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;985SDValue lowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;986987SDValue lowerEH_DWARF_CFA(SDValue Op, SelectionDAG &DAG) const;988SDValue lowerCTLZ_CTTZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) const;989990SDValue lowerStrictFPExtendOrRoundLike(SDValue Op, SelectionDAG &DAG) const;991992SDValue lowerVectorStrictFSetcc(SDValue Op, SelectionDAG &DAG) const;993994SDValue expandUnalignedRVVLoad(SDValue Op, SelectionDAG &DAG) const;995SDValue expandUnalignedRVVStore(SDValue Op, SelectionDAG &DAG) const;996997bool isEligibleForTailCallOptimization(998CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,999const SmallVector<CCValAssign, 16> &ArgLocs) const;10001001/// Generate error diagnostics if any register used by CC has been marked1002/// reserved.1003void validateCCReservedRegs(1004const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,1005MachineFunction &MF) const;10061007bool useRVVForFixedLengthVectorVT(MVT VT) const;10081009MVT getVPExplicitVectorLengthTy() const override;10101011bool shouldExpandGetVectorLength(EVT TripCountVT, unsigned VF,1012bool IsScalable) const override;10131014/// RVV code generation for fixed length vectors does not lower all1015/// BUILD_VECTORs. This makes BUILD_VECTOR legalisation a source of stores to1016/// merge. However, merging them creates a BUILD_VECTOR that is just as1017/// illegal as the original, thus leading to an infinite legalisation loop.1018/// NOTE: Once BUILD_VECTOR can be custom lowered for all legal vector types,1019/// this override can be removed.1020bool mergeStoresAfterLegalization(EVT VT) const override;10211022/// Disable normalizing1023/// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and1024/// select(N0|N1, X, Y) => select(N0, select(N1, X, Y, Y))1025/// RISC-V doesn't have flags so it's better to perform the and/or in a GPR.1026bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override {1027return false;1028}10291030/// For available scheduling models FDIV + two independent FMULs are much1031/// faster than two FDIVs.1032unsigned combineRepeatedFPDivisors() const override;10331034SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,1035SmallVectorImpl<SDNode *> &Created) const override;10361037bool shouldFoldSelectWithSingleBitTest(EVT VT,1038const APInt &AndMask) const override;10391040unsigned getMinimumJumpTableEntries() const override;10411042SDValue emitFlushICache(SelectionDAG &DAG, SDValue InChain, SDValue Start,1043SDValue End, SDValue Flags, SDLoc DL) const;1044};10451046/// As per the spec, the rules for passing vector arguments are as follows:1047///1048/// 1. For the first vector mask argument, use v0 to pass it.1049/// 2. For vector data arguments or rest vector mask arguments, starting from1050/// the v8 register, if a vector register group between v8-v23 that has not been1051/// allocated can be found and the first register number is a multiple of LMUL,1052/// then allocate this vector register group to the argument and mark these1053/// registers as allocated. Otherwise, pass it by reference and are replaced in1054/// the argument list with the address.1055/// 3. For tuple vector data arguments, starting from the v8 register, if1056/// NFIELDS consecutive vector register groups between v8-v23 that have not been1057/// allocated can be found and the first register number is a multiple of LMUL,1058/// then allocate these vector register groups to the argument and mark these1059/// registers as allocated. Otherwise, pass it by reference and are replaced in1060/// the argument list with the address.1061class RVVArgDispatcher {1062public:1063static constexpr unsigned NumArgVRs = 16;10641065struct RVVArgInfo {1066unsigned NF;1067MVT VT;1068bool FirstVMask = false;1069};10701071template <typename Arg>1072RVVArgDispatcher(const MachineFunction *MF, const RISCVTargetLowering *TLI,1073ArrayRef<Arg> ArgList)1074: MF(MF), TLI(TLI) {1075constructArgInfos(ArgList);1076compute();1077}10781079RVVArgDispatcher() = default;10801081MCPhysReg getNextPhysReg();10821083private:1084SmallVector<RVVArgInfo, 4> RVVArgInfos;1085SmallVector<MCPhysReg, 4> AllocatedPhysRegs;10861087const MachineFunction *MF = nullptr;1088const RISCVTargetLowering *TLI = nullptr;10891090unsigned CurIdx = 0;10911092template <typename Arg> void constructArgInfos(ArrayRef<Arg> Ret);1093void compute();1094void allocatePhysReg(unsigned NF = 1, unsigned LMul = 1,1095unsigned StartReg = 0);1096};10971098namespace RISCV {10991100bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,1101MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,1102ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,1103bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,1104RVVArgDispatcher &RVVDispatcher);11051106bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,1107MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,1108ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,1109bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,1110RVVArgDispatcher &RVVDispatcher);11111112bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,1113CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,1114CCState &State);11151116ArrayRef<MCPhysReg> getArgGPRs(const RISCVABI::ABI ABI);11171118} // end namespace RISCV11191120namespace RISCVVIntrinsicsTable {11211122struct RISCVVIntrinsicInfo {1123unsigned IntrinsicID;1124uint8_t ScalarOperand;1125uint8_t VLOperand;1126bool hasScalarOperand() const {1127// 0xF is not valid. See NoScalarOperand in IntrinsicsRISCV.td.1128return ScalarOperand != 0xF;1129}1130bool hasVLOperand() const {1131// 0x1F is not valid. See NoVLOperand in IntrinsicsRISCV.td.1132return VLOperand != 0x1F;1133}1134};11351136using namespace RISCV;11371138#define GET_RISCVVIntrinsicsTable_DECL1139#include "RISCVGenSearchableTables.inc"1140#undef GET_RISCVVIntrinsicsTable_DECL11411142} // end namespace RISCVVIntrinsicsTable11431144} // end namespace llvm11451146#endif114711481149