Path: blob/main/contrib/llvm-project/llvm/lib/IR/Instructions.cpp
//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements all of the non-inline methods for the LLVM instruction
// classes.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Instructions.h"
#include "LLVMContextImpl.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CheckedArithmetic.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ModRef.h"
#include "llvm/Support/TypeSize.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

using namespace llvm;

static cl::opt<bool> DisableI2pP2iOpt(
    "disable-i2p-p2i-opt", cl::init(false),
    cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));

//===----------------------------------------------------------------------===//
// AllocaInst Class
//===----------------------------------------------------------------------===//

std::optional<TypeSize>
AllocaInst::getAllocationSize(const DataLayout &DL) const {
  TypeSize Size = DL.getTypeAllocSize(getAllocatedType());
  if (isArrayAllocation()) {
    auto *C = dyn_cast<ConstantInt>(getArraySize());
    if (!C)
      return std::nullopt;
    assert(!Size.isScalable() && "Array elements cannot have a scalable size");
    auto CheckedProd =
        checkedMulUnsigned(Size.getKnownMinValue(), C->getZExtValue());
    if (!CheckedProd)
      return std::nullopt;
    return TypeSize::getFixed(*CheckedProd);
  }
  return Size;
}

std::optional<TypeSize>
AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const {
  std::optional<TypeSize> Size = getAllocationSize(DL);
  if (!Size)
    return std::nullopt;
  auto CheckedProd = checkedMulUnsigned(Size->getKnownMinValue(),
                                        static_cast<TypeSize::ScalarTy>(8));
  if (!CheckedProd)
    return std::nullopt;
  return TypeSize::get(*CheckedProd, Size->isScalable());
}

//===----------------------------------------------------------------------===//
// SelectInst Class
//===----------------------------------------------------------------------===//

/// areInvalidOperands - Return a string if the specified operands are invalid
/// for a select operation, otherwise return null.
const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
  if (Op1->getType() != Op2->getType())
    return "both values to select must have same type";

  if (Op1->getType()->isTokenTy())
    return "select values cannot have token type";
type";101102if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {103// Vector select.104if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))105return "vector select condition element type must be i1";106VectorType *ET = dyn_cast<VectorType>(Op1->getType());107if (!ET)108return "selected values for vector select must be vectors";109if (ET->getElementCount() != VT->getElementCount())110return "vector select requires selected vectors to have "111"the same vector length as select condition";112} else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {113return "select condition must be i1 or <n x i1>";114}115return nullptr;116}117118//===----------------------------------------------------------------------===//119// PHINode Class120//===----------------------------------------------------------------------===//121122PHINode::PHINode(const PHINode &PN)123: Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),124ReservedSpace(PN.getNumOperands()) {125allocHungoffUses(PN.getNumOperands());126std::copy(PN.op_begin(), PN.op_end(), op_begin());127copyIncomingBlocks(make_range(PN.block_begin(), PN.block_end()));128SubclassOptionalData = PN.SubclassOptionalData;129}130131// removeIncomingValue - Remove an incoming value. This is useful if a132// predecessor basic block is deleted.133Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {134Value *Removed = getIncomingValue(Idx);135136// Move everything after this operand down.137//138// FIXME: we could just swap with the end of the list, then erase. However,139// clients might not expect this to happen. The code as it is thrashes the140// use/def lists, which is kinda lame.141std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);142copyIncomingBlocks(drop_begin(blocks(), Idx + 1), Idx);143144// Nuke the last value.145Op<-1>().set(nullptr);146setNumHungOffUseOperands(getNumOperands() - 1);147148// If the PHI node is dead, because it has zero entries, nuke it now.149if (getNumOperands() == 0 && DeletePHIIfEmpty) {150// If anyone is using this PHI, make them use a dummy value instead...151replaceAllUsesWith(PoisonValue::get(getType()));152eraseFromParent();153}154return Removed;155}156157void PHINode::removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,158bool DeletePHIIfEmpty) {159SmallDenseSet<unsigned> RemoveIndices;160for (unsigned Idx = 0; Idx < getNumIncomingValues(); ++Idx)161if (Predicate(Idx))162RemoveIndices.insert(Idx);163164if (RemoveIndices.empty())165return;166167// Remove operands.168auto NewOpEnd = remove_if(operands(), [&](Use &U) {169return RemoveIndices.contains(U.getOperandNo());170});171for (Use &U : make_range(NewOpEnd, op_end()))172U.set(nullptr);173174// Remove incoming blocks.175(void)std::remove_if(const_cast<block_iterator>(block_begin()),176const_cast<block_iterator>(block_end()), [&](BasicBlock *&BB) {177return RemoveIndices.contains(&BB - block_begin());178});179180setNumHungOffUseOperands(getNumOperands() - RemoveIndices.size());181182// If the PHI node is dead, because it has zero entries, nuke it now.183if (getNumOperands() == 0 && DeletePHIIfEmpty) {184// If anyone is using this PHI, make them use a dummy value instead...185replaceAllUsesWith(PoisonValue::get(getType()));186eraseFromParent();187}188}189190/// growOperands - grow operands - This grows the operand list in response191/// to a push_back style of operation. 
/// to a push_back style of operation.  This grows the number of ops by 1.5
/// times.
///
void PHINode::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e + e / 2;
  if (NumOps < 2) NumOps = 2;      // 2 op PHI nodes are VERY common.

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace, /* IsPhi */ true);
}

/// hasConstantValue - If the specified PHI node always merges together the same
/// value, return the value, otherwise return null.
Value *PHINode::hasConstantValue() const {
  // Exploit the fact that phi nodes always have at least one entry.
  Value *ConstantValue = getIncomingValue(0);
  for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
    if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
      if (ConstantValue != this)
        return nullptr; // Incoming values not all the same.
      // The case where the first value is this PHI.
      ConstantValue = getIncomingValue(i);
    }
  if (ConstantValue == this)
    return PoisonValue::get(getType());
  return ConstantValue;
}

/// hasConstantOrUndefValue - Whether the specified PHI node always merges
/// together the same value, assuming that undefs result in the same value as
/// non-undefs.
/// Unlike \ref hasConstantValue, this does not return a value because the
/// unique non-undef incoming value need not dominate the PHI node.
bool PHINode::hasConstantOrUndefValue() const {
  Value *ConstantValue = nullptr;
  for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
    Value *Incoming = getIncomingValue(i);
    if (Incoming != this && !isa<UndefValue>(Incoming)) {
      if (ConstantValue && ConstantValue != Incoming)
        return false;
      ConstantValue = Incoming;
    }
  }
  return true;
}

//===----------------------------------------------------------------------===//
// LandingPadInst Implementation
//===----------------------------------------------------------------------===//

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr,
                               InsertPosition InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
  init(NumReservedValues, NameStr);
}

LandingPadInst::LandingPadInst(const LandingPadInst &LP)
    : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
                  LP.getNumOperands()),
      ReservedSpace(LP.getNumOperands()) {
  allocHungoffUses(LP.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = LP.getOperandList();
  for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];

  setCleanup(LP.isCleanup());
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       InsertPosition InsertBefore) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
}

void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(0);
  allocHungoffUses(ReservedSpace);
  setName(NameStr);
  setCleanup(false);
}

/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation.  This grows the number of ops by 2 times.
void LandingPadInst::growOperands(unsigned Size) {
  unsigned e = getNumOperands();
  if (ReservedSpace >= e + Size) return;
  ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

void LandingPadInst::addClause(Constant *Val) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Val;
}

//===----------------------------------------------------------------------===//
// CallBase Implementation
//===----------------------------------------------------------------------===//

CallBase *CallBase::Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
                           InsertPosition InsertPt) {
  switch (CB->getOpcode()) {
  case Instruction::Call:
    return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
  case Instruction::Invoke:
    return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
  case Instruction::CallBr:
    return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
  default:
    llvm_unreachable("Unknown CallBase sub-class!");
  }
}

CallBase *CallBase::Create(CallBase *CI, OperandBundleDef OpB,
                           InsertPosition InsertPt) {
  SmallVector<OperandBundleDef, 2> OpDefs;
  for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
    auto ChildOB = CI->getOperandBundleAt(i);
    if (ChildOB.getTagName() != OpB.getTag())
      OpDefs.emplace_back(ChildOB);
  }
  OpDefs.emplace_back(OpB);
  return CallBase::Create(CI, OpDefs, InsertPt);
}

Function *CallBase::getCaller() { return getParent()->getParent(); }

unsigned CallBase::getNumSubclassExtraOperandsDynamic() const {
  assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
  return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
}

bool CallBase::isIndirectCall() const {
  const Value *V = getCalledOperand();
  if (isa<Function>(V) || isa<Constant>(V))
    return false;
  return !isInlineAsm();
}

/// Tests if this call site must be tail call optimized.  Only a CallInst can
/// be tail call optimized.
bool CallBase::isMustTailCall() const {
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isMustTailCall();
  return false;
}

/// Tests if this call site is marked as a tail call.
bool CallBase::isTailCall() const {
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isTailCall();
  return false;
}

Intrinsic::ID CallBase::getIntrinsicID() const {
  if (auto *F = getCalledFunction())
    return F->getIntrinsicID();
  return Intrinsic::not_intrinsic;
}

FPClassTest CallBase::getRetNoFPClass() const {
  FPClassTest Mask = Attrs.getRetNoFPClass();

  if (const Function *F = getCalledFunction())
    Mask |= F->getAttributes().getRetNoFPClass();
  return Mask;
}

FPClassTest CallBase::getParamNoFPClass(unsigned i) const {
  FPClassTest Mask = Attrs.getParamNoFPClass(i);

  if (const Function *F = getCalledFunction())
    Mask |= F->getAttributes().getParamNoFPClass(i);
  return Mask;
}

std::optional<ConstantRange> CallBase::getRange() const {
  const Attribute RangeAttr = getRetAttr(llvm::Attribute::Range);
  if (RangeAttr.isValid())
    return RangeAttr.getRange();
  return std::nullopt;
}

bool CallBase::isReturnNonNull() const {
  if (hasRetAttr(Attribute::NonNull))
    return true;

  if (getRetDereferenceableBytes() > 0 &&
      !NullPointerIsDefined(getCaller(), getType()->getPointerAddressSpace()))
    return true;

  return false;
}

Value *CallBase::getArgOperandWithAttribute(Attribute::AttrKind Kind) const {
  unsigned Index;

  if (Attrs.hasAttrSomewhere(Kind, &Index))
    return getArgOperand(Index - AttributeList::FirstArgIndex);
  if (const Function *F = getCalledFunction())
    if (F->getAttributes().hasAttrSomewhere(Kind, &Index))
      return getArgOperand(Index - AttributeList::FirstArgIndex);

  return nullptr;
}

/// Determine whether the argument or parameter has the given attribute.
bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
  assert(ArgNo < arg_size() && "Param index out of bounds!");

  if (Attrs.hasParamAttr(ArgNo, Kind))
    return true;

  const Function *F = getCalledFunction();
  if (!F)
    return false;

  if (!F->getAttributes().hasParamAttr(ArgNo, Kind))
    return false;

  // Take into account mod/ref by operand bundles.
  switch (Kind) {
  case Attribute::ReadNone:
    return !hasReadingOperandBundles() && !hasClobberingOperandBundles();
  case Attribute::ReadOnly:
    return !hasClobberingOperandBundles();
  case Attribute::WriteOnly:
    return !hasReadingOperandBundles();
  default:
    return true;
  }
}

bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
  if (auto *F = dyn_cast<Function>(getCalledOperand()))
    return F->getAttributes().hasFnAttr(Kind);

  return false;
}

bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
  if (auto *F = dyn_cast<Function>(getCalledOperand()))
    return F->getAttributes().hasFnAttr(Kind);

  return false;
}

template <typename AK>
Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
  if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
    // getMemoryEffects() correctly combines memory effects from the call-site,
    // operand bundles and function.
    assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");
  }

  if (auto *F = dyn_cast<Function>(getCalledOperand()))
    return F->getAttributes().getFnAttr(Kind);

  return Attribute();
}

template Attribute
CallBase::getFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
template Attribute CallBase::getFnAttrOnCalledFunction(StringRef Kind) const;

template <typename AK>
Attribute CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
                                                 AK Kind) const {
  Value *V = getCalledOperand();

  if (auto *F = dyn_cast<Function>(V))
    return F->getAttributes().getParamAttr(ArgNo, Kind);

  return Attribute();
}
template Attribute
CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
                                       Attribute::AttrKind Kind) const;
template Attribute CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
                                                          StringRef Kind) const;

void CallBase::getOperandBundlesAsDefs(
    SmallVectorImpl<OperandBundleDef> &Defs) const {
  for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
    Defs.emplace_back(getOperandBundleAt(i));
}

CallBase::op_iterator
CallBase::populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
                                     const unsigned BeginIndex) {
  auto It = op_begin() + BeginIndex;
  for (auto &B : Bundles)
    It = std::copy(B.input_begin(), B.input_end(), It);

  auto *ContextImpl = getContext().pImpl;
  auto BI = Bundles.begin();
  unsigned CurrentIndex = BeginIndex;

  for (auto &BOI : bundle_op_infos()) {
    assert(BI != Bundles.end() && "Incorrect allocation?");

    BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
    BOI.Begin = CurrentIndex;
    BOI.End = CurrentIndex + BI->input_size();
    CurrentIndex = BOI.End;
    BI++;
  }

  assert(BI == Bundles.end() && "Incorrect allocation?");

  return It;
}

CallBase::BundleOpInfo &CallBase::getBundleOpInfoForOperand(unsigned OpIdx) {
  /// When there aren't many bundles, we do a simple linear search.
  /// Otherwise fall back to a binary search that uses the fact that bundles
  /// usually have a similar number of arguments to get faster convergence.
  if (bundle_op_info_end() - bundle_op_info_begin() < 8) {
    for (auto &BOI : bundle_op_infos())
      if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
        return BOI;

    llvm_unreachable("Did not find operand bundle for operand!");
  }

  assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
  assert(bundle_op_info_end() - bundle_op_info_begin() > 0 &&
         OpIdx < std::prev(bundle_op_info_end())->End &&
         "The Idx isn't in the operand bundle");

  /// We need a decimal number below and to prevent using floating point numbers
  /// we use an integral value multiplied by this constant.
  constexpr unsigned NumberScaling = 1024;

  bundle_op_iterator Begin = bundle_op_info_begin();
  bundle_op_iterator End = bundle_op_info_end();
  bundle_op_iterator Current = Begin;

  while (Begin != End) {
    unsigned ScaledOperandPerBundle =
        NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
    Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
                       ScaledOperandPerBundle);
    if (Current >= End)
      Current = std::prev(End);
    assert(Current < End && Current >= Begin &&
           "the operand bundle doesn't cover every value in the range");
    if (OpIdx >= Current->Begin && OpIdx < Current->End)
      break;
    if (OpIdx >= Current->End)
      Begin = Current + 1;
    else
      End = Current;
  }

  assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
         "the operand bundle doesn't cover every value in the range");
  return *Current;
}

CallBase *CallBase::addOperandBundle(CallBase *CB, uint32_t ID,
                                     OperandBundleDef OB,
                                     InsertPosition InsertPt) {
  if (CB->getOperandBundle(ID))
    return CB;

  SmallVector<OperandBundleDef, 1> Bundles;
  CB->getOperandBundlesAsDefs(Bundles);
  Bundles.push_back(OB);
  return Create(CB, Bundles, InsertPt);
}

CallBase *CallBase::removeOperandBundle(CallBase *CB, uint32_t ID,
                                        InsertPosition InsertPt) {
  SmallVector<OperandBundleDef, 1> Bundles;
  bool CreateNew = false;

  for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
    auto Bundle = CB->getOperandBundleAt(I);
    if (Bundle.getTagID() == ID) {
      CreateNew = true;
      continue;
    }
    Bundles.emplace_back(Bundle);
  }

  return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
}

bool CallBase::hasReadingOperandBundles() const {
  // Implementation note: this is a conservative implementation of operand
  // bundle semantics, where *any* non-assume operand bundle (other than
  // ptrauth) forces a callsite to be at least readonly.
  return hasOperandBundlesOtherThan(
             {LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi}) &&
         getIntrinsicID() != Intrinsic::assume;
}

bool CallBase::hasClobberingOperandBundles() const {
  return hasOperandBundlesOtherThan(
             {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
              LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi}) &&
         getIntrinsicID() != Intrinsic::assume;
}

MemoryEffects CallBase::getMemoryEffects() const {
  MemoryEffects ME = getAttributes().getMemoryEffects();
  if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
    MemoryEffects FnME = Fn->getMemoryEffects();
    if (hasOperandBundles()) {
      // TODO: Add a method to get memory effects for operand bundles instead.
      if (hasReadingOperandBundles())
        FnME |= MemoryEffects::readOnly();
      if (hasClobberingOperandBundles())
        FnME |= MemoryEffects::writeOnly();
    }
    ME &= FnME;
  }
  return ME;
}
void CallBase::setMemoryEffects(MemoryEffects ME) {
  addFnAttr(Attribute::getWithMemoryEffects(getContext(), ME));
}

/// Determine if the function does not access memory.
bool CallBase::doesNotAccessMemory() const {
  return getMemoryEffects().doesNotAccessMemory();
}
void CallBase::setDoesNotAccessMemory() {
  setMemoryEffects(MemoryEffects::none());
}

/// Determine if the function does not access or only reads memory.
bool CallBase::onlyReadsMemory() const {
  return getMemoryEffects().onlyReadsMemory();
}
void CallBase::setOnlyReadsMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::readOnly());
}

/// Determine if the function does not access or only writes memory.
bool CallBase::onlyWritesMemory() const {
  return getMemoryEffects().onlyWritesMemory();
}
void CallBase::setOnlyWritesMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::writeOnly());
}

/// Determine if the call can access memory only using pointers based
/// on its arguments.
bool CallBase::onlyAccessesArgMemory() const {
  return getMemoryEffects().onlyAccessesArgPointees();
}
void CallBase::setOnlyAccessesArgMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::argMemOnly());
}

/// Determine if the function may only access memory that is
/// inaccessible from the IR.
bool CallBase::onlyAccessesInaccessibleMemory() const {
  return getMemoryEffects().onlyAccessesInaccessibleMem();
}
void CallBase::setOnlyAccessesInaccessibleMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::inaccessibleMemOnly());
}

/// Determine if the function may only access memory that is
/// either inaccessible from the IR or pointed to by its arguments.
bool CallBase::onlyAccessesInaccessibleMemOrArgMem() const {
  return getMemoryEffects().onlyAccessesInaccessibleOrArgMem();
}
void CallBase::setOnlyAccessesInaccessibleMemOrArgMem() {
  setMemoryEffects(getMemoryEffects() &
                   MemoryEffects::inaccessibleOrArgMemOnly());
}

//===----------------------------------------------------------------------===//
// CallInst Implementation
//===----------------------------------------------------------------------===//

void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert((Args.size() == FTy->getNumParams() ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature!");

  for (unsigned i = 0; i != Args.size(); ++i)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  llvm::copy(Args, op_begin());
  setCalledOperand(Func);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 1 == op_end() && "Should add up!");

  setName(NameStr);
}

void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == 1 && "NumOperands not set up?");
  setCalledOperand(Func);

  assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");

  setName(NameStr);
}

CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   InsertPosition InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
  init(Ty, Func, Name);
}

CallInst::CallInst(const CallInst &CI)
    : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - CI.getNumOperands(),
               CI.getNumOperands()) {
  setTailCallKind(CI.getTailCallKind());
  setCallingConv(CI.getCallingConv());

  std::copy(CI.op_begin(), CI.op_end(), op_begin());
  std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CI.SubclassOptionalData;
}

CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
                           InsertPosition InsertPt) {
  std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());

  auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
                                 Args, OpB, CI->getName(), InsertPt);
  NewCI->setTailCallKind(CI->getTailCallKind());
  NewCI->setCallingConv(CI->getCallingConv());
  NewCI->SubclassOptionalData = CI->SubclassOptionalData;
  NewCI->setAttributes(CI->getAttributes());
  NewCI->setDebugLoc(CI->getDebugLoc());
  return NewCI;
}

// Update profile weight for call instruction by scaling it using the ratio
// of S/T. The meaning of "branch_weights" meta data for call instruction is
// transferred to represent call count.
void CallInst::updateProfWeight(uint64_t S, uint64_t T) {
  if (T == 0) {
    LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
                         "div by 0. Ignoring. Likely the function "
                      << getParent()->getParent()->getName()
                      << " has 0 entry count, and contains call instructions "
                         "with non-zero prof info.");
    return;
  }
  scaleProfData(*this, S, T);
}

//===----------------------------------------------------------------------===//
// InvokeInst Implementation
//===----------------------------------------------------------------------===//

void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
                      BasicBlock *IfException, ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert((int)getNumOperands() ==
             ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Invoking a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Invoking a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  llvm::copy(Args, op_begin());
  setNormalDest(IfNormal);
  setUnwindDest(IfException);
  setCalledOperand(Fn);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 3 == op_end() && "Should add up!");

  setName(NameStr);
}

InvokeInst::InvokeInst(const InvokeInst &II)
    : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke,
               OperandTraits<CallBase>::op_end(this) - II.getNumOperands(),
               II.getNumOperands()) {
  setCallingConv(II.getCallingConv());
  std::copy(II.op_begin(), II.op_end(), op_begin());
  std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = II.SubclassOptionalData;
}

InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
                               InsertPosition InsertPt) {
  std::vector<Value *> Args(II->arg_begin(), II->arg_end());

  auto *NewII = InvokeInst::Create(
      II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
      II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
  NewII->setCallingConv(II->getCallingConv());
  NewII->SubclassOptionalData = II->SubclassOptionalData;
  NewII->setAttributes(II->getAttributes());
  NewII->setDebugLoc(II->getDebugLoc());
  return NewII;
}

LandingPadInst *InvokeInst::getLandingPadInst() const {
  return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
}

void InvokeInst::updateProfWeight(uint64_t S, uint64_t T) {
  if (T == 0) {
    LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
                         "div by 0. Ignoring. Likely the function "
                      << getParent()->getParent()->getName()
                      << " has 0 entry count, and contains call instructions "
                         "with non-zero prof info.");
    return;
  }
  scaleProfData(*this, S, T);
}

//===----------------------------------------------------------------------===//
// CallBrInst Implementation
//===----------------------------------------------------------------------===//

void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
                      ArrayRef<BasicBlock *> IndirectDests,
                      ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert((int)getNumOperands() ==
             ComputeNumOperands(Args.size(), IndirectDests.size(),
                                CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  std::copy(Args.begin(), Args.end(), op_begin());
  NumIndirectDests = IndirectDests.size();
  setDefaultDest(Fallthrough);
  for (unsigned i = 0; i != NumIndirectDests; ++i)
    setIndirectDest(i, IndirectDests[i]);
  setCalledOperand(Fn);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");

  setName(NameStr);
}

CallBrInst::CallBrInst(const CallBrInst &CBI)
    : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
               OperandTraits<CallBase>::op_end(this) - CBI.getNumOperands(),
               CBI.getNumOperands()) {
  setCallingConv(CBI.getCallingConv());
  std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
  std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CBI.SubclassOptionalData;
  NumIndirectDests = CBI.NumIndirectDests;
}

CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
                               InsertPosition InsertPt) {
  std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());

  auto *NewCBI = CallBrInst::Create(
      CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
      CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
  NewCBI->setCallingConv(CBI->getCallingConv());
  NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
  NewCBI->setAttributes(CBI->getAttributes());
  NewCBI->setDebugLoc(CBI->getDebugLoc());
  NewCBI->NumIndirectDests = CBI->NumIndirectDests;
  return NewCBI;
}

//===----------------------------------------------------------------------===//
// ReturnInst Implementation
//===----------------------------------------------------------------------===//

ReturnInst::ReturnInst(const ReturnInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - RI.getNumOperands(),
                  RI.getNumOperands()) {
  if (RI.getNumOperands())
    Op<0>() = RI.Op<0>();
  SubclassOptionalData = RI.SubclassOptionalData;
}

ReturnInst::ReturnInst(LLVMContext &C, Value *retVal,
                       InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertBefore) {
  if (retVal)
    Op<0>() = retVal;
}

//===----------------------------------------------------------------------===//
// ResumeInst Implementation
//===----------------------------------------------------------------------===//

ResumeInst::ResumeInst(const ResumeInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1) {
  Op<0>() = RI.Op<0>();
}

ResumeInst::ResumeInst(Value *Exn, InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
  Op<0>() = Exn;
}

//===----------------------------------------------------------------------===//
// CleanupReturnInst Implementation
//===----------------------------------------------------------------------===//

CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
    : Instruction(CRI.getType(), Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) -
                      CRI.getNumOperands(),
                  CRI.getNumOperands()) {
  setSubclassData<Instruction::OpaqueField>(
      CRI.getSubclassData<Instruction::OpaqueField>());
  Op<0>() = CRI.Op<0>();
  if (CRI.hasUnwindDest())
    Op<1>() = CRI.Op<1>();
}

void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
  if (UnwindBB)
    setSubclassData<UnwindDestField>(true);

  Op<0>() = CleanupPad;
  if (UnwindBB)
    Op<1>() = UnwindBB;
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values,
                                     InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertBefore) {
  init(CleanupPad, UnwindBB);
}

//===----------------------------------------------------------------------===//
// CatchReturnInst Implementation
//===----------------------------------------------------------------------===//
void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
  Op<0>() = CatchPad;
  Op<1>() = BB;
}

CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
    : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2) {
  Op<0>() = CRI.Op<0>();
  Op<1>() = CRI.Op<1>();
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertBefore) {
  init(CatchPad, BB);
}

//===----------------------------------------------------------------------===//
// CatchSwitchInst Implementation
//===----------------------------------------------------------------------===//

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
                                 InsertPosition InsertBefore)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertBefore) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
    : Instruction(CSI.getType(), Instruction::CatchSwitch, nullptr,
                  CSI.getNumOperands()) {
  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
  setNumHungOffUseOperands(ReservedSpace);
  Use *OL = getOperandList();
  const Use *InOL = CSI.getOperandList();
  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];
}

void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
                           unsigned NumReservedValues) {
  assert(ParentPad && NumReservedValues);

  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(UnwindDest ? 2 : 1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = ParentPad;
  if (UnwindDest) {
    setSubclassData<UnwindDestField>(true);
    setUnwindDest(UnwindDest);
  }
}

/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation. This grows the number of ops by 2 times.
void CatchSwitchInst::growOperands(unsigned Size) {
  unsigned NumOperands = getNumOperands();
  assert(NumOperands >= 1);
  if (ReservedSpace >= NumOperands + Size)
    return;
  ReservedSpace = (NumOperands + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

void CatchSwitchInst::addHandler(BasicBlock *Handler) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Handler;
}

void CatchSwitchInst::removeHandler(handler_iterator HI) {
  // Move all subsequent handlers up one.
  Use *EndDst = op_end() - 1;
  for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
    *CurDst = *(CurDst + 1);
  // Null out the last handler use.
  *EndDst = nullptr;

  setNumHungOffUseOperands(getNumOperands() - 1);
}

//===----------------------------------------------------------------------===//
// FuncletPadInst Implementation
//===----------------------------------------------------------------------===//
void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
                          const Twine &NameStr) {
  assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
  llvm::copy(Args, op_begin());
  setParentPad(ParentPad);
  setName(NameStr);
}

FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
    : Instruction(FPI.getType(), FPI.getOpcode(),
                  OperandTraits<FuncletPadInst>::op_end(this) -
                      FPI.getNumOperands(),
                  FPI.getNumOperands()) {
  std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
  setParentPad(FPI.getParentPad());
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr,
                               InsertPosition InsertBefore)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertBefore) {
  init(ParentPad, Args, NameStr);
}

//===----------------------------------------------------------------------===//
// UnreachableInst Implementation
//===----------------------------------------------------------------------===//

UnreachableInst::UnreachableInst(LLVMContext &Context,
                                 InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertBefore) {}

//===----------------------------------------------------------------------===//
// BranchInst Implementation
//===----------------------------------------------------------------------===//

void BranchInst::AssertOK() {
  if (isConditional())
    assert(getCondition()->getType()->isIntegerTy(1) &&
           "May only branch on boolean predicates!");
}

BranchInst::BranchInst(BasicBlock *IfTrue, InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1,
                  InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3,
                  InsertBefore) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(const BranchInst &BI)
    : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
                  BI.getNumOperands()) {
  // Assign in order of operand index to make use-list order predictable.
  if (BI.getNumOperands() != 1) {
    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
    Op<-3>() = BI.Op<-3>();
    Op<-2>() = BI.Op<-2>();
  }
  Op<-1>() = BI.Op<-1>();
  SubclassOptionalData = BI.SubclassOptionalData;
}

void BranchInst::swapSuccessors() {
  assert(isConditional() &&
         "Cannot swap successors of an unconditional branch");
  Op<-1>().swap(Op<-2>());

  // Update profile metadata if present and it matches our structural
  // expectations.
  swapProfMetadata();
}

//===----------------------------------------------------------------------===//
// AllocaInst Implementation
//===----------------------------------------------------------------------===//

static Value *getAISize(LLVMContext &Context, Value *Amt) {
  if (!Amt)
    Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
  else {
    assert(!isa<BasicBlock>(Amt) &&
           "Passed basic block into allocation size parameter!  Use other ctor");
    assert(Amt->getType()->isIntegerTy() &&
           "Allocation array size is not an integer!");
  }
  return Amt;
}

static Align computeAllocaDefaultAlign(Type *Ty, InsertPosition Pos) {
  assert(Pos.isValid() &&
         "Insertion position cannot be null when alignment not provided!");
  BasicBlock *BB = Pos.getBasicBlock();
  assert(BB->getParent() &&
         "BB must be in a Function when alignment not provided!");
  const DataLayout &DL = BB->getDataLayout();
  return DL.getPrefTypeAlign(Ty);
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       InsertPosition InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, InsertPosition InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
                 InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name,
                       InsertPosition InsertBefore)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertBefore),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

bool AllocaInst::isArrayAllocation() const {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
    return !CI->isOne();
  return true;
}

/// isStaticAlloca - Return true if this alloca is in the entry block of the
/// function and is a constant size. If so, the code generator will fold it
/// into the prolog/epilog code, so it is basically free.
bool AllocaInst::isStaticAlloca() const {
  // Must be constant size.
  if (!isa<ConstantInt>(getArraySize())) return false;

  // Must be in the entry block.
  const BasicBlock *Parent = getParent();
  return Parent->isEntryBlock() && !isUsedWithInAlloca();
}

//===----------------------------------------------------------------------===//
// LoadInst Implementation
//===----------------------------------------------------------------------===//

void LoadInst::AssertOK() {
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type.");
}

static Align computeLoadStoreDefaultAlign(Type *Ty, InsertPosition Pos) {
  assert(Pos.isValid() &&
         "Insertion position cannot be null when alignment not provided!");
  BasicBlock *BB = Pos.getBasicBlock();
  assert(BB->getParent() &&
         "BB must be in a Function when alignment not provided!");
  const DataLayout &DL = BB->getDataLayout();
  return DL.getABITypeAlign(Ty);
}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   InsertPosition InsertBef)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   InsertPosition InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, InsertPosition InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, AtomicOrdering Order, SyncScope::ID SSID,
                   InsertPosition InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

//===----------------------------------------------------------------------===//
// StoreInst Implementation
//===----------------------------------------------------------------------===//

void StoreInst::AssertOK() {
  assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
  assert(getOperand(1)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
}

StoreInst::StoreInst(Value *val, Value *addr, InsertPosition InsertBefore)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     InsertPosition InsertBefore)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
                InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     InsertPosition InsertBefore)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this), InsertBefore) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
}

//===----------------------------------------------------------------------===//
// AtomicCmpXchgInst Implementation
//===----------------------------------------------------------------------===//

void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
                             Align Alignment, AtomicOrdering SuccessOrdering,
                             AtomicOrdering FailureOrdering,
                             SyncScope::ID SSID) {
  Op<0>() = Ptr;
  Op<1>() = Cmp;
  Op<2>() = NewVal;
  setSuccessOrdering(SuccessOrdering);
  setFailureOrdering(FailureOrdering);
  setSyncScopeID(SSID);
  setAlignment(Alignment);

  assert(getOperand(0) && getOperand(1) && getOperand(2) &&
         "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(1)->getType() == getOperand(2)->getType() &&
         "Cmp type and NewVal type must be same!");
}

AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     Align Alignment,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     InsertPosition InsertBefore)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
}

//===----------------------------------------------------------------------===//
// AtomicRMWInst Implementation
//===----------------------------------------------------------------------===//

void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
                         Align Alignment, AtomicOrdering Ordering,
                         SyncScope::ID SSID) {
  assert(Ordering != AtomicOrdering::NotAtomic &&
         "atomicrmw instructions can only be atomic.");
  assert(Ordering != AtomicOrdering::Unordered &&
         "atomicrmw instructions cannot be unordered.");
  Op<0>() = Ptr;
  Op<1>() = Val;
  setOperation(Operation);
  setOrdering(Ordering);
  setSyncScopeID(SSID);
  setAlignment(Alignment);

  assert(getOperand(0) && getOperand(1) && "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(Ordering != AtomicOrdering::NotAtomic &&
         "AtomicRMW instructions must be atomic!");
}

AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
                             Align Alignment, AtomicOrdering Ordering,
                             SyncScope::ID SSID, InsertPosition InsertBefore)
    : Instruction(Val->getType(), AtomicRMW,
                  OperandTraits<AtomicRMWInst>::op_begin(this),
                  OperandTraits<AtomicRMWInst>::operands(this), InsertBefore) {
  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
}

StringRef AtomicRMWInst::getOperationName(BinOp Op) {
  switch (Op) {
  case AtomicRMWInst::Xchg:
    return "xchg";
  case AtomicRMWInst::Add:
    return "add";
  case AtomicRMWInst::Sub:
    return "sub";
  case AtomicRMWInst::And:
    return "and";
  case AtomicRMWInst::Nand:
    return "nand";
  case AtomicRMWInst::Or:
    return "or";
  case AtomicRMWInst::Xor:
    return "xor";
  case AtomicRMWInst::Max:
    return "max";
  case AtomicRMWInst::Min:
    return "min";
  case AtomicRMWInst::UMax:
    return "umax";
  case AtomicRMWInst::UMin:
    return "umin";
  case AtomicRMWInst::FAdd:
    return "fadd";
  case AtomicRMWInst::FSub:
    return "fsub";
  case AtomicRMWInst::FMax:
    return "fmax";
  case AtomicRMWInst::FMin:
    return "fmin";
  case AtomicRMWInst::UIncWrap:
    return "uinc_wrap";
  case AtomicRMWInst::UDecWrap:
    return "udec_wrap";
  case AtomicRMWInst::BAD_BINOP:
    return "<invalid operation>";
  }

  llvm_unreachable("invalid atomicrmw operation");
}

//===----------------------------------------------------------------------===//
// FenceInst Implementation
//===----------------------------------------------------------------------===//

FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
                     SyncScope::ID SSID, InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}

//===----------------------------------------------------------------------===//
// GetElementPtrInst Implementation
//===----------------------------------------------------------------------===//

void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
                             const Twine &Name) {
  assert(getNumOperands() == 1 + IdxList.size() &&
         "NumOperands not initialized?");
  Op<0>() = Ptr;
  llvm::copy(IdxList, op_begin() + 1);
  setName(Name);
}

GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
    : Instruction(GEPI.getType(), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) -
                      GEPI.getNumOperands(),
                  GEPI.getNumOperands()),
      SourceElementType(GEPI.SourceElementType),
      ResultElementType(GEPI.ResultElementType) {
  std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
  SubclassOptionalData = GEPI.SubclassOptionalData;
}

Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, Value *Idx) {
  if (auto *Struct = dyn_cast<StructType>(Ty)) {
    if (!Struct->indexValid(Idx))
      return nullptr;
    return Struct->getTypeAtIndex(Idx);
  }
  if (!Idx->getType()->isIntOrIntVectorTy())
    return nullptr;
  if (auto *Array = dyn_cast<ArrayType>(Ty))
    return Array->getElementType();
  if (auto *Vector = dyn_cast<VectorType>(Ty))
    return Vector->getElementType();
  return nullptr;
}

Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, uint64_t Idx) {
  if (auto *Struct = dyn_cast<StructType>(Ty)) {
    if (Idx >= Struct->getNumElements())
      return nullptr;
    return Struct->getElementType(Idx);
  }
  if (auto *Array = dyn_cast<ArrayType>(Ty))
    return Array->getElementType();
  if (auto *Vector = dyn_cast<VectorType>(Ty))
    return Vector->getElementType();
  return nullptr;
}

template <typename IndexTy>
static Type *getIndexedTypeInternal(Type *Ty, ArrayRef<IndexTy> IdxList) {
  if (IdxList.empty())
    return Ty;
  for (IndexTy V : IdxList.slice(1)) {
    Ty = GetElementPtrInst::getTypeAtIndex(Ty, V);
    if (!Ty)
      return Ty;
  }
  return Ty;
}

Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<Value *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

Type *GetElementPtrInst::getIndexedType(Type *Ty,
                                        ArrayRef<Constant *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

/// hasAllZeroIndices - Return true if all of the indices of this GEP are
/// zeros.  If so, the result pointer and the first operand have the same
/// value, just potentially different types.
bool GetElementPtrInst::hasAllZeroIndices() const {
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) {
      if (!CI->isZero()) return false;
    } else {
      return false;
    }
  }
  return true;
}

/// hasAllConstantIndices - Return true if all of the indices of this GEP are
/// constant integers.  If so, the result pointer and the first operand have
/// a constant offset between them.
bool GetElementPtrInst::hasAllConstantIndices() const {
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    if (!isa<ConstantInt>(getOperand(i)))
      return false;
  }
  return true;
}

void GetElementPtrInst::setNoWrapFlags(GEPNoWrapFlags NW) {
  SubclassOptionalData = NW.getRaw();
}

void GetElementPtrInst::setIsInBounds(bool B) {
  GEPNoWrapFlags NW = cast<GEPOperator>(this)->getNoWrapFlags();
  if (B)
    NW |= GEPNoWrapFlags::inBounds();
  else
    NW = NW.withoutInBounds();
  setNoWrapFlags(NW);
}

GEPNoWrapFlags GetElementPtrInst::getNoWrapFlags() const {
  return cast<GEPOperator>(this)->getNoWrapFlags();
}

bool GetElementPtrInst::isInBounds() const {
  return cast<GEPOperator>(this)->isInBounds();
}

bool GetElementPtrInst::hasNoUnsignedSignedWrap() const {
  return cast<GEPOperator>(this)->hasNoUnsignedSignedWrap();
}

bool GetElementPtrInst::hasNoUnsignedWrap() const {
  return cast<GEPOperator>(this)->hasNoUnsignedWrap();
}

bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL,
                                                 APInt &Offset) const {
  // Delegate to the generic GEPOperator implementation.
  return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
}

bool GetElementPtrInst::collectOffset(
    const DataLayout &DL, unsigned BitWidth,
    MapVector<Value *, APInt> &VariableOffsets,
    APInt &ConstantOffset) const {
  // Delegate to the generic GEPOperator implementation.
  return cast<GEPOperator>(this)->collectOffset(DL, BitWidth, VariableOffsets,
                                                ConstantOffset);
}

//===----------------------------------------------------------------------===//
// ExtractElementInst Implementation
//===----------------------------------------------------------------------===//

ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       InsertPosition InsertBef)
    : Instruction(
          cast<VectorType>(Val->getType())->getElementType(), ExtractElement,
          OperandTraits<ExtractElementInst>::op_begin(this), 2, InsertBef) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");
  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}

bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
  if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
    return false;
  return true;
}

//===----------------------------------------------------------------------===//
// InsertElementInst Implementation
//===----------------------------------------------------------------------===//

InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     InsertPosition InsertBef)
    : Instruction(Vec->getType(), InsertElement,
                  OperandTraits<InsertElementInst>::op_begin(this), 3,
                  InsertBef) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");
  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}

bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,
                                        const Value *Index) {
  if (!Vec->getType()->isVectorTy())
    return false; // First operand of insertelement must be vector type.

  if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
    return false; // Second operand of insertelement must be vector element type.
element type.16451646if (!Index->getType()->isIntegerTy())1647return false; // Third operand of insertelement must be i32.1648return true;1649}16501651//===----------------------------------------------------------------------===//1652// ShuffleVectorInst Implementation1653//===----------------------------------------------------------------------===//16541655static Value *createPlaceholderForShuffleVector(Value *V) {1656assert(V && "Cannot create placeholder of nullptr V");1657return PoisonValue::get(V->getType());1658}16591660ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *Mask, const Twine &Name,1661InsertPosition InsertBefore)1662: ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,1663InsertBefore) {}16641665ShuffleVectorInst::ShuffleVectorInst(Value *V1, ArrayRef<int> Mask,1666const Twine &Name,1667InsertPosition InsertBefore)1668: ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,1669InsertBefore) {}16701671ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,1672const Twine &Name,1673InsertPosition InsertBefore)1674: Instruction(1675VectorType::get(cast<VectorType>(V1->getType())->getElementType(),1676cast<VectorType>(Mask->getType())->getElementCount()),1677ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),1678OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {1679assert(isValidOperands(V1, V2, Mask) &&1680"Invalid shuffle vector instruction operands!");16811682Op<0>() = V1;1683Op<1>() = V2;1684SmallVector<int, 16> MaskArr;1685getShuffleMask(cast<Constant>(Mask), MaskArr);1686setShuffleMask(MaskArr);1687setName(Name);1688}16891690ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,1691const Twine &Name,1692InsertPosition InsertBefore)1693: Instruction(1694VectorType::get(cast<VectorType>(V1->getType())->getElementType(),1695Mask.size(), isa<ScalableVectorType>(V1->getType())),1696ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),1697OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {1698assert(isValidOperands(V1, V2, Mask) &&1699"Invalid shuffle vector instruction operands!");1700Op<0>() = V1;1701Op<1>() = V2;1702setShuffleMask(Mask);1703setName(Name);1704}17051706void ShuffleVectorInst::commute() {1707int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();1708int NumMaskElts = ShuffleMask.size();1709SmallVector<int, 16> NewMask(NumMaskElts);1710for (int i = 0; i != NumMaskElts; ++i) {1711int MaskElt = getMaskValue(i);1712if (MaskElt == PoisonMaskElem) {1713NewMask[i] = PoisonMaskElem;1714continue;1715}1716assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");1717MaskElt = (MaskElt < NumOpElts) ? 
MaskElt + NumOpElts : MaskElt - NumOpElts;1718NewMask[i] = MaskElt;1719}1720setShuffleMask(NewMask);1721Op<0>().swap(Op<1>());1722}17231724bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,1725ArrayRef<int> Mask) {1726// V1 and V2 must be vectors of the same type.1727if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())1728return false;17291730// Make sure the mask elements make sense.1731int V1Size =1732cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();1733for (int Elem : Mask)1734if (Elem != PoisonMaskElem && Elem >= V1Size * 2)1735return false;17361737if (isa<ScalableVectorType>(V1->getType()))1738if ((Mask[0] != 0 && Mask[0] != PoisonMaskElem) || !all_equal(Mask))1739return false;17401741return true;1742}17431744bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,1745const Value *Mask) {1746// V1 and V2 must be vectors of the same type.1747if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())1748return false;17491750// Mask must be vector of i32, and must be the same kind of vector as the1751// input vectors1752auto *MaskTy = dyn_cast<VectorType>(Mask->getType());1753if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||1754isa<ScalableVectorType>(MaskTy) != isa<ScalableVectorType>(V1->getType()))1755return false;17561757// Check to see if Mask is valid.1758if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))1759return true;17601761if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {1762unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();1763for (Value *Op : MV->operands()) {1764if (auto *CI = dyn_cast<ConstantInt>(Op)) {1765if (CI->uge(V1Size*2))1766return false;1767} else if (!isa<UndefValue>(Op)) {1768return false;1769}1770}1771return true;1772}17731774if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {1775unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();1776for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();1777i != e; ++i)1778if (CDS->getElementAsInteger(i) >= V1Size*2)1779return false;1780return true;1781}17821783return false;1784}17851786void ShuffleVectorInst::getShuffleMask(const Constant *Mask,1787SmallVectorImpl<int> &Result) {1788ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();17891790if (isa<ConstantAggregateZero>(Mask)) {1791Result.resize(EC.getKnownMinValue(), 0);1792return;1793}17941795Result.reserve(EC.getKnownMinValue());17961797if (EC.isScalable()) {1798assert((isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) &&1799"Scalable vector shuffle mask must be undef or zeroinitializer");1800int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;1801for (unsigned I = 0; I < EC.getKnownMinValue(); ++I)1802Result.emplace_back(MaskVal);1803return;1804}18051806unsigned NumElts = EC.getKnownMinValue();18071808if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {1809for (unsigned i = 0; i != NumElts; ++i)1810Result.push_back(CDS->getElementAsInteger(i));1811return;1812}1813for (unsigned i = 0; i != NumElts; ++i) {1814Constant *C = Mask->getAggregateElement(i);1815Result.push_back(isa<UndefValue>(C) ? 
-1 :1816cast<ConstantInt>(C)->getZExtValue());1817}1818}18191820void ShuffleVectorInst::setShuffleMask(ArrayRef<int> Mask) {1821ShuffleMask.assign(Mask.begin(), Mask.end());1822ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType());1823}18241825Constant *ShuffleVectorInst::convertShuffleMaskForBitcode(ArrayRef<int> Mask,1826Type *ResultTy) {1827Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());1828if (isa<ScalableVectorType>(ResultTy)) {1829assert(all_equal(Mask) && "Unexpected shuffle");1830Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);1831if (Mask[0] == 0)1832return Constant::getNullValue(VecTy);1833return PoisonValue::get(VecTy);1834}1835SmallVector<Constant *, 16> MaskConst;1836for (int Elem : Mask) {1837if (Elem == PoisonMaskElem)1838MaskConst.push_back(PoisonValue::get(Int32Ty));1839else1840MaskConst.push_back(ConstantInt::get(Int32Ty, Elem));1841}1842return ConstantVector::get(MaskConst);1843}18441845static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {1846assert(!Mask.empty() && "Shuffle mask must contain elements");1847bool UsesLHS = false;1848bool UsesRHS = false;1849for (int I : Mask) {1850if (I == -1)1851continue;1852assert(I >= 0 && I < (NumOpElts * 2) &&1853"Out-of-bounds shuffle mask element");1854UsesLHS |= (I < NumOpElts);1855UsesRHS |= (I >= NumOpElts);1856if (UsesLHS && UsesRHS)1857return false;1858}1859// Allow for degenerate case: completely undef mask means neither source is used.1860return UsesLHS || UsesRHS;1861}18621863bool ShuffleVectorInst::isSingleSourceMask(ArrayRef<int> Mask, int NumSrcElts) {1864// We don't have vector operand size information, so assume operands are the1865// same size as the mask.1866return isSingleSourceMaskImpl(Mask, NumSrcElts);1867}18681869static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {1870if (!isSingleSourceMaskImpl(Mask, NumOpElts))1871return false;1872for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {1873if (Mask[i] == -1)1874continue;1875if (Mask[i] != i && Mask[i] != (NumOpElts + i))1876return false;1877}1878return true;1879}18801881bool ShuffleVectorInst::isIdentityMask(ArrayRef<int> Mask, int NumSrcElts) {1882if (Mask.size() != static_cast<unsigned>(NumSrcElts))1883return false;1884// We don't have vector operand size information, so assume operands are the1885// same size as the mask.1886return isIdentityMaskImpl(Mask, NumSrcElts);1887}18881889bool ShuffleVectorInst::isReverseMask(ArrayRef<int> Mask, int NumSrcElts) {1890if (Mask.size() != static_cast<unsigned>(NumSrcElts))1891return false;1892if (!isSingleSourceMask(Mask, NumSrcElts))1893return false;18941895// The number of elements in the mask must be at least 2.1896if (NumSrcElts < 2)1897return false;18981899for (int I = 0, E = Mask.size(); I < E; ++I) {1900if (Mask[I] == -1)1901continue;1902if (Mask[I] != (NumSrcElts - 1 - I) &&1903Mask[I] != (NumSrcElts + NumSrcElts - 1 - I))1904return false;1905}1906return true;1907}19081909bool ShuffleVectorInst::isZeroEltSplatMask(ArrayRef<int> Mask, int NumSrcElts) {1910if (Mask.size() != static_cast<unsigned>(NumSrcElts))1911return false;1912if (!isSingleSourceMask(Mask, NumSrcElts))1913return false;1914for (int I = 0, E = Mask.size(); I < E; ++I) {1915if (Mask[I] == -1)1916continue;1917if (Mask[I] != 0 && Mask[I] != NumSrcElts)1918return false;1919}1920return true;1921}19221923bool ShuffleVectorInst::isSelectMask(ArrayRef<int> Mask, int NumSrcElts) {1924if (Mask.size() != static_cast<unsigned>(NumSrcElts))1925return false;1926// Select is 
differentiated from identity. It requires using both sources.1927if (isSingleSourceMask(Mask, NumSrcElts))1928return false;1929for (int I = 0, E = Mask.size(); I < E; ++I) {1930if (Mask[I] == -1)1931continue;1932if (Mask[I] != I && Mask[I] != (NumSrcElts + I))1933return false;1934}1935return true;1936}19371938bool ShuffleVectorInst::isTransposeMask(ArrayRef<int> Mask, int NumSrcElts) {1939// Example masks that will return true:1940// v1 = <a, b, c, d>1941// v2 = <e, f, g, h>1942// trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>1943// trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>19441945if (Mask.size() != static_cast<unsigned>(NumSrcElts))1946return false;1947// 1. The number of elements in the mask must be a power-of-2 and at least 2.1948int Sz = Mask.size();1949if (Sz < 2 || !isPowerOf2_32(Sz))1950return false;19511952// 2. The first element of the mask must be either a 0 or a 1.1953if (Mask[0] != 0 && Mask[0] != 1)1954return false;19551956// 3. The difference between the first 2 elements must be equal to the1957// number of elements in the mask.1958if ((Mask[1] - Mask[0]) != NumSrcElts)1959return false;19601961// 4. The difference between consecutive even-numbered and odd-numbered1962// elements must be equal to 2.1963for (int I = 2; I < Sz; ++I) {1964int MaskEltVal = Mask[I];1965if (MaskEltVal == -1)1966return false;1967int MaskEltPrevVal = Mask[I - 2];1968if (MaskEltVal - MaskEltPrevVal != 2)1969return false;1970}1971return true;1972}19731974bool ShuffleVectorInst::isSpliceMask(ArrayRef<int> Mask, int NumSrcElts,1975int &Index) {1976if (Mask.size() != static_cast<unsigned>(NumSrcElts))1977return false;1978// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>1979int StartIndex = -1;1980for (int I = 0, E = Mask.size(); I != E; ++I) {1981int MaskEltVal = Mask[I];1982if (MaskEltVal == -1)1983continue;19841985if (StartIndex == -1) {1986// Don't support a StartIndex that begins in the second input, or if the1987// first non-undef index would access below the StartIndex.1988if (MaskEltVal < I || NumSrcElts <= (MaskEltVal - I))1989return false;19901991StartIndex = MaskEltVal - I;1992continue;1993}19941995// Splice is sequential starting from StartIndex.1996if (MaskEltVal != (StartIndex + I))1997return false;1998}19992000if (StartIndex == -1)2001return false;20022003// NOTE: This accepts StartIndex == 0 (COPY).2004Index = StartIndex;2005return true;2006}20072008bool ShuffleVectorInst::isExtractSubvectorMask(ArrayRef<int> Mask,2009int NumSrcElts, int &Index) {2010// Must extract from a single source.2011if (!isSingleSourceMaskImpl(Mask, NumSrcElts))2012return false;20132014// Must be smaller (else this is an Identity shuffle).2015if (NumSrcElts <= (int)Mask.size())2016return false;20172018// Find start of extraction, accounting that we may start with an UNDEF.2019int SubIndex = -1;2020for (int i = 0, e = Mask.size(); i != e; ++i) {2021int M = Mask[i];2022if (M < 0)2023continue;2024int Offset = (M % NumSrcElts) - i;2025if (0 <= SubIndex && SubIndex != Offset)2026return false;2027SubIndex = Offset;2028}20292030if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {2031Index = SubIndex;2032return true;2033}2034return false;2035}20362037bool ShuffleVectorInst::isInsertSubvectorMask(ArrayRef<int> Mask,2038int NumSrcElts, int &NumSubElts,2039int &Index) {2040int NumMaskElts = Mask.size();20412042// Don't try to match if we're shuffling to a smaller size.2043if (NumMaskElts < NumSrcElts)2044return false;20452046// TODO: We don't recognize self-insertion/widening.2047if 
(isSingleSourceMaskImpl(Mask, NumSrcElts))
    return false;

  // Determine which mask elements are attributed to which source.
  APInt UndefElts = APInt::getZero(NumMaskElts);
  APInt Src0Elts = APInt::getZero(NumMaskElts);
  APInt Src1Elts = APInt::getZero(NumMaskElts);
  bool Src0Identity = true;
  bool Src1Identity = true;

  for (int i = 0; i != NumMaskElts; ++i) {
    int M = Mask[i];
    if (M < 0) {
      UndefElts.setBit(i);
      continue;
    }
    if (M < NumSrcElts) {
      Src0Elts.setBit(i);
      Src0Identity &= (M == i);
      continue;
    }
    Src1Elts.setBit(i);
    Src1Identity &= (M == (i + NumSrcElts));
  }
  assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
         "unknown shuffle elements");
  assert(!Src0Elts.isZero() && !Src1Elts.isZero() &&
         "2-source shuffle not found");

  // Determine lo/hi span ranges.
  // TODO: How should we handle undefs at the start of subvector insertions?
  int Src0Lo = Src0Elts.countr_zero();
  int Src1Lo = Src1Elts.countr_zero();
  int Src0Hi = NumMaskElts - Src0Elts.countl_zero();
  int Src1Hi = NumMaskElts - Src1Elts.countl_zero();

  // If src0 is in place, see if the src1 elements are in place within their
  // own span.
  if (Src0Identity) {
    int NumSub1Elts = Src1Hi - Src1Lo;
    ArrayRef<int> Sub1Mask = Mask.slice(Src1Lo, NumSub1Elts);
    if (isIdentityMaskImpl(Sub1Mask, NumSrcElts)) {
      NumSubElts = NumSub1Elts;
      Index = Src1Lo;
      return true;
    }
  }

  // If src1 is in place, see if the src0 elements are in place within their
  // own span.
  if (Src1Identity) {
    int NumSub0Elts = Src0Hi - Src0Lo;
    ArrayRef<int> Sub0Mask = Mask.slice(Src0Lo, NumSub0Elts);
    if (isIdentityMaskImpl(Sub0Mask, NumSrcElts)) {
      NumSubElts = NumSub0Elts;
      Index = Src0Lo;
      return true;
    }
  }

  return false;
}
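// An illustrative isInsertSubvectorMask example: with 4-element sources, the
// mask <0, 4, 5, 3> keeps the first source in place and inserts elements 0..1
// of the second source at offset 1, so it is matched with NumSubElts = 2 and
// Index = 1.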
bool ShuffleVectorInst::isIdentityWithPadding() const {
  // FIXME: Not currently possible to express a shuffle mask for a scalable
  // vector for this case.
  if (isa<ScalableVectorType>(getType()))
    return false;

  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  if (NumMaskElts <= NumOpElts)
    return false;

  // The first part of the mask must choose elements from exactly 1 source op.
  ArrayRef<int> Mask = getShuffleMask();
  if (!isIdentityMaskImpl(Mask, NumOpElts))
    return false;

  // All extending must be with undef elements.
  for (int i = NumOpElts; i < NumMaskElts; ++i)
    if (Mask[i] != -1)
      return false;

  return true;
}

bool ShuffleVectorInst::isIdentityWithExtract() const {
  // FIXME: Not currently possible to express a shuffle mask for a scalable
  // vector for this case.
  if (isa<ScalableVectorType>(getType()))
    return false;

  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  if (NumMaskElts >= NumOpElts)
    return false;

  return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
}

bool ShuffleVectorInst::isConcat() const {
  // Vector concatenation is differentiated from identity with padding.
  if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()))
    return false;

  // FIXME: Not currently possible to express a shuffle mask for a scalable
  // vector for this case.
  if (isa<ScalableVectorType>(getType()))
    return false;

  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  if (NumMaskElts != NumOpElts * 2)
    return false;

  // Use the mask length rather than the operands' vector lengths here. We
  // already know that the shuffle returns a vector twice as long as the inputs,
  // and neither of the inputs are undef vectors. If the mask picks consecutive
  // elements from both inputs, then this is a concatenation of the inputs.
  return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
}

static bool isReplicationMaskWithParams(ArrayRef<int> Mask,
                                        int ReplicationFactor, int VF) {
  assert(Mask.size() == (unsigned)ReplicationFactor * VF &&
         "Unexpected mask size.");

  for (int CurrElt : seq(VF)) {
    ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
    assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&
           "Run out of mask?");
    Mask = Mask.drop_front(ReplicationFactor);
    if (!all_of(CurrSubMask, [CurrElt](int MaskElt) {
          return MaskElt == PoisonMaskElem || MaskElt == CurrElt;
        }))
      return false;
  }
  assert(Mask.empty() && "Did not consume the whole mask?");

  return true;
}

bool ShuffleVectorInst::isReplicationMask(ArrayRef<int> Mask,
                                          int &ReplicationFactor, int &VF) {
  // undef-less case is trivial.
  if (!llvm::is_contained(Mask, PoisonMaskElem)) {
    ReplicationFactor =
        Mask.take_while([](int MaskElt) { return MaskElt == 0; }).size();
    if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
      return false;
    VF = Mask.size() / ReplicationFactor;
    return isReplicationMaskWithParams(Mask, ReplicationFactor, VF);
  }

  // However, if the mask contains undef's, we have to enumerate possible tuples
  // and pick one.
There are bounds on replication factor: [1, mask size]2204// (where RF=1 is an identity shuffle, RF=mask size is a broadcast shuffle)2205// Additionally, mask size is a replication factor multiplied by vector size,2206// which further significantly reduces the search space.22072208// Before doing that, let's perform basic correctness checking first.2209int Largest = -1;2210for (int MaskElt : Mask) {2211if (MaskElt == PoisonMaskElem)2212continue;2213// Elements must be in non-decreasing order.2214if (MaskElt < Largest)2215return false;2216Largest = std::max(Largest, MaskElt);2217}22182219// Prefer larger replication factor if all else equal.2220for (int PossibleReplicationFactor :2221reverse(seq_inclusive<unsigned>(1, Mask.size()))) {2222if (Mask.size() % PossibleReplicationFactor != 0)2223continue;2224int PossibleVF = Mask.size() / PossibleReplicationFactor;2225if (!isReplicationMaskWithParams(Mask, PossibleReplicationFactor,2226PossibleVF))2227continue;2228ReplicationFactor = PossibleReplicationFactor;2229VF = PossibleVF;2230return true;2231}22322233return false;2234}22352236bool ShuffleVectorInst::isReplicationMask(int &ReplicationFactor,2237int &VF) const {2238// Not possible to express a shuffle mask for a scalable vector for this2239// case.2240if (isa<ScalableVectorType>(getType()))2241return false;22422243VF = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();2244if (ShuffleMask.size() % VF != 0)2245return false;2246ReplicationFactor = ShuffleMask.size() / VF;22472248return isReplicationMaskWithParams(ShuffleMask, ReplicationFactor, VF);2249}22502251bool ShuffleVectorInst::isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF) {2252if (VF <= 0 || Mask.size() < static_cast<unsigned>(VF) ||2253Mask.size() % VF != 0)2254return false;2255for (unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {2256ArrayRef<int> SubMask = Mask.slice(K, VF);2257if (all_of(SubMask, [](int Idx) { return Idx == PoisonMaskElem; }))2258continue;2259SmallBitVector Used(VF, false);2260for (int Idx : SubMask) {2261if (Idx != PoisonMaskElem && Idx < VF)2262Used.set(Idx);2263}2264if (!Used.all())2265return false;2266}2267return true;2268}22692270/// Return true if this shuffle mask is a replication mask.2271bool ShuffleVectorInst::isOneUseSingleSourceMask(int VF) const {2272// Not possible to express a shuffle mask for a scalable vector for this2273// case.2274if (isa<ScalableVectorType>(getType()))2275return false;2276if (!isSingleSourceMask(ShuffleMask, VF))2277return false;22782279return isOneUseSingleSourceMask(ShuffleMask, VF);2280}22812282bool ShuffleVectorInst::isInterleave(unsigned Factor) {2283FixedVectorType *OpTy = dyn_cast<FixedVectorType>(getOperand(0)->getType());2284// shuffle_vector can only interleave fixed length vectors - for scalable2285// vectors, see the @llvm.vector.interleave2 intrinsic2286if (!OpTy)2287return false;2288unsigned OpNumElts = OpTy->getNumElements();22892290return isInterleaveMask(ShuffleMask, Factor, OpNumElts * 2);2291}22922293bool ShuffleVectorInst::isInterleaveMask(2294ArrayRef<int> Mask, unsigned Factor, unsigned NumInputElts,2295SmallVectorImpl<unsigned> &StartIndexes) {2296unsigned NumElts = Mask.size();2297if (NumElts % Factor)2298return false;22992300unsigned LaneLen = NumElts / Factor;2301if (!isPowerOf2_32(LaneLen))2302return false;23032304StartIndexes.resize(Factor);23052306// Check whether each element matches the general interleaved rule.2307// Ignore undef elements, as long as the defined elements match the rule.2308// Outer loop processes all factors (x, 
y, z in the above example)2309unsigned I = 0, J;2310for (; I < Factor; I++) {2311unsigned SavedLaneValue;2312unsigned SavedNoUndefs = 0;23132314// Inner loop processes consecutive accesses (x, x+1... in the example)2315for (J = 0; J < LaneLen - 1; J++) {2316// Lane computes x's position in the Mask2317unsigned Lane = J * Factor + I;2318unsigned NextLane = Lane + Factor;2319int LaneValue = Mask[Lane];2320int NextLaneValue = Mask[NextLane];23212322// If both are defined, values must be sequential2323if (LaneValue >= 0 && NextLaneValue >= 0 &&2324LaneValue + 1 != NextLaneValue)2325break;23262327// If the next value is undef, save the current one as reference2328if (LaneValue >= 0 && NextLaneValue < 0) {2329SavedLaneValue = LaneValue;2330SavedNoUndefs = 1;2331}23322333// Undefs are allowed, but defined elements must still be consecutive:2334// i.e.: x,..., undef,..., x + 2,..., undef,..., undef,..., x + 5, ....2335// Verify this by storing the last non-undef followed by an undef2336// Check that following non-undef masks are incremented with the2337// corresponding distance.2338if (SavedNoUndefs > 0 && LaneValue < 0) {2339SavedNoUndefs++;2340if (NextLaneValue >= 0 &&2341SavedLaneValue + SavedNoUndefs != (unsigned)NextLaneValue)2342break;2343}2344}23452346if (J < LaneLen - 1)2347return false;23482349int StartMask = 0;2350if (Mask[I] >= 0) {2351// Check that the start of the I range (J=0) is greater than 02352StartMask = Mask[I];2353} else if (Mask[(LaneLen - 1) * Factor + I] >= 0) {2354// StartMask defined by the last value in lane2355StartMask = Mask[(LaneLen - 1) * Factor + I] - J;2356} else if (SavedNoUndefs > 0) {2357// StartMask defined by some non-zero value in the j loop2358StartMask = SavedLaneValue - (LaneLen - 1 - SavedNoUndefs);2359}2360// else StartMask remains set to 0, i.e. all elements are undefs23612362if (StartMask < 0)2363return false;2364// We must stay within the vectors; This case can happen with undefs.2365if (StartMask + LaneLen > NumInputElts)2366return false;23672368StartIndexes[I] = StartMask;2369}23702371return true;2372}23732374/// Check if the mask is a DE-interleave mask of the given factor2375/// \p Factor like:2376/// <Index, Index+Factor, ..., Index+(NumElts-1)*Factor>2377bool ShuffleVectorInst::isDeInterleaveMaskOfFactor(ArrayRef<int> Mask,2378unsigned Factor,2379unsigned &Index) {2380// Check all potential start indices from 0 to (Factor - 1).2381for (unsigned Idx = 0; Idx < Factor; Idx++) {2382unsigned I = 0;23832384// Check that elements are in ascending order by Factor. 
Ignore undef
    // elements.
    for (; I < Mask.size(); I++)
      if (Mask[I] >= 0 && static_cast<unsigned>(Mask[I]) != Idx + I * Factor)
        break;

    if (I == Mask.size()) {
      Index = Idx;
      return true;
    }
  }

  return false;
}

/// Try to lower a vector shuffle as a bit rotation.
///
/// Look for a repeated rotation pattern in each sub group.
/// Returns an element-wise left bit rotation amount or -1 if failed.
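/// For example, with NumSubElts = 4 the mask <3, 0, 1, 2, 7, 4, 5, 6> rotates
/// each four-element group by one element, so this returns 1, and
/// isBitRotateMask reports RotateAmt = 8 when EltSizeInBits = 8.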
static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
  int NumElts = Mask.size();
  assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");

  int RotateAmt = -1;
  for (int i = 0; i != NumElts; i += NumSubElts) {
    for (int j = 0; j != NumSubElts; ++j) {
      int M = Mask[i + j];
      if (M < 0)
        continue;
      if (M < i || M >= i + NumSubElts)
        return -1;
      int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
      if (0 <= RotateAmt && Offset != RotateAmt)
        return -1;
      RotateAmt = Offset;
    }
  }
  return RotateAmt;
}

bool ShuffleVectorInst::isBitRotateMask(
    ArrayRef<int> Mask, unsigned EltSizeInBits, unsigned MinSubElts,
    unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt) {
  for (NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
    int EltRotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
    if (EltRotateAmt < 0)
      continue;
    RotateAmt = EltRotateAmt * EltSizeInBits;
    return true;
  }

  return false;
}

//===----------------------------------------------------------------------===//
// InsertValueInst Class
//===----------------------------------------------------------------------===//

void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
                           const Twine &Name) {
  assert(getNumOperands() == 2 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index
  // (other than weirdness with &*IdxBegin being invalid; see
  // getelementptr's init routine for example). But there's no
  // present need to support it.
  assert(!Idxs.empty() && "InsertValueInst must have at least one index");

  assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) ==
         Val->getType() && "Inserted value must match indexed type!");
  Op<0>() = Agg;
  Op<1>() = Val;

  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}

InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
    : Instruction(IVI.getType(), InsertValue,
                  OperandTraits<InsertValueInst>::op_begin(this), 2),
      Indices(IVI.Indices) {
  Op<0>() = IVI.getOperand(0);
  Op<1>() = IVI.getOperand(1);
  SubclassOptionalData = IVI.SubclassOptionalData;
}

//===----------------------------------------------------------------------===//
// ExtractValueInst Class
//===----------------------------------------------------------------------===//

void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
  assert(getNumOperands() == 1 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index.
  // But there's no present need to support it.
  assert(!Idxs.empty() && "ExtractValueInst must have at least one index");

  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}

ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
    : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)),
      Indices(EVI.Indices) {
  SubclassOptionalData = EVI.SubclassOptionalData;
}

// getIndexedType - Returns the type of the element that would be extracted
// with an extractvalue instruction with the specified parameters.
//
// A null type is returned if the indices are invalid for the specified
// aggregate type.
//
Type *ExtractValueInst::getIndexedType(Type *Agg,
                                       ArrayRef<unsigned> Idxs) {
  for (unsigned Index : Idxs) {
    // We can't use CompositeType::indexValid(Index) here.
    // indexValid() always returns true for arrays because getelementptr allows
    // out-of-bounds indices.
Since we don't allow those for extractvalue and2503// insertvalue we need to check array indexing manually.2504// Since the only other types we can index into are struct types it's just2505// as easy to check those manually as well.2506if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {2507if (Index >= AT->getNumElements())2508return nullptr;2509Agg = AT->getElementType();2510} else if (StructType *ST = dyn_cast<StructType>(Agg)) {2511if (Index >= ST->getNumElements())2512return nullptr;2513Agg = ST->getElementType(Index);2514} else {2515// Not a valid type to index into.2516return nullptr;2517}2518}2519return const_cast<Type*>(Agg);2520}25212522//===----------------------------------------------------------------------===//2523// UnaryOperator Class2524//===----------------------------------------------------------------------===//25252526UnaryOperator::UnaryOperator(UnaryOps iType, Value *S, Type *Ty,2527const Twine &Name, InsertPosition InsertBefore)2528: UnaryInstruction(Ty, iType, S, InsertBefore) {2529Op<0>() = S;2530setName(Name);2531AssertOK();2532}25332534UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S, const Twine &Name,2535InsertPosition InsertBefore) {2536return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);2537}25382539void UnaryOperator::AssertOK() {2540Value *LHS = getOperand(0);2541(void)LHS; // Silence warnings.2542#ifndef NDEBUG2543switch (getOpcode()) {2544case FNeg:2545assert(getType() == LHS->getType() &&2546"Unary operation should return same type as operand!");2547assert(getType()->isFPOrFPVectorTy() &&2548"Tried to create a floating-point operation on a "2549"non-floating-point type!");2550break;2551default: llvm_unreachable("Invalid opcode provided");2552}2553#endif2554}25552556//===----------------------------------------------------------------------===//2557// BinaryOperator Class2558//===----------------------------------------------------------------------===//25592560BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,2561const Twine &Name, InsertPosition InsertBefore)2562: Instruction(Ty, iType, OperandTraits<BinaryOperator>::op_begin(this),2563OperandTraits<BinaryOperator>::operands(this), InsertBefore) {2564Op<0>() = S1;2565Op<1>() = S2;2566setName(Name);2567AssertOK();2568}25692570void BinaryOperator::AssertOK() {2571Value *LHS = getOperand(0), *RHS = getOperand(1);2572(void)LHS; (void)RHS; // Silence warnings.2573assert(LHS->getType() == RHS->getType() &&2574"Binary operator operand types must match!");2575#ifndef NDEBUG2576switch (getOpcode()) {2577case Add: case Sub:2578case Mul:2579assert(getType() == LHS->getType() &&2580"Arithmetic operation should return same type as operands!");2581assert(getType()->isIntOrIntVectorTy() &&2582"Tried to create an integer operation on a non-integer type!");2583break;2584case FAdd: case FSub:2585case FMul:2586assert(getType() == LHS->getType() &&2587"Arithmetic operation should return same type as operands!");2588assert(getType()->isFPOrFPVectorTy() &&2589"Tried to create a floating-point operation on a "2590"non-floating-point type!");2591break;2592case UDiv:2593case SDiv:2594assert(getType() == LHS->getType() &&2595"Arithmetic operation should return same type as operands!");2596assert(getType()->isIntOrIntVectorTy() &&2597"Incorrect operand type (not integer) for S/UDIV");2598break;2599case FDiv:2600assert(getType() == LHS->getType() &&2601"Arithmetic operation should return same type as operands!");2602assert(getType()->isFPOrFPVectorTy() &&2603"Incorrect operand 
type (not floating point) for FDIV");
    break;
  case URem:
  case SRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UREM");
    break;
  case FRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FREM");
    break;
  case Shl:
  case LShr:
  case AShr:
    assert(getType() == LHS->getType() &&
           "Shift operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a shift operation on a non-integral type!");
    break;
  case And: case Or:
  case Xor:
    assert(getType() == LHS->getType() &&
           "Logical operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a logical operation on a non-integral type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}

BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
                                       const Twine &Name,
                                       InsertPosition InsertBefore) {
  assert(S1->getType() == S2->getType() &&
         "Cannot create binary operator with two operands of differing type!");
  return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
                                          InsertPosition InsertBefore) {
  Value *Zero = ConstantInt::get(Op->getType(), 0);
  return new BinaryOperator(Instruction::Sub, Zero, Op, Op->getType(), Name,
                            InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
                                             InsertPosition InsertBefore) {
  Value *Zero = ConstantInt::get(Op->getType(), 0);
  return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
                                          InsertPosition InsertBefore) {
  Constant *C = Constant::getAllOnesValue(Op->getType());
  return new BinaryOperator(Instruction::Xor, Op, C,
                            Op->getType(), Name, InsertBefore);
}

// Exchange the two operands to this instruction. This is safe to use on any
// binary instruction and does not modify the semantics of the instruction.
// Returns true if the operands could not be swapped (i.e. the operation is not
// commutative), and false on success.
bool BinaryOperator::swapOperands() {
  if (!isCommutative())
    return true; // Can't commute operands
  Op<0>().swap(Op<1>());
  return false;
}

//===----------------------------------------------------------------------===//
// FPMathOperator Class
//===----------------------------------------------------------------------===//

float FPMathOperator::getFPAccuracy() const {
  const MDNode *MD =
      cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
  if (!MD)
    return 0.0;
  ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0));
  return Accuracy->getValueAPF().convertToFloat();
}
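// For example, an fdiv carrying "!fpmath !0" with "!0 = !{float 2.5}" reports
// an allowed error of 2.5 ULPs here; without !fpmath metadata this returns
// 0.0, i.e. the operation must be performed with the default precision.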
//===----------------------------------------------------------------------===//
// CastInst Class
//===----------------------------------------------------------------------===//

// Just determine if this cast only deals with integral->integral conversion.
bool CastInst::isIntegerCast() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::Trunc:
    return true;
  case Instruction::BitCast:
    return getOperand(0)->getType()->isIntegerTy() &&
           getType()->isIntegerTy();
  }
}

/// This function determines if the CastInst does not require any bits to be
/// changed in order to effect the cast. Essentially, it identifies cases where
/// no code gen is necessary for the cast, hence the name no-op cast. For
/// example, the following are all no-op casts:
/// # bitcast i32* %x to i8*
/// # bitcast <2 x i32> %x to <4 x i16>
/// # ptrtoint i32* %x to i32 ; on 32-bit platforms only
/// Determine if the described cast is a no-op.
bool CastInst::isNoopCast(Instruction::CastOps Opcode,
                          Type *SrcTy,
                          Type *DestTy,
                          const DataLayout &DL) {
  assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
  switch (Opcode) {
  default: llvm_unreachable("Invalid CastOp");
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    // TODO: Target information may give a more accurate answer here.
    return false;
  case Instruction::BitCast:
    return true;  // BitCast never modifies bits.
  case Instruction::PtrToInt:
    return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
           DestTy->getScalarSizeInBits();
  case Instruction::IntToPtr:
    return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
           SrcTy->getScalarSizeInBits();
  }
}

bool CastInst::isNoopCast(const DataLayout &DL) const {
  return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
}

/// This function determines if a pair of casts can be eliminated and what
/// opcode should be used in the elimination.
This assumes that there are two2752/// instructions like this:2753/// * %F = firstOpcode SrcTy %x to MidTy2754/// * %S = secondOpcode MidTy %F to DstTy2755/// The function returns a resultOpcode so these two casts can be replaced with:2756/// * %Replacement = resultOpcode %SrcTy %x to DstTy2757/// If no such cast is permitted, the function returns 0.2758unsigned CastInst::isEliminableCastPair(2759Instruction::CastOps firstOp, Instruction::CastOps secondOp,2760Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,2761Type *DstIntPtrTy) {2762// Define the 144 possibilities for these two cast instructions. The values2763// in this matrix determine what to do in a given situation and select the2764// case in the switch below. The rows correspond to firstOp, the columns2765// correspond to secondOp. In looking at the table below, keep in mind2766// the following cast properties:2767//2768// Size Compare Source Destination2769// Operator Src ? Size Type Sign Type Sign2770// -------- ------------ ------------------- ---------------------2771// TRUNC > Integer Any Integral Any2772// ZEXT < Integral Unsigned Integer Any2773// SEXT < Integral Signed Integer Any2774// FPTOUI n/a FloatPt n/a Integral Unsigned2775// FPTOSI n/a FloatPt n/a Integral Signed2776// UITOFP n/a Integral Unsigned FloatPt n/a2777// SITOFP n/a Integral Signed FloatPt n/a2778// FPTRUNC > FloatPt n/a FloatPt n/a2779// FPEXT < FloatPt n/a FloatPt n/a2780// PTRTOINT n/a Pointer n/a Integral Unsigned2781// INTTOPTR n/a Integral Unsigned Pointer n/a2782// BITCAST = FirstClass n/a FirstClass n/a2783// ADDRSPCST n/a Pointer n/a Pointer n/a2784//2785// NOTE: some transforms are safe, but we consider them to be non-profitable.2786// For example, we could merge "fptoui double to i32" + "zext i32 to i64",2787// into "fptoui double to i64", but this loses information about the range2788// of the produced value (we no longer know the top-part is all zeros).2789// Further this conversion is often much more expensive for typical hardware,2790// and causes issues when building libgcc. We disallow fptosi+sext for the2791// same reason.2792const unsigned numCastOps =2793Instruction::CastOpsEnd - Instruction::CastOpsBegin;2794static const uint8_t CastResults[numCastOps][numCastOps] = {2795// T F F U S F F P I B A -+2796// R Z S P P I I T P 2 N T S |2797// U E E 2 2 2 2 R E I T C C +- secondOp2798// N X X U S F F N X N 2 V V |2799// C T T I I P P C T T P T T -+2800{ 1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc -+2801{ 8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt |2802{ 8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt |2803{ 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI |2804{ 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI |2805{ 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP +- firstOp2806{ 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP |2807{ 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc |2808{ 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt |2809{ 1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt |2810{ 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr |2811{ 5, 5, 5, 0, 0, 5, 5, 0, 0,16, 5, 1,14}, // BitCast |2812{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+2813};28142815// TODO: This logic could be encoded into the table above and handled in the2816// switch below.2817// If either of the casts are a bitcast from scalar to vector, disallow the2818// merging. 
However, any pair of bitcasts are allowed.2819bool IsFirstBitcast = (firstOp == Instruction::BitCast);2820bool IsSecondBitcast = (secondOp == Instruction::BitCast);2821bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;28222823// Check if any of the casts convert scalars <-> vectors.2824if ((IsFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||2825(IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))2826if (!AreBothBitcasts)2827return 0;28282829int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]2830[secondOp-Instruction::CastOpsBegin];2831switch (ElimCase) {2832case 0:2833// Categorically disallowed.2834return 0;2835case 1:2836// Allowed, use first cast's opcode.2837return firstOp;2838case 2:2839// Allowed, use second cast's opcode.2840return secondOp;2841case 3:2842// No-op cast in second op implies firstOp as long as the DestTy2843// is integer and we are not converting between a vector and a2844// non-vector type.2845if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())2846return firstOp;2847return 0;2848case 4:2849// No-op cast in second op implies firstOp as long as the DestTy2850// matches MidTy.2851if (DstTy == MidTy)2852return firstOp;2853return 0;2854case 5:2855// No-op cast in first op implies secondOp as long as the SrcTy2856// is an integer.2857if (SrcTy->isIntegerTy())2858return secondOp;2859return 0;2860case 7: {2861// Disable inttoptr/ptrtoint optimization if enabled.2862if (DisableI2pP2iOpt)2863return 0;28642865// Cannot simplify if address spaces are different!2866if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())2867return 0;28682869unsigned MidSize = MidTy->getScalarSizeInBits();2870// We can still fold this without knowing the actual sizes as long we2871// know that the intermediate pointer is the largest possible2872// pointer size.2873// FIXME: Is this always true?2874if (MidSize == 64)2875return Instruction::BitCast;28762877// ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.2878if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)2879return 0;2880unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();2881if (MidSize >= PtrSize)2882return Instruction::BitCast;2883return 0;2884}2885case 8: {2886// ext, trunc -> bitcast, if the SrcTy and DstTy are the same2887// ext, trunc -> ext, if sizeof(SrcTy) < sizeof(DstTy)2888// ext, trunc -> trunc, if sizeof(SrcTy) > sizeof(DstTy)2889unsigned SrcSize = SrcTy->getScalarSizeInBits();2890unsigned DstSize = DstTy->getScalarSizeInBits();2891if (SrcTy == DstTy)2892return Instruction::BitCast;2893if (SrcSize < DstSize)2894return firstOp;2895if (SrcSize > DstSize)2896return secondOp;2897return 0;2898}2899case 9:2900// zext, sext -> zext, because sext can't sign extend after zext2901return Instruction::ZExt;2902case 11: {2903// inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize2904if (!MidIntPtrTy)2905return 0;2906unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();2907unsigned SrcSize = SrcTy->getScalarSizeInBits();2908unsigned DstSize = DstTy->getScalarSizeInBits();2909if (SrcSize <= PtrSize && SrcSize == DstSize)2910return Instruction::BitCast;2911return 0;2912}2913case 12:2914// addrspacecast, addrspacecast -> bitcast, if SrcAS == DstAS2915// addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS2916if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())2917return Instruction::AddrSpaceCast;2918return Instruction::BitCast;2919case 13:2920// FIXME: this state can be merged with (1), but the following 
assert2921// is useful to check the correcteness of the sequence due to semantic2922// change of bitcast.2923assert(2924SrcTy->isPtrOrPtrVectorTy() &&2925MidTy->isPtrOrPtrVectorTy() &&2926DstTy->isPtrOrPtrVectorTy() &&2927SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&2928MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&2929"Illegal addrspacecast, bitcast sequence!");2930// Allowed, use first cast's opcode2931return firstOp;2932case 14:2933// bitcast, addrspacecast -> addrspacecast2934return Instruction::AddrSpaceCast;2935case 15:2936// FIXME: this state can be merged with (1), but the following assert2937// is useful to check the correcteness of the sequence due to semantic2938// change of bitcast.2939assert(2940SrcTy->isIntOrIntVectorTy() &&2941MidTy->isPtrOrPtrVectorTy() &&2942DstTy->isPtrOrPtrVectorTy() &&2943MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&2944"Illegal inttoptr, bitcast sequence!");2945// Allowed, use first cast's opcode2946return firstOp;2947case 16:2948// FIXME: this state can be merged with (2), but the following assert2949// is useful to check the correcteness of the sequence due to semantic2950// change of bitcast.2951assert(2952SrcTy->isPtrOrPtrVectorTy() &&2953MidTy->isPtrOrPtrVectorTy() &&2954DstTy->isIntOrIntVectorTy() &&2955SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&2956"Illegal bitcast, ptrtoint sequence!");2957// Allowed, use second cast's opcode2958return secondOp;2959case 17:2960// (sitofp (zext x)) -> (uitofp x)2961return Instruction::UIToFP;2962case 99:2963// Cast combination can't happen (error in input). This is for all cases2964// where the MidTy is not the same for the two cast instructions.2965llvm_unreachable("Invalid Cast Combination");2966default:2967llvm_unreachable("Error in CastResults table!!!");2968}2969}29702971CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,2972const Twine &Name, InsertPosition InsertBefore) {2973assert(castIsValid(op, S, Ty) && "Invalid cast!");2974// Construct and return the appropriate CastInst subclass2975switch (op) {2976case Trunc: return new TruncInst (S, Ty, Name, InsertBefore);2977case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore);2978case SExt: return new SExtInst (S, Ty, Name, InsertBefore);2979case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore);2980case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore);2981case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore);2982case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);2983case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);2984case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);2985case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);2986case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);2987case BitCast:2988return new BitCastInst(S, Ty, Name, InsertBefore);2989case AddrSpaceCast:2990return new AddrSpaceCastInst(S, Ty, Name, InsertBefore);2991default:2992llvm_unreachable("Invalid opcode provided");2993}2994}29952996CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name,2997InsertPosition InsertBefore) {2998if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())2999return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);3000return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);3001}30023003CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty, const Twine &Name,3004InsertPosition InsertBefore) {3005if 
(S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())3006return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);3007return Create(Instruction::SExt, S, Ty, Name, InsertBefore);3008}30093010CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name,3011InsertPosition InsertBefore) {3012if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())3013return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);3014return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);3015}30163017/// Create a BitCast or a PtrToInt cast instruction3018CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty, const Twine &Name,3019InsertPosition InsertBefore) {3020assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");3021assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&3022"Invalid cast");3023assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");3024assert((!Ty->isVectorTy() ||3025cast<VectorType>(Ty)->getElementCount() ==3026cast<VectorType>(S->getType())->getElementCount()) &&3027"Invalid cast");30283029if (Ty->isIntOrIntVectorTy())3030return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);30313032return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);3033}30343035CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(3036Value *S, Type *Ty, const Twine &Name, InsertPosition InsertBefore) {3037assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");3038assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");30393040if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())3041return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);30423043return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);3044}30453046CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty,3047const Twine &Name,3048InsertPosition InsertBefore) {3049if (S->getType()->isPointerTy() && Ty->isIntegerTy())3050return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);3051if (S->getType()->isIntegerTy() && Ty->isPointerTy())3052return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);30533054return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);3055}30563057CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty, bool isSigned,3058const Twine &Name,3059InsertPosition InsertBefore) {3060assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&3061"Invalid integer cast");3062unsigned SrcBits = C->getType()->getScalarSizeInBits();3063unsigned DstBits = Ty->getScalarSizeInBits();3064Instruction::CastOps opcode =3065(SrcBits == DstBits ? Instruction::BitCast :3066(SrcBits > DstBits ? Instruction::Trunc :3067(isSigned ? Instruction::SExt : Instruction::ZExt)));3068return Create(opcode, C, Ty, Name, InsertBefore);3069}30703071CastInst *CastInst::CreateFPCast(Value *C, Type *Ty, const Twine &Name,3072InsertPosition InsertBefore) {3073assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&3074"Invalid cast");3075unsigned SrcBits = C->getType()->getScalarSizeInBits();3076unsigned DstBits = Ty->getScalarSizeInBits();3077assert((C->getType() == Ty || SrcBits != DstBits) && "Invalid cast");3078Instruction::CastOps opcode =3079(SrcBits == DstBits ? Instruction::BitCast :3080(SrcBits > DstBits ? 
Instruction::FPTrunc : Instruction::FPExt));
  return Create(opcode, C, Ty, Name, InsertBefore);
}

bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
  if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
    return false;

  if (SrcTy == DestTy)
    return true;

  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
    if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
      if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
        // An element by element cast. Valid if casting the elements is valid.
        SrcTy = SrcVecTy->getElementType();
        DestTy = DestVecTy->getElementType();
      }
    }
  }

  if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
    if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
      return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
    }
  }

  TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
  TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr

  // Could still have vectors of pointers if the number of elements doesn't
  // match.
  if (SrcBits.getKnownMinValue() == 0 || DestBits.getKnownMinValue() == 0)
    return false;

  if (SrcBits != DestBits)
    return false;

  if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy())
    return false;

  return true;
}

bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy,
                                          const DataLayout &DL) {
  // ptrtoint and inttoptr are not allowed on non-integral pointers.
  if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
    if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
      return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
              !DL.isNonIntegralPointerType(PtrTy));
  if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
    if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
      return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
              !DL.isNonIntegralPointerType(PtrTy));

  return isBitCastable(SrcTy, DestTy);
}

// Provide a way to get a "cast" where the cast opcode is inferred from the
// types and size of the operand. This is, in essence, a parallel of the
// logic in the castIsValid function below. The following axiom should hold:
//   castIsValid(getCastOpcode(Val, Ty), Val, Ty) == true
// In other words, this produces a "correct" casting opcode for the arguments
// passed to it.
Instruction::CastOps
CastInst::getCastOpcode(
  const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
  Type *SrcTy = Src->getType();

  assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
         "Only first class types are castable!");

  if (SrcTy == DestTy)
    return BitCast;

  // FIXME: Check address space sizes here.
  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
    if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
      if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
        // An element by element cast. Find the appropriate opcode based on the
        // element types.
        SrcTy = SrcVecTy->getElementType();
        DestTy = DestVecTy->getElementType();
      }

  // Get the bit sizes; we'll need these.
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
  unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr

  // Run through the possibilities ...
  if (DestTy->isIntegerTy()) {                      // Casting to integral
    if (SrcTy->isIntegerTy()) {                     // Casting from integral
      if (DestBits < SrcBits)
        return Trunc;                               // int -> smaller int
      else if (DestBits > SrcBits) {                // it's an extension
        if (SrcIsSigned)
          return SExt;                              // signed -> SEXT
        else
          return ZExt;                              // unsigned -> ZEXT
      } else {
        return BitCast;                             // Same size, no-op cast
      }
    } else if (SrcTy->isFloatingPointTy()) {        // Casting from floating pt
      if (DestIsSigned)
        return FPToSI;                              // FP -> sint
      else
        return FPToUI;                              // FP -> uint
    } else if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits &&
             "Casting vector to integer of different width");
      return BitCast;                               // Same size, no-op cast
    } else {
      assert(SrcTy->isPointerTy() &&
             "Casting from a value that is not first-class type");
      return PtrToInt;                              // ptr -> int
    }
  } else if (DestTy->isFloatingPointTy()) {         // Casting to floating pt
    if (SrcTy->isIntegerTy()) {                     // Casting from integral
      if (SrcIsSigned)
        return SIToFP;                              // sint -> FP
      else
        return UIToFP;                              // uint -> FP
    } else if (SrcTy->isFloatingPointTy()) {        // Casting from floating pt
      if (DestBits < SrcBits) {
        return FPTrunc;                             // FP -> smaller FP
      } else if (DestBits > SrcBits) {
        return FPExt;                               // FP -> larger FP
      } else {
        return BitCast;                             // Same size, no-op cast
      }
    } else if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits &&
             "Casting vector to floating point of different width");
      return BitCast;                               // Same size, no-op cast
    }
    llvm_unreachable("Casting pointer or non-first class to float");
  } else if (DestTy->isVectorTy()) {
    assert(DestBits == SrcBits &&
           "Illegal cast to vector (wrong type or size)");
    return BitCast;
  } else if (DestTy->isPointerTy()) {
    if (SrcTy->isPointerTy()) {
      if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
        return AddrSpaceCast;
      return BitCast;                               // ptr -> ptr
    } else if (SrcTy->isIntegerTy()) {
      return IntToPtr;                              // int -> ptr
    }
    llvm_unreachable("Casting pointer to other than pointer or int");
  } else if (DestTy->isX86_MMXTy()) {
    if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");
      return BitCast;                               // 64-bit vector to MMX
    }
    llvm_unreachable("Illegal cast to X86_MMX");
  }
  llvm_unreachable("Casting to type that is not first-class");
}

//===----------------------------------------------------------------------===//
// CastInst SubClass Constructors
//===----------------------------------------------------------------------===//

/// Check that the construction parameters for a CastInst are correct. This
/// could be broken out into the separate constructors but it is useful to have
/// it in one place and to eliminate the redundant code for getting the sizes
/// of the types involved.
bool
CastInst::castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy) {
  if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
      SrcTy->isAggregateType() || DstTy->isAggregateType())
    return false;

  // Get the size of the types in bits, and whether we are dealing
  // with vector types; we'll need this later.
  bool SrcIsVec = isa<VectorType>(SrcTy);
  bool DstIsVec = isa<VectorType>(DstTy);
  unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
  unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();

  // If these are vector types, get the lengths of the vectors (using zero for
  // scalar types means that checking that vector lengths match also checks
  // that scalars are not being converted to vectors or vectors to scalars).
  ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
                                : ElementCount::getFixed(0);
  ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
                                : ElementCount::getFixed(0);

  // Switch on the opcode provided.
  switch (op) {
  default: return false; // This is an input error
  case Instruction::Trunc:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
  case Instruction::ZExt:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  case Instruction::SExt:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  case Instruction::FPTrunc:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
  case Instruction::FPExt:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  case Instruction::UIToFP:
  case Instruction::SIToFP:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcEC == DstEC;
  case Instruction::FPToUI:
  case Instruction::FPToSI:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC;
  case Instruction::PtrToInt:
    if (SrcEC != DstEC)
      return false;
    return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
  case Instruction::IntToPtr:
    if (SrcEC != DstEC)
      return false;
    return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
  case Instruction::BitCast: {
    PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
    PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());

    // BitCast implies a no-op cast of type only. 
No bits change.3308// However, you can't cast pointers to anything but pointers.3309if (!SrcPtrTy != !DstPtrTy)3310return false;33113312// For non-pointer cases, the cast is okay if the source and destination bit3313// widths are identical.3314if (!SrcPtrTy)3315return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();33163317// If both are pointers then the address spaces must match.3318if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())3319return false;33203321// A vector of pointers must have the same number of elements.3322if (SrcIsVec && DstIsVec)3323return SrcEC == DstEC;3324if (SrcIsVec)3325return SrcEC == ElementCount::getFixed(1);3326if (DstIsVec)3327return DstEC == ElementCount::getFixed(1);33283329return true;3330}3331case Instruction::AddrSpaceCast: {3332PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());3333if (!SrcPtrTy)3334return false;33353336PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());3337if (!DstPtrTy)3338return false;33393340if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())3341return false;33423343return SrcEC == DstEC;3344}3345}3346}33473348TruncInst::TruncInst(Value *S, Type *Ty, const Twine &Name,3349InsertPosition InsertBefore)3350: CastInst(Ty, Trunc, S, Name, InsertBefore) {3351assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");3352}33533354ZExtInst::ZExtInst(Value *S, Type *Ty, const Twine &Name,3355InsertPosition InsertBefore)3356: CastInst(Ty, ZExt, S, Name, InsertBefore) {3357assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");3358}33593360SExtInst::SExtInst(Value *S, Type *Ty, const Twine &Name,3361InsertPosition InsertBefore)3362: CastInst(Ty, SExt, S, Name, InsertBefore) {3363assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");3364}33653366FPTruncInst::FPTruncInst(Value *S, Type *Ty, const Twine &Name,3367InsertPosition InsertBefore)3368: CastInst(Ty, FPTrunc, S, Name, InsertBefore) {3369assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");3370}33713372FPExtInst::FPExtInst(Value *S, Type *Ty, const Twine &Name,3373InsertPosition InsertBefore)3374: CastInst(Ty, FPExt, S, Name, InsertBefore) {3375assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");3376}33773378UIToFPInst::UIToFPInst(Value *S, Type *Ty, const Twine &Name,3379InsertPosition InsertBefore)3380: CastInst(Ty, UIToFP, S, Name, InsertBefore) {3381assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");3382}33833384SIToFPInst::SIToFPInst(Value *S, Type *Ty, const Twine &Name,3385InsertPosition InsertBefore)3386: CastInst(Ty, SIToFP, S, Name, InsertBefore) {3387assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");3388}33893390FPToUIInst::FPToUIInst(Value *S, Type *Ty, const Twine &Name,3391InsertPosition InsertBefore)3392: CastInst(Ty, FPToUI, S, Name, InsertBefore) {3393assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");3394}33953396FPToSIInst::FPToSIInst(Value *S, Type *Ty, const Twine &Name,3397InsertPosition InsertBefore)3398: CastInst(Ty, FPToSI, S, Name, InsertBefore) {3399assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");3400}34013402PtrToIntInst::PtrToIntInst(Value *S, Type *Ty, const Twine &Name,3403InsertPosition InsertBefore)3404: CastInst(Ty, PtrToInt, S, Name, InsertBefore) {3405assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");3406}34073408IntToPtrInst::IntToPtrInst(Value *S, Type *Ty, const Twine &Name,3409InsertPosition InsertBefore)3410: CastInst(Ty, IntToPtr, S, Name, InsertBefore) {3411assert(castIsValid(getOpcode(), S, Ty) 
&& "Illegal IntToPtr");3412}34133414BitCastInst::BitCastInst(Value *S, Type *Ty, const Twine &Name,3415InsertPosition InsertBefore)3416: CastInst(Ty, BitCast, S, Name, InsertBefore) {3417assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");3418}34193420AddrSpaceCastInst::AddrSpaceCastInst(Value *S, Type *Ty, const Twine &Name,3421InsertPosition InsertBefore)3422: CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {3423assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");3424}34253426//===----------------------------------------------------------------------===//3427// CmpInst Classes3428//===----------------------------------------------------------------------===//34293430CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,3431Value *RHS, const Twine &Name, InsertPosition InsertBefore,3432Instruction *FlagsSource)3433: Instruction(ty, op, OperandTraits<CmpInst>::op_begin(this),3434OperandTraits<CmpInst>::operands(this), InsertBefore) {3435Op<0>() = LHS;3436Op<1>() = RHS;3437setPredicate((Predicate)predicate);3438setName(Name);3439if (FlagsSource)3440copyIRFlags(FlagsSource);3441}34423443CmpInst *CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,3444const Twine &Name, InsertPosition InsertBefore) {3445if (Op == Instruction::ICmp) {3446if (InsertBefore.isValid())3447return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),3448S1, S2, Name);3449else3450return new ICmpInst(CmpInst::Predicate(predicate),3451S1, S2, Name);3452}34533454if (InsertBefore.isValid())3455return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),3456S1, S2, Name);3457else3458return new FCmpInst(CmpInst::Predicate(predicate),3459S1, S2, Name);3460}34613462CmpInst *CmpInst::CreateWithCopiedFlags(OtherOps Op, Predicate Pred, Value *S1,3463Value *S2,3464const Instruction *FlagsSource,3465const Twine &Name,3466InsertPosition InsertBefore) {3467CmpInst *Inst = Create(Op, Pred, S1, S2, Name, InsertBefore);3468Inst->copyIRFlags(FlagsSource);3469return Inst;3470}34713472void CmpInst::swapOperands() {3473if (ICmpInst *IC = dyn_cast<ICmpInst>(this))3474IC->swapOperands();3475else3476cast<FCmpInst>(this)->swapOperands();3477}34783479bool CmpInst::isCommutative() const {3480if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))3481return IC->isCommutative();3482return cast<FCmpInst>(this)->isCommutative();3483}34843485bool CmpInst::isEquality(Predicate P) {3486if (ICmpInst::isIntPredicate(P))3487return ICmpInst::isEquality(P);3488if (FCmpInst::isFPPredicate(P))3489return FCmpInst::isEquality(P);3490llvm_unreachable("Unsupported predicate kind");3491}34923493CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) {3494switch (pred) {3495default: llvm_unreachable("Unknown cmp predicate!");3496case ICMP_EQ: return ICMP_NE;3497case ICMP_NE: return ICMP_EQ;3498case ICMP_UGT: return ICMP_ULE;3499case ICMP_ULT: return ICMP_UGE;3500case ICMP_UGE: return ICMP_ULT;3501case ICMP_ULE: return ICMP_UGT;3502case ICMP_SGT: return ICMP_SLE;3503case ICMP_SLT: return ICMP_SGE;3504case ICMP_SGE: return ICMP_SLT;3505case ICMP_SLE: return ICMP_SGT;35063507case FCMP_OEQ: return FCMP_UNE;3508case FCMP_ONE: return FCMP_UEQ;3509case FCMP_OGT: return FCMP_ULE;3510case FCMP_OLT: return FCMP_UGE;3511case FCMP_OGE: return FCMP_ULT;3512case FCMP_OLE: return FCMP_UGT;3513case FCMP_UEQ: return FCMP_ONE;3514case FCMP_UNE: return FCMP_OEQ;3515case FCMP_UGT: return FCMP_OLE;3516case FCMP_ULT: return FCMP_OGE;3517case FCMP_UGE: return FCMP_OLT;3518case FCMP_ULE: return FCMP_OGT;3519case 
FCMP_ORD: return FCMP_UNO;3520case FCMP_UNO: return FCMP_ORD;3521case FCMP_TRUE: return FCMP_FALSE;3522case FCMP_FALSE: return FCMP_TRUE;3523}3524}35253526StringRef CmpInst::getPredicateName(Predicate Pred) {3527switch (Pred) {3528default: return "unknown";3529case FCmpInst::FCMP_FALSE: return "false";3530case FCmpInst::FCMP_OEQ: return "oeq";3531case FCmpInst::FCMP_OGT: return "ogt";3532case FCmpInst::FCMP_OGE: return "oge";3533case FCmpInst::FCMP_OLT: return "olt";3534case FCmpInst::FCMP_OLE: return "ole";3535case FCmpInst::FCMP_ONE: return "one";3536case FCmpInst::FCMP_ORD: return "ord";3537case FCmpInst::FCMP_UNO: return "uno";3538case FCmpInst::FCMP_UEQ: return "ueq";3539case FCmpInst::FCMP_UGT: return "ugt";3540case FCmpInst::FCMP_UGE: return "uge";3541case FCmpInst::FCMP_ULT: return "ult";3542case FCmpInst::FCMP_ULE: return "ule";3543case FCmpInst::FCMP_UNE: return "une";3544case FCmpInst::FCMP_TRUE: return "true";3545case ICmpInst::ICMP_EQ: return "eq";3546case ICmpInst::ICMP_NE: return "ne";3547case ICmpInst::ICMP_SGT: return "sgt";3548case ICmpInst::ICMP_SGE: return "sge";3549case ICmpInst::ICMP_SLT: return "slt";3550case ICmpInst::ICMP_SLE: return "sle";3551case ICmpInst::ICMP_UGT: return "ugt";3552case ICmpInst::ICMP_UGE: return "uge";3553case ICmpInst::ICMP_ULT: return "ult";3554case ICmpInst::ICMP_ULE: return "ule";3555}3556}35573558raw_ostream &llvm::operator<<(raw_ostream &OS, CmpInst::Predicate Pred) {3559OS << CmpInst::getPredicateName(Pred);3560return OS;3561}35623563ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {3564switch (pred) {3565default: llvm_unreachable("Unknown icmp predicate!");3566case ICMP_EQ: case ICMP_NE:3567case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:3568return pred;3569case ICMP_UGT: return ICMP_SGT;3570case ICMP_ULT: return ICMP_SLT;3571case ICMP_UGE: return ICMP_SGE;3572case ICMP_ULE: return ICMP_SLE;3573}3574}35753576ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) {3577switch (pred) {3578default: llvm_unreachable("Unknown icmp predicate!");3579case ICMP_EQ: case ICMP_NE:3580case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:3581return pred;3582case ICMP_SGT: return ICMP_UGT;3583case ICMP_SLT: return ICMP_ULT;3584case ICMP_SGE: return ICMP_UGE;3585case ICMP_SLE: return ICMP_ULE;3586}3587}35883589CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) {3590switch (pred) {3591default: llvm_unreachable("Unknown cmp predicate!");3592case ICMP_EQ: case ICMP_NE:3593return pred;3594case ICMP_SGT: return ICMP_SLT;3595case ICMP_SLT: return ICMP_SGT;3596case ICMP_SGE: return ICMP_SLE;3597case ICMP_SLE: return ICMP_SGE;3598case ICMP_UGT: return ICMP_ULT;3599case ICMP_ULT: return ICMP_UGT;3600case ICMP_UGE: return ICMP_ULE;3601case ICMP_ULE: return ICMP_UGE;36023603case FCMP_FALSE: case FCMP_TRUE:3604case FCMP_OEQ: case FCMP_ONE:3605case FCMP_UEQ: case FCMP_UNE:3606case FCMP_ORD: case FCMP_UNO:3607return pred;3608case FCMP_OGT: return FCMP_OLT;3609case FCMP_OLT: return FCMP_OGT;3610case FCMP_OGE: return FCMP_OLE;3611case FCMP_OLE: return FCMP_OGE;3612case FCMP_UGT: return FCMP_ULT;3613case FCMP_ULT: return FCMP_UGT;3614case FCMP_UGE: return FCMP_ULE;3615case FCMP_ULE: return FCMP_UGE;3616}3617}36183619bool CmpInst::isNonStrictPredicate(Predicate pred) {3620switch (pred) {3621case ICMP_SGE:3622case ICMP_SLE:3623case ICMP_UGE:3624case ICMP_ULE:3625case FCMP_OGE:3626case FCMP_OLE:3627case FCMP_UGE:3628case FCMP_ULE:3629return true;3630default:3631return false;3632}3633}36343635bool 
CmpInst::isStrictPredicate(Predicate pred) {3636switch (pred) {3637case ICMP_SGT:3638case ICMP_SLT:3639case ICMP_UGT:3640case ICMP_ULT:3641case FCMP_OGT:3642case FCMP_OLT:3643case FCMP_UGT:3644case FCMP_ULT:3645return true;3646default:3647return false;3648}3649}36503651CmpInst::Predicate CmpInst::getStrictPredicate(Predicate pred) {3652switch (pred) {3653case ICMP_SGE:3654return ICMP_SGT;3655case ICMP_SLE:3656return ICMP_SLT;3657case ICMP_UGE:3658return ICMP_UGT;3659case ICMP_ULE:3660return ICMP_ULT;3661case FCMP_OGE:3662return FCMP_OGT;3663case FCMP_OLE:3664return FCMP_OLT;3665case FCMP_UGE:3666return FCMP_UGT;3667case FCMP_ULE:3668return FCMP_ULT;3669default:3670return pred;3671}3672}36733674CmpInst::Predicate CmpInst::getNonStrictPredicate(Predicate pred) {3675switch (pred) {3676case ICMP_SGT:3677return ICMP_SGE;3678case ICMP_SLT:3679return ICMP_SLE;3680case ICMP_UGT:3681return ICMP_UGE;3682case ICMP_ULT:3683return ICMP_ULE;3684case FCMP_OGT:3685return FCMP_OGE;3686case FCMP_OLT:3687return FCMP_OLE;3688case FCMP_UGT:3689return FCMP_UGE;3690case FCMP_ULT:3691return FCMP_ULE;3692default:3693return pred;3694}3695}36963697CmpInst::Predicate CmpInst::getFlippedStrictnessPredicate(Predicate pred) {3698assert(CmpInst::isRelational(pred) && "Call only with relational predicate!");36993700if (isStrictPredicate(pred))3701return getNonStrictPredicate(pred);3702if (isNonStrictPredicate(pred))3703return getStrictPredicate(pred);37043705llvm_unreachable("Unknown predicate!");3706}37073708CmpInst::Predicate CmpInst::getSignedPredicate(Predicate pred) {3709assert(CmpInst::isUnsigned(pred) && "Call only with unsigned predicates!");37103711switch (pred) {3712default:3713llvm_unreachable("Unknown predicate!");3714case CmpInst::ICMP_ULT:3715return CmpInst::ICMP_SLT;3716case CmpInst::ICMP_ULE:3717return CmpInst::ICMP_SLE;3718case CmpInst::ICMP_UGT:3719return CmpInst::ICMP_SGT;3720case CmpInst::ICMP_UGE:3721return CmpInst::ICMP_SGE;3722}3723}37243725CmpInst::Predicate CmpInst::getUnsignedPredicate(Predicate pred) {3726assert(CmpInst::isSigned(pred) && "Call only with signed predicates!");37273728switch (pred) {3729default:3730llvm_unreachable("Unknown predicate!");3731case CmpInst::ICMP_SLT:3732return CmpInst::ICMP_ULT;3733case CmpInst::ICMP_SLE:3734return CmpInst::ICMP_ULE;3735case CmpInst::ICMP_SGT:3736return CmpInst::ICMP_UGT;3737case CmpInst::ICMP_SGE:3738return CmpInst::ICMP_UGE;3739}3740}37413742bool CmpInst::isUnsigned(Predicate predicate) {3743switch (predicate) {3744default: return false;3745case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT:3746case ICmpInst::ICMP_UGE: return true;3747}3748}37493750bool CmpInst::isSigned(Predicate predicate) {3751switch (predicate) {3752default: return false;3753case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT:3754case ICmpInst::ICMP_SGE: return true;3755}3756}37573758bool ICmpInst::compare(const APInt &LHS, const APInt &RHS,3759ICmpInst::Predicate Pred) {3760assert(ICmpInst::isIntPredicate(Pred) && "Only for integer predicates!");3761switch (Pred) {3762case ICmpInst::Predicate::ICMP_EQ:3763return LHS.eq(RHS);3764case ICmpInst::Predicate::ICMP_NE:3765return LHS.ne(RHS);3766case ICmpInst::Predicate::ICMP_UGT:3767return LHS.ugt(RHS);3768case ICmpInst::Predicate::ICMP_UGE:3769return LHS.uge(RHS);3770case ICmpInst::Predicate::ICMP_ULT:3771return LHS.ult(RHS);3772case ICmpInst::Predicate::ICMP_ULE:3773return LHS.ule(RHS);3774case ICmpInst::Predicate::ICMP_SGT:3775return LHS.sgt(RHS);3776case 
ICmpInst::Predicate::ICMP_SGE:3777return LHS.sge(RHS);3778case ICmpInst::Predicate::ICMP_SLT:3779return LHS.slt(RHS);3780case ICmpInst::Predicate::ICMP_SLE:3781return LHS.sle(RHS);3782default:3783llvm_unreachable("Unexpected non-integer predicate.");3784};3785}37863787bool FCmpInst::compare(const APFloat &LHS, const APFloat &RHS,3788FCmpInst::Predicate Pred) {3789APFloat::cmpResult R = LHS.compare(RHS);3790switch (Pred) {3791default:3792llvm_unreachable("Invalid FCmp Predicate");3793case FCmpInst::FCMP_FALSE:3794return false;3795case FCmpInst::FCMP_TRUE:3796return true;3797case FCmpInst::FCMP_UNO:3798return R == APFloat::cmpUnordered;3799case FCmpInst::FCMP_ORD:3800return R != APFloat::cmpUnordered;3801case FCmpInst::FCMP_UEQ:3802return R == APFloat::cmpUnordered || R == APFloat::cmpEqual;3803case FCmpInst::FCMP_OEQ:3804return R == APFloat::cmpEqual;3805case FCmpInst::FCMP_UNE:3806return R != APFloat::cmpEqual;3807case FCmpInst::FCMP_ONE:3808return R == APFloat::cmpLessThan || R == APFloat::cmpGreaterThan;3809case FCmpInst::FCMP_ULT:3810return R == APFloat::cmpUnordered || R == APFloat::cmpLessThan;3811case FCmpInst::FCMP_OLT:3812return R == APFloat::cmpLessThan;3813case FCmpInst::FCMP_UGT:3814return R == APFloat::cmpUnordered || R == APFloat::cmpGreaterThan;3815case FCmpInst::FCMP_OGT:3816return R == APFloat::cmpGreaterThan;3817case FCmpInst::FCMP_ULE:3818return R != APFloat::cmpGreaterThan;3819case FCmpInst::FCMP_OLE:3820return R == APFloat::cmpLessThan || R == APFloat::cmpEqual;3821case FCmpInst::FCMP_UGE:3822return R != APFloat::cmpLessThan;3823case FCmpInst::FCMP_OGE:3824return R == APFloat::cmpGreaterThan || R == APFloat::cmpEqual;3825}3826}38273828CmpInst::Predicate CmpInst::getFlippedSignednessPredicate(Predicate pred) {3829assert(CmpInst::isRelational(pred) &&3830"Call only with non-equality predicates!");38313832if (isSigned(pred))3833return getUnsignedPredicate(pred);3834if (isUnsigned(pred))3835return getSignedPredicate(pred);38363837llvm_unreachable("Unknown predicate!");3838}38393840bool CmpInst::isOrdered(Predicate predicate) {3841switch (predicate) {3842default: return false;3843case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT:3844case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE:3845case FCmpInst::FCMP_ORD: return true;3846}3847}38483849bool CmpInst::isUnordered(Predicate predicate) {3850switch (predicate) {3851default: return false;3852case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT:3853case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE:3854case FCmpInst::FCMP_UNO: return true;3855}3856}38573858bool CmpInst::isTrueWhenEqual(Predicate predicate) {3859switch(predicate) {3860default: return false;3861case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:3862case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;3863}3864}38653866bool CmpInst::isFalseWhenEqual(Predicate predicate) {3867switch(predicate) {3868case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:3869case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;3870default: return false;3871}3872}38733874bool CmpInst::isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2) {3875// If the predicates match, then we know the first condition implies the3876// second is true.3877if (Pred1 == Pred2)3878return true;38793880switch (Pred1) {3881default:3882break;3883case ICMP_EQ:3884// A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are 
true.
    return Pred2 == ICMP_UGE || Pred2 == ICMP_ULE || Pred2 == ICMP_SGE ||
           Pred2 == ICMP_SLE;
  case ICMP_UGT: // A >u B implies A != B and A >=u B are true.
    return Pred2 == ICMP_NE || Pred2 == ICMP_UGE;
  case ICMP_ULT: // A <u B implies A != B and A <=u B are true.
    return Pred2 == ICMP_NE || Pred2 == ICMP_ULE;
  case ICMP_SGT: // A >s B implies A != B and A >=s B are true.
    return Pred2 == ICMP_NE || Pred2 == ICMP_SGE;
  case ICMP_SLT: // A <s B implies A != B and A <=s B are true.
    return Pred2 == ICMP_NE || Pred2 == ICMP_SLE;
  }
  return false;
}

bool CmpInst::isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2) {
  return isImpliedTrueByMatchingCmp(Pred1, getInversePredicate(Pred2));
}

//===----------------------------------------------------------------------===//
// SwitchInst Implementation
//===----------------------------------------------------------------------===//

void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
  assert(Value && Default && NumReserved);
  ReservedSpace = NumReserved;
  setNumHungOffUseOperands(2);
  allocHungoffUses(ReservedSpace);

  Op<0>() = Value;
  Op<1>() = Default;
}

/// SwitchInst ctor - Create a new switch instruction, specifying a value to
/// switch on and a default destination. The number of additional cases can
/// be specified here to make memory allocation more efficient. This
/// constructor can also autoinsert before another instruction.
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
                       InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
                  nullptr, 0, InsertBefore) {
  init(Value, Default, 2+NumCases*2);
}

SwitchInst::SwitchInst(const SwitchInst &SI)
    : Instruction(SI.getType(), Instruction::Switch, nullptr, 0) {
  init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
  setNumHungOffUseOperands(SI.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = SI.getOperandList();
  for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
    OL[i] = InOL[i];
    OL[i+1] = InOL[i+1];
  }
  SubclassOptionalData = SI.SubclassOptionalData;
}

/// addCase - Add an entry to the switch instruction.
///
void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) {
  unsigned NewCaseIdx = getNumCases();
  unsigned OpNo = getNumOperands();
  if (OpNo+2 > ReservedSpace)
    growOperands(); // Get more space!
  // Initialize some new operands.
  assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(OpNo+2);
  CaseHandle Case(this, NewCaseIdx);
  Case.setValue(OnVal);
  Case.setSuccessor(Dest);
}

/// removeCase - This method removes the specified case and its successor
/// from the switch instruction.
SwitchInst::CaseIt SwitchInst::removeCase(CaseIt I) {
  unsigned idx = I->getCaseIndex();

  assert(2 + idx*2 < getNumOperands() && "Case index out of range!");

  unsigned NumOps = getNumOperands();
  Use *OL = getOperandList();

  // Overwrite this case with the end of the list.
  if (2 + (idx + 1) * 2 != NumOps) {
    OL[2 + idx * 2] = OL[NumOps - 2];
    OL[2 + idx * 2 + 1] = OL[NumOps - 1];
  }

  // Nuke the last value.
  OL[NumOps-2].set(nullptr);
  OL[NumOps-2+1].set(nullptr);
  setNumHungOffUseOperands(NumOps-2);

  return CaseIt(this, idx);
}

/// growOperands - This grows the operand list in response to a push_back
/// style of operation. It grows the number of operands by a factor of three.
///
void SwitchInst::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e*3;

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace);
}

MDNode *SwitchInstProfUpdateWrapper::buildProfBranchWeightsMD() {
  assert(Changed && "called only if metadata has changed");

  if (!Weights)
    return nullptr;

  assert(SI.getNumSuccessors() == Weights->size() &&
         "num of prof branch_weights must accord with num of successors");

  bool AllZeroes = all_of(*Weights, [](uint32_t W) { return W == 0; });

  if (AllZeroes || Weights->size() < 2)
    return nullptr;

  return MDBuilder(SI.getParent()->getContext()).createBranchWeights(*Weights);
}

void SwitchInstProfUpdateWrapper::init() {
  MDNode *ProfileData = getBranchWeightMDNode(SI);
  if (!ProfileData)
    return;

  if (getNumBranchWeights(*ProfileData) != SI.getNumSuccessors()) {
    llvm_unreachable("number of prof branch_weights metadata operands does "
                     "not correspond to number of successors");
  }

  SmallVector<uint32_t, 8> Weights;
  if (!extractBranchWeights(ProfileData, Weights))
    return;
  this->Weights = std::move(Weights);
}

SwitchInst::CaseIt
SwitchInstProfUpdateWrapper::removeCase(SwitchInst::CaseIt I) {
  if (Weights) {
    assert(SI.getNumSuccessors() == Weights->size() &&
           "num of prof branch_weights must accord with num of successors");
    Changed = true;
    // Copy the last case to the place of the removed one and shrink.
    // This is tightly coupled with the way SwitchInst::removeCase() removes
    // the cases in SwitchInst::removeCase(CaseIt).
    (*Weights)[I->getCaseIndex() + 1] = Weights->back();
    Weights->pop_back();
  }
  return SI.removeCase(I);
}

void SwitchInstProfUpdateWrapper::addCase(
    ConstantInt *OnVal, BasicBlock *Dest,
    SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
  SI.addCase(OnVal, Dest);

  if (!Weights && W && *W) {
    Changed = true;
    Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
    (*Weights)[SI.getNumSuccessors() - 1] = *W;
  } else if (Weights) {
    Changed = true;
    Weights->push_back(W.value_or(0));
  }
  if (Weights)
    assert(SI.getNumSuccessors() == Weights->size() &&
           "num of prof branch_weights must accord with num of successors");
}

Instruction::InstListType::iterator
SwitchInstProfUpdateWrapper::eraseFromParent() {
  // Instruction is erased. 
Mark as unchanged to not touch it in the destructor.4060Changed = false;4061if (Weights)4062Weights->resize(0);4063return SI.eraseFromParent();4064}40654066SwitchInstProfUpdateWrapper::CaseWeightOpt4067SwitchInstProfUpdateWrapper::getSuccessorWeight(unsigned idx) {4068if (!Weights)4069return std::nullopt;4070return (*Weights)[idx];4071}40724073void SwitchInstProfUpdateWrapper::setSuccessorWeight(4074unsigned idx, SwitchInstProfUpdateWrapper::CaseWeightOpt W) {4075if (!W)4076return;40774078if (!Weights && *W)4079Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);40804081if (Weights) {4082auto &OldW = (*Weights)[idx];4083if (*W != OldW) {4084Changed = true;4085OldW = *W;4086}4087}4088}40894090SwitchInstProfUpdateWrapper::CaseWeightOpt4091SwitchInstProfUpdateWrapper::getSuccessorWeight(const SwitchInst &SI,4092unsigned idx) {4093if (MDNode *ProfileData = getBranchWeightMDNode(SI))4094if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)4095return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1))4096->getValue()4097.getZExtValue();40984099return std::nullopt;4100}41014102//===----------------------------------------------------------------------===//4103// IndirectBrInst Implementation4104//===----------------------------------------------------------------------===//41054106void IndirectBrInst::init(Value *Address, unsigned NumDests) {4107assert(Address && Address->getType()->isPointerTy() &&4108"Address of indirectbr must be a pointer");4109ReservedSpace = 1+NumDests;4110setNumHungOffUseOperands(1);4111allocHungoffUses(ReservedSpace);41124113Op<0>() = Address;4114}411541164117/// growOperands - grow operands - This grows the operand list in response4118/// to a push_back style of operation. This grows the number of ops by 2 times.4119///4120void IndirectBrInst::growOperands() {4121unsigned e = getNumOperands();4122unsigned NumOps = e*2;41234124ReservedSpace = NumOps;4125growHungoffUses(ReservedSpace);4126}41274128IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,4129InsertPosition InsertBefore)4130: Instruction(Type::getVoidTy(Address->getContext()),4131Instruction::IndirectBr, nullptr, 0, InsertBefore) {4132init(Address, NumCases);4133}41344135IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)4136: Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,4137nullptr, IBI.getNumOperands()) {4138allocHungoffUses(IBI.getNumOperands());4139Use *OL = getOperandList();4140const Use *InOL = IBI.getOperandList();4141for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)4142OL[i] = InOL[i];4143SubclassOptionalData = IBI.SubclassOptionalData;4144}41454146/// addDestination - Add a destination.4147///4148void IndirectBrInst::addDestination(BasicBlock *DestBB) {4149unsigned OpNo = getNumOperands();4150if (OpNo+1 > ReservedSpace)4151growOperands(); // Get more space!4152// Initialize some new operands.4153assert(OpNo < ReservedSpace && "Growing didn't work!");4154setNumHungOffUseOperands(OpNo+1);4155getOperandList()[OpNo] = DestBB;4156}41574158/// removeDestination - This method removes the specified successor from the4159/// indirectbr instruction.4160void IndirectBrInst::removeDestination(unsigned idx) {4161assert(idx < getNumOperands()-1 && "Successor index out of range!");41624163unsigned NumOps = getNumOperands();4164Use *OL = getOperandList();41654166// Replace this value with the last one.4167OL[idx+1] = OL[NumOps-1];41684169// Nuke the last 
value.4170OL[NumOps-1].set(nullptr);4171setNumHungOffUseOperands(NumOps-1);4172}41734174//===----------------------------------------------------------------------===//4175// FreezeInst Implementation4176//===----------------------------------------------------------------------===//41774178FreezeInst::FreezeInst(Value *S, const Twine &Name, InsertPosition InsertBefore)4179: UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {4180setName(Name);4181}41824183//===----------------------------------------------------------------------===//4184// cloneImpl() implementations4185//===----------------------------------------------------------------------===//41864187// Define these methods here so vtables don't get emitted into every translation4188// unit that uses these classes.41894190GetElementPtrInst *GetElementPtrInst::cloneImpl() const {4191return new (getNumOperands()) GetElementPtrInst(*this);4192}41934194UnaryOperator *UnaryOperator::cloneImpl() const {4195return Create(getOpcode(), Op<0>());4196}41974198BinaryOperator *BinaryOperator::cloneImpl() const {4199return Create(getOpcode(), Op<0>(), Op<1>());4200}42014202FCmpInst *FCmpInst::cloneImpl() const {4203return new FCmpInst(getPredicate(), Op<0>(), Op<1>());4204}42054206ICmpInst *ICmpInst::cloneImpl() const {4207return new ICmpInst(getPredicate(), Op<0>(), Op<1>());4208}42094210ExtractValueInst *ExtractValueInst::cloneImpl() const {4211return new ExtractValueInst(*this);4212}42134214InsertValueInst *InsertValueInst::cloneImpl() const {4215return new InsertValueInst(*this);4216}42174218AllocaInst *AllocaInst::cloneImpl() const {4219AllocaInst *Result = new AllocaInst(getAllocatedType(), getAddressSpace(),4220getOperand(0), getAlign());4221Result->setUsedWithInAlloca(isUsedWithInAlloca());4222Result->setSwiftError(isSwiftError());4223return Result;4224}42254226LoadInst *LoadInst::cloneImpl() const {4227return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),4228getAlign(), getOrdering(), getSyncScopeID());4229}42304231StoreInst *StoreInst::cloneImpl() const {4232return new StoreInst(getOperand(0), getOperand(1), isVolatile(), getAlign(),4233getOrdering(), getSyncScopeID());4234}42354236AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {4237AtomicCmpXchgInst *Result = new AtomicCmpXchgInst(4238getOperand(0), getOperand(1), getOperand(2), getAlign(),4239getSuccessOrdering(), getFailureOrdering(), getSyncScopeID());4240Result->setVolatile(isVolatile());4241Result->setWeak(isWeak());4242return Result;4243}42444245AtomicRMWInst *AtomicRMWInst::cloneImpl() const {4246AtomicRMWInst *Result =4247new AtomicRMWInst(getOperation(), getOperand(0), getOperand(1),4248getAlign(), getOrdering(), getSyncScopeID());4249Result->setVolatile(isVolatile());4250return Result;4251}42524253FenceInst *FenceInst::cloneImpl() const {4254return new FenceInst(getContext(), getOrdering(), getSyncScopeID());4255}42564257TruncInst *TruncInst::cloneImpl() const {4258return new TruncInst(getOperand(0), getType());4259}42604261ZExtInst *ZExtInst::cloneImpl() const {4262return new ZExtInst(getOperand(0), getType());4263}42644265SExtInst *SExtInst::cloneImpl() const {4266return new SExtInst(getOperand(0), getType());4267}42684269FPTruncInst *FPTruncInst::cloneImpl() const {4270return new FPTruncInst(getOperand(0), getType());4271}42724273FPExtInst *FPExtInst::cloneImpl() const {4274return new FPExtInst(getOperand(0), getType());4275}42764277UIToFPInst *UIToFPInst::cloneImpl() const {4278return new UIToFPInst(getOperand(0), 
getType());4279}42804281SIToFPInst *SIToFPInst::cloneImpl() const {4282return new SIToFPInst(getOperand(0), getType());4283}42844285FPToUIInst *FPToUIInst::cloneImpl() const {4286return new FPToUIInst(getOperand(0), getType());4287}42884289FPToSIInst *FPToSIInst::cloneImpl() const {4290return new FPToSIInst(getOperand(0), getType());4291}42924293PtrToIntInst *PtrToIntInst::cloneImpl() const {4294return new PtrToIntInst(getOperand(0), getType());4295}42964297IntToPtrInst *IntToPtrInst::cloneImpl() const {4298return new IntToPtrInst(getOperand(0), getType());4299}43004301BitCastInst *BitCastInst::cloneImpl() const {4302return new BitCastInst(getOperand(0), getType());4303}43044305AddrSpaceCastInst *AddrSpaceCastInst::cloneImpl() const {4306return new AddrSpaceCastInst(getOperand(0), getType());4307}43084309CallInst *CallInst::cloneImpl() const {4310if (hasOperandBundles()) {4311unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);4312return new(getNumOperands(), DescriptorBytes) CallInst(*this);4313}4314return new(getNumOperands()) CallInst(*this);4315}43164317SelectInst *SelectInst::cloneImpl() const {4318return SelectInst::Create(getOperand(0), getOperand(1), getOperand(2));4319}43204321VAArgInst *VAArgInst::cloneImpl() const {4322return new VAArgInst(getOperand(0), getType());4323}43244325ExtractElementInst *ExtractElementInst::cloneImpl() const {4326return ExtractElementInst::Create(getOperand(0), getOperand(1));4327}43284329InsertElementInst *InsertElementInst::cloneImpl() const {4330return InsertElementInst::Create(getOperand(0), getOperand(1), getOperand(2));4331}43324333ShuffleVectorInst *ShuffleVectorInst::cloneImpl() const {4334return new ShuffleVectorInst(getOperand(0), getOperand(1), getShuffleMask());4335}43364337PHINode *PHINode::cloneImpl() const { return new PHINode(*this); }43384339LandingPadInst *LandingPadInst::cloneImpl() const {4340return new LandingPadInst(*this);4341}43424343ReturnInst *ReturnInst::cloneImpl() const {4344return new(getNumOperands()) ReturnInst(*this);4345}43464347BranchInst *BranchInst::cloneImpl() const {4348return new(getNumOperands()) BranchInst(*this);4349}43504351SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }43524353IndirectBrInst *IndirectBrInst::cloneImpl() const {4354return new IndirectBrInst(*this);4355}43564357InvokeInst *InvokeInst::cloneImpl() const {4358if (hasOperandBundles()) {4359unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);4360return new(getNumOperands(), DescriptorBytes) InvokeInst(*this);4361}4362return new(getNumOperands()) InvokeInst(*this);4363}43644365CallBrInst *CallBrInst::cloneImpl() const {4366if (hasOperandBundles()) {4367unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);4368return new (getNumOperands(), DescriptorBytes) CallBrInst(*this);4369}4370return new (getNumOperands()) CallBrInst(*this);4371}43724373ResumeInst *ResumeInst::cloneImpl() const { return new (1) ResumeInst(*this); }43744375CleanupReturnInst *CleanupReturnInst::cloneImpl() const {4376return new (getNumOperands()) CleanupReturnInst(*this);4377}43784379CatchReturnInst *CatchReturnInst::cloneImpl() const {4380return new (getNumOperands()) CatchReturnInst(*this);4381}43824383CatchSwitchInst *CatchSwitchInst::cloneImpl() const {4384return new CatchSwitchInst(*this);4385}43864387FuncletPadInst *FuncletPadInst::cloneImpl() const {4388return new (getNumOperands()) FuncletPadInst(*this);4389}43904391UnreachableInst *UnreachableInst::cloneImpl() const {4392LLVMContext 
&Context = getContext();
  return new UnreachableInst(Context);
}

FreezeInst *FreezeInst::cloneImpl() const {
  return new FreezeInst(getOperand(0));
}
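
// ---------------------------------------------------------------------------
// Illustrative usage sketch (editorial addition, not part of the original
// file): how client code might combine the cast and compare helpers defined
// above. The values `V`, `I64Ty`, `InsertPt`, and `Cmp` are hypothetical
// placeholders supplied by the caller.
//
// \code
//   // Pick a widening opcode for an i32 -> i64 conversion, honoring
//   // signedness, and verify it against castIsValid before building the IR.
//   Instruction::CastOps Op =
//       CastInst::getCastOpcode(V, /*SrcIsSigned=*/true, I64Ty,
//                               /*DestIsSigned=*/true);
//   assert(CastInst::castIsValid(Op, V, I64Ty) &&
//          "getCastOpcode must agree with castIsValid");
//   Value *Wide = CastInst::Create(Op, V, I64Ty, "wide", InsertPt);
//
//   // Negate an integer comparison in place by switching to the inverse
//   // predicate (e.g. ICMP_SLT becomes ICMP_SGE).
//   Cmp->setPredicate(CmpInst::getInversePredicate(Cmp->getPredicate()));
// \endcode
// ---------------------------------------------------------------------------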