Path: blob/main/contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp
//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/FixedPointBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/TypeSize.h"
#include <cstdarg>
#include <optional>

using namespace clang;
using namespace CodeGen;
using llvm::Value;

//===----------------------------------------------------------------------===//
//                         Scalar Expression Emitter
//===----------------------------------------------------------------------===//

namespace llvm {
extern cl::opt<bool> EnableSingleByteCoverage;
} // namespace llvm

namespace {

/// Determine whether the given binary operation may overflow.
/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
/// the returned overflow check is precise. The returned value is 'true' for
/// all other opcodes, to be conservative.
bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
                            BinaryOperator::Opcode Opcode, bool Signed,
                            llvm::APInt &Result) {
  // Assume overflow is possible, unless we can prove otherwise.
  bool Overflow = true;
  const auto &LHSAP = LHS->getValue();
  const auto &RHSAP = RHS->getValue();
  if (Opcode == BO_Add) {
    Result = Signed ? LHSAP.sadd_ov(RHSAP, Overflow)
                    : LHSAP.uadd_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Sub) {
    Result = Signed ? LHSAP.ssub_ov(RHSAP, Overflow)
                    : LHSAP.usub_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Mul) {
    Result = Signed ? LHSAP.smul_ov(RHSAP, Overflow)
                    : LHSAP.umul_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Div || Opcode == BO_Rem) {
    if (Signed && !RHS->isZero())
      Result = LHSAP.sdiv_ov(RHSAP, Overflow);
    else
      return false;
  }
  return Overflow;
}

struct BinOpInfo {
  Value *LHS;
  Value *RHS;
  QualType Ty;                   // Computation Type.
  BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
  FPOptions FPFeatures;
  const Expr *E;      // Entire expr, for error unsupported.  May not be binop.

  /// Check if the binop can result in integer overflow.
  bool mayHaveIntegerOverflow() const {
    // Without constant input, we can't rule out overflow.
    auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
    auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
    if (!LHSCI || !RHSCI)
      return true;

    llvm::APInt Result;
    return ::mayHaveIntegerOverflow(
        LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
  }

  /// Check if the binop computes a division or a remainder.
  bool isDivremOp() const {
    return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
           Opcode == BO_RemAssign;
  }

  /// Check if the binop can result in an integer division by zero.
  bool mayHaveIntegerDivisionByZero() const {
    if (isDivremOp())
      if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
        return CI->isZero();
    return true;
  }

  /// Check if the binop can result in a float division by zero.
  bool mayHaveFloatDivisionByZero() const {
    if (isDivremOp())
      if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
        return CFP->isZero();
    return true;
  }

  /// Check if at least one operand is a fixed point type. In such cases, this
  /// operation did not follow usual arithmetic conversion and both operands
  /// might not be of the same type.
  bool isFixedPointOp() const {
    // We cannot simply check the result type since comparison operations return
    // an int.
    if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
      QualType LHSType = BinOp->getLHS()->getType();
      QualType RHSType = BinOp->getRHS()->getType();
      return LHSType->isFixedPointType() || RHSType->isFixedPointType();
    }
    if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
      return UnOp->getSubExpr()->getType()->isFixedPointType();
    return false;
  }

  /// Check if the RHS has a signed integer representation.
  bool rhsHasSignedIntegerRepresentation() const {
    if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
      QualType RHSType = BinOp->getRHS()->getType();
      return RHSType->hasSignedIntegerRepresentation();
    }
    return false;
  }
};

static bool MustVisitNullValue(const Expr *E) {
  // If a null pointer expression's type is the C++0x nullptr_t, then
  // it's not necessarily a simple constant and it must be evaluated
  // for its potential side effects.
  return E->getType()->isNullPtrType();
}

/// If \p E is a widened promoted integer, get its base (unpromoted) type.
static std::optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
                                                       const Expr *E) {
  const Expr *Base = E->IgnoreImpCasts();
  if (E == Base)
    return std::nullopt;

  QualType BaseTy = Base->getType();
  if (!Ctx.isPromotableIntegerType(BaseTy) ||
      Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
    return std::nullopt;

  return BaseTy;
}

/// Check if \p E is a widened promoted integer.
static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
  return getUnwidenedIntegerType(Ctx, E).has_value();
}

/// Check if we can skip the overflow check for \p Op.
static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
  assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
         "Expected a unary or binary operator");

  // If the binop has constant inputs and we can prove there is no overflow,
  // we can elide the overflow check.
  if (!Op.mayHaveIntegerOverflow())
    return true;

  // If a unary op has a widened operand, the op cannot overflow.
  if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
    return !UO->canOverflow();

  // We usually don't need overflow checks for binops with widened operands.
  // Multiplication with promoted unsigned operands is a special case.
  const auto *BO = cast<BinaryOperator>(Op.E);
  auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
  if (!OptionalLHSTy)
    return false;

  auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
  if (!OptionalRHSTy)
    return false;

  QualType LHSTy = *OptionalLHSTy;
  QualType RHSTy = *OptionalRHSTy;

  // This is the simple case: binops without unsigned multiplication, and with
  // widened operands. No overflow check is needed here.
  if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
      !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
    return true;

  // For unsigned multiplication the overflow check can be elided if either one
  // of the unpromoted types are less than half the size of the promoted type.
  unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
  return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
         (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
}
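
// ---------------------------------------------------------------------------
// Illustrative note (not part of the original source): what the elision logic
// in CanElideOverflowCheck above means for ordinary C arithmetic, assuming the
// usual integer promotions to a 32-bit 'int':
//
//   unsigned char  a, b;   // 8-bit operands, promoted to int
//   int r1 = a + b;        // widened, non-multiply: check elided
//   int r2 = a * b;        // 2 * 8 < 32: check elided (max 255*255 fits int)
//
//   unsigned short c, d;   // 16-bit operands, promoted to int
//   int r3 = c + d;        // widened, non-multiply: check elided
//   int r4 = c * d;        // 2 * 16 == 32: 0xFFFF * 0xFFFF overflows 'int',
//                          // so the overflow check is kept
// ---------------------------------------------------------------------------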
class ScalarExprEmitter
  : public StmtVisitor<ScalarExprEmitter, Value*> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  bool IgnoreResultAssign;
  llvm::LLVMContext &VMContext;
public:

  ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
    : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
      VMContext(cgf.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  bool TestAndClearIgnoreResultAssign() {
    bool I = IgnoreResultAssign;
    IgnoreResultAssign = false;
    return I;
  }

  llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
    return CGF.EmitCheckedLValue(E, TCK);
  }

  void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
                      const BinOpInfo &Info);

  Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
    return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
  }

  void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
    const AlignValueAttr *AVAttr = nullptr;
    if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
      const ValueDecl *VD = DRE->getDecl();

      if (VD->getType()->isReferenceType()) {
        if (const auto *TTy =
                VD->getType().getNonReferenceType()->getAs<TypedefType>())
          AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
      } else {
        // Assumptions for function parameters are emitted at the start of the
        // function, so there is no need to repeat that here,
        // unless the alignment-assumption sanitizer is enabled,
        // then we prefer the assumption over alignment attribute
        // on IR function param.
        if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
          return;

        AVAttr = VD->getAttr<AlignValueAttr>();
      }
    }

    if (!AVAttr)
      if (const auto *TTy = E->getType()->getAs<TypedefType>())
        AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();

    if (!AVAttr)
      return;

    Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
    llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
    CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
  }
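
  // -------------------------------------------------------------------------
  // Illustrative note (not part of the original source): the kind of source
  // the alignment-assumption path above is looking for, sketched as
  // hypothetical user code built around an align_value typedef:
  //
  //   typedef double *aligned_ptr __attribute__((align_value(32)));
  //   aligned_ptr get_buffer(void);
  //   double first(void) { return get_buffer()[0]; }
  //
  // The call result has the typedef's type, so the TypedefType fallback finds
  // the AlignValueAttr and emitAlignmentAssumption lets later passes assume
  // the returned pointer is 32-byte aligned.
  // -------------------------------------------------------------------------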
  /// EmitLoadOfLValue - Given an expression with complex type that represents a
  /// value l-value, this method emits the address of the l-value, then loads
  /// and returns the result.
  Value *EmitLoadOfLValue(const Expr *E) {
    Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
                                E->getExprLoc());

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  /// EmitConversionToBool - Convert the specified expression value to a
  /// boolean (i1) truth value.  This is equivalent to "Val != 0".
  Value *EmitConversionToBool(Value *Src, QualType DstTy);

  /// Emit a check that a conversion from a floating-point type does not
  /// overflow.
  void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
                                Value *Src, QualType SrcType, QualType DstType,
                                llvm::Type *DstTy, SourceLocation Loc);

  /// Known implicit conversion check kinds.
  /// This is used for bitfield conversion checks as well.
  /// Keep in sync with the enum of the same name in ubsan_handlers.h
  enum ImplicitConversionCheckKind : unsigned char {
    ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
    ICCK_UnsignedIntegerTruncation = 1,
    ICCK_SignedIntegerTruncation = 2,
    ICCK_IntegerSignChange = 3,
    ICCK_SignedIntegerTruncationOrSignChange = 4,
  };

  /// Emit a check that an [implicit] truncation of an integer does not
  /// discard any bits. It is not UB, so we use the value after truncation.
  void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a check that an [implicit] conversion of an integer does not change
  /// the sign of the value. It is not UB, so we use the value after conversion.
  /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
  void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a conversion from the specified type to the specified destination
  /// type, both of which are LLVM scalar types.
  struct ScalarConversionOpts {
    bool TreatBooleanAsSigned;
    bool EmitImplicitIntegerTruncationChecks;
    bool EmitImplicitIntegerSignChangeChecks;

    ScalarConversionOpts()
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(false),
          EmitImplicitIntegerSignChangeChecks(false) {}

    ScalarConversionOpts(clang::SanitizerSet SanOpts)
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(
              SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
          EmitImplicitIntegerSignChangeChecks(
              SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
  };
  Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
                        llvm::Type *SrcTy, llvm::Type *DstTy,
                        ScalarConversionOpts Opts);
  Value *
  EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
                       SourceLocation Loc,
                       ScalarConversionOpts Opts = ScalarConversionOpts());

  /// Convert between either a fixed point and other fixed point or fixed point
  /// and an integer.
  Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
                                  SourceLocation Loc);

  /// Emit a conversion from the specified complex type to the specified
  /// destination type, where the destination type is an LLVM scalar type.
  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                                       QualType SrcTy, QualType DstTy,
                                       SourceLocation Loc);

  /// EmitNullValue - Emit a value that corresponds to null for the given type.
  Value *EmitNullValue(QualType Ty);

  /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
  Value *EmitFloatToBoolConversion(Value *V) {
    // Compare against 0.0 for fp scalars.
    llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
    return Builder.CreateFCmpUNE(V, Zero, "tobool");
  }

  /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
  Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
    Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);

    return Builder.CreateICmpNE(V, Zero, "tobool");
  }

  Value *EmitIntToBoolConversion(Value *V) {
    // Because of the type rules of C, we often end up computing a
    // logical value, then zero extending it to int, then wanting it
    // as a logical value again.  Optimize this common case.
    if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
      if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
        Value *Result = ZI->getOperand(0);
        // If there aren't any more uses, zap the instruction to save space.
        // Note that there can be more uses, for example if this
        // is the result of an assignment.
        if (ZI->use_empty())
          ZI->eraseFromParent();
        return Result;
      }
    }

    return Builder.CreateIsNotNull(V, "tobool");
  }

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  Value *Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
  }

  Value *VisitStmt(Stmt *S) {
    S->dump(llvm::errs(), CGF.getContext());
    llvm_unreachable("Stmt can't have complex result type!");
  }
  Value *VisitExpr(Expr *S);

  Value *VisitConstantExpr(ConstantExpr *E) {
    // A constant expression of type 'void' generates no code and produces no
    // value.
    if (E->getType()->isVoidType())
      return nullptr;

    if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
      if (E->isGLValue())
        return CGF.EmitLoadOfScalar(
            Address(Result, CGF.convertTypeForLoadStore(E->getType()),
                    CGF.getContext().getTypeAlignInChars(E->getType())),
            /*Volatile*/ false, E->getType(), E->getExprLoc());
      return Result;
    }
    return Visit(E->getSubExpr());
  }
  Value *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }
  Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }
  Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    return Visit(GE->getResultExpr());
  }
  Value *VisitCoawaitExpr(CoawaitExpr *S) {
    return CGF.EmitCoawaitExpr(*S).getScalarVal();
  }
  Value *VisitCoyieldExpr(CoyieldExpr *S) {
    return CGF.EmitCoyieldExpr(*S).getScalarVal();
  }
  Value *VisitUnaryCoawait(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Leaves.
  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
    return llvm::ConstantFP::get(VMContext, E->getValue());
  }
  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
    if (E->getType()->isVoidType())
      return nullptr;

    return EmitNullValue(E->getType());
  }
  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitOffsetOfExpr(OffsetOfExpr *E);
  Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
    llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
    return Builder.CreateBitCast(V, ConvertType(E->getType()));
  }

  Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
  }

  Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    return CGF.EmitPseudoObjectRValue(E).getScalarVal();
  }

  Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);
  Value *VisitEmbedExpr(EmbedExpr *E);

  Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
    if (E->isGLValue())
      return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
                              E->getExprLoc());

    // Otherwise, assume the mapping is the scalar directly.
    return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
  }

  // l-values.
  Value *VisitDeclRefExpr(DeclRefExpr *E) {
    if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
      return CGF.emitScalarConstant(Constant, E);
    return EmitLoadOfLValue(E);
  }

  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
    return CGF.EmitObjCSelectorExpr(E);
  }
  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
    return CGF.EmitObjCProtocolExpr(E);
  }
  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
    if (E->getMethodDecl() &&
        E->getMethodDecl()->getReturnType()->isReferenceType())
      return EmitLoadOfLValue(E);
    return CGF.EmitObjCMessageExpr(E).getScalarVal();
  }

  Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
    LValue LV = CGF.EmitObjCIsaExpr(E);
    Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
    return V;
  }

  Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
    VersionTuple Version = E->getVersion();

    // If we're checking for a platform older than our minimum deployment
    // target, we can fold the check away.
    if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
      return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);

    return CGF.EmitBuiltinAvailable(Version);
  }

  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
  Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
  Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
  Value *VisitMemberExpr(MemberExpr *E);
  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
    // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
    // literals aren't l-values in C++. We do so simply because that's the
    // cleanest way to handle compound literals in C++.
    // See the discussion here: https://reviews.llvm.org/D64464
    return EmitLoadOfLValue(E);
  }

  Value *VisitInitListExpr(InitListExpr *E);

  Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
    assert(CGF.getArrayInitIndex() &&
           "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
    return CGF.getArrayInitIndex();
  }

  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
    CGF.CGM.EmitExplicitCastExprType(E, &CGF);
    return VisitCastExpr(E);
  }
  Value *VisitCastExpr(CastExpr *E);

  Value *VisitCallExpr(const CallExpr *E) {
    if (E->getCallReturnType(CGF.getContext())->isReferenceType())
      return EmitLoadOfLValue(E);

    Value *V = CGF.EmitCallExpr(E).getScalarVal();

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  Value *VisitStmtExpr(const StmtExpr *E);

  // Unary Operators.
  Value *VisitUnaryPostDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, false);
  }
  Value *VisitUnaryPostInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, false);
  }
  Value *VisitUnaryPreDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, true);
  }
  Value *VisitUnaryPreInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, true);
  }

  llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
                                                  llvm::Value *InVal,
                                                  bool IsInc);

  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                       bool isInc, bool isPre);


  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
    if (isa<MemberPointerType>(E->getType())) // never sugared
      return CGF.CGM.getMemberPointerConstant(E);

    return EmitLValue(E->getSubExpr()).getPointer(CGF);
  }
  Value *VisitUnaryDeref(const UnaryOperator *E) {
    if (E->getType()->isVoidType())
      return Visit(E->getSubExpr()); // the actual value should be unused
    return EmitLoadOfLValue(E);
  }

  Value *VisitUnaryPlus(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitPlus(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryMinus(const UnaryOperator *E,
                         QualType PromotionType = QualType());
  Value *VisitMinus(const UnaryOperator *E, QualType PromotionType);

  Value *VisitUnaryNot (const UnaryOperator *E);
  Value *VisitUnaryLNot (const UnaryOperator *E);
  Value *VisitUnaryReal(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitReal(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryImag(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitImag(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // C++
  Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
    auto &Ctx = CGF.getContext();
    APValue Evaluated =
        SLE->EvaluateInContext(Ctx, CGF.CurSourceLocExprScope.getDefaultExpr());
    return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
                                             SLE->getType());
  }

  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    return Visit(DAE->getExpr());
  }
  Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    return Visit(DIE->getExpr());
  }
  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
    return CGF.LoadCXXThis();
  }

  Value *VisitExprWithCleanups(ExprWithCleanups *E);
  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
    return CGF.EmitCXXNewExpr(E);
  }
  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
    CGF.EmitCXXDeleteExpr(E);
    return nullptr;
  }

  Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }

  Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitRequiresExpr(const RequiresExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
  }

  Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
  }

  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    CGF.EmitScalarExpr(E->getBase());
    return nullptr;
  }

  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
    return EmitNullValue(E->getType());
  }

  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
    CGF.EmitCXXThrowExpr(E);
    return nullptr;
  }

  Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
    return Builder.getInt1(E->getValue());
  }

  // Binary Operators.
  Value *EmitMul(const BinOpInfo &Ops) {
    if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
        [[fallthrough]];
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        [[fallthrough]];
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), Ops))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        return EmitOverflowCheckedBinOp(Ops);
      }
    }

    if (Ops.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder MB(Builder);
      // We need to check the types of the operands of the operator to get the
      // correct matrix dimensions.
      auto *BO = cast<BinaryOperator>(Ops.E);
      auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getLHS()->getType().getCanonicalType());
      auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getRHS()->getType().getCanonicalType());
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      if (LHSMatTy && RHSMatTy)
        return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
                                       LHSMatTy->getNumColumns(),
                                       RHSMatTy->getNumColumns());
      return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
    }

    if (Ops.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), Ops))
      return EmitOverflowCheckedBinOp(Ops);

    if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
      // Preserve the old values
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
    }
    if (Ops.isFixedPointOp())
      return EmitFixedPointBinOp(Ops);
    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
  }
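
  // -------------------------------------------------------------------------
  // Illustrative note (not part of the original source): for a plain
  // 'int * int' multiply, the switch at the top of EmitMul above selects the
  // IR roughly as follows (assuming the usual driver mapping of flags to
  // signed-overflow behaviors):
  //
  //   default (SOB_Undefined):   %mul = mul nsw i32 %a, %b
  //   -fwrapv (SOB_Defined):     %mul = mul i32 %a, %b        (wrapping)
  //   -ftrapv or -fsanitize=signed-integer-overflow:
  //       EmitOverflowCheckedBinOp emits an overflow-checked multiply and
  //       branches to a trap/handler block, unless CanElideOverflowCheck
  //       proved the multiply cannot overflow.
  // -------------------------------------------------------------------------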
  /// Create a binary op that checks for overflow.
  /// Currently only supports +, - and *.
  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);

  // Check for undefined division and modulus behaviors.
  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
                                                  llvm::Value *Zero, bool isDiv);
  // Common helper for getting how wide LHS of shift is.
  static Value *GetMaximumShiftAmount(Value *LHS, Value *RHS, bool RHSIsSigned);

  // Used for shifting constraints for OpenCL, do mask for powers of 2, URem for
  // non powers of two.
  Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);

  Value *EmitDiv(const BinOpInfo &Ops);
  Value *EmitRem(const BinOpInfo &Ops);
  Value *EmitAdd(const BinOpInfo &Ops);
  Value *EmitSub(const BinOpInfo &Ops);
  Value *EmitShl(const BinOpInfo &Ops);
  Value *EmitShr(const BinOpInfo &Ops);
  Value *EmitAnd(const BinOpInfo &Ops) {
    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
  }
  Value *EmitXor(const BinOpInfo &Ops) {
    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
  }
  Value *EmitOr (const BinOpInfo &Ops) {
    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
  }

  // Helper functions for fixed point binary operations.
  Value *EmitFixedPointBinOp(const BinOpInfo &Ops);

  BinOpInfo EmitBinOps(const BinaryOperator *E,
                       QualType PromotionTy = QualType());

  Value *EmitPromotedValue(Value *result, QualType PromotionType);
  Value *EmitUnPromotedValue(Value *result, QualType ExprType);
  Value *EmitPromoted(const Expr *E, QualType PromotionType);

  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
                                  Value *&Result);

  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));

  QualType getPromotionType(QualType Ty) {
    const auto &Ctx = CGF.getContext();
    if (auto *CT = Ty->getAs<ComplexType>()) {
      QualType ElementType = CT->getElementType();
      if (ElementType.UseExcessPrecision(Ctx))
        return Ctx.getComplexType(Ctx.FloatTy);
    }

    if (Ty.UseExcessPrecision(Ctx)) {
      if (auto *VT = Ty->getAs<VectorType>()) {
        unsigned NumElements = VT->getNumElements();
        return Ctx.getVectorType(Ctx.FloatTy, NumElements, VT->getVectorKind());
      }
      return Ctx.FloatTy;
    }

    return QualType();
  }

  // Binary operators and binary compound assignment operators.
#define HANDLEBINOP(OP)                                                        \
  Value *VisitBin##OP(const BinaryOperator *E) {                               \
    QualType promotionTy = getPromotionType(E->getType());                     \
    auto result = Emit##OP(EmitBinOps(E, promotionTy));                        \
    if (result && !promotionTy.isNull())                                       \
      result = EmitUnPromotedValue(result, E->getType());                      \
    return result;                                                             \
  }                                                                            \
  Value *VisitBin##OP##Assign(const CompoundAssignOperator *E) {               \
    return EmitCompoundAssign(E, &ScalarExprEmitter::Emit##OP);                \
  }
  HANDLEBINOP(Mul)
  HANDLEBINOP(Div)
  HANDLEBINOP(Rem)
  HANDLEBINOP(Add)
  HANDLEBINOP(Sub)
  HANDLEBINOP(Shl)
  HANDLEBINOP(Shr)
  HANDLEBINOP(And)
  HANDLEBINOP(Xor)
  HANDLEBINOP(Or)
#undef HANDLEBINOP
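
  // -------------------------------------------------------------------------
  // Illustrative note (not part of the original source): HANDLEBINOP(Add)
  // above expands to a visitor pair roughly equivalent to the following, so
  // each arithmetic BinaryOperator and its compound-assignment form share one
  // Emit* helper:
  //
  //   Value *VisitBinAdd(const BinaryOperator *E) {
  //     QualType promotionTy = getPromotionType(E->getType());
  //     auto result = EmitAdd(EmitBinOps(E, promotionTy));
  //     if (result && !promotionTy.isNull())
  //       result = EmitUnPromotedValue(result, E->getType());
  //     return result;
  //   }
  //   Value *VisitBinAddAssign(const CompoundAssignOperator *E) {
  //     return EmitCompoundAssign(E, &ScalarExprEmitter::EmitAdd);
  //   }
  // -------------------------------------------------------------------------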
  // Comparisons.
  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
                     llvm::CmpInst::Predicate SICmpOpc,
                     llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
#define VISITCOMP(CODE, UI, SI, FP, SIG) \
    Value *VisitBin##CODE(const BinaryOperator *E) { \
      return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
                         llvm::FCmpInst::FP, SIG); }
  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
#undef VISITCOMP

  Value *VisitBinAssign (const BinaryOperator *E);

  Value *VisitBinLAnd (const BinaryOperator *E);
  Value *VisitBinLOr (const BinaryOperator *E);
  Value *VisitBinComma (const BinaryOperator *E);

  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }

  Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    return Visit(E->getSemanticForm());
  }

  // Other Operators.
  Value *VisitBlockExpr(const BlockExpr *BE);
  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
  Value *VisitChooseExpr(ChooseExpr *CE);
  Value *VisitVAArgExpr(VAArgExpr *VE);
  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
    return CGF.EmitObjCStringLiteral(E);
  }
  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
    return CGF.EmitObjCBoxedExpr(E);
  }
  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
    return CGF.EmitObjCArrayLiteral(E);
  }
  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
    return CGF.EmitObjCDictionaryLiteral(E);
  }
  Value *VisitAsTypeExpr(AsTypeExpr *CE);
  Value *VisitAtomicExpr(AtomicExpr *AE);
  Value *VisitPackIndexingExpr(PackIndexingExpr *E) {
    return Visit(E->getSelectedExpr());
  }
};
} // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitConversionToBool - Convert the specified expression value to a
/// boolean (i1) truth value.  This is equivalent to "Val != 0".
Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
  assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");

  if (SrcType->isRealFloatingType())
    return EmitFloatToBoolConversion(Src);

  if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);

  assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
         "Unknown scalar type to convert");

  if (isa<llvm::IntegerType>(Src->getType()))
    return EmitIntToBoolConversion(Src);

  assert(isa<llvm::PointerType>(Src->getType()));
  return EmitPointerToBoolConversion(Src, SrcType);
}

void ScalarExprEmitter::EmitFloatConversionCheck(
    Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
    QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
  assert(SrcType->isFloatingType() && "not a conversion from floating point");
  if (!isa<llvm::IntegerType>(DstTy))
    return;

  CodeGenFunction::SanitizerScope SanScope(&CGF);
  using llvm::APFloat;
  using llvm::APSInt;

  llvm::Value *Check = nullptr;
  const llvm::fltSemantics &SrcSema =
      CGF.getContext().getFloatTypeSemantics(OrigSrcType);

  // Floating-point to integer. This has undefined behavior if the source is
  // +-Inf, NaN, or doesn't fit into the destination type (after truncation
  // to an integer).
  unsigned Width = CGF.getContext().getIntWidth(DstType);
  bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();

  APSInt Min = APSInt::getMinValue(Width, Unsigned);
  APFloat MinSrc(SrcSema, APFloat::uninitialized);
  if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for lower bound. Just check for
    // -Inf/NaN.
    MinSrc = APFloat::getInf(SrcSema, true);
  else
    // Find the largest value which is too small to represent (before
    // truncation toward zero).
    MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);

  APSInt Max = APSInt::getMaxValue(Width, Unsigned);
  APFloat MaxSrc(SrcSema, APFloat::uninitialized);
  if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for upper bound. Just check for
    // +Inf/NaN.
    MaxSrc = APFloat::getInf(SrcSema, false);
  else
    // Find the smallest value which is too large to represent (before
    // truncation toward zero).
    MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);

  // If we're converting from __half, convert the range to float to match
  // the type of src.
  if (OrigSrcType->isHalfType()) {
    const llvm::fltSemantics &Sema =
        CGF.getContext().getFloatTypeSemantics(SrcType);
    bool IsInexact;
    MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
    MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
  }

  llvm::Value *GE =
      Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
  llvm::Value *LE =
      Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
  Check = Builder.CreateAnd(GE, LE);

  llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
                                  CGF.EmitCheckTypeDescriptor(OrigSrcType),
                                  CGF.EmitCheckTypeDescriptor(DstType)};
  CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
                SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
}
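
// ---------------------------------------------------------------------------
// Illustrative note (not part of the original source): a worked instance of
// the bounds computed in EmitFloatConversionCheck above, for a conversion from
// 'float' to 'signed char' (Width = 8, Unsigned = false):
//
//   Min = -128  ->  MinSrc = -128.0, then subtract 1 (toward -inf) -> -129.0
//   Max =  127  ->  MaxSrc =  127.0, then add 1 (toward +inf)      ->  128.0
//
// The emitted check is therefore (Src > -129.0) && (Src < 128.0): every float
// strictly inside that open interval truncates toward zero into [-128, 127],
// while out-of-range values, +-Inf, and NaN (which fail the ordered compares)
// trigger the FloatCastOverflow handler.
// ---------------------------------------------------------------------------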
// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the truncation Src -> Dst was lossy.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();
  (void)DstTy; // Only used in assert()

  // This should be truncation of integral types.
  assert(Src != Dst);
  assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();

  // If both (src and dst) types are unsigned, then it's an unsigned truncation.
  // Else, it is a signed truncation.
  ScalarExprEmitter::ImplicitConversionCheckKind Kind;
  SanitizerMask Mask;
  if (!SrcSigned && !DstSigned) {
    Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
    Mask = SanitizerKind::ImplicitUnsignedIntegerTruncation;
  } else {
    Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
    Mask = SanitizerKind::ImplicitSignedIntegerTruncation;
  }

  llvm::Value *Check = nullptr;
  // 1. Extend the truncated value back to the same width as the Src.
  Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
  // 2. Equality-compare with the original source value
  Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
  // If the comparison result is 'i1 false', then the truncation was lossy.
  return std::make_pair(Kind, std::make_pair(Check, Mask));
}

static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
    QualType SrcType, QualType DstType) {
  return SrcType->isIntegerType() && DstType->isIntegerType();
}

void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
    return;

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  unsigned SrcBits = Src->getType()->getScalarSizeInBits();
  unsigned DstBits = Dst->getType()->getScalarSizeInBits();
  // This must be truncation. Else we do not care.
  if (SrcBits <= DstBits)
    return;

  assert(!DstType->isBooleanType() && "we should not get here with booleans.");

  // If the integer sign change sanitizer is enabled,
  // and we are truncating from larger unsigned type to smaller signed type,
  // let that next sanitizer deal with it.
  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
      (!SrcSigned && DstSigned))
    return;

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check =
          EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
  // If the comparison result is 'i1 false', then the truncation was lossy.

  // Do we care about this type of truncation?
  if (!CGF.SanOpts.has(Check.second.second))
    return;

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first),
      llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};

  CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}
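
// ---------------------------------------------------------------------------
// Illustrative note (not part of the original source): the kind of C code the
// truncation check above guards, assuming a 32-bit 'unsigned int', an 8-bit
// 'unsigned char', and -fsanitize=implicit-integer-truncation:
//
//   unsigned int  wide = 300;
//   unsigned char narrow = wide;   // value becomes 44; not UB, but lossy
//
// The emitted check re-extends 'narrow' back to i32 ("anyext") and compares it
// with the original i32 value ("truncheck"); since 44 != 300 the compare is
// false and the ImplicitConversion handler runs with the
// ICCK_UnsignedIntegerTruncation kind.
// ---------------------------------------------------------------------------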
static llvm::Value *EmitIsNegativeTestHelper(Value *V, QualType VType,
                                             const char *Name,
                                             CGBuilderTy &Builder) {
  bool VSigned = VType->isSignedIntegerOrEnumerationType();
  llvm::Type *VTy = V->getType();
  if (!VSigned) {
    // If the value is unsigned, then it is never negative.
    return llvm::ConstantInt::getFalse(VTy->getContext());
  }
  llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
  return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
                            llvm::Twine(Name) + "." + V->getName() +
                                ".negativitycheck");
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  (void)SrcSigned; // Only used in assert()
  (void)DstSigned; // Only used in assert()
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();
  (void)SrcBits; // Only used in assert()
  (void)DstBits; // Only used in assert()

  assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
         "either the widths should be different, or the signednesses.");

  // 1. Was the old Value negative?
  llvm::Value *SrcIsNegative =
      EmitIsNegativeTestHelper(Src, SrcType, "src", Builder);
  // 2. Is the new Value negative?
  llvm::Value *DstIsNegative =
      EmitIsNegativeTestHelper(Dst, DstType, "dst", Builder);
  // 3. Now, was the 'negativity status' preserved during the conversion?
  //    NOTE: conversion from negative to zero is considered to change the sign.
  //    (We want to get 'false' when the conversion changed the sign)
  //    So we should just equality-compare the negativity statuses.
  llvm::Value *Check = nullptr;
  Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
  // If the comparison result is 'false', then the conversion changed the sign.
  return std::make_pair(
      ScalarExprEmitter::ICCK_IntegerSignChange,
      std::make_pair(Check, SanitizerKind::ImplicitIntegerSignChange));
}

void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange))
    return;

  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();

  // Now, we do not need to emit the check in *all* of the cases.
  // We can avoid emitting it in some obvious cases where it would have been
  // dropped by the opt passes (instcombine) always anyways.
  // If it's a cast between effectively the same type, no check.
  // NOTE: this is *not* equivalent to checking the canonical types.
  if (SrcSigned == DstSigned && SrcBits == DstBits)
    return;
  // At least one of the values needs to have signed type.
  // If both are unsigned, then obviously, neither of them can be negative.
  if (!SrcSigned && !DstSigned)
    return;
  // If the conversion is to *larger* *signed* type, then no check is needed.
  // Because either sign-extension happens (so the sign will remain),
  // or zero-extension will happen (the sign bit will be zero.)
  if ((DstBits > SrcBits) && DstSigned)
    return;
  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && SrcSigned) {
    // If the signed integer truncation sanitizer is enabled,
    // and this is a truncation from signed type, then no check is needed.
    // Because here sign change check is interchangeable with truncation check.
    return;
  }
  // That's it. We can't rule out any more cases with the data we have.

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check;

  // Each of these checks needs to return 'false' when an issue was detected.
  ImplicitConversionCheckKind CheckKind;
  llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
  // So we can 'and' all the checks together, and still get 'false',
  // if at least one of the checks detected an issue.

  Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
  CheckKind = Check.first;
  Checks.emplace_back(Check.second);

  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && !SrcSigned && DstSigned) {
    // If the signed integer truncation sanitizer was enabled,
    // and we are truncating from larger unsigned type to smaller signed type,
    // let's handle the case we skipped in that check.
    Check =
        EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
    CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
    Checks.emplace_back(Check.second);
    // If the comparison result is 'i1 false', then the truncation was lossy.
  }

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
      llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};
  // EmitCheck() will 'and' all the checks together.
  CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}
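
// ---------------------------------------------------------------------------
// Illustrative note (not part of the original source): a conversion the sign
// change check above is meant to catch, under
// -fsanitize=implicit-integer-sign-change with a 32-bit 'int':
//
//   int      s = -1;
//   unsigned u = s;      // becomes 4294967295; not UB, but the sign flips
//
// Here SrcIsNegative is true (-1 < 0) while DstIsNegative is the constant
// false (the destination is unsigned), so "signchangecheck" compares true
// against false, yields i1 false, and the ImplicitConversion handler fires
// with the ICCK_IntegerSignChange kind.
// ---------------------------------------------------------------------------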
// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the truncation Src -> Dst was lossy.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitBitfieldTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, CGBuilderTy &Builder) {
  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();

  ScalarExprEmitter::ImplicitConversionCheckKind Kind;
  if (!SrcSigned && !DstSigned)
    Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
  else
    Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;

  llvm::Value *Check = nullptr;
  // 1. Extend the truncated value back to the same width as the Src.
  Check = Builder.CreateIntCast(Dst, Src->getType(), DstSigned, "bf.anyext");
  // 2. Equality-compare with the original source value
  Check = Builder.CreateICmpEQ(Check, Src, "bf.truncheck");
  // If the comparison result is 'i1 false', then the truncation was lossy.

  return std::make_pair(
      Kind, std::make_pair(Check, SanitizerKind::ImplicitBitfieldConversion));
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitBitfieldSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, CGBuilderTy &Builder) {
  // 1. Was the old Value negative?
  llvm::Value *SrcIsNegative =
      EmitIsNegativeTestHelper(Src, SrcType, "bf.src", Builder);
  // 2. Is the new Value negative?
  llvm::Value *DstIsNegative =
      EmitIsNegativeTestHelper(Dst, DstType, "bf.dst", Builder);
  // 3. Now, was the 'negativity status' preserved during the conversion?
  //    NOTE: conversion from negative to zero is considered to change the sign.
  //    (We want to get 'false' when the conversion changed the sign)
  //    So we should just equality-compare the negativity statuses.
  llvm::Value *Check = nullptr;
  Check =
      Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "bf.signchangecheck");
  // If the comparison result is 'false', then the conversion changed the sign.
  return std::make_pair(
      ScalarExprEmitter::ICCK_IntegerSignChange,
      std::make_pair(Check, SanitizerKind::ImplicitBitfieldConversion));
}

void CodeGenFunction::EmitBitfieldConversionCheck(Value *Src, QualType SrcType,
                                                  Value *Dst, QualType DstType,
                                                  const CGBitFieldInfo &Info,
                                                  SourceLocation Loc) {

  if (!SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
    return;

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  if (DstType->isBooleanType() || SrcType->isBooleanType())
    return;

  // This should be truncation of integral types.
  assert(isa<llvm::IntegerType>(Src->getType()) &&
         isa<llvm::IntegerType>(Dst->getType()) && "non-integer llvm type");

  // TODO: Calculate src width to avoid emitting code
  // for unnecessary cases.
  unsigned SrcBits = ConvertType(SrcType)->getScalarSizeInBits();
  unsigned DstBits = Info.Size;

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();

  CodeGenFunction::SanitizerScope SanScope(this);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check;

  // Truncation
  bool EmitTruncation = DstBits < SrcBits;
  // If Dst is signed and Src unsigned, we want to be more specific
  // about the CheckKind we emit, in this case we want to emit
  // ICCK_SignedIntegerTruncationOrSignChange.
  bool EmitTruncationFromUnsignedToSigned =
      EmitTruncation && DstSigned && !SrcSigned;
  // Sign change
  bool SameTypeSameSize = SrcSigned == DstSigned && SrcBits == DstBits;
  bool BothUnsigned = !SrcSigned && !DstSigned;
  bool LargerSigned = (DstBits > SrcBits) && DstSigned;
  // We can avoid emitting sign change checks in some obvious cases
  //   1. If Src and Dst have the same signedness and size
  //   2. If both are unsigned sign check is unnecessary!
  //   3. If Dst is signed and bigger than Src, either
  //      sign-extension or zero-extension will make sure
  //      the sign remains.
  bool EmitSignChange = !SameTypeSameSize && !BothUnsigned && !LargerSigned;

  if (EmitTruncation)
    Check =
        EmitBitfieldTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
  else if (EmitSignChange) {
    assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
           "either the widths should be different, or the signednesses.");
    Check =
        EmitBitfieldSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
  } else
    return;

  ScalarExprEmitter::ImplicitConversionCheckKind CheckKind = Check.first;
  if (EmitTruncationFromUnsignedToSigned)
    CheckKind = ScalarExprEmitter::ICCK_SignedIntegerTruncationOrSignChange;

  llvm::Constant *StaticArgs[] = {
      EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(SrcType),
      EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
      llvm::ConstantInt::get(Builder.getInt32Ty(), Info.Size)};

  EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
            {Src, Dst});
}
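
// ---------------------------------------------------------------------------
// Illustrative note (not part of the original source): the situation
// EmitBitfieldConversionCheck above instruments, assuming
// -fsanitize=implicit-bitfield-conversion and a 32-bit 'int':
//
//   struct S { unsigned b : 3; };   // Info.Size == 3, so DstBits == 3
//   struct S s;
//   s.b = 9;                        // stored value is 1 (9 & 0x7): lossy
//
// DstBits (3) < SrcBits (32), so the truncation helper re-extends the stored
// bit-field value to i32 ("bf.anyext") and compares it with the assigned value
// ("bf.truncheck"); 1 != 9, so the ImplicitConversion handler is invoked, with
// the bit-field width (Info.Size) passed as the last static argument.
// ---------------------------------------------------------------------------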
Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
                                         QualType DstType, llvm::Type *SrcTy,
                                         llvm::Type *DstTy,
                                         ScalarConversionOpts Opts) {
  // The Element types determine the type of cast to perform.
  llvm::Type *SrcElementTy;
  llvm::Type *DstElementTy;
  QualType SrcElementType;
  QualType DstElementType;
  if (SrcType->isMatrixType() && DstType->isMatrixType()) {
    SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
    DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
    SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
    DstElementType = DstType->castAs<MatrixType>()->getElementType();
  } else {
    assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
           "cannot cast between matrix and non-matrix types");
    SrcElementTy = SrcTy;
    DstElementTy = DstTy;
    SrcElementType = SrcType;
    DstElementType = DstType;
  }

  if (isa<llvm::IntegerType>(SrcElementTy)) {
    bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
    if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
      InputSigned = true;
    }

    if (isa<llvm::IntegerType>(DstElementTy))
      return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    if (InputSigned)
      return Builder.CreateSIToFP(Src, DstTy, "conv");
    return Builder.CreateUIToFP(Src, DstTy, "conv");
  }

  if (isa<llvm::IntegerType>(DstElementTy)) {
    assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
    bool IsSigned = DstElementType->isSignedIntegerOrEnumerationType();

    // If we can't recognize overflow as undefined behavior, assume that
    // overflow saturates. This protects against normal optimizations if we are
    // compiling with non-standard FP semantics.
    if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) {
      llvm::Intrinsic::ID IID =
          IsSigned ? llvm::Intrinsic::fptosi_sat : llvm::Intrinsic::fptoui_sat;
      return Builder.CreateCall(CGF.CGM.getIntrinsic(IID, {DstTy, SrcTy}), Src);
    }

    if (IsSigned)
      return Builder.CreateFPToSI(Src, DstTy, "conv");
    return Builder.CreateFPToUI(Src, DstTy, "conv");
  }

  if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
    return Builder.CreateFPTrunc(Src, DstTy, "conv");
  return Builder.CreateFPExt(Src, DstTy, "conv");
}

/// Emit a conversion from the specified type to the specified destination type,
/// both of which are LLVM scalar types.
Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
                                               QualType DstType,
                                               SourceLocation Loc,
                                               ScalarConversionOpts Opts) {
  // All conversions involving fixed point types should be handled by the
  // EmitFixedPoint family functions. This is done to prevent bloating up this
  // function more, and although fixed point numbers are represented by
  // integers, we do not want to follow any logic that assumes they should be
  // treated as integers.
  // TODO(leonardchan): When necessary, add another if statement checking for
  // conversions to fixed point types from other types.
  if (SrcType->isFixedPointType()) {
    if (DstType->isBooleanType())
      // It is important that we check this before checking if the dest type is
      // an integer because booleans are technically integer types.
      // We do not need to check the padding bit on unsigned types if unsigned
      // padding is enabled because overflow into this bit is undefined
      // behavior.
      return Builder.CreateIsNotNull(Src, "tobool");
    if (DstType->isFixedPointType() || DstType->isIntegerType() ||
        DstType->isRealFloatingType())
      return EmitFixedPointConversion(Src, SrcType, DstType, Loc);

    llvm_unreachable(
        "Unhandled scalar conversion from a fixed point type to another type.");
  } else if (DstType->isFixedPointType()) {
    if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
      // This also includes converting booleans and enums to fixed point types.
      return EmitFixedPointConversion(Src, SrcType, DstType, Loc);

    llvm_unreachable(
        "Unhandled scalar conversion to a fixed point type from another type.");
  }

  QualType NoncanonicalSrcType = SrcType;
  QualType NoncanonicalDstType = DstType;

  SrcType = CGF.getContext().getCanonicalType(SrcType);
  DstType = CGF.getContext().getCanonicalType(DstType);
  if (SrcType == DstType) return Src;

  if (DstType->isVoidType()) return nullptr;

  llvm::Value *OrigSrc = Src;
  QualType OrigSrcType = SrcType;
  llvm::Type *SrcTy = Src->getType();

  // Handle conversions to bool first, they are special: comparisons against 0.
  if (DstType->isBooleanType())
    return EmitConversionToBool(Src, SrcType);

  llvm::Type *DstTy = ConvertType(DstType);

  // Cast from half through float if half isn't a native type.
  if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
    // Cast to FP using the intrinsic if the half type itself isn't supported.
    if (DstTy->isFloatingPointTy()) {
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
        return Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
            Src);
    } else {
      // Cast to other types through float, using either the intrinsic or FPExt,
      // depending on whether the half type itself is supported
      // (as opposed to operations on half, available with NativeHalfType).
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
        Src = Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
                                 CGF.CGM.FloatTy),
            Src);
      } else {
        Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
      }
      SrcType = CGF.getContext().FloatTy;
      SrcTy = CGF.FloatTy;
    }
  }

  // Ignore conversions like int -> uint.
  if (SrcTy == DstTy) {
    if (Opts.EmitImplicitIntegerSignChangeChecks)
      EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
                                 NoncanonicalDstType, Loc);

    return Src;
  }

  // Handle pointer conversions next: pointers can only be converted to/from
  // other pointers and integers. Check for pointer types in terms of LLVM, as
  // some native types (like Obj-C id) may map to a pointer type.
  if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
    // The source value may be an integer, or a pointer.
    if (isa<llvm::PointerType>(SrcTy))
      return Src;

    assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
    // First, convert to the correct width so that we control the kind of
    // extension.
    llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
    bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
    llvm::Value* IntResult =
        Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
    // Then, cast to pointer.
    return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
  }

  if (isa<llvm::PointerType>(SrcTy)) {
    // Must be an ptr to int cast.
    assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
    return Builder.CreatePtrToInt(Src, DstTy, "conv");
  }

  // A scalar can be splatted to an extended vector of the same element type
  if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
    // Sema should add casts to make sure that the source expression's type is
    // the same as the vector's element type (sans qualifiers)
    assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
               SrcType.getTypePtr() &&
           "Splatted expr doesn't match with vector element type?");

    // Splat the element across to all elements
    unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
    return Builder.CreateVectorSplat(NumElements, Src, "splat");
  }

  if (SrcType->isMatrixType() && DstType->isMatrixType())
    return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);

  if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
    // Allow bitcast from vector to integer/fp of the same size.
    llvm::TypeSize SrcSize = SrcTy->getPrimitiveSizeInBits();
    llvm::TypeSize DstSize = DstTy->getPrimitiveSizeInBits();
    if (SrcSize == DstSize)
      return Builder.CreateBitCast(Src, DstTy, "conv");

    // Conversions between vectors of different sizes are not allowed except
    // when vectors of half are involved. Operations on storage-only half
    // vectors require promoting half vector operands to float vectors and
    // truncating the result, which is either an int or float vector, to a
    // short or half vector.

    // Source and destination are both expected to be vectors.
    llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
    llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
    (void)DstElementTy;

    assert(((SrcElementTy->isIntegerTy() &&
             DstElementTy->isIntegerTy()) ||
            (SrcElementTy->isFloatingPointTy() &&
             DstElementTy->isFloatingPointTy())) &&
           "unexpected conversion between a floating-point vector and an "
           "integer vector");

    // Truncate an i32 vector to an i16 vector.
    if (SrcElementTy->isIntegerTy())
      return Builder.CreateIntCast(Src, DstTy, false, "conv");

    // Truncate a float vector to a half vector.
    if (SrcSize > DstSize)
      return Builder.CreateFPTrunc(Src, DstTy, "conv");

    // Promote a half vector to a float vector.
    return Builder.CreateFPExt(Src, DstTy, "conv");
  }

  // Finally, we have the arithmetic types: real int/float.
  Value *Res = nullptr;
  llvm::Type *ResTy = DstTy;

  // An overflowing conversion has undefined behavior if either the source type
  // or the destination type is a floating-point type. However, we consider the
  // range of representable values for all floating-point types to be
  // [-inf,+inf], so no overflow can ever happen when the destination type is a
  // floating-point type.
  if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
      OrigSrcType->isFloatingType())
    EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
                             Loc);

  // Cast to half through float if half isn't a native type.
  if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
    // Make sure we cast in a single step if from another FP type.
    if (SrcTy->isFloatingPointTy()) {
      // Use the intrinsic if the half type itself isn't supported
      // (as opposed to operations on half, available with NativeHalfType).
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
        return Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
      // If the half type is supported, just use an fptrunc.
      return Builder.CreateFPTrunc(Src, DstTy);
    }
    DstTy = CGF.FloatTy;
  }

  Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);

  if (DstTy != ResTy) {
    if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
      assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
      Res = Builder.CreateCall(
          CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
          Res);
    } else {
      Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
    }
  }

  if (Opts.EmitImplicitIntegerTruncationChecks)
    EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
                               NoncanonicalDstType, Loc);

  if (Opts.EmitImplicitIntegerSignChangeChecks)
    EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
                               NoncanonicalDstType, Loc);

  return Res;
}

Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
                                                   QualType DstTy,
                                                   SourceLocation Loc) {
  llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
  llvm::Value *Result;
  if (SrcTy->isRealFloatingType())
    Result = FPBuilder.CreateFloatingToFixed(Src,
        CGF.getContext().getFixedPointSemantics(DstTy));
  else if
(DstTy->isRealFloatingType())
    Result = FPBuilder.CreateFixedToFloating(Src,
        CGF.getContext().getFixedPointSemantics(SrcTy),
        ConvertType(DstTy));
  else {
    auto SrcFPSema = CGF.getContext().getFixedPointSemantics(SrcTy);
    auto DstFPSema = CGF.getContext().getFixedPointSemantics(DstTy);

    if (DstTy->isIntegerType())
      Result = FPBuilder.CreateFixedToInteger(Src, SrcFPSema,
                                              DstFPSema.getWidth(),
                                              DstFPSema.isSigned());
    else if (SrcTy->isIntegerType())
      Result = FPBuilder.CreateIntegerToFixed(Src, SrcFPSema.isSigned(),
                                              DstFPSema);
    else
      Result = FPBuilder.CreateFixedToFixed(Src, SrcFPSema, DstFPSema);
  }
  return Result;
}

/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
Value *ScalarExprEmitter::EmitComplexToScalarConversion(
    CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
    SourceLocation Loc) {
  // Get the source element type.
  SrcTy = SrcTy->castAs<ComplexType>()->getElementType();

  // Handle conversions to bool first, they are special: comparisons against 0.
  if (DstTy->isBooleanType()) {
    // Complex != 0 -> (Real != 0) | (Imag != 0)
    Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
    Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
    return Builder.CreateOr(Src.first, Src.second, "tobool");
  }

  // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
  // the imaginary part of the complex value is discarded and the value of the
  // real part is converted according to the conversion rules for the
  // corresponding real type.
  return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
}

Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
  return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
}

/// Emit a sanitization check for the given "binary" operation (which
/// might actually be a unary increment which has been lowered to a binary
/// operation).
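/// (For instance, a checked signed "x++" reaches this via
/// createBinOpInfoFromIncDec further down, lowered to the equivalent "x + 1".)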
The check passes if all values in \p Checks (which are \c i1),1728/// are \c true.1729void ScalarExprEmitter::EmitBinOpCheck(1730ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {1731assert(CGF.IsSanitizerScope);1732SanitizerHandler Check;1733SmallVector<llvm::Constant *, 4> StaticData;1734SmallVector<llvm::Value *, 2> DynamicData;17351736BinaryOperatorKind Opcode = Info.Opcode;1737if (BinaryOperator::isCompoundAssignmentOp(Opcode))1738Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode);17391740StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));1741const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);1742if (UO && UO->getOpcode() == UO_Minus) {1743Check = SanitizerHandler::NegateOverflow;1744StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));1745DynamicData.push_back(Info.RHS);1746} else {1747if (BinaryOperator::isShiftOp(Opcode)) {1748// Shift LHS negative or too large, or RHS out of bounds.1749Check = SanitizerHandler::ShiftOutOfBounds;1750const BinaryOperator *BO = cast<BinaryOperator>(Info.E);1751StaticData.push_back(1752CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));1753StaticData.push_back(1754CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));1755} else if (Opcode == BO_Div || Opcode == BO_Rem) {1756// Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).1757Check = SanitizerHandler::DivremOverflow;1758StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));1759} else {1760// Arithmetic overflow (+, -, *).1761switch (Opcode) {1762case BO_Add: Check = SanitizerHandler::AddOverflow; break;1763case BO_Sub: Check = SanitizerHandler::SubOverflow; break;1764case BO_Mul: Check = SanitizerHandler::MulOverflow; break;1765default: llvm_unreachable("unexpected opcode for bin op check");1766}1767StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));1768}1769DynamicData.push_back(Info.LHS);1770DynamicData.push_back(Info.RHS);1771}17721773CGF.EmitCheck(Checks, Check, StaticData, DynamicData);1774}17751776//===----------------------------------------------------------------------===//1777// Visitor Methods1778//===----------------------------------------------------------------------===//17791780Value *ScalarExprEmitter::VisitExpr(Expr *E) {1781CGF.ErrorUnsupported(E, "scalar expression");1782if (E->getType()->isVoidType())1783return nullptr;1784return llvm::UndefValue::get(CGF.ConvertType(E->getType()));1785}17861787Value *1788ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {1789ASTContext &Context = CGF.getContext();1790unsigned AddrSpace =1791Context.getTargetAddressSpace(CGF.CGM.GetGlobalConstantAddressSpace());1792llvm::Constant *GlobalConstStr = Builder.CreateGlobalStringPtr(1793E->ComputeName(Context), "__usn_str", AddrSpace);17941795llvm::Type *ExprTy = ConvertType(E->getType());1796return Builder.CreatePointerBitCastOrAddrSpaceCast(GlobalConstStr, ExprTy,1797"usn_addr_cast");1798}17991800Value *ScalarExprEmitter::VisitEmbedExpr(EmbedExpr *E) {1801assert(E->getDataElementCount() == 1);1802auto It = E->begin();1803return Builder.getInt((*It)->getValue());1804}18051806Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {1807// Vector Mask Case1808if (E->getNumSubExprs() == 2) {1809Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));1810Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));1811Value *Mask;18121813auto *LTy = cast<llvm::FixedVectorType>(LHS->getType());1814unsigned LHSElts = LTy->getNumElements();18151816Mask = RHS;18171818auto *MTy = 
cast<llvm::FixedVectorType>(Mask->getType());18191820// Mask off the high bits of each shuffle index.1821Value *MaskBits =1822llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);1823Mask = Builder.CreateAnd(Mask, MaskBits, "mask");18241825// newv = undef1826// mask = mask & maskbits1827// for each elt1828// n = extract mask i1829// x = extract val n1830// newv = insert newv, x, i1831auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(),1832MTy->getNumElements());1833Value* NewV = llvm::PoisonValue::get(RTy);1834for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {1835Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);1836Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");18371838Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");1839NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");1840}1841return NewV;1842}18431844Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));1845Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));18461847SmallVector<int, 32> Indices;1848for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {1849llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);1850// Check for -1 and output it as undef in the IR.1851if (Idx.isSigned() && Idx.isAllOnes())1852Indices.push_back(-1);1853else1854Indices.push_back(Idx.getZExtValue());1855}18561857return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle");1858}18591860Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {1861QualType SrcType = E->getSrcExpr()->getType(),1862DstType = E->getType();18631864Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());18651866SrcType = CGF.getContext().getCanonicalType(SrcType);1867DstType = CGF.getContext().getCanonicalType(DstType);1868if (SrcType == DstType) return Src;18691870assert(SrcType->isVectorType() &&1871"ConvertVector source type must be a vector");1872assert(DstType->isVectorType() &&1873"ConvertVector destination type must be a vector");18741875llvm::Type *SrcTy = Src->getType();1876llvm::Type *DstTy = ConvertType(DstType);18771878// Ignore conversions like int -> uint.1879if (SrcTy == DstTy)1880return Src;18811882QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),1883DstEltType = DstType->castAs<VectorType>()->getElementType();18841885assert(SrcTy->isVectorTy() &&1886"ConvertVector source IR type must be a vector");1887assert(DstTy->isVectorTy() &&1888"ConvertVector destination IR type must be a vector");18891890llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(),1891*DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType();18921893if (DstEltType->isBooleanType()) {1894assert((SrcEltTy->isFloatingPointTy() ||1895isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");18961897llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);1898if (SrcEltTy->isFloatingPointTy()) {1899return Builder.CreateFCmpUNE(Src, Zero, "tobool");1900} else {1901return Builder.CreateICmpNE(Src, Zero, "tobool");1902}1903}19041905// We have the arithmetic types: real int/float.1906Value *Res = nullptr;19071908if (isa<llvm::IntegerType>(SrcEltTy)) {1909bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();1910if (isa<llvm::IntegerType>(DstEltTy))1911Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");1912else if (InputSigned)1913Res = Builder.CreateSIToFP(Src, DstTy, "conv");1914else1915Res = Builder.CreateUIToFP(Src, DstTy, "conv");1916} else if (isa<llvm::IntegerType>(DstEltTy)) {1917assert(SrcEltTy->isFloatingPointTy() && "Unknown real 
conversion");1918if (DstEltType->isSignedIntegerOrEnumerationType())1919Res = Builder.CreateFPToSI(Src, DstTy, "conv");1920else1921Res = Builder.CreateFPToUI(Src, DstTy, "conv");1922} else {1923assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&1924"Unknown real conversion");1925if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())1926Res = Builder.CreateFPTrunc(Src, DstTy, "conv");1927else1928Res = Builder.CreateFPExt(Src, DstTy, "conv");1929}19301931return Res;1932}19331934Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {1935if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {1936CGF.EmitIgnoredExpr(E->getBase());1937return CGF.emitScalarConstant(Constant, E);1938} else {1939Expr::EvalResult Result;1940if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) {1941llvm::APSInt Value = Result.Val.getInt();1942CGF.EmitIgnoredExpr(E->getBase());1943return Builder.getInt(Value);1944}1945}19461947llvm::Value *Result = EmitLoadOfLValue(E);19481949// If -fdebug-info-for-profiling is specified, emit a pseudo variable and its1950// debug info for the pointer, even if there is no variable associated with1951// the pointer's expression.1952if (CGF.CGM.getCodeGenOpts().DebugInfoForProfiling && CGF.getDebugInfo()) {1953if (llvm::LoadInst *Load = dyn_cast<llvm::LoadInst>(Result)) {1954if (llvm::GetElementPtrInst *GEP =1955dyn_cast<llvm::GetElementPtrInst>(Load->getPointerOperand())) {1956if (llvm::Instruction *Pointer =1957dyn_cast<llvm::Instruction>(GEP->getPointerOperand())) {1958QualType Ty = E->getBase()->getType();1959if (!E->isArrow())1960Ty = CGF.getContext().getPointerType(Ty);1961CGF.getDebugInfo()->EmitPseudoVariable(Builder, Pointer, Ty);1962}1963}1964}1965}1966return Result;1967}19681969Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {1970TestAndClearIgnoreResultAssign();19711972// Emit subscript expressions in rvalue context's. For most cases, this just1973// loads the lvalue formed by the subscript expr. However, we have to be1974// careful, because the base of a vector subscript is occasionally an rvalue,1975// so we can't get it as an lvalue.1976if (!E->getBase()->getType()->isVectorType() &&1977!E->getBase()->getType()->isSveVLSBuiltinType())1978return EmitLoadOfLValue(E);19791980// Handle the vector case. The base must be a vector, the index must be an1981// integer value.1982Value *Base = Visit(E->getBase());1983Value *Idx = Visit(E->getIdx());1984QualType IdxTy = E->getIdx()->getType();19851986if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))1987CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);19881989return Builder.CreateExtractElement(Base, Idx, "vecext");1990}19911992Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {1993TestAndClearIgnoreResultAssign();19941995// Handle the vector case. 
The base must be a vector, the index must be an1996// integer value.1997Value *RowIdx = Visit(E->getRowIdx());1998Value *ColumnIdx = Visit(E->getColumnIdx());19992000const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();2001unsigned NumRows = MatrixTy->getNumRows();2002llvm::MatrixBuilder MB(Builder);2003Value *Idx = MB.CreateIndex(RowIdx, ColumnIdx, NumRows);2004if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)2005MB.CreateIndexAssumption(Idx, MatrixTy->getNumElementsFlattened());20062007Value *Matrix = Visit(E->getBase());20082009// TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?2010return Builder.CreateExtractElement(Matrix, Idx, "matrixext");2011}20122013static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,2014unsigned Off) {2015int MV = SVI->getMaskValue(Idx);2016if (MV == -1)2017return -1;2018return Off + MV;2019}20202021static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {2022assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&2023"Index operand too large for shufflevector mask!");2024return C->getZExtValue();2025}20262027Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {2028bool Ignore = TestAndClearIgnoreResultAssign();2029(void)Ignore;2030assert (Ignore == false && "init list ignored");2031unsigned NumInitElements = E->getNumInits();20322033if (E->hadArrayRangeDesignator())2034CGF.ErrorUnsupported(E, "GNU array range designator extension");20352036llvm::VectorType *VType =2037dyn_cast<llvm::VectorType>(ConvertType(E->getType()));20382039if (!VType) {2040if (NumInitElements == 0) {2041// C++11 value-initialization for the scalar.2042return EmitNullValue(E->getType());2043}2044// We have a scalar in braces. Just use the first element.2045return Visit(E->getInit(0));2046}20472048if (isa<llvm::ScalableVectorType>(VType)) {2049if (NumInitElements == 0) {2050// C++11 value-initialization for the vector.2051return EmitNullValue(E->getType());2052}20532054if (NumInitElements == 1) {2055Expr *InitVector = E->getInit(0);20562057// Initialize from another scalable vector of the same type.2058if (InitVector->getType() == E->getType())2059return Visit(InitVector);2060}20612062llvm_unreachable("Unexpected initialization of a scalable vector!");2063}20642065unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();20662067// Loop over initializers collecting the Value for each, and remembering2068// whether the source was swizzle (ExtVectorElementExpr). This will allow2069// us to fold the shuffle for the swizzle into the shuffle for the vector2070// initializer, since LLVM optimizers generally do not want to touch2071// shuffles.2072unsigned CurIdx = 0;2073bool VIsPoisonShuffle = false;2074llvm::Value *V = llvm::PoisonValue::get(VType);2075for (unsigned i = 0; i != NumInitElements; ++i) {2076Expr *IE = E->getInit(i);2077Value *Init = Visit(IE);2078SmallVector<int, 16> Args;20792080llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());20812082// Handle scalar elements. 
If the scalar initializer is actually one2083// element of a different vector of the same width, use shuffle instead of2084// extract+insert.2085if (!VVT) {2086if (isa<ExtVectorElementExpr>(IE)) {2087llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);20882089if (cast<llvm::FixedVectorType>(EI->getVectorOperandType())2090->getNumElements() == ResElts) {2091llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());2092Value *LHS = nullptr, *RHS = nullptr;2093if (CurIdx == 0) {2094// insert into poison -> shuffle (src, poison)2095// shufflemask must use an i322096Args.push_back(getAsInt32(C, CGF.Int32Ty));2097Args.resize(ResElts, -1);20982099LHS = EI->getVectorOperand();2100RHS = V;2101VIsPoisonShuffle = true;2102} else if (VIsPoisonShuffle) {2103// insert into poison shuffle && size match -> shuffle (v, src)2104llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);2105for (unsigned j = 0; j != CurIdx; ++j)2106Args.push_back(getMaskElt(SVV, j, 0));2107Args.push_back(ResElts + C->getZExtValue());2108Args.resize(ResElts, -1);21092110LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);2111RHS = EI->getVectorOperand();2112VIsPoisonShuffle = false;2113}2114if (!Args.empty()) {2115V = Builder.CreateShuffleVector(LHS, RHS, Args);2116++CurIdx;2117continue;2118}2119}2120}2121V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),2122"vecinit");2123VIsPoisonShuffle = false;2124++CurIdx;2125continue;2126}21272128unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements();21292130// If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's2131// input is the same width as the vector being constructed, generate an2132// optimized shuffle of the swizzle input into the result.2133unsigned Offset = (CurIdx == 0) ? 0 : ResElts;2134if (isa<ExtVectorElementExpr>(IE)) {2135llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);2136Value *SVOp = SVI->getOperand(0);2137auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType());21382139if (OpTy->getNumElements() == ResElts) {2140for (unsigned j = 0; j != CurIdx; ++j) {2141// If the current vector initializer is a shuffle with poison, merge2142// this shuffle directly into it.2143if (VIsPoisonShuffle) {2144Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0));2145} else {2146Args.push_back(j);2147}2148}2149for (unsigned j = 0, je = InitElts; j != je; ++j)2150Args.push_back(getMaskElt(SVI, j, Offset));2151Args.resize(ResElts, -1);21522153if (VIsPoisonShuffle)2154V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);21552156Init = SVOp;2157}2158}21592160// Extend init to result vector length, and then shuffle its contribution2161// to the vector initializer into V.2162if (Args.empty()) {2163for (unsigned j = 0; j != InitElts; ++j)2164Args.push_back(j);2165Args.resize(ResElts, -1);2166Init = Builder.CreateShuffleVector(Init, Args, "vext");21672168Args.clear();2169for (unsigned j = 0; j != CurIdx; ++j)2170Args.push_back(j);2171for (unsigned j = 0; j != InitElts; ++j)2172Args.push_back(j + Offset);2173Args.resize(ResElts, -1);2174}21752176// If V is poison, make sure it ends up on the RHS of the shuffle to aid2177// merging subsequent shuffles into this one.2178if (CurIdx == 0)2179std::swap(V, Init);2180V = Builder.CreateShuffleVector(V, Init, Args, "vecinit");2181VIsPoisonShuffle = isa<llvm::PoisonValue>(Init);2182CurIdx += InitElts;2183}21842185// FIXME: evaluate codegen vs. 
shuffling against constant null vector.2186// Emit remaining default initializers.2187llvm::Type *EltTy = VType->getElementType();21882189// Emit remaining default initializers2190for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {2191Value *Idx = Builder.getInt32(CurIdx);2192llvm::Value *Init = llvm::Constant::getNullValue(EltTy);2193V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");2194}2195return V;2196}21972198bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {2199const Expr *E = CE->getSubExpr();22002201if (CE->getCastKind() == CK_UncheckedDerivedToBase)2202return false;22032204if (isa<CXXThisExpr>(E->IgnoreParens())) {2205// We always assume that 'this' is never null.2206return false;2207}22082209if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {2210// And that glvalue casts are never null.2211if (ICE->isGLValue())2212return false;2213}22142215return true;2216}22172218// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts2219// have to handle a more broad range of conversions than explicit casts, as they2220// handle things like function to ptr-to-function decay etc.2221Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {2222Expr *E = CE->getSubExpr();2223QualType DestTy = CE->getType();2224CastKind Kind = CE->getCastKind();2225CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, CE);22262227// These cases are generally not written to ignore the result of2228// evaluating their sub-expressions, so we clear this now.2229bool Ignored = TestAndClearIgnoreResultAssign();22302231// Since almost all cast kinds apply to scalars, this switch doesn't have2232// a default case, so the compiler will warn on a missing case. The cases2233// are in the same order as in the CastKind enum.2234switch (Kind) {2235case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");2236case CK_BuiltinFnToFnPtr:2237llvm_unreachable("builtin functions are handled elsewhere");22382239case CK_LValueBitCast:2240case CK_ObjCObjectLValueCast: {2241Address Addr = EmitLValue(E).getAddress();2242Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));2243LValue LV = CGF.MakeAddrLValue(Addr, DestTy);2244return EmitLoadOfLValue(LV, CE->getExprLoc());2245}22462247case CK_LValueToRValueBitCast: {2248LValue SourceLVal = CGF.EmitLValue(E);2249Address Addr =2250SourceLVal.getAddress().withElementType(CGF.ConvertTypeForMem(DestTy));2251LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);2252DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());2253return EmitLoadOfLValue(DestLV, CE->getExprLoc());2254}22552256case CK_CPointerToObjCPointerCast:2257case CK_BlockPointerToObjCPointerCast:2258case CK_AnyPointerToBlockPointerCast:2259case CK_BitCast: {2260Value *Src = Visit(const_cast<Expr*>(E));2261llvm::Type *SrcTy = Src->getType();2262llvm::Type *DstTy = ConvertType(DestTy);2263assert(2264(!SrcTy->isPtrOrPtrVectorTy() || !DstTy->isPtrOrPtrVectorTy() ||2265SrcTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace()) &&2266"Address-space cast must be used to convert address spaces");22672268if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {2269if (auto *PT = DestTy->getAs<PointerType>()) {2270CGF.EmitVTablePtrCheckForCast(2271PT->getPointeeType(),2272Address(Src,2273CGF.ConvertTypeForMem(2274E->getType()->castAs<PointerType>()->getPointeeType()),2275CGF.getPointerAlign()),2276/*MayBeNull=*/true, CodeGenFunction::CFITCK_UnrelatedCast,2277CE->getBeginLoc());2278}2279}22802281if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {2282const 
QualType SrcType = E->getType();22832284if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {2285// Casting to pointer that could carry dynamic information (provided by2286// invariant.group) requires launder.2287Src = Builder.CreateLaunderInvariantGroup(Src);2288} else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {2289// Casting to pointer that does not carry dynamic information (provided2290// by invariant.group) requires stripping it. Note that we don't do it2291// if the source could not be dynamic type and destination could be2292// dynamic because dynamic information is already laundered. It is2293// because launder(strip(src)) == launder(src), so there is no need to2294// add extra strip before launder.2295Src = Builder.CreateStripInvariantGroup(Src);2296}2297}22982299// Update heapallocsite metadata when there is an explicit pointer cast.2300if (auto *CI = dyn_cast<llvm::CallBase>(Src)) {2301if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE) &&2302!isa<CastExpr>(E)) {2303QualType PointeeType = DestTy->getPointeeType();2304if (!PointeeType.isNull())2305CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType,2306CE->getExprLoc());2307}2308}23092310// If Src is a fixed vector and Dst is a scalable vector, and both have the2311// same element type, use the llvm.vector.insert intrinsic to perform the2312// bitcast.2313if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {2314if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(DstTy)) {2315// If we are casting a fixed i8 vector to a scalable i1 predicate2316// vector, use a vector insert and bitcast the result.2317if (ScalableDstTy->getElementType()->isIntegerTy(1) &&2318ScalableDstTy->getElementCount().isKnownMultipleOf(8) &&2319FixedSrcTy->getElementType()->isIntegerTy(8)) {2320ScalableDstTy = llvm::ScalableVectorType::get(2321FixedSrcTy->getElementType(),2322ScalableDstTy->getElementCount().getKnownMinValue() / 8);2323}2324if (FixedSrcTy->getElementType() == ScalableDstTy->getElementType()) {2325llvm::Value *UndefVec = llvm::UndefValue::get(ScalableDstTy);2326llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);2327llvm::Value *Result = Builder.CreateInsertVector(2328ScalableDstTy, UndefVec, Src, Zero, "cast.scalable");2329if (Result->getType() != DstTy)2330Result = Builder.CreateBitCast(Result, DstTy);2331return Result;2332}2333}2334}23352336// If Src is a scalable vector and Dst is a fixed vector, and both have the2337// same element type, use the llvm.vector.extract intrinsic to perform the2338// bitcast.2339if (auto *ScalableSrcTy = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {2340if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(DstTy)) {2341// If we are casting a scalable i1 predicate vector to a fixed i82342// vector, bitcast the source and use a vector extract.2343if (ScalableSrcTy->getElementType()->isIntegerTy(1) &&2344ScalableSrcTy->getElementCount().isKnownMultipleOf(8) &&2345FixedDstTy->getElementType()->isIntegerTy(8)) {2346ScalableSrcTy = llvm::ScalableVectorType::get(2347FixedDstTy->getElementType(),2348ScalableSrcTy->getElementCount().getKnownMinValue() / 8);2349Src = Builder.CreateBitCast(Src, ScalableSrcTy);2350}2351if (ScalableSrcTy->getElementType() == FixedDstTy->getElementType()) {2352llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);2353return Builder.CreateExtractVector(DstTy, Src, Zero, "cast.fixed");2354}2355}2356}23572358// Perform VLAT <-> VLST bitcast through memory.2359// TODO: since the 
llvm.vector.{insert,extract} intrinsics2360// require the element types of the vectors to be the same, we2361// need to keep this around for bitcasts between VLAT <-> VLST where2362// the element types of the vectors are not the same, until we figure2363// out a better way of doing these casts.2364if ((isa<llvm::FixedVectorType>(SrcTy) &&2365isa<llvm::ScalableVectorType>(DstTy)) ||2366(isa<llvm::ScalableVectorType>(SrcTy) &&2367isa<llvm::FixedVectorType>(DstTy))) {2368Address Addr = CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-value");2369LValue LV = CGF.MakeAddrLValue(Addr, E->getType());2370CGF.EmitStoreOfScalar(Src, LV);2371Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));2372LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);2373DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());2374return EmitLoadOfLValue(DestLV, CE->getExprLoc());2375}23762377llvm::Value *Result = Builder.CreateBitCast(Src, DstTy);2378return CGF.authPointerToPointerCast(Result, E->getType(), DestTy);2379}2380case CK_AddressSpaceConversion: {2381Expr::EvalResult Result;2382if (E->EvaluateAsRValue(Result, CGF.getContext()) &&2383Result.Val.isNullPointer()) {2384// If E has side effect, it is emitted even if its final result is a2385// null pointer. In that case, a DCE pass should be able to2386// eliminate the useless instructions emitted during translating E.2387if (Result.HasSideEffects)2388Visit(E);2389return CGF.CGM.getNullPointer(cast<llvm::PointerType>(2390ConvertType(DestTy)), DestTy);2391}2392// Since target may map different address spaces in AST to the same address2393// space, an address space conversion may end up as a bitcast.2394return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(2395CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(),2396DestTy->getPointeeType().getAddressSpace(), ConvertType(DestTy));2397}2398case CK_AtomicToNonAtomic:2399case CK_NonAtomicToAtomic:2400case CK_UserDefinedConversion:2401return Visit(const_cast<Expr*>(E));24022403case CK_NoOp: {2404return CE->changesVolatileQualification() ? 
EmitLoadOfLValue(CE)2405: Visit(const_cast<Expr *>(E));2406}24072408case CK_BaseToDerived: {2409const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();2410assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");24112412Address Base = CGF.EmitPointerWithAlignment(E);2413Address Derived =2414CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,2415CE->path_begin(), CE->path_end(),2416CGF.ShouldNullCheckClassCastValue(CE));24172418// C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is2419// performed and the object is not of the derived type.2420if (CGF.sanitizePerformTypeCheck())2421CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),2422Derived, DestTy->getPointeeType());24232424if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))2425CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(), Derived,2426/*MayBeNull=*/true,2427CodeGenFunction::CFITCK_DerivedCast,2428CE->getBeginLoc());24292430return CGF.getAsNaturalPointerTo(Derived, CE->getType()->getPointeeType());2431}2432case CK_UncheckedDerivedToBase:2433case CK_DerivedToBase: {2434// The EmitPointerWithAlignment path does this fine; just discard2435// the alignment.2436return CGF.getAsNaturalPointerTo(CGF.EmitPointerWithAlignment(CE),2437CE->getType()->getPointeeType());2438}24392440case CK_Dynamic: {2441Address V = CGF.EmitPointerWithAlignment(E);2442const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);2443return CGF.EmitDynamicCast(V, DCE);2444}24452446case CK_ArrayToPointerDecay:2447return CGF.getAsNaturalPointerTo(CGF.EmitArrayToPointerDecay(E),2448CE->getType()->getPointeeType());2449case CK_FunctionToPointerDecay:2450return EmitLValue(E).getPointer(CGF);24512452case CK_NullToPointer:2453if (MustVisitNullValue(E))2454CGF.EmitIgnoredExpr(E);24552456return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),2457DestTy);24582459case CK_NullToMemberPointer: {2460if (MustVisitNullValue(E))2461CGF.EmitIgnoredExpr(E);24622463const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();2464return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);2465}24662467case CK_ReinterpretMemberPointer:2468case CK_BaseToDerivedMemberPointer:2469case CK_DerivedToBaseMemberPointer: {2470Value *Src = Visit(E);24712472// Note that the AST doesn't distinguish between checked and2473// unchecked member pointer conversions, so we always have to2474// implement checked conversions here. 
This is inefficient when2475// actual control flow may be required in order to perform the2476// check, which it is for data member pointers (but not member2477// function pointers on Itanium and ARM).2478return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);2479}24802481case CK_ARCProduceObject:2482return CGF.EmitARCRetainScalarExpr(E);2483case CK_ARCConsumeObject:2484return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));2485case CK_ARCReclaimReturnedObject:2486return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);2487case CK_ARCExtendBlockObject:2488return CGF.EmitARCExtendBlockObject(E);24892490case CK_CopyAndAutoreleaseBlockObject:2491return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());24922493case CK_FloatingRealToComplex:2494case CK_FloatingComplexCast:2495case CK_IntegralRealToComplex:2496case CK_IntegralComplexCast:2497case CK_IntegralComplexToFloatingComplex:2498case CK_FloatingComplexToIntegralComplex:2499case CK_ConstructorConversion:2500case CK_ToUnion:2501case CK_HLSLArrayRValue:2502llvm_unreachable("scalar cast to non-scalar value");25032504case CK_LValueToRValue:2505assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));2506assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");2507return Visit(const_cast<Expr*>(E));25082509case CK_IntegralToPointer: {2510Value *Src = Visit(const_cast<Expr*>(E));25112512// First, convert to the correct width so that we control the kind of2513// extension.2514auto DestLLVMTy = ConvertType(DestTy);2515llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);2516bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();2517llvm::Value* IntResult =2518Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");25192520auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);25212522if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {2523// Going from integer to pointer that could be dynamic requires reloading2524// dynamic information from invariant.group.2525if (DestTy.mayBeDynamicClass())2526IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);2527}25282529IntToPtr = CGF.authPointerToPointerCast(IntToPtr, E->getType(), DestTy);2530return IntToPtr;2531}2532case CK_PointerToIntegral: {2533assert(!DestTy->isBooleanType() && "bool should use PointerToBool");2534auto *PtrExpr = Visit(E);25352536if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {2537const QualType SrcType = E->getType();25382539// Casting to integer requires stripping dynamic information as it does2540// not carries it.2541if (SrcType.mayBeDynamicClass())2542PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);2543}25442545PtrExpr = CGF.authPointerToPointerCast(PtrExpr, E->getType(), DestTy);2546return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));2547}2548case CK_ToVoid: {2549CGF.EmitIgnoredExpr(E);2550return nullptr;2551}2552case CK_MatrixCast: {2553return EmitScalarConversion(Visit(E), E->getType(), DestTy,2554CE->getExprLoc());2555}2556case CK_VectorSplat: {2557llvm::Type *DstTy = ConvertType(DestTy);2558Value *Elt = Visit(const_cast<Expr *>(E));2559// Splat the element across to all elements2560llvm::ElementCount NumElements =2561cast<llvm::VectorType>(DstTy)->getElementCount();2562return Builder.CreateVectorSplat(NumElements, Elt, "splat");2563}25642565case CK_FixedPointCast:2566return EmitScalarConversion(Visit(E), E->getType(), DestTy,2567CE->getExprLoc());25682569case CK_FixedPointToBoolean:2570assert(E->getType()->isFixedPointType() &&2571"Expected src type to be 
fixed point type");2572assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");2573return EmitScalarConversion(Visit(E), E->getType(), DestTy,2574CE->getExprLoc());25752576case CK_FixedPointToIntegral:2577assert(E->getType()->isFixedPointType() &&2578"Expected src type to be fixed point type");2579assert(DestTy->isIntegerType() && "Expected dest type to be an integer");2580return EmitScalarConversion(Visit(E), E->getType(), DestTy,2581CE->getExprLoc());25822583case CK_IntegralToFixedPoint:2584assert(E->getType()->isIntegerType() &&2585"Expected src type to be an integer");2586assert(DestTy->isFixedPointType() &&2587"Expected dest type to be fixed point type");2588return EmitScalarConversion(Visit(E), E->getType(), DestTy,2589CE->getExprLoc());25902591case CK_IntegralCast: {2592if (E->getType()->isExtVectorType() && DestTy->isExtVectorType()) {2593QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();2594return Builder.CreateIntCast(Visit(E), ConvertType(DestTy),2595SrcElTy->isSignedIntegerOrEnumerationType(),2596"conv");2597}2598ScalarConversionOpts Opts;2599if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {2600if (!ICE->isPartOfExplicitCast())2601Opts = ScalarConversionOpts(CGF.SanOpts);2602}2603return EmitScalarConversion(Visit(E), E->getType(), DestTy,2604CE->getExprLoc(), Opts);2605}2606case CK_IntegralToFloating: {2607if (E->getType()->isVectorType() && DestTy->isVectorType()) {2608// TODO: Support constrained FP intrinsics.2609QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();2610if (SrcElTy->isSignedIntegerOrEnumerationType())2611return Builder.CreateSIToFP(Visit(E), ConvertType(DestTy), "conv");2612return Builder.CreateUIToFP(Visit(E), ConvertType(DestTy), "conv");2613}2614CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);2615return EmitScalarConversion(Visit(E), E->getType(), DestTy,2616CE->getExprLoc());2617}2618case CK_FloatingToIntegral: {2619if (E->getType()->isVectorType() && DestTy->isVectorType()) {2620// TODO: Support constrained FP intrinsics.2621QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();2622if (DstElTy->isSignedIntegerOrEnumerationType())2623return Builder.CreateFPToSI(Visit(E), ConvertType(DestTy), "conv");2624return Builder.CreateFPToUI(Visit(E), ConvertType(DestTy), "conv");2625}2626CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);2627return EmitScalarConversion(Visit(E), E->getType(), DestTy,2628CE->getExprLoc());2629}2630case CK_FloatingCast: {2631if (E->getType()->isVectorType() && DestTy->isVectorType()) {2632// TODO: Support constrained FP intrinsics.2633QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();2634QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();2635if (DstElTy->castAs<BuiltinType>()->getKind() <2636SrcElTy->castAs<BuiltinType>()->getKind())2637return Builder.CreateFPTrunc(Visit(E), ConvertType(DestTy), "conv");2638return Builder.CreateFPExt(Visit(E), ConvertType(DestTy), "conv");2639}2640CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);2641return EmitScalarConversion(Visit(E), E->getType(), DestTy,2642CE->getExprLoc());2643}2644case CK_FixedPointToFloating:2645case CK_FloatingToFixedPoint: {2646CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);2647return EmitScalarConversion(Visit(E), E->getType(), DestTy,2648CE->getExprLoc());2649}2650case CK_BooleanToSignedIntegral: {2651ScalarConversionOpts Opts;2652Opts.TreatBooleanAsSigned = true;2653return EmitScalarConversion(Visit(E), E->getType(), DestTy,2654CE->getExprLoc(), 
Opts);2655}2656case CK_IntegralToBoolean:2657return EmitIntToBoolConversion(Visit(E));2658case CK_PointerToBoolean:2659return EmitPointerToBoolConversion(Visit(E), E->getType());2660case CK_FloatingToBoolean: {2661CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);2662return EmitFloatToBoolConversion(Visit(E));2663}2664case CK_MemberPointerToBoolean: {2665llvm::Value *MemPtr = Visit(E);2666const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();2667return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);2668}26692670case CK_FloatingComplexToReal:2671case CK_IntegralComplexToReal:2672return CGF.EmitComplexExpr(E, false, true).first;26732674case CK_FloatingComplexToBoolean:2675case CK_IntegralComplexToBoolean: {2676CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);26772678// TODO: kill this function off, inline appropriate case here2679return EmitComplexToScalarConversion(V, E->getType(), DestTy,2680CE->getExprLoc());2681}26822683case CK_ZeroToOCLOpaqueType: {2684assert((DestTy->isEventT() || DestTy->isQueueT() ||2685DestTy->isOCLIntelSubgroupAVCType()) &&2686"CK_ZeroToOCLEvent cast on non-event type");2687return llvm::Constant::getNullValue(ConvertType(DestTy));2688}26892690case CK_IntToOCLSampler:2691return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);26922693case CK_HLSLVectorTruncation: {2694assert(DestTy->isVectorType() && "Expected dest type to be vector type");2695Value *Vec = Visit(const_cast<Expr *>(E));2696SmallVector<int, 16> Mask;2697unsigned NumElts = DestTy->castAs<VectorType>()->getNumElements();2698for (unsigned I = 0; I != NumElts; ++I)2699Mask.push_back(I);27002701return Builder.CreateShuffleVector(Vec, Mask, "trunc");2702}27032704} // end of switch27052706llvm_unreachable("unknown scalar cast");2707}27082709Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {2710CodeGenFunction::StmtExprEvaluation eval(CGF);2711Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),2712!E->getType()->isVoidType());2713if (!RetAlloca.isValid())2714return nullptr;2715return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),2716E->getExprLoc());2717}27182719Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {2720CodeGenFunction::RunCleanupsScope Scope(CGF);2721Value *V = Visit(E->getSubExpr());2722// Defend against dominance problems caused by jumps out of expression2723// evaluation through the shared cleanup block.2724Scope.ForceCleanup({&V});2725return V;2726}27272728//===----------------------------------------------------------------------===//2729// Unary Operators2730//===----------------------------------------------------------------------===//27312732static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,2733llvm::Value *InVal, bool IsInc,2734FPOptions FPFeatures) {2735BinOpInfo BinOp;2736BinOp.LHS = InVal;2737BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);2738BinOp.Ty = E->getType();2739BinOp.Opcode = IsInc ? BO_Add : BO_Sub;2740BinOp.FPFeatures = FPFeatures;2741BinOp.E = E;2742return BinOp;2743}27442745llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(2746const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {2747llvm::Value *Amount =2748llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);2749StringRef Name = IsInc ? 
"inc" : "dec";2750switch (CGF.getLangOpts().getSignedOverflowBehavior()) {2751case LangOptions::SOB_Defined:2752if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))2753return Builder.CreateAdd(InVal, Amount, Name);2754[[fallthrough]];2755case LangOptions::SOB_Undefined:2756if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))2757return Builder.CreateNSWAdd(InVal, Amount, Name);2758[[fallthrough]];2759case LangOptions::SOB_Trapping:2760if (!E->canOverflow())2761return Builder.CreateNSWAdd(InVal, Amount, Name);2762return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(2763E, InVal, IsInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));2764}2765llvm_unreachable("Unknown SignedOverflowBehaviorTy");2766}27672768namespace {2769/// Handles check and update for lastprivate conditional variables.2770class OMPLastprivateConditionalUpdateRAII {2771private:2772CodeGenFunction &CGF;2773const UnaryOperator *E;27742775public:2776OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,2777const UnaryOperator *E)2778: CGF(CGF), E(E) {}2779~OMPLastprivateConditionalUpdateRAII() {2780if (CGF.getLangOpts().OpenMP)2781CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(2782CGF, E->getSubExpr());2783}2784};2785} // namespace27862787llvm::Value *2788ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,2789bool isInc, bool isPre) {2790OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);2791QualType type = E->getSubExpr()->getType();2792llvm::PHINode *atomicPHI = nullptr;2793llvm::Value *value;2794llvm::Value *input;2795llvm::Value *Previous = nullptr;2796QualType SrcType = E->getType();27972798int amount = (isInc ? 1 : -1);2799bool isSubtraction = !isInc;28002801if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {2802type = atomicTy->getValueType();2803if (isInc && type->isBooleanType()) {2804llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);2805if (isPre) {2806Builder.CreateStore(True, LV.getAddress(), LV.isVolatileQualified())2807->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);2808return Builder.getTrue();2809}2810// For atomic bool increment, we just store true and return it for2811// preincrement, do an atomic swap with true for postincrement2812return Builder.CreateAtomicRMW(2813llvm::AtomicRMWInst::Xchg, LV.getAddress(), True,2814llvm::AtomicOrdering::SequentiallyConsistent);2815}2816// Special case for atomic increment / decrement on integers, emit2817// atomicrmw instructions. We skip this if we want to be doing overflow2818// checking, and fall into the slow path with the atomic cmpxchg loop.2819if (!type->isBooleanType() && type->isIntegerType() &&2820!(type->isUnsignedIntegerType() &&2821CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&2822CGF.getLangOpts().getSignedOverflowBehavior() !=2823LangOptions::SOB_Trapping) {2824llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :2825llvm::AtomicRMWInst::Sub;2826llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :2827llvm::Instruction::Sub;2828llvm::Value *amt = CGF.EmitToMemory(2829llvm::ConstantInt::get(ConvertType(type), 1, true), type);2830llvm::Value *old =2831Builder.CreateAtomicRMW(aop, LV.getAddress(), amt,2832llvm::AtomicOrdering::SequentiallyConsistent);2833return isPre ? 
Builder.CreateBinOp(op, old, amt) : old;
    }
    // Special case for atomic increment/decrement on floats.
    // Bail out on non-power-of-2-sized floating point types (e.g., x86_fp80).
    if (type->isFloatingType()) {
      llvm::Type *Ty = ConvertType(type);
      if (llvm::has_single_bit(Ty->getScalarSizeInBits())) {
        llvm::AtomicRMWInst::BinOp aop =
            isInc ? llvm::AtomicRMWInst::FAdd : llvm::AtomicRMWInst::FSub;
        llvm::Instruction::BinaryOps op =
            isInc ? llvm::Instruction::FAdd : llvm::Instruction::FSub;
        llvm::Value *amt = llvm::ConstantFP::get(Ty, 1.0);
        llvm::AtomicRMWInst *old = Builder.CreateAtomicRMW(
            aop, LV.getAddress(), amt,
            llvm::AtomicOrdering::SequentiallyConsistent);

        return isPre ? Builder.CreateBinOp(op, old, amt) : old;
      }
    }
    value = EmitLoadOfLValue(LV, E->getExprLoc());
    input = value;
    // For every other atomic operation, we need to emit a load-op-cmpxchg loop
    llvm::BasicBlock *startBB = Builder.GetInsertBlock();
    llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
    value = CGF.EmitToMemory(value, type);
    Builder.CreateBr(opBB);
    Builder.SetInsertPoint(opBB);
    atomicPHI = Builder.CreatePHI(value->getType(), 2);
    atomicPHI->addIncoming(value, startBB);
    value = atomicPHI;
  } else {
    value = EmitLoadOfLValue(LV, E->getExprLoc());
    input = value;
  }

  // Special case of integer increment that we have to check first: bool++.
  // Due to promotion rules, we get:
  //   bool++ -> bool = bool + 1
  //          -> bool = (int)bool + 1
  //          -> bool = ((int)bool + 1 != 0)
  // An interesting aspect of this is that increment is always true.
  // Decrement does not have this property.
  if (isInc && type->isBooleanType()) {
    value = Builder.getTrue();

  // Most common case by far: integer increment.
  } else if (type->isIntegerType()) {
    QualType promotedType;
    bool canPerformLossyDemotionCheck = false;
    if (CGF.getContext().isPromotableIntegerType(type)) {
      promotedType = CGF.getContext().getPromotedIntegerType(type);
      assert(promotedType != type && "Shouldn't promote to the same type.");
      canPerformLossyDemotionCheck = true;
      canPerformLossyDemotionCheck &=
          CGF.getContext().getCanonicalType(type) !=
          CGF.getContext().getCanonicalType(promotedType);
      canPerformLossyDemotionCheck &=
          PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
              type, promotedType);
      assert((!canPerformLossyDemotionCheck ||
              type->isSignedIntegerOrEnumerationType() ||
              promotedType->isSignedIntegerOrEnumerationType() ||
              ConvertType(type)->getScalarSizeInBits() ==
                  ConvertType(promotedType)->getScalarSizeInBits()) &&
             "The following check expects that if we do promotion to different "
             "underlying canonical type, at least one of the types (either "
             "base or promoted) will be signed, or the bitwidths will match.");
    }
    if (CGF.SanOpts.hasOneOf(
            SanitizerKind::ImplicitIntegerArithmeticValueChange |
            SanitizerKind::ImplicitBitfieldConversion) &&
        canPerformLossyDemotionCheck) {
      // While `x += 1` (for `x` with width less than int) is modeled as
      // promotion+arithmetics+demotion, and we can catch lossy demotion with
      // ease; inc/dec with width less than int can't overflow because of
      // promotion rules, so we omit promotion+demotion, which means that we
      // cannot catch lossy "demotion".
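      // (For example, "short s = SHRT_MAX; s++;" cannot overflow in the
      // promoted "int" arithmetic; only the demotion back to "short" is
      // lossy.)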
      // Because we still want to catch these cases when the sanitizer is
      // enabled, we perform the promotion, then perform the
      // increment/decrement in the wider type, and finally perform the
      // demotion. This will catch lossy demotions.

      // We have a special case for bitfields defined using all the bits of the
      // type. In this case we need to do the same trick as for the integer
      // sanitizer checks, i.e., promotion -> increment/decrement -> demotion.

      value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
      Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
      // Do pass non-default ScalarConversionOpts so that sanitizer check is
      // emitted if LV is not a bitfield, otherwise the bitfield sanitizer
      // checks will take care of the conversion.
      ScalarConversionOpts Opts;
      if (!LV.isBitField())
        Opts = ScalarConversionOpts(CGF.SanOpts);
      else if (CGF.SanOpts.has(SanitizerKind::ImplicitBitfieldConversion)) {
        Previous = value;
        SrcType = promotedType;
      }

      value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
                                   Opts);

      // Note that signed integer inc/dec with width less than int can't
      // overflow because of promotion rules; we're just eliding a few steps
      // here.
    } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
      value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
    } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
               CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
      value = EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
          E, value, isInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
    } else {
      llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
    }

  // Next most common: pointer increment.
  } else if (const PointerType *ptr = type->getAs<PointerType>()) {
    QualType type = ptr->getPointeeType();

    // VLA types don't have constant size.
    if (const VariableArrayType *vla
          = CGF.getContext().getAsVariableArrayType(type)) {
      llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
      if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
      llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType());
      if (CGF.getLangOpts().isSignedOverflowDefined())
        value = Builder.CreateGEP(elemTy, value, numElts, "vla.inc");
      else
        value = CGF.EmitCheckedInBoundsGEP(
            elemTy, value, numElts, /*SignedIndices=*/false, isSubtraction,
            E->getExprLoc(), "vla.inc");

    // Arithmetic on function pointers (!)
is just +-1.2967} else if (type->isFunctionType()) {2968llvm::Value *amt = Builder.getInt32(amount);29692970if (CGF.getLangOpts().isSignedOverflowDefined())2971value = Builder.CreateGEP(CGF.Int8Ty, value, amt, "incdec.funcptr");2972else2973value =2974CGF.EmitCheckedInBoundsGEP(CGF.Int8Ty, value, amt,2975/*SignedIndices=*/false, isSubtraction,2976E->getExprLoc(), "incdec.funcptr");29772978// For everything else, we can just do a simple increment.2979} else {2980llvm::Value *amt = Builder.getInt32(amount);2981llvm::Type *elemTy = CGF.ConvertTypeForMem(type);2982if (CGF.getLangOpts().isSignedOverflowDefined())2983value = Builder.CreateGEP(elemTy, value, amt, "incdec.ptr");2984else2985value = CGF.EmitCheckedInBoundsGEP(2986elemTy, value, amt, /*SignedIndices=*/false, isSubtraction,2987E->getExprLoc(), "incdec.ptr");2988}29892990// Vector increment/decrement.2991} else if (type->isVectorType()) {2992if (type->hasIntegerRepresentation()) {2993llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);29942995value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");2996} else {2997value = Builder.CreateFAdd(2998value,2999llvm::ConstantFP::get(value->getType(), amount),3000isInc ? "inc" : "dec");3001}30023003// Floating point.3004} else if (type->isRealFloatingType()) {3005// Add the inc/dec to the real part.3006llvm::Value *amt;3007CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);30083009if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {3010// Another special case: half FP increment should be done via float3011if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {3012value = Builder.CreateCall(3013CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,3014CGF.CGM.FloatTy),3015input, "incdec.conv");3016} else {3017value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv");3018}3019}30203021if (value->getType()->isFloatTy())3022amt = llvm::ConstantFP::get(VMContext,3023llvm::APFloat(static_cast<float>(amount)));3024else if (value->getType()->isDoubleTy())3025amt = llvm::ConstantFP::get(VMContext,3026llvm::APFloat(static_cast<double>(amount)));3027else {3028// Remaining types are Half, Bfloat16, LongDouble, __ibm128 or __float128.3029// Convert from float.3030llvm::APFloat F(static_cast<float>(amount));3031bool ignored;3032const llvm::fltSemantics *FS;3033// Don't use getFloatTypeSemantics because Half isn't3034// necessarily represented using the "half" LLVM type.3035if (value->getType()->isFP128Ty())3036FS = &CGF.getTarget().getFloat128Format();3037else if (value->getType()->isHalfTy())3038FS = &CGF.getTarget().getHalfFormat();3039else if (value->getType()->isBFloatTy())3040FS = &CGF.getTarget().getBFloat16Format();3041else if (value->getType()->isPPC_FP128Ty())3042FS = &CGF.getTarget().getIbm128Format();3043else3044FS = &CGF.getTarget().getLongDoubleFormat();3045F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);3046amt = llvm::ConstantFP::get(VMContext, F);3047}3048value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");30493050if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {3051if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {3052value = Builder.CreateCall(3053CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,3054CGF.CGM.FloatTy),3055value, "incdec.conv");3056} else {3057value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv");3058}3059}30603061// Fixed-point types.3062} else if (type->isFixedPointType()) {3063// Fixed-point types are tricky. 
In some cases, it isn't possible to3064// represent a 1 or a -1 in the type at all. Piggyback off of3065// EmitFixedPointBinOp to avoid having to reimplement saturation.3066BinOpInfo Info;3067Info.E = E;3068Info.Ty = E->getType();3069Info.Opcode = isInc ? BO_Add : BO_Sub;3070Info.LHS = value;3071Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false);3072// If the type is signed, it's better to represent this as +(-1) or -(-1),3073// since -1 is guaranteed to be representable.3074if (type->isSignedFixedPointType()) {3075Info.Opcode = isInc ? BO_Sub : BO_Add;3076Info.RHS = Builder.CreateNeg(Info.RHS);3077}3078// Now, convert from our invented integer literal to the type of the unary3079// op. This will upscale and saturate if necessary. This value can become3080// undef in some cases.3081llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);3082auto DstSema = CGF.getContext().getFixedPointSemantics(Info.Ty);3083Info.RHS = FPBuilder.CreateIntegerToFixed(Info.RHS, true, DstSema);3084value = EmitFixedPointBinOp(Info);30853086// Objective-C pointer types.3087} else {3088const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();30893090CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());3091if (!isInc) size = -size;3092llvm::Value *sizeValue =3093llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());30943095if (CGF.getLangOpts().isSignedOverflowDefined())3096value = Builder.CreateGEP(CGF.Int8Ty, value, sizeValue, "incdec.objptr");3097else3098value = CGF.EmitCheckedInBoundsGEP(3099CGF.Int8Ty, value, sizeValue, /*SignedIndices=*/false, isSubtraction,3100E->getExprLoc(), "incdec.objptr");3101value = Builder.CreateBitCast(value, input->getType());3102}31033104if (atomicPHI) {3105llvm::BasicBlock *curBlock = Builder.GetInsertBlock();3106llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);3107auto Pair = CGF.EmitAtomicCompareExchange(3108LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());3109llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);3110llvm::Value *success = Pair.second;3111atomicPHI->addIncoming(old, curBlock);3112Builder.CreateCondBr(success, contBB, atomicPHI->getParent());3113Builder.SetInsertPoint(contBB);3114return isPre ? value : input;3115}31163117// Store the updated result through the lvalue.3118if (LV.isBitField()) {3119Value *Src = Previous ? Previous : value;3120CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);3121CGF.EmitBitfieldConversionCheck(Src, SrcType, value, E->getType(),3122LV.getBitFieldInfo(), E->getExprLoc());3123} else3124CGF.EmitStoreThroughLValue(RValue::get(value), LV);31253126// If this is a postinc, return the value read from memory, otherwise use the3127// updated value.3128return isPre ? value : input;3129}313031313132Value *ScalarExprEmitter::VisitUnaryPlus(const UnaryOperator *E,3133QualType PromotionType) {3134QualType promotionTy = PromotionType.isNull()3135? 
getPromotionType(E->getSubExpr()->getType())3136: PromotionType;3137Value *result = VisitPlus(E, promotionTy);3138if (result && !promotionTy.isNull())3139result = EmitUnPromotedValue(result, E->getType());3140return result;3141}31423143Value *ScalarExprEmitter::VisitPlus(const UnaryOperator *E,3144QualType PromotionType) {3145// This differs from gcc, though, most likely due to a bug in gcc.3146TestAndClearIgnoreResultAssign();3147if (!PromotionType.isNull())3148return CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);3149return Visit(E->getSubExpr());3150}31513152Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E,3153QualType PromotionType) {3154QualType promotionTy = PromotionType.isNull()3155? getPromotionType(E->getSubExpr()->getType())3156: PromotionType;3157Value *result = VisitMinus(E, promotionTy);3158if (result && !promotionTy.isNull())3159result = EmitUnPromotedValue(result, E->getType());3160return result;3161}31623163Value *ScalarExprEmitter::VisitMinus(const UnaryOperator *E,3164QualType PromotionType) {3165TestAndClearIgnoreResultAssign();3166Value *Op;3167if (!PromotionType.isNull())3168Op = CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);3169else3170Op = Visit(E->getSubExpr());31713172// Generate a unary FNeg for FP ops.3173if (Op->getType()->isFPOrFPVectorTy())3174return Builder.CreateFNeg(Op, "fneg");31753176// Emit unary minus with EmitSub so we handle overflow cases etc.3177BinOpInfo BinOp;3178BinOp.RHS = Op;3179BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());3180BinOp.Ty = E->getType();3181BinOp.Opcode = BO_Sub;3182BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());3183BinOp.E = E;3184return EmitSub(BinOp);3185}31863187Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {3188TestAndClearIgnoreResultAssign();3189Value *Op = Visit(E->getSubExpr());3190return Builder.CreateNot(Op, "not");3191}31923193Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {3194// Perform vector logical not on comparison with zero vector.3195if (E->getType()->isVectorType() &&3196E->getType()->castAs<VectorType>()->getVectorKind() ==3197VectorKind::Generic) {3198Value *Oper = Visit(E->getSubExpr());3199Value *Zero = llvm::Constant::getNullValue(Oper->getType());3200Value *Result;3201if (Oper->getType()->isFPOrFPVectorTy()) {3202CodeGenFunction::CGFPOptionsRAII FPOptsRAII(3203CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));3204Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");3205} else3206Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");3207return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");3208}32093210// Compare operand to zero.3211Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());32123213// Invert value.3214// TODO: Could dynamically modify easy computations here. 
For example, if3215// the operand is an icmp ne, turn into icmp eq.3216BoolVal = Builder.CreateNot(BoolVal, "lnot");32173218// ZExt result to the expr type.3219return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");3220}32213222Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {3223// Try folding the offsetof to a constant.3224Expr::EvalResult EVResult;3225if (E->EvaluateAsInt(EVResult, CGF.getContext())) {3226llvm::APSInt Value = EVResult.Val.getInt();3227return Builder.getInt(Value);3228}32293230// Loop over the components of the offsetof to compute the value.3231unsigned n = E->getNumComponents();3232llvm::Type* ResultType = ConvertType(E->getType());3233llvm::Value* Result = llvm::Constant::getNullValue(ResultType);3234QualType CurrentType = E->getTypeSourceInfo()->getType();3235for (unsigned i = 0; i != n; ++i) {3236OffsetOfNode ON = E->getComponent(i);3237llvm::Value *Offset = nullptr;3238switch (ON.getKind()) {3239case OffsetOfNode::Array: {3240// Compute the index3241Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());3242llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);3243bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();3244Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");32453246// Save the element type3247CurrentType =3248CGF.getContext().getAsArrayType(CurrentType)->getElementType();32493250// Compute the element size3251llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,3252CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());32533254// Multiply out to compute the result3255Offset = Builder.CreateMul(Idx, ElemSize);3256break;3257}32583259case OffsetOfNode::Field: {3260FieldDecl *MemberDecl = ON.getField();3261RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();3262const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);32633264// Compute the index of the field in its parent.3265unsigned i = 0;3266// FIXME: It would be nice if we didn't have to loop here!3267for (RecordDecl::field_iterator Field = RD->field_begin(),3268FieldEnd = RD->field_end();3269Field != FieldEnd; ++Field, ++i) {3270if (*Field == MemberDecl)3271break;3272}3273assert(i < RL.getFieldCount() && "offsetof field in wrong type");32743275// Compute the offset to the field3276int64_t OffsetInt = RL.getFieldOffset(i) /3277CGF.getContext().getCharWidth();3278Offset = llvm::ConstantInt::get(ResultType, OffsetInt);32793280// Save the element type.3281CurrentType = MemberDecl->getType();3282break;3283}32843285case OffsetOfNode::Identifier:3286llvm_unreachable("dependent __builtin_offsetof");32873288case OffsetOfNode::Base: {3289if (ON.getBase()->isVirtual()) {3290CGF.ErrorUnsupported(E, "virtual base in offsetof");3291continue;3292}32933294RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();3295const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);32963297// Save the element type.3298CurrentType = ON.getBase()->getType();32993300// Compute the offset to the base.3301auto *BaseRT = CurrentType->castAs<RecordType>();3302auto *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());3303CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);3304Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());3305break;3306}3307}3308Result = Builder.CreateAdd(Result, Offset);3309}3310return Result;3311}33123313/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of3314/// argument of the sizeof expression as an integer.3315Value 
*3316ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(3317const UnaryExprOrTypeTraitExpr *E) {3318QualType TypeToSize = E->getTypeOfArgument();3319if (auto Kind = E->getKind();3320Kind == UETT_SizeOf || Kind == UETT_DataSizeOf) {3321if (const VariableArrayType *VAT =3322CGF.getContext().getAsVariableArrayType(TypeToSize)) {3323if (E->isArgumentType()) {3324// sizeof(type) - make sure to emit the VLA size.3325CGF.EmitVariablyModifiedType(TypeToSize);3326} else {3327// C99 6.5.3.4p2: If the argument is an expression of type3328// VLA, it is evaluated.3329CGF.EmitIgnoredExpr(E->getArgumentExpr());3330}33313332auto VlaSize = CGF.getVLASize(VAT);3333llvm::Value *size = VlaSize.NumElts;33343335// Scale the number of non-VLA elements by the non-VLA element size.3336CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);3337if (!eltSize.isOne())3338size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), size);33393340return size;3341}3342} else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {3343auto Alignment =3344CGF.getContext()3345.toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(3346E->getTypeOfArgument()->getPointeeType()))3347.getQuantity();3348return llvm::ConstantInt::get(CGF.SizeTy, Alignment);3349} else if (E->getKind() == UETT_VectorElements) {3350auto *VecTy = cast<llvm::VectorType>(ConvertType(E->getTypeOfArgument()));3351return Builder.CreateElementCount(CGF.SizeTy, VecTy->getElementCount());3352}33533354// If this isn't sizeof(vla), the result must be constant; use the constant3355// folding logic so we don't have to duplicate it here.3356return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));3357}33583359Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E,3360QualType PromotionType) {3361QualType promotionTy = PromotionType.isNull()3362? getPromotionType(E->getSubExpr()->getType())3363: PromotionType;3364Value *result = VisitReal(E, promotionTy);3365if (result && !promotionTy.isNull())3366result = EmitUnPromotedValue(result, E->getType());3367return result;3368}33693370Value *ScalarExprEmitter::VisitReal(const UnaryOperator *E,3371QualType PromotionType) {3372Expr *Op = E->getSubExpr();3373if (Op->getType()->isAnyComplexType()) {3374// If it's an l-value, load through the appropriate subobject l-value.3375// Note that we have to ask E because Op might be an l-value that3376// this won't work for, e.g. an Obj-C property.3377if (E->isGLValue()) {3378if (!PromotionType.isNull()) {3379CodeGenFunction::ComplexPairTy result = CGF.EmitComplexExpr(3380Op, /*IgnoreReal*/ IgnoreResultAssign, /*IgnoreImag*/ true);3381if (result.first)3382result.first = CGF.EmitPromotedValue(result, PromotionType).first;3383return result.first;3384} else {3385return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())3386.getScalarVal();3387}3388}3389// Otherwise, calculate and project.3390return CGF.EmitComplexExpr(Op, false, true).first;3391}33923393if (!PromotionType.isNull())3394return CGF.EmitPromotedScalarExpr(Op, PromotionType);3395return Visit(Op);3396}33973398Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E,3399QualType PromotionType) {3400QualType promotionTy = PromotionType.isNull()3401? 
getPromotionType(E->getSubExpr()->getType())3402: PromotionType;3403Value *result = VisitImag(E, promotionTy);3404if (result && !promotionTy.isNull())3405result = EmitUnPromotedValue(result, E->getType());3406return result;3407}34083409Value *ScalarExprEmitter::VisitImag(const UnaryOperator *E,3410QualType PromotionType) {3411Expr *Op = E->getSubExpr();3412if (Op->getType()->isAnyComplexType()) {3413// If it's an l-value, load through the appropriate subobject l-value.3414// Note that we have to ask E because Op might be an l-value that3415// this won't work for, e.g. an Obj-C property.3416if (Op->isGLValue()) {3417if (!PromotionType.isNull()) {3418CodeGenFunction::ComplexPairTy result = CGF.EmitComplexExpr(3419Op, /*IgnoreReal*/ true, /*IgnoreImag*/ IgnoreResultAssign);3420if (result.second)3421result.second = CGF.EmitPromotedValue(result, PromotionType).second;3422return result.second;3423} else {3424return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())3425.getScalarVal();3426}3427}3428// Otherwise, calculate and project.3429return CGF.EmitComplexExpr(Op, true, false).second;3430}34313432// __imag on a scalar returns zero. Emit the subexpr to ensure side3433// effects are evaluated, but not the actual value.3434if (Op->isGLValue())3435CGF.EmitLValue(Op);3436else if (!PromotionType.isNull())3437CGF.EmitPromotedScalarExpr(Op, PromotionType);3438else3439CGF.EmitScalarExpr(Op, true);3440if (!PromotionType.isNull())3441return llvm::Constant::getNullValue(ConvertType(PromotionType));3442return llvm::Constant::getNullValue(ConvertType(E->getType()));3443}34443445//===----------------------------------------------------------------------===//3446// Binary Operators3447//===----------------------------------------------------------------------===//34483449Value *ScalarExprEmitter::EmitPromotedValue(Value *result,3450QualType PromotionType) {3451return CGF.Builder.CreateFPExt(result, ConvertType(PromotionType), "ext");3452}34533454Value *ScalarExprEmitter::EmitUnPromotedValue(Value *result,3455QualType ExprType) {3456return CGF.Builder.CreateFPTrunc(result, ConvertType(ExprType), "unpromotion");3457}34583459Value *ScalarExprEmitter::EmitPromoted(const Expr *E, QualType PromotionType) {3460E = E->IgnoreParens();3461if (auto BO = dyn_cast<BinaryOperator>(E)) {3462switch (BO->getOpcode()) {3463#define HANDLE_BINOP(OP) \3464case BO_##OP: \3465return Emit##OP(EmitBinOps(BO, PromotionType));3466HANDLE_BINOP(Add)3467HANDLE_BINOP(Sub)3468HANDLE_BINOP(Mul)3469HANDLE_BINOP(Div)3470#undef HANDLE_BINOP3471default:3472break;3473}3474} else if (auto UO = dyn_cast<UnaryOperator>(E)) {3475switch (UO->getOpcode()) {3476case UO_Imag:3477return VisitImag(UO, PromotionType);3478case UO_Real:3479return VisitReal(UO, PromotionType);3480case UO_Minus:3481return VisitMinus(UO, PromotionType);3482case UO_Plus:3483return VisitPlus(UO, PromotionType);3484default:3485break;3486}3487}3488auto result = Visit(const_cast<Expr *>(E));3489if (result) {3490if (!PromotionType.isNull())3491return EmitPromotedValue(result, PromotionType);3492else3493return EmitUnPromotedValue(result, E->getType());3494}3495return result;3496}34973498BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E,3499QualType PromotionType) {3500TestAndClearIgnoreResultAssign();3501BinOpInfo Result;3502Result.LHS = CGF.EmitPromotedScalarExpr(E->getLHS(), PromotionType);3503Result.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionType);3504if (!PromotionType.isNull())3505Result.Ty = PromotionType;3506else3507Result.Ty = 
E->getType();3508Result.Opcode = E->getOpcode();3509Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());3510Result.E = E;3511return Result;3512}35133514LValue ScalarExprEmitter::EmitCompoundAssignLValue(3515const CompoundAssignOperator *E,3516Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),3517Value *&Result) {3518QualType LHSTy = E->getLHS()->getType();3519BinOpInfo OpInfo;35203521if (E->getComputationResultType()->isAnyComplexType())3522return CGF.EmitScalarCompoundAssignWithComplex(E, Result);35233524// Emit the RHS first. __block variables need to have the rhs evaluated3525// first, plus this should improve codegen a little.35263527QualType PromotionTypeCR;3528PromotionTypeCR = getPromotionType(E->getComputationResultType());3529if (PromotionTypeCR.isNull())3530PromotionTypeCR = E->getComputationResultType();3531QualType PromotionTypeLHS = getPromotionType(E->getComputationLHSType());3532QualType PromotionTypeRHS = getPromotionType(E->getRHS()->getType());3533if (!PromotionTypeRHS.isNull())3534OpInfo.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionTypeRHS);3535else3536OpInfo.RHS = Visit(E->getRHS());3537OpInfo.Ty = PromotionTypeCR;3538OpInfo.Opcode = E->getOpcode();3539OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());3540OpInfo.E = E;3541// Load/convert the LHS.3542LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);35433544llvm::PHINode *atomicPHI = nullptr;3545if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {3546QualType type = atomicTy->getValueType();3547if (!type->isBooleanType() && type->isIntegerType() &&3548!(type->isUnsignedIntegerType() &&3549CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&3550CGF.getLangOpts().getSignedOverflowBehavior() !=3551LangOptions::SOB_Trapping) {3552llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;3553llvm::Instruction::BinaryOps Op;3554switch (OpInfo.Opcode) {3555// We don't have atomicrmw operands for *, %, /, <<, >>3556case BO_MulAssign: case BO_DivAssign:3557case BO_RemAssign:3558case BO_ShlAssign:3559case BO_ShrAssign:3560break;3561case BO_AddAssign:3562AtomicOp = llvm::AtomicRMWInst::Add;3563Op = llvm::Instruction::Add;3564break;3565case BO_SubAssign:3566AtomicOp = llvm::AtomicRMWInst::Sub;3567Op = llvm::Instruction::Sub;3568break;3569case BO_AndAssign:3570AtomicOp = llvm::AtomicRMWInst::And;3571Op = llvm::Instruction::And;3572break;3573case BO_XorAssign:3574AtomicOp = llvm::AtomicRMWInst::Xor;3575Op = llvm::Instruction::Xor;3576break;3577case BO_OrAssign:3578AtomicOp = llvm::AtomicRMWInst::Or;3579Op = llvm::Instruction::Or;3580break;3581default:3582llvm_unreachable("Invalid compound assignment type");3583}3584if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {3585llvm::Value *Amt = CGF.EmitToMemory(3586EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,3587E->getExprLoc()),3588LHSTy);3589Value *OldVal = Builder.CreateAtomicRMW(3590AtomicOp, LHSLV.getAddress(), Amt,3591llvm::AtomicOrdering::SequentiallyConsistent);35923593// Since operation is atomic, the result type is guaranteed to be the3594// same as the input in LLVM terms.3595Result = Builder.CreateBinOp(Op, OldVal, Amt);3596return LHSLV;3597}3598}3599// FIXME: For floating point types, we should be saving and restoring the3600// floating point environment in the loop.3601llvm::BasicBlock *startBB = Builder.GetInsertBlock();3602llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);3603OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());3604OpInfo.LHS = 
                 CGF.EmitToMemory(OpInfo.LHS, type);
    Builder.CreateBr(opBB);
    Builder.SetInsertPoint(opBB);
    atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
    atomicPHI->addIncoming(OpInfo.LHS, startBB);
    OpInfo.LHS = atomicPHI;
  }
  else
    OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
  SourceLocation Loc = E->getExprLoc();
  if (!PromotionTypeLHS.isNull())
    OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy, PromotionTypeLHS,
                                      E->getExprLoc());
  else
    OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
                                      E->getComputationLHSType(), Loc);

  // Expand the binary operator.
  Result = (this->*Func)(OpInfo);

  // Convert the result back to the LHS type,
  // potentially with an Implicit Conversion sanitizer check.
  // If LHSLV is a bitfield, use default ScalarConversionOpts
  // to avoid emitting any implicit integer checks.
  Value *Previous = nullptr;
  if (LHSLV.isBitField()) {
    Previous = Result;
    Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc);
  } else
    Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc,
                                  ScalarConversionOpts(CGF.SanOpts));

  if (atomicPHI) {
    llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
    llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
    auto Pair = CGF.EmitAtomicCompareExchange(
        LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
    llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
    llvm::Value *success = Pair.second;
    atomicPHI->addIncoming(old, curBlock);
    Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
    Builder.SetInsertPoint(contBB);
    return LHSLV;
  }

  // Store the result value into the LHS lvalue. Bit-fields are handled
  // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
  // 'An assignment expression has the value of the left operand after the
  // assignment...'.
  if (LHSLV.isBitField()) {
    Value *Src = Previous ?
Previous : Result;3657QualType SrcType = E->getRHS()->getType();3658QualType DstType = E->getLHS()->getType();3659CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result);3660CGF.EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,3661LHSLV.getBitFieldInfo(), E->getExprLoc());3662} else3663CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV);36643665if (CGF.getLangOpts().OpenMP)3666CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF,3667E->getLHS());3668return LHSLV;3669}36703671Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,3672Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {3673bool Ignore = TestAndClearIgnoreResultAssign();3674Value *RHS = nullptr;3675LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);36763677// If the result is clearly ignored, return now.3678if (Ignore)3679return nullptr;36803681// The result of an assignment in C is the assigned r-value.3682if (!CGF.getLangOpts().CPlusPlus)3683return RHS;36843685// If the lvalue is non-volatile, return the computed value of the assignment.3686if (!LHS.isVolatileQualified())3687return RHS;36883689// Otherwise, reload the value.3690return EmitLoadOfLValue(LHS, E->getExprLoc());3691}36923693void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(3694const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {3695SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;36963697if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {3698Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),3699SanitizerKind::IntegerDivideByZero));3700}37013702const auto *BO = cast<BinaryOperator>(Ops.E);3703if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&3704Ops.Ty->hasSignedIntegerRepresentation() &&3705!IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&3706Ops.mayHaveIntegerOverflow()) {3707llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());37083709llvm::Value *IntMin =3710Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));3711llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);37123713llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);3714llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);3715llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");3716Checks.push_back(3717std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow));3718}37193720if (Checks.size() > 0)3721EmitBinOpCheck(Checks, Ops);3722}37233724Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {3725{3726CodeGenFunction::SanitizerScope SanScope(&CGF);3727if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||3728CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&3729Ops.Ty->isIntegerType() &&3730(Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {3731llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));3732EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);3733} else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&3734Ops.Ty->isRealFloatingType() &&3735Ops.mayHaveFloatDivisionByZero()) {3736llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));3737llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);3738EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),3739Ops);3740}3741}37423743if (Ops.Ty->isConstantMatrixType()) {3744llvm::MatrixBuilder MB(Builder);3745// We need to check the types of the operands of the operator to get the3746// correct matrix dimensions.3747auto *BO = 
cast<BinaryOperator>(Ops.E);3748(void)BO;3749assert(3750isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) &&3751"first operand must be a matrix");3752assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&3753"second operand must be an arithmetic type");3754CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);3755return MB.CreateScalarDiv(Ops.LHS, Ops.RHS,3756Ops.Ty->hasUnsignedIntegerRepresentation());3757}37583759if (Ops.LHS->getType()->isFPOrFPVectorTy()) {3760llvm::Value *Val;3761CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);3762Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");3763CGF.SetDivFPAccuracy(Val);3764return Val;3765}3766else if (Ops.isFixedPointOp())3767return EmitFixedPointBinOp(Ops);3768else if (Ops.Ty->hasUnsignedIntegerRepresentation())3769return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");3770else3771return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");3772}37733774Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {3775// Rem in C can't be a floating point type: C99 6.5.5p2.3776if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||3777CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&3778Ops.Ty->isIntegerType() &&3779(Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {3780CodeGenFunction::SanitizerScope SanScope(&CGF);3781llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));3782EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);3783}37843785if (Ops.Ty->hasUnsignedIntegerRepresentation())3786return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");3787else3788return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");3789}37903791Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {3792unsigned IID;3793unsigned OpID = 0;3794SanitizerHandler OverflowKind;37953796bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();3797switch (Ops.Opcode) {3798case BO_Add:3799case BO_AddAssign:3800OpID = 1;3801IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :3802llvm::Intrinsic::uadd_with_overflow;3803OverflowKind = SanitizerHandler::AddOverflow;3804break;3805case BO_Sub:3806case BO_SubAssign:3807OpID = 2;3808IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :3809llvm::Intrinsic::usub_with_overflow;3810OverflowKind = SanitizerHandler::SubOverflow;3811break;3812case BO_Mul:3813case BO_MulAssign:3814OpID = 3;3815IID = isSigned ? llvm::Intrinsic::smul_with_overflow :3816llvm::Intrinsic::umul_with_overflow;3817OverflowKind = SanitizerHandler::MulOverflow;3818break;3819default:3820llvm_unreachable("Unsupported operation for overflow detection");3821}3822OpID <<= 1;3823if (isSigned)3824OpID |= 1;38253826CodeGenFunction::SanitizerScope SanScope(&CGF);3827llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);38283829llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);38303831Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});3832Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);3833Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);38343835// Handle overflow with llvm.trap if no custom handler has been specified.3836const std::string *handlerName =3837&CGF.getLangOpts().OverflowHandler;3838if (handlerName->empty()) {3839// If the signed-integer-overflow sanitizer is enabled, emit a call to its3840// runtime. 
Otherwise, this is a -ftrapv check, so just emit a trap.3841if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {3842llvm::Value *NotOverflow = Builder.CreateNot(overflow);3843SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow3844: SanitizerKind::UnsignedIntegerOverflow;3845EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops);3846} else3847CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind);3848return result;3849}38503851// Branch in case of overflow.3852llvm::BasicBlock *initialBB = Builder.GetInsertBlock();3853llvm::BasicBlock *continueBB =3854CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());3855llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);38563857Builder.CreateCondBr(overflow, overflowBB, continueBB);38583859// If an overflow handler is set, then we want to call it and then use its3860// result, if it returns.3861Builder.SetInsertPoint(overflowBB);38623863// Get the overflow handler.3864llvm::Type *Int8Ty = CGF.Int8Ty;3865llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };3866llvm::FunctionType *handlerTy =3867llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);3868llvm::FunctionCallee handler =3869CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);38703871// Sign extend the args to 64-bit, so that we can use the same handler for3872// all types of overflow.3873llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);3874llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);38753876// Call the handler with the two arguments, the operation, and the size of3877// the result.3878llvm::Value *handlerArgs[] = {3879lhs,3880rhs,3881Builder.getInt8(OpID),3882Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())3883};3884llvm::Value *handlerResult =3885CGF.EmitNounwindRuntimeCall(handler, handlerArgs);38863887// Truncate the result back to the desired size.3888handlerResult = Builder.CreateTrunc(handlerResult, opTy);3889Builder.CreateBr(continueBB);38903891Builder.SetInsertPoint(continueBB);3892llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);3893phi->addIncoming(result, initialBB);3894phi->addIncoming(handlerResult, overflowBB);38953896return phi;3897}38983899/// Emit pointer + index arithmetic.3900static Value *emitPointerArithmetic(CodeGenFunction &CGF,3901const BinOpInfo &op,3902bool isSubtraction) {3903// Must have binary (not unary) expr here. Unary pointer3904// increment/decrement doesn't use this path.3905const BinaryOperator *expr = cast<BinaryOperator>(op.E);39063907Value *pointer = op.LHS;3908Expr *pointerOperand = expr->getLHS();3909Value *index = op.RHS;3910Expr *indexOperand = expr->getRHS();39113912// In a subtraction, the LHS is always the pointer.3913if (!isSubtraction && !pointer->getType()->isPointerTy()) {3914std::swap(pointer, index);3915std::swap(pointerOperand, indexOperand);3916}39173918bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();39193920unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();3921auto &DL = CGF.CGM.getDataLayout();3922auto PtrTy = cast<llvm::PointerType>(pointer->getType());39233924// Some versions of glibc and gcc use idioms (particularly in their malloc3925// routines) that add a pointer-sized integer (known to be a pointer value)3926// to a null pointer in order to cast the value back to an integer or as3927// part of a pointer alignment algorithm. 
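  //
  // An illustrative sketch of the idiom (hypothetical user code, not taken
  // from any particular malloc implementation):
  //
  //   uintptr_t bits = (uintptr_t)p;              // pointer round-tripped
  //   bits = (bits + align - 1) & ~(align - 1);   // through an integer
  //   char *q = (char *)0 + bits;                 // re-formed from a null base
  //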
This is undefined behavior, but3928// we'd like to be able to compile programs that use it.3929//3930// Normally, we'd generate a GEP with a null-pointer base here in response3931// to that code, but it's also UB to dereference a pointer created that3932// way. Instead (as an acknowledged hack to tolerate the idiom) we will3933// generate a direct cast of the integer value to a pointer.3934//3935// The idiom (p = nullptr + N) is not met if any of the following are true:3936//3937// The operation is subtraction.3938// The index is not pointer-sized.3939// The pointer type is not byte-sized.3940//3941if (BinaryOperator::isNullPointerArithmeticExtension(CGF.getContext(),3942op.Opcode,3943expr->getLHS(),3944expr->getRHS()))3945return CGF.Builder.CreateIntToPtr(index, pointer->getType());39463947if (width != DL.getIndexTypeSizeInBits(PtrTy)) {3948// Zero-extend or sign-extend the pointer value according to3949// whether the index is signed or not.3950index = CGF.Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,3951"idx.ext");3952}39533954// If this is subtraction, negate the index.3955if (isSubtraction)3956index = CGF.Builder.CreateNeg(index, "idx.neg");39573958if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))3959CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),3960/*Accessed*/ false);39613962const PointerType *pointerType3963= pointerOperand->getType()->getAs<PointerType>();3964if (!pointerType) {3965QualType objectType = pointerOperand->getType()3966->castAs<ObjCObjectPointerType>()3967->getPointeeType();3968llvm::Value *objectSize3969= CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));39703971index = CGF.Builder.CreateMul(index, objectSize);39723973Value *result =3974CGF.Builder.CreateGEP(CGF.Int8Ty, pointer, index, "add.ptr");3975return CGF.Builder.CreateBitCast(result, pointer->getType());3976}39773978QualType elementType = pointerType->getPointeeType();3979if (const VariableArrayType *vla3980= CGF.getContext().getAsVariableArrayType(elementType)) {3981// The element count here is the total number of non-VLA elements.3982llvm::Value *numElements = CGF.getVLASize(vla).NumElts;39833984// Effectively, the multiply by the VLA size is part of the GEP.3985// GEP indexes are signed, and scaling an index isn't permitted to3986// signed-overflow, so we use the same semantics for our explicit3987// multiply. We suppress this if overflow is not undefined behavior.3988llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType());3989if (CGF.getLangOpts().isSignedOverflowDefined()) {3990index = CGF.Builder.CreateMul(index, numElements, "vla.index");3991pointer = CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");3992} else {3993index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");3994pointer = CGF.EmitCheckedInBoundsGEP(3995elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),3996"add.ptr");3997}3998return pointer;3999}40004001// Explicitly handle GNU void* and function pointer arithmetic extensions. 
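  // For illustration (hypothetical user code relying on the GNU extension):
  //
  //   void *p = buf;
  //   p = p + 4;   // advances by 4 bytes, i.e. sizeof(void) is treated as 1
  //
  // and arithmetic on function pointers is handled the same way, so both
  // cases are lowered as byte-sized (i8) GEPs.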
The4002// GNU void* casts amount to no-ops since our void* type is i8*, but this is4003// future proof.4004llvm::Type *elemTy;4005if (elementType->isVoidType() || elementType->isFunctionType())4006elemTy = CGF.Int8Ty;4007else4008elemTy = CGF.ConvertTypeForMem(elementType);40094010if (CGF.getLangOpts().isSignedOverflowDefined())4011return CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");40124013return CGF.EmitCheckedInBoundsGEP(4014elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),4015"add.ptr");4016}40174018// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and4019// Addend. Use negMul and negAdd to negate the first operand of the Mul or4020// the add operand respectively. This allows fmuladd to represent a*b-c, or4021// c-a*b. Patterns in LLVM should catch the negated forms and translate them to4022// efficient operations.4023static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,4024const CodeGenFunction &CGF, CGBuilderTy &Builder,4025bool negMul, bool negAdd) {4026Value *MulOp0 = MulOp->getOperand(0);4027Value *MulOp1 = MulOp->getOperand(1);4028if (negMul)4029MulOp0 = Builder.CreateFNeg(MulOp0, "neg");4030if (negAdd)4031Addend = Builder.CreateFNeg(Addend, "neg");40324033Value *FMulAdd = nullptr;4034if (Builder.getIsFPConstrained()) {4035assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&4036"Only constrained operation should be created when Builder is in FP "4037"constrained mode");4038FMulAdd = Builder.CreateConstrainedFPCall(4039CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd,4040Addend->getType()),4041{MulOp0, MulOp1, Addend});4042} else {4043FMulAdd = Builder.CreateCall(4044CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),4045{MulOp0, MulOp1, Addend});4046}4047MulOp->eraseFromParent();40484049return FMulAdd;4050}40514052// Check whether it would be legal to emit an fmuladd intrinsic call to4053// represent op and if so, build the fmuladd.4054//4055// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.4056// Does NOT check the type of the operation - it's assumed that this function4057// will be called from contexts where it's known that the type is contractable.4058static Value* tryEmitFMulAdd(const BinOpInfo &op,4059const CodeGenFunction &CGF, CGBuilderTy &Builder,4060bool isSub=false) {40614062assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||4063op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&4064"Only fadd/fsub can be the root of an fmuladd.");40654066// Check whether this op is marked as fusable.4067if (!op.FPFeatures.allowFPContractWithinStatement())4068return nullptr;40694070Value *LHS = op.LHS;4071Value *RHS = op.RHS;40724073// Peek through fneg to look for fmul. Make sure fneg has no users, and that4074// it is the only use of its operand.4075bool NegLHS = false;4076if (auto *LHSUnOp = dyn_cast<llvm::UnaryOperator>(LHS)) {4077if (LHSUnOp->getOpcode() == llvm::Instruction::FNeg &&4078LHSUnOp->use_empty() && LHSUnOp->getOperand(0)->hasOneUse()) {4079LHS = LHSUnOp->getOperand(0);4080NegLHS = true;4081}4082}40834084bool NegRHS = false;4085if (auto *RHSUnOp = dyn_cast<llvm::UnaryOperator>(RHS)) {4086if (RHSUnOp->getOpcode() == llvm::Instruction::FNeg &&4087RHSUnOp->use_empty() && RHSUnOp->getOperand(0)->hasOneUse()) {4088RHS = RHSUnOp->getOperand(0);4089NegRHS = true;4090}4091}40924093// We have a potentially fusable op. Look for a mul on one of the operands.4094// Also, make sure that the mul result isn't used directly. 
In that case,4095// there's no point creating a muladd operation.4096if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(LHS)) {4097if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&4098(LHSBinOp->use_empty() || NegLHS)) {4099// If we looked through fneg, erase it.4100if (NegLHS)4101cast<llvm::Instruction>(op.LHS)->eraseFromParent();4102return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);4103}4104}4105if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(RHS)) {4106if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&4107(RHSBinOp->use_empty() || NegRHS)) {4108// If we looked through fneg, erase it.4109if (NegRHS)4110cast<llvm::Instruction>(op.RHS)->eraseFromParent();4111return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);4112}4113}41144115if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(LHS)) {4116if (LHSBinOp->getIntrinsicID() ==4117llvm::Intrinsic::experimental_constrained_fmul &&4118(LHSBinOp->use_empty() || NegLHS)) {4119// If we looked through fneg, erase it.4120if (NegLHS)4121cast<llvm::Instruction>(op.LHS)->eraseFromParent();4122return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);4123}4124}4125if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(RHS)) {4126if (RHSBinOp->getIntrinsicID() ==4127llvm::Intrinsic::experimental_constrained_fmul &&4128(RHSBinOp->use_empty() || NegRHS)) {4129// If we looked through fneg, erase it.4130if (NegRHS)4131cast<llvm::Instruction>(op.RHS)->eraseFromParent();4132return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);4133}4134}41354136return nullptr;4137}41384139Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {4140if (op.LHS->getType()->isPointerTy() ||4141op.RHS->getType()->isPointerTy())4142return emitPointerArithmetic(CGF, op, CodeGenFunction::NotSubtraction);41434144if (op.Ty->isSignedIntegerOrEnumerationType()) {4145switch (CGF.getLangOpts().getSignedOverflowBehavior()) {4146case LangOptions::SOB_Defined:4147if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))4148return Builder.CreateAdd(op.LHS, op.RHS, "add");4149[[fallthrough]];4150case LangOptions::SOB_Undefined:4151if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))4152return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");4153[[fallthrough]];4154case LangOptions::SOB_Trapping:4155if (CanElideOverflowCheck(CGF.getContext(), op))4156return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");4157return EmitOverflowCheckedBinOp(op);4158}4159}41604161// For vector and matrix adds, try to fold into a fmuladd.4162if (op.LHS->getType()->isFPOrFPVectorTy()) {4163CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);4164// Try to form an fmuladd.4165if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))4166return FMulAdd;4167}41684169if (op.Ty->isConstantMatrixType()) {4170llvm::MatrixBuilder MB(Builder);4171CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);4172return MB.CreateAdd(op.LHS, op.RHS);4173}41744175if (op.Ty->isUnsignedIntegerType() &&4176CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&4177!CanElideOverflowCheck(CGF.getContext(), op))4178return EmitOverflowCheckedBinOp(op);41794180if (op.LHS->getType()->isFPOrFPVectorTy()) {4181CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);4182return Builder.CreateFAdd(op.LHS, op.RHS, "add");4183}41844185if (op.isFixedPointOp())4186return EmitFixedPointBinOp(op);41874188return Builder.CreateAdd(op.LHS, op.RHS, "add");4189}41904191/// The resulting value must be calculated with exact precision, so the operands4192/// may not be 
the same type.4193Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {4194using llvm::APSInt;4195using llvm::ConstantInt;41964197// This is either a binary operation where at least one of the operands is4198// a fixed-point type, or a unary operation where the operand is a fixed-point4199// type. The result type of a binary operation is determined by4200// Sema::handleFixedPointConversions().4201QualType ResultTy = op.Ty;4202QualType LHSTy, RHSTy;4203if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) {4204RHSTy = BinOp->getRHS()->getType();4205if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) {4206// For compound assignment, the effective type of the LHS at this point4207// is the computation LHS type, not the actual LHS type, and the final4208// result type is not the type of the expression but rather the4209// computation result type.4210LHSTy = CAO->getComputationLHSType();4211ResultTy = CAO->getComputationResultType();4212} else4213LHSTy = BinOp->getLHS()->getType();4214} else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) {4215LHSTy = UnOp->getSubExpr()->getType();4216RHSTy = UnOp->getSubExpr()->getType();4217}4218ASTContext &Ctx = CGF.getContext();4219Value *LHS = op.LHS;4220Value *RHS = op.RHS;42214222auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy);4223auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy);4224auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);4225auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);42264227// Perform the actual operation.4228Value *Result;4229llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);4230switch (op.Opcode) {4231case BO_AddAssign:4232case BO_Add:4233Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema);4234break;4235case BO_SubAssign:4236case BO_Sub:4237Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema);4238break;4239case BO_MulAssign:4240case BO_Mul:4241Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema);4242break;4243case BO_DivAssign:4244case BO_Div:4245Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema);4246break;4247case BO_ShlAssign:4248case BO_Shl:4249Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS);4250break;4251case BO_ShrAssign:4252case BO_Shr:4253Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS);4254break;4255case BO_LT:4256return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema);4257case BO_GT:4258return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema);4259case BO_LE:4260return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema);4261case BO_GE:4262return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema);4263case BO_EQ:4264// For equality operations, we assume any padding bits on unsigned types are4265// zero'd out. 
They could be overwritten through non-saturating operations4266// that cause overflow, but this leads to undefined behavior.4267return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema);4268case BO_NE:4269return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema);4270case BO_Cmp:4271case BO_LAnd:4272case BO_LOr:4273llvm_unreachable("Found unimplemented fixed point binary operation");4274case BO_PtrMemD:4275case BO_PtrMemI:4276case BO_Rem:4277case BO_Xor:4278case BO_And:4279case BO_Or:4280case BO_Assign:4281case BO_RemAssign:4282case BO_AndAssign:4283case BO_XorAssign:4284case BO_OrAssign:4285case BO_Comma:4286llvm_unreachable("Found unsupported binary operation for fixed point types.");4287}42884289bool IsShift = BinaryOperator::isShiftOp(op.Opcode) ||4290BinaryOperator::isShiftAssignOp(op.Opcode);4291// Convert to the result type.4292return FPBuilder.CreateFixedToFixed(Result, IsShift ? LHSFixedSema4293: CommonFixedSema,4294ResultFixedSema);4295}42964297Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {4298// The LHS is always a pointer if either side is.4299if (!op.LHS->getType()->isPointerTy()) {4300if (op.Ty->isSignedIntegerOrEnumerationType()) {4301switch (CGF.getLangOpts().getSignedOverflowBehavior()) {4302case LangOptions::SOB_Defined:4303if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))4304return Builder.CreateSub(op.LHS, op.RHS, "sub");4305[[fallthrough]];4306case LangOptions::SOB_Undefined:4307if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))4308return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");4309[[fallthrough]];4310case LangOptions::SOB_Trapping:4311if (CanElideOverflowCheck(CGF.getContext(), op))4312return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");4313return EmitOverflowCheckedBinOp(op);4314}4315}43164317// For vector and matrix subs, try to fold into a fmuladd.4318if (op.LHS->getType()->isFPOrFPVectorTy()) {4319CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);4320// Try to form an fmuladd.4321if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))4322return FMulAdd;4323}43244325if (op.Ty->isConstantMatrixType()) {4326llvm::MatrixBuilder MB(Builder);4327CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);4328return MB.CreateSub(op.LHS, op.RHS);4329}43304331if (op.Ty->isUnsignedIntegerType() &&4332CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&4333!CanElideOverflowCheck(CGF.getContext(), op))4334return EmitOverflowCheckedBinOp(op);43354336if (op.LHS->getType()->isFPOrFPVectorTy()) {4337CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);4338return Builder.CreateFSub(op.LHS, op.RHS, "sub");4339}43404341if (op.isFixedPointOp())4342return EmitFixedPointBinOp(op);43434344return Builder.CreateSub(op.LHS, op.RHS, "sub");4345}43464347// If the RHS is not a pointer, then we have normal pointer4348// arithmetic.4349if (!op.RHS->getType()->isPointerTy())4350return emitPointerArithmetic(CGF, op, CodeGenFunction::IsSubtraction);43514352// Otherwise, this is a pointer subtraction.43534354// Do the raw subtraction part.4355llvm::Value *LHS4356= Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");4357llvm::Value *RHS4358= Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");4359Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");43604361// Okay, figure out the element size.4362const BinaryOperator *expr = cast<BinaryOperator>(op.E);4363QualType elementType = expr->getLHS()->getType()->getPointeeType();43644365llvm::Value *divisor = nullptr;43664367// For a 
variable-length array, this is going to be non-constant.
  if (const VariableArrayType *vla
        = CGF.getContext().getAsVariableArrayType(elementType)) {
    auto VlaSize = CGF.getVLASize(vla);
    elementType = VlaSize.Type;
    divisor = VlaSize.NumElts;

    // Scale the number of non-VLA elements by the non-VLA element size.
    CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
    if (!eltSize.isOne())
      divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);

  // For everything else, we can just compute it, safe in the
  // assumption that Sema won't let anything through that we can't
  // safely compute the size of.
  } else {
    CharUnits elementSize;
    // Handle the GCC extension for pointer arithmetic on void* and
    // function pointer types.
    if (elementType->isVoidType() || elementType->isFunctionType())
      elementSize = CharUnits::One();
    else
      elementSize = CGF.getContext().getTypeSizeInChars(elementType);

    // Don't even emit the divide for an element size of 1.
    if (elementSize.isOne())
      return diffInChars;

    divisor = CGF.CGM.getSize(elementSize);
  }

  // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
  // pointer difference in C is only defined in the case where both operands
  // are pointing to elements of an array.
  return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
}

Value *ScalarExprEmitter::GetMaximumShiftAmount(Value *LHS, Value *RHS,
                                                bool RHSIsSigned) {
  llvm::IntegerType *Ty;
  if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
    Ty = cast<llvm::IntegerType>(VT->getElementType());
  else
    Ty = cast<llvm::IntegerType>(LHS->getType());
  // For a given LHS type, the maximum shift amount is width(LHS)-1; however,
  // it can occur that width(LHS)-1 > range(RHS). Since there is no check for
  // this in ConstantInt::get, this results in the value getting truncated.
  // Constrain the return value to be max(RHS) in this case.
  llvm::Type *RHSTy = RHS->getType();
  llvm::APInt RHSMax =
      RHSIsSigned ?
llvm::APInt::getSignedMaxValue(RHSTy->getScalarSizeInBits())4418: llvm::APInt::getMaxValue(RHSTy->getScalarSizeInBits());4419if (RHSMax.ult(Ty->getBitWidth()))4420return llvm::ConstantInt::get(RHSTy, RHSMax);4421return llvm::ConstantInt::get(RHSTy, Ty->getBitWidth() - 1);4422}44234424Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,4425const Twine &Name) {4426llvm::IntegerType *Ty;4427if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType()))4428Ty = cast<llvm::IntegerType>(VT->getElementType());4429else4430Ty = cast<llvm::IntegerType>(LHS->getType());44314432if (llvm::isPowerOf2_64(Ty->getBitWidth()))4433return Builder.CreateAnd(RHS, GetMaximumShiftAmount(LHS, RHS, false), Name);44344435return Builder.CreateURem(4436RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name);4437}44384439Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {4440// TODO: This misses out on the sanitizer check below.4441if (Ops.isFixedPointOp())4442return EmitFixedPointBinOp(Ops);44434444// LLVM requires the LHS and RHS to be the same type: promote or truncate the4445// RHS to the same size as the LHS.4446Value *RHS = Ops.RHS;4447if (Ops.LHS->getType() != RHS->getType())4448RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");44494450bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&4451Ops.Ty->hasSignedIntegerRepresentation() &&4452!CGF.getLangOpts().isSignedOverflowDefined() &&4453!CGF.getLangOpts().CPlusPlus20;4454bool SanitizeUnsignedBase =4455CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) &&4456Ops.Ty->hasUnsignedIntegerRepresentation();4457bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;4458bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);4459// OpenCL 6.3j: shift values are effectively % word size of LHS.4460if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)4461RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask");4462else if ((SanitizeBase || SanitizeExponent) &&4463isa<llvm::IntegerType>(Ops.LHS->getType())) {4464CodeGenFunction::SanitizerScope SanScope(&CGF);4465SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks;4466bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();4467llvm::Value *WidthMinusOne =4468GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned);4469llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);44704471if (SanitizeExponent) {4472Checks.push_back(4473std::make_pair(ValidExponent, SanitizerKind::ShiftExponent));4474}44754476if (SanitizeBase) {4477// Check whether we are shifting any non-zero bits off the top of the4478// integer. We only emit this check if exponent is valid - otherwise4479// instructions below will have undefined behavior themselves.4480llvm::BasicBlock *Orig = Builder.GetInsertBlock();4481llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");4482llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");4483Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);4484llvm::Value *PromotedWidthMinusOne =4485(RHS == Ops.RHS) ? 
WidthMinusOne4486: GetMaximumShiftAmount(Ops.LHS, RHS, RHSIsSigned);4487CGF.EmitBlock(CheckShiftBase);4488llvm::Value *BitsShiftedOff = Builder.CreateLShr(4489Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",4490/*NUW*/ true, /*NSW*/ true),4491"shl.check");4492if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {4493// In C99, we are not permitted to shift a 1 bit into the sign bit.4494// Under C++11's rules, shifting a 1 bit into the sign bit is4495// OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't4496// define signed left shifts, so we use the C99 and C++11 rules there).4497// Unsigned shifts can always shift into the top bit.4498llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);4499BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);4500}4501llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);4502llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);4503CGF.EmitBlock(Cont);4504llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);4505BaseCheck->addIncoming(Builder.getTrue(), Orig);4506BaseCheck->addIncoming(ValidBase, CheckShiftBase);4507Checks.push_back(std::make_pair(4508BaseCheck, SanitizeSignedBase ? SanitizerKind::ShiftBase4509: SanitizerKind::UnsignedShiftBase));4510}45114512assert(!Checks.empty());4513EmitBinOpCheck(Checks, Ops);4514}45154516return Builder.CreateShl(Ops.LHS, RHS, "shl");4517}45184519Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {4520// TODO: This misses out on the sanitizer check below.4521if (Ops.isFixedPointOp())4522return EmitFixedPointBinOp(Ops);45234524// LLVM requires the LHS and RHS to be the same type: promote or truncate the4525// RHS to the same size as the LHS.4526Value *RHS = Ops.RHS;4527if (Ops.LHS->getType() != RHS->getType())4528RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");45294530// OpenCL 6.3j: shift values are effectively % word size of LHS.4531if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)4532RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask");4533else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&4534isa<llvm::IntegerType>(Ops.LHS->getType())) {4535CodeGenFunction::SanitizerScope SanScope(&CGF);4536bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();4537llvm::Value *Valid = Builder.CreateICmpULE(4538Ops.RHS, GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned));4539EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops);4540}45414542if (Ops.Ty->hasUnsignedIntegerRepresentation())4543return Builder.CreateLShr(Ops.LHS, RHS, "shr");4544return Builder.CreateAShr(Ops.LHS, RHS, "shr");4545}45464547enum IntrinsicType { VCMPEQ, VCMPGT };4548// return corresponding comparison intrinsic for given vector type4549static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,4550BuiltinType::Kind ElemKind) {4551switch (ElemKind) {4552default: llvm_unreachable("unexpected element type");4553case BuiltinType::Char_U:4554case BuiltinType::UChar:4555return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :4556llvm::Intrinsic::ppc_altivec_vcmpgtub_p;4557case BuiltinType::Char_S:4558case BuiltinType::SChar:4559return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :4560llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;4561case BuiltinType::UShort:4562return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :4563llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;4564case BuiltinType::Short:4565return (IT == VCMPEQ) ? 
enum IntrinsicType { VCMPEQ, VCMPGT };
// return corresponding comparison intrinsic for given vector type
static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
                                        BuiltinType::Kind ElemKind) {
  switch (ElemKind) {
  default: llvm_unreachable("unexpected element type");
  case BuiltinType::Char_U:
  case BuiltinType::UChar:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
  case BuiltinType::Char_S:
  case BuiltinType::SChar:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
  case BuiltinType::UShort:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
  case BuiltinType::Short:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
  case BuiltinType::UInt:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
  case BuiltinType::Int:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
  case BuiltinType::ULong:
  case BuiltinType::ULongLong:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
  case BuiltinType::Long:
  case BuiltinType::LongLong:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
  case BuiltinType::Float:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
  case BuiltinType::Double:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
                            llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
  case BuiltinType::UInt128:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p;
  case BuiltinType::Int128:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
  }
}

Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
                                      llvm::CmpInst::Predicate UICmpOpc,
                                      llvm::CmpInst::Predicate SICmpOpc,
                                      llvm::CmpInst::Predicate FCmpOpc,
                                      bool IsSignaling) {
  TestAndClearIgnoreResultAssign();
  Value *Result;
  QualType LHSTy = E->getLHS()->getType();
  QualType RHSTy = E->getRHS()->getType();
  if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
    assert(E->getOpcode() == BO_EQ ||
           E->getOpcode() == BO_NE);
    Value *LHS = CGF.EmitScalarExpr(E->getLHS());
    Value *RHS = CGF.EmitScalarExpr(E->getRHS());
    Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
        CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
  } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
    BinOpInfo BOInfo = EmitBinOps(E);
    Value *LHS = BOInfo.LHS;
    Value *RHS = BOInfo.RHS;

    // If AltiVec, the comparison results in a numeric type, so we use
    // intrinsics comparing vectors and giving 0 or 1 as a result.
    if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
      // constants for mapping CR6 register bits to predicate result
      enum { CR6_EQ = 0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;

      llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;

      // in several cases the vector argument order will be reversed
      Value *FirstVecArg = LHS,
            *SecondVecArg = RHS;

      QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
      BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();

      switch (E->getOpcode()) {
      default: llvm_unreachable("is not a comparison operation");
      case BO_EQ:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_NE:
        CR6 = CR6_EQ;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_LT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        std::swap(FirstVecArg, SecondVecArg);
        break;
      case BO_GT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        break;
      case BO_LE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
          std::swap(FirstVecArg, SecondVecArg);
        } else {
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
        }
        break;
      case BO_GE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
        } else {
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
          std::swap(FirstVecArg, SecondVecArg);
        }
        break;
      }

      Value *CR6Param = Builder.getInt32(CR6);
      llvm::Function *F = CGF.CGM.getIntrinsic(ID);
      Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});

      // The result type of the intrinsic may not be the same as E->getType().
      // If E->getType() is not BoolTy, EmitScalarConversion will do the
      // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
      // do nothing; if ResultTy is not i1 at the same time, it will cause a
      // crash later.
      llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
      if (ResultTy->getBitWidth() > 1 &&
          E->getType() == CGF.getContext().BoolTy)
        Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
      return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                                  E->getExprLoc());
    }

    if (BOInfo.isFixedPointOp()) {
      Result = EmitFixedPointBinOp(BOInfo);
    } else if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
      if (!IsSignaling)
        Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
      else
        Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp");
    } else if (LHSTy->hasSignedIntegerRepresentation()) {
      Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
    } else {
      // Unsigned integers and pointers.

      if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
          !isa<llvm::ConstantPointerNull>(LHS) &&
          !isa<llvm::ConstantPointerNull>(RHS)) {

        // Dynamic information is required to be stripped for comparisons,
        // because it could leak the dynamic information. Based on comparisons
        // of pointers to dynamic objects, the optimizer can replace one pointer
        // with another, which might be incorrect in the presence of invariant
        // groups. Comparison with null is safe because null does not carry any
        // dynamic information.
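        // Only operands whose static type may point to a dynamic class can
        // carry invariant.group information, so only those need stripping.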
        if (LHSTy.mayBeDynamicClass())
          LHS = Builder.CreateStripInvariantGroup(LHS);
        if (RHSTy.mayBeDynamicClass())
          RHS = Builder.CreateStripInvariantGroup(RHS);
      }

      Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
    }

    // If this is a vector comparison, sign extend the result to the appropriate
    // vector integer type and return it (don't convert to bool).
    if (LHSTy->isVectorType())
      return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");

  } else {
    // Complex Comparison: can only be an equality comparison.
    CodeGenFunction::ComplexPairTy LHS, RHS;
    QualType CETy;
    if (auto *CTy = LHSTy->getAs<ComplexType>()) {
      LHS = CGF.EmitComplexExpr(E->getLHS());
      CETy = CTy->getElementType();
    } else {
      LHS.first = Visit(E->getLHS());
      LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
      CETy = LHSTy;
    }
    if (auto *CTy = RHSTy->getAs<ComplexType>()) {
      RHS = CGF.EmitComplexExpr(E->getRHS());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy,
                                                     CTy->getElementType()) &&
             "The element types must always match.");
      (void)CTy;
    } else {
      RHS.first = Visit(E->getRHS());
      RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
             "The element types must always match.");
    }

    Value *ResultR, *ResultI;
    if (CETy->isRealFloatingType()) {
      // As complex comparisons can only be equality comparisons, they
      // are never signaling comparisons.
      ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
    } else {
      // Complex comparisons can only be equality comparisons. As such, signed
      // and unsigned opcodes are the same.
      ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
    }

    if (E->getOpcode() == BO_EQ) {
      Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
    } else {
      assert(E->getOpcode() == BO_NE &&
             "Complex comparison other than == or != ?");
      Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
    }
  }

  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                              E->getExprLoc());
}

llvm::Value *CodeGenFunction::EmitWithOriginalRHSBitfieldAssignment(
    const BinaryOperator *E, Value **Previous, QualType *SrcType) {
  // In case we have the integer or bitfield sanitizer checks enabled
  // we want to get the expression before scalar conversion.
  if (auto *ICE = dyn_cast<ImplicitCastExpr>(E->getRHS())) {
    CastKind Kind = ICE->getCastKind();
    if (Kind == CK_IntegralCast || Kind == CK_LValueToRValue) {
      *SrcType = ICE->getSubExpr()->getType();
      *Previous = EmitScalarExpr(ICE->getSubExpr());
      // Pass default ScalarConversionOpts to avoid emitting
      // integer sanitizer checks as E refers to bitfield.
      return EmitScalarConversion(*Previous, *SrcType, ICE->getType(),
                                  ICE->getExprLoc());
    }
  }
  return EmitScalarExpr(E->getRHS());
}

Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  bool Ignore = TestAndClearIgnoreResultAssign();

  Value *RHS;
  LValue LHS;

  switch (E->getLHS()->getType().getObjCLifetime()) {
  case Qualifiers::OCL_Strong:
    std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
    break;

  case Qualifiers::OCL_Autoreleasing:
    std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
    break;

  case Qualifiers::OCL_ExplicitNone:
    std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
    break;

  case Qualifiers::OCL_Weak:
    RHS = Visit(E->getRHS());
    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
    RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
    break;

  case Qualifiers::OCL_None:
    // __block variables need to have the rhs evaluated first, plus
    // this should improve codegen just a little.
    Value *Previous = nullptr;
    QualType SrcType = E->getRHS()->getType();
    // Check if the LHS is a bitfield; if the RHS contains an implicit cast
    // expression, we want to extract that value and potentially (if the
    // bitfield sanitizer is enabled) use it to check for an implicit
    // conversion.
    if (E->getLHS()->refersToBitField())
      RHS = CGF.EmitWithOriginalRHSBitfieldAssignment(E, &Previous, &SrcType);
    else
      RHS = Visit(E->getRHS());

    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // Store the value into the LHS. Bit-fields are handled specially
    // because the result is altered by the store, i.e., [C99 6.5.16p1]
    // 'An assignment expression has the value of the left operand after
    // the assignment...'.
    if (LHS.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
      // If the expression contained an implicit conversion, make sure
      // to use the value before the scalar conversion.
      Value *Src = Previous ? Previous : RHS;
      QualType DstType = E->getLHS()->getType();
      CGF.EmitBitfieldConversionCheck(Src, SrcType, RHS, DstType,
                                      LHS.getBitFieldInfo(), E->getExprLoc());
    } else {
      CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
      CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
    }
  }

  // If the result is clearly ignored, return now.
  if (Ignore)
    return nullptr;

  // The result of an assignment in C is the assigned r-value.
  if (!CGF.getLangOpts().CPlusPlus)
    return RHS;

  // If the lvalue is non-volatile, return the computed value of the assignment.
  if (!LHS.isVolatileQualified())
    return RHS;

  // Otherwise, reload the value.
  return EmitLoadOfLValue(LHS, E->getExprLoc());
}

Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
  // Perform vector logical and on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {
    CGF.incrementProfileCounter(E);

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *And = Builder.CreateAnd(LHS, RHS);
    return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 0 && RHS, see if we can elide RHS; if so, just return 0.
  // If we have 1 && X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (LHSCondVal) { // If we have 1 && X, just emit X.
      CGF.incrementProfileCounter(E);

      // If the top of the logical operator nest, reset the MCDC temp to 0.
      if (CGF.MCDCLogOpStack.empty())
        CGF.maybeResetMCDCCondBitmap(E);

      CGF.MCDCLogOpStack.push_back(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter needed to track branch condition
      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
      // "FalseBlock" after the increment is done.
      if (InstrumentRegions &&
          CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
        CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
        llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end");
        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
        Builder.CreateCondBr(RHSCond, RHSBlockCnt, FBlock);
        CGF.EmitBlock(RHSBlockCnt);
        CGF.incrementProfileCounter(E->getRHS());
        CGF.EmitBranch(FBlock);
        CGF.EmitBlock(FBlock);
      }

      CGF.MCDCLogOpStack.pop_back();
      // If the top of the logical operator nest, update the MCDC bitmap.
      if (CGF.MCDCLogOpStack.empty())
        CGF.maybeUpdateMCDCTestVectorBitmap(E);

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
    }

    // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
    if (!CGF.ContainsLabel(E->getRHS()))
      return llvm::Constant::getNullValue(ResTy);
  }

  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeResetMCDCCondBitmap(E);

  CGF.MCDCLogOpStack.push_back(E);

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is false, go to the failure (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
                           CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be false. Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  CGF.incrementProfileCounter(E);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
  eval.end(CGF);

  // Reacquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  if (InstrumentRegions &&
      CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
    CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
    Builder.CreateCondBr(RHSCond, RHSBlockCnt, ContBlock);
    CGF.EmitBlock(RHSBlockCnt);
    CGF.incrementProfileCounter(E->getRHS());
    CGF.EmitBranch(ContBlock);
    PN->addIncoming(RHSCond, RHSBlockCnt);
  }

  // Emit an unconditional branch from this block to ContBlock.
  {
    // There is no need to emit a line number for an unconditional branch.
    auto NL = ApplyDebugLocation::CreateEmpty(CGF);
    CGF.EmitBlock(ContBlock);
  }
  // Insert an entry into the phi node for the edge with the value of RHSCond.
  PN->addIncoming(RHSCond, RHSBlock);

  CGF.MCDCLogOpStack.pop_back();
  // If the top of the logical operator nest, update the MCDC bitmap.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeUpdateMCDCTestVectorBitmap(E);

  // Artificial location to preserve the scope information
  {
    auto NL = ApplyDebugLocation::CreateArtificial(CGF);
    PN->setDebugLoc(Builder.getCurrentDebugLocation());
  }

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
}

Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
  // Perform vector logical or on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {
    CGF.incrementProfileCounter(E);

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *Or = Builder.CreateOr(LHS, RHS);
    return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 1 || RHS, see if we can elide RHS; if so, just return 1.
  // If we have 0 || X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (!LHSCondVal) { // If we have 0 || X, just emit X.
      CGF.incrementProfileCounter(E);

      // If the top of the logical operator nest, reset the MCDC temp to 0.
      if (CGF.MCDCLogOpStack.empty())
        CGF.maybeResetMCDCCondBitmap(E);

      CGF.MCDCLogOpStack.push_back(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter needed to track branch condition
      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
      // "FalseBlock" after the increment is done.
      if (InstrumentRegions &&
          CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
        CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
        llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end");
        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
        Builder.CreateCondBr(RHSCond, FBlock, RHSBlockCnt);
        CGF.EmitBlock(RHSBlockCnt);
        CGF.incrementProfileCounter(E->getRHS());
        CGF.EmitBranch(FBlock);
        CGF.EmitBlock(FBlock);
      }

      CGF.MCDCLogOpStack.pop_back();
      // If the top of the logical operator nest, update the MCDC bitmap.
      if (CGF.MCDCLogOpStack.empty())
        CGF.maybeUpdateMCDCTestVectorBitmap(E);

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
    }

    // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
    if (!CGF.ContainsLabel(E->getRHS()))
      return llvm::ConstantInt::get(ResTy, 1);
  }

  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeResetMCDCCondBitmap(E);

  CGF.MCDCLogOpStack.push_back(E);

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is true, go to the success (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
                           CGF.getCurrentProfileCount() -
                               CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be true. Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);

  eval.begin(CGF);

  // Emit the RHS condition as a bool value.
  CGF.EmitBlock(RHSBlock);
  CGF.incrementProfileCounter(E);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

  eval.end(CGF);

  // Reacquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  if (InstrumentRegions &&
      CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
    CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
    Builder.CreateCondBr(RHSCond, ContBlock, RHSBlockCnt);
    CGF.EmitBlock(RHSBlockCnt);
    CGF.incrementProfileCounter(E->getRHS());
    CGF.EmitBranch(ContBlock);
    PN->addIncoming(RHSCond, RHSBlockCnt);
  }

  // Emit an unconditional branch from this block to ContBlock. Insert an entry
  // into the phi node for the edge with the value of RHSCond.
  CGF.EmitBlock(ContBlock);
  PN->addIncoming(RHSCond, RHSBlock);

  CGF.MCDCLogOpStack.pop_back();
  // If the top of the logical operator nest, update the MCDC bitmap.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeUpdateMCDCTestVectorBitmap(E);

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
}

Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  CGF.EnsureInsertPoint();
  return Visit(E->getRHS());
}

//===----------------------------------------------------------------------===//
// Other Operators
//===----------------------------------------------------------------------===//

/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
/// expression is cheap enough and side-effect-free enough to evaluate
/// unconditionally instead of conditionally. This is used to convert control
/// flow into selects in some cases.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
                                                   CodeGenFunction &CGF) {
  // Anything that is an integer or floating point constant is fine.
  return E->IgnoreParens()->isEvaluatable(CGF.getContext());

  // Even non-volatile automatic variables can't be evaluated unconditionally.
  // Referencing a thread_local may cause non-trivial initialization work to
  // occur. If we're inside a lambda and one of the variables is from the scope
  // outside the lambda, that function may have returned already. Reading its
  // locals is a bad idea. Also, these reads may introduce races that didn't
  // exist in the source-level program.
}

Value *ScalarExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  TestAndClearIgnoreResultAssign();

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  Expr *condExpr = E->getCond();
  Expr *lhsExpr = E->getTrueExpr();
  Expr *rhsExpr = E->getFalseExpr();

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm.
  bool CondExprBool;
  if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
    Expr *live = lhsExpr, *dead = rhsExpr;
    if (!CondExprBool) std::swap(live, dead);

    // If the dead side doesn't have labels we need, just emit the Live part.
    if (!CGF.ContainsLabel(dead)) {
      if (CondExprBool) {
        if (llvm::EnableSingleByteCoverage) {
          CGF.incrementProfileCounter(lhsExpr);
          CGF.incrementProfileCounter(rhsExpr);
        }
        CGF.incrementProfileCounter(E);
      }
      Value *Result = Visit(live);

      // If the live part is a throw expression, it acts like it has a void
      // type, so evaluating it returns a null Value*. However, a conditional
      // with non-void type must return a non-null Value*.
      if (!Result && !E->getType()->isVoidType())
        Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));

      return Result;
    }
  }

  // OpenCL: If the condition is a vector, we can treat this condition like
  // the select function.
  if ((CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) ||
      condExpr->getType()->isExtVectorType()) {
    CGF.incrementProfileCounter(E);

    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    llvm::Type *condType = ConvertType(condExpr->getType());
    auto *vecTy = cast<llvm::FixedVectorType>(condType);

    unsigned numElem = vecTy->getNumElements();
    llvm::Type *elemType = vecTy->getElementType();

    llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
    llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
    llvm::Value *tmp = Builder.CreateSExt(
        TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
    llvm::Value *tmp2 = Builder.CreateNot(tmp);

    // Cast float to int to perform ANDs if necessary.
    llvm::Value *RHSTmp = RHS;
    llvm::Value *LHSTmp = LHS;
    bool wasCast = false;
    llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
    if (rhsVTy->getElementType()->isFloatingPointTy()) {
      RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
      LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
      wasCast = true;
    }

    llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
    llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
    llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
    if (wasCast)
      tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());

    return tmp5;
  }

  if (condExpr->getType()->isVectorType() ||
      condExpr->getType()->isSveVLSBuiltinType()) {
    CGF.incrementProfileCounter(E);

    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    llvm::Type *CondType = ConvertType(condExpr->getType());
    auto *VecTy = cast<llvm::VectorType>(CondType);
    llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);

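    // Compare the condition vector against zero lane-by-lane; the resulting
    // vector of i1 lanes drives a per-lane select between the two arms.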
"vector_cond");5268return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");5269}52705271// If this is a really simple expression (like x ? 4 : 5), emit this as a5272// select instead of as control flow. We can only do this if it is cheap and5273// safe to evaluate the LHS and RHS unconditionally.5274if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&5275isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {5276llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);5277llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);52785279if (llvm::EnableSingleByteCoverage) {5280CGF.incrementProfileCounter(lhsExpr);5281CGF.incrementProfileCounter(rhsExpr);5282CGF.incrementProfileCounter(E);5283} else5284CGF.incrementProfileCounter(E, StepV);52855286llvm::Value *LHS = Visit(lhsExpr);5287llvm::Value *RHS = Visit(rhsExpr);5288if (!LHS) {5289// If the conditional has void type, make sure we return a null Value*.5290assert(!RHS && "LHS and RHS types must match");5291return nullptr;5292}5293return Builder.CreateSelect(CondV, LHS, RHS, "cond");5294}52955296// If the top of the logical operator nest, reset the MCDC temp to 0.5297if (CGF.MCDCLogOpStack.empty())5298CGF.maybeResetMCDCCondBitmap(condExpr);52995300llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");5301llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");5302llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");53035304CodeGenFunction::ConditionalEvaluation eval(CGF);5305CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,5306CGF.getProfileCount(lhsExpr));53075308CGF.EmitBlock(LHSBlock);53095310// If the top of the logical operator nest, update the MCDC bitmap for the5311// ConditionalOperator prior to visiting its LHS and RHS blocks, since they5312// may also contain a boolean expression.5313if (CGF.MCDCLogOpStack.empty())5314CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);53155316if (llvm::EnableSingleByteCoverage)5317CGF.incrementProfileCounter(lhsExpr);5318else5319CGF.incrementProfileCounter(E);53205321eval.begin(CGF);5322Value *LHS = Visit(lhsExpr);5323eval.end(CGF);53245325LHSBlock = Builder.GetInsertBlock();5326Builder.CreateBr(ContBlock);53275328CGF.EmitBlock(RHSBlock);53295330// If the top of the logical operator nest, update the MCDC bitmap for the5331// ConditionalOperator prior to visiting its LHS and RHS blocks, since they5332// may also contain a boolean expression.5333if (CGF.MCDCLogOpStack.empty())5334CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);53355336if (llvm::EnableSingleByteCoverage)5337CGF.incrementProfileCounter(rhsExpr);53385339eval.begin(CGF);5340Value *RHS = Visit(rhsExpr);5341eval.end(CGF);53425343RHSBlock = Builder.GetInsertBlock();5344CGF.EmitBlock(ContBlock);53455346// If the LHS or RHS is a throw expression, it will be legitimately null.5347if (!LHS)5348return RHS;5349if (!RHS)5350return LHS;53515352// Create a PHI node for the real part.5353llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");5354PN->addIncoming(LHS, LHSBlock);5355PN->addIncoming(RHS, RHSBlock);53565357// When single byte coverage mode is enabled, add a counter to continuation5358// block.5359if (llvm::EnableSingleByteCoverage)5360CGF.incrementProfileCounter(E);53615362return PN;5363}53645365Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {5366return Visit(E->getChosenSubExpr());5367}53685369Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {5370QualType Ty = VE->getType();53715372if 
  if (Ty->isVariablyModifiedType())
    CGF.EmitVariablyModifiedType(Ty);

  Address ArgValue = Address::invalid();
  RValue ArgPtr = CGF.EmitVAArg(VE, ArgValue);

  return ArgPtr.getScalarVal();
}

Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
  return CGF.EmitBlockLiteral(block);
}

// Convert a vec3 to vec4, or vice versa.
static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
                                 Value *Src, unsigned NumElementsDst) {
  static constexpr int Mask[] = {0, 1, 2, -1};
  return Builder.CreateShuffleVector(Src,
                                     llvm::ArrayRef(Mask, NumElementsDst));
}

// Create cast instructions for converting LLVM value \p Src to LLVM type \p
// DstTy. \p Src has the same size as \p DstTy. Both are single value types
// but could be scalar or vectors of different lengths, and either can be a
// pointer.
// There are 4 cases:
// 1. non-pointer -> non-pointer  : needs 1 bitcast
// 2. pointer -> pointer          : needs 1 bitcast or addrspacecast
// 3. pointer -> non-pointer
//   a) pointer -> intptr_t       : needs 1 ptrtoint
//   b) pointer -> non-intptr_t   : needs 1 ptrtoint then 1 bitcast
// 4. non-pointer -> pointer
//   a) intptr_t -> pointer       : needs 1 inttoptr
//   b) non-intptr_t -> pointer   : needs 1 bitcast then 1 inttoptr
// Note: for cases 3b and 4b two casts are required since LLVM casts do not
// allow casting directly between pointer types and non-integer non-pointer
// types.
static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder,
                                           const llvm::DataLayout &DL,
                                           Value *Src, llvm::Type *DstTy,
                                           StringRef Name = "") {
  auto SrcTy = Src->getType();

  // Case 1.
  if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
    return Builder.CreateBitCast(Src, DstTy, Name);

  // Case 2.
  if (SrcTy->isPointerTy() && DstTy->isPointerTy())
    return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);

  // Case 3.
  if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
    // Case 3b.
    if (!DstTy->isIntegerTy())
      Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
    // Cases 3a and 3b.
    return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
  }

  // Case 4b.
  if (!SrcTy->isIntegerTy())
    Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
  // Cases 4a and 4b.
  return Builder.CreateIntToPtr(Src, DstTy, Name);
}

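// OpenCL's as_type() reinterprets the bits of a value as another type of the
// same size. Three-element vectors are stored like four-element vectors, so
// the vec3 cases below go through a vec4 shuffle before or after the cast.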
Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
  Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
  llvm::Type *DstTy = ConvertType(E->getType());

  llvm::Type *SrcTy = Src->getType();
  unsigned NumElementsSrc =
      isa<llvm::VectorType>(SrcTy)
          ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
          : 0;
  unsigned NumElementsDst =
      isa<llvm::VectorType>(DstTy)
          ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
          : 0;

  // Use bit vector expansion for ext_vector_type boolean vectors.
  if (E->getType()->isExtVectorBoolType())
    return CGF.emitBoolVecConversion(Src, NumElementsDst, "astype");

  // Going from vec3 to non-vec3 is a special case and requires a shuffle
  // vector to get a vec4, then a bitcast if the target type is different.
  if (NumElementsSrc == 3 && NumElementsDst != 3) {
    Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
    Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                       DstTy);

    Src->setName("astype");
    return Src;
  }

  // Going from non-vec3 to vec3 is a special case and requires a bitcast
  // to vec4 if the original type is not vec4, then a shuffle vector to
  // get a vec3.
  if (NumElementsSrc != 3 && NumElementsDst == 3) {
    auto *Vec4Ty = llvm::FixedVectorType::get(
        cast<llvm::VectorType>(DstTy)->getElementType(), 4);
    Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                       Vec4Ty);

    Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
    Src->setName("astype");
    return Src;
  }

  return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
                                      Src, DstTy, "astype");
}

Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
  return CGF.EmitAtomicExpr(E).getScalarVal();
}

//===----------------------------------------------------------------------===//
// Entry Point into this File
//===----------------------------------------------------------------------===//

/// Emit the computation of the specified expression of scalar type, ignoring
/// the result.
Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
  assert(E && hasScalarEvaluationKind(E->getType()) &&
         "Invalid scalar expression to emit");

  return ScalarExprEmitter(*this, IgnoreResultAssign)
      .Visit(const_cast<Expr *>(E));
}

/// Emit a conversion from the specified type to the specified destination type,
/// both of which are LLVM scalar types.
Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
                                             QualType DstTy,
                                             SourceLocation Loc) {
  assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
         "Invalid scalar expression to emit");
  return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
}

/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
                                                      QualType SrcTy,
                                                      QualType DstTy,
                                                      SourceLocation Loc) {
  assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
         "Invalid complex -> scalar conversion");
  return ScalarExprEmitter(*this)
      .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
}

Value *
CodeGenFunction::EmitPromotedScalarExpr(const Expr *E,
                                        QualType PromotionType) {
  if (!PromotionType.isNull())
    return ScalarExprEmitter(*this).EmitPromoted(E, PromotionType);
  else
    return ScalarExprEmitter(*this).Visit(const_cast<Expr *>(E));
}

llvm::Value *CodeGenFunction::
EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                        bool isInc, bool isPre) {
  return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
}

LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
  // object->isa or (*object).isa
  // Generate code as for: *(Class*)object
  Expr *BaseExpr = E->getBase();
  Address Addr = Address::invalid();
  if (BaseExpr->isPRValue()) {
    llvm::Type *BaseTy =
        ConvertTypeForMem(BaseExpr->getType()->getPointeeType());
    Addr = Address(EmitScalarExpr(BaseExpr), BaseTy, getPointerAlign());
  } else {
    Addr = EmitLValue(BaseExpr).getAddress();
  }

  // Cast the address to Class*.
  Addr = Addr.withElementType(ConvertType(E->getType()));
  return MakeAddrLValue(Addr, E->getType());
}

LValue CodeGenFunction::EmitCompoundAssignmentLValue(
    const CompoundAssignOperator *E) {
  ScalarExprEmitter Scalar(*this);
  Value *Result = nullptr;
  switch (E->getOpcode()) {
#define COMPOUND_OP(Op)                                                        \
  case BO_##Op##Assign:                                                        \
    return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op,    \
                                           Result)
    COMPOUND_OP(Mul);
    COMPOUND_OP(Div);
    COMPOUND_OP(Rem);
    COMPOUND_OP(Add);
    COMPOUND_OP(Sub);
    COMPOUND_OP(Shl);
    COMPOUND_OP(Shr);
    COMPOUND_OP(And);
    COMPOUND_OP(Xor);
    COMPOUND_OP(Or);
#undef COMPOUND_OP

  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_Mul:
  case BO_Div:
  case BO_Rem:
  case BO_Add:
  case BO_Sub:
  case BO_Shl:
  case BO_Shr:
  case BO_LT:
  case BO_GT:
  case BO_LE:
  case BO_GE:
  case BO_EQ:
  case BO_NE:
  case BO_Cmp:
  case BO_And:
  case BO_Xor:
  case BO_Or:
  case BO_LAnd:
  case BO_LOr:
  case BO_Assign:
  case BO_Comma:
    llvm_unreachable("Not valid compound assignment operators");
  }

  llvm_unreachable("Unhandled compound assignment operator");
}

struct GEPOffsetAndOverflow {
  // The total (signed) byte offset for the GEP.
  llvm::Value *TotalOffset;
  // The offset overflow flag - true if the total offset overflows.
  llvm::Value *OffsetOverflows;
};

/// Evaluate given GEPVal, which is either an inbounds GEP, or a constant,
/// and compute the total offset it applies from its base pointer BasePtr.
/// Returns offset in bytes and a boolean flag whether an overflow happened
/// during evaluation.
static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
                                                 llvm::LLVMContext &VMContext,
                                                 CodeGenModule &CGM,
                                                 CGBuilderTy &Builder) {
  const auto &DL = CGM.getDataLayout();

  // The total (signed) byte offset for the GEP.
  llvm::Value *TotalOffset = nullptr;

  // Was the GEP already reduced to a constant?
  if (isa<llvm::Constant>(GEPVal)) {
    // Compute the offset by casting both pointers to integers and subtracting:
    // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
    Value *BasePtr_int =
        Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
    Value *GEPVal_int =
        Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
    TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
    return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
  }

  auto *GEP = cast<llvm::GEPOperator>(GEPVal);
  assert(GEP->getPointerOperand() == BasePtr &&
         "BasePtr must be the base of the GEP.");
  assert(GEP->isInBounds() && "Expected inbounds GEP");

  auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());

  // Grab references to the signed add/mul overflow intrinsics for intptr_t.
  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
  auto *SAddIntrinsic =
      CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
  auto *SMulIntrinsic =
      CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);

  // The offset overflow flag - true if the total offset overflows.
  llvm::Value *OffsetOverflows = Builder.getFalse();

  /// Return the result of the given binary operation.
  auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
                  llvm::Value *RHS) -> llvm::Value * {
    assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");

    // If the operands are constants, return a constant result.
    if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
      if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
        llvm::APInt N;
        bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
                                                  /*Signed=*/true, N);
        if (HasOverflow)
          OffsetOverflows = Builder.getTrue();
        return llvm::ConstantInt::get(VMContext, N);
      }
    }

    // Otherwise, compute the result with checked arithmetic.
    auto *ResultAndOverflow = Builder.CreateCall(
        (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
    OffsetOverflows = Builder.CreateOr(
        Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
    return Builder.CreateExtractValue(ResultAndOverflow, 0);
  };

  // Determine the total byte offset by looking at each GEP operand.
  for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    llvm::Value *LocalOffset;
    auto *Index = GTI.getOperand();
    // Compute the local offset contributed by this indexing step:
    if (auto *STy = GTI.getStructTypeOrNull()) {
      // For struct indexing, the local offset is the byte position of the
      // specified field.
      unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
      LocalOffset = llvm::ConstantInt::get(
          IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
    } else {
      // Otherwise this is array-like indexing. The local offset is the index
      // multiplied by the element size.
      auto *ElementSize =
          llvm::ConstantInt::get(IntPtrTy, GTI.getSequentialElementStride(DL));
      auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
      LocalOffset = eval(BO_Mul, ElementSize, IndexS);
    }

    // If this is the first offset, set it as the total offset. Otherwise, add
    // the local offset into the running total.
    if (!TotalOffset || TotalOffset == Zero)
      TotalOffset = LocalOffset;
    else
      TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
  }

  return {TotalOffset, OffsetOverflows};
}

Value *
CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr,
                                        ArrayRef<Value *> IdxList,
                                        bool SignedIndices, bool IsSubtraction,
                                        SourceLocation Loc, const Twine &Name) {
  llvm::Type *PtrTy = Ptr->getType();
  Value *GEPVal = Builder.CreateInBoundsGEP(ElemTy, Ptr, IdxList, Name);

  // If the pointer overflow sanitizer isn't enabled, do nothing.
  if (!SanOpts.has(SanitizerKind::PointerOverflow))
    return GEPVal;

  // Perform nullptr-and-offset check unless the nullptr is defined.
  bool PerformNullCheck = !NullPointerIsDefined(
      Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
  // Check for overflows unless the GEP got constant-folded,
  // and only in the default address space.
  bool PerformOverflowCheck =
      !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;

  if (!(PerformNullCheck || PerformOverflowCheck))
    return GEPVal;

  const auto &DL = CGM.getDataLayout();

  SanitizerScope SanScope(this);
  llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);

  GEPOffsetAndOverflow EvaluatedGEP =
      EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);

  assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
          EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
         "If the offset got constant-folded, we don't expect that there was an "
         "overflow.");

  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);

  // Common case: if the total offset is zero, and we are using C++ semantics,
  // where nullptr+0 is defined, don't emit a check.
  if (EvaluatedGEP.TotalOffset == Zero && CGM.getLangOpts().CPlusPlus)
    return GEPVal;

  // Now that we've computed the total offset, add it to the base pointer (with
  // wrapping semantics).
  auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
  auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);

  llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;

  if (PerformNullCheck) {
    // In C++, if the base pointer evaluates to a null pointer value,
    // the only valid pointer this inbounds GEP can produce is also
    // a null pointer, so the offset must also evaluate to zero.
    // Likewise, if we have a non-zero base pointer, we can not get a null
    // pointer as a result, so the offset can not be -intptr_t(BasePtr).
    // In other words, both pointers are either null, or both are non-null,
    // or the behaviour is undefined.
    //
    // C, however, is more strict in this regard, and gives more
    // optimization opportunities: in C, additionally, nullptr+0 is undefined.
    // So both the input to the 'gep inbounds' AND the output must not be null.
    auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
    auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
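    // In C++ the two flags must agree (both pointers null or both non-null);
    // in C both the base and the result must be non-null.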
    auto *Valid =
        CGM.getLangOpts().CPlusPlus
            ? Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr)
            : Builder.CreateAnd(BaseIsNotNullptr, ResultIsNotNullptr);
    Checks.emplace_back(Valid, SanitizerKind::PointerOverflow);
  }

  if (PerformOverflowCheck) {
    // The GEP is valid if:
    // 1) The total offset doesn't overflow, and
    // 2) The sign of the difference between the computed address and the base
    //    pointer matches the sign of the total offset.
    llvm::Value *ValidGEP;
    auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
    if (SignedIndices) {
      // GEP is computed as `unsigned base + signed offset`, therefore:
      // * If offset was positive, then the computed pointer can not be
      //   [unsigned] less than the base pointer, unless it overflowed.
      // * If offset was negative, then the computed pointer can not be
      //   [unsigned] greater than the base pointer, unless it overflowed.
      auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
      auto *PosOrZeroOffset =
          Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
      llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
      ValidGEP =
          Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
    } else if (!IsSubtraction) {
      // GEP is computed as `unsigned base + unsigned offset`, therefore the
      // computed pointer can not be [unsigned] less than the base pointer,
      // unless there was an overflow.
      // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
      ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
    } else {
      // GEP is computed as `unsigned base - unsigned offset`, therefore the
      // computed pointer can not be [unsigned] greater than the base pointer,
      // unless there was an overflow.
      // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
      ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
    }
    ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
    Checks.emplace_back(ValidGEP, SanitizerKind::PointerOverflow);
  }

  assert(!Checks.empty() && "Should have produced some checks.");

  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
  // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
  llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
  EmitCheck(Checks, SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs);

  return GEPVal;
}

Address CodeGenFunction::EmitCheckedInBoundsGEP(
    Address Addr, ArrayRef<Value *> IdxList, llvm::Type *elementType,
    bool SignedIndices, bool IsSubtraction, SourceLocation Loc, CharUnits Align,
    const Twine &Name) {
  if (!SanOpts.has(SanitizerKind::PointerOverflow))
    return Builder.CreateInBoundsGEP(Addr, IdxList, elementType, Align, Name);

  return RawAddress(
      EmitCheckedInBoundsGEP(Addr.getElementType(), Addr.emitRawPointer(*this),
                             IdxList, SignedIndices, IsSubtraction, Loc, Name),
      elementType, Align);
}