Path: blob/main/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp
35233 views
//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/Passes/OptimizationLevel.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/xxhash.h"
#include "llvm/Transforms/Utils/SanitizerStats.h"

#include <optional>
#include <string>

using namespace clang;
using namespace CodeGen;

// Experiment to make sanitizers easier to debug: emit one distinct trap per
// UBSAN check instead of merging them.
static llvm::cl::opt<bool> ClSanitizeDebugDeoptimization(
    "ubsan-unique-traps", llvm::cl::Optional,
    llvm::cl::desc("Deoptimize traps for UBSAN so there is 1 trap per check."));

// TODO: Introduce frontend options to enabled per sanitizers, similar to
// `fsanitize-trap`.
static llvm::cl::opt<bool> ClSanitizeGuardChecks(
    "ubsan-guard-checks", llvm::cl::Optional,
    llvm::cl::desc("Guard UBSAN checks with `llvm.allow.ubsan.check()`."));

//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

/// CreateTempAllocaWithoutCast - This creates an alloca and inserts it into
/// the entry block. Unlike CreateTempAlloca, the result is left in the alloca
/// address space (no cast to the default address space is emitted).
RawAddress
CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits Align,
                                             const Twine &Name,
                                             llvm::Value *ArraySize) {
  auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
  Alloca->setAlignment(Align.getAsAlign());
  return RawAddress(Alloca, Ty, Align, KnownNonNull);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block. The alloca is casted to default address space if necessary.
/// If \p AllocaAddr is non-null, it receives the uncasted alloca address.
RawAddress CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
                                             const Twine &Name,
                                             llvm::Value *ArraySize,
                                             RawAddress *AllocaAddr) {
  auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
  if (AllocaAddr)
    *AllocaAddr = Alloca;
  llvm::Value *V = Alloca.getPointer();
  // Alloca always returns a pointer in alloca address space, which may
  // be different from the type defined by the language. For example,
  // in C++ the auto variables are in the default address space. Therefore
  // cast alloca to the default address space when necessary.
  if (getASTAllocaAddressSpace() != LangAS::Default) {
    auto DestAddrSpace = getContext().getTargetAddressSpace(LangAS::Default);
    llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
    // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
    // otherwise alloca is inserted at the current insertion point of the
    // builder.
    if (!ArraySize)
      Builder.SetInsertPoint(getPostAllocaInsertPoint());
    V = getTargetHooks().performAddrSpaceCast(
        *this, V, getASTAllocaAddressSpace(), LangAS::Default,
        Ty->getPointerTo(DestAddrSpace), /*non-null*/ true);
  }

  return RawAddress(V, Ty, Align, KnownNonNull);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block if \p ArraySize is nullptr, otherwise inserts it at the current
/// insertion point of the builder.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
                                                    const Twine &Name,
                                                    llvm::Value *ArraySize) {
  llvm::AllocaInst *Alloca;
  if (ArraySize)
    Alloca = Builder.CreateAlloca(Ty, ArraySize, Name);
  else
    Alloca = new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
                                  ArraySize, Name, &*AllocaInsertPt);
  // If a list of allocas is being collected (e.g. for later bookkeeping),
  // record this one.
  if (Allocas) {
    Allocas->Add(Alloca);
  }
  return Alloca;
}

/// CreateDefaultAlignTempAlloca - This creates an alloca with the
/// default alignment of the corresponding LLVM type, which is *not*
/// guaranteed to be related in any way to the expected alignment of
/// an AST type that might have been lowered to Ty.
RawAddress CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
                                                         const Twine &Name) {
  CharUnits Align =
      CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlign(Ty));
  return CreateTempAlloca(Ty, Align, Name);
}

/// CreateIRTemp - Create a temporary using the IR-level lowering of the type
/// (ConvertType), aligned per the AST type's alignment.
RawAddress CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) {
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  return CreateTempAlloca(ConvertType(Ty), Align, Name);
}

/// CreateMemTemp - Create a temporary memory object of the given AST type,
/// using the type's natural alignment.
RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name,
                                          RawAddress *Alloca) {
  // FIXME: Should we prefer the preferred type alignment here?
  return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
}

/// CreateMemTemp - Create a temporary memory object of the given AST type
/// with the given alignment. Constant matrix types are given a vector element
/// type so loads/stores see the vector form.
RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
                                          const Twine &Name,
                                          RawAddress *Alloca) {
  RawAddress Result = CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name,
                                       /*ArraySize=*/nullptr, Alloca);

  if (Ty->isConstantMatrixType()) {
    // Matrices are lowered to arrays in memory; view the temporary as the
    // equivalent fixed vector type instead.
    auto *ArrayTy = cast<llvm::ArrayType>(Result.getElementType());
    auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
                                                ArrayTy->getNumElements());

    Result = Address(Result.getPointer(), VectorTy, Result.getAlignment(),
                     KnownNonNull);
  }
  return Result;
}

/// CreateMemTempWithoutCast - Like CreateMemTemp, but the result stays in the
/// alloca address space.
RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
                                                     CharUnits Align,
                                                     const Twine &Name) {
  return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
}

/// CreateMemTempWithoutCast - Convenience overload using the type's natural
/// alignment.
RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
                                                     const Twine &Name) {
  return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
                                  Name);
}

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  PGO.setCurrentStmt(E);
  // Member pointers have an ABI-specific null representation; delegate the
  // null test to the C++ ABI.
  if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
    llvm::Value *MemPtr = EmitScalarExpr(E);
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
  }

  QualType BoolTy = getContext().BoolTy;
  SourceLocation Loc = E->getExprLoc();
  CGFPOptionsRAII FPOptsRAII(*this, E);
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy,
                                       Loc);
}

///
EmitIgnoredExpr - Emit code to compute the specified expression,201/// ignoring the result.202void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {203if (E->isPRValue())204return (void)EmitAnyExpr(E, AggValueSlot::ignored(), true);205206// if this is a bitfield-resulting conditional operator, we can special case207// emit this. The normal 'EmitLValue' version of this is particularly208// difficult to codegen for, since creating a single "LValue" for two209// different sized arguments here is not particularly doable.210if (const auto *CondOp = dyn_cast<AbstractConditionalOperator>(211E->IgnoreParenNoopCasts(getContext()))) {212if (CondOp->getObjectKind() == OK_BitField)213return EmitIgnoredConditionalOperator(CondOp);214}215216// Just emit it as an l-value and drop the result.217EmitLValue(E);218}219220/// EmitAnyExpr - Emit code to compute the specified expression which221/// can have any type. The result is returned as an RValue struct.222/// If this is an aggregate expression, AggSlot indicates where the223/// result should be returned.224RValue CodeGenFunction::EmitAnyExpr(const Expr *E,225AggValueSlot aggSlot,226bool ignoreResult) {227switch (getEvaluationKind(E->getType())) {228case TEK_Scalar:229return RValue::get(EmitScalarExpr(E, ignoreResult));230case TEK_Complex:231return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));232case TEK_Aggregate:233if (!ignoreResult && aggSlot.isIgnored())234aggSlot = CreateAggTemp(E->getType(), "agg-temp");235EmitAggExpr(E, aggSlot);236return aggSlot.asRValue();237}238llvm_unreachable("bad evaluation kind");239}240241/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will242/// always be accessible even if no aggregate location is provided.243RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {244AggValueSlot AggSlot = AggValueSlot::ignored();245246if (hasAggregateEvaluationKind(E->getType()))247AggSlot = CreateAggTemp(E->getType(), "agg.tmp");248return EmitAnyExpr(E, 
AggSlot);249}250251/// EmitAnyExprToMem - Evaluate an expression into a given memory252/// location.253void CodeGenFunction::EmitAnyExprToMem(const Expr *E,254Address Location,255Qualifiers Quals,256bool IsInit) {257// FIXME: This function should take an LValue as an argument.258switch (getEvaluationKind(E->getType())) {259case TEK_Complex:260EmitComplexExprIntoLValue(E, MakeAddrLValue(Location, E->getType()),261/*isInit*/ false);262return;263264case TEK_Aggregate: {265EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,266AggValueSlot::IsDestructed_t(IsInit),267AggValueSlot::DoesNotNeedGCBarriers,268AggValueSlot::IsAliased_t(!IsInit),269AggValueSlot::MayOverlap));270return;271}272273case TEK_Scalar: {274RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));275LValue LV = MakeAddrLValue(Location, E->getType());276EmitStoreThroughLValue(RV, LV);277return;278}279}280llvm_unreachable("bad evaluation kind");281}282283static void284pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,285const Expr *E, Address ReferenceTemporary) {286// Objective-C++ ARC:287// If we are binding a reference to a temporary that has ownership, we288// need to perform retain/release operations on the temporary.289//290// FIXME: This should be looking at E, not M.291if (auto Lifetime = M->getType().getObjCLifetime()) {292switch (Lifetime) {293case Qualifiers::OCL_None:294case Qualifiers::OCL_ExplicitNone:295// Carry on to normal cleanup handling.296break;297298case Qualifiers::OCL_Autoreleasing:299// Nothing to do; cleaned up by an autorelease pool.300return;301302case Qualifiers::OCL_Strong:303case Qualifiers::OCL_Weak:304switch (StorageDuration Duration = M->getStorageDuration()) {305case SD_Static:306// Note: we intentionally do not register a cleanup to release307// the object on program termination.308return;309310case SD_Thread:311// FIXME: We should probably register a cleanup in this case.312return;313314case SD_Automatic:315case 
SD_FullExpression:316CodeGenFunction::Destroyer *Destroy;317CleanupKind CleanupKind;318if (Lifetime == Qualifiers::OCL_Strong) {319const ValueDecl *VD = M->getExtendingDecl();320bool Precise = isa_and_nonnull<VarDecl>(VD) &&321VD->hasAttr<ObjCPreciseLifetimeAttr>();322CleanupKind = CGF.getARCCleanupKind();323Destroy = Precise ? &CodeGenFunction::destroyARCStrongPrecise324: &CodeGenFunction::destroyARCStrongImprecise;325} else {326// __weak objects always get EH cleanups; otherwise, exceptions327// could cause really nasty crashes instead of mere leaks.328CleanupKind = NormalAndEHCleanup;329Destroy = &CodeGenFunction::destroyARCWeak;330}331if (Duration == SD_FullExpression)332CGF.pushDestroy(CleanupKind, ReferenceTemporary,333M->getType(), *Destroy,334CleanupKind & EHCleanup);335else336CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,337M->getType(),338*Destroy, CleanupKind & EHCleanup);339return;340341case SD_Dynamic:342llvm_unreachable("temporary cannot have dynamic storage duration");343}344llvm_unreachable("unknown storage duration");345}346}347348CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;349if (const RecordType *RT =350E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {351// Get the destructor for the reference temporary.352auto *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());353if (!ClassDecl->hasTrivialDestructor())354ReferenceTemporaryDtor = ClassDecl->getDestructor();355}356357if (!ReferenceTemporaryDtor)358return;359360// Call the destructor for the temporary.361switch (M->getStorageDuration()) {362case SD_Static:363case SD_Thread: {364llvm::FunctionCallee CleanupFn;365llvm::Constant *CleanupArg;366if (E->getType()->isArrayType()) {367CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper(368ReferenceTemporary, E->getType(),369CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions,370dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));371CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);372} else 
{373CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(374GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));375CleanupArg = cast<llvm::Constant>(ReferenceTemporary.emitRawPointer(CGF));376}377CGF.CGM.getCXXABI().registerGlobalDtor(378CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);379break;380}381382case SD_FullExpression:383CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(),384CodeGenFunction::destroyCXXObject,385CGF.getLangOpts().Exceptions);386break;387388case SD_Automatic:389CGF.pushLifetimeExtendedDestroy(NormalAndEHCleanup,390ReferenceTemporary, E->getType(),391CodeGenFunction::destroyCXXObject,392CGF.getLangOpts().Exceptions);393break;394395case SD_Dynamic:396llvm_unreachable("temporary cannot have dynamic storage duration");397}398}399400static RawAddress createReferenceTemporary(CodeGenFunction &CGF,401const MaterializeTemporaryExpr *M,402const Expr *Inner,403RawAddress *Alloca = nullptr) {404auto &TCG = CGF.getTargetHooks();405switch (M->getStorageDuration()) {406case SD_FullExpression:407case SD_Automatic: {408// If we have a constant temporary array or record try to promote it into a409// constant global under the same rules a normal constant would've been410// promoted. 
This is easier on the optimizer and generally emits fewer411// instructions.412QualType Ty = Inner->getType();413if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&414(Ty->isArrayType() || Ty->isRecordType()) &&415Ty.isConstantStorage(CGF.getContext(), true, false))416if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {417auto AS = CGF.CGM.GetGlobalConstantAddressSpace();418auto *GV = new llvm::GlobalVariable(419CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,420llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,421llvm::GlobalValue::NotThreadLocal,422CGF.getContext().getTargetAddressSpace(AS));423CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);424GV->setAlignment(alignment.getAsAlign());425llvm::Constant *C = GV;426if (AS != LangAS::Default)427C = TCG.performAddrSpaceCast(428CGF.CGM, GV, AS, LangAS::Default,429GV->getValueType()->getPointerTo(430CGF.getContext().getTargetAddressSpace(LangAS::Default)));431// FIXME: Should we put the new global into a COMDAT?432return RawAddress(C, GV->getValueType(), alignment);433}434return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);435}436case SD_Thread:437case SD_Static:438return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);439440case SD_Dynamic:441llvm_unreachable("temporary can't have dynamic storage duration");442}443llvm_unreachable("unknown storage duration");444}445446/// Helper method to check if the underlying ABI is AAPCS447static bool isAAPCS(const TargetInfo &TargetInfo) {448return TargetInfo.getABI().starts_with("aapcs");449}450451LValue CodeGenFunction::452EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {453const Expr *E = M->getSubExpr();454455assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||456!cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&457"Reference should never be pseudo-strong!");458459// FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so460// as that will cause the lifetime adjustment to 
be lost for ARC461auto ownership = M->getType().getObjCLifetime();462if (ownership != Qualifiers::OCL_None &&463ownership != Qualifiers::OCL_ExplicitNone) {464RawAddress Object = createReferenceTemporary(*this, M, E);465if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {466llvm::Type *Ty = ConvertTypeForMem(E->getType());467Object = Object.withElementType(Ty);468469// createReferenceTemporary will promote the temporary to a global with a470// constant initializer if it can. It can only do this to a value of471// ARC-manageable type if the value is global and therefore "immune" to472// ref-counting operations. Therefore we have no need to emit either a473// dynamic initialization or a cleanup and we can just return the address474// of the temporary.475if (Var->hasInitializer())476return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);477478Var->setInitializer(CGM.EmitNullConstant(E->getType()));479}480LValue RefTempDst = MakeAddrLValue(Object, M->getType(),481AlignmentSource::Decl);482483switch (getEvaluationKind(E->getType())) {484default: llvm_unreachable("expected scalar or aggregate expression");485case TEK_Scalar:486EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);487break;488case TEK_Aggregate: {489EmitAggExpr(E, AggValueSlot::forAddr(Object,490E->getType().getQualifiers(),491AggValueSlot::IsDestructed,492AggValueSlot::DoesNotNeedGCBarriers,493AggValueSlot::IsNotAliased,494AggValueSlot::DoesNotOverlap));495break;496}497}498499pushTemporaryCleanup(*this, M, E, Object);500return RefTempDst;501}502503SmallVector<const Expr *, 2> CommaLHSs;504SmallVector<SubobjectAdjustment, 2> Adjustments;505E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);506507for (const auto &Ignored : CommaLHSs)508EmitIgnoredExpr(Ignored);509510if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {511if (opaque->getType()->isRecordType()) {512assert(Adjustments.empty());513return EmitOpaqueValueLValue(opaque);514}515}516517// Create and 
initialize the reference temporary.518RawAddress Alloca = Address::invalid();519RawAddress Object = createReferenceTemporary(*this, M, E, &Alloca);520if (auto *Var = dyn_cast<llvm::GlobalVariable>(521Object.getPointer()->stripPointerCasts())) {522llvm::Type *TemporaryType = ConvertTypeForMem(E->getType());523Object = Object.withElementType(TemporaryType);524// If the temporary is a global and has a constant initializer or is a525// constant temporary that we promoted to a global, we may have already526// initialized it.527if (!Var->hasInitializer()) {528Var->setInitializer(CGM.EmitNullConstant(E->getType()));529EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);530}531} else {532switch (M->getStorageDuration()) {533case SD_Automatic:534if (auto *Size = EmitLifetimeStart(535CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),536Alloca.getPointer())) {537pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,538Alloca, Size);539}540break;541542case SD_FullExpression: {543if (!ShouldEmitLifetimeMarkers)544break;545546// Avoid creating a conditional cleanup just to hold an llvm.lifetime.end547// marker. Instead, start the lifetime of a conditional temporary earlier548// so that it's unconditional. Don't do this with sanitizers which need549// more precise lifetime marks. 
However when inside an "await.suspend"550// block, we should always avoid conditional cleanup because it creates551// boolean marker that lives across await_suspend, which can destroy coro552// frame.553ConditionalEvaluation *OldConditional = nullptr;554CGBuilderTy::InsertPoint OldIP;555if (isInConditionalBranch() && !E->getType().isDestructedType() &&556((!SanOpts.has(SanitizerKind::HWAddress) &&557!SanOpts.has(SanitizerKind::Memory) &&558!CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) ||559inSuspendBlock())) {560OldConditional = OutermostConditional;561OutermostConditional = nullptr;562563OldIP = Builder.saveIP();564llvm::BasicBlock *Block = OldConditional->getStartingBlock();565Builder.restoreIP(CGBuilderTy::InsertPoint(566Block, llvm::BasicBlock::iterator(Block->back())));567}568569if (auto *Size = EmitLifetimeStart(570CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),571Alloca.getPointer())) {572pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,573Size);574}575576if (OldConditional) {577OutermostConditional = OldConditional;578Builder.restoreIP(OldIP);579}580break;581}582583default:584break;585}586EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);587}588pushTemporaryCleanup(*this, M, E, Object);589590// Perform derived-to-base casts and/or field accesses, to get from the591// temporary object we created (and, potentially, for which we extended592// the lifetime) to the subobject we're binding the reference to.593for (SubobjectAdjustment &Adjustment : llvm::reverse(Adjustments)) {594switch (Adjustment.Kind) {595case SubobjectAdjustment::DerivedToBaseAdjustment:596Object =597GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,598Adjustment.DerivedToBase.BasePath->path_begin(),599Adjustment.DerivedToBase.BasePath->path_end(),600/*NullCheckValue=*/ false, E->getExprLoc());601break;602603case SubobjectAdjustment::FieldAdjustment: {604LValue LV = MakeAddrLValue(Object, E->getType(), 
AlignmentSource::Decl);605LV = EmitLValueForField(LV, Adjustment.Field);606assert(LV.isSimple() &&607"materialized temporary field is not a simple lvalue");608Object = LV.getAddress();609break;610}611612case SubobjectAdjustment::MemberPointerAdjustment: {613llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);614Object = EmitCXXMemberDataPointerAddress(E, Object, Ptr,615Adjustment.Ptr.MPT);616break;617}618}619}620621return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);622}623624RValue625CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) {626// Emit the expression as an lvalue.627LValue LV = EmitLValue(E);628assert(LV.isSimple());629llvm::Value *Value = LV.getPointer(*this);630631if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) {632// C++11 [dcl.ref]p5 (as amended by core issue 453):633// If a glvalue to which a reference is directly bound designates neither634// an existing object or function of an appropriate type nor a region of635// storage of suitable size and alignment to contain an object of the636// reference's type, the behavior is undefined.637QualType Ty = E->getType();638EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);639}640641return RValue::get(Value);642}643644645/// getAccessedFieldNo - Given an encoded value and a result number, return the646/// input field number being accessed.647unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,648const llvm::Constant *Elts) {649return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))650->getZExtValue();651}652653static llvm::Value *emitHashMix(CGBuilderTy &Builder, llvm::Value *Acc,654llvm::Value *Ptr) {655llvm::Value *A0 =656Builder.CreateMul(Ptr, Builder.getInt64(0xbf58476d1ce4e5b9u));657llvm::Value *A1 =658Builder.CreateXor(A0, Builder.CreateLShr(A0, Builder.getInt64(31)));659return Builder.CreateXor(Acc, A1);660}661662bool CodeGenFunction::isNullPointerAllowed(TypeCheckKind TCK) {663return TCK == TCK_DowncastPointer || TCK == TCK_Upcast 
||664TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation;665}666667bool CodeGenFunction::isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) {668CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();669return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&670(TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||671TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference ||672TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation);673}674675bool CodeGenFunction::sanitizePerformTypeCheck() const {676return SanOpts.has(SanitizerKind::Null) ||677SanOpts.has(SanitizerKind::Alignment) ||678SanOpts.has(SanitizerKind::ObjectSize) ||679SanOpts.has(SanitizerKind::Vptr);680}681682void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,683llvm::Value *Ptr, QualType Ty,684CharUnits Alignment,685SanitizerSet SkippedChecks,686llvm::Value *ArraySize) {687if (!sanitizePerformTypeCheck())688return;689690// Don't check pointers outside the default address space. The null check691// isn't correct, the object-size check isn't supported by LLVM, and we can't692// communicate the addresses to the runtime handler for the vptr check.693if (Ptr->getType()->getPointerAddressSpace())694return;695696// Don't check pointers to volatile data. The behavior here is implementation-697// defined.698if (Ty.isVolatileQualified())699return;700701SanitizerScope SanScope(this);702703SmallVector<std::pair<llvm::Value *, SanitizerMask>, 3> Checks;704llvm::BasicBlock *Done = nullptr;705706// Quickly determine whether we have a pointer to an alloca. It's possible707// to skip null checks, and some alignment checks, for these pointers. 
This708// can reduce compile-time significantly.709auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCasts());710711llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext());712llvm::Value *IsNonNull = nullptr;713bool IsGuaranteedNonNull =714SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca;715bool AllowNullPointers = isNullPointerAllowed(TCK);716if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&717!IsGuaranteedNonNull) {718// The glvalue must not be an empty glvalue.719IsNonNull = Builder.CreateIsNotNull(Ptr);720721// The IR builder can constant-fold the null check if the pointer points to722// a constant.723IsGuaranteedNonNull = IsNonNull == True;724725// Skip the null check if the pointer is known to be non-null.726if (!IsGuaranteedNonNull) {727if (AllowNullPointers) {728// When performing pointer casts, it's OK if the value is null.729// Skip the remaining checks in that case.730Done = createBasicBlock("null");731llvm::BasicBlock *Rest = createBasicBlock("not.null");732Builder.CreateCondBr(IsNonNull, Rest, Done);733EmitBlock(Rest);734} else {735Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::Null));736}737}738}739740if (SanOpts.has(SanitizerKind::ObjectSize) &&741!SkippedChecks.has(SanitizerKind::ObjectSize) &&742!Ty->isIncompleteType()) {743uint64_t TySize = CGM.getMinimumObjectSize(Ty).getQuantity();744llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);745if (ArraySize)746Size = Builder.CreateMul(Size, ArraySize);747748// Degenerate case: new X[0] does not need an objectsize check.749llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size);750if (!ConstantSize || !ConstantSize->isNullValue()) {751// The glvalue must refer to a large enough storage region.752// FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation753// to check this.754// FIXME: Get object address space755llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };756llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, 
Tys);757llvm::Value *Min = Builder.getFalse();758llvm::Value *NullIsUnknown = Builder.getFalse();759llvm::Value *Dynamic = Builder.getFalse();760llvm::Value *LargeEnough = Builder.CreateICmpUGE(761Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic}), Size);762Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize));763}764}765766llvm::MaybeAlign AlignVal;767llvm::Value *PtrAsInt = nullptr;768769if (SanOpts.has(SanitizerKind::Alignment) &&770!SkippedChecks.has(SanitizerKind::Alignment)) {771AlignVal = Alignment.getAsMaybeAlign();772if (!Ty->isIncompleteType() && !AlignVal)773AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,774/*ForPointeeType=*/true)775.getAsMaybeAlign();776777// The glvalue must be suitably aligned.778if (AlignVal && *AlignVal > llvm::Align(1) &&779(!PtrToAlloca || PtrToAlloca->getAlign() < *AlignVal)) {780PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);781llvm::Value *Align = Builder.CreateAnd(782PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal->value() - 1));783llvm::Value *Aligned =784Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));785if (Aligned != True)786Checks.push_back(std::make_pair(Aligned, SanitizerKind::Alignment));787}788}789790if (Checks.size() > 0) {791llvm::Constant *StaticData[] = {792EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(Ty),793llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2(*AlignVal) : 1),794llvm::ConstantInt::get(Int8Ty, TCK)};795EmitCheck(Checks, SanitizerHandler::TypeMismatch, StaticData,796PtrAsInt ? 
PtrAsInt : Ptr);797}798799// If possible, check that the vptr indicates that there is a subobject of800// type Ty at offset zero within this object.801//802// C++11 [basic.life]p5,6:803// [For storage which does not refer to an object within its lifetime]804// The program has undefined behavior if:805// -- the [pointer or glvalue] is used to access a non-static data member806// or call a non-static member function807if (SanOpts.has(SanitizerKind::Vptr) &&808!SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {809// Ensure that the pointer is non-null before loading it. If there is no810// compile-time guarantee, reuse the run-time null check or emit a new one.811if (!IsGuaranteedNonNull) {812if (!IsNonNull)813IsNonNull = Builder.CreateIsNotNull(Ptr);814if (!Done)815Done = createBasicBlock("vptr.null");816llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null");817Builder.CreateCondBr(IsNonNull, VptrNotNull, Done);818EmitBlock(VptrNotNull);819}820821// Compute a deterministic hash of the mangled name of the type.822SmallString<64> MangledName;823llvm::raw_svector_ostream Out(MangledName);824CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),825Out);826827// Contained in NoSanitizeList based on the mangled type.828if (!CGM.getContext().getNoSanitizeList().containsType(SanitizerKind::Vptr,829Out.str())) {830// Load the vptr, and mix it with TypeHash.831llvm::Value *TypeHash =832llvm::ConstantInt::get(Int64Ty, xxh3_64bits(Out.str()));833834llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);835Address VPtrAddr(Ptr, IntPtrTy, getPointerAlign());836llvm::Value *VPtrVal = GetVTablePtr(VPtrAddr, VPtrTy,837Ty->getAsCXXRecordDecl(),838VTableAuthMode::UnsafeUbsanStrip);839VPtrVal = Builder.CreateBitOrPointerCast(VPtrVal, IntPtrTy);840841llvm::Value *Hash =842emitHashMix(Builder, TypeHash, Builder.CreateZExt(VPtrVal, Int64Ty));843Hash = Builder.CreateTrunc(Hash, IntPtrTy);844845// Look the hash up in our cache.846const 
int CacheSize = 128;
      // (Continuation of the type-check emission begun above — this is the
      // UBSan -fsanitize=vptr slow path: hash the vptr, probe a small global
      // cache, and call the runtime on a miss.)
      llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
      llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
                                                     "__ubsan_vptr_type_cache");
      // CacheSize is a power of two, so masking with CacheSize-1 picks a slot.
      llvm::Value *Slot = Builder.CreateAnd(Hash,
                                            llvm::ConstantInt::get(IntPtrTy,
                                                                   CacheSize-1));
      llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
      llvm::Value *CacheVal = Builder.CreateAlignedLoad(
          IntPtrTy, Builder.CreateInBoundsGEP(HashTable, Cache, Indices),
          getPointerAlign());

      // If the hash isn't in the cache, call a runtime handler to perform the
      // hard work of checking whether the vptr is for an object of the right
      // type. This will either fill in the cache and return, or produce a
      // diagnostic.
      llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
      llvm::Constant *StaticData[] = {
        EmitCheckSourceLocation(Loc),
        EmitCheckTypeDescriptor(Ty),
        CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
        llvm::ConstantInt::get(Int8Ty, TCK)
      };
      llvm::Value *DynamicData[] = { Ptr, Hash };
      EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr),
                SanitizerHandler::DynamicTypeCacheMiss, StaticData,
                DynamicData);
    }
  }

  // Done is only non-null if an earlier check created a continuation block
  // (e.g. the null check before the vptr load); fall through into it.
  if (Done) {
    Builder.CreateBr(Done);
    EmitBlock(Done);
  }
}

/// Load the object size passed alongside a pass_object_size-annotated
/// parameter, converted to a count of \p EltTy elements.
///
/// Returns nullptr when the size is unavailable or unusable: zero-sized
/// element type, \p E is not a direct reference to a ParmVarDecl, the
/// parameter lacks the pass_object_size attribute, the attribute's type is a
/// lower bound (only types 0 and 1 are used here — see the comment below), or
/// no implicit size argument was recorded for the parameter.
llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E,
                                                   QualType EltTy) {
  ASTContext &C = getContext();
  uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
  if (!EltSize)
    return nullptr;

  auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
  if (!ArrayDeclRef)
    return nullptr;

  auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
  if (!ParamDecl)
    return nullptr;

  auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
  if (!POSAttr)
    return nullptr;

  // Don't load the size if it's a lower bound.
  int POSType = POSAttr->getType();
  if (POSType != 0 && POSType != 1)
    return nullptr;

  // Find the implicit size parameter.
  auto PassedSizeIt = SizeArguments.find(ParamDecl);
  if (PassedSizeIt == SizeArguments.end())
    return nullptr;

  const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
  assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
  Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
  llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
                                              C.getSizeType(), E->getExprLoc());
  // Convert the byte count into an element count.
  llvm::Value *SizeOfElement =
      llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
  return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
}

/// If Base is known to point to the start of an array, return the length of
/// that array. Return 0 if the length cannot be determined.
static llvm::Value *getArrayIndexingBound(CodeGenFunction &CGF,
                                          const Expr *Base,
                                          QualType &IndexedType,
                                          LangOptions::StrictFlexArraysLevelKind
                                              StrictFlexArraysLevel) {
  // For the vector indexing extension, the bound is the number of elements.
  if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
    IndexedType = Base->getType();
    return CGF.Builder.getInt32(VT->getNumElements());
  }

  Base = Base->IgnoreParens();

  if (const auto *CE = dyn_cast<CastExpr>(Base)) {
    // An array that decays to a pointer has a knowable bound — unless it is a
    // flexible array member (per the configured strictness level).
    if (CE->getCastKind() == CK_ArrayToPointerDecay &&
        !CE->getSubExpr()->isFlexibleArrayMemberLike(CGF.getContext(),
                                                     StrictFlexArraysLevel)) {
      CodeGenFunction::SanitizerScope SanScope(&CGF);

      IndexedType = CE->getSubExpr()->getType();
      const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
      if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
        return CGF.Builder.getInt(CAT->getSize());

      if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
        return CGF.getVLASize(VAT).NumElts;
      // Ignore pass_object_size here.
      // It's not applicable on decayed pointers.
    }
  }

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  // Fall back to a pass_object_size bound on the (pointer) base, expressed in
  // elements of the pointee type.
  QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
  if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
    IndexedType = Base->getType();
    return POS;
  }

  return nullptr;
}

namespace {

/// \p StructAccessBase returns the base \p Expr of a field access. It returns
/// either a \p DeclRefExpr, representing the base pointer to the struct, i.e.:
///
///   p in p->a.b.c
///
/// or a \p MemberExpr, if the \p MemberExpr has the \p RecordDecl we're
/// looking for:
///
///   struct s {
///     struct s *ptr;
///     int count;
///     char array[] __attribute__((counted_by(count)));
///   };
///
/// If we have an expression like \p p->ptr->array[index], we want the
/// \p MemberExpr for \p p->ptr instead of \p p.
class StructAccessBase
    : public ConstStmtVisitor<StructAccessBase, const Expr *> {
  // The RecordDecl that encloses the counted_by field we are resolving.
  const RecordDecl *ExpectedRD;

  // True when E's type (after stripping one level of pointer) is ExpectedRD.
  bool IsExpectedRecordDecl(const Expr *E) const {
    QualType Ty = E->getType();
    if (Ty->isPointerType())
      Ty = Ty->getPointeeType();
    return ExpectedRD == Ty->getAsRecordDecl();
  }

public:
  StructAccessBase(const RecordDecl *ExpectedRD) : ExpectedRD(ExpectedRD) {}

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  // NOTE: If we build C++ support for counted_by, then we'll have to handle
  // horrors like this:
  //
  //       struct S {
  //         int x, y;
  //         int blah[] __attribute__((counted_by(x)));
  //       } s;
  //
  //       int foo(int index, int val) {
  //         int (S::*IHatePMDs)[] = &S::blah;
  //         (s.*IHatePMDs)[index] = val;
  //       }

  const Expr *Visit(const Expr *E) {
    return ConstStmtVisitor<StructAccessBase, const Expr *>::Visit(E);
  }

  // Any statement kind we don't handle explicitly is not a usable base.
  const Expr *VisitStmt(const Stmt *S) { return nullptr; }

  // These are the types we expect to return (in order of most to least
  // likely):
  //
  //   1. DeclRefExpr - This is the expression for the base of the structure.
  //      It's exactly what we want to build an access to the \p counted_by
  //      field.
  //   2. MemberExpr - This is the expression that has the same \p RecordDecl
  //      as the flexble array member's lexical enclosing \p RecordDecl. This
  //      allows us to catch things like: "p->p->array"
  //   3. CompoundLiteralExpr - This is for people who create something
  //      heretical like (struct foo has a flexible array member):
  //
  //        (struct foo){ 1, 2 }.blah[idx];
  const Expr *VisitDeclRefExpr(const DeclRefExpr *E) {
    return IsExpectedRecordDecl(E) ? E : nullptr;
  }
  const Expr *VisitMemberExpr(const MemberExpr *E) {
    if (IsExpectedRecordDecl(E) && E->isArrow())
      return E;
    // Prefer a base found deeper in the access chain; otherwise fall back to
    // this MemberExpr if it has the expected record type.
    const Expr *Res = Visit(E->getBase());
    return !Res && IsExpectedRecordDecl(E) ? E : Res;
  }
  const Expr *VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
    return IsExpectedRecordDecl(E) ? E : nullptr;
  }
  const Expr *VisitCallExpr(const CallExpr *E) {
    return IsExpectedRecordDecl(E) ? E : nullptr;
  }

  const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
    if (IsExpectedRecordDecl(E))
      return E;
    return Visit(E->getBase());
  }
  const Expr *VisitCastExpr(const CastExpr *E) {
    // An lvalue-to-rvalue conversion ends the lvalue chain; anything else is
    // transparent for the purpose of finding the base.
    if (E->getCastKind() == CK_LValueToRValue)
      return IsExpectedRecordDecl(E) ? E : nullptr;
    return Visit(E->getSubExpr());
  }
  const Expr *VisitParenExpr(const ParenExpr *E) {
    return Visit(E->getSubExpr());
  }
  const Expr *VisitUnaryAddrOf(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }
  const Expr *VisitUnaryDeref(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }
};

} // end anonymous namespace

// Reverse-ordered (innermost-first) list of (record, LLVM field index) pairs
// describing the GEP path from a struct base down to a field.
using RecIndicesTy =
    SmallVector<std::pair<const RecordDecl *, llvm::Value *>, 8>;

/// Recursively collect the GEP indices leading from \p RD to \p Field,
/// descending into nested record-typed members. Returns true once the field
/// is found; \p Indices is filled innermost-first (callers iterate reversed).
static bool getGEPIndicesToField(CodeGenFunction &CGF, const RecordDecl *RD,
                                 const FieldDecl *Field,
                                 RecIndicesTy &Indices) {
  const CGRecordLayout &Layout = CGF.CGM.getTypes().getCGRecordLayout(RD);
  int64_t FieldNo = -1;
  for (const FieldDecl *FD : RD->fields()) {
    if (!Layout.containsFieldDecl(FD))
      // This could happen if the field has a struct type that's empty. I don't
      // know why either.
      continue;

    FieldNo = Layout.getLLVMFieldNo(FD);
    if (FD == Field) {
      Indices.emplace_back(std::make_pair(RD, CGF.Builder.getInt32(FieldNo)));
      return true;
    }

    QualType Ty = FD->getType();
    if (Ty->isRecordType()) {
      if (getGEPIndicesToField(CGF, Ty->getAsRecordDecl(), Field, Indices)) {
        // All union members share LLVM field index 0.
        if (RD->isUnion())
          FieldNo = 0;
        Indices.emplace_back(std::make_pair(RD, CGF.Builder.getInt32(FieldNo)));
        return true;
      }
    }
  }

  return false;
}

/// This method is typically called in contexts where we can't generate
/// side-effects, like in __builtin_dynamic_object_size. When finding
/// expressions, only choose those that have either already been emitted or can
/// be loaded without side-effects.
///
/// - \p FAMDecl: the \p Decl for the flexible array member.
/// It may not be
/// within the top-level struct.
/// - \p CountDecl: must be within the same non-anonymous struct as \p FAMDecl.
llvm::Value *CodeGenFunction::EmitCountedByFieldExpr(
    const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
  const RecordDecl *RD = CountDecl->getParent()->getOuterLexicalRecordContext();

  // Find the base struct expr (i.e. p in p->a.b.c.d).
  const Expr *StructBase = StructAccessBase(RD).Visit(Base);
  if (!StructBase || StructBase->HasSideEffects(getContext()))
    return nullptr;

  // Materialize a raw pointer to the enclosing struct.
  llvm::Value *Res = nullptr;
  if (StructBase->getType()->isPointerType()) {
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Addr = EmitPointerWithAlignment(StructBase, &BaseInfo, &TBAAInfo);
    Res = Addr.emitRawPointer(*this);
  } else if (StructBase->isLValue()) {
    LValue LV = EmitLValue(StructBase);
    Address Addr = LV.getAddress();
    Res = Addr.emitRawPointer(*this);
  } else {
    return nullptr;
  }

  llvm::Value *Zero = Builder.getInt32(0);
  RecIndicesTy Indices;

  getGEPIndicesToField(*this, RD, CountDecl, Indices);

  // Indices were collected innermost-first; walk them in reverse to GEP from
  // the outer record down to the count field.
  for (auto I = Indices.rbegin(), E = Indices.rend(); I != E; ++I)
    Res = Builder.CreateInBoundsGEP(
        ConvertType(QualType(I->first->getTypeForDecl(), 0)), Res,
        {Zero, I->second}, "..counted_by.gep");

  return Builder.CreateAlignedLoad(ConvertType(CountDecl->getType()), Res,
                                   getIntAlign(), "..counted_by.load");
}

/// Return the FieldDecl named by \p FD's counted_by attribute (via its
/// CountAttributedType), or nullptr if \p FD is null, not count-attributed, or
/// the count expression does not resolve to a field. Indirect (anonymous
/// struct/union) fields are resolved to their underlying anonymous field.
const FieldDecl *CodeGenFunction::FindCountedByField(const FieldDecl *FD) {
  if (!FD)
    return nullptr;

  const auto *CAT = FD->getType()->getAs<CountAttributedType>();
  if (!CAT)
    return nullptr;

  const auto *CountDRE = cast<DeclRefExpr>(CAT->getCountExpr());
  const auto *CountDecl = CountDRE->getDecl();
  if (const auto *IFD = dyn_cast<IndirectFieldDecl>(CountDecl))
    CountDecl = IFD->getAnonField();

  return dyn_cast<FieldDecl>(CountDecl);
}

/// Emit a -fsanitize=array-bounds check for indexing \p Base with \p Index.
/// Computes the array bound (if determinable) and defers to
/// EmitBoundsCheckImpl; a null bound silently skips the check.
void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
                                      llvm::Value *Index, QualType IndexType,
                                      bool Accessed) {
  assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
         "should not be called unless adding bounds checks");
  const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
      getLangOpts().getStrictFlexArraysLevel();
  QualType IndexedType;
  llvm::Value *Bound =
      getArrayIndexingBound(*this, Base, IndexedType, StrictFlexArraysLevel);

  EmitBoundsCheckImpl(E, Bound, Index, IndexType, IndexedType, Accessed);
}

/// Emit the actual bounds comparison and UBSan OutOfBounds check.
/// \p Accessed selects a strict (<) comparison for dereferenced subscripts;
/// merely forming a one-past-the-end address allows Index == Bound (<=).
void CodeGenFunction::EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
                                          llvm::Value *Index,
                                          QualType IndexType,
                                          QualType IndexedType, bool Accessed) {
  if (!Bound)
    return;

  SanitizerScope SanScope(this);

  // Sign- or zero-extend the index to size_t as appropriate; the bound is
  // always treated as unsigned.
  bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
  llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
  llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);

  llvm::Constant *StaticData[] = {
    EmitCheckSourceLocation(E->getExprLoc()),
    EmitCheckTypeDescriptor(IndexedType),
    EmitCheckTypeDescriptor(IndexType)
  };
  llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
                                : Builder.CreateICmpULE(IndexVal, BoundVal);
  EmitCheck(std::make_pair(Check, SanitizerKind::ArrayBounds),
            SanitizerHandler::OutOfBounds, StaticData, Index);
}

/// Emit pre/post increment/decrement of a _Complex lvalue; only the real part
/// is adjusted. Returns the stored value for pre-ops, the original for
/// post-ops.
CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                         bool isInc, bool isPre) {
  ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc());

  llvm::Value *NextVal;
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
    uint64_t AmountVal = isInc ? 1 : -1;
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ?
"inc" : "dec");1216} else {1217QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();1218llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);1219if (!isInc)1220FVal.changeSign();1221NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);12221223// Add the inc/dec to the real part.1224NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");1225}12261227ComplexPairTy IncVal(NextVal, InVal.second);12281229// Store the updated result through the lvalue.1230EmitStoreOfComplex(IncVal, LV, /*init*/ false);1231if (getLangOpts().OpenMP)1232CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,1233E->getSubExpr());12341235// If this is a postinc, return the value read from memory, otherwise use the1236// updated value.1237return isPre ? IncVal : InVal;1238}12391240void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E,1241CodeGenFunction *CGF) {1242// Bind VLAs in the cast type.1243if (CGF && E->getType()->isVariablyModifiedType())1244CGF->EmitVariablyModifiedType(E->getType());12451246if (CGDebugInfo *DI = getModuleDebugInfo())1247DI->EmitExplicitCastType(E->getType());1248}12491250//===----------------------------------------------------------------------===//1251// LValue Expression Emission1252//===----------------------------------------------------------------------===//12531254static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo,1255TBAAAccessInfo *TBAAInfo,1256KnownNonNull_t IsKnownNonNull,1257CodeGenFunction &CGF) {1258// We allow this with ObjC object pointers because of fragile ABIs.1259assert(E->getType()->isPointerType() ||1260E->getType()->isObjCObjectPointerType());1261E = E->IgnoreParens();12621263// Casts:1264if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {1265if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))1266CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);12671268switch (CE->getCastKind()) {1269// Non-converting casts (but not C's implicit conversion 
from void*).1270case CK_BitCast:1271case CK_NoOp:1272case CK_AddressSpaceConversion:1273if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {1274if (PtrTy->getPointeeType()->isVoidType())1275break;12761277LValueBaseInfo InnerBaseInfo;1278TBAAAccessInfo InnerTBAAInfo;1279Address Addr = CGF.EmitPointerWithAlignment(1280CE->getSubExpr(), &InnerBaseInfo, &InnerTBAAInfo, IsKnownNonNull);1281if (BaseInfo) *BaseInfo = InnerBaseInfo;1282if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;12831284if (isa<ExplicitCastExpr>(CE)) {1285LValueBaseInfo TargetTypeBaseInfo;1286TBAAAccessInfo TargetTypeTBAAInfo;1287CharUnits Align = CGF.CGM.getNaturalPointeeTypeAlignment(1288E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo);1289if (TBAAInfo)1290*TBAAInfo =1291CGF.CGM.mergeTBAAInfoForCast(*TBAAInfo, TargetTypeTBAAInfo);1292// If the source l-value is opaque, honor the alignment of the1293// casted-to type.1294if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {1295if (BaseInfo)1296BaseInfo->mergeForCast(TargetTypeBaseInfo);1297Addr.setAlignment(Align);1298}1299}13001301if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&1302CE->getCastKind() == CK_BitCast) {1303if (auto PT = E->getType()->getAs<PointerType>())1304CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr,1305/*MayBeNull=*/true,1306CodeGenFunction::CFITCK_UnrelatedCast,1307CE->getBeginLoc());1308}13091310llvm::Type *ElemTy =1311CGF.ConvertTypeForMem(E->getType()->getPointeeType());1312Addr = Addr.withElementType(ElemTy);1313if (CE->getCastKind() == CK_AddressSpaceConversion)1314Addr = CGF.Builder.CreateAddrSpaceCast(1315Addr, CGF.ConvertType(E->getType()), ElemTy);1316return CGF.authPointerToPointerCast(Addr, CE->getSubExpr()->getType(),1317CE->getType());1318}1319break;13201321// Array-to-pointer decay.1322case CK_ArrayToPointerDecay:1323return CGF.EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);13241325// Derived-to-base conversions.1326case CK_UncheckedDerivedToBase:1327case 
CK_DerivedToBase: {1328// TODO: Support accesses to members of base classes in TBAA. For now, we1329// conservatively pretend that the complete object is of the base class1330// type.1331if (TBAAInfo)1332*TBAAInfo = CGF.CGM.getTBAAAccessInfo(E->getType());1333Address Addr = CGF.EmitPointerWithAlignment(1334CE->getSubExpr(), BaseInfo, nullptr,1335(KnownNonNull_t)(IsKnownNonNull ||1336CE->getCastKind() == CK_UncheckedDerivedToBase));1337auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();1338return CGF.GetAddressOfBaseClass(1339Addr, Derived, CE->path_begin(), CE->path_end(),1340CGF.ShouldNullCheckClassCastValue(CE), CE->getExprLoc());1341}13421343// TODO: Is there any reason to treat base-to-derived conversions1344// specially?1345default:1346break;1347}1348}13491350// Unary &.1351if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {1352if (UO->getOpcode() == UO_AddrOf) {1353LValue LV = CGF.EmitLValue(UO->getSubExpr(), IsKnownNonNull);1354if (BaseInfo) *BaseInfo = LV.getBaseInfo();1355if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();1356return LV.getAddress();1357}1358}13591360// std::addressof and variants.1361if (auto *Call = dyn_cast<CallExpr>(E)) {1362switch (Call->getBuiltinCallee()) {1363default:1364break;1365case Builtin::BIaddressof:1366case Builtin::BI__addressof:1367case Builtin::BI__builtin_addressof: {1368LValue LV = CGF.EmitLValue(Call->getArg(0), IsKnownNonNull);1369if (BaseInfo) *BaseInfo = LV.getBaseInfo();1370if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();1371return LV.getAddress();1372}1373}1374}13751376// TODO: conditional operators, comma.13771378// Otherwise, use the alignment of the type.1379return CGF.makeNaturalAddressForPointer(1380CGF.EmitScalarExpr(E), E->getType()->getPointeeType(), CharUnits(),1381/*ForPointeeType=*/true, BaseInfo, TBAAInfo, IsKnownNonNull);1382}13831384/// EmitPointerWithAlignment - Given an expression of pointer type, try to1385/// derive a more accurate bound on the alignment of the pointer.1386Address 
CodeGenFunction::EmitPointerWithAlignment(
    const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo,
    KnownNonNull_t IsKnownNonNull) {
  // Delegate to the static worker above, then propagate the caller's
  // non-null guarantee onto the resulting Address.
  Address Addr =
      ::EmitPointerWithAlignment(E, BaseInfo, TBAAInfo, IsKnownNonNull, *this);
  if (IsKnownNonNull && !Addr.isKnownNonNull())
    Addr.setKnownNonNull();
  return Addr;
}

/// Emit an i1 that is true when the scalar rvalue \p RV is non-null.
/// Member pointers need ABI-specific handling (their null value is not
/// necessarily an all-zero bit pattern).
llvm::Value *CodeGenFunction::EmitNonNullRValueCheck(RValue RV, QualType T) {
  llvm::Value *V = RV.getScalarVal();
  if (auto MPT = T->getAs<MemberPointerType>())
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, V, MPT);
  return Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
}

/// Produce an undefined RValue of type \p Ty, dispatching on its evaluation
/// kind (scalar / complex / aggregate).
RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType())
    return RValue::get(nullptr);

  switch (getEvaluationKind(Ty)) {
  case TEK_Complex: {
    llvm::Type *EltTy =
        ConvertType(Ty->castAs<ComplexType>()->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  }

  // If this is a use of an undefined aggregate type, the aggregate must have an
  // identifiable address. Just because the contents of the value are undefined
  // doesn't mean that the address can't be taken and compared.
  case TEK_Aggregate: {
    Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
    return RValue::getAggregate(DestPtr);
  }

  case TEK_Scalar:
    return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
  }
  llvm_unreachable("bad evaluation kind");
}

/// Report an unsupported rvalue expression and return a well-typed undef so
/// codegen can continue.
RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}

/// Report an unsupported lvalue expression and return an lvalue over an undef
/// pointer so codegen can continue.
LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *ElTy = ConvertType(E->getType());
  llvm::Type *Ty = UnqualPtrTy;
  return MakeAddrLValue(
      Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
}

/// Return true if \p Obj is (possibly via parens, casts other than
/// dynamic_cast, and __extension__) just `this`. Used to skip sanitizer
/// checks that `this` already guarantees.
bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
  const Expr *Base = Obj;
  while (!isa<CXXThisExpr>(Base)) {
    // The result of a dynamic_cast can be null.
    if (isa<CXXDynamicCastExpr>(Base))
      return false;

    if (const auto *CE = dyn_cast<CastExpr>(Base)) {
      Base = CE->getSubExpr();
    } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
      Base = PE->getSubExpr();
    } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
      if (UO->getOpcode() == UO_Extension)
        Base = UO->getSubExpr();
      else
        return false;
    } else {
      return false;
    }
  }
  return true;
}

/// Emit an lvalue for \p E and attach the sanitizer type check of kind
/// \p TCK, skipping checks that are provably redundant for this expression
/// form (e.g. alignment/null checks on accesses through `this`).
LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
  LValue LV;
  if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
    LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
  else
    LV = EmitLValue(E);
  if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) {
    SanitizerSet SkippedChecks;
    if (const auto *ME = dyn_cast<MemberExpr>(E)) {
      bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase());
      if
(IsBaseCXXThis)
        SkippedChecks.set(SanitizerKind::Alignment, true);
      if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
        SkippedChecks.set(SanitizerKind::Null, true);
    }
    EmitTypeCheck(TCK, E->getExprLoc(), LV, E->getType(), SkippedChecks);
  }
  return LV;
}

/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield reference.
/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
/// an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of the
/// LLVM value is known: For example, it may not be a pointer to an integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed size,
/// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size of the lvalue's type. If the lvalue has a variable
/// length type, this is not possible.
///
LValue CodeGenFunction::EmitLValue(const Expr *E,
                                   KnownNonNull_t IsKnownNonNull) {
  // Propagate the caller's non-null guarantee onto the computed lvalue.
  LValue LV = EmitLValueHelper(E, IsKnownNonNull);
  if (IsKnownNonNull && !LV.isKnownNonNull())
    LV.setKnownNonNull();
  return LV;
}

/// Determine the glvalue type referred to by a ConstantExpr that was emitted
/// directly: either the opaque value's own type, or the pointee of the call's
/// return type.
static QualType getConstantExprReferredType(const FullExpr *E,
                                            const ASTContext &Ctx) {
  const Expr *SE = E->getSubExpr()->IgnoreImplicit();
  if (isa<OpaqueValueExpr>(SE))
    return SE->getType();
  return cast<CallExpr>(SE)->getCallReturnType(Ctx)->getPointeeType();
}

/// Dispatch table for EmitLValue: one case per emittable statement class.
/// Unknown classes fall through to EmitUnsupportedLValue.
LValue CodeGenFunction::EmitLValueHelper(const Expr *E,
                                         KnownNonNull_t IsKnownNonNull) {
  ApplyDebugLocation DL(*this, E);
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::ObjCPropertyRefExprClass:
    llvm_unreachable("cannot emit a property reference directly");

  case Expr::ObjCSelectorExprClass:
    return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
  case Expr::ObjCIsaExprClass:
    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CompoundAssignOperatorClass: {
    // Complex compound assignments have a dedicated path; look through
    // _Atomic to classify.
    QualType Ty = E->getType();
    if (const AtomicType *AT = Ty->getAs<AtomicType>())
      Ty = AT->getValueType();
    if (!Ty->isAnyComplexType())
      return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
    return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
  }
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
  case Expr::UserDefinedLiteralClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::CXXRewrittenBinaryOperatorClass:
    return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm(),
                      IsKnownNonNull);
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ConstantExprClass: {
    // Prefer the pre-evaluated constant when one is available; otherwise emit
    // the wrapped subexpression normally.
    const ConstantExpr *CE = cast<ConstantExpr>(E);
    if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
      QualType RetType = getConstantExprReferredType(CE, getContext());
      return MakeNaturalAlignAddrLValue(Result, RetType);
    }
    return EmitLValue(cast<ConstantExpr>(E)->getSubExpr(), IsKnownNonNull);
  }
  case Expr::ParenExprClass:
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr(), IsKnownNonNull);
  case Expr::GenericSelectionExprClass:
    return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr(),
                      IsKnownNonNull);
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
  case Expr::PseudoObjectExprClass:
    return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
  case Expr::InitListExprClass:
    return EmitInitListLValue(cast<InitListExpr>(E));
  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXConstructExprClass:
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
  case Expr::CXXBindTemporaryExprClass:
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
  case Expr::CXXUuidofExprClass:
    return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
  case Expr::LambdaExprClass:
    return EmitAggExprToLValue(E);

  case Expr::ExprWithCleanupsClass: {
    const auto *cleanups = cast<ExprWithCleanups>(E);
    RunCleanupsScope Scope(*this);
    LValue LV = EmitLValue(cleanups->getSubExpr(), IsKnownNonNull);
    if (LV.isSimple()) {
      // Defend against branches out of gnu statement expressions surrounded by
      // cleanups.
      Address Addr = LV.getAddress();
      llvm::Value *V = Addr.getBasePointer();
      Scope.ForceCleanup({&V});
      Addr.replaceBasePointer(V);
      return LValue::MakeAddr(Addr, LV.getType(), getContext(),
                              LV.getBaseInfo(), LV.getTBAAInfo());
    }
    // FIXME: Is it possible to create an ExprWithCleanups that produces a
    // bitfield lvalue or some other non-simple lvalue?
    return LV;
  }

  case Expr::CXXDefaultArgExprClass: {
    auto *DAE = cast<CXXDefaultArgExpr>(E);
    CXXDefaultArgExprScope Scope(*this, DAE);
    return EmitLValue(DAE->getExpr(), IsKnownNonNull);
  }
  case Expr::CXXDefaultInitExprClass: {
    auto *DIE = cast<CXXDefaultInitExpr>(E);
    CXXDefaultInitExprScope Scope(*this, DIE);
    return EmitLValue(DIE->getExpr(), IsKnownNonNull);
  }
  case Expr::CXXTypeidExprClass:
    return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::StmtExprClass:
    return EmitStmtExprLValue(cast<StmtExpr>(E));
  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::MatrixSubscriptExprClass:
    return EmitMatrixSubscriptExpr(cast<MatrixSubscriptExpr>(E));
  case Expr::ArraySectionExprClass:
    return EmitArraySectionExpr(cast<ArraySectionExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::CXXThisExprClass:
    return MakeAddrLValue(LoadCXXThisAddress(), E->getType());
  case Expr::MemberExprClass:
    return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
  case Expr::BinaryConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(), IsKnownNonNull);
  case Expr::OpaqueValueExprClass:
    return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
  case Expr::SubstNonTypeTemplateParmExprClass:
    return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(),
                      IsKnownNonNull);
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
  case Expr::CXXAddrspaceCastExprClass:
  case Expr::ObjCBridgedCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));

  case Expr::MaterializeTemporaryExprClass:
    return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));

  case Expr::CoawaitExprClass:
    return EmitCoawaitLValue(cast<CoawaitExpr>(E));
  case Expr::CoyieldExprClass:
    return EmitCoyieldLValue(cast<CoyieldExpr>(E));
  case Expr::PackIndexingExprClass:
    return EmitLValue(cast<PackIndexingExpr>(E)->getSelectedExpr());
  }
}

/// Given an object of the given canonical type, can we safely copy a
/// value out of it based on its initializer?
static bool isConstantEmittableObjectType(QualType type) {
  assert(type.isCanonical());
  assert(!type->isReferenceType());

  // Must be const-qualified but non-volatile.
  Qualifiers qs = type.getLocalQualifiers();
  if (!qs.hasConst() || qs.hasVolatile()) return false;

  // Otherwise, all object types satisfy this except C++ classes with
  // mutable subobjects or non-trivial copy/destroy behavior.
  if (const auto *RT = dyn_cast<RecordType>(type))
    if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      if (RD->hasMutableFields() || !RD->isTrivial())
        return false;

  return true;
}

/// Can we constant-emit a load of a reference to a variable of the
/// given type? This is different from predicates like
/// Decl::mightBeUsableInConstantExpressions because we do want it to apply
/// in situations that don't necessarily satisfy the language's rules
/// for this (e.g. C++'s ODR-use rules).
/// For example, we want to be able
/// to do this with const float variables even if those variables
/// aren't marked 'constexpr'.
enum ConstantEmissionKind {
  CEK_None,
  CEK_AsReferenceOnly,
  CEK_AsValueOrReference,
  CEK_AsValueOnly
};
/// Classify how (if at all) a variable of the given type may be emitted as
/// a constant: by value, by reference, either, or not at all.
static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
  type = type.getCanonicalType();
  if (const auto *ref = dyn_cast<ReferenceType>(type)) {
    // A reference can always be emitted as a reference; if its pointee is
    // itself constant-emittable we may also emit the referenced value.
    if (isConstantEmittableObjectType(ref->getPointeeType()))
      return CEK_AsValueOrReference;
    return CEK_AsReferenceOnly;
  }
  if (isConstantEmittableObjectType(type))
    return CEK_AsValueOnly;
  return CEK_None;
}

/// Try to emit a reference to the given value without producing it as
/// an l-value.  This is just an optimization, but it avoids us needing
/// to emit global copies of variables if they're named without triggering
/// a formal use in a context where we can't emit a direct reference to them,
/// for instance if a block or lambda or a member of a local class uses a
/// const int variable or constexpr variable from an enclosing function.
CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
  ValueDecl *value = refExpr->getDecl();

  // The value needs to be an enum constant or a constant variable.
  ConstantEmissionKind CEK;
  if (isa<ParmVarDecl>(value)) {
    CEK = CEK_None;
  } else if (auto *var = dyn_cast<VarDecl>(value)) {
    CEK = checkVarTypeForConstantEmission(var->getType());
  } else if (isa<EnumConstantDecl>(value)) {
    CEK = CEK_AsValueOnly;
  } else {
    CEK = CEK_None;
  }
  if (CEK == CEK_None) return ConstantEmission();

  Expr::EvalResult result;
  bool resultIsReference;
  QualType resultType;

  // It's best to evaluate all the way as an r-value if that's permitted.
  if (CEK != CEK_AsReferenceOnly &&
      refExpr->EvaluateAsRValue(result, getContext())) {
    resultIsReference = false;
    resultType = refExpr->getType();

  // Otherwise, try to evaluate as an l-value.
  } else if (CEK != CEK_AsValueOnly &&
             refExpr->EvaluateAsLValue(result, getContext())) {
    resultIsReference = true;
    resultType = value->getType();

  // Failure.
  } else {
    return ConstantEmission();
  }

  // In any case, if the initializer has side-effects, abandon ship.
  if (result.HasSideEffects)
    return ConstantEmission();

  // In CUDA/HIP device compilation, a lambda may capture a reference variable
  // referencing a global host variable by copy. In this case the lambda should
  // make a copy of the value of the global host variable. The DRE of the
  // captured reference variable cannot be emitted as load from the host
  // global variable as compile time constant, since the host variable is not
  // accessible on device. The DRE of the captured reference variable has to be
  // loaded from captures.
  if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() &&
      refExpr->refersToEnclosingVariableOrCapture()) {
    auto *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl);
    if (MD && MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      const APValue::LValueBase &base = result.Val.getLValueBase();
      if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) {
        if (const VarDecl *VD = dyn_cast<const VarDecl>(D)) {
          if (!VD->hasAttr<CUDADeviceAttr>()) {
            return ConstantEmission();
          }
        }
      }
    }
  }

  // Emit as a constant.
  auto C = ConstantEmitter(*this).emitAbstract(refExpr->getLocation(),
                                               result.Val, resultType);

  // Make sure we emit a debug reference to the global variable.
  // This should probably fire even for
  if (isa<VarDecl>(value)) {
    if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
      EmitDeclRefExprDbgValue(refExpr, result.Val);
  } else {
    assert(isa<EnumConstantDecl>(value));
    EmitDeclRefExprDbgValue(refExpr, result.Val);
  }

  // If we emitted a reference constant, we need to dereference that.
  if (resultIsReference)
    return ConstantEmission::forReference(C);

  return ConstantEmission::forValue(C);
}

/// If the member expression names a static variable member, rebuild it as an
/// equivalent DeclRefExpr so it can go through the DRE constant-emission
/// path above.  Returns null for non-VarDecl members.
static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF,
                                                        const MemberExpr *ME) {
  if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
    // Try to emit static variable member expressions as DREs.
    return DeclRefExpr::Create(
        CGF.getContext(), NestedNameSpecifierLoc(), SourceLocation(), VD,
        /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(),
        ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse());
  }
  return nullptr;
}

/// MemberExpr overload: constant-emit only when the member can be rewritten
/// as a DeclRefExpr.
CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(const MemberExpr *ME) {
  if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, ME))
    return tryEmitAsConstant(DRE);
  return ConstantEmission();
}

/// Materialize a ConstantEmission as a scalar llvm::Value, loading through
/// the reference lvalue when the emission is a reference rather than a value.
llvm::Value *CodeGenFunction::emitScalarConstant(
    const CodeGenFunction::ConstantEmission &Constant, Expr *E) {
  assert(Constant && "not a constant");
  if (Constant.isReference())
    return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E),
                            E->getExprLoc())
        .getScalarVal();
  return Constant.getValue();
}

/// Convenience overload: unpack the lvalue's address, qualifiers, and access
/// metadata and forward to the main EmitLoadOfScalar.
llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
                                               SourceLocation Loc) {
  return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
                          lvalue.getType(), Loc, lvalue.getBaseInfo(),
                          lvalue.getTBAAInfo(), lvalue.isNontemporal());
}

/// Whether values of this type are conceptually boolean: bool itself, an
/// enum whose underlying integer type is bool, or an _Atomic of such a type.
static bool hasBooleanRepresentation(QualType Ty) {
  if (Ty->isBooleanType())
    return true;

  if (const EnumType *ET = Ty->getAs<EnumType>())
    return ET->getDecl()->getIntegerType()->isBooleanType();

  if (const AtomicType *AT = Ty->getAs<AtomicType>())
    return hasBooleanRepresentation(AT->getValueType());

  return false;
}

/// Compute the half-open value range [Min, End) of loads of Ty, for booleans
/// and (with StrictEnums) unfixed C++ enums.  Returns false when no range
/// restriction applies.
static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
                            llvm::APInt &Min, llvm::APInt &End,
                            bool
StrictEnums, bool IsBool) {
  const EnumType *ET = Ty->getAs<EnumType>();
  // Only unfixed (no explicit underlying type) C++ enums get a range, and
  // only when strict-enum semantics are requested.
  bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
                                ET && !ET->getDecl()->isFixed();
  if (!IsBool && !IsRegularCPlusPlusEnum)
    return false;

  if (IsBool) {
    // Booleans occupy their full storage size but may only hold 0 or 1.
    Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
    End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
  } else {
    const EnumDecl *ED = ET->getDecl();
    ED->getValueRange(End, Min);
  }
  return true;
}

/// Build !range metadata [Min, End) for loads of Ty, or null when no range
/// restriction applies (see getRangeForType).
llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
  llvm::APInt Min, End;
  if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums,
                       hasBooleanRepresentation(Ty)))
    return nullptr;

  llvm::MDBuilder MDHelper(getLLVMContext());
  return MDHelper.createRange(Min, End);
}

/// Emit a UBSan (-fsanitize=bool / -fsanitize=enum) check that a loaded
/// value lies in the valid range for its type.  Returns true when the caller
/// must NOT attach range metadata to the load (either a check was emitted,
/// or the enum check is enabled but no range could be computed).
bool CodeGenFunction::EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
                                           SourceLocation Loc) {
  bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool);
  bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum);
  if (!HasBoolCheck && !HasEnumCheck)
    return false;

  bool IsBool = hasBooleanRepresentation(Ty) ||
                NSAPI(CGM.getContext()).isObjCBOOLType(Ty);
  bool NeedsBoolCheck = HasBoolCheck && IsBool;
  bool NeedsEnumCheck = HasEnumCheck && Ty->getAs<EnumType>();
  if (!NeedsBoolCheck && !NeedsEnumCheck)
    return false;

  // Single-bit booleans don't need to be checked. Special-case this to avoid
  // a bit width mismatch when handling bitfield values. This is handled by
  // EmitFromMemory for the non-bitfield case.
  if (IsBool &&
      cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1)
    return false;

  llvm::APInt Min, End;
  if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool))
    return true;

  auto &Ctx = getLLVMContext();
  SanitizerScope SanScope(this);
  llvm::Value *Check;
  // getRangeForType yields a half-open [Min, End); make End inclusive for
  // the comparisons below.
  --End;
  if (!Min) {
    // Range starts at zero: a single unsigned upper-bound compare suffices.
    Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End));
  } else {
    llvm::Value *Upper =
        Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End));
    llvm::Value *Lower =
        Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min));
    Check = Builder.CreateAnd(Upper, Lower);
  }
  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
                                  EmitCheckTypeDescriptor(Ty)};
  SanitizerMask Kind =
      NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool;
  EmitCheck(std::make_pair(Check, Kind), SanitizerHandler::LoadInvalidValue,
            StaticArgs, EmitCheckValue(Value));
  return true;
}

/// Core scalar load: handles thread-local address rewriting, boolean-vector
/// unpacking, the vec3-as-vec4 load optimization, atomic loads, nontemporal
/// and TBAA metadata, and the sanitizer range check.
llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
                                               QualType Ty,
                                               SourceLocation Loc,
                                               LValueBaseInfo BaseInfo,
                                               TBAAAccessInfo TBAAInfo,
                                               bool isNontemporal) {
  // Thread-local globals must be accessed through the TLS intrinsic.
  if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
    if (GV->isThreadLocal())
      Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
                              NotKnownNonNull);

  if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
    // Boolean vectors use `iN` as storage type.
    if (ClangVecTy->isExtVectorBoolType()) {
      llvm::Type *ValTy = ConvertType(Ty);
      unsigned ValNumElems =
          cast<llvm::FixedVectorType>(ValTy)->getNumElements();
      // Load the `iP` storage object (P is the padded vector size).
      auto *RawIntV = Builder.CreateLoad(Addr, Volatile, "load_bits");
      const auto *RawIntTy = RawIntV->getType();
      assert(RawIntTy->isIntegerTy() && "compressed iN storage for bitvectors");
      // Bitcast iP --> <P x i1>.
      auto *PaddedVecTy = llvm::FixedVectorType::get(
          Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
      llvm::Value *V = Builder.CreateBitCast(RawIntV, PaddedVecTy);
      // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
      V = emitBoolVecConversion(V, ValNumElems, "extractvec");

      return EmitFromMemory(V, Ty);
    }

    // Handle vectors of size 3 like size 4 for better performance.
    const llvm::Type *EltTy = Addr.getElementType();
    const auto *VTy = cast<llvm::FixedVectorType>(EltTy);

    if (!CGM.getCodeGenOpts().PreserveVec3Type && VTy->getNumElements() == 3) {

      llvm::VectorType *vec4Ty =
          llvm::FixedVectorType::get(VTy->getElementType(), 4);
      Address Cast = Addr.withElementType(vec4Ty);
      // Now load value.
      llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");

      // Shuffle vector to get vec3.
      V = Builder.CreateShuffleVector(V, ArrayRef<int>{0, 1, 2}, "extractVec");
      return EmitFromMemory(V, Ty);
    }
  }

  // Atomic operations have to be done on integral types.
  LValue AtomicLValue =
      LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
  if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
    return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
  }

  Addr =
      Addr.withElementType(convertTypeForLoadStore(Ty, Addr.getElementType()));

  llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
  if (isNontemporal) {
    llvm::MDNode *Node = llvm::MDNode::get(
        Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
    Load->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
  }

  CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);

  if (EmitScalarRangeCheck(Load, Ty, Loc)) {
    // In order to prevent the optimizer from throwing away the check, don't
    // attach range metadata to the load.
  } else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
    if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty)) {
      Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
      Load->setMetadata(llvm::LLVMContext::MD_noundef,
                        llvm::MDNode::get(getLLVMContext(), std::nullopt));
    }

  return EmitFromMemory(Load, Ty);
}

/// Converts a scalar value from its primary IR type (as returned
/// by ConvertType) to its load/store type (as returned by
/// convertTypeForLoadStore).
llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
  if (hasBooleanRepresentation(Ty) || Ty->isBitIntType()) {
    // Widen (or narrow) to the in-memory integer width, preserving sign.
    llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
    bool Signed = Ty->isSignedIntegerOrEnumerationType();
    return Builder.CreateIntCast(Value, StoreTy, Signed, "storedv");
  }

  if (Ty->isExtVectorBoolType()) {
    llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
    // Expand to the memory bit width.
    unsigned MemNumElems = StoreTy->getPrimitiveSizeInBits();
    // <N x i1> --> <P x i1>.
    Value = emitBoolVecConversion(Value, MemNumElems, "insertvec");
    // <P x i1> --> iP.
    Value = Builder.CreateBitCast(Value, StoreTy);
  }

  return Value;
}

/// Converts a scalar value from its load/store type (as returned
/// by convertTypeForLoadStore) to its primary IR type (as returned
/// by ConvertType).
llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
  if (Ty->isExtVectorBoolType()) {
    const auto *RawIntTy = Value->getType();
    // Bitcast iP --> <P x i1>.
    auto *PaddedVecTy = llvm::FixedVectorType::get(
        Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
    auto *V = Builder.CreateBitCast(Value, PaddedVecTy);
    // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
    llvm::Type *ValTy = ConvertType(Ty);
    unsigned ValNumElems = cast<llvm::FixedVectorType>(ValTy)->getNumElements();
    return emitBoolVecConversion(V, ValNumElems, "extractvec");
  }

  if (hasBooleanRepresentation(Ty) ||
Ty->isBitIntType()) {
    // Truncate from the in-memory width back to the primary IR width.
    llvm::Type *ResTy = ConvertType(Ty);
    return Builder.CreateTrunc(Value, ResTy, "loadedv");
  }

  return Value;
}

// Convert the pointer of \p Addr to a pointer to a vector (the value type of
// MatrixType), if it points to a array (the memory type of MatrixType).
static RawAddress MaybeConvertMatrixAddress(RawAddress Addr,
                                            CodeGenFunction &CGF,
                                            bool IsVector = true) {
  auto *ArrayTy = dyn_cast<llvm::ArrayType>(Addr.getElementType());
  if (ArrayTy && IsVector) {
    // [N x T]* --> <N x T>*.
    auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
                                                ArrayTy->getNumElements());

    return Addr.withElementType(VectorTy);
  }
  auto *VectorTy = dyn_cast<llvm::VectorType>(Addr.getElementType());
  if (VectorTy && !IsVector) {
    // <N x T>* --> [N x T]*.
    auto *ArrayTy = llvm::ArrayType::get(
        VectorTy->getElementType(),
        cast<llvm::FixedVectorType>(VectorTy)->getNumElements());

    return Addr.withElementType(ArrayTy);
  }

  return Addr;
}

// Emit a store of a matrix LValue. This may require casting the original
// pointer to memory address (ArrayType) to a pointer to the value type
// (VectorType).
static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
                                    bool isInit, CodeGenFunction &CGF) {
  Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(), CGF,
                                           value->getType()->isVectorTy());
  CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
                        lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
                        lvalue.isNontemporal());
}

/// Core scalar store: mirrors EmitLoadOfScalar — handles thread-local
/// address rewriting, the vec3-as-vec4 store optimization, conversion to the
/// in-memory representation, atomic stores, and nontemporal/TBAA metadata.
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
                                        bool Volatile, QualType Ty,
                                        LValueBaseInfo BaseInfo,
                                        TBAAAccessInfo TBAAInfo,
                                        bool isInit, bool isNontemporal) {
  // Thread-local globals must be accessed through the TLS intrinsic.
  if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
    if (GV->isThreadLocal())
      Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
                              NotKnownNonNull);

  llvm::Type *SrcTy = Value->getType();
  if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
    auto *VecTy = dyn_cast<llvm::FixedVectorType>(SrcTy);
    if (!CGM.getCodeGenOpts().PreserveVec3Type) {
      // Handle vec3 special.
      if (VecTy && !ClangVecTy->isExtVectorBoolType() &&
          cast<llvm::FixedVectorType>(VecTy)->getNumElements() == 3) {
        // Our source is a vec3, do a shuffle vector to make it a vec4.
        Value = Builder.CreateShuffleVector(Value, ArrayRef<int>{0, 1, 2, -1},
                                            "extractVec");
        SrcTy = llvm::FixedVectorType::get(VecTy->getElementType(), 4);
      }
      if (Addr.getElementType() != SrcTy) {
        Addr = Addr.withElementType(SrcTy);
      }
    }
  }

  Value = EmitToMemory(Value, Ty);

  LValue AtomicLValue =
      LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
  if (Ty->isAtomicType() ||
      (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) {
    EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit);
    return;
  }

  llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
  if (isNontemporal) {
    llvm::MDNode *Node =
        llvm::MDNode::get(Store->getContext(),
                          llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
    Store->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
  }

  CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
}

/// LValue overload: routes matrix stores through the matrix address
/// conversion, otherwise unpacks the lvalue and forwards to the core store.
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
                                        bool isInit) {
  if (lvalue.getType()->isConstantMatrixType()) {
    EmitStoreOfMatrixScalar(value, lvalue, isInit, *this);
    return;
  }

  EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
                    lvalue.getType(), lvalue.getBaseInfo(),
                    lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
}

// Emit a load of a LValue of matrix type. This may require casting the pointer
// to memory address (ArrayType) to a pointer to the value type (VectorType).
static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc,
                                     CodeGenFunction &CGF) {
  assert(LV.getType()->isConstantMatrixType());
  Address Addr = MaybeConvertMatrixAddress(LV.getAddress(), CGF);
  LV.setAddress(Addr);
  return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
}

/// Load an lvalue of any evaluation kind; aggregates are copied into the
/// provided slot rather than loaded directly.
RValue CodeGenFunction::EmitLoadOfAnyValue(LValue LV, AggValueSlot Slot,
                                           SourceLocation Loc) {
  QualType Ty = LV.getType();
  switch (getEvaluationKind(Ty)) {
  case TEK_Scalar:
    return EmitLoadOfLValue(LV, Loc);
  case TEK_Complex:
    return RValue::getComplex(EmitLoadOfComplex(LV, Loc));
  case TEK_Aggregate:
    EmitAggFinalDestCopy(Ty, Slot, LV, EVK_NonRValue);
    return Slot.asRValue();
  }
  llvm_unreachable("bad evaluation kind");
}

/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
  if (LV.isObjCWeak()) {
    // load of a __weak object.
    Address AddrWeakObj = LV.getAddress();
    return
RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                             AddrWeakObj));
  }
  if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
    // In MRC mode, we do a load+autorelease.
    if (!getLangOpts().ObjCAutoRefCount) {
      return RValue::get(EmitARCLoadWeak(LV.getAddress()));
    }

    // In ARC mode, we load retained and then consume the value.
    llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress());
    Object = EmitObjCConsumeObject(LV.getType(), Object);
    return RValue::get(Object);
  }

  if (LV.isSimple()) {
    assert(!LV.getType()->isFunctionType());

    if (LV.getType()->isConstantMatrixType())
      return EmitLoadOfMatrixLValue(LV, Loc, *this);

    // Everything needs a load.
    return RValue::get(EmitLoadOfScalar(LV, Loc));
  }

  if (LV.isVectorElt()) {
    // Load the whole vector, then extract the addressed element.
    llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
                                              LV.isVolatileQualified());
    return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt()) {
    return EmitLoadOfExtVectorElementLValue(LV);
  }

  // Global Register variables always invoke intrinsics
  if (LV.isGlobalReg())
    return EmitLoadOfGlobalRegLValue(LV);

  if (LV.isMatrixElt()) {
    llvm::Value *Idx = LV.getMatrixIdx();
    if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
      // Tell the optimizer the flattened index is in bounds.
      const auto *const MatTy = LV.getType()->castAs<ConstantMatrixType>();
      llvm::MatrixBuilder MB(Builder);
      MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
    }
    llvm::LoadInst *Load =
        Builder.CreateLoad(LV.getMatrixAddress(), LV.isVolatileQualified());
    return RValue::get(Builder.CreateExtractElement(Load, Idx, "matrixext"));
  }

  assert(LV.isBitField() && "Unknown LValue type!");
  return EmitLoadOfBitfieldLValue(LV, Loc);
}

/// Load a bit-field: load its storage unit, then shift and mask the field
/// into place, sign- or zero-extending per the field's signedness.  Under
/// AAPCS with a volatile field, the volatile storage geometry is used.
RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
                                                 SourceLocation Loc) {
  const CGBitFieldInfo &Info = LV.getBitFieldInfo();

  // Get the output type.
  llvm::Type *ResLTy = ConvertType(LV.getType());

  Address Ptr = LV.getBitFieldAddress();
  llvm::Value *Val =
      Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");

  bool UseVolatile = LV.isVolatileQualified() &&
                     Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
  const unsigned StorageSize =
      UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
  if (Info.IsSigned) {
    assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
    // Shift the field to the top, then arithmetic-shift back down so the
    // sign bit is replicated.
    unsigned HighBits = StorageSize - Offset - Info.Size;
    if (HighBits)
      Val = Builder.CreateShl(Val, HighBits, "bf.shl");
    if (Offset + HighBits)
      Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr");
  } else {
    // Unsigned: shift down, then mask off any bits above the field.
    if (Offset)
      Val = Builder.CreateLShr(Val, Offset, "bf.lshr");
    if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
      Val = Builder.CreateAnd(
          Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear");
  }
  Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
  EmitScalarRangeCheck(Val, LV.getType(), Loc);
  return RValue::get(Val);
}

// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
  llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
                                        LV.isVolatileQualified());

  // HLSL allows treating scalars as one-element vectors. Converting the scalar
  // IR value to a vector here allows the rest of codegen to behave as normal.
  if (getLangOpts().HLSL && !Vec->getType()->isVectorTy()) {
    llvm::Type *DstTy = llvm::FixedVectorType::get(Vec->getType(), 1);
    llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
    Vec = Builder.CreateInsertElement(DstTy, Vec, Zero, "cast.splat");
  }

  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be extracting
  // a single element.  Just codegen as an extractelement.
  const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
    return RValue::get(Builder.CreateExtractElement(Vec, Elt));
  }

  // Always use shuffle vector to try to retain the original program structure
  unsigned NumResultElts = ExprVT->getNumElements();

  SmallVector<int, 4> Mask;
  for (unsigned i = 0; i != NumResultElts; ++i)
    Mask.push_back(getAccessedFieldNo(i, Elts));

  Vec = Builder.CreateShuffleVector(Vec, Mask);
  return RValue::get(Vec);
}

/// Generates lvalue for partial ext_vector access.
Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
  Address VectorAddress = LV.getExtVectorAddress();
  QualType EQT = LV.getType()->castAs<VectorType>()->getElementType();
  llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);

  // Reinterpret the vector address as a pointer to its element type, then
  // GEP to the first accessed element.
  Address CastToPointerElement = VectorAddress.withElementType(VectorElementTy);

  const llvm::Constant *Elts = LV.getExtVectorElts();
  unsigned ix = getAccessedFieldNo(0, Elts);

  Address VectorBasePtrPlusIx =
      Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
                                     "vector.elt");

  return VectorBasePtrPlusIx;
}

/// Load of global named registers are always calls to intrinsics.
RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV)
{
  assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
         "Bad type for register variable");
  llvm::MDNode *RegName = cast<llvm::MDNode>(
      cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());

  // We accept integer and pointer types only
  llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
  llvm::Type *Ty = OrigTy;
  // Pointer registers are read as a pointer-sized integer, then converted
  // back with inttoptr below.
  if (OrigTy->isPointerTy())
    Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
  llvm::Type *Types[] = { Ty };

  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
  llvm::Value *Call = Builder.CreateCall(
      F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
  if (OrigTy->isPointerTy())
    Call = Builder.CreateIntToPtr(Call, OrigTy);
  return RValue::get(Call);
}

/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
                                             bool isInit) {
  if (!Dst.isSimple()) {
    if (Dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element.
      llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
                                            Dst.isVolatileQualified());
      // Boolean vectors are stored as an integer; unpack to <N x i1> so the
      // element can be inserted, then pack back before storing.
      auto *IRStoreTy = dyn_cast<llvm::IntegerType>(Vec->getType());
      if (IRStoreTy) {
        auto *IRVecTy = llvm::FixedVectorType::get(
            Builder.getInt1Ty(), IRStoreTy->getPrimitiveSizeInBits());
        Vec = Builder.CreateBitCast(Vec, IRVecTy);
        // iN --> <N x i1>.
      }
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
                                        Dst.getVectorIdx(), "vecins");
      if (IRStoreTy) {
        // <N x i1> --> <iN>.
        Vec = Builder.CreateBitCast(Vec, IRStoreTy);
      }
      Builder.CreateStore(Vec, Dst.getVectorAddress(),
                          Dst.isVolatileQualified());
      return;
    }

    // If this is an update of extended vector elements, insert them as
    // appropriate.
    if (Dst.isExtVectorElt())
      return EmitStoreThroughExtVectorComponentLValue(Src, Dst);

    if (Dst.isGlobalReg())
      return EmitStoreThroughGlobalRegLValue(Src, Dst);

    if (Dst.isMatrixElt()) {
      llvm::Value *Idx = Dst.getMatrixIdx();
      if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
        // Tell the optimizer the flattened index is in bounds.
        const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>();
        llvm::MatrixBuilder MB(Builder);
        MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
      }
      llvm::Instruction *Load = Builder.CreateLoad(Dst.getMatrixAddress());
      llvm::Value *Vec =
          Builder.CreateInsertElement(Load, Src.getScalarVal(), Idx, "matins");
      Builder.CreateStore(Vec, Dst.getMatrixAddress(),
                          Dst.isVolatileQualified());
      return;
    }

    assert(Dst.isBitField() && "Unknown LValue type");
    return EmitStoreThroughBitfieldLValue(Src, Dst);
  }

  // There's special magic for assigning into an ARC-qualified l-value.
  if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
      llvm_unreachable("present but none");

    case Qualifiers::OCL_ExplicitNone:
      // nothing special
      break;

    case Qualifiers::OCL_Strong:
      if (isInit) {
        Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal()));
        break;
      }
      EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Weak:
      if (isInit)
        // Initialize and then skip the primitive store.
        EmitARCInitWeak(Dst.getAddress(), Src.getScalarVal());
      else
        EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(),
                         /*ignore*/ true);
      return;

    case Qualifiers::OCL_Autoreleasing:
      Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
                                                     Src.getScalarVal()));
      // fall into the normal path
      break;
    }
  }

  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // load of a __weak object.
    Address LvalueDst = Dst.getAddress();
    llvm::Value *src =
Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
    return;
  }

  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // load of a __strong object.
    Address LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    if (Dst.isObjCIvar()) {
      assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
      // Compute the ivar's byte offset from its base object so the runtime
      // assign entry point can be given the offset.
      llvm::Type *ResultType = IntPtrTy;
      Address dst = EmitPointerWithAlignment(Dst.getBaseIvarExp());
      llvm::Value *RHS = dst.emitRawPointer(*this);
      RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
      llvm::Value *LHS = Builder.CreatePtrToInt(LvalueDst.emitRawPointer(*this),
                                                ResultType, "sub.ptr.lhs.cast");
      llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst, BytesBetween);
    } else if (Dst.isGlobalObjCRef()) {
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
                                                Dst.isThreadLocalRef());
    }
    else
      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
    return;
  }

  assert(Src.isScalar() && "Can't emit an agg store with this method");
  EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
}

/// Store into a bit-field: read-modify-write the storage unit, masking the
/// source into place.  If \p Result is non-null it receives the new value of
/// the field (sign-extended as needed), for expressions that use the result
/// of the assignment.
void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                                     llvm::Value **Result) {
  const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
  llvm::Type *ResLTy = convertTypeForLoadStore(Dst.getType());
  Address Ptr = Dst.getBitFieldAddress();

  // Get the source value, truncated to the width of the bit-field.
  llvm::Value *SrcVal = Src.getScalarVal();

  // Cast the source to the storage type and shift it into place.
  SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
                                 /*isSigned=*/false);
  llvm::Value *MaskedVal = SrcVal;

  const bool UseVolatile =
      CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
      Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
  const unsigned StorageSize =
      UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
  // See if there are other bits in the bitfield's storage we'll need to load
  // and mask together with source before storing.
  if (StorageSize != Info.Size) {
    assert(StorageSize > Info.Size && "Invalid bitfield size.");
    llvm::Value *Val =
        Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");

    // Mask the source value as needed.
    if (!hasBooleanRepresentation(Dst.getType()))
      SrcVal = Builder.CreateAnd(
          SrcVal, llvm::APInt::getLowBitsSet(StorageSize, Info.Size),
          "bf.value");
    MaskedVal = SrcVal;
    if (Offset)
      SrcVal = Builder.CreateShl(SrcVal, Offset, "bf.shl");

    // Mask out the original value.
    Val = Builder.CreateAnd(
        Val, ~llvm::APInt::getBitsSet(StorageSize, Offset, Offset + Info.Size),
        "bf.clear");

    // Or together the unchanged values and the source value.
    SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
  } else {
    assert(Offset == 0);
    // According to the AAPCS:
    // When a volatile bit-field is written, and its container does not overlap
    // with any non-bit-field member, its container must be read exactly once
    // and written exactly once using the access width appropriate to the type
    // of the container. The two accesses are not atomic.
    if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
        CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
      Builder.CreateLoad(Ptr, true, "bf.load");
  }

  // Write the new value back out.
  Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());

  // Return the new value of the bit-field, if requested.
  if (Result) {
    llvm::Value *ResultVal = MaskedVal;

    // Sign extend the value if needed.
    if (Info.IsSigned) {
      assert(Info.Size <= StorageSize);
      unsigned HighBits = StorageSize - Info.Size;
      if (HighBits) {
        ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
        ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
      }
    }

    ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
                                      "bf.result.cast");
    *Result = EmitFromMemory(ResultVal, Dst.getType());
  }
}

/// Store through an ext-vector component lvalue (e.g. v.xy = ...), either a
/// whole-vector shuffle or a single-element insert, via read-modify-write.
void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
                                                               LValue Dst) {
  // HLSL allows storing to scalar values through ExtVector component LValues.
  // To support this we need to handle the case where the destination address is
  // a scalar.
  Address DstAddr = Dst.getExtVectorAddress();
  if (!DstAddr.getElementType()->isVectorTy()) {
    assert(!Dst.getType()->isVectorType() &&
           "this should only occur for non-vector l-values");
    Builder.CreateStore(Src.getScalarVal(), DstAddr, Dst.isVolatileQualified());
    return;
  }

  // This access turns into a read/modify/write of the vector. Load the input
  // value now.
  llvm::Value *Vec = Builder.CreateLoad(DstAddr, Dst.isVolatileQualified());
  const llvm::Constant *Elts = Dst.getExtVectorElts();

  llvm::Value *SrcVal = Src.getScalarVal();

  if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
    unsigned NumSrcElts = VTy->getNumElements();
    unsigned NumDstElts =
        cast<llvm::FixedVectorType>(Vec->getType())->getNumElements();
    if (NumDstElts == NumSrcElts) {
      // Use shuffle vector if the src and destination are the same number of
      // elements and restore the vector mask since it is on the side it will be
      // stored.
      SmallVector<int, 4> Mask(NumDstElts);
      for (unsigned i = 0; i != NumSrcElts; ++i)
        Mask[getAccessedFieldNo(i, Elts)] = i;

      Vec = Builder.CreateShuffleVector(SrcVal, Mask);
    } else if (NumDstElts > NumSrcElts) {
      // Extend the source vector to the same length and then shuffle it
      // into the destination.
      // FIXME: since we're shuffling with undef, can we just use the indices
      // into that? This could be simpler.
      SmallVector<int, 4> ExtMask;
      for (unsigned i = 0; i != NumSrcElts; ++i)
        ExtMask.push_back(i);
      ExtMask.resize(NumDstElts, -1);
      llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(SrcVal, ExtMask);
      // build identity
      SmallVector<int, 4> Mask;
      for (unsigned i = 0; i != NumDstElts; ++i)
        Mask.push_back(i);

      // When the vector size is odd and .odd or .hi is used, the last element
      // of the Elts constant array will be one past the size of the vector.
      // Ignore the last element here, if it is greater than the mask size.
      if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
        NumSrcElts--;

      // modify when what gets shuffled in
      for (unsigned i = 0; i != NumSrcElts; ++i)
        Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts;
      Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, Mask);
    } else {
      // We should never shorten the vector
      llvm_unreachable("unexpected shorten vector length");
    }
  } else {
    // If the Src is a scalar (not a vector), and the target is a vector it must
    // be updating one element.
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
    Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
  }

  Builder.CreateStore(Vec, Dst.getExtVectorAddress(),
                      Dst.isVolatileQualified());
}

/// Store of global named registers are always calls to intrinsics.
void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) {
  assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
         "Bad type for register variable");
  llvm::MDNode *RegName = cast<llvm::MDNode>(
      cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
  assert(RegName && "Register LValue is not metadata");

  // We accept integer and pointer types only
  llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
  llvm::Type *Ty = OrigTy;
  if (OrigTy->isPointerTy())
    Ty
= CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
    llvm::Type *Types[] = { Ty };

    llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
    llvm::Value *Value = Src.getScalarVal();
    // Pointer values are passed to the intrinsic as integers of pointer width.
    if (OrigTy->isPointerTy())
      Value = Builder.CreatePtrToInt(Value, Ty);
    Builder.CreateCall(
        F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value});
  }

  // setObjCGCLValueClass - sets class of the lvalue for the purpose of
  // generating write-barriers API. It is currently a global, ivar,
  // or neither.
  static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
                                   LValue &LV,
                                   bool IsMemberAccess=false) {
    if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
      return;

    if (isa<ObjCIvarRefExpr>(E)) {
      QualType ExpTy = E->getType();
      if (IsMemberAccess && ExpTy->isPointerType()) {
        // If ivar is a structure pointer, assigning to field of
        // this struct follows gcc's behavior and makes it a non-ivar
        // write-barrier conservatively.
        ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
        if (ExpTy->isRecordType()) {
          LV.setObjCIvar(false);
          return;
        }
      }
      LV.setObjCIvar(true);
      auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
      LV.setBaseIvarExp(Exp->getBase());
      LV.setObjCArray(E->getType()->isArrayType());
      return;
    }

    if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
      if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
        if (VD->hasGlobalStorage()) {
          LV.setGlobalObjCRef(true);
          LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
        }
      }
      LV.setObjCArray(E->getType()->isArrayType());
      return;
    }

    if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
      setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
      return;
    }

    if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
      setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
      if (LV.isObjCIvar()) {
        // If cast is to a structure pointer, follow gcc's behavior and make it
        // a non-ivar write-barrier.
        QualType ExpTy = E->getType();
        if (ExpTy->isPointerType())
          ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
        if (ExpTy->isRecordType())
          LV.setObjCIvar(false);
      }
      return;
    }

    if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
      setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
      return;
    }

    if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
      setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
      return;
    }

    if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
      setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
      return;
    }

    if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
      setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
      return;
    }

    if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
      setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
      if (LV.isObjCIvar() && !LV.isObjCArray())
        // Using array syntax to assigning to what an ivar points to is not
        // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
        LV.setObjCIvar(false);
      else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
        // Using array syntax to assigning to what global points to is not
        // same as assigning to the global itself. {id *G;} G[i] = 0;
        LV.setGlobalObjCRef(false);
      return;
    }

    if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
      setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
      // We don't know if member is an 'ivar', but this flag is looked at
      // only in the context of LV.isObjCIvar().
      LV.setObjCArray(E->getType()->isArrayType());
      return;
    }
  }

  /// Rewrite \p Addr to point at the current thread's private copy of an
  /// OpenMP threadprivate variable, using either the OMPIRBuilder or the
  /// OpenMP runtime, and wrap it in an LValue of type \p T.
  static LValue EmitThreadPrivateVarDeclLValue(
      CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
      llvm::Type *RealVarTy, SourceLocation Loc) {
    if (CGF.CGM.getLangOpts().OpenMPIRBuilder)
      Addr = CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
          CGF, VD, Addr, Loc);
    else
      Addr =
          CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);

    Addr = Addr.withElementType(RealVarTy);
    return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
  }

  static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF,
                                             const VarDecl *VD, QualType T) {
    std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
        OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
    // Return an invalid address if variable is MT_To (or MT_Enter starting with
    // OpenMP 5.2) and unified memory is not enabled.
  // For all other cases: MT_Link
  // and MT_To (or MT_Enter) with unified memory, return a valid address.
  if (!Res || ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
                *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
               !CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory()))
    return Address::invalid();
  assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
          ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
            *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
           CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) &&
         "Expected link clause OR to clause with unified memory enabled.");
  QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
  Address Addr = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
  return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>());
}

/// Load the pointee address of a reference lvalue, decorating the load with
/// the reference's TBAA info and returning a naturally-aligned Address for
/// the pointee type.
Address
CodeGenFunction::EmitLoadOfReference(LValue RefLVal,
                                     LValueBaseInfo *PointeeBaseInfo,
                                     TBAAAccessInfo *PointeeTBAAInfo) {
  llvm::LoadInst *Load =
      Builder.CreateLoad(RefLVal.getAddress(), RefLVal.isVolatile());
  CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo());
  return makeNaturalAddressForPointer(Load, RefLVal.getType()->getPointeeType(),
                                      CharUnits(), /*ForPointeeType=*/true,
                                      PointeeBaseInfo, PointeeTBAAInfo);
}

LValue CodeGenFunction::EmitLoadOfReferenceLValue(LValue RefLVal) {
  LValueBaseInfo PointeeBaseInfo;
  TBAAAccessInfo PointeeTBAAInfo;
  Address PointeeAddr = EmitLoadOfReference(RefLVal, &PointeeBaseInfo,
                                            &PointeeTBAAInfo);
  return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(),
                        PointeeBaseInfo, PointeeTBAAInfo);
}

Address CodeGenFunction::EmitLoadOfPointer(Address Ptr,
                                           const PointerType *PtrTy,
                                           LValueBaseInfo *BaseInfo,
                                           TBAAAccessInfo *TBAAInfo) {
  llvm::Value *Addr = Builder.CreateLoad(Ptr);
  return makeNaturalAddressForPointer(Addr, PtrTy->getPointeeType(),
                                      CharUnits(), /*ForPointeeType=*/true,
                                      BaseInfo, TBAAInfo);
}

LValue CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr,
                                                const PointerType *PtrTy) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo);
  return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo);
}

/// Build an lvalue for a reference to a global variable declaration,
/// handling TLS wrappers, OpenMP declare-target/threadprivate variables,
/// and reference-typed globals.
static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
                                      const Expr *E, const VarDecl *VD) {
  QualType T = E->getType();

  // If it's thread_local, emit a call to its wrapper function instead.
  if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
      CGF.CGM.getCXXABI().usesThreadWrapperFunction(VD))
    return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
  // Check if the variable is marked as declare target with link clause in
  // device codegen.
  if (CGF.getLangOpts().OpenMPIsTargetDevice) {
    Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T);
    if (Addr.isValid())
      return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
  }

  llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);

  if (VD->getTLSKind() != VarDecl::TLS_None)
    V = CGF.Builder.CreateThreadLocalAddress(V);

  llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
  CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
  Address Addr(V, RealVarTy, Alignment);
  // Emit reference to the private copy of the variable if it is an OpenMP
  // threadprivate variable.
  if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
      VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
    return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
                                          E->getExprLoc());
  }
  LValue LV = VD->getType()->isReferenceType() ?
        CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
                                      AlignmentSource::Decl) :
        CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
  setObjCGCLValueClass(CGF.getContext(), E, LV);
  return LV;
}

llvm::Constant *CodeGenModule::getRawFunctionPointer(GlobalDecl
GD,
                                                     llvm::Type *Ty) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  // weakref functions are aliased to their target; return the aliasee.
  if (FD->hasAttr<WeakRefAttr>()) {
    ConstantAddress aliasee = GetWeakRefReference(FD);
    return aliasee.getPointer();
  }

  llvm::Constant *V = GetAddrOfFunction(GD, Ty);
  return V;
}

static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E,
                                     GlobalDecl GD) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  llvm::Constant *V = CGF.CGM.getFunctionPointer(GD);
  CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
  return CGF.MakeAddrLValue(V, E->getType(), Alignment,
                            AlignmentSource::Decl);
}

static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD,
                                      llvm::Value *ThisValue) {

  return CGF.EmitLValueForLambdaField(FD, ThisValue);
}

/// Named Registers are named metadata pointing to the register name
/// which will be read from/written to as an argument to the intrinsic
/// @llvm.read/write_register.
/// So far, only the name is being passed down, but other options such as
/// register type, allocation type or even optimization options could be
/// passed down via the metadata node.
static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) {
  SmallString<64> Name("llvm.named.register.");
  AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>();
  assert(Asm->getLabel().size() < 64-Name.size() &&
         "Register name too big");
  Name.append(Asm->getLabel());
  llvm::NamedMDNode *M =
      CGM.getModule().getOrInsertNamedMetadata(Name);
  if (M->getNumOperands() == 0) {
    llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(),
                                              Asm->getLabel());
    llvm::Metadata *Ops[] = {Str};
    M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
  }

  CharUnits Alignment = CGM.getContext().getDeclAlign(VD);

  llvm::Value *Ptr =
      llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
  return LValue::MakeGlobalReg(Ptr, Alignment, VD->getType());
}

/// Determine whether we can emit a reference to \p VD from the current
/// context, despite not necessarily having seen an odr-use of the variable in
/// this context.
static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF,
                                               const DeclRefExpr *E,
                                               const VarDecl *VD) {
  // For a variable declared in an enclosing scope, do not emit a spurious
  // reference even if we have a capture, as that will emit an unwarranted
  // reference to our capture state, and will likely generate worse code than
  // emitting a local copy.
  if (E->refersToEnclosingVariableOrCapture())
    return false;

  // For a local declaration declared in this function, we can always reference
  // it even if we don't have an odr-use.
  if (VD->hasLocalStorage()) {
    return VD->getDeclContext() ==
           dyn_cast_or_null<DeclContext>(CGF.CurCodeDecl);
  }

  // For a global declaration, we can emit a reference to it if we know
  // for sure that we are able to emit a definition of it.
  VD = VD->getDefinition(CGF.getContext());
  if (!VD)
    return false;

  // Don't emit a spurious reference if it might be to a variable that only
  // exists on a different device / target.
  // FIXME: This is unnecessarily broad.
  // Check whether this would actually be a
  // cross-target reference.
  if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA ||
      CGF.getLangOpts().OpenCL) {
    return false;
  }

  // We can emit a spurious reference only if the linkage implies that we'll
  // be emitting a non-interposable symbol that will be retained until link
  // time.
  switch (CGF.CGM.getLLVMLinkageVarDefinition(VD)) {
  case llvm::GlobalValue::ExternalLinkage:
  case llvm::GlobalValue::LinkOnceODRLinkage:
  case llvm::GlobalValue::WeakODRLinkage:
  case llvm::GlobalValue::InternalLinkage:
  case llvm::GlobalValue::PrivateLinkage:
    return true;
  default:
    return false;
  }
}

LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
  const NamedDecl *ND = E->getDecl();
  QualType T = E->getType();

  assert(E->isNonOdrUse() != NOUR_Unevaluated &&
         "should not emit an unevaluated operand");

  if (const auto *VD = dyn_cast<VarDecl>(ND)) {
    // Global Named registers access via intrinsics only
    if (VD->getStorageClass() == SC_Register &&
        VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
      return EmitGlobalNamedRegister(VD, CGM);

    // If this DeclRefExpr does not constitute an odr-use of the variable,
    // we're not permitted to emit a reference to it in general, and it might
    // not be captured if capture would be necessary for a use.  Emit the
    // constant value directly instead.
    if (E->isNonOdrUse() == NOUR_Constant &&
        (VD->getType()->isReferenceType() ||
         !canEmitSpuriousReferenceToVariable(*this, E, VD))) {
      VD->getAnyInitializer(VD);
      llvm::Constant *Val = ConstantEmitter(*this).emitAbstract(
          E->getLocation(), *VD->evaluateValue(), VD->getType());
      assert(Val && "failed to emit constant expression");

      Address Addr = Address::invalid();
      if (!VD->getType()->isReferenceType()) {
        // Spill the constant value to a global.
        Addr = CGM.createUnnamedGlobalFrom(*VD, Val,
                                           getContext().getDeclAlign(VD));
        llvm::Type *VarTy = getTypes().ConvertTypeForMem(VD->getType());
        auto *PTy = llvm::PointerType::get(
            VarTy, getTypes().getTargetAddressSpace(VD->getType()));
        Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy, VarTy);
      } else {
        // Should we be using the alignment of the constant pointer we emitted?
        CharUnits Alignment =
            CGM.getNaturalTypeAlignment(E->getType(),
                                        /* BaseInfo= */ nullptr,
                                        /* TBAAInfo= */ nullptr,
                                        /* forPointeeType= */ true);
        Addr = makeNaturalAddressForPointer(Val, T, Alignment);
      }
      return MakeAddrLValue(Addr, T, AlignmentSource::Decl);
    }

    // FIXME: Handle other kinds of non-odr-use DeclRefExprs.

    // Check for captured variables.
    if (E->refersToEnclosingVariableOrCapture()) {
      VD = VD->getCanonicalDecl();
      if (auto *FD = LambdaCaptureFields.lookup(VD))
        return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
      if (CapturedStmtInfo) {
        auto I = LocalDeclMap.find(VD);
        if (I != LocalDeclMap.end()) {
          LValue CapLVal;
          if (VD->getType()->isReferenceType())
            CapLVal = EmitLoadOfReferenceLValue(I->second, VD->getType(),
                                                AlignmentSource::Decl);
          else
            CapLVal = MakeAddrLValue(I->second, T);
          // Mark lvalue as nontemporal if the variable is marked as nontemporal
          // in simd context.
          if (getLangOpts().OpenMP &&
              CGM.getOpenMPRuntime().isNontemporalDecl(VD))
            CapLVal.setNontemporal(/*Value=*/true);
          return CapLVal;
        }
        LValue CapLVal =
            EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
                                    CapturedStmtInfo->getContextValue());
        Address LValueAddress = CapLVal.getAddress();
        CapLVal = MakeAddrLValue(Address(LValueAddress.emitRawPointer(*this),
                                         LValueAddress.getElementType(),
                                         getContext().getDeclAlign(VD)),
                                 CapLVal.getType(),
                                 LValueBaseInfo(AlignmentSource::Decl),
                                 CapLVal.getTBAAInfo());
        // Mark lvalue as nontemporal if the variable is marked as nontemporal
        // in simd context.
        if (getLangOpts().OpenMP &&
            CGM.getOpenMPRuntime().isNontemporalDecl(VD))
          CapLVal.setNontemporal(/*Value=*/true);
        return CapLVal;
      }

      assert(isa<BlockDecl>(CurCodeDecl));
      Address addr = GetAddrOfBlockDecl(VD);
      return MakeAddrLValue(addr, T, AlignmentSource::Decl);
    }
  }

  // FIXME: We should be able to assert this for FunctionDecls as well!
  // FIXME: We should be able to assert this for all DeclRefExprs, not just
  // those with a valid source location.
  assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() ||
          !E->getLocation().isValid()) &&
         "Should not use decl without marking it used!");

  if (ND->hasAttr<WeakRefAttr>()) {
    const auto *VD = cast<ValueDecl>(ND);
    ConstantAddress Aliasee = CGM.GetWeakRefReference(VD);
    return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl);
  }

  if (const auto *VD = dyn_cast<VarDecl>(ND)) {
    // Check if this is a global variable.
    if (VD->hasLinkage() || VD->isStaticDataMember())
      return EmitGlobalVarDeclLValue(*this, E, VD);

    Address addr = Address::invalid();

    // The variable should generally be present in the local decl map.
    auto iter = LocalDeclMap.find(VD);
    if (iter != LocalDeclMap.end()) {
      addr = iter->second;

    // Otherwise, it might be static local we haven't emitted yet for
    // some reason; most likely, because it's in an outer function.
    } else if (VD->isStaticLocal()) {
      llvm::Constant *var = CGM.getOrCreateStaticVarDecl(
          *VD, CGM.getLLVMLinkageVarDefinition(VD));
      addr = Address(
          var, ConvertTypeForMem(VD->getType()), getContext().getDeclAlign(VD));

    // No other cases for now.
    } else {
      llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
    }

    // Handle threadlocal function locals.
    if (VD->getTLSKind() != VarDecl::TLS_None)
      addr = addr.withPointer(
          Builder.CreateThreadLocalAddress(addr.getBasePointer()),
          NotKnownNonNull);

    // Check for OpenMP threadprivate variables.
    if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
        VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
      return EmitThreadPrivateVarDeclLValue(
          *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()),
          E->getExprLoc());
    }

    // Drill into block byref variables.
    bool isBlockByref = VD->isEscapingByref();
    if (isBlockByref) {
      addr = emitBlockByrefAddress(addr, VD);
    }

    // Drill into reference types.
    LValue LV = VD->getType()->isReferenceType() ?
        EmitLoadOfReferenceLValue(addr, VD->getType(), AlignmentSource::Decl) :
        MakeAddrLValue(addr, T, AlignmentSource::Decl);

    bool isLocalStorage = VD->hasLocalStorage();

    bool NonGCable = isLocalStorage &&
                     !VD->getType()->isReferenceType() &&
                     !isBlockByref;
    if (NonGCable) {
      LV.getQuals().removeObjCGCAttr();
      LV.setNonGC(true);
    }

    bool isImpreciseLifetime =
      (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>());
    if (isImpreciseLifetime)
      LV.setARCPreciseLifetime(ARCImpreciseLifetime);
    setObjCGCLValueClass(getContext(), E, LV);
    return LV;
  }

  if (const auto *FD = dyn_cast<FunctionDecl>(ND))
    return EmitFunctionDeclLValue(*this, E, FD);

  // FIXME: While we're emitting a binding from an enclosing scope, all other
  // DeclRefExprs we see should be implicitly treated as if they also refer to
  // an enclosing scope.
  if (const auto *BD = dyn_cast<BindingDecl>(ND)) {
    if (E->refersToEnclosingVariableOrCapture()) {
      auto *FD = LambdaCaptureFields.lookup(BD);
      return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
    }
    return EmitLValue(BD->getBinding());
  }

  // We can form DeclRefExprs naming GUID declarations when reconstituting
  // non-type template parameters into expressions.
  if (const auto *GD = dyn_cast<MSGuidDecl>(ND))
    return MakeAddrLValue(CGM.GetAddrOfMSGuidDecl(GD), T,
                          AlignmentSource::Decl);

  if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) {
    auto ATPO = CGM.GetAddrOfTemplateParamObject(TPO);
    auto AS = getLangASFromTargetAS(ATPO.getAddressSpace());

    if (AS != T.getAddressSpace()) {
      auto TargetAS = getContext().getTargetAddressSpace(T.getAddressSpace());
      auto PtrTy = ATPO.getElementType()->getPointerTo(TargetAS);
      auto ASC = getTargetHooks().performAddrSpaceCast(
          CGM, ATPO.getPointer(), AS, T.getAddressSpace(), PtrTy);
      ATPO = ConstantAddress(ASC, ATPO.getElementType(), ATPO.getAlignment());
    }

    return MakeAddrLValue(ATPO, T, AlignmentSource::Decl);
  }

  llvm_unreachable("Unhandled DeclRefExpr");
}

LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
  // __extension__ doesn't affect lvalue-ness.
  if (E->getOpcode() == UO_Extension)
    return EmitLValue(E->getSubExpr());

  QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
  switch (E->getOpcode()) {
  default: llvm_unreachable("Unknown unary operator lvalue!");
  case UO_Deref: {
    QualType T = E->getSubExpr()->getType()->getPointeeType();
    assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");

    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &BaseInfo,
                                            &TBAAInfo);
    LValue LV = MakeAddrLValue(Addr, T, BaseInfo,
TBAAInfo);
    LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());

    // We should not generate __weak write barrier on indirect reference
    // of a pointer to object; as in void foo (__weak id *param); *param = 0;
    // But, we continue to generate __strong write barrier on indirect write
    // into a pointer to object.
    if (getLangOpts().ObjC &&
        getLangOpts().getGC() != LangOptions::NonGC &&
        LV.isObjCWeak())
      LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
    return LV;
  }
  case UO_Real:
  case UO_Imag: {
    LValue LV = EmitLValue(E->getSubExpr());
    assert(LV.isSimple() && "real/imag on non-ordinary l-value");

    // __real is valid on scalars.  This is a faster way of testing that.
    // __imag can only produce an rvalue on scalars.
    if (E->getOpcode() == UO_Real &&
        !LV.getAddress().getElementType()->isStructTy()) {
      assert(E->getSubExpr()->getType()->isArithmeticType());
      return LV;
    }

    QualType T = ExprTy->castAs<ComplexType>()->getElementType();

    Address Component =
        (E->getOpcode() == UO_Real
             ? emitAddrOfRealComponent(LV.getAddress(), LV.getType())
             : emitAddrOfImagComponent(LV.getAddress(), LV.getType()));
    LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(),
                                   CGM.getTBAAInfoForSubobject(LV, T));
    ElemLV.getQuals().addQualifiers(LV.getQuals());
    return ElemLV;
  }
  case UO_PreInc:
  case UO_PreDec: {
    LValue LV = EmitLValue(E->getSubExpr());
    bool isInc = E->getOpcode() == UO_PreInc;

    if (E->getType()->isAnyComplexType())
      EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
    else
      EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
    return LV;
  }
  }
}

LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
                        E->getType(), AlignmentSource::Decl);
}

LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
                        E->getType(), AlignmentSource::Decl);
}

LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
  auto SL = E->getFunctionName();
  assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
  StringRef FnName = CurFn->getName();
  // Strip the mangling-escape prefix if present.
  if (FnName.starts_with("\01"))
    FnName = FnName.substr(1);
  StringRef NameItems[] = {
      PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName};
  std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
  if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
    std::string Name = std::string(SL->getString());
    if (!Name.empty()) {
      // Blocks with the same name are disambiguated with a discriminator.
      unsigned Discriminator =
          CGM.getCXXABI().getMangleContext().getBlockId(BD, true);
      if (Discriminator)
        Name += "_" + Twine(Discriminator + 1).str();
      auto C = CGM.GetAddrOfConstantCString(Name, GVName.c_str());
      return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
    } else {
      auto C =
          CGM.GetAddrOfConstantCString(std::string(FnName), GVName.c_str());
      return
MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
    }
  }
  auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
  return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
}

/// Emit a type description suitable for use by a runtime sanitizer library.
/// The format of a type descriptor is
///
/// \code
///   { i16 TypeKind, i16 TypeInfo }
/// \endcode
///
/// followed by an array of i8 containing the type name. TypeKind is 0 for an
/// integer, 1 for a floating point value, and -1 for anything else.
llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
  // Only emit each type's descriptor once.
  if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T))
    return C;

  uint16_t TypeKind = -1;
  uint16_t TypeInfo = 0;

  if (T->isIntegerType()) {
    TypeKind = 0;
    // TypeInfo packs log2(bit width) and a signedness bit.
    TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
               (T->isSignedIntegerType() ? 1 : 0);
  } else if (T->isFloatingType()) {
    TypeKind = 1;
    TypeInfo = getContext().getTypeSize(T);
  }

  // Format the type name as if for a diagnostic, including quotes and
  // optionally an 'aka'.
  SmallString<32> Buffer;
  CGM.getDiags().ConvertArgToString(
      DiagnosticsEngine::ak_qualtype, (intptr_t)T.getAsOpaquePtr(), StringRef(),
      StringRef(), std::nullopt, Buffer, std::nullopt);

  llvm::Constant *Components[] = {
    Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
    llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
  };
  llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);

  auto *GV = new llvm::GlobalVariable(
      CGM.getModule(), Descriptor->getType(),
      /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor);
  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
  CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV);

  // Remember the descriptor for this type.
  CGM.setTypeDescriptorInMap(T, GV);

  return GV;
}

/// Lower a value to the intptr_t-sized representation expected by sanitizer
/// handlers: small ints/floats are widened to intptr_t, pointers are passed
/// directly, and everything else is spilled to a temporary and passed by
/// address.
llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
  llvm::Type *TargetTy = IntPtrTy;

  if (V->getType() == TargetTy)
    return V;

  // Floating-point types which fit into intptr_t are bitcast to integers
  // and then passed directly (after zero-extension, if necessary).
  if (V->getType()->isFloatingPointTy()) {
    unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedValue();
    if (Bits <= TargetTy->getIntegerBitWidth())
      V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(),
                                                         Bits));
  }

  // Integers which fit in intptr_t are zero-extended and passed directly.
  if (V->getType()->isIntegerTy() &&
      V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
    return Builder.CreateZExt(V, TargetTy);

  // Pointers are passed directly, everything else is passed by address.
  if (!V->getType()->isPointerTy()) {
    RawAddress Ptr = CreateDefaultAlignTempAlloca(V->getType());
    Builder.CreateStore(V, Ptr);
    V = Ptr.getPointer();
  }
  return Builder.CreatePtrToInt(V, TargetTy);
}

/// Emit a representation of a SourceLocation for passing to a handler
/// in a sanitizer runtime library.
/// The format for this data is:
/// \code
///   struct SourceLocation {
///     const char *Filename;
///     int32_t Line, Column;
///   };
/// \endcode
/// For an invalid SourceLocation, the Filename pointer is null.
llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
  llvm::Constant *Filename;
  int Line, Column;

  PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
  if (PLoc.isValid()) {
    StringRef FilenameString = PLoc.getFilename();

    // Honor -fsanitize-undefined-strip-path-components: a negative value
    // keeps that many trailing components, a positive value strips that many
    // leading components.
    int PathComponentsToStrip =
        CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip;
    if (PathComponentsToStrip < 0) {
      assert(PathComponentsToStrip != INT_MIN);
      int PathComponentsToKeep = -PathComponentsToStrip;
      auto I = llvm::sys::path::rbegin(FilenameString);
      auto E = llvm::sys::path::rend(FilenameString);
      while (I != E && --PathComponentsToKeep)
        ++I;

      FilenameString = FilenameString.substr(I - E);
    } else if (PathComponentsToStrip > 0) {
      auto I = llvm::sys::path::begin(FilenameString);
      auto E = llvm::sys::path::end(FilenameString);
      while (I != E && PathComponentsToStrip--)
        ++I;

      if (I != E)
        FilenameString =
            FilenameString.substr(I - llvm::sys::path::begin(FilenameString));
      else
        FilenameString = llvm::sys::path::filename(FilenameString);
    }

    auto FilenameGV =
        CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src");
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(
        cast<llvm::GlobalVariable>(
            FilenameGV.getPointer()->stripPointerCasts()));
    Filename = FilenameGV.getPointer();
    Line = PLoc.getLine();
    Column = PLoc.getColumn();
  } else {
    Filename = llvm::Constant::getNullValue(Int8PtrTy);
    Line = Column = 0;
  }

  llvm::Constant *Data[] = {Filename, Builder.getInt32(Line),
                            Builder.getInt32(Column)};

  return llvm::ConstantStruct::getAnon(Data);
}

namespace {
/// Specify under what conditions this check can be recovered
enum class CheckRecoverableKind {
  /// Always terminate program execution if this check fails.
  Unrecoverable,
  /// Check supports recovering, runtime has both fatal (noreturn) and
  /// non-fatal handlers for this check.
  Recoverable,
  /// Runtime conditionally aborts, always need to support recovery.
  AlwaysRecoverable
};
}

static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind) {
  assert(Kind.countPopulation() == 1);
  if (Kind == SanitizerKind::Vptr)
    return CheckRecoverableKind::AlwaysRecoverable;
  else if (Kind == SanitizerKind::Return || Kind == SanitizerKind::Unreachable)
    return CheckRecoverableKind::Unrecoverable;
  else
    return CheckRecoverableKind::Recoverable;
}

namespace {
struct SanitizerHandlerInfo {
  char const *const Name;
  unsigned Version;
};
}

const SanitizerHandlerInfo SanitizerHandlers[] = {
#define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version},
    LIST_SANITIZER_CHECKS
#undef SANITIZER_CHECK
};

static void emitCheckHandlerCall(CodeGenFunction &CGF,
                                 llvm::FunctionType *FnType,
                                 ArrayRef<llvm::Value *> FnArgs,
                                 SanitizerHandler CheckHandler,
                                 CheckRecoverableKind RecoverKind, bool IsFatal,
                                 llvm::BasicBlock *ContBB) {
  assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
  std::optional<ApplyDebugLocation> DL;
  if (!CGF.Builder.getCurrentDebugLocation()) {
    // Ensure that the call has at least an artificial debug location.
    DL.emplace(CGF, SourceLocation());
  }
  bool NeedsAbortSuffix =
      IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
  bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime;
  const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler];
  const StringRef CheckName = CheckInfo.Name;
  std::string FnName = "__ubsan_handle_" + CheckName.str();
  if (CheckInfo.Version && !MinimalRuntime)
    FnName += "_v" + llvm::utostr(CheckInfo.Version);
  if
(MinimalRuntime)3494FnName += "_minimal";3495if (NeedsAbortSuffix)3496FnName += "_abort";3497bool MayReturn =3498!IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;34993500llvm::AttrBuilder B(CGF.getLLVMContext());3501if (!MayReturn) {3502B.addAttribute(llvm::Attribute::NoReturn)3503.addAttribute(llvm::Attribute::NoUnwind);3504}3505B.addUWTableAttr(llvm::UWTableKind::Default);35063507llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(3508FnType, FnName,3509llvm::AttributeList::get(CGF.getLLVMContext(),3510llvm::AttributeList::FunctionIndex, B),3511/*Local=*/true);3512llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);3513if (!MayReturn) {3514HandlerCall->setDoesNotReturn();3515CGF.Builder.CreateUnreachable();3516} else {3517CGF.Builder.CreateBr(ContBB);3518}3519}35203521void CodeGenFunction::EmitCheck(3522ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,3523SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs,3524ArrayRef<llvm::Value *> DynamicArgs) {3525assert(IsSanitizerScope);3526assert(Checked.size() > 0);3527assert(CheckHandler >= 0 &&3528size_t(CheckHandler) < std::size(SanitizerHandlers));3529const StringRef CheckName = SanitizerHandlers[CheckHandler].Name;35303531llvm::Value *FatalCond = nullptr;3532llvm::Value *RecoverableCond = nullptr;3533llvm::Value *TrapCond = nullptr;3534for (int i = 0, n = Checked.size(); i < n; ++i) {3535llvm::Value *Check = Checked[i].first;3536// -fsanitize-trap= overrides -fsanitize-recover=.3537llvm::Value *&Cond =3538CGM.getCodeGenOpts().SanitizeTrap.has(Checked[i].second)3539? TrapCond3540: CGM.getCodeGenOpts().SanitizeRecover.has(Checked[i].second)3541? RecoverableCond3542: FatalCond;3543Cond = Cond ? 
Builder.CreateAnd(Cond, Check) : Check;3544}35453546if (ClSanitizeGuardChecks) {3547llvm::Value *Allow =3548Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::allow_ubsan_check),3549llvm::ConstantInt::get(CGM.Int8Ty, CheckHandler));35503551for (llvm::Value **Cond : {&FatalCond, &RecoverableCond, &TrapCond}) {3552if (*Cond)3553*Cond = Builder.CreateOr(*Cond, Builder.CreateNot(Allow));3554}3555}35563557if (TrapCond)3558EmitTrapCheck(TrapCond, CheckHandler);3559if (!FatalCond && !RecoverableCond)3560return;35613562llvm::Value *JointCond;3563if (FatalCond && RecoverableCond)3564JointCond = Builder.CreateAnd(FatalCond, RecoverableCond);3565else3566JointCond = FatalCond ? FatalCond : RecoverableCond;3567assert(JointCond);35683569CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second);3570assert(SanOpts.has(Checked[0].second));3571#ifndef NDEBUG3572for (int i = 1, n = Checked.size(); i < n; ++i) {3573assert(RecoverKind == getRecoverableKind(Checked[i].second) &&3574"All recoverable kinds in a single check must be same!");3575assert(SanOpts.has(Checked[i].second));3576}3577#endif35783579llvm::BasicBlock *Cont = createBasicBlock("cont");3580llvm::BasicBlock *Handlers = createBasicBlock("handler." 
+ CheckName);3581llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);3582// Give hint that we very much don't expect to execute the handler3583llvm::MDBuilder MDHelper(getLLVMContext());3584llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();3585Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);3586EmitBlock(Handlers);35873588// Handler functions take an i8* pointing to the (handler-specific) static3589// information block, followed by a sequence of intptr_t arguments3590// representing operand values.3591SmallVector<llvm::Value *, 4> Args;3592SmallVector<llvm::Type *, 4> ArgTypes;3593if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) {3594Args.reserve(DynamicArgs.size() + 1);3595ArgTypes.reserve(DynamicArgs.size() + 1);35963597// Emit handler arguments and create handler function type.3598if (!StaticArgs.empty()) {3599llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);3600auto *InfoPtr = new llvm::GlobalVariable(3601CGM.getModule(), Info->getType(), false,3602llvm::GlobalVariable::PrivateLinkage, Info, "", nullptr,3603llvm::GlobalVariable::NotThreadLocal,3604CGM.getDataLayout().getDefaultGlobalsAddressSpace());3605InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);3606CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);3607Args.push_back(InfoPtr);3608ArgTypes.push_back(Args.back()->getType());3609}36103611for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) {3612Args.push_back(EmitCheckValue(DynamicArgs[i]));3613ArgTypes.push_back(IntPtrTy);3614}3615}36163617llvm::FunctionType *FnType =3618llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);36193620if (!FatalCond || !RecoverableCond) {3621// Simple case: we need to generate a single handler call, either3622// fatal, or non-fatal.3623emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind,3624(FatalCond != nullptr), Cont);3625} else {3626// Emit two handler calls: first one for set of unrecoverable checks,3627// another one for 
recoverable.3628llvm::BasicBlock *NonFatalHandlerBB =3629createBasicBlock("non_fatal." + CheckName);3630llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);3631Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);3632EmitBlock(FatalHandlerBB);3633emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true,3634NonFatalHandlerBB);3635EmitBlock(NonFatalHandlerBB);3636emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false,3637Cont);3638}36393640EmitBlock(Cont);3641}36423643void CodeGenFunction::EmitCfiSlowPathCheck(3644SanitizerMask Kind, llvm::Value *Cond, llvm::ConstantInt *TypeId,3645llvm::Value *Ptr, ArrayRef<llvm::Constant *> StaticArgs) {3646llvm::BasicBlock *Cont = createBasicBlock("cfi.cont");36473648llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath");3649llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB);36503651llvm::MDBuilder MDHelper(getLLVMContext());3652llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();3653BI->setMetadata(llvm::LLVMContext::MD_prof, Node);36543655EmitBlock(CheckBB);36563657bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Kind);36583659llvm::CallInst *CheckCall;3660llvm::FunctionCallee SlowPathFn;3661if (WithDiag) {3662llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);3663auto *InfoPtr =3664new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,3665llvm::GlobalVariable::PrivateLinkage, Info);3666InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);3667CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);36683669SlowPathFn = CGM.getModule().getOrInsertFunction(3670"__cfi_slowpath_diag",3671llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy},3672false));3673CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr, InfoPtr});3674} else {3675SlowPathFn = CGM.getModule().getOrInsertFunction(3676"__cfi_slowpath",3677llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, 
false));3678CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr});3679}36803681CGM.setDSOLocal(3682cast<llvm::GlobalValue>(SlowPathFn.getCallee()->stripPointerCasts()));3683CheckCall->setDoesNotThrow();36843685EmitBlock(Cont);3686}36873688// Emit a stub for __cfi_check function so that the linker knows about this3689// symbol in LTO mode.3690void CodeGenFunction::EmitCfiCheckStub() {3691llvm::Module *M = &CGM.getModule();3692ASTContext &C = getContext();3693QualType QInt64Ty = C.getIntTypeForBitwidth(64, false);36943695FunctionArgList FnArgs;3696ImplicitParamDecl ArgCallsiteTypeId(C, QInt64Ty, ImplicitParamKind::Other);3697ImplicitParamDecl ArgAddr(C, C.VoidPtrTy, ImplicitParamKind::Other);3698ImplicitParamDecl ArgCFICheckFailData(C, C.VoidPtrTy,3699ImplicitParamKind::Other);3700FnArgs.push_back(&ArgCallsiteTypeId);3701FnArgs.push_back(&ArgAddr);3702FnArgs.push_back(&ArgCFICheckFailData);3703const CGFunctionInfo &FI =3704CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, FnArgs);37053706llvm::Function *F = llvm::Function::Create(3707llvm::FunctionType::get(VoidTy, {Int64Ty, VoidPtrTy, VoidPtrTy}, false),3708llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);3709CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);3710CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);3711F->setAlignment(llvm::Align(4096));3712CGM.setDSOLocal(F);37133714llvm::LLVMContext &Ctx = M->getContext();3715llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);3716// CrossDSOCFI pass is not executed if there is no executable code.3717SmallVector<llvm::Value*> Args{F->getArg(2), F->getArg(1)};3718llvm::CallInst::Create(M->getFunction("__cfi_check_fail"), Args, "", BB);3719llvm::ReturnInst::Create(Ctx, nullptr, BB);3720}37213722// This function is basically a switch over the CFI failure kind, which is3723// extracted from CFICheckFailData (1st function argument). 
Each case is either3724// llvm.trap or a call to one of the two runtime handlers, based on3725// -fsanitize-trap and -fsanitize-recover settings. Default case (invalid3726// failure kind) traps, but this should really never happen. CFICheckFailData3727// can be nullptr if the calling module has -fsanitize-trap behavior for this3728// check kind; in this case __cfi_check_fail traps as well.3729void CodeGenFunction::EmitCfiCheckFail() {3730SanitizerScope SanScope(this);3731FunctionArgList Args;3732ImplicitParamDecl ArgData(getContext(), getContext().VoidPtrTy,3733ImplicitParamKind::Other);3734ImplicitParamDecl ArgAddr(getContext(), getContext().VoidPtrTy,3735ImplicitParamKind::Other);3736Args.push_back(&ArgData);3737Args.push_back(&ArgAddr);37383739const CGFunctionInfo &FI =3740CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, Args);37413742llvm::Function *F = llvm::Function::Create(3743llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false),3744llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule());37453746CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);3747CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);3748F->setVisibility(llvm::GlobalValue::HiddenVisibility);37493750StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args,3751SourceLocation());37523753// This function is not affected by NoSanitizeList. This function does3754// not have a source location, but "src:*" would still apply. 
Revert any3755// changes to SanOpts made in StartFunction.3756SanOpts = CGM.getLangOpts().Sanitize;37573758llvm::Value *Data =3759EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false,3760CGM.getContext().VoidPtrTy, ArgData.getLocation());3761llvm::Value *Addr =3762EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false,3763CGM.getContext().VoidPtrTy, ArgAddr.getLocation());37643765// Data == nullptr means the calling module has trap behaviour for this check.3766llvm::Value *DataIsNotNullPtr =3767Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy));3768EmitTrapCheck(DataIsNotNullPtr, SanitizerHandler::CFICheckFail);37693770llvm::StructType *SourceLocationTy =3771llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty);3772llvm::StructType *CfiCheckFailDataTy =3773llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);37743775llvm::Value *V = Builder.CreateConstGEP2_32(3776CfiCheckFailDataTy,3777Builder.CreatePointerCast(Data, CfiCheckFailDataTy->getPointerTo(0)), 0,37780);37793780Address CheckKindAddr(V, Int8Ty, getIntAlign());3781llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);37823783llvm::Value *AllVtables = llvm::MetadataAsValue::get(3784CGM.getLLVMContext(),3785llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));3786llvm::Value *ValidVtable = Builder.CreateZExt(3787Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),3788{Addr, AllVtables}),3789IntPtrTy);37903791const std::pair<int, SanitizerMask> CheckKinds[] = {3792{CFITCK_VCall, SanitizerKind::CFIVCall},3793{CFITCK_NVCall, SanitizerKind::CFINVCall},3794{CFITCK_DerivedCast, SanitizerKind::CFIDerivedCast},3795{CFITCK_UnrelatedCast, SanitizerKind::CFIUnrelatedCast},3796{CFITCK_ICall, SanitizerKind::CFIICall}};37973798SmallVector<std::pair<llvm::Value *, SanitizerMask>, 5> Checks;3799for (auto CheckKindMaskPair : CheckKinds) {3800int Kind = CheckKindMaskPair.first;3801SanitizerMask Mask = CheckKindMaskPair.second;3802llvm::Value *Cond 
=3803Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind));3804if (CGM.getLangOpts().Sanitize.has(Mask))3805EmitCheck(std::make_pair(Cond, Mask), SanitizerHandler::CFICheckFail, {},3806{Data, Addr, ValidVtable});3807else3808EmitTrapCheck(Cond, SanitizerHandler::CFICheckFail);3809}38103811FinishFunction();3812// The only reference to this function will be created during LTO link.3813// Make sure it survives until then.3814CGM.addUsedGlobal(F);3815}38163817void CodeGenFunction::EmitUnreachable(SourceLocation Loc) {3818if (SanOpts.has(SanitizerKind::Unreachable)) {3819SanitizerScope SanScope(this);3820EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),3821SanitizerKind::Unreachable),3822SanitizerHandler::BuiltinUnreachable,3823EmitCheckSourceLocation(Loc), std::nullopt);3824}3825Builder.CreateUnreachable();3826}38273828void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,3829SanitizerHandler CheckHandlerID) {3830llvm::BasicBlock *Cont = createBasicBlock("cont");38313832// If we're optimizing, collapse all calls to trap down to just one per3833// check-type per function to save on code size.3834if ((int)TrapBBs.size() <= CheckHandlerID)3835TrapBBs.resize(CheckHandlerID + 1);38363837llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID];38383839if (!ClSanitizeDebugDeoptimization &&3840CGM.getCodeGenOpts().OptimizationLevel && TrapBB &&3841(!CurCodeDecl || !CurCodeDecl->hasAttr<OptimizeNoneAttr>())) {3842auto Call = TrapBB->begin();3843assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB");38443845Call->applyMergedLocation(Call->getDebugLoc(),3846Builder.getCurrentDebugLocation());3847Builder.CreateCondBr(Checked, Cont, TrapBB);3848} else {3849TrapBB = createBasicBlock("trap");3850Builder.CreateCondBr(Checked, Cont, TrapBB);3851EmitBlock(TrapBB);38523853llvm::CallInst *TrapCall = Builder.CreateCall(3854CGM.getIntrinsic(llvm::Intrinsic::ubsantrap),3855llvm::ConstantInt::get(CGM.Int8Ty,3856ClSanitizeDebugDeoptimization3857? 
TrapBB->getParent()->size()3858: static_cast<uint64_t>(CheckHandlerID)));38593860if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {3861auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",3862CGM.getCodeGenOpts().TrapFuncName);3863TrapCall->addFnAttr(A);3864}3865TrapCall->setDoesNotReturn();3866TrapCall->setDoesNotThrow();3867Builder.CreateUnreachable();3868}38693870EmitBlock(Cont);3871}38723873llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {3874llvm::CallInst *TrapCall =3875Builder.CreateCall(CGM.getIntrinsic(IntrID));38763877if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {3878auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",3879CGM.getCodeGenOpts().TrapFuncName);3880TrapCall->addFnAttr(A);3881}38823883return TrapCall;3884}38853886Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,3887LValueBaseInfo *BaseInfo,3888TBAAAccessInfo *TBAAInfo) {3889assert(E->getType()->isArrayType() &&3890"Array to pointer decay must have array source type!");38913892// Expressions of array type can't be bitfields or vector elements.3893LValue LV = EmitLValue(E);3894Address Addr = LV.getAddress();38953896// If the array type was an incomplete type, we need to make sure3897// the decay ends up being the right type.3898llvm::Type *NewTy = ConvertType(E->getType());3899Addr = Addr.withElementType(NewTy);39003901// Note that VLA pointers are always decayed, so we don't need to do3902// anything here.3903if (!E->getType()->isVariableArrayType()) {3904assert(isa<llvm::ArrayType>(Addr.getElementType()) &&3905"Expected pointer to array");3906Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");3907}39083909// The result of this decay conversion points to an array element within the3910// base lvalue. 
However, since TBAA currently does not support representing3911// accesses to elements of member arrays, we conservatively represent accesses3912// to the pointee object as if it had no any base lvalue specified.3913// TODO: Support TBAA for member arrays.3914QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType();3915if (BaseInfo) *BaseInfo = LV.getBaseInfo();3916if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType);39173918return Addr.withElementType(ConvertTypeForMem(EltType));3919}39203921/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an3922/// array to pointer, return the array subexpression.3923static const Expr *isSimpleArrayDecayOperand(const Expr *E) {3924// If this isn't just an array->pointer decay, bail out.3925const auto *CE = dyn_cast<CastExpr>(E);3926if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)3927return nullptr;39283929// If this is a decay from variable width array, bail out.3930const Expr *SubExpr = CE->getSubExpr();3931if (SubExpr->getType()->isVariableArrayType())3932return nullptr;39333934return SubExpr;3935}39363937static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,3938llvm::Type *elemType,3939llvm::Value *ptr,3940ArrayRef<llvm::Value*> indices,3941bool inbounds,3942bool signedIndices,3943SourceLocation loc,3944const llvm::Twine &name = "arrayidx") {3945if (inbounds) {3946return CGF.EmitCheckedInBoundsGEP(elemType, ptr, indices, signedIndices,3947CodeGenFunction::NotSubtraction, loc,3948name);3949} else {3950return CGF.Builder.CreateGEP(elemType, ptr, indices, name);3951}3952}39533954static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,3955ArrayRef<llvm::Value *> indices,3956llvm::Type *elementType, bool inbounds,3957bool signedIndices, SourceLocation loc,3958CharUnits align,3959const llvm::Twine &name = "arrayidx") {3960if (inbounds) {3961return CGF.EmitCheckedInBoundsGEP(addr, indices, elementType, signedIndices,3962CodeGenFunction::NotSubtraction, 
loc,3963align, name);3964} else {3965return CGF.Builder.CreateGEP(addr, indices, elementType, align, name);3966}3967}39683969static CharUnits getArrayElementAlign(CharUnits arrayAlign,3970llvm::Value *idx,3971CharUnits eltSize) {3972// If we have a constant index, we can use the exact offset of the3973// element we're accessing.3974if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {3975CharUnits offset = constantIdx->getZExtValue() * eltSize;3976return arrayAlign.alignmentAtOffset(offset);39773978// Otherwise, use the worst-case alignment for any element.3979} else {3980return arrayAlign.alignmentOfArrayElement(eltSize);3981}3982}39833984static QualType getFixedSizeElementType(const ASTContext &ctx,3985const VariableArrayType *vla) {3986QualType eltType;3987do {3988eltType = vla->getElementType();3989} while ((vla = ctx.getAsVariableArrayType(eltType)));3990return eltType;3991}39923993static bool hasBPFPreserveStaticOffset(const RecordDecl *D) {3994return D && D->hasAttr<BPFPreserveStaticOffsetAttr>();3995}39963997static bool hasBPFPreserveStaticOffset(const Expr *E) {3998if (!E)3999return false;4000QualType PointeeType = E->getType()->getPointeeType();4001if (PointeeType.isNull())4002return false;4003if (const auto *BaseDecl = PointeeType->getAsRecordDecl())4004return hasBPFPreserveStaticOffset(BaseDecl);4005return false;4006}40074008// Wraps Addr with a call to llvm.preserve.static.offset intrinsic.4009static Address wrapWithBPFPreserveStaticOffset(CodeGenFunction &CGF,4010Address &Addr) {4011if (!CGF.getTarget().getTriple().isBPF())4012return Addr;40134014llvm::Function *Fn =4015CGF.CGM.getIntrinsic(llvm::Intrinsic::preserve_static_offset);4016llvm::CallInst *Call = CGF.Builder.CreateCall(Fn, {Addr.emitRawPointer(CGF)});4017return Address(Call, Addr.getElementType(), Addr.getAlignment());4018}40194020/// Given an array base, check whether its member access belongs to a record4021/// with preserve_access_index attribute or not.4022static bool 
IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) {4023if (!ArrayBase || !CGF.getDebugInfo())4024return false;40254026// Only support base as either a MemberExpr or DeclRefExpr.4027// DeclRefExpr to cover cases like:4028// struct s { int a; int b[10]; };4029// struct s *p;4030// p[1].a4031// p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr.4032// p->b[5] is a MemberExpr example.4033const Expr *E = ArrayBase->IgnoreImpCasts();4034if (const auto *ME = dyn_cast<MemberExpr>(E))4035return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>();40364037if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {4038const auto *VarDef = dyn_cast<VarDecl>(DRE->getDecl());4039if (!VarDef)4040return false;40414042const auto *PtrT = VarDef->getType()->getAs<PointerType>();4043if (!PtrT)4044return false;40454046const auto *PointeeT = PtrT->getPointeeType()4047->getUnqualifiedDesugaredType();4048if (const auto *RecT = dyn_cast<RecordType>(PointeeT))4049return RecT->getDecl()->hasAttr<BPFPreserveAccessIndexAttr>();4050return false;4051}40524053return false;4054}40554056static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,4057ArrayRef<llvm::Value *> indices,4058QualType eltType, bool inbounds,4059bool signedIndices, SourceLocation loc,4060QualType *arrayType = nullptr,4061const Expr *Base = nullptr,4062const llvm::Twine &name = "arrayidx") {4063// All the indices except that last must be zero.4064#ifndef NDEBUG4065for (auto *idx : indices.drop_back())4066assert(isa<llvm::ConstantInt>(idx) &&4067cast<llvm::ConstantInt>(idx)->isZero());4068#endif40694070// Determine the element size of the statically-sized base. 
This is4071// the thing that the indices are expressed in terms of.4072if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {4073eltType = getFixedSizeElementType(CGF.getContext(), vla);4074}40754076// We can use that to compute the best alignment of the element.4077CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);4078CharUnits eltAlign =4079getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);40804081if (hasBPFPreserveStaticOffset(Base))4082addr = wrapWithBPFPreserveStaticOffset(CGF, addr);40834084llvm::Value *eltPtr;4085auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back());4086if (!LastIndex ||4087(!CGF.IsInPreservedAIRegion && !IsPreserveAIArrayBase(CGF, Base))) {4088addr = emitArraySubscriptGEP(CGF, addr, indices,4089CGF.ConvertTypeForMem(eltType), inbounds,4090signedIndices, loc, eltAlign, name);4091return addr;4092} else {4093// Remember the original array subscript for bpf target4094unsigned idx = LastIndex->getZExtValue();4095llvm::DIType *DbgInfo = nullptr;4096if (arrayType)4097DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc);4098eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(4099addr.getElementType(), addr.emitRawPointer(CGF), indices.size() - 1,4100idx, DbgInfo);4101}41024103return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign);4104}41054106/// The offset of a field from the beginning of the record.4107static bool getFieldOffsetInBits(CodeGenFunction &CGF, const RecordDecl *RD,4108const FieldDecl *FD, int64_t &Offset) {4109ASTContext &Ctx = CGF.getContext();4110const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);4111unsigned FieldNo = 0;41124113for (const Decl *D : RD->decls()) {4114if (const auto *Record = dyn_cast<RecordDecl>(D))4115if (getFieldOffsetInBits(CGF, Record, FD, Offset)) {4116Offset += Layout.getFieldOffset(FieldNo);4117return true;4118}41194120if (const auto *Field = dyn_cast<FieldDecl>(D))4121if (FD == Field) {4122Offset += 
Layout.getFieldOffset(FieldNo);4123return true;4124}41254126if (isa<FieldDecl>(D))4127++FieldNo;4128}41294130return false;4131}41324133/// Returns the relative offset difference between \p FD1 and \p FD2.4134/// \code4135/// offsetof(struct foo, FD1) - offsetof(struct foo, FD2)4136/// \endcode4137/// Both fields must be within the same struct.4138static std::optional<int64_t> getOffsetDifferenceInBits(CodeGenFunction &CGF,4139const FieldDecl *FD1,4140const FieldDecl *FD2) {4141const RecordDecl *FD1OuterRec =4142FD1->getParent()->getOuterLexicalRecordContext();4143const RecordDecl *FD2OuterRec =4144FD2->getParent()->getOuterLexicalRecordContext();41454146if (FD1OuterRec != FD2OuterRec)4147// Fields must be within the same RecordDecl.4148return std::optional<int64_t>();41494150int64_t FD1Offset = 0;4151if (!getFieldOffsetInBits(CGF, FD1OuterRec, FD1, FD1Offset))4152return std::optional<int64_t>();41534154int64_t FD2Offset = 0;4155if (!getFieldOffsetInBits(CGF, FD2OuterRec, FD2, FD2Offset))4156return std::optional<int64_t>();41574158return std::make_optional<int64_t>(FD1Offset - FD2Offset);4159}41604161LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,4162bool Accessed) {4163// The index must always be an integer, which is not an aggregate. Emit it4164// in lexical order (this complexity is, sadly, required by C++17).4165llvm::Value *IdxPre =4166(E->getLHS() == E->getIdx()) ? 
EmitScalarExpr(E->getIdx()) : nullptr;4167bool SignedIndices = false;4168auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * {4169auto *Idx = IdxPre;4170if (E->getLHS() != E->getIdx()) {4171assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");4172Idx = EmitScalarExpr(E->getIdx());4173}41744175QualType IdxTy = E->getIdx()->getType();4176bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();4177SignedIndices |= IdxSigned;41784179if (SanOpts.has(SanitizerKind::ArrayBounds))4180EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);41814182// Extend or truncate the index type to 32 or 64-bits.4183if (Promote && Idx->getType() != IntPtrTy)4184Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");41854186return Idx;4187};4188IdxPre = nullptr;41894190// If the base is a vector type, then we are forming a vector element lvalue4191// with this subscript.4192if (E->getBase()->getType()->isSubscriptableVectorType() &&4193!isa<ExtVectorElementExpr>(E->getBase())) {4194// Emit the vector as an lvalue to get its address.4195LValue LHS = EmitLValue(E->getBase());4196auto *Idx = EmitIdxAfterBase(/*Promote*/false);4197assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");4198return LValue::MakeVectorElt(LHS.getAddress(), Idx, E->getBase()->getType(),4199LHS.getBaseInfo(), TBAAAccessInfo());4200}42014202// All the other cases basically behave like simple offsetting.42034204// Handle the extvector case we ignored above.4205if (isa<ExtVectorElementExpr>(E->getBase())) {4206LValue LV = EmitLValue(E->getBase());4207auto *Idx = EmitIdxAfterBase(/*Promote*/true);4208Address Addr = EmitExtVectorElementLValue(LV);42094210QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();4211Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true,4212SignedIndices, E->getExprLoc());4213return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(),4214CGM.getTBAAInfoForSubobject(LV, 
EltType));4215}42164217LValueBaseInfo EltBaseInfo;4218TBAAAccessInfo EltTBAAInfo;4219Address Addr = Address::invalid();4220if (const VariableArrayType *vla =4221getContext().getAsVariableArrayType(E->getType())) {4222// The base must be a pointer, which is not an aggregate. Emit4223// it. It needs to be emitted first in case it's what captures4224// the VLA bounds.4225Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);4226auto *Idx = EmitIdxAfterBase(/*Promote*/true);42274228// The element count here is the total number of non-VLA elements.4229llvm::Value *numElements = getVLASize(vla).NumElts;42304231// Effectively, the multiply by the VLA size is part of the GEP.4232// GEP indexes are signed, and scaling an index isn't permitted to4233// signed-overflow, so we use the same semantics for our explicit4234// multiply. We suppress this if overflow is not undefined behavior.4235if (getLangOpts().isSignedOverflowDefined()) {4236Idx = Builder.CreateMul(Idx, numElements);4237} else {4238Idx = Builder.CreateNSWMul(Idx, numElements);4239}42404241Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),4242!getLangOpts().isSignedOverflowDefined(),4243SignedIndices, E->getExprLoc());42444245} else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){4246// Indexing over an interface, as in "NSString *P; P[4];"42474248// Emit the base pointer.4249Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);4250auto *Idx = EmitIdxAfterBase(/*Promote*/true);42514252CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);4253llvm::Value *InterfaceSizeVal =4254llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());42554256llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);42574258// We don't necessarily build correct LLVM struct types for ObjC4259// interfaces, so we can't rely on GEP to do this scaling4260// correctly, so we need to cast to i8*. 
FIXME: is this actually4261// true? A lot of other things in the fragile ABI would break...4262llvm::Type *OrigBaseElemTy = Addr.getElementType();42634264// Do the GEP.4265CharUnits EltAlign =4266getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);4267llvm::Value *EltPtr =4268emitArraySubscriptGEP(*this, Int8Ty, Addr.emitRawPointer(*this),4269ScaledIdx, false, SignedIndices, E->getExprLoc());4270Addr = Address(EltPtr, OrigBaseElemTy, EltAlign);4271} else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {4272// If this is A[i] where A is an array, the frontend will have decayed the4273// base to be a ArrayToPointerDecay implicit cast. While correct, it is4274// inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a4275// "gep x, i" here. Emit one "gep A, 0, i".4276assert(Array->getType()->isArrayType() &&4277"Array to pointer decay must have array source type!");4278LValue ArrayLV;4279// For simple multidimensional array indexing, set the 'accessed' flag for4280// better bounds-checking of the base expression.4281if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))4282ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);4283else4284ArrayLV = EmitLValue(Array);4285auto *Idx = EmitIdxAfterBase(/*Promote*/true);42864287if (SanOpts.has(SanitizerKind::ArrayBounds)) {4288// If the array being accessed has a "counted_by" attribute, generate4289// bounds checking code. The "count" field is at the top level of the4290// struct or in an anonymous struct, that's also at the top level. Future4291// expansions may allow the "count" to reside at any place in the struct,4292// but the value of "counted_by" will be a "simple" path to the count,4293// i.e. 
"a.b.count", so we shouldn't need the full force of EmitLValue or4294// similar to emit the correct GEP.4295const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =4296getLangOpts().getStrictFlexArraysLevel();42974298if (const auto *ME = dyn_cast<MemberExpr>(Array);4299ME &&4300ME->isFlexibleArrayMemberLike(getContext(), StrictFlexArraysLevel) &&4301ME->getMemberDecl()->getType()->isCountAttributedType()) {4302const FieldDecl *FAMDecl = dyn_cast<FieldDecl>(ME->getMemberDecl());4303if (const FieldDecl *CountFD = FindCountedByField(FAMDecl)) {4304if (std::optional<int64_t> Diff =4305getOffsetDifferenceInBits(*this, CountFD, FAMDecl)) {4306CharUnits OffsetDiff = CGM.getContext().toCharUnitsFromBits(*Diff);43074308// Create a GEP with a byte offset between the FAM and count and4309// use that to load the count value.4310Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(4311ArrayLV.getAddress(), Int8PtrTy, Int8Ty);43124313llvm::Type *CountTy = ConvertType(CountFD->getType());4314llvm::Value *Res = Builder.CreateInBoundsGEP(4315Int8Ty, Addr.emitRawPointer(*this),4316Builder.getInt32(OffsetDiff.getQuantity()), ".counted_by.gep");4317Res = Builder.CreateAlignedLoad(CountTy, Res, getIntAlign(),4318".counted_by.load");43194320// Now emit the bounds checking.4321EmitBoundsCheckImpl(E, Res, Idx, E->getIdx()->getType(),4322Array->getType(), Accessed);4323}4324}4325}4326}43274328// Propagate the alignment from the array itself to the result.4329QualType arrayType = Array->getType();4330Addr = emitArraySubscriptGEP(4331*this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},4332E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices,4333E->getExprLoc(), &arrayType, E->getBase());4334EltBaseInfo = ArrayLV.getBaseInfo();4335EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType());4336} else {4337// The base must be a pointer; emit it with an estimate of its alignment.4338Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, 
                                            &EltTBAAInfo);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);
    QualType ptrType = E->getBase()->getType();
    Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(),
                                 !getLangOpts().isSignedOverflowDefined(),
                                 SignedIndices, E->getExprLoc(), &ptrType,
                                 E->getBase());
  }

  // Wrap the computed element address, together with the base/TBAA info
  // gathered in the branch taken above, into the result lvalue.
  LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo);

  // Under ObjC garbage collection, record whether this subscript is a GC
  // candidate and classify the lvalue for the GC write-barrier machinery.
  if (getLangOpts().ObjC &&
      getLangOpts().getGC() != LangOptions::NonGC) {
    LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
    setObjCGCLValueClass(getContext(), E, LV);
  }
  return LV;
}

/// Emit an lvalue for a single matrix element subscript.  The two indices
/// are folded into one linear index (ColIdx * NumRows + RowIdx, i.e. the
/// column-major position) and packaged as a matrix-element LValue.
LValue CodeGenFunction::EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E) {
  assert(
      !E->isIncomplete() &&
      "incomplete matrix subscript expressions should be rejected during Sema");
  LValue Base = EmitLValue(E->getBase());
  llvm::Value *RowIdx = EmitScalarExpr(E->getRowIdx());
  llvm::Value *ColIdx = EmitScalarExpr(E->getColumnIdx());
  // NumRows is materialized at the same bit-width as the row index so the
  // mul/add below are performed in a single integer type.
  llvm::Value *NumRows = Builder.getIntN(
      RowIdx->getType()->getScalarSizeInBits(),
      E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows());
  llvm::Value *FinalIdx =
      Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx);
  return LValue::MakeMatrixElt(
      MaybeConvertMatrixAddress(Base.getAddress(), *this), FinalIdx,
      E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
}

/// Emit the base address for an array-section expression.  Nested array
/// sections are emitted recursively; array-typed bases are decayed to a
/// pointer to the element type, and pointer-typed bases are loaded.
static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
                                       LValueBaseInfo &BaseInfo,
                                       TBAAAccessInfo &TBAAInfo,
                                       QualType BaseTy, QualType ElTy,
                                       bool IsLowerBound) {
  LValue BaseLVal;
  if (auto *ASE = dyn_cast<ArraySectionExpr>(Base->IgnoreParenImpCasts())) {
    BaseLVal = CGF.EmitArraySectionExpr(ASE, IsLowerBound);
    if (BaseTy->isArrayType()) {
      Address Addr = BaseLVal.getAddress();
      BaseInfo = BaseLVal.getBaseInfo();

      // If the array type was an incomplete type, we need to make sure
      // the decay ends up being the right type.
      llvm::Type *NewTy =
          CGF.ConvertType(BaseTy);
      Addr = Addr.withElementType(NewTy);

      // Note that VLA pointers are always decayed, so we don't need to do
      // anything here.
      if (!BaseTy->isVariableArrayType()) {
        assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
               "Expected pointer to array");
        Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
      }

      return Addr.withElementType(CGF.ConvertTypeForMem(ElTy));
    }
    // Pointer-typed base of a nested section: merge the natural alignment
    // info of the element type into the caller's base/TBAA info and load
    // the pointer value.
    LValueBaseInfo TypeBaseInfo;
    TBAAAccessInfo TypeTBAAInfo;
    CharUnits Align =
        CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
    BaseInfo.mergeForCast(TypeBaseInfo);
    TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
    return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress()),
                   CGF.ConvertTypeForMem(ElTy), Align);
  }
  // Not a nested array section: emit the base as an ordinary pointer.
  return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
}

/// Emit an lvalue for an array section `base[lower-bound : length]`.
/// When IsLowerBound is true (or no ':' was written) the result addresses
/// element `lower-bound`; otherwise it addresses the last element of the
/// section, i.e. element `lower-bound + length - 1` (or `size - 1` when the
/// length is omitted and must come from the array type).
LValue CodeGenFunction::EmitArraySectionExpr(const ArraySectionExpr *E,
                                             bool IsLowerBound) {

  assert(!E->isOpenACCArraySection() &&
         "OpenACC Array section codegen not implemented");

  QualType BaseTy = ArraySectionExpr::getBaseOriginalType(E->getBase());
  QualType ResultExprTy;
  if (auto *AT = getContext().getAsArrayType(BaseTy))
    ResultExprTy = AT->getElementType();
  else
    ResultExprTy = BaseTy->getPointeeType();
  llvm::Value *Idx = nullptr;
  if (IsLowerBound || E->getColonLocFirst().isInvalid()) {
    // Requesting lower bound or upper bound, but without provided length and
    // without ':' symbol for the default length -> length = 1.
    // Idx = LowerBound ?: 0;
    if (auto *LowerBound = E->getLowerBound()) {
      Idx = Builder.CreateIntCast(
          EmitScalarExpr(LowerBound), IntPtrTy,
          LowerBound->getType()->hasSignedIntegerRepresentation());
    } else
      Idx = llvm::ConstantInt::getNullValue(IntPtrTy);
  } else {
    // Try to emit length or lower bound as constant. If this is possible, 1
    // is subtracted from constant length or lower bound. Otherwise, emit LLVM
    // IR (LB + Len) - 1.
    auto &C = CGM.getContext();
    auto *Length = E->getLength();
    llvm::APSInt ConstLength;
    if (Length) {
      // Idx = LowerBound + Length - 1;
      if (std::optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(C)) {
        ConstLength = CL->zextOrTrunc(PointerWidthInBits);
        Length = nullptr;  // Folded to a constant; no IR needed for it.
      }
      auto *LowerBound = E->getLowerBound();
      llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
      if (LowerBound) {
        if (std::optional<llvm::APSInt> LB =
                LowerBound->getIntegerConstantExpr(C)) {
          ConstLowerBound = LB->zextOrTrunc(PointerWidthInBits);
          LowerBound = nullptr;  // Likewise folded to a constant.
        }
      }
      // Fold the "- 1" into whichever operand is a compile-time constant so
      // at most one runtime subtraction is emitted below.
      if (!Length)
        --ConstLength;
      else if (!LowerBound)
        --ConstLowerBound;

      if (Length || LowerBound) {
        auto *LowerBoundVal =
            LowerBound
                ? Builder.CreateIntCast(
                      EmitScalarExpr(LowerBound), IntPtrTy,
                      LowerBound->getType()->hasSignedIntegerRepresentation())
                : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound);
        auto *LengthVal =
            Length
                ? Builder.CreateIntCast(
                      EmitScalarExpr(Length), IntPtrTy,
                      Length->getType()->hasSignedIntegerRepresentation())
                : llvm::ConstantInt::get(IntPtrTy, ConstLength);
        Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len",
                                /*HasNUW=*/false,
                                !getLangOpts().isSignedOverflowDefined());
        // Both operands were runtime values, so the "- 1" could not be
        // folded above; emit it explicitly.
        if (Length && LowerBound) {
          Idx = Builder.CreateSub(
              Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1",
              /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
        }
      } else
        Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound);
    } else {
      // No explicit length: the section covers the whole array, so
      // Idx = ArraySize - 1;
      QualType ArrayTy = BaseTy->isPointerType()
                             ? E->getBase()->IgnoreParenImpCasts()->getType()
                             : BaseTy;
      if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) {
        Length = VAT->getSizeExpr();
        if (std::optional<llvm::APSInt> L = Length->getIntegerConstantExpr(C)) {
          ConstLength = *L;
          Length = nullptr;
        }
      } else {
        auto *CAT = C.getAsConstantArrayType(ArrayTy);
        assert(CAT && "unexpected type for array initializer");
        ConstLength = CAT->getSize();
      }
      if (Length) {
        auto *LengthVal = Builder.CreateIntCast(
            EmitScalarExpr(Length), IntPtrTy,
            Length->getType()->hasSignedIntegerRepresentation());
        Idx = Builder.CreateSub(
            LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1",
            /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
      } else {
        ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
        --ConstLength;
        Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength);
      }
    }
  }
  assert(Idx);

  Address EltPtr = Address::invalid();
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) {
    // The base must be a pointer, which is not an aggregate.  Emit
    // it.  It needs to be emitted first in case it's what captures
    // the VLA bounds.
    Address Base =
        emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo,
                                BaseTy, VLA->getElementType(), IsLowerBound);
    // The element count here is the total number of non-VLA elements.
    llvm::Value *NumElements = getVLASize(VLA).NumElts;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply.  We suppress this if overflow is not undefined behavior.
    if (getLangOpts().isSignedOverflowDefined())
      Idx = Builder.CreateMul(Idx, NumElements);
    else
      Idx = Builder.CreateNSWMul(Idx, NumElements);
    EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
                                   !getLangOpts().isSignedOverflowDefined(),
                                   /*signedIndices=*/false, E->getExprLoc());
  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
    // If this is A[i] where A is an array, the frontend will have decayed the
    // base to be a ArrayToPointerDecay implicit cast.  While correct, it is
    // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
    // "gep x, i" here.  Emit one "gep A, 0, i".
    assert(Array->getType()->isArrayType() &&
           "Array to pointer decay must have array source type!");
    LValue ArrayLV;
    // For simple multidimensional array indexing, set the 'accessed' flag for
    // better bounds-checking of the base expression.
    if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
      ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
    else
      ArrayLV = EmitLValue(Array);

    // Propagate the alignment from the array itself to the result.
    EltPtr = emitArraySubscriptGEP(
        *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
        ResultExprTy, !getLangOpts().isSignedOverflowDefined(),
        /*signedIndices=*/false, E->getExprLoc());
    BaseInfo = ArrayLV.getBaseInfo();
    TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy);
  } else {
    // Plain pointer base: emit it and index off of it.
    Address Base =
        emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo, BaseTy,
                                ResultExprTy, IsLowerBound);
    EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
                                   !getLangOpts().isSignedOverflowDefined(),
                                   /*signedIndices=*/false, E->getExprLoc());
  }

  return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo);
}

LValue CodeGenFunction::
EmitExtVectorElementExpr(const ExtVectorElementExpr *E)
{
  // Emit the base vector as an l-value.
  LValue Base;

  // ExtVectorElementExpr's base can either be a vector or pointer to vector.
  if (E->isArrow()) {
    // If it is a pointer to a vector, emit the address and form an lvalue with
    // it.
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo);
    const auto *PT = E->getBase()->getType()->castAs<PointerType>();
    Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo);
    Base.getQuals().removeObjCGCAttr();
  } else if (E->getBase()->isGLValue()) {
    // Otherwise, if the base is an lvalue ( as in the case of foo.x.x),
    // emit the base as an lvalue.
    assert(E->getBase()->getType()->isVectorType());
    Base = EmitLValue(E->getBase());
  } else {
    // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
    assert(E->getBase()->getType()->isVectorType() &&
           "Result must be a vector");
    llvm::Value *Vec = EmitScalarExpr(E->getBase());

    // Store the vector to memory (because LValue wants an address).
    Address VecMem = CreateMemTemp(E->getBase()->getType());
    Builder.CreateStore(Vec, VecMem);
    Base = MakeAddrLValue(VecMem, E->getBase()->getType(),
                          AlignmentSource::Decl);
  }

  // The element access inherits the CVR qualifiers of the base vector.
  QualType type =
      E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());

  // Encode the element access list into a vector of unsigned indices.
  SmallVector<uint32_t, 4> Indices;
  E->getEncodedElementAccess(Indices);

  if (Base.isSimple()) {
    llvm::Constant *CV =
        llvm::ConstantDataVector::get(getLLVMContext(), Indices);
    return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
                                    Base.getBaseInfo(), TBAAAccessInfo());
  }
  assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");

  // The base is itself a swizzle (e.g. foo.xy.x): compose the two element
  // access lists by looking each new index up in the base's list.
  llvm::Constant *BaseElts = Base.getExtVectorElts();
  SmallVector<llvm::Constant *, 4> CElts;

  for (unsigned i = 0, e = Indices.size(); i != e; ++i)
    CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
  llvm::Constant *CV = llvm::ConstantVector::get(CElts);
  return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type,
                                  Base.getBaseInfo(), TBAAAccessInfo());
}

/// Emit an lvalue for a member access `s.x` / `p->x`.
LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
  // Some member accesses (e.g. to static members) are really decl
  // references; emit the (ignored) base for its side effects and defer.
  if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) {
    EmitIgnoredExpr(E->getBase());
    return EmitDeclRefLValue(DRE);
  }

  Expr *BaseExpr = E->getBase();
  // If this is s.x, emit s as an lvalue.  If it is s->x, emit s as a scalar.
  LValue BaseLV;
  if (E->isArrow()) {
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
    QualType PtrTy = BaseExpr->getType()->getPointeeType();
    // Skip sanitizer checks that are statically known to pass: 'this' is
    // aligned and non-null; a DeclRefExpr base cannot be null either.
    SanitizerSet SkippedChecks;
    bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr);
    if (IsBaseCXXThis)
      SkippedChecks.set(SanitizerKind::Alignment, true);
    if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr))
      SkippedChecks.set(SanitizerKind::Null, true);
    EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr, PtrTy,
                  /*Alignment=*/CharUnits::Zero(), SkippedChecks);
    BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
  } else
    BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);

  NamedDecl *ND = E->getMemberDecl();
  if (auto *Field = dyn_cast<FieldDecl>(ND)) {
    LValue LV = EmitLValueForField(BaseLV, Field);
    setObjCGCLValueClass(getContext(), E, LV);
    if (getLangOpts().OpenMP) {
      // If the member was explicitly marked as nontemporal, mark it as
      // nontemporal.
      // If the base lvalue is marked as nontemporal, mark access
      // to children as nontemporal too.
      if ((IsWrappedCXXThis(BaseExpr) &&
           CGM.getOpenMPRuntime().isNontemporalDecl(Field)) ||
          BaseLV.isNontemporal())
        LV.setNontemporal(/*Value=*/true);
    }
    return LV;
  }

  if (const auto *FD = dyn_cast<FunctionDecl>(ND))
    return EmitFunctionDeclLValue(*this, E, FD);

  llvm_unreachable("Unhandled member declaration!");
}

/// Given that we are currently emitting a lambda, emit an l-value for
/// one of its members.
///
LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field,
                                                 llvm::Value *ThisValue) {
  bool HasExplicitObjectParameter = false;
  const auto *MD = dyn_cast_if_present<CXXMethodDecl>(CurCodeDecl);
  if (MD) {
    HasExplicitObjectParameter = MD->isExplicitObjectMemberFunction();
    assert(MD->getParent()->isLambda());
    assert(MD->getParent() == Field->getParent());
  }
  LValue LambdaLV;
  if (HasExplicitObjectParameter) {
    // C++23 explicit object ("deducing this") lambda: the lambda object is
    // the first parameter rather than an implicit 'this'.
    const VarDecl *D = cast<CXXMethodDecl>(CurCodeDecl)->getParamDecl(0);
    auto It = LocalDeclMap.find(D);
    assert(It != LocalDeclMap.end() && "explicit parameter not loaded?");
    Address AddrOfExplicitObject = It->getSecond();
    if (D->getType()->isReferenceType())
      LambdaLV = EmitLoadOfReferenceLValue(AddrOfExplicitObject, D->getType(),
                                           AlignmentSource::Decl);
    else
      LambdaLV = MakeAddrLValue(AddrOfExplicitObject,
                                D->getType().getNonReferenceType());

    // Make sure we have an lvalue to the lambda itself and not a derived class.
    auto *ThisTy = D->getType().getNonReferenceType()->getAsCXXRecordDecl();
    auto *LambdaTy = cast<CXXRecordDecl>(Field->getParent());
    if (ThisTy != LambdaTy) {
      const CXXCastPath &BasePathArray = getContext().LambdaCastPaths.at(MD);
      Address Base = GetAddressOfBaseClass(
          LambdaLV.getAddress(), ThisTy, BasePathArray.begin(),
          BasePathArray.end(), /*NullCheckValue=*/false, SourceLocation());
      LambdaLV = MakeAddrLValue(Base, QualType{LambdaTy->getTypeForDecl(), 0});
    }
  } else {
    QualType LambdaTagType = getContext().getTagDeclType(Field->getParent());
    LambdaLV = MakeNaturalAlignAddrLValue(ThisValue, LambdaTagType);
  }
  return EmitLValueForField(LambdaLV, Field);
}

/// Convenience overload that uses the current 'this' value.
LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) {
  return EmitLValueForLambdaField(Field, CXXABIThisValue);
}

/// Get the field index in the debug info. The debug info structure/union
/// will ignore the unnamed bitfields.
unsigned CodeGenFunction::getDebugInfoFIndex(const RecordDecl *Rec,
                                             unsigned FieldIndex) {
  // Count how many unnamed bitfields precede FieldIndex; the debug-info
  // index is the AST index minus that count.
  unsigned I = 0, Skipped = 0;

  for (auto *F : Rec->getDefinition()->fields()) {
    if (I == FieldIndex)
      break;
    if (F->isUnnamedBitField())
      Skipped++;
    I++;
  }

  return FieldIndex - Skipped;
}

/// Get the address of a zero-sized field within a record. The resulting
/// address doesn't necessarily have the right type.
static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base,
                                       const FieldDecl *Field) {
  CharUnits Offset = CGF.getContext().toCharUnitsFromBits(
      CGF.getContext().getFieldOffset(Field));
  if (Offset.isZero())
    return Base;
  // Address the field with a raw byte offset since it has no LLVM field of
  // its own in the record layout.
  Base = Base.withElementType(CGF.Int8Ty);
  return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset);
}

/// Drill down to the storage of a field without walking into
/// reference types.
///
/// The resulting address doesn't necessarily have the right type.
static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base,
                                      const FieldDecl *field) {
  if (isEmptyFieldForLayout(CGF.getContext(), field))
    return emitAddrOfZeroSizeField(CGF, base, field);

  const RecordDecl *rec = field->getParent();

  unsigned idx =
      CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);

  return CGF.Builder.CreateStructGEP(base, idx, field->getName());
}

static Address
emitPreserveStructAccess(CodeGenFunction &CGF, LValue base,
                         Address addr, const FieldDecl *field) {
  // Like emitAddrOfFieldStorage, but emits a preserve.struct.access.index
  // intrinsic (with debug-info type attached) so the original field index
  // survives for CO-RE style relocation.
  const RecordDecl *rec = field->getParent();
  llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(
      base.getType(), rec->getLocation());

  unsigned idx =
      CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);

  return CGF.Builder.CreatePreserveStructAccessIndex(
      addr, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo);
}

/// Return true if the given type is, or transitively contains (via bases or
/// fields), a C++ class with a vtable pointer.
static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
  const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl();
  if (!RD)
    return false;

  if (RD->isDynamicClass())
    return true;

  for (const auto &Base : RD->bases())
    if (hasAnyVptr(Base.getType(), Context))
      return true;

  for (const FieldDecl *Field : RD->fields())
    if (hasAnyVptr(Field->getType(), Context))
      return true;

  return false;
}

LValue CodeGenFunction::EmitLValueForField(LValue base,
                                           const FieldDecl *field) {
  LValueBaseInfo BaseInfo = base.getBaseInfo();

  if (field->isBitField()) {
    const CGRecordLayout &RL =
        CGM.getTypes().getCGRecordLayout(field->getParent());
    const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
    // AAPCS requires volatile bitfield accesses to use the declared
    // container width rather than the packed storage unit.
    const bool UseVolatile = isAAPCS(CGM.getTarget()) &&
                             CGM.getCodeGenOpts().AAPCSBitfieldWidth &&
                             Info.VolatileStorageSize != 0 &&
                             field->getType()
                                 .withCVRQualifiers(base.getVRQualifiers())
                                 .isVolatileQualified();
    Address Addr = base.getAddress();
    unsigned Idx = RL.getLLVMFieldNo(field);
    const RecordDecl *rec = field->getParent();
    if (hasBPFPreserveStaticOffset(rec))
      Addr = wrapWithBPFPreserveStaticOffset(*this, Addr);
    if (!UseVolatile) {
      if (!IsInPreservedAIRegion &&
          (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
        if (Idx != 0)
          // For structs, we GEP to the field that the record layout suggests.
          Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
      } else {
        // Preserve the original access index for BPF CO-RE relocation.
        llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
            getContext().getRecordType(rec), rec->getLocation());
        Addr = Builder.CreatePreserveStructAccessIndex(
            Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()),
            DbgInfo);
      }
    }
    const unsigned SS =
        UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
    // Get the access type.
    llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS);
    Addr = Addr.withElementType(FieldIntTy);
    if (UseVolatile) {
      const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity();
      if (VolatileOffset)
        Addr = Builder.CreateConstInBoundsGEP(Addr, VolatileOffset);
    }

    QualType fieldType =
        field->getType().withCVRQualifiers(base.getVRQualifiers());
    // TODO: Support TBAA for bit fields.
    LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
    return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo,
                                TBAAAccessInfo());
  }

  // Fields of may-alias structures are may-alias themselves.
  // FIXME: this should get propagated down through anonymous structs
  // and unions.
  QualType FieldType = field->getType();
  const RecordDecl *rec = field->getParent();
  AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource();
  LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource));
  TBAAAccessInfo FieldTBAAInfo;
  if (base.getTBAAInfo().isMayAlias() ||
      rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) {
    FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
  } else if (rec->isUnion()) {
    // TODO: Support TBAA for unions.
    FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
  } else {
    // If no base type been assigned for the base access, then try to generate
    // one for this base lvalue.
    FieldTBAAInfo = base.getTBAAInfo();
    if (!FieldTBAAInfo.BaseType) {
      FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(base.getType());
      assert(!FieldTBAAInfo.Offset &&
             "Nonzero offset for an access with no base type!");
    }

    // Adjust offset to be relative to the base type.
    const ASTRecordLayout &Layout =
        getContext().getASTRecordLayout(field->getParent());
    unsigned CharWidth = getContext().getCharWidth();
    if (FieldTBAAInfo.BaseType)
      FieldTBAAInfo.Offset +=
          Layout.getFieldOffset(field->getFieldIndex()) / CharWidth;

    // Update the final access type and size.
    FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType);
    FieldTBAAInfo.Size =
        getContext().getTypeSizeInChars(FieldType).getQuantity();
  }

  Address addr = base.getAddress();
  if (hasBPFPreserveStaticOffset(rec))
    addr = wrapWithBPFPreserveStaticOffset(*this, addr);
  if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
    if (CGM.getCodeGenOpts().StrictVTablePointers &&
        ClassDef->isDynamicClass()) {
      // Getting to any field of dynamic object requires stripping dynamic
      // information provided by invariant.group.  This is because accessing
      // fields may leak the real address of dynamic object, which could result
      // in miscompilation when leaked pointer would be compared.
      auto *stripped =
          Builder.CreateStripInvariantGroup(addr.emitRawPointer(*this));
      addr = Address(stripped, addr.getElementType(), addr.getAlignment());
    }
  }

  unsigned RecordCVR = base.getVRQualifiers();
  if (rec->isUnion()) {
    // For unions, there is no pointer adjustment.
    if (CGM.getCodeGenOpts().StrictVTablePointers &&
        hasAnyVptr(FieldType, getContext()))
      // Because unions can easily skip invariant.barriers, we need to add
      // a barrier every time CXXRecord field with vptr is referenced.
      addr = Builder.CreateLaunderInvariantGroup(addr);

    if (IsInPreservedAIRegion ||
        (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
      // Remember the original union field index
      llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(base.getType(),
          rec->getLocation());
      addr =
          Address(Builder.CreatePreserveUnionAccessIndex(
                      addr.emitRawPointer(*this),
                      getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
                  addr.getElementType(), addr.getAlignment());
    }

    if (FieldType->isReferenceType())
      addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
  } else {
    if (!IsInPreservedAIRegion &&
        (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>()))
      // For structs, we GEP to the field that the record layout suggests.
      addr = emitAddrOfFieldStorage(*this, addr, field);
    else
      // Remember the original struct field index
      addr = emitPreserveStructAccess(*this, base, addr, field);
  }

  // If this is a reference field, load the reference right now.
  if (FieldType->isReferenceType()) {
    LValue RefLVal =
        MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
    if (RecordCVR & Qualifiers::Volatile)
      RefLVal.getQuals().addVolatile();
    addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo);

    // Qualifiers on the struct don't apply to the referencee.
    RecordCVR = 0;
    FieldType = FieldType->getPointeeType();
  }

  // Make sure that the address is pointing to the right type.  This is critical
  // for both unions and structs.
  addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));

  if (field->hasAttr<AnnotateAttr>())
    addr = EmitFieldAnnotations(field, addr);

  LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
  LV.getQuals().addCVRQualifiers(RecordCVR);

  // __weak attribute on a field is ignored.
  if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
    LV.getQuals().removeObjCGCAttr();

  return LV;
}

/// Like EmitLValueForField, but addresses a reference field's own storage
/// (so it can be initialized) instead of loading through the reference.
LValue
CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
                                                  const FieldDecl *Field) {
  QualType FieldType = Field->getType();

  if (!FieldType->isReferenceType())
    return EmitLValueForField(Base, Field);

  Address V = emitAddrOfFieldStorage(*this, Base.getAddress(), Field);

  // Make sure that the address is pointing to the right type.
  llvm::Type *llvmType = ConvertTypeForMem(FieldType);
  V = V.withElementType(llvmType);

  // TODO: Generate TBAA information that describes this access as a structure
  // member access and not just an access to an object of the field's type.
  // This
  // should be similar to what we do in EmitLValueForField().
  LValueBaseInfo BaseInfo = Base.getBaseInfo();
  AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource();
  LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource));
  return MakeAddrLValue(V, FieldType, FieldBaseInfo,
                        CGM.getTBAAInfoForSubobject(Base, FieldType));
}

/// Emit an lvalue for a compound literal: a global for file-scope literals,
/// otherwise a new temporary initialized from the literal's initializer.
LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
  if (E->isFileScope()) {
    ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
    return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl);
  }
  if (E->getType()->isVariablyModifiedType())
    // make sure to emit the VLA size.
    EmitVariablyModifiedType(E->getType());

  Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
  const Expr *InitExpr = E->getInitializer();
  LValue Result = MakeAddrLValue(DeclPtr, E->getType(), AlignmentSource::Decl);

  EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
                   /*Init*/ true);

  // Block-scope compound literals are destroyed at the end of the enclosing
  // scope in C.
  if (!getLangOpts().CPlusPlus)
    if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
      pushLifetimeExtendedDestroy(getCleanupKind(DtorKind), DeclPtr,
                                  E->getType(), getDestroyer(DtorKind),
                                  DtorKind & EHCleanup);

  return Result;
}

LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) {
  if (!E->isGLValue())
    // Initializing an aggregate temporary in C++11: T{...}.
    return EmitAggExprToLValue(E);

  // An lvalue initializer list must be initializing a reference.
  assert(E->isTransparent() && "non-transparent glvalue init list");
  return EmitLValue(E->getInit(0));
}

/// Emit the operand of a glvalue conditional operator. This is either a glvalue
/// or a (possibly-parenthesized) throw-expression.  If this is a throw, no
/// LValue is returned and the current block has been terminated.
static std::optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
                                                         const Expr *Operand) {
  if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) {
    CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false);
    return std::nullopt;
  }

  return CGF.EmitLValue(Operand);
}

namespace {
// Handle the case where the condition is a constant evaluatable simple integer,
// which means we don't have to separately handle the true/false blocks.
std::optional<LValue> HandleConditionalOperatorLValueSimpleCase(
    CodeGenFunction &CGF, const AbstractConditionalOperator *E) {
  const Expr *condExpr = E->getCond();
  bool CondExprBool;
  if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
    const Expr *Live = E->getTrueExpr(), *Dead = E->getFalseExpr();
    if (!CondExprBool)
      std::swap(Live, Dead);

    // The dead arm may still contain a label that is reachable by goto, in
    // which case we cannot fold and must fall back to full emission.
    if (!CGF.ContainsLabel(Dead)) {
      // If the true case is live, we need to track its region.
      if (CondExprBool)
        CGF.incrementProfileCounter(E);
      // If a throw expression we emit it and return an undefined lvalue
      // because it can't be used.
      if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
        CGF.EmitCXXThrowExpr(ThrowExpr);
        llvm::Type *ElemTy = CGF.ConvertType(Dead->getType());
        llvm::Type *Ty = CGF.UnqualPtrTy;
        return CGF.MakeAddrLValue(
            Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
            Dead->getType());
      }
      return CGF.EmitLValue(Live);
    }
  }
  return std::nullopt;
}
// Result of emitting both arms of a conditional: the final basic block of
// each arm plus the lvalue each produced (nullopt if the arm threw).
struct ConditionalInfo {
  llvm::BasicBlock *lhsBlock, *rhsBlock;
  std::optional<LValue> LHS, RHS;
};

// Create and generate the 3 blocks for a conditional operator.
// Leaves the 'current block' in the continuation basic block.
template<typename FuncTy>
ConditionalInfo EmitConditionalBlocks(CodeGenFunction &CGF,
                                      const AbstractConditionalOperator *E,
                                      const FuncTy &BranchGenFunc) {
  ConditionalInfo Info{CGF.createBasicBlock("cond.true"),
                       CGF.createBasicBlock("cond.false"), std::nullopt,
                       std::nullopt};
  llvm::BasicBlock *endBlock = CGF.createBasicBlock("cond.end");

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), Info.lhsBlock, Info.rhsBlock,
                           CGF.getProfileCount(E));

  // Any temporaries created here are conditional.
  CGF.EmitBlock(Info.lhsBlock);
  CGF.incrementProfileCounter(E);
  eval.begin(CGF);
  Info.LHS = BranchGenFunc(CGF, E->getTrueExpr());
  eval.end(CGF);
  // The arm may have emitted further blocks; record where it actually ended.
  Info.lhsBlock = CGF.Builder.GetInsertBlock();

  // Only branch to the continuation if the arm produced a value (i.e. did
  // not terminate the block with a throw).
  if (Info.LHS)
    CGF.Builder.CreateBr(endBlock);

  // Any temporaries created here are conditional.
  CGF.EmitBlock(Info.rhsBlock);
  eval.begin(CGF);
  Info.RHS = BranchGenFunc(CGF, E->getFalseExpr());
  eval.end(CGF);
  Info.rhsBlock = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(endBlock);

  return Info;
}
} // namespace

/// Emit a conditional operator whose result is unused, evaluating both arms
/// only for their side effects.
void CodeGenFunction::EmitIgnoredConditionalOperator(
    const AbstractConditionalOperator *E) {
  if (!E->isGLValue()) {
    // ?: here should be an aggregate.
    assert(hasAggregateEvaluationKind(E->getType()) &&
           "Unexpected conditional operator!");
    return (void)EmitAggExprToLValue(E);
  }

  OpaqueValueMapping binding(*this, E);
  if (HandleConditionalOperatorLValueSimpleCase(*this, E))
    return;

  EmitConditionalBlocks(*this, E, [](CodeGenFunction &CGF, const Expr *E) {
    CGF.EmitIgnoredExpr(E);
    return LValue{};
  });
}
LValue CodeGenFunction::EmitConditionalOperatorLValue(
    const AbstractConditionalOperator *expr) {
  if (!expr->isGLValue()) {
    // ?: here should be an aggregate.
    assert(hasAggregateEvaluationKind(expr->getType()) &&
           "Unexpected conditional operator!");
    return EmitAggExprToLValue(expr);
  }

  OpaqueValueMapping binding(*this, expr);
  if (std::optional<LValue> Res
=5152HandleConditionalOperatorLValueSimpleCase(*this, expr))5153return *Res;51545155ConditionalInfo Info = EmitConditionalBlocks(5156*this, expr, [](CodeGenFunction &CGF, const Expr *E) {5157return EmitLValueOrThrowExpression(CGF, E);5158});51595160if ((Info.LHS && !Info.LHS->isSimple()) ||5161(Info.RHS && !Info.RHS->isSimple()))5162return EmitUnsupportedLValue(expr, "conditional operator");51635164if (Info.LHS && Info.RHS) {5165Address lhsAddr = Info.LHS->getAddress();5166Address rhsAddr = Info.RHS->getAddress();5167Address result = mergeAddressesInConditionalExpr(5168lhsAddr, rhsAddr, Info.lhsBlock, Info.rhsBlock,5169Builder.GetInsertBlock(), expr->getType());5170AlignmentSource alignSource =5171std::max(Info.LHS->getBaseInfo().getAlignmentSource(),5172Info.RHS->getBaseInfo().getAlignmentSource());5173TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator(5174Info.LHS->getTBAAInfo(), Info.RHS->getTBAAInfo());5175return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource),5176TBAAInfo);5177} else {5178assert((Info.LHS || Info.RHS) &&5179"both operands of glvalue conditional are throw-expressions?");5180return Info.LHS ? *Info.LHS : *Info.RHS;5181}5182}51835184/// EmitCastLValue - Casts are never lvalues unless that cast is to a reference5185/// type. If the cast is to a reference, we can have the usual lvalue result,5186/// otherwise if a cast is needed by the code generator in an lvalue context,5187/// then it must mean that we need the address of an aggregate in order to5188/// access one of its members. 
/// This can happen for all the reasons that casts
/// are permitted with aggregate result, including noop aggregate casts, and
/// cast from scalar to union.
LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
  switch (E->getCastKind()) {
  // These cast kinds never produce lvalues in supported code paths.
  case CK_ToVoid:
  case CK_BitCast:
  case CK_LValueToRValueBitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToMemberPointer:
  case CK_NullToPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_IntegralCast:
  case CK_BooleanToSignedIntegral:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_DerivedToBaseMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_IntToOCLSampler:
  case CK_FloatingToFixedPoint:
  case CK_FixedPointToFloating:
  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToIntegral:
  case CK_IntegralToFixedPoint:
  case CK_MatrixCast:
  case CK_HLSLVectorTruncation:
  case CK_HLSLArrayRValue:
    return EmitUnsupportedLValue(E, "unexpected cast lvalue");

  case CK_Dependent:
    llvm_unreachable("dependent cast kind in IR gen!");

  case CK_BuiltinFnToFnPtr:
    llvm_unreachable("builtin functions are handled elsewhere");

  // These are never l-values; just use the aggregate emission code.
  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic:
    return EmitAggExprToLValue(E);

  case CK_Dynamic: {
    LValue LV = EmitLValue(E->getSubExpr());
    Address V = LV.getAddress();
    const auto *DCE = cast<CXXDynamicCastExpr>(E);
    return MakeNaturalAlignRawAddrLValue(EmitDynamicCast(V, DCE), E->getType());
  }

  // These casts don't change the address; emit the operand's lvalue directly.
  case CK_ConstructorConversion:
  case CK_UserDefinedConversion:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_LValueToRValue:
    return EmitLValue(E->getSubExpr());

  case CK_NoOp: {
    // CK_NoOp can model a qualification conversion, which can remove an array
    // bound and change the IR type.
    // FIXME: Once pointee types are removed from IR, remove this.
    LValue LV = EmitLValue(E->getSubExpr());
    // Propagate the volatile qualifier to LValue, if it exists in E.
    if (E->changesVolatileQualification())
      LV.getQuals() = E->getType().getQualifiers();
    if (LV.isSimple()) {
      Address V = LV.getAddress();
      if (V.isValid()) {
        llvm::Type *T = ConvertTypeForMem(E->getType());
        if (V.getElementType() != T)
          LV.setAddress(V.withElementType(T));
      }
    }
    return LV;
  }

  case CK_UncheckedDerivedToBase:
  case CK_DerivedToBase: {
    const auto *DerivedClassTy =
        E->getSubExpr()->getType()->castAs<RecordType>();
    auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());
    Address This = LV.getAddress();

    // Perform the derived-to-base conversion
    Address Base = GetAddressOfBaseClass(
        This, DerivedClassDecl, E->path_begin(), E->path_end(),
        /*NullCheckValue=*/false, E->getExprLoc());

    // TODO: Support accesses to members of base classes in TBAA. For now, we
    // conservatively pretend that the complete object is of the base class
    // type.
    return MakeAddrLValue(Base, E->getType(), LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
  }
  case CK_ToUnion:
    return EmitAggExprToLValue(E);
  case CK_BaseToDerived: {
    const auto *DerivedClassTy = E->getType()->castAs<RecordType>();
    auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());

    // Perform the base-to-derived conversion
    Address Derived = GetAddressOfDerivedClass(
        LV.getAddress(), DerivedClassDecl, E->path_begin(), E->path_end(),
        /*NullCheckValue=*/false);

    // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
    // performed and the object is not of the derived type.
    if (sanitizePerformTypeCheck())
      EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(), Derived,
                    E->getType());

    if (SanOpts.has(SanitizerKind::CFIDerivedCast))
      EmitVTablePtrCheckForCast(E->getType(), Derived,
                                /*MayBeNull=*/false, CFITCK_DerivedCast,
                                E->getBeginLoc());

    return MakeAddrLValue(Derived, E->getType(), LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
  }
  case CK_LValueBitCast: {
    // This must be a reinterpret_cast (or c-style equivalent).
    const auto *CE = cast<ExplicitCastExpr>(E);

    CGM.EmitExplicitCastExprType(CE, this);
    LValue LV = EmitLValue(E->getSubExpr());
    Address V = LV.getAddress().withElementType(
        ConvertTypeForMem(CE->getTypeAsWritten()->getPointeeType()));

    if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
      EmitVTablePtrCheckForCast(E->getType(), V,
                                /*MayBeNull=*/false, CFITCK_UnrelatedCast,
                                E->getBeginLoc());

    return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
  }
  case CK_AddressSpaceConversion: {
    LValue LV = EmitLValue(E->getSubExpr());
    QualType DestTy = getContext().getPointerType(E->getType());
    // Let the target rewrite the pointer between address spaces; the
    // alignment of the original lvalue is preserved.
    llvm::Value *V = getTargetHooks().performAddrSpaceCast(
        *this, LV.getPointer(*this),
        E->getSubExpr()->getType().getAddressSpace(),
        E->getType().getAddressSpace(), ConvertType(DestTy));
    return MakeAddrLValue(Address(V, ConvertTypeForMem(E->getType()),
                                  LV.getAddress().getAlignment()),
                          E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
  }
  case CK_ObjCObjectLValueCast: {
    LValue LV = EmitLValue(E->getSubExpr());
    Address V = LV.getAddress().withElementType(ConvertType(E->getType()));
    return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
  }
  case CK_ZeroToOCLOpaqueType:
    llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");

  case CK_VectorSplat: {
    // LValue results of vector splats are only supported in HLSL.
    if (!getLangOpts().HLSL)
      return EmitUnsupportedLValue(E, "unexpected cast lvalue");
    return EmitLValue(E->getSubExpr());
  }
  }

  llvm_unreachable("Unhandled lvalue cast kind?");
}

/// Emit the lvalue previously bound to an opaque value expression.
LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
  assert(OpaqueValueMappingData::shouldBindAsLValue(e));
  return getOrCreateOpaqueLValueMapping(e);
}

/// Return the lvalue bound for \p e, or emit its source expression if no
/// binding exists (only legal for unique OVEs).
LValue
CodeGenFunction::getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e) {
  assert(OpaqueValueMapping::shouldBindAsLValue(e));

  llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
    it = OpaqueLValues.find(e);

  if (it != OpaqueLValues.end())
    return it->second;

  assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
  return EmitLValue(e->getSourceExpr());
}

/// Return the rvalue bound for \p e, or emit its source expression if no
/// binding exists (only legal for unique OVEs).
RValue
CodeGenFunction::getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e) {
  assert(!OpaqueValueMapping::shouldBindAsLValue(e));

  llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
    it = OpaqueRValues.find(e);

  if (it != OpaqueRValues.end())
    return
it->second;

  assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
  return EmitAnyExpr(e->getSourceExpr());
}

/// Load the rvalue of field \p FD out of the record lvalue \p LV, according
/// to the field's evaluation kind.
RValue CodeGenFunction::EmitRValueForField(LValue LV,
                                           const FieldDecl *FD,
                                           SourceLocation Loc) {
  QualType FT = FD->getType();
  LValue FieldLV = EmitLValueForField(LV, FD);
  switch (getEvaluationKind(FT)) {
  case TEK_Complex:
    return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
  case TEK_Aggregate:
    return FieldLV.asAggregateRValue();
  case TEK_Scalar:
    // This routine is used to load fields one-by-one to perform a copy, so
    // don't load reference fields.
    if (FD->getType()->isReferenceType())
      return RValue::get(FieldLV.getPointer(*this));
    // Call EmitLoadOfScalar except when the lvalue is a bitfield to emit a
    // primitive load.
    if (FieldLV.isBitField())
      return EmitLoadOfLValue(FieldLV, Loc);
    return RValue::get(EmitLoadOfScalar(FieldLV, Loc));
  }
  llvm_unreachable("bad evaluation kind");
}

//===--------------------------------------------------------------------===//
//                             Expression Emission
//===--------------------------------------------------------------------===//

/// Emit a call expression, dispatching to the specialized emitters for
/// blocks, C++ member calls, CUDA kernel calls, operator calls, builtins,
/// and pseudo-destructors before falling back to a plain call.
RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
                                     ReturnValueSlot ReturnValue) {
  // Builtins never have block type.
  if (E->getCallee()->getType()->isBlockPointerType())
    return EmitBlockCallExpr(E, ReturnValue);

  if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
    return EmitCXXMemberCallExpr(CE, ReturnValue);

  if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
    return EmitCUDAKernelCallExpr(CE, ReturnValue);

  // A CXXOperatorCallExpr is created even for explicit object methods, but
  // these should be treated like static function call.
  if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
    if (const auto *MD =
            dyn_cast_if_present<CXXMethodDecl>(CE->getCalleeDecl());
        MD && MD->isImplicitObjectMemberFunction())
      return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);

  CGCallee callee = EmitCallee(E->getCallee());

  if (callee.isBuiltin()) {
    return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(),
                           E, ReturnValue);
  }

  if (callee.isPseudoDestructor()) {
    return EmitCXXPseudoDestructorExpr(callee.getPseudoDestructorExpr());
  }

  return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue);
}

/// Emit a CallExpr without considering whether it might be a subclass.
RValue CodeGenFunction::EmitSimpleCallExpr(const CallExpr *E,
                                           ReturnValueSlot ReturnValue) {
  CGCallee Callee = EmitCallee(E->getCallee());
  return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue);
}

// Detect the unusual situation where an inline version is shadowed by a
// non-inline version. In that case we should pick the external one
// everywhere. That's GCC behavior too.
static bool OnlyHasInlineBuiltinDeclaration(const FunctionDecl *FD) {
  for (const FunctionDecl *PD = FD; PD; PD = PD->getPreviousDecl())
    if (!PD->isInlineBuiltinDeclaration())
      return false;
  return true;
}

/// Build a CGCallee for a direct call to \p GD, handling inline builtins,
/// replaceable builtins, and CUDA kernel stubs.
static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (auto builtinID = FD->getBuiltinID()) {
    std::string NoBuiltinFD = ("no-builtin-" + FD->getName()).str();
    std::string NoBuiltins = "no-builtins";

    StringRef Ident = CGF.CGM.getMangledName(GD);
    std::string FDInlineName = (Ident + ".inline").str();

    bool IsPredefinedLibFunction =
        CGF.getContext().BuiltinInfo.isPredefinedLibFunction(builtinID);
    bool HasAttributeNoBuiltin =
        CGF.CurFn->getAttributes().hasFnAttr(NoBuiltinFD) ||
        CGF.CurFn->getAttributes().hasFnAttr(NoBuiltins);

    // When directly calling an inline builtin, call it through its mangled
    // name to make it clear it's not the actual builtin.
    if (CGF.CurFn->getName() != FDInlineName &&
        OnlyHasInlineBuiltinDeclaration(FD)) {
      llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
      llvm::Function *Fn = llvm::cast<llvm::Function>(CalleePtr);
      llvm::Module *M = Fn->getParent();
      // Create the ".inline" clone lazily, once per module.
      llvm::Function *Clone = M->getFunction(FDInlineName);
      if (!Clone) {
        Clone = llvm::Function::Create(Fn->getFunctionType(),
                                       llvm::GlobalValue::InternalLinkage,
                                       Fn->getAddressSpace(), FDInlineName, M);
        Clone->addFnAttr(llvm::Attribute::AlwaysInline);
      }
      return CGCallee::forDirect(Clone, GD);
    }

    // Replaceable builtins provide their own implementation of a builtin. If we
    // are in an inline builtin implementation, avoid trivial infinite
    // recursion. Honor __attribute__((no_builtin("foo"))) or
    // __attribute__((no_builtin)) on the current function unless foo is
    // not a predefined library function which means we must generate the
    // builtin no matter what.
    else if (!IsPredefinedLibFunction || !HasAttributeNoBuiltin)
      return CGCallee::forBuiltin(builtinID, FD);
  }

  llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
  // On the CUDA host side, a __global__ function must be called through its
  // kernel stub rather than the device function itself.
  if (CGF.CGM.getLangOpts().CUDA && !CGF.CGM.getLangOpts().CUDAIsDevice &&
      FD->hasAttr<CUDAGlobalAttr>())
    CalleePtr = CGF.CGM.getCUDARuntime().getKernelStub(
        cast<llvm::GlobalValue>(CalleePtr->stripPointerCasts()));

  return CGCallee::forDirect(CalleePtr, GD);
}

/// Resolve the callee expression of a call to a CGCallee, classifying it as
/// a direct call, builtin, pseudo-destructor, or indirect call.
CGCallee CodeGenFunction::EmitCallee(const Expr *E) {
  E = E->IgnoreParens();

  // Look through function-to-pointer decay.
  if (auto ICE = dyn_cast<ImplicitCastExpr>(E)) {
    if (ICE->getCastKind() == CK_FunctionToPointerDecay ||
        ICE->getCastKind() == CK_BuiltinFnToFnPtr) {
      return EmitCallee(ICE->getSubExpr());
    }

  // Resolve direct calls.
  } else if (auto DRE = dyn_cast<DeclRefExpr>(E)) {
    if (auto FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
      return EmitDirectCallee(*this, FD);
    }
  } else if (auto ME =
dyn_cast<MemberExpr>(E)) {
    if (auto FD = dyn_cast<FunctionDecl>(ME->getMemberDecl())) {
      // The base is evaluated only for its side effects.
      EmitIgnoredExpr(ME->getBase());
      return EmitDirectCallee(*this, FD);
    }

  // Look through template substitutions.
  } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
    return EmitCallee(NTTP->getReplacement());

  // Treat pseudo-destructor calls differently.
  } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(E)) {
    return CGCallee::forPseudoDestructor(PDE);
  }

  // Otherwise, we have an indirect reference.
  llvm::Value *calleePtr;
  QualType functionType;
  if (auto ptrType = E->getType()->getAs<PointerType>()) {
    calleePtr = EmitScalarExpr(E);
    functionType = ptrType->getPointeeType();
  } else {
    functionType = E->getType();
    calleePtr = EmitLValue(E, KnownNonNull).getPointer(*this);
  }
  assert(functionType->isFunctionType());

  GlobalDecl GD;
  if (const auto *VD =
          dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee()))
    GD = GlobalDecl(VD);

  CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD);
  CGPointerAuthInfo pointerAuth = CGM.getFunctionPointerAuthInfo(functionType);
  CGCallee callee(calleeInfo, calleePtr, pointerAuth);
  return callee;
}

/// Emit a binary operator in an lvalue context: comma, pointer-to-member
/// access, or assignment.
LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
  // Comma expressions just emit their LHS then their RHS as an l-value.
  if (E->getOpcode() == BO_Comma) {
    EmitIgnoredExpr(E->getLHS());
    EnsureInsertPoint();
    return EmitLValue(E->getRHS());
  }

  if (E->getOpcode() == BO_PtrMemD ||
      E->getOpcode() == BO_PtrMemI)
    return EmitPointerToDataMemberBinaryExpr(E);

  assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");

  // Note that in all of these cases, __block variables need the RHS
  // evaluated first just in case the variable gets moved by the RHS.

  switch (getEvaluationKind(E->getType())) {
  case TEK_Scalar: {
    // ARC ownership qualifiers on the LHS need dedicated store emitters.
    switch (E->getLHS()->getType().getObjCLifetime()) {
    case Qualifiers::OCL_Strong:
      return EmitARCStoreStrong(E, /*ignored*/ false).first;

    case Qualifiers::OCL_Autoreleasing:
      return EmitARCStoreAutoreleasing(E).first;

    // No reason to do any of these differently.
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Weak:
      break;
    }

    // TODO: Can we de-duplicate this code with the corresponding code in
    // CGExprScalar, similar to the way EmitCompoundAssignmentLValue works?
    RValue RV;
    llvm::Value *Previous = nullptr;
    QualType SrcType = E->getRHS()->getType();
    // Check if LHS is a bitfield, if RHS contains an implicit cast expression
    // we want to extract that value and potentially (if the bitfield sanitizer
    // is enabled) use it to check for an implicit conversion.
    if (E->getLHS()->refersToBitField()) {
      llvm::Value *RHS =
          EmitWithOriginalRHSBitfieldAssignment(E, &Previous, &SrcType);
      RV = RValue::get(RHS);
    } else
      RV = EmitAnyExpr(E->getRHS());

    LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store);

    if (RV.isScalar())
      EmitNullabilityCheck(LV, RV.getScalarVal(), E->getExprLoc());

    if (LV.isBitField()) {
      llvm::Value *Result = nullptr;
      // If bitfield sanitizers are enabled we want to use the result
      // to check whether a truncation or sign change has occurred.
      if (SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
        EmitStoreThroughBitfieldLValue(RV, LV, &Result);
      else
        EmitStoreThroughBitfieldLValue(RV, LV);

      // If the expression contained an implicit conversion, make sure
      // to use the value before the scalar conversion.
      llvm::Value *Src = Previous ? Previous : RV.getScalarVal();
      QualType DstType = E->getLHS()->getType();
      EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
                                  LV.getBitFieldInfo(), E->getExprLoc());
    } else
      EmitStoreThroughLValue(RV, LV);

    if (getLangOpts().OpenMP)
      CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
                                                                E->getLHS());
    return LV;
  }

  case TEK_Complex:
    return EmitComplexAssignmentLValue(E);

  case TEK_Aggregate:
    return EmitAggExprToLValue(E);
  }
  llvm_unreachable("bad evaluation kind");
}

/// Emit a call expression used in an lvalue context (must return an
/// aggregate or a reference).
LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
  RValue RV = EmitCallExpr(E);

  if (!RV.isScalar())
    return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
                          AlignmentSource::Decl);

  assert(E->getCallReturnType(getContext())->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
}

LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
  // FIXME: This shouldn't require another copy.
  return EmitAggExprToLValue(E);
}

/// Emit a constructor call into a temporary and bind the result as an lvalue.
LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
  assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
         && "binding l-value to type which needs a temporary");
  AggValueSlot Slot = CreateAggTemp(E->getType());
  EmitCXXConstructExpr(E, Slot);
  return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl);
}

LValue
CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
  return MakeNaturalAlignRawAddrLValue(EmitCXXTypeidExpr(E), E->getType());
}

Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
  return CGM.GetAddrOfMSGuidDecl(E->getGuidDecl())
      .withElementType(ConvertType(E->getType()));
}

LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) {
  return
MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType(),
                        AlignmentSource::Decl);
}

/// Emit a C++ bind-temporary expression as an lvalue, registering the
/// temporary's destructor.
LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
  AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
  Slot.setExternallyDestructed();
  EmitAggExpr(E->getSubExpr(), Slot);
  EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddress());
  return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl);
}

/// Emit an Objective-C message send in an lvalue context (must return an
/// aggregate or a reference).
LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
  RValue RV = EmitObjCMessageExpr(E);

  if (!RV.isScalar())
    return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
                          AlignmentSource::Decl);

  assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
}

LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
  Address V =
      CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector());
  return MakeAddrLValue(V, E->getType(), AlignmentSource::Decl);
}

/// Compute the runtime offset of \p Ivar within \p Interface via the
/// Objective-C runtime.
llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
                                             const ObjCIvarDecl *Ivar) {
  return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
}

/// Same as EmitIvarOffset, but converted to the target's ptrdiff type.
llvm::Value *
CodeGenFunction::EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface,
                                             const ObjCIvarDecl *Ivar) {
  llvm::Value *OffsetValue = EmitIvarOffset(Interface, Ivar);
  QualType PointerDiffType = getContext().getPointerDiffType();
  return Builder.CreateZExtOrTrunc(OffsetValue,
                                   getTypes().ConvertType(PointerDiffType));
}

LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
                                          llvm::Value *BaseValue,
                                          const ObjCIvarDecl *Ivar,
                                          unsigned CVRQualifiers) {
  return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
                                                   Ivar, CVRQualifiers);
}

/// Emit an Objective-C ivar reference as an lvalue.
LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
  // FIXME: A lot of the code below could be shared with EmitMemberExpr.
  llvm::Value *BaseValue = nullptr;
  const Expr *BaseExpr = E->getBase();
  Qualifiers BaseQuals;
  QualType ObjectTy;
  if (E->isArrow()) {
    // obj->ivar: the base is a pointer rvalue.
    BaseValue = EmitScalarExpr(BaseExpr);
    ObjectTy = BaseExpr->getType()->getPointeeType();
    BaseQuals = ObjectTy.getQualifiers();
  } else {
    // obj.ivar: the base is an lvalue; take its address.
    LValue BaseLV = EmitLValue(BaseExpr);
    BaseValue = BaseLV.getPointer(*this);
    ObjectTy = BaseExpr->getType();
    BaseQuals = ObjectTy.getQualifiers();
  }

  LValue LV =
    EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
                      BaseQuals.getCVRQualifiers());
  setObjCGCLValueClass(getContext(), E, LV);
  return LV;
}

LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
  // Can only get l-value for message expression returning aggregate type
  RValue RV = EmitAnyExprToTemp(E);
  return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
                        AlignmentSource::Decl);
}

RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee,
                                 const CallExpr *E, ReturnValueSlot ReturnValue,
                                 llvm::Value *Chain) {
  // Get the actual function type.
// The callee type will always be a pointer to
  // function type or a block pointer type.
  assert(CalleeType->isFunctionPointerType() &&
         "Call must have function pointer type!");

  const Decl *TargetDecl =
      OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();

  assert((!isa_and_present<FunctionDecl>(TargetDecl) ||
          !cast<FunctionDecl>(TargetDecl)->isImmediateFunction()) &&
         "trying to emit a call to an immediate function");

  CalleeType = getContext().getCanonicalType(CalleeType);

  auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType();

  CGCallee Callee = OrigCallee;

  // UBSan -fsanitize=function: for indirect calls through a prototyped
  // function type, verify the callee's prefix signature and type hash.
  if (SanOpts.has(SanitizerKind::Function) &&
      (!TargetDecl || !isa<FunctionDecl>(TargetDecl)) &&
      !isa<FunctionNoProtoType>(PointeeType)) {
    if (llvm::Constant *PrefixSig =
            CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
      SanitizerScope SanScope(this);
      auto *TypeHash = getUBSanFunctionTypeHash(PointeeType);

      llvm::Type *PrefixSigType = PrefixSig->getType();
      llvm::StructType *PrefixStructTy = llvm::StructType::get(
          CGM.getLLVMContext(), {PrefixSigType, Int32Ty}, /*isPacked=*/true);

      llvm::Value *CalleePtr = Callee.getFunctionPointer();
      if (CGM.getCodeGenOpts().PointerAuth.FunctionPointers) {
        // Use raw pointer since we are using the callee pointer as data here.
        Address Addr =
            Address(CalleePtr, CalleePtr->getType(),
                    CharUnits::fromQuantity(
                        CalleePtr->getPointerAlignment(CGM.getDataLayout())),
                    Callee.getPointerAuthInfo(), nullptr);
        CalleePtr = Addr.emitRawPointer(*this);
      }

      // On 32-bit Arm, the low bit of a function pointer indicates whether
      // it's using the Arm or Thumb instruction set. The actual first
      // instruction lives at the same address either way, so we must clear
      // that low bit before using the function address to find the prefix
      // structure.
      //
      // This applies to both Arm and Thumb target triples, because
      // either one could be used in an interworking context where it
      // might be passed function pointers of both types.
      llvm::Value *AlignedCalleePtr;
      if (CGM.getTriple().isARM() || CGM.getTriple().isThumb()) {
        llvm::Value *CalleeAddress =
            Builder.CreatePtrToInt(CalleePtr, IntPtrTy);
        llvm::Value *Mask = llvm::ConstantInt::get(IntPtrTy, ~1);
        llvm::Value *AlignedCalleeAddress =
            Builder.CreateAnd(CalleeAddress, Mask);
        AlignedCalleePtr =
            Builder.CreateIntToPtr(AlignedCalleeAddress, CalleePtr->getType());
      } else {
        AlignedCalleePtr = CalleePtr;
      }

      // The prefix struct sits immediately before the function, hence the
      // GEPs with index -1.
      llvm::Value *CalleePrefixStruct = AlignedCalleePtr;
      llvm::Value *CalleeSigPtr =
          Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 0);
      llvm::Value *CalleeSig =
          Builder.CreateAlignedLoad(PrefixSigType, CalleeSigPtr, getIntAlign());
      llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);

      llvm::BasicBlock *Cont = createBasicBlock("cont");
      llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
      Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);

      // Only check the type hash when the signature matched; otherwise the
      // callee has no prefix data and the check is skipped.
      EmitBlock(TypeCheck);
      llvm::Value *CalleeTypeHash = Builder.CreateAlignedLoad(
          Int32Ty,
          Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 1),
          getPointerAlign());
      llvm::Value *CalleeTypeHashMatch =
          Builder.CreateICmpEQ(CalleeTypeHash, TypeHash);
      llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()),
                                      EmitCheckTypeDescriptor(CalleeType)};
      EmitCheck(std::make_pair(CalleeTypeHashMatch, SanitizerKind::Function),
                SanitizerHandler::FunctionTypeMismatch, StaticData,
                {CalleePtr});

      Builder.CreateBr(Cont);
      EmitBlock(Cont);
    }
  }

  const auto *FnType = cast<FunctionType>(PointeeType);

  // If we are checking indirect calls and this call is indirect, check that the
  // function pointer is a member of the bit set for the function type.
  if (SanOpts.has(SanitizerKind::CFIICall) &&
      (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
    SanitizerScope SanScope(this);
    EmitSanitizerStatReport(llvm::SanStat_CFI_ICall);

    llvm::Metadata *MD;
    if (CGM.getCodeGenOpts().SanitizeCfiICallGeneralizePointers)
      MD = CGM.CreateMetadataIdentifierGeneralized(QualType(FnType, 0));
    else
      MD = CGM.CreateMetadataIdentifierForType(QualType(FnType, 0));

    llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);

    llvm::Value *CalleePtr = Callee.getFunctionPointer();
    llvm::Value *TypeTest = Builder.CreateCall(
        CGM.getIntrinsic(llvm::Intrinsic::type_test), {CalleePtr, TypeId});

    auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
    llvm::Constant *StaticData[] = {
        llvm::ConstantInt::get(Int8Ty, CFITCK_ICall),
        EmitCheckSourceLocation(E->getBeginLoc()),
        EmitCheckTypeDescriptor(QualType(FnType, 0)),
    };
    if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {
      EmitCfiSlowPathCheck(SanitizerKind::CFIICall, TypeTest, CrossDsoTypeId,
                           CalleePtr, StaticData);
    } else {
      EmitCheck(std::make_pair(TypeTest, SanitizerKind::CFIICall),
                SanitizerHandler::CFICheckFail, StaticData,
                {CalleePtr, llvm::UndefValue::get(IntPtrTy)});
    }
  }

  CallArgList Args;
  if (Chain)
    Args.add(RValue::get(Chain), CGM.getContext().VoidPtrTy);

  // C++17 requires that we evaluate arguments to a call using assignment syntax
  // right-to-left, and that we evaluate arguments to certain other operators
  // left-to-right. Note that we allow this to override the order dictated by
  // the calling convention on the MS ABI, which means that parameter
  // destruction order is not necessarily reverse construction order.
  // FIXME: Revisit this based on C++ committee response to unimplementability.
  EvaluationOrder Order = EvaluationOrder::Default;
  bool StaticOperator = false;
  if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
    if (OCE->isAssignmentOp())
      Order = EvaluationOrder::ForceRightToLeft;
    else {
      switch (OCE->getOperator()) {
      case OO_LessLess:
      case OO_GreaterGreater:
      case OO_AmpAmp:
      case OO_PipePipe:
      case OO_Comma:
      case OO_ArrowStar:
        Order = EvaluationOrder::ForceLeftToRight;
        break;
      default:
        break;
      }
    }

    if (const auto *MD =
            dyn_cast_if_present<CXXMethodDecl>(OCE->getCalleeDecl());
        MD && MD->isStatic())
      StaticOperator = true;
  }

  auto Arguments = E->arguments();
  if (StaticOperator) {
    // If we're calling a static operator, we need to emit the object argument
    // and ignore it.
    EmitIgnoredExpr(E->getArg(0));
    Arguments = drop_begin(Arguments, 1);
  }
  EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), Arguments,
               E->getDirectCallee(), /*ParamsToSkip=*/0, Order);

  const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
      Args, FnType, /*ChainCall=*/Chain);

  // C99 6.5.2.2p6:
  //   If the expression that denotes the called function has a type
  //   that does not include a prototype, [the default argument
  //   promotions are performed]. If the number of arguments does not
  //   equal the number of parameters, the behavior is undefined. If
  //   the function is defined with a type that includes a prototype,
  //   and either the prototype ends with an ellipsis (, ...) or the
  //   types of the arguments after promotion are not compatible with
  //   the types of the parameters, the behavior is undefined. If the
  //   function is defined with a type that does not include a
  //   prototype, and the types of the arguments after promotion are
  //   not compatible with those of the parameters after promotion,
  //   the behavior is undefined [except in some trivial cases].
  // That is, in the general case, we should assume that a call
  // through an unprototyped function type works like a *non-variadic*
  // call. The way we make this work is to cast to the exact type
  // of the promoted arguments.
  //
  // Chain calls use this same code path to add the invisible chain parameter
  // to the function type.
  if (isa<FunctionNoProtoType>(FnType) || Chain) {
    llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
    int AS = Callee.getFunctionPointer()->getType()->getPointerAddressSpace();
    CalleeTy = CalleeTy->getPointerTo(AS);

    llvm::Value *CalleePtr = Callee.getFunctionPointer();
    CalleePtr = Builder.CreateBitCast(CalleePtr, CalleeTy, "callee.knr.cast");
    Callee.setFunctionPointer(CalleePtr);
  }

  // HIP function pointer contains kernel handle when it is used in triple
  // chevron.
// The kernel stub needs to be loaded from kernel handle and used
// as callee.
if (CGM.getLangOpts().HIP && !CGM.getLangOpts().CUDAIsDevice &&
    isa<CUDAKernelCallExpr>(E) &&
    (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
  llvm::Value *Handle = Callee.getFunctionPointer();
  // The handle points at the stub; load through it to get the real callee.
  auto *Stub = Builder.CreateLoad(
      Address(Handle, Handle->getType(), CGM.getPointerAlign()));
  Callee.setFunctionPointer(Stub);
}
// Emit the call itself. CallOrInvoke receives the generated call (or
// invoke) instruction so the debug-info code below can attach call-site
// information to it.
llvm::CallBase *CallOrInvoke = nullptr;
RValue Call = EmitCall(FnInfo, Callee, ReturnValue, Args, &CallOrInvoke,
                       E == MustTailCall, E->getExprLoc());

// Generate a function declaration DISubprogram in order to be used
// in debug info about call sites.
if (CGDebugInfo *DI = getDebugInfo()) {
  if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
    FunctionArgList Args;
    QualType ResTy = BuildFunctionArgList(CalleeDecl, Args);
    DI->EmitFuncDeclForCallSite(CallOrInvoke,
                                DI->getFunctionType(CalleeDecl, ResTy, Args),
                                CalleeDecl);
  }
}

return Call;
}

/// Emit the l-value produced by a pointer-to-data-member binary operator:
/// BO_PtrMemD ('.*') or BO_PtrMemI ('->*').
LValue CodeGenFunction::
EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
  Address BaseAddr = Address::invalid();
  if (E->getOpcode() == BO_PtrMemI) {
    // '->*': the LHS is a pointer to the object; emit it with its alignment.
    BaseAddr = EmitPointerWithAlignment(E->getLHS());
  } else {
    // '.*': the LHS is the object itself; take the address of its l-value.
    BaseAddr = EmitLValue(E->getLHS()).getAddress();
  }

  // Evaluate the member pointer (for data members, a scalar offset value).
  llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
  const auto *MPT = E->getRHS()->getType()->castAs<MemberPointerType>();

  // Compute the member's address along with alignment and TBAA info for it.
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  Address MemberAddr =
    EmitCXXMemberDataPointerAddress(E, BaseAddr, OffsetV, MPT, &BaseInfo,
                                    &TBAAInfo);

  return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), BaseInfo, TBAAInfo);
}

/// Given the address of a temporary variable, produce an r-value of
/// its type.
RValue CodeGenFunction::convertTempToRValue(Address addr,
                                            QualType type,
                                            SourceLocation loc) {
  LValue lvalue = MakeAddrLValue(addr, type, AlignmentSource::Decl);
  // Load according to the type's evaluation kind.
  switch (getEvaluationKind(type)) {
  case TEK_Complex:
    return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
  case TEK_Aggregate:
    // Aggregates are not loaded; the r-value refers to the storage itself.
    return lvalue.asAggregateRValue();
  case TEK_Scalar:
    return RValue::get(EmitLoadOfScalar(lvalue, loc));
  }
  llvm_unreachable("bad evaluation kind");
}

/// Attach !fpmath metadata to \p Val requesting the given accuracy (ulps).
/// No-op when the accuracy is zero or \p Val is not an instruction.
void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
  assert(Val->getType()->isFPOrFPVectorTy());
  if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
    return;

  llvm::MDBuilder MDHelper(getLLVMContext());
  llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);

  cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
}

/// Annotate a single-precision sqrt result with 3ulp accuracy unless
/// correctly-rounded divide/sqrt was requested (OpenCL, or HIP device code).
void CodeGenFunction::SetSqrtFPAccuracy(llvm::Value *Val) {
  // Only single-precision (float) operations get relaxed accuracy.
  llvm::Type *EltTy = Val->getType()->getScalarType();
  if (!EltTy->isFloatTy())
    return;

  if ((getLangOpts().OpenCL &&
       !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
      (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
       !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
    // OpenCL v1.1 s7.4: minimum accuracy of single precision sqrt is 3ulp
    //
    // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
    // build option allows an application to specify that single precision
    // floating-point divide (x/y and 1/x) and sqrt used in the program
    // source are correctly rounded.
    //
    // TODO: CUDA has a prec-sqrt flag
    SetFPAccuracy(Val, 3.0f);
  }
}

/// Annotate a single-precision divide result with 2.5ulp accuracy unless
/// correctly-rounded divide/sqrt was requested (OpenCL, or HIP device code).
void CodeGenFunction::SetDivFPAccuracy(llvm::Value *Val) {
  // Only single-precision (float) operations get relaxed accuracy.
  llvm::Type *EltTy = Val->getType()->getScalarType();
  if (!EltTy->isFloatTy())
    return;

  if ((getLangOpts().OpenCL &&
       !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
      (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
       !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
    // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
    //
    // OpenCL v1.2 s5.6.4.2: The
    // -cl-fp32-correctly-rounded-divide-sqrt
    // build option allows an application to specify that single precision
    // floating-point divide (x/y and 1/x) and sqrt used in the program
    // source are correctly rounded.
    //
    // TODO: CUDA has a prec-div flag
    SetFPAccuracy(Val, 2.5f);
  }
}

namespace {
// Result holder for emitPseudoObjectExpr: exactly one of the two members
// is meaningful, depending on whether the caller asked for an l-value.
struct LValueOrRValue {
  LValue LV;
  RValue RV;
};
}

/// Emit a pseudo-object expression by evaluating its semantic-form
/// expressions in order.  Non-unique OpaqueValueExprs are bound to the
/// values of their source expressions for the duration of the walk; the
/// result expression, if any, is evaluated as an l-value or r-value
/// according to \p forLValue.  All bindings are released before returning.
static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
                                           const PseudoObjectExpr *E,
                                           bool forLValue,
                                           AggValueSlot slot) {
  // Bindings created during the walk; unbound at the end.
  SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;

  // Find the result expression, if any.
  const Expr *resultExpr = E->getResultExpr();
  LValueOrRValue result;

  for (PseudoObjectExpr::const_semantics_iterator
         i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
    const Expr *semantic = *i;

    // If this semantic expression is an opaque value, bind it
    // to the result of its source expression.
    if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
      // Skip unique OVEs.
      if (ov->isUnique()) {
        assert(ov != resultExpr &&
               "A unique OVE cannot be used as the result expression");
        continue;
      }

      // If this is the result expression, we may need to evaluate
      // directly into the slot.
      typedef CodeGenFunction::OpaqueValueMappingData OVMA;
      OVMA opaqueData;
      if (ov == resultExpr && ov->isPRValue() && !forLValue &&
          CodeGenFunction::hasAggregateEvaluationKind(ov->getType())) {
        // Aggregate prvalue result: evaluate straight into the slot and
        // bind the OVE to an l-value over that same storage.
        CGF.EmitAggExpr(ov->getSourceExpr(), slot);
        LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(),
                                       AlignmentSource::Decl);
        opaqueData = OVMA::bind(CGF, ov, LV);
        result.RV = slot.asRValue();

      // Otherwise, emit as normal.
      } else {
        opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());

        // If this is the result, also evaluate the result now.
        if (ov == resultExpr) {
          if (forLValue)
            result.LV = CGF.EmitLValue(ov);
          else
            result.RV = CGF.EmitAnyExpr(ov, slot);
        }
      }

      opaques.push_back(opaqueData);

    // Otherwise, if the expression is the result, evaluate it
    // and remember the result.
    } else if (semantic == resultExpr) {
      if (forLValue)
        result.LV = CGF.EmitLValue(semantic);
      else
        result.RV = CGF.EmitAnyExpr(semantic, slot);

    // Otherwise, evaluate the expression in an ignored context.
    } else {
      CGF.EmitIgnoredExpr(semantic);
    }
  }

  // Unbind all the opaques now.
  for (unsigned i = 0, e = opaques.size(); i != e; ++i)
    opaques[i].unbind(CGF);

  return result;
}

/// Emit a pseudo-object expression as an r-value.
RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
                                               AggValueSlot slot) {
  return emitPseudoObjectExpr(*this, E, false, slot).RV;
}

/// Emit a pseudo-object expression as an l-value.
LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
  return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
}