Path: blob/main/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp
//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;

namespace {
  class AtomicInfo {
    CodeGenFunction &CGF;
    QualType AtomicTy;
    QualType ValueTy;
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    CharUnits AtomicAlign;
    CharUnits ValueAlign;
    TypeEvaluationKind EvaluationKind;
    bool UseLibcall;
    LValue LVal;
    CGBitFieldInfo BFI;
  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
        : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
          EvaluationKind(TEK_Scalar), UseLibcall(true) {
      assert(!lvalue.isGlobalReg());
      ASTContext &C = CGF.getContext();
      if (lvalue.isSimple()) {
        AtomicTy = lvalue.getType();
        if (auto *ATy = AtomicTy->getAs<AtomicType>())
          ValueTy = ATy->getValueType();
        else
          ValueTy = AtomicTy;
        EvaluationKind = CGF.getEvaluationKind(ValueTy);

        uint64_t ValueAlignInBits;
        uint64_t AtomicAlignInBits;
        TypeInfo ValueTI = C.getTypeInfo(ValueTy);
        ValueSizeInBits = ValueTI.Width;
        ValueAlignInBits = ValueTI.Align;

        TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
        AtomicSizeInBits = AtomicTI.Width;
        AtomicAlignInBits = AtomicTI.Align;

        assert(ValueSizeInBits <= AtomicSizeInBits);
        assert(ValueAlignInBits <= AtomicAlignInBits);

        AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
        ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
        if (lvalue.getAlignment().isZero())
          lvalue.setAlignment(AtomicAlign);

        LVal = lvalue;
      } else if (lvalue.isBitField()) {
        ValueTy = lvalue.getType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
        auto &OrigBFI = lvalue.getBitFieldInfo();
        auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
        AtomicSizeInBits = C.toBits(
            C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
                .alignTo(lvalue.getAlignment()));
        llvm::Value *BitFieldPtr = lvalue.getRawBitFieldPointer(CGF);
        auto OffsetInChars =
            (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
            lvalue.getAlignment();
        llvm::Value *StoragePtr = CGF.Builder.CreateConstGEP1_64(
            CGF.Int8Ty, BitFieldPtr, OffsetInChars.getQuantity());
        StoragePtr = CGF.Builder.CreateAddrSpaceCast(
            StoragePtr, CGF.UnqualPtrTy, "atomic_bitfield_base");
        BFI = OrigBFI;
        BFI.Offset = Offset;
        BFI.StorageSize = AtomicSizeInBits;
        BFI.StorageOffset += OffsetInChars;
        llvm::Type *StorageTy = CGF.Builder.getIntNTy(AtomicSizeInBits);
        LVal = LValue::MakeBitfield(
            Address(StoragePtr, StorageTy, lvalue.getAlignment()), BFI,
            lvalue.getType(), lvalue.getBaseInfo(), lvalue.getTBAAInfo());
        AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
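        // No integer type of exactly AtomicSizeInBits exists (e.g. a run of
        // bit-fields padded out to 3 bytes in a byte-aligned struct yields a
        // 24-bit region), so model the storage as a char array of the
        // equivalent size instead.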
        if (AtomicTy.isNull()) {
          llvm::APInt Size(
              /*numBits=*/32,
              C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
          AtomicTy = C.getConstantArrayType(C.CharTy, Size, nullptr,
                                            ArraySizeModifier::Normal,
                                            /*IndexTypeQuals=*/0);
        }
        AtomicAlign = ValueAlign = lvalue.getAlignment();
      } else if (lvalue.isVectorElt()) {
        ValueTy = lvalue.getType()->castAs<VectorType>()->getElementType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
        AtomicTy = lvalue.getType();
        AtomicSizeInBits = C.getTypeSize(AtomicTy);
        AtomicAlign = ValueAlign = lvalue.getAlignment();
        LVal = lvalue;
      } else {
        assert(lvalue.isExtVectorElt());
        ValueTy = lvalue.getType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
        AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
            lvalue.getType(), cast<llvm::FixedVectorType>(
                                  lvalue.getExtVectorAddress().getElementType())
                                  ->getNumElements());
        AtomicSizeInBits = C.getTypeSize(AtomicTy);
        AtomicAlign = ValueAlign = lvalue.getAlignment();
        LVal = lvalue;
      }
      UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
          AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
    }

    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
    bool shouldUseLibcall() const { return UseLibcall; }
    const LValue &getAtomicLValue() const { return LVal; }
    llvm::Value *getAtomicPointer() const {
      if (LVal.isSimple())
        return LVal.emitRawPointer(CGF);
      else if (LVal.isBitField())
        return LVal.getRawBitFieldPointer(CGF);
      else if (LVal.isVectorElt())
        return LVal.getRawVectorPointer(CGF);
      assert(LVal.isExtVectorElt());
      return LVal.getRawExtVectorPointer(CGF);
    }
    Address getAtomicAddress() const {
      llvm::Type *ElTy;
      if (LVal.isSimple())
        ElTy = LVal.getAddress().getElementType();
      else if (LVal.isBitField())
        ElTy = LVal.getBitFieldAddress().getElementType();
      else if (LVal.isVectorElt())
        ElTy = LVal.getVectorAddress().getElementType();
      else
        ElTy = LVal.getExtVectorAddress().getElementType();
      return Address(getAtomicPointer(), ElTy, getAtomicAlignment());
    }

    Address getAtomicAddressAsAtomicIntPointer() const {
      return castToAtomicIntPointer(getAtomicAddress());
    }

    /// Is the atomic size larger than the underlying value type?
    ///
    /// Note that the absence of padding does not mean that atomic
    /// objects are completely interchangeable with non-atomic
    /// objects: we might have promoted the alignment of a type
    /// without making it bigger.
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }

    bool emitMemSetZeroIfNecessary() const;

    llvm::Value *getAtomicSizeValue() const {
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
      return CGF.CGM.getSize(size);
    }
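    // For example, _Atomic(struct { char c[3]; }) is normally promoted to a
    // 4-byte, 4-aligned object: ValueSizeInBits is 24 while AtomicSizeInBits
    // is 32, so hasPadding() is true and the extra byte has to be zeroed
    // before the object can be compared or exchanged as a raw bit pattern.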
    /// Cast the given pointer to an integer pointer suitable for atomic
    /// operations.
    Address castToAtomicIntPointer(Address Addr) const;

    /// If Addr is compatible with the iN that will be used for an atomic
    /// operation, bitcast it. Otherwise, create a temporary that is suitable
    /// and copy the value across.
    Address convertToAtomicIntPointer(Address Addr) const;

    /// Turn an atomic-layout object into an r-value.
    RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
                                     SourceLocation loc, bool AsValue) const;

    llvm::Value *getScalarRValValueOrNull(RValue RVal) const;

    /// Converts an rvalue to integer value if needed.
    llvm::Value *convertRValueToInt(RValue RVal, bool CmpXchg = false) const;

    RValue ConvertToValueOrAtomic(llvm::Value *IntVal, AggValueSlot ResultSlot,
                                  SourceLocation Loc, bool AsValue,
                                  bool CmpXchg = false) const;

    /// Copy an atomic r-value into atomic-layout memory.
    void emitCopyIntoMemory(RValue rvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue() const {
      assert(LVal.isSimple());
      Address addr = getAtomicAddress();
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(addr, 0);

      return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
                              LVal.getBaseInfo(), LVal.getTBAAInfo());
    }

    /// Emits atomic load.
    /// \returns Loaded value.
    RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                          bool AsValue, llvm::AtomicOrdering AO,
                          bool IsVolatile);

    /// Emits atomic compare-and-exchange sequence.
    /// \param Expected Expected value.
    /// \param Desired Desired value.
    /// \param Success Atomic ordering for success operation.
    /// \param Failure Atomic ordering for failed operation.
    /// \param IsWeak true if atomic operation is weak, false otherwise.
    /// \returns Pair of values: previous value from storage (value type) and
    /// boolean flag (i1 type) with true if success and false otherwise.
    std::pair<RValue, llvm::Value *>
    EmitAtomicCompareExchange(RValue Expected, RValue Desired,
                              llvm::AtomicOrdering Success =
                                  llvm::AtomicOrdering::SequentiallyConsistent,
                              llvm::AtomicOrdering Failure =
                                  llvm::AtomicOrdering::SequentiallyConsistent,
                              bool IsWeak = false);

    /// Emits atomic update.
    /// \param AO Atomic ordering.
    /// \param UpdateOp Update operation for the current lvalue.
    void EmitAtomicUpdate(llvm::AtomicOrdering AO,
                          const llvm::function_ref<RValue(RValue)> &UpdateOp,
                          bool IsVolatile);
    /// Emits atomic update.
    /// \param AO Atomic ordering.
    void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                          bool IsVolatile);

    /// Materialize an atomic r-value in atomic-layout memory.
    Address materializeRValue(RValue rvalue) const;

    /// Creates temp alloca for intermediate operations on atomic value.
    Address CreateTempAlloca() const;

  private:
    bool requiresMemSetZero(llvm::Type *type) const;

    /// Emits atomic load as a libcall.
    void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                               llvm::AtomicOrdering AO, bool IsVolatile);
    /// Emits atomic load as LLVM instruction.
    llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile,
                                  bool CmpXchg = false);
    /// Emits atomic compare-and-exchange op as a libcall.
    llvm::Value *EmitAtomicCompareExchangeLibcall(
        llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
        llvm::AtomicOrdering Success =
            llvm::AtomicOrdering::SequentiallyConsistent,
        llvm::AtomicOrdering Failure =
            llvm::AtomicOrdering::SequentiallyConsistent);
    /// Emits atomic compare-and-exchange op as LLVM instruction.
    std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
        llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
        llvm::AtomicOrdering Success =
            llvm::AtomicOrdering::SequentiallyConsistent,
        llvm::AtomicOrdering Failure =
            llvm::AtomicOrdering::SequentiallyConsistent,
        bool IsWeak = false);
    /// Emit atomic update as libcalls.
    void
    EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                            const llvm::function_ref<RValue(RValue)> &UpdateOp,
                            bool IsVolatile);
    /// Emit atomic update as LLVM instructions.
    void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
                            const llvm::function_ref<RValue(RValue)> &UpdateOp,
                            bool IsVolatile);
    /// Emit atomic update as libcalls.
    void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                 bool IsVolatile);
    /// Emit atomic update as LLVM instructions.
    void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                            bool IsVolatile);
  };
}

Address AtomicInfo::CreateTempAlloca() const {
  Address TempAlloca = CGF.CreateMemTemp(
      (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
                                                                : AtomicTy,
      getAtomicAlignment(),
      "atomic-temp");
  // Cast to pointer to value type for bitfields.
  if (LVal.isBitField())
    return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        TempAlloca, getAtomicAddress().getType(),
        getAtomicAddress().getElementType());
  return TempAlloca;
}

static RValue emitAtomicLibcall(CodeGenFunction &CGF,
                                StringRef fnName,
                                QualType resultType,
                                CallArgList &args) {
  const CGFunctionInfo &fnInfo =
      CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::AttrBuilder fnAttrB(CGF.getLLVMContext());
  fnAttrB.addAttribute(llvm::Attribute::NoUnwind);
  fnAttrB.addAttribute(llvm::Attribute::WillReturn);
  llvm::AttributeList fnAttrs = llvm::AttributeList::get(
      CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, fnAttrB);

  llvm::FunctionCallee fn =
      CGF.CGM.CreateRuntimeFunction(fnTy, fnName, fnAttrs);
  auto callee = CGCallee::forDirect(fn);
  return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args);
}

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}
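// For example, on x86-64 an _Atomic(long double) is 128 bits wide, but a
// store of the underlying x86_fp80 type only writes 80 of those bits:
// isFullSizeType() is false, so requiresMemSetZero() below reports that the
// object must be zero-filled before it can be treated as an iN bit pattern.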
/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Padding in structs has an undefined bit pattern. User beware.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}

bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  assert(LVal.isSimple());
  Address addr = LVal.getAddress();
  if (!requiresMemSetZero(addr.getElementType()))
    return false;

  CGF.Builder.CreateMemSet(
      addr.emitRawPointer(CGF), llvm::ConstantInt::get(CGF.Int8Ty, 0),
      CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
      LVal.getAlignment().getAsAlign());
  return true;
}

static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
                              Address Dest, Address Ptr,
                              Address Val1, Address Val2,
                              uint64_t Size,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder,
                              llvm::SyncScope::ID Scope) {
  // Note that cmpxchg doesn't support weak cmpxchg, at least at the moment.
  llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
  llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);

  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Ptr, Expected, Desired, SuccessOrder, FailureOrder, Scope);
  Pair->setVolatile(E->isVolatile());
  Pair->setWeak(IsWeak);

  // Cmp holds the result of the compare-exchange operation: true on success,
  // false on failure.
  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);

  // This basic block is used to hold the store instruction if the operation
  // failed.
  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);

  // This basic block is the exit point of the operation, we should end up
  // here regardless of whether or not the operation succeeded.
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  // Update Expected if Expected isn't equal to Old, otherwise branch to the
  // exit point.
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  // Update the memory at Expected with Old's value.
  CGF.Builder.CreateStore(Old, Val1);
  // Finally, branch to the exit point.
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
  // Update the memory at Dest with Cmp's value.
  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
}
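// The emitted sequence above has roughly this shape in IR:
//   %pair = cmpxchg ptr %ptr, iN %expected, iN %desired <success> <failure>
//   %old  = extractvalue { iN, i1 } %pair, 0
//   %cmp  = extractvalue { iN, i1 } %pair, 1
//   br i1 %cmp, label %cmpxchg.continue, label %cmpxchg.store_expected
// where the failure path stores %old back into the caller's 'expected' slot.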
/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically known)
/// FailureOrder.
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        bool IsWeak, Address Dest, Address Ptr,
                                        Address Val1, Address Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size,
                                        llvm::AtomicOrdering SuccessOrder,
                                        llvm::SyncScope::ID Scope) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    auto FOS = FO->getSExtValue();
    if (!llvm::isValidAtomicOrderingCABI(FOS))
      FailureOrder = llvm::AtomicOrdering::Monotonic;
    else
      switch ((llvm::AtomicOrderingCABI)FOS) {
      case llvm::AtomicOrderingCABI::relaxed:
      // 31.7.2.18: "The failure argument shall not be memory_order_release
      // nor memory_order_acq_rel". Fallback to monotonic.
      case llvm::AtomicOrderingCABI::release:
      case llvm::AtomicOrderingCABI::acq_rel:
        FailureOrder = llvm::AtomicOrdering::Monotonic;
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        FailureOrder = llvm::AtomicOrdering::Acquire;
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
        break;
      }
    // Prior to C++17, "the failure argument shall be no stronger than the
    // success argument". This condition has been lifted and the only
    // precondition is 31.7.2.18. Effectively treat this as a DR and skip
    // language version checks.
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                      FailureOrder, Scope);
    return;
  }

  // Create all the relevant BB's
  auto *MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
  auto *AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
  auto *SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
  auto *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);

  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
  // Implemented as acquire, since it's the closest in LLVM.
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
              AcquireBB);
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
              AcquireBB);
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
              SeqCstBB);

  // Emit all the different atomics
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
  CGF.Builder.CreateBr(ContBB);

  CGF.Builder.SetInsertPoint(AcquireBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                    llvm::AtomicOrdering::Acquire, Scope);
  CGF.Builder.CreateBr(ContBB);

  CGF.Builder.SetInsertPoint(SeqCstBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                    llvm::AtomicOrdering::SequentiallyConsistent, Scope);
  CGF.Builder.CreateBr(ContBB);

  CGF.Builder.SetInsertPoint(ContBB);
}
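// For example, for
//   __atomic_compare_exchange_n(p, &e, d, /*weak=*/0, __ATOMIC_SEQ_CST, fail)
// where 'fail' is only known at run time, the code above emits a switch over
// the C ABI ordering values (consume/acquire -> acquire, seq_cst -> seq_cst)
// with one cmpxchg per block, defaulting to a monotonic failure ordering.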
/// Duplicate the atomic min/max operation in conventional IR for the builtin
/// variants that return the new rather than the original value.
static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
                                         AtomicExpr::AtomicOp Op,
                                         bool IsSigned,
                                         llvm::Value *OldVal,
                                         llvm::Value *RHS) {
  llvm::CmpInst::Predicate Pred;
  switch (Op) {
  default:
    llvm_unreachable("Unexpected min/max operation");
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__scoped_atomic_max_fetch:
    Pred = IsSigned ? llvm::CmpInst::ICMP_SGT : llvm::CmpInst::ICMP_UGT;
    break;
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__scoped_atomic_min_fetch:
    Pred = IsSigned ? llvm::CmpInst::ICMP_SLT : llvm::CmpInst::ICMP_ULT;
    break;
  }
  llvm::Value *Cmp = Builder.CreateICmp(Pred, OldVal, RHS, "tst");
  return Builder.CreateSelect(Cmp, OldVal, RHS, "newval");
}
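// For example, __atomic_max_fetch(p, v, order) on a signed type becomes
//   %old    = atomicrmw max ptr %p, iN %v <order>
//   %tst    = icmp sgt iN %old, %v
//   %newval = select i1 %tst, iN %old, iN %v
// so the builtin can return the value that was actually stored.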
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
                         Address Ptr, Address Val1, Address Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, llvm::AtomicOrdering Order,
                         llvm::SyncScope::ID Scope) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  bool PostOpMinMax = false;
  unsigned PostOp = 0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order, Scope);
    return;
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order, Scope);
    return;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__scoped_atomic_compare_exchange:
  case AtomicExpr::AO__scoped_atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
                                  Val1, Val2, FailureOrder, Size, Order, Scope);
    } else {
      // Create all the relevant BB's
      llvm::BasicBlock *StrongBB =
          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
      llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmxchg.weak", CGF.CurFn);
      llvm::BasicBlock *ContBB =
          CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);

      CGF.Builder.SetInsertPoint(StrongBB);
      emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Order, Scope);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(WeakBB);
      emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Order, Scope);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(ContBB);
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load:
  case AtomicExpr::AO__scoped_atomic_load_n:
  case AtomicExpr::AO__scoped_atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order, Scope);
    Load->setVolatile(E->isVolatile());
    CGF.Builder.CreateStore(Load, Dest);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__scoped_atomic_store:
  case AtomicExpr::AO__scoped_atomic_store_n: {
    llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order, Scope);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_exchange_n:
  case AtomicExpr::AO__scoped_atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__scoped_atomic_add_fetch:
    PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FAdd
                                                 : llvm::Instruction::Add;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__scoped_atomic_fetch_add:
    Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FAdd
                                             : llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__scoped_atomic_sub_fetch:
    PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FSub
                                                 : llvm::Instruction::Sub;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__hip_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__scoped_atomic_fetch_sub:
    Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FSub
                                             : llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__scoped_atomic_min_fetch:
    PostOpMinMax = true;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_min:
    Op = E->getValueType()->isFloatingType()
             ? llvm::AtomicRMWInst::FMin
             : (E->getValueType()->isSignedIntegerType()
                    ? llvm::AtomicRMWInst::Min
                    : llvm::AtomicRMWInst::UMin);
    break;
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__scoped_atomic_max_fetch:
    PostOpMinMax = true;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__scoped_atomic_fetch_max:
    Op = E->getValueType()->isFloatingType()
             ? llvm::AtomicRMWInst::FMax
             : (E->getValueType()->isSignedIntegerType()
                    ? llvm::AtomicRMWInst::Max
                    : llvm::AtomicRMWInst::UMax);
    break;

  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__scoped_atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__scoped_atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__scoped_atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__scoped_atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__scoped_atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__scoped_atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__scoped_atomic_nand_fetch:
    PostOp = llvm::Instruction::And; // the NOT is special cased below
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__scoped_atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order, Scope);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOpMinMax)
    Result = EmitPostAtomicMinMax(CGF.Builder, E->getOp(),
                                  E->getValueType()->isSignedIntegerType(),
                                  RMWI, LoadVal1);
  else if (PostOp)
    Result = CGF.Builder.CreateBinOp((llvm::Instruction::BinaryOps)PostOp, RMWI,
                                     LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch ||
      E->getOp() == AtomicExpr::AO__scoped_atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  CGF.Builder.CreateStore(Result, Dest);
}

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static Address
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}
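// The overload below handles the synchronization scope. When the scope
// operand of an OpenCL/HIP atomic does not fold to a constant, it expands to
// a switch over every scope value the target's scope model supports, with one
// copy of the atomic operation per scope and the model's fallback scope as
// the default case.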
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
                         Address Ptr, Address Val1, Address Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, llvm::AtomicOrdering Order,
                         llvm::Value *Scope) {
  auto ScopeModel = Expr->getScopeModel();

  // LLVM atomic instructions always have a synch scope. If the clang atomic
  // expression has no scope operand, use the default LLVM synch scope.
  if (!ScopeModel) {
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                 Order, CGF.CGM.getLLVMContext().getOrInsertSyncScopeID(""));
    return;
  }

  // Handle constant scope.
  if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
    auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
        CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()),
        Order, CGF.CGM.getLLVMContext());
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                 Order, SCID);
    return;
  }

  // Handle non-constant scope.
  auto &Builder = CGF.Builder;
  auto Scopes = ScopeModel->getRuntimeValues();
  llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
  for (auto S : Scopes)
    BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);

  llvm::BasicBlock *ContBB =
      CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);

  auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
  // If an unsupported synch scope is encountered at run time, assume a
  // fallback synch scope value.
  auto FallBack = ScopeModel->getFallBackValue();
  llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
  for (auto S : Scopes) {
    auto *B = BB[S];
    if (S != FallBack)
      SI->addCase(Builder.getInt32(S), B);

    Builder.SetInsertPoint(B);
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                 Order,
                 CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
                                                         ScopeModel->map(S),
                                                         Order,
                                                         CGF.getLLVMContext()));
    Builder.CreateBr(ContBB);
  }

  Builder.SetInsertPoint(ContBB);
}
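// EmitAtomicExpr is the main entry point for lowering an AtomicExpr: it
// evaluates the operands, decides between inline instructions and the
// __atomic_* libcalls, and then dispatches on the (possibly non-constant)
// memory order and synchronization scope.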
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;

  Address Val1 = Address::invalid();
  Address Val2 = Address::invalid();
  Address Dest = Address::invalid();
  Address Ptr = EmitPointerWithAlignment(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
      E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
    LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  auto TInfo = getContext().getTypeInfoInChars(AtomicTy);
  uint64_t Size = TInfo.Width.getQuantity();
  unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();

  CharUnits MaxInlineWidth =
      getContext().toCharUnitsFromBits(MaxInlineWidthInBits);
  DiagnosticsEngine &Diags = CGM.getDiags();
  bool Misaligned = (Ptr.getAlignment() % TInfo.Width) != 0;
  bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits;
  if (Misaligned) {
    Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned)
        << (int)TInfo.Width.getQuantity()
        << (int)Ptr.getAlignment().getQuantity();
  }
  if (Oversized) {
    Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_oversized)
        << (int)TInfo.Width.getQuantity() << (int)MaxInlineWidth.getQuantity();
  }

  llvm::Value *Order = EmitScalarExpr(E->getOrder());
  llvm::Value *Scope =
      E->getScopeModel() ? EmitScalarExpr(E->getScope()) : nullptr;
  bool ShouldCastToIntPtrTy = true;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled above with EmitAtomicInit!");

  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__scoped_atomic_load_n:
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
    break;

  case AtomicExpr::AO__atomic_load:
  case AtomicExpr::AO__scoped_atomic_load:
    Dest = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__scoped_atomic_store:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_exchange:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    Dest = EmitPointerWithAlignment(E->getVal2());
    break;

  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  case AtomicExpr::AO__scoped_atomic_compare_exchange:
  case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      Val2 = EmitPointerWithAlignment(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
        E->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n ||
        E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      IsWeak = EmitScalarExpr(E->getWeak());
    break;
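  // Note the asymmetry handled above: __atomic_compare_exchange passes
  // 'desired' by pointer, while the '_n' variant passes it by value, e.g.
  //   bool ok = __atomic_compare_exchange_n(p, &expected, desired, ...);
  // so the by-value form is first spilled to a temporary.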
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__hip_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
      Val1 = Temp;
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
      break;
    }
    [[fallthrough]];
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_add:
  case AtomicExpr::AO__scoped_atomic_fetch_max:
  case AtomicExpr::AO__scoped_atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_sub:
  case AtomicExpr::AO__scoped_atomic_add_fetch:
  case AtomicExpr::AO__scoped_atomic_max_fetch:
  case AtomicExpr::AO__scoped_atomic_min_fetch:
  case AtomicExpr::AO__scoped_atomic_sub_fetch:
    ShouldCastToIntPtrTy = !MemTy->isFloatingType();
    [[fallthrough]];

  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_fetch_and:
  case AtomicExpr::AO__scoped_atomic_fetch_nand:
  case AtomicExpr::AO__scoped_atomic_fetch_or:
  case AtomicExpr::AO__scoped_atomic_fetch_xor:
  case AtomicExpr::AO__scoped_atomic_and_fetch:
  case AtomicExpr::AO__scoped_atomic_nand_fetch:
  case AtomicExpr::AO__scoped_atomic_or_fetch:
  case AtomicExpr::AO__scoped_atomic_xor_fetch:
  case AtomicExpr::AO__scoped_atomic_store_n:
  case AtomicExpr::AO__scoped_atomic_exchange_n:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  QualType RValTy = E->getType().getUnqualifiedType();
  // The inlined atomics only function on iN types, where N is a power of 2. We
  // need to make sure (via temporaries if necessary) that all incoming values
  // are compatible.
  LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
  AtomicInfo Atomics(*this, AtomicVal);

  if (ShouldCastToIntPtrTy) {
    Ptr = Atomics.castToAtomicIntPointer(Ptr);
    if (Val1.isValid())
      Val1 = Atomics.convertToAtomicIntPointer(Val1);
    if (Val2.isValid())
      Val2 = Atomics.convertToAtomicIntPointer(Val2);
  }
  if (Dest.isValid()) {
    if (ShouldCastToIntPtrTy)
      Dest = Atomics.castToAtomicIntPointer(Dest);
  } else if (E->isCmpXChg())
    Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
  else if (!RValTy->isVoidType()) {
    Dest = Atomics.CreateTempAlloca();
    if (ShouldCastToIntPtrTy)
      Dest = Atomics.castToAtomicIntPointer(Dest);
  }

  bool PowerOf2Size = (Size & (Size - 1)) == 0;
  bool UseLibcall = !PowerOf2Size || (Size > 16);

  // For atomics larger than 16 bytes, emit a libcall from the frontend. This
  // avoids the overhead of dealing with excessively-large value types in IR.
  // Non-power-of-2 values also lower to libcall here, as they are not currently
  // permitted in IR instructions (although that constraint could be relaxed in
  // the future). For other cases where a libcall is required on a given
  // platform, we let the backend handle it (this includes handling for all of
  // the size-optimized libcall variants, which are only valid up to 16 bytes.)
  //
  // See: https://llvm.org/docs/Atomics.html#libcalls-atomic
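  // For example, an atomic store of a 32-byte struct is too wide for the
  // inline path, so it is emitted here roughly as
  //   call void @__atomic_store(i64 32, ptr %obj, ptr %tmp, i32 <order>)
  // while a 16-byte power-of-2 access stays an IR atomic instruction and any
  // libcall it may still need is left to the backend.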
  if (UseLibcall) {
    CallArgList Args;
    // For non-optimized library calls, the size is the first parameter.
    Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
             getContext().getSizeType());

    // The atomic address is the second parameter.
    // The OpenCL atomic library functions only accept pointer arguments to
    // generic address space.
    auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
      if (!E->isOpenCL())
        return V;
      auto AS = PT->castAs<PointerType>()->getPointeeType().getAddressSpace();
      if (AS == LangAS::opencl_generic)
        return V;
      auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
      auto *DestType = llvm::PointerType::get(getLLVMContext(), DestAS);

      return getTargetHooks().performAddrSpaceCast(
          *this, V, AS, LangAS::opencl_generic, DestType, false);
    };

    Args.add(RValue::get(CastToGenericAddrSpace(Ptr.emitRawPointer(*this),
                                                E->getPtr()->getType())),
             getContext().VoidPtrTy);

    // The next 1-3 parameters are op-dependent.
    std::string LibCallName;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_init:
    case AtomicExpr::AO__opencl_atomic_init:
      llvm_unreachable("Already handled!");

    // There is only one libcall for compare and exchange, because there is no
    // optimisation benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
    case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
    case AtomicExpr::AO__scoped_atomic_compare_exchange:
    case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(CastToGenericAddrSpace(Val1.emitRawPointer(*this),
                                                  E->getVal1()->getType())),
               getContext().VoidPtrTy);
      Args.add(RValue::get(CastToGenericAddrSpace(Val2.emitRawPointer(*this),
                                                  E->getVal2()->getType())),
               getContext().VoidPtrTy);
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    case AtomicExpr::AO__atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__hip_atomic_exchange:
    case AtomicExpr::AO__opencl_atomic_exchange:
    case AtomicExpr::AO__scoped_atomic_exchange:
    case AtomicExpr::AO__scoped_atomic_exchange_n:
      LibCallName = "__atomic_exchange";
      Args.add(RValue::get(CastToGenericAddrSpace(Val1.emitRawPointer(*this),
                                                  E->getVal1()->getType())),
               getContext().VoidPtrTy);
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__hip_atomic_store:
    case AtomicExpr::AO__opencl_atomic_store:
    case AtomicExpr::AO__scoped_atomic_store:
    case AtomicExpr::AO__scoped_atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      Args.add(RValue::get(CastToGenericAddrSpace(Val1.emitRawPointer(*this),
                                                  E->getVal1()->getType())),
               getContext().VoidPtrTy);
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__hip_atomic_load:
    case AtomicExpr::AO__opencl_atomic_load:
    case AtomicExpr::AO__scoped_atomic_load:
    case AtomicExpr::AO__scoped_atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    case AtomicExpr::AO__atomic_add_fetch:
    case AtomicExpr::AO__scoped_atomic_add_fetch:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__hip_atomic_fetch_add:
    case AtomicExpr::AO__opencl_atomic_fetch_add:
    case AtomicExpr::AO__scoped_atomic_fetch_add:
    case AtomicExpr::AO__atomic_and_fetch:
    case AtomicExpr::AO__scoped_atomic_and_fetch:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__hip_atomic_fetch_and:
    case AtomicExpr::AO__opencl_atomic_fetch_and:
    case AtomicExpr::AO__scoped_atomic_fetch_and:
    case AtomicExpr::AO__atomic_or_fetch:
    case AtomicExpr::AO__scoped_atomic_or_fetch:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__hip_atomic_fetch_or:
    case AtomicExpr::AO__opencl_atomic_fetch_or:
    case AtomicExpr::AO__scoped_atomic_fetch_or:
    case AtomicExpr::AO__atomic_sub_fetch:
    case AtomicExpr::AO__scoped_atomic_sub_fetch:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__hip_atomic_fetch_sub:
    case AtomicExpr::AO__opencl_atomic_fetch_sub:
    case AtomicExpr::AO__scoped_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_xor_fetch:
    case AtomicExpr::AO__scoped_atomic_xor_fetch:
    case AtomicExpr::AO__atomic_fetch_xor:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__hip_atomic_fetch_xor:
    case AtomicExpr::AO__opencl_atomic_fetch_xor:
    case AtomicExpr::AO__scoped_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_nand_fetch:
    case AtomicExpr::AO__atomic_fetch_nand:
    case AtomicExpr::AO__c11_atomic_fetch_nand:
    case AtomicExpr::AO__scoped_atomic_fetch_nand:
    case AtomicExpr::AO__scoped_atomic_nand_fetch:
    case AtomicExpr::AO__atomic_min_fetch:
    case AtomicExpr::AO__atomic_fetch_min:
    case AtomicExpr::AO__c11_atomic_fetch_min:
    case AtomicExpr::AO__hip_atomic_fetch_min:
    case AtomicExpr::AO__opencl_atomic_fetch_min:
    case AtomicExpr::AO__scoped_atomic_fetch_min:
    case AtomicExpr::AO__scoped_atomic_min_fetch:
    case AtomicExpr::AO__atomic_max_fetch:
    case AtomicExpr::AO__atomic_fetch_max:
    case AtomicExpr::AO__c11_atomic_fetch_max:
    case AtomicExpr::AO__hip_atomic_fetch_max:
    case AtomicExpr::AO__opencl_atomic_fetch_max:
    case AtomicExpr::AO__scoped_atomic_fetch_max:
    case AtomicExpr::AO__scoped_atomic_max_fetch:
      llvm_unreachable("Integral atomic operations always become atomicrmw!");
    }

    if (E->isOpenCL()) {
      LibCallName =
          std::string("__opencl") + StringRef(LibCallName).drop_front(1).str();
    }
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      // Value is returned through parameter before the order.
      RetTy = getContext().VoidTy;
      Args.add(RValue::get(
                   CastToGenericAddrSpace(Dest.emitRawPointer(*this), RetTy)),
               getContext().VoidPtrTy);
    }
    // Order is always the last parameter.
    Args.add(RValue::get(Order),
             getContext().IntTy);
    if (E->isOpenCL())
      Args.add(RValue::get(Scope), getContext().IntTy);

    RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
    // The value is returned directly from the libcall.
    if (E->isCmpXChg())
      return Res;

    if (RValTy->isVoidType())
      return RValue::get(nullptr);

    return convertTempToRValue(Dest.withElementType(ConvertTypeForMem(RValTy)),
                               RValTy, E->getExprLoc());
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
                 E->getOp() == AtomicExpr::AO__hip_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n ||
                 E->getOp() == AtomicExpr::AO__scoped_atomic_store ||
                 E->getOp() == AtomicExpr::AO__scoped_atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
                E->getOp() == AtomicExpr::AO__hip_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n ||
                E->getOp() == AtomicExpr::AO__scoped_atomic_load ||
                E->getOp() == AtomicExpr::AO__scoped_atomic_load_n;
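  // Fast path: in the common case the order operand folds to a constant, e.g.
  // __atomic_load_n(p, __ATOMIC_ACQUIRE), and exactly one atomic operation is
  // emitted. Orderings that are invalid for the operation (such as a release
  // load) are silently dropped rather than crashing, since such code has
  // undefined behavior anyway.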
  if (isa<llvm::ConstantInt>(Order)) {
    auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    // We should not ever get to a case where the ordering isn't a valid C ABI
    // value, but it's hard to enforce that in general.
    if (llvm::isValidAtomicOrderingCABI(ord))
      switch ((llvm::AtomicOrderingCABI)ord) {
      case llvm::AtomicOrderingCABI::relaxed:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Monotonic, Scope);
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        if (IsStore)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Acquire, Scope);
        break;
      case llvm::AtomicOrderingCABI::release:
        if (IsLoad)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Release, Scope);
        break;
      case llvm::AtomicOrderingCABI::acq_rel:
        if (IsLoad || IsStore)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::AcquireRelease, Scope);
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::SequentiallyConsistent, Scope);
        break;
      }
    if (RValTy->isVoidType())
      return RValue::get(nullptr);

    return convertTempToRValue(Dest.withElementType(ConvertTypeForMem(RValTy)),
                               RValTy, E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
               llvm::AtomicOrdering::Monotonic, Scope);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::Acquire, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
                AcquireBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::Release, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::AcquireRelease, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
               llvm::AtomicOrdering::SequentiallyConsistent, Scope);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
              SeqCstBB);
  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (RValTy->isVoidType())
    return RValue::get(nullptr);

  assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
  return convertTempToRValue(Dest.withElementType(ConvertTypeForMem(RValTy)),
                             RValTy, E->getExprLoc());
}

Address AtomicInfo::castToAtomicIntPointer(Address addr) const {
  llvm::IntegerType *ty =
      llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return addr.withElementType(ty);
}

Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
  llvm::Type *Ty = Addr.getElementType();
  uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
  if (SourceSizeInBits != AtomicSizeInBits) {
    Address Tmp = CreateTempAlloca();
    CGF.Builder.CreateMemCpy(Tmp, Addr,
                             std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
    Addr = Tmp;
  }

  return castToAtomicIntPointer(Addr);
}

RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
                                             AggValueSlot resultSlot,
                                             SourceLocation loc,
                                             bool asValue) const {
  if (LVal.isSimple()) {
    if (EvaluationKind == TEK_Aggregate)
      return resultSlot.asRValue();

    // Drill into the padding structure if we have one.
    if (hasPadding())
      addr = CGF.Builder.CreateStructGEP(addr, 0);

    // Otherwise, just convert the temporary to an r-value using the
    // normal conversion routine.
    return CGF.convertTempToRValue(addr, getValueType(), loc);
  }
  if (!asValue)
    // Get RValue from temp memory as atomic for non-simple lvalues
    return RValue::get(CGF.Builder.CreateLoad(addr));
  if (LVal.isBitField())
    return CGF.EmitLoadOfBitfieldLValue(
        LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
                             LVal.getBaseInfo(), TBAAAccessInfo()), loc);
  if (LVal.isVectorElt())
    return CGF.EmitLoadOfLValue(
        LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
                              LVal.getBaseInfo(), TBAAAccessInfo()), loc);
  assert(LVal.isExtVectorElt());
  return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
      addr, LVal.getExtVectorElts(), LVal.getType(),
      LVal.getBaseInfo(), TBAAAccessInfo()));
}

/// Return true if \param ValTy is a type that should be casted to integer
/// around the atomic memory operation. If \param CmpXchg is true, then the
/// cast of a floating point type is made as that instruction can not have
/// floating point operands. TODO: Allow compare-and-exchange and FP - see
/// comment in AtomicExpandPass.cpp.
static bool shouldCastToInt(llvm::Type *ValTy, bool CmpXchg) {
  if (ValTy->isFloatingPointTy())
    return ValTy->isX86_FP80Ty() || CmpXchg;
  return !ValTy->isIntegerTy() && !ValTy->isPointerTy();
}
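// For example, a compare-exchange on 'float' is performed on an i32 view of
// the storage, since the cmpxchg instruction does not accept floating-point
// operands; ordinary loads and stores of IEEE-format floats stay in
// floating-point form, while x86_fp80 is always handled as an integer because
// its stores do not cover the type's full in-memory width.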
RValue AtomicInfo::ConvertToValueOrAtomic(llvm::Value *Val,
                                          AggValueSlot ResultSlot,
                                          SourceLocation Loc, bool AsValue,
                                          bool CmpXchg) const {
  // Try not to in some easy cases.
  assert((Val->getType()->isIntegerTy() || Val->getType()->isPointerTy() ||
          Val->getType()->isIEEELikeFPTy()) &&
         "Expected integer, pointer or floating point value when converting "
         "result.");
  if (getEvaluationKind() == TEK_Scalar &&
      (((!LVal.isBitField() ||
         LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
        !hasPadding()) ||
       !AsValue)) {
    auto *ValTy = AsValue
                      ? CGF.ConvertTypeForMem(ValueTy)
                      : getAtomicAddress().getElementType();
    if (!shouldCastToInt(ValTy, CmpXchg)) {
      assert((!ValTy->isIntegerTy() || Val->getType() == ValTy) &&
             "Different integer types.");
      return RValue::get(CGF.EmitFromMemory(Val, ValueTy));
    }
    if (llvm::CastInst::isBitCastable(Val->getType(), ValTy))
      return RValue::get(CGF.Builder.CreateBitCast(Val, ValTy));
  }

  // Create a temporary. This needs to be big enough to hold the
  // atomic integer.
  Address Temp = Address::invalid();
  bool TempIsVolatile = false;
  if (AsValue && getEvaluationKind() == TEK_Aggregate) {
    assert(!ResultSlot.isIgnored());
    Temp = ResultSlot.getAddress();
    TempIsVolatile = ResultSlot.isVolatile();
  } else {
    Temp = CreateTempAlloca();
  }

  // Slam the integer into the temporary.
  Address CastTemp = castToAtomicIntPointer(Temp);
  CGF.Builder.CreateStore(Val, CastTemp)->setVolatile(TempIsVolatile);

  return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
}

void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                                       llvm::AtomicOrdering AO, bool) {
  // void __atomic_load(size_t size, void *mem, void *return, int order);
  CallArgList Args;
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
  Args.add(RValue::get(getAtomicPointer()), CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(AddForLoaded), CGF.getContext().VoidPtrTy);
  Args.add(
      RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
      CGF.getContext().IntTy);
  emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
}

llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
                                          bool IsVolatile, bool CmpXchg) {
  // Okay, we're doing this natively.
  Address Addr = getAtomicAddress();
  if (shouldCastToInt(Addr.getElementType(), CmpXchg))
    Addr = castToAtomicIntPointer(Addr);
  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
  Load->setAtomic(AO);

  // Other decoration.
  if (IsVolatile)
    Load->setVolatile(true);
  CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
  return Load;
}

/// An LValue is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* the LValue itself is volatile and
/// performing such an operation can be performed without a libcall.
bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
  if (!CGM.getLangOpts().MSVolatile) return false;
  AtomicInfo AI(*this, LV);
  bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
  // An atomic is inline if we don't need to use a libcall.
  bool AtomicIsInline = !AI.shouldUseLibcall();
  // MSVC doesn't seem to do this for types wider than a pointer.
  if (getContext().getTypeSize(LV.getType()) >
      getContext().getTypeSize(getContext().getIntPtrType()))
    return false;
  return IsVolatile && AtomicIsInline;
}

RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
                                       AggValueSlot Slot) {
  llvm::AtomicOrdering AO;
  bool IsVolatile = LV.isVolatileQualified();
  if (LV.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
    AO = llvm::AtomicOrdering::Acquire;
    IsVolatile = true;
  }
  return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
}
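// Note that the non-atomic branch above is effectively the /volatile:ms case
// (see LValueIsSuitableForInlineAtomic): a plain volatile load then gets
// acquire ordering, matching MSVC's semantics for volatile accesses.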
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    Address TempAddr = Address::invalid();
    if (LVal.isSimple() && !ResultSlot.isIgnored()) {
      assert(getEvaluationKind() == TEK_Aggregate);
      TempAddr = ResultSlot.getAddress();
    } else
      TempAddr = CreateTempAlloca();

    EmitAtomicLoadLibcall(TempAddr.emitRawPointer(CGF), AO, IsVolatile);

    // Okay, turn that back into the original value or whole atomic (for
    // non-simple lvalues) type.
    return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
  }

  // Okay, we're doing this natively.
  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);

  // If we're ignoring an aggregate return, don't do anything.
  if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
    return RValue::getAggregate(Address::invalid(), false);

  // Okay, turn that back into the original value or atomic (for non-simple
  // lvalues) type.
  return ConvertToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
}

/// Emit a load from an l-value of atomic type.  Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       llvm::AtomicOrdering AO, bool IsVolatile,
                                       AggValueSlot resultSlot) {
  AtomicInfo Atomics(*this, src);
  return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
                                IsVolatile);
}

/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  assert(LVal.isSimple());
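  // Only simple lvalues reach this point (see the assert above); stores to
  // bitfield or vector-element atomics are emitted as update operations
  // further below.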
  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding.  Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    LValue Dest = CGF.MakeAddrLValue(getAtomicAddress(), getAtomicType());
    LValue Src = CGF.MakeAddrLValue(rvalue.getAggregateAddress(),
                                    getAtomicType());
    bool IsVolatile = rvalue.isVolatileQualified() ||
                      LVal.isVolatileQualified();
    CGF.EmitAggregateCopy(Dest, Src, getAtomicType(),
                          AggValueSlot::DoesNotOverlap, IsVolatile);
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary();

  // Drill past the padding if present.
  LValue TempLVal = projectValue();

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
  }
}

/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
Address AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory, and EmitAtomicStore
  // requires them to be values of the atomic type.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddress();

  // Otherwise, make a temporary and materialize into it.
  LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
  AtomicInfo Atomics(CGF, TempLV);
  Atomics.emitCopyIntoMemory(rvalue);
  return TempLV.getAddress();
}

llvm::Value *AtomicInfo::getScalarRValValueOrNull(RValue RVal) const {
  if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple()))
    return RVal.getScalarVal();
  return nullptr;
}

llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal, bool CmpXchg) const {
  // If we've got a scalar value of the right size, try to avoid going
  // through memory.  Floats get cast if needed by AtomicExpandPass.
  if (llvm::Value *Value = getScalarRValValueOrNull(RVal)) {
    if (!shouldCastToInt(Value->getType(), CmpXchg))
      return CGF.EmitToMemory(Value, ValueTy);
    else {
      llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
          CGF.getLLVMContext(),
          LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
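      // A scalar whose type the instruction cannot take directly (always
      // x86_fp80; any floating point type when CmpXchg is set) is
      // reinterpreted as an integer of the same width.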
      if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
        return CGF.Builder.CreateBitCast(Value, InputIntTy);
    }
  }
  // Otherwise, we need to go through memory.
  // Put the r-value in memory.
  Address Addr = materializeRValue(RVal);

  // Cast the temporary to the atomic int type and pull a value out.
  Addr = castToAtomicIntPointer(Addr);
  return CGF.Builder.CreateLoad(Addr);
}

std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
    llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
  // Do the atomic compare-and-exchange.
  Address Addr = getAtomicAddressAsAtomicIntPointer();
  auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr, ExpectedVal, DesiredVal,
                                               Success, Failure);
  // Other decoration.
  Inst->setVolatile(LVal.isVolatileQualified());
  Inst->setWeak(IsWeak);

  // Okay, turn that back into the original value type.
  auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
  auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
  return std::make_pair(PreviousVal, SuccessFailureVal);
}

llvm::Value *
AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
                                             llvm::Value *DesiredAddr,
                                             llvm::AtomicOrdering Success,
                                             llvm::AtomicOrdering Failure) {
  // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
  //                                void *desired, int success, int failure);
  CallArgList Args;
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
  Args.add(RValue::get(getAtomicPointer()), CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(ExpectedAddr), CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(DesiredAddr), CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(
               llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
           CGF.getContext().IntTy);
  Args.add(RValue::get(
               llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),
           CGF.getContext().IntTy);
  auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
                                              CGF.getContext().BoolTy, Args);

  return SuccessFailureRVal.getScalarVal();
}

std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
    RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
    llvm::AtomicOrdering Failure, bool IsWeak) {
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    // Produce a source address.
    Address ExpectedAddr = materializeRValue(Expected);
    llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
    llvm::Value *DesiredPtr = materializeRValue(Desired).emitRawPointer(CGF);
    auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr,
                                                 Success, Failure);
    return std::make_pair(
        convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
                                  SourceLocation(), /*AsValue=*/false),
        Res);
  }

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  auto *ExpectedVal = convertRValueToInt(Expected, /*CmpXchg=*/true);
  auto *DesiredVal = convertRValueToInt(Desired, /*CmpXchg=*/true);
  auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
                                         Failure, IsWeak);
  return std::make_pair(
      ConvertToValueOrAtomic(Res.first, AggValueSlot::ignored(),
                             SourceLocation(), /*AsValue=*/false,
                             /*CmpXchg=*/true),
      Res.second);
}
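// The EmitAtomicUpdate* helpers below all emit the same read-modify-write
// loop; roughly (a sketch, not literal IR):
//
//   old = atomic load from obj
// atomic_cont:
//   desired = UpdateOp(old)           // or a caller-supplied fixed rvalue
//   (old, ok) = cmpxchg(obj, old, desired)
//   br ok, atomic_exit, atomic_cont
// atomic_exit:
//
// EmitAtomicUpdateValue computes `desired` from `old`, taking care to project
// bitfield and vector-element lvalues out of the containing atomic word.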
static void
EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
                      const llvm::function_ref<RValue(RValue)> &UpdateOp,
                      Address DesiredAddr) {
  RValue UpRVal;
  LValue AtomicLVal = Atomics.getAtomicLValue();
  LValue DesiredLVal;
  if (AtomicLVal.isSimple()) {
    UpRVal = OldRVal;
    DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
  } else {
    // Build new lvalue for temp address.
    Address Ptr = Atomics.materializeRValue(OldRVal);
    LValue UpdateLVal;
    if (AtomicLVal.isBitField()) {
      UpdateLVal =
          LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
                               AtomicLVal.getType(),
                               AtomicLVal.getBaseInfo(),
                               AtomicLVal.getTBAAInfo());
      DesiredLVal =
          LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
                               AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
                               AtomicLVal.getTBAAInfo());
    } else if (AtomicLVal.isVectorElt()) {
      UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
                                         AtomicLVal.getType(),
                                         AtomicLVal.getBaseInfo(),
                                         AtomicLVal.getTBAAInfo());
      DesiredLVal = LValue::MakeVectorElt(
          DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
          AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
    } else {
      assert(AtomicLVal.isExtVectorElt());
      UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
                                            AtomicLVal.getType(),
                                            AtomicLVal.getBaseInfo(),
                                            AtomicLVal.getTBAAInfo());
      DesiredLVal = LValue::MakeExtVectorElt(
          DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
          AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
    }
    UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
  }
  // Store new value in the corresponding memory area.
  RValue NewRVal = UpdateOp(UpRVal);
  if (NewRVal.isScalar()) {
    CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
  } else {
    assert(NewRVal.isComplex());
    CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
                           /*isInit=*/false);
  }
}

void AtomicInfo::EmitAtomicUpdateLibcall(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.emitRawPointer(CGF), AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  Address DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
  }
  auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
                                           AggValueSlot::ignored(),
                                           SourceLocation(), /*AsValue=*/false);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
  llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
  llvm::Value *DesiredPtr = DesiredAddr.emitRawPointer(CGF);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

void AtomicInfo::EmitAtomicUpdateOp(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile, /*CmpXchg=*/true);
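  // The loop carries the previously observed value in a PHI: the entry block
  // feeds it the initial load, and each failed cmpxchg feeds back the value
  // it actually found.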
  // For non-simple lvalues perform compare-and-swap procedure.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr =
      shouldCastToInt(NewAtomicAddr.getElementType(), /*CmpXchg=*/true)
          ? castToAtomicIntPointer(NewAtomicAddr)
          : NewAtomicAddr;

  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
  }
  auto OldRVal = ConvertToValueOrAtomic(PHI, AggValueSlot::ignored(),
                                        SourceLocation(), /*AsValue=*/false,
                                        /*CmpXchg=*/true);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
  // Try to write the new value using a cmpxchg operation.
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
                                  RValue UpdateRVal, Address DesiredAddr) {
  LValue AtomicLVal = Atomics.getAtomicLValue();
  LValue DesiredLVal;
  // Build new lvalue for temp address.
  if (AtomicLVal.isBitField()) {
    DesiredLVal =
        LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
                             AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
                             AtomicLVal.getTBAAInfo());
  } else if (AtomicLVal.isVectorElt()) {
    DesiredLVal =
        LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
                              AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
                              AtomicLVal.getTBAAInfo());
  } else {
    assert(AtomicLVal.isExtVectorElt());
    DesiredLVal = LValue::MakeExtVectorElt(
        DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
        AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
  }
  // Store new value in the corresponding memory area.
  assert(UpdateRVal.isScalar());
  CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
}

void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                                         RValue UpdateRVal, bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.emitRawPointer(CGF), AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  Address DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
  llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
  llvm::Value *DesiredPtr = DesiredAddr.emitRawPointer(CGF);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
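// Native-instruction variant of the fixed-rvalue update above: the new value
// is a plain RValue rather than the result of an update callback. This is the
// path EmitAtomicStore takes for non-simple (bitfield, vector element)
// atomic lvalues.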
void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile, /*CmpXchg=*/true);
  // For non-simple lvalues perform compare-and-swap procedure.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr = castToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
  // Try to write the new value using a cmpxchg operation.
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

void AtomicInfo::EmitAtomicUpdate(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
  }
}

void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                  bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
  }
}

void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
                                      bool isInit) {
  bool IsVolatile = lvalue.isVolatileQualified();
  llvm::AtomicOrdering AO;
  if (lvalue.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
    AO = llvm::AtomicOrdering::Release;
    IsVolatile = true;
  }
  return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
}

/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
                                      llvm::AtomicOrdering AO, bool IsVolatile,
                                      bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddress().getElementType() ==
             dest.getAddress().getElementType());

  AtomicInfo atomics(*this, dest);
  LValue LVal = atomics.getAtomicLValue();

  // If this is an initialization, just put the value there normally.
  if (LVal.isSimple()) {
    if (isInit) {
      atomics.emitCopyIntoMemory(rvalue);
      return;
    }

    // Check whether we should use a library call.
    if (atomics.shouldUseLibcall()) {
      // Produce a source address.
      Address srcAddr = atomics.materializeRValue(rvalue);
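      // The libcall takes the value by address, so the r-value was first
      // materialized into a temporary of the atomic type's full size.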
      // void __atomic_store(size_t size, void *mem, void *val, int order)
      CallArgList args;
      args.add(RValue::get(atomics.getAtomicSizeValue()),
               getContext().getSizeType());
      args.add(RValue::get(atomics.getAtomicPointer()), getContext().VoidPtrTy);
      args.add(RValue::get(srcAddr.emitRawPointer(*this)),
               getContext().VoidPtrTy);
      args.add(
          RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
          getContext().IntTy);
      emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
      return;
    }

    // Okay, we're doing this natively.
    llvm::Value *ValToStore = atomics.convertRValueToInt(rvalue);

    // Do the atomic store.
    Address Addr = atomics.getAtomicAddress();
    if (llvm::Value *Value = atomics.getScalarRValValueOrNull(rvalue))
      if (shouldCastToInt(Value->getType(), /*CmpXchg=*/false)) {
        Addr = atomics.castToAtomicIntPointer(Addr);
        ValToStore = Builder.CreateIntCast(ValToStore, Addr.getElementType(),
                                           /*isSigned=*/false);
      }
    llvm::StoreInst *store = Builder.CreateStore(ValToStore, Addr);

    // A store cannot carry an acquire component; demote such orderings.
    if (AO == llvm::AtomicOrdering::Acquire)
      AO = llvm::AtomicOrdering::Monotonic;
    else if (AO == llvm::AtomicOrdering::AcquireRelease)
      AO = llvm::AtomicOrdering::Release;
    // Initializations don't need to be atomic.
    if (!isInit)
      store->setAtomic(AO);

    // Other decoration.
    if (IsVolatile)
      store->setVolatile(true);
    CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
    return;
  }

  // Emit simple atomic update operation.
  atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
}

/// Emit a compare-and-exchange op for an atomic type.
std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
    LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
    AggValueSlot Slot) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!Expected.isAggregate() ||
         Expected.getAggregateAddress().getElementType() ==
             Obj.getAddress().getElementType());
  assert(!Desired.isAggregate() ||
         Desired.getAggregateAddress().getElementType() ==
             Obj.getAddress().getElementType());
  AtomicInfo Atomics(*this, Obj);

  return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
                                           IsWeak);
}

void CodeGenFunction::EmitAtomicUpdate(
    LValue LVal, llvm::AtomicOrdering AO,
    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
  AtomicInfo Atomics(*this, LVal);
  Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
}

void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value));
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value));
    return;
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary();
      dest = atomics.projectValue();
    }
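    // At this point `dest` refers to the value portion of the atomic object,
    // and `Zeroed` records whether its padding has already been cleared.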
    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(
        dest, AggValueSlot::IsNotDestructed,
        AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
        AggValueSlot::DoesNotOverlap,
        Zeroed ? AggValueSlot::IsZeroed : AggValueSlot::IsNotZeroed);

    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}