GitHub Repository: freebsd/freebsd-src
Path: blob/main/contrib/llvm-project/clang/lib/Analysis/ThreadSafety.cpp
1
//===- ThreadSafety.cpp ---------------------------------------------------===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
//
9
// An intra-procedural analysis for thread safety (e.g. deadlocks and race
10
// conditions), based on an annotation system.
11
//
12
// See http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
13
// for more information.
14
//
15
//===----------------------------------------------------------------------===//
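//
// For illustration, a minimal sketch of the kind of code this analysis
// checks, assuming the conventional annotation macros (GUARDED_BY, REQUIRES,
// ...) described in the documentation above; the names are illustrative and
// not part of this file:
//
//   Mutex Mu;
//   int Data GUARDED_BY(Mu);
//
//   void Bad()               { Data = 1; }   // warning: requires holding Mu
//   void Good() REQUIRES(Mu) { Data = 1; }   // ok: caller must hold Mu
//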
16
17
#include "clang/Analysis/Analyses/ThreadSafety.h"
18
#include "clang/AST/Attr.h"
19
#include "clang/AST/Decl.h"
20
#include "clang/AST/DeclCXX.h"
21
#include "clang/AST/DeclGroup.h"
22
#include "clang/AST/Expr.h"
23
#include "clang/AST/ExprCXX.h"
24
#include "clang/AST/OperationKinds.h"
25
#include "clang/AST/Stmt.h"
26
#include "clang/AST/StmtVisitor.h"
27
#include "clang/AST/Type.h"
28
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
29
#include "clang/Analysis/Analyses/ThreadSafetyCommon.h"
30
#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
31
#include "clang/Analysis/Analyses/ThreadSafetyTraverse.h"
32
#include "clang/Analysis/Analyses/ThreadSafetyUtil.h"
33
#include "clang/Analysis/AnalysisDeclContext.h"
34
#include "clang/Analysis/CFG.h"
35
#include "clang/Basic/Builtins.h"
36
#include "clang/Basic/LLVM.h"
37
#include "clang/Basic/OperatorKinds.h"
38
#include "clang/Basic/SourceLocation.h"
39
#include "clang/Basic/Specifiers.h"
40
#include "llvm/ADT/ArrayRef.h"
41
#include "llvm/ADT/DenseMap.h"
42
#include "llvm/ADT/ImmutableMap.h"
43
#include "llvm/ADT/STLExtras.h"
44
#include "llvm/ADT/SmallVector.h"
45
#include "llvm/ADT/StringRef.h"
46
#include "llvm/Support/Allocator.h"
47
#include "llvm/Support/Casting.h"
48
#include "llvm/Support/ErrorHandling.h"
49
#include "llvm/Support/raw_ostream.h"
50
#include <algorithm>
51
#include <cassert>
52
#include <functional>
53
#include <iterator>
54
#include <memory>
55
#include <optional>
56
#include <string>
57
#include <type_traits>
58
#include <utility>
59
#include <vector>
60
61
using namespace clang;
62
using namespace threadSafety;
63
64
// Key method definition
65
ThreadSafetyHandler::~ThreadSafetyHandler() = default;
66
67
/// Issue a warning about an invalid lock expression
68
static void warnInvalidLock(ThreadSafetyHandler &Handler,
69
const Expr *MutexExp, const NamedDecl *D,
70
const Expr *DeclExp, StringRef Kind) {
71
SourceLocation Loc;
72
if (DeclExp)
73
Loc = DeclExp->getExprLoc();
74
75
// FIXME: add a note about the attribute location in MutexExp or D
76
if (Loc.isValid())
77
Handler.handleInvalidLockExp(Loc);
78
}
79
80
namespace {
81
82
/// A set of CapabilityExpr objects, which are compiled from thread safety
83
/// attributes on a function.
84
class CapExprSet : public SmallVector<CapabilityExpr, 4> {
85
public:
86
/// Push M onto list, but discard duplicates.
87
void push_back_nodup(const CapabilityExpr &CapE) {
88
if (llvm::none_of(*this, [=](const CapabilityExpr &CapE2) {
89
return CapE.equals(CapE2);
90
}))
91
push_back(CapE);
92
}
93
};
94
95
class FactManager;
96
class FactSet;
97
98
/// This is a helper class that stores a fact that is known at a
99
/// particular point in program execution. Currently, a fact is a capability,
100
/// along with additional information, such as where it was acquired, whether
101
/// it is exclusive or shared, etc.
102
///
103
/// FIXME: this analysis does not currently support re-entrant locking.
104
class FactEntry : public CapabilityExpr {
105
public:
106
/// Where a fact comes from.
107
enum SourceKind {
108
Acquired, ///< The fact has been directly acquired.
109
Asserted, ///< The fact has been asserted to be held.
110
Declared, ///< The fact is assumed to be held by callers.
111
Managed, ///< The fact has been acquired through a scoped capability.
112
};
113
114
private:
115
/// Exclusive or shared.
116
LockKind LKind : 8;
117
118
/// How it was acquired.
119
SourceKind Source : 8;
120
121
/// Where it was acquired.
122
SourceLocation AcquireLoc;
123
124
public:
125
FactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
126
SourceKind Src)
127
: CapabilityExpr(CE), LKind(LK), Source(Src), AcquireLoc(Loc) {}
128
virtual ~FactEntry() = default;
129
130
LockKind kind() const { return LKind; }
131
SourceLocation loc() const { return AcquireLoc; }
132
133
bool asserted() const { return Source == Asserted; }
134
bool declared() const { return Source == Declared; }
135
bool managed() const { return Source == Managed; }
136
137
virtual void
138
handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
139
SourceLocation JoinLoc, LockErrorKind LEK,
140
ThreadSafetyHandler &Handler) const = 0;
141
virtual void handleLock(FactSet &FSet, FactManager &FactMan,
142
const FactEntry &entry,
143
ThreadSafetyHandler &Handler) const = 0;
144
virtual void handleUnlock(FactSet &FSet, FactManager &FactMan,
145
const CapabilityExpr &Cp, SourceLocation UnlockLoc,
146
bool FullyRemove,
147
ThreadSafetyHandler &Handler) const = 0;
148
149
// Return true if LKind >= LK, where exclusive > shared
150
bool isAtLeast(LockKind LK) const {
151
return (LKind == LK_Exclusive) || (LK == LK_Shared);
152
}
153
};
154
155
using FactID = unsigned short;
156
157
/// FactManager manages the memory for all facts that are created during
158
/// the analysis of a single routine.
159
class FactManager {
160
private:
161
std::vector<std::unique_ptr<const FactEntry>> Facts;
162
163
public:
164
FactID newFact(std::unique_ptr<FactEntry> Entry) {
165
Facts.push_back(std::move(Entry));
166
return static_cast<unsigned short>(Facts.size() - 1);
167
}
168
169
const FactEntry &operator[](FactID F) const { return *Facts[F]; }
170
};
171
172
/// A FactSet is the set of facts that are known to be true at a
173
/// particular program point. FactSets must be small, because they are
174
/// frequently copied, and are thus implemented as a set of indices into a
175
/// table maintained by a FactManager. A typical FactSet only holds 1 or 2
176
/// locks, so we can get away with doing a linear search for lookup. Note
177
/// that a hashtable or map is inappropriate in this case, because lookups
178
/// may involve partial pattern matches, rather than exact matches.
179
class FactSet {
180
private:
181
using FactVec = SmallVector<FactID, 4>;
182
183
FactVec FactIDs;
184
185
public:
186
using iterator = FactVec::iterator;
187
using const_iterator = FactVec::const_iterator;
188
189
iterator begin() { return FactIDs.begin(); }
190
const_iterator begin() const { return FactIDs.begin(); }
191
192
iterator end() { return FactIDs.end(); }
193
const_iterator end() const { return FactIDs.end(); }
194
195
bool isEmpty() const { return FactIDs.size() == 0; }
196
197
// Return true if the set contains only negative facts
198
bool isEmpty(FactManager &FactMan) const {
199
for (const auto FID : *this) {
200
if (!FactMan[FID].negative())
201
return false;
202
}
203
return true;
204
}
205
206
void addLockByID(FactID ID) { FactIDs.push_back(ID); }
207
208
FactID addLock(FactManager &FM, std::unique_ptr<FactEntry> Entry) {
209
FactID F = FM.newFact(std::move(Entry));
210
FactIDs.push_back(F);
211
return F;
212
}
213
214
bool removeLock(FactManager& FM, const CapabilityExpr &CapE) {
215
unsigned n = FactIDs.size();
216
if (n == 0)
217
return false;
218
219
for (unsigned i = 0; i < n-1; ++i) {
220
if (FM[FactIDs[i]].matches(CapE)) {
221
FactIDs[i] = FactIDs[n-1];
222
FactIDs.pop_back();
223
return true;
224
}
225
}
226
if (FM[FactIDs[n-1]].matches(CapE)) {
227
FactIDs.pop_back();
228
return true;
229
}
230
return false;
231
}
232
233
iterator findLockIter(FactManager &FM, const CapabilityExpr &CapE) {
234
return std::find_if(begin(), end(), [&](FactID ID) {
235
return FM[ID].matches(CapE);
236
});
237
}
238
239
const FactEntry *findLock(FactManager &FM, const CapabilityExpr &CapE) const {
240
auto I = std::find_if(begin(), end(), [&](FactID ID) {
241
return FM[ID].matches(CapE);
242
});
243
return I != end() ? &FM[*I] : nullptr;
244
}
245
246
const FactEntry *findLockUniv(FactManager &FM,
247
const CapabilityExpr &CapE) const {
248
auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
249
return FM[ID].matchesUniv(CapE);
250
});
251
return I != end() ? &FM[*I] : nullptr;
252
}
253
254
const FactEntry *findPartialMatch(FactManager &FM,
255
const CapabilityExpr &CapE) const {
256
auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
257
return FM[ID].partiallyMatches(CapE);
258
});
259
return I != end() ? &FM[*I] : nullptr;
260
}
261
262
bool containsMutexDecl(FactManager &FM, const ValueDecl* Vd) const {
263
auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
264
return FM[ID].valueDecl() == Vd;
265
});
266
return I != end();
267
}
268
};
269
270
class ThreadSafetyAnalyzer;
271
272
} // namespace
273
274
namespace clang {
275
namespace threadSafety {
276
277
class BeforeSet {
278
private:
279
using BeforeVect = SmallVector<const ValueDecl *, 4>;
280
281
struct BeforeInfo {
282
BeforeVect Vect;
283
int Visited = 0;
284
285
BeforeInfo() = default;
286
BeforeInfo(BeforeInfo &&) = default;
287
};
288
289
using BeforeMap =
290
llvm::DenseMap<const ValueDecl *, std::unique_ptr<BeforeInfo>>;
291
using CycleMap = llvm::DenseMap<const ValueDecl *, bool>;
292
293
public:
294
BeforeSet() = default;
295
296
BeforeInfo* insertAttrExprs(const ValueDecl* Vd,
297
ThreadSafetyAnalyzer& Analyzer);
298
299
BeforeInfo *getBeforeInfoForDecl(const ValueDecl *Vd,
300
ThreadSafetyAnalyzer &Analyzer);
301
302
void checkBeforeAfter(const ValueDecl* Vd,
303
const FactSet& FSet,
304
ThreadSafetyAnalyzer& Analyzer,
305
SourceLocation Loc, StringRef CapKind);
306
307
private:
308
BeforeMap BMap;
309
CycleMap CycMap;
310
};
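
// For illustration, a sketch of the lock-ordering attributes that BeforeSet
// tracks, assuming the ACQUIRED_AFTER/ACQUIRED_BEFORE macros from the
// documentation (the names below are illustrative):
//
//   Mutex M1;
//   Mutex M2 ACQUIRED_AFTER(M1);
//
//   void f() {
//     M2.Lock();
//     M1.Lock();   // warning: M1 must be acquired before M2
//   }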
311
312
} // namespace threadSafety
313
} // namespace clang
314
315
namespace {
316
317
class LocalVariableMap;
318
319
using LocalVarContext = llvm::ImmutableMap<const NamedDecl *, unsigned>;
320
321
/// A side (entry or exit) of a CFG node.
322
enum CFGBlockSide { CBS_Entry, CBS_Exit };
323
324
/// CFGBlockInfo is a struct which contains all the information that is
325
/// maintained for each block in the CFG. See LocalVariableMap for more
326
/// information about the contexts.
327
struct CFGBlockInfo {
328
// Lockset held at entry to block
329
FactSet EntrySet;
330
331
// Lockset held at exit from block
332
FactSet ExitSet;
333
334
// Context held at entry to block
335
LocalVarContext EntryContext;
336
337
// Context held at exit from block
338
LocalVarContext ExitContext;
339
340
// Location of first statement in block
341
SourceLocation EntryLoc;
342
343
// Location of last statement in block.
344
SourceLocation ExitLoc;
345
346
// Used to replay contexts later
347
unsigned EntryIndex;
348
349
// Is this block reachable?
350
bool Reachable = false;
351
352
const FactSet &getSet(CFGBlockSide Side) const {
353
return Side == CBS_Entry ? EntrySet : ExitSet;
354
}
355
356
SourceLocation getLocation(CFGBlockSide Side) const {
357
return Side == CBS_Entry ? EntryLoc : ExitLoc;
358
}
359
360
private:
361
CFGBlockInfo(LocalVarContext EmptyCtx)
362
: EntryContext(EmptyCtx), ExitContext(EmptyCtx) {}
363
364
public:
365
static CFGBlockInfo getEmptyBlockInfo(LocalVariableMap &M);
366
};
367
368
// A LocalVariableMap maintains a map from local variables to their currently
369
// valid definitions. It provides SSA-like functionality when traversing the
370
// CFG. Like SSA, each definition or assignment to a variable is assigned a
371
// unique name (an integer), which acts as the SSA name for that definition.
372
// The total set of names is shared among all CFG basic blocks.
373
// Unlike SSA, we do not rewrite expressions to replace local variable DeclRefs
374
// with their SSA-names. Instead, we compute a Context for each point in the
375
// code, which maps local variables to the appropriate SSA-name. This map
376
// changes with each assignment.
377
//
378
// The map is computed in a single pass over the CFG. Subsequent analyses can
379
// then query the map to find the appropriate Context for a statement, and use
380
// that Context to look up the definitions of variables.
381
class LocalVariableMap {
382
public:
383
using Context = LocalVarContext;
384
385
/// A VarDefinition consists of an expression, representing the value of the
386
/// variable, along with the context in which that expression should be
387
/// interpreted. A reference VarDefinition does not itself contain this
388
/// information, but instead contains a pointer to a previous VarDefinition.
389
struct VarDefinition {
390
public:
391
friend class LocalVariableMap;
392
393
// The original declaration for this variable.
394
const NamedDecl *Dec;
395
396
// The expression for this variable, OR
397
const Expr *Exp = nullptr;
398
399
// Reference to another VarDefinition
400
unsigned Ref = 0;
401
402
// The map with which Exp should be interpreted.
403
Context Ctx;
404
405
bool isReference() const { return !Exp; }
406
407
private:
408
// Create ordinary variable definition
409
VarDefinition(const NamedDecl *D, const Expr *E, Context C)
410
: Dec(D), Exp(E), Ctx(C) {}
411
412
// Create reference to previous definition
413
VarDefinition(const NamedDecl *D, unsigned R, Context C)
414
: Dec(D), Ref(R), Ctx(C) {}
415
};
416
417
private:
418
Context::Factory ContextFactory;
419
std::vector<VarDefinition> VarDefinitions;
420
std::vector<std::pair<const Stmt *, Context>> SavedContexts;
421
422
public:
423
LocalVariableMap() {
424
// index 0 is a placeholder for undefined variables (aka phi-nodes).
425
VarDefinitions.push_back(VarDefinition(nullptr, 0u, getEmptyContext()));
426
}
427
428
/// Look up a definition, within the given context.
429
const VarDefinition* lookup(const NamedDecl *D, Context Ctx) {
430
const unsigned *i = Ctx.lookup(D);
431
if (!i)
432
return nullptr;
433
assert(*i < VarDefinitions.size());
434
return &VarDefinitions[*i];
435
}
436
437
/// Look up the definition for D within the given context. Returns
438
/// NULL if the expression is not statically known. If successful, also
439
/// modifies Ctx to hold the context of the returned Expr.
440
const Expr* lookupExpr(const NamedDecl *D, Context &Ctx) {
441
const unsigned *P = Ctx.lookup(D);
442
if (!P)
443
return nullptr;
444
445
unsigned i = *P;
446
while (i > 0) {
447
if (VarDefinitions[i].Exp) {
448
Ctx = VarDefinitions[i].Ctx;
449
return VarDefinitions[i].Exp;
450
}
451
i = VarDefinitions[i].Ref;
452
}
453
return nullptr;
454
}
455
456
Context getEmptyContext() { return ContextFactory.getEmptyMap(); }
457
458
/// Return the next context after processing S. This function is used by
459
/// clients of the class to get the appropriate context when traversing the
460
/// CFG. It must be called for every assignment or DeclStmt.
461
Context getNextContext(unsigned &CtxIndex, const Stmt *S, Context C) {
462
if (SavedContexts[CtxIndex+1].first == S) {
463
CtxIndex++;
464
Context Result = SavedContexts[CtxIndex].second;
465
return Result;
466
}
467
return C;
468
}
469
470
void dumpVarDefinitionName(unsigned i) {
471
if (i == 0) {
472
llvm::errs() << "Undefined";
473
return;
474
}
475
const NamedDecl *Dec = VarDefinitions[i].Dec;
476
if (!Dec) {
477
llvm::errs() << "<<NULL>>";
478
return;
479
}
480
Dec->printName(llvm::errs());
481
llvm::errs() << "." << i << " " << ((const void*) Dec);
482
}
483
484
/// Dumps an ASCII representation of the variable map to llvm::errs()
485
void dump() {
486
for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) {
487
const Expr *Exp = VarDefinitions[i].Exp;
488
unsigned Ref = VarDefinitions[i].Ref;
489
490
dumpVarDefinitionName(i);
491
llvm::errs() << " = ";
492
if (Exp) Exp->dump();
493
else {
494
dumpVarDefinitionName(Ref);
495
llvm::errs() << "\n";
496
}
497
}
498
}
499
500
/// Dumps an ASCII representation of a Context to llvm::errs()
501
void dumpContext(Context C) {
502
for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
503
const NamedDecl *D = I.getKey();
504
D->printName(llvm::errs());
505
llvm::errs() << " -> ";
506
dumpVarDefinitionName(I.getData());
507
llvm::errs() << "\n";
508
}
509
}
510
511
/// Builds the variable map.
512
void traverseCFG(CFG *CFGraph, const PostOrderCFGView *SortedGraph,
513
std::vector<CFGBlockInfo> &BlockInfo);
514
515
protected:
516
friend class VarMapBuilder;
517
518
// Get the current context index
519
unsigned getContextIndex() { return SavedContexts.size()-1; }
520
521
// Save the current context for later replay
522
void saveContext(const Stmt *S, Context C) {
523
SavedContexts.push_back(std::make_pair(S, C));
524
}
525
526
// Adds a new definition to the given context, and returns a new context.
527
// This method should be called when declaring a new variable.
528
Context addDefinition(const NamedDecl *D, const Expr *Exp, Context Ctx) {
529
assert(!Ctx.contains(D));
530
unsigned newID = VarDefinitions.size();
531
Context NewCtx = ContextFactory.add(Ctx, D, newID);
532
VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
533
return NewCtx;
534
}
535
536
// Add a new reference to an existing definition.
537
Context addReference(const NamedDecl *D, unsigned i, Context Ctx) {
538
unsigned newID = VarDefinitions.size();
539
Context NewCtx = ContextFactory.add(Ctx, D, newID);
540
VarDefinitions.push_back(VarDefinition(D, i, Ctx));
541
return NewCtx;
542
}
543
544
// Updates a definition only if that definition is already in the map.
545
// This method should be called when assigning to an existing variable.
546
Context updateDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) {
547
if (Ctx.contains(D)) {
548
unsigned newID = VarDefinitions.size();
549
Context NewCtx = ContextFactory.remove(Ctx, D);
550
NewCtx = ContextFactory.add(NewCtx, D, newID);
551
VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
552
return NewCtx;
553
}
554
return Ctx;
555
}
556
557
// Removes a definition from the context, but keeps the variable name
558
// as a valid variable. The index 0 is a placeholder for cleared definitions.
559
Context clearDefinition(const NamedDecl *D, Context Ctx) {
560
Context NewCtx = Ctx;
561
if (NewCtx.contains(D)) {
562
NewCtx = ContextFactory.remove(NewCtx, D);
563
NewCtx = ContextFactory.add(NewCtx, D, 0);
564
}
565
return NewCtx;
566
}
567
568
// Remove a definition entirely from the context.
569
Context removeDefinition(const NamedDecl *D, Context Ctx) {
570
Context NewCtx = Ctx;
571
if (NewCtx.contains(D)) {
572
NewCtx = ContextFactory.remove(NewCtx, D);
573
}
574
return NewCtx;
575
}
576
577
Context intersectContexts(Context C1, Context C2);
578
Context createReferenceContext(Context C);
579
void intersectBackEdge(Context C1, Context C2);
580
};
581
582
} // namespace
583
584
// This has to be defined after LocalVariableMap.
585
CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(LocalVariableMap &M) {
586
return CFGBlockInfo(M.getEmptyContext());
587
}
588
589
namespace {
590
591
/// Visitor which builds a LocalVariableMap
592
class VarMapBuilder : public ConstStmtVisitor<VarMapBuilder> {
593
public:
594
LocalVariableMap* VMap;
595
LocalVariableMap::Context Ctx;
596
597
VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C)
598
: VMap(VM), Ctx(C) {}
599
600
void VisitDeclStmt(const DeclStmt *S);
601
void VisitBinaryOperator(const BinaryOperator *BO);
602
};
603
604
} // namespace
605
606
// Add new local variables to the variable map
607
void VarMapBuilder::VisitDeclStmt(const DeclStmt *S) {
608
bool modifiedCtx = false;
609
const DeclGroupRef DGrp = S->getDeclGroup();
610
for (const auto *D : DGrp) {
611
if (const auto *VD = dyn_cast_or_null<VarDecl>(D)) {
612
const Expr *E = VD->getInit();
613
614
// Add local variables with trivial type to the variable map
615
QualType T = VD->getType();
616
if (T.isTrivialType(VD->getASTContext())) {
617
Ctx = VMap->addDefinition(VD, E, Ctx);
618
modifiedCtx = true;
619
}
620
}
621
}
622
if (modifiedCtx)
623
VMap->saveContext(S, Ctx);
624
}
625
626
// Update local variable definitions in variable map
627
void VarMapBuilder::VisitBinaryOperator(const BinaryOperator *BO) {
628
if (!BO->isAssignmentOp())
629
return;
630
631
Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();
632
633
// Update the variable map and current context.
634
if (const auto *DRE = dyn_cast<DeclRefExpr>(LHSExp)) {
635
const ValueDecl *VDec = DRE->getDecl();
636
if (Ctx.lookup(VDec)) {
637
if (BO->getOpcode() == BO_Assign)
638
Ctx = VMap->updateDefinition(VDec, BO->getRHS(), Ctx);
639
else
640
// FIXME -- handle compound assignment operators
641
Ctx = VMap->clearDefinition(VDec, Ctx);
642
VMap->saveContext(BO, Ctx);
643
}
644
}
645
}
646
647
// Computes the intersection of two contexts. The intersection is the
648
// set of variables which have the same definition in both contexts;
649
// variables with different definitions are discarded.
650
LocalVariableMap::Context
651
LocalVariableMap::intersectContexts(Context C1, Context C2) {
652
Context Result = C1;
653
for (const auto &P : C1) {
654
const NamedDecl *Dec = P.first;
655
const unsigned *i2 = C2.lookup(Dec);
656
if (!i2) // variable doesn't exist on second path
657
Result = removeDefinition(Dec, Result);
658
else if (*i2 != P.second) // variable exists, but has different definition
659
Result = clearDefinition(Dec, Result);
660
}
661
return Result;
662
}
663
664
// For every variable in C, create a new variable that refers to the
665
// definition in C. Return a new context that contains these new variables.
666
// (We use this for a naive implementation of SSA on loop back-edges.)
667
LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) {
668
Context Result = getEmptyContext();
669
for (const auto &P : C)
670
Result = addReference(P.first, P.second, Result);
671
return Result;
672
}
673
674
// This routine also takes the intersection of C1 and C2, but it does so by
675
// altering the VarDefinitions. C1 must be the result of an earlier call to
676
// createReferenceContext.
677
void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
678
for (const auto &P : C1) {
679
unsigned i1 = P.second;
680
VarDefinition *VDef = &VarDefinitions[i1];
681
assert(VDef->isReference());
682
683
const unsigned *i2 = C2.lookup(P.first);
684
if (!i2 || (*i2 != i1))
685
VDef->Ref = 0; // Mark this variable as undefined
686
}
687
}
688
689
// Traverse the CFG in topological order, so all predecessors of a block
690
// (excluding back-edges) are visited before the block itself. At
691
// each point in the code, we calculate a Context, which holds the set of
692
// variable definitions which are visible at that point in execution.
693
// Visible variables are mapped to their definitions using an array that
694
// contains all definitions.
695
//
696
// At join points in the CFG, the set is computed as the intersection of
697
// the incoming sets along each edge, E.g.
698
//
699
// { Context | VarDefinitions }
700
// int x = 0; { x -> x1 | x1 = 0 }
701
// int y = 0; { x -> x1, y -> y1 | y1 = 0, x1 = 0 }
702
// if (b) x = 1; { x -> x2, y -> y1 | x2 = 1, y1 = 0, ... }
703
// else x = 2; { x -> x3, y -> y1 | x3 = 2, x2 = 1, ... }
704
// ... { y -> y1 (x is unknown) | x3 = 2, x2 = 1, ... }
705
//
706
// This is essentially a simpler and more naive version of the standard SSA
707
// algorithm. Those definitions that remain in the intersection are from blocks
708
// that strictly dominate the current block. We do not bother to insert proper
709
// phi nodes, because they are not used in our analysis; instead, wherever
710
// a phi node would be required, we simply remove that definition from the
711
// context (E.g. x above).
712
//
713
// The initial traversal does not capture back-edges, so those need to be
714
// handled on a separate pass. Whenever the first pass encounters an
715
// incoming back edge, it duplicates the context, creating new definitions
716
// that refer back to the originals. (These correspond to places where SSA
717
// might have to insert a phi node.) On the second pass, these definitions are
718
// set to NULL if the variable has changed on the back-edge (i.e. a phi
719
// node was actually required.) E.g.
720
//
721
// { Context | VarDefinitions }
722
// int x = 0, y = 0; { x -> x1, y -> y1 | y1 = 0, x1 = 0 }
723
// while (b) { x -> x2, y -> y1 | [1st:] x2=x1; [2nd:] x2=NULL; }
724
// x = x+1; { x -> x3, y -> y1 | x3 = x2 + 1, ... }
725
// ... { y -> y1 | x3 = 2, x2 = 1, ... }
726
void LocalVariableMap::traverseCFG(CFG *CFGraph,
727
const PostOrderCFGView *SortedGraph,
728
std::vector<CFGBlockInfo> &BlockInfo) {
729
PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);
730
731
for (const auto *CurrBlock : *SortedGraph) {
732
unsigned CurrBlockID = CurrBlock->getBlockID();
733
CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];
734
735
VisitedBlocks.insert(CurrBlock);
736
737
// Calculate the entry context for the current block
738
bool HasBackEdges = false;
739
bool CtxInit = true;
740
for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
741
PE = CurrBlock->pred_end(); PI != PE; ++PI) {
742
// If *PI -> CurrBlock is a back edge, skip it.
743
if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI)) {
744
HasBackEdges = true;
745
continue;
746
}
747
748
unsigned PrevBlockID = (*PI)->getBlockID();
749
CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
750
751
if (CtxInit) {
752
CurrBlockInfo->EntryContext = PrevBlockInfo->ExitContext;
753
CtxInit = false;
754
}
755
else {
756
CurrBlockInfo->EntryContext =
757
intersectContexts(CurrBlockInfo->EntryContext,
758
PrevBlockInfo->ExitContext);
759
}
760
}
761
762
// Duplicate the context if we have back-edges, so we can call
763
// intersectBackEdge later.
764
if (HasBackEdges)
765
CurrBlockInfo->EntryContext =
766
createReferenceContext(CurrBlockInfo->EntryContext);
767
768
// Create a starting context index for the current block
769
saveContext(nullptr, CurrBlockInfo->EntryContext);
770
CurrBlockInfo->EntryIndex = getContextIndex();
771
772
// Visit all the statements in the basic block.
773
VarMapBuilder VMapBuilder(this, CurrBlockInfo->EntryContext);
774
for (const auto &BI : *CurrBlock) {
775
switch (BI.getKind()) {
776
case CFGElement::Statement: {
777
CFGStmt CS = BI.castAs<CFGStmt>();
778
VMapBuilder.Visit(CS.getStmt());
779
break;
780
}
781
default:
782
break;
783
}
784
}
785
CurrBlockInfo->ExitContext = VMapBuilder.Ctx;
786
787
// Mark variables on back edges as "unknown" if they've been changed.
788
for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
789
SE = CurrBlock->succ_end(); SI != SE; ++SI) {
790
// If CurrBlock -> *SI is *not* a back edge, skip it.
791
if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
792
continue;
793
794
CFGBlock *FirstLoopBlock = *SI;
795
Context LoopBegin = BlockInfo[FirstLoopBlock->getBlockID()].EntryContext;
796
Context LoopEnd = CurrBlockInfo->ExitContext;
797
intersectBackEdge(LoopBegin, LoopEnd);
798
}
799
}
800
801
// Put an extra entry at the end of the indexed context array
802
unsigned exitID = CFGraph->getExit().getBlockID();
803
saveContext(nullptr, BlockInfo[exitID].ExitContext);
804
}
805
806
/// Find the appropriate source locations to use when producing diagnostics for
807
/// each block in the CFG.
808
static void findBlockLocations(CFG *CFGraph,
809
const PostOrderCFGView *SortedGraph,
810
std::vector<CFGBlockInfo> &BlockInfo) {
811
for (const auto *CurrBlock : *SortedGraph) {
812
CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlock->getBlockID()];
813
814
// Find the source location of the last statement in the block, if the
815
// block is not empty.
816
if (const Stmt *S = CurrBlock->getTerminatorStmt()) {
817
CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = S->getBeginLoc();
818
} else {
819
for (CFGBlock::const_reverse_iterator BI = CurrBlock->rbegin(),
820
BE = CurrBlock->rend(); BI != BE; ++BI) {
821
// FIXME: Handle other CFGElement kinds.
822
if (std::optional<CFGStmt> CS = BI->getAs<CFGStmt>()) {
823
CurrBlockInfo->ExitLoc = CS->getStmt()->getBeginLoc();
824
break;
825
}
826
}
827
}
828
829
if (CurrBlockInfo->ExitLoc.isValid()) {
830
// This block contains at least one statement. Find the source location
831
// of the first statement in the block.
832
for (const auto &BI : *CurrBlock) {
833
// FIXME: Handle other CFGElement kinds.
834
if (std::optional<CFGStmt> CS = BI.getAs<CFGStmt>()) {
835
CurrBlockInfo->EntryLoc = CS->getStmt()->getBeginLoc();
836
break;
837
}
838
}
839
} else if (CurrBlock->pred_size() == 1 && *CurrBlock->pred_begin() &&
840
CurrBlock != &CFGraph->getExit()) {
841
// The block is empty, and has a single predecessor. Use its exit
842
// location.
843
CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
844
BlockInfo[(*CurrBlock->pred_begin())->getBlockID()].ExitLoc;
845
} else if (CurrBlock->succ_size() == 1 && *CurrBlock->succ_begin()) {
846
// The block is empty, and has a single successor. Use its entry
847
// location.
848
CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
849
BlockInfo[(*CurrBlock->succ_begin())->getBlockID()].EntryLoc;
850
}
851
}
852
}
853
854
namespace {
855
856
class LockableFactEntry : public FactEntry {
857
public:
858
LockableFactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
859
SourceKind Src = Acquired)
860
: FactEntry(CE, LK, Loc, Src) {}
861
862
void
863
handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
864
SourceLocation JoinLoc, LockErrorKind LEK,
865
ThreadSafetyHandler &Handler) const override {
866
if (!asserted() && !negative() && !isUniversal()) {
867
Handler.handleMutexHeldEndOfScope(getKind(), toString(), loc(), JoinLoc,
868
LEK);
869
}
870
}
871
872
void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
873
ThreadSafetyHandler &Handler) const override {
874
Handler.handleDoubleLock(entry.getKind(), entry.toString(), loc(),
875
entry.loc());
876
}
877
878
void handleUnlock(FactSet &FSet, FactManager &FactMan,
879
const CapabilityExpr &Cp, SourceLocation UnlockLoc,
880
bool FullyRemove,
881
ThreadSafetyHandler &Handler) const override {
882
FSet.removeLock(FactMan, Cp);
883
if (!Cp.negative()) {
884
FSet.addLock(FactMan, std::make_unique<LockableFactEntry>(
885
!Cp, LK_Exclusive, UnlockLoc));
886
}
887
}
888
};
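
// For illustration: when a capability Mu is released, handleUnlock above
// replaces the fact `Mu` with the negative capability `!Mu`, recording that
// Mu is known not to be held. A later acquisition of Mu consumes `!Mu`
// rather than (under -Wthread-safety-negative) warning that the negative
// capability was not held.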
889
890
class ScopedLockableFactEntry : public FactEntry {
891
private:
892
enum UnderlyingCapabilityKind {
893
UCK_Acquired, ///< Any kind of acquired capability.
894
UCK_ReleasedShared, ///< Shared capability that was released.
895
UCK_ReleasedExclusive, ///< Exclusive capability that was released.
896
};
897
898
struct UnderlyingCapability {
899
CapabilityExpr Cap;
900
UnderlyingCapabilityKind Kind;
901
};
902
903
SmallVector<UnderlyingCapability, 2> UnderlyingMutexes;
904
905
public:
906
ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc)
907
: FactEntry(CE, LK_Exclusive, Loc, Acquired) {}
908
909
void addLock(const CapabilityExpr &M) {
910
UnderlyingMutexes.push_back(UnderlyingCapability{M, UCK_Acquired});
911
}
912
913
void addExclusiveUnlock(const CapabilityExpr &M) {
914
UnderlyingMutexes.push_back(UnderlyingCapability{M, UCK_ReleasedExclusive});
915
}
916
917
void addSharedUnlock(const CapabilityExpr &M) {
918
UnderlyingMutexes.push_back(UnderlyingCapability{M, UCK_ReleasedShared});
919
}
920
921
void
922
handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
923
SourceLocation JoinLoc, LockErrorKind LEK,
924
ThreadSafetyHandler &Handler) const override {
925
for (const auto &UnderlyingMutex : UnderlyingMutexes) {
926
const auto *Entry = FSet.findLock(FactMan, UnderlyingMutex.Cap);
927
if ((UnderlyingMutex.Kind == UCK_Acquired && Entry) ||
928
(UnderlyingMutex.Kind != UCK_Acquired && !Entry)) {
929
// If this scoped lock manages another mutex, and if the underlying
930
// mutex is still/not held, then warn about the underlying mutex.
931
Handler.handleMutexHeldEndOfScope(UnderlyingMutex.Cap.getKind(),
932
UnderlyingMutex.Cap.toString(), loc(),
933
JoinLoc, LEK);
934
}
935
}
936
}
937
938
void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
939
ThreadSafetyHandler &Handler) const override {
940
for (const auto &UnderlyingMutex : UnderlyingMutexes) {
941
if (UnderlyingMutex.Kind == UCK_Acquired)
942
lock(FSet, FactMan, UnderlyingMutex.Cap, entry.kind(), entry.loc(),
943
&Handler);
944
else
945
unlock(FSet, FactMan, UnderlyingMutex.Cap, entry.loc(), &Handler);
946
}
947
}
948
949
void handleUnlock(FactSet &FSet, FactManager &FactMan,
950
const CapabilityExpr &Cp, SourceLocation UnlockLoc,
951
bool FullyRemove,
952
ThreadSafetyHandler &Handler) const override {
953
assert(!Cp.negative() && "Managing object cannot be negative.");
954
for (const auto &UnderlyingMutex : UnderlyingMutexes) {
955
// Remove/lock the underlying mutex if it exists/is still unlocked; warn
956
// on double unlocking/locking if we're not destroying the scoped object.
957
ThreadSafetyHandler *TSHandler = FullyRemove ? nullptr : &Handler;
958
if (UnderlyingMutex.Kind == UCK_Acquired) {
959
unlock(FSet, FactMan, UnderlyingMutex.Cap, UnlockLoc, TSHandler);
960
} else {
961
LockKind kind = UnderlyingMutex.Kind == UCK_ReleasedShared
962
? LK_Shared
963
: LK_Exclusive;
964
lock(FSet, FactMan, UnderlyingMutex.Cap, kind, UnlockLoc, TSHandler);
965
}
966
}
967
if (FullyRemove)
968
FSet.removeLock(FactMan, Cp);
969
}
970
971
private:
972
void lock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
973
LockKind kind, SourceLocation loc,
974
ThreadSafetyHandler *Handler) const {
975
if (const FactEntry *Fact = FSet.findLock(FactMan, Cp)) {
976
if (Handler)
977
Handler->handleDoubleLock(Cp.getKind(), Cp.toString(), Fact->loc(),
978
loc);
979
} else {
980
FSet.removeLock(FactMan, !Cp);
981
FSet.addLock(FactMan,
982
std::make_unique<LockableFactEntry>(Cp, kind, loc, Managed));
983
}
984
}
985
986
void unlock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
987
SourceLocation loc, ThreadSafetyHandler *Handler) const {
988
if (FSet.findLock(FactMan, Cp)) {
989
FSet.removeLock(FactMan, Cp);
990
FSet.addLock(FactMan, std::make_unique<LockableFactEntry>(
991
!Cp, LK_Exclusive, loc));
992
} else if (Handler) {
993
SourceLocation PrevLoc;
994
if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp))
995
PrevLoc = Neg->loc();
996
Handler->handleUnmatchedUnlock(Cp.getKind(), Cp.toString(), loc, PrevLoc);
997
}
998
}
999
};
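
// For illustration, the kind of RAII guard that ScopedLockableFactEntry
// models, assuming the SCOPED_CAPABILITY/ACQUIRE/RELEASE macros from the
// documentation (names are illustrative):
//
//   class SCOPED_CAPABILITY MutexLocker {
//   public:
//     MutexLocker(Mutex *Mu) ACQUIRE(Mu);
//     ~MutexLocker() RELEASE();
//   };
//
// Constructing the guard adds both the guard object and its underlying
// mutex to the lockset; destroying it releases the underlying mutex again.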
1000
1001
/// Class which implements the core thread safety analysis routines.
1002
class ThreadSafetyAnalyzer {
1003
friend class BuildLockset;
1004
friend class threadSafety::BeforeSet;
1005
1006
llvm::BumpPtrAllocator Bpa;
1007
threadSafety::til::MemRegionRef Arena;
1008
threadSafety::SExprBuilder SxBuilder;
1009
1010
ThreadSafetyHandler &Handler;
1011
const FunctionDecl *CurrentFunction;
1012
LocalVariableMap LocalVarMap;
1013
// Maps constructed objects to `this` placeholder prior to initialization.
1014
llvm::SmallDenseMap<const Expr *, til::LiteralPtr *> ConstructedObjects;
1015
FactManager FactMan;
1016
std::vector<CFGBlockInfo> BlockInfo;
1017
1018
BeforeSet *GlobalBeforeSet;
1019
1020
public:
1021
ThreadSafetyAnalyzer(ThreadSafetyHandler &H, BeforeSet* Bset)
1022
: Arena(&Bpa), SxBuilder(Arena), Handler(H), GlobalBeforeSet(Bset) {}
1023
1024
bool inCurrentScope(const CapabilityExpr &CapE);
1025
1026
void addLock(FactSet &FSet, std::unique_ptr<FactEntry> Entry,
1027
bool ReqAttr = false);
1028
void removeLock(FactSet &FSet, const CapabilityExpr &CapE,
1029
SourceLocation UnlockLoc, bool FullyRemove, LockKind Kind);
1030
1031
template <typename AttrType>
1032
void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
1033
const NamedDecl *D, til::SExpr *Self = nullptr);
1034
1035
template <class AttrType>
1036
void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
1037
const NamedDecl *D,
1038
const CFGBlock *PredBlock, const CFGBlock *CurrBlock,
1039
Expr *BrE, bool Neg);
1040
1041
const CallExpr* getTrylockCallExpr(const Stmt *Cond, LocalVarContext C,
1042
bool &Negate);
1043
1044
void getEdgeLockset(FactSet &Result, const FactSet &ExitSet,
1045
const CFGBlock* PredBlock,
1046
const CFGBlock *CurrBlock);
1047
1048
bool join(const FactEntry &a, const FactEntry &b, bool CanModify);
1049
1050
void intersectAndWarn(FactSet &EntrySet, const FactSet &ExitSet,
1051
SourceLocation JoinLoc, LockErrorKind EntryLEK,
1052
LockErrorKind ExitLEK);
1053
1054
void intersectAndWarn(FactSet &EntrySet, const FactSet &ExitSet,
1055
SourceLocation JoinLoc, LockErrorKind LEK) {
1056
intersectAndWarn(EntrySet, ExitSet, JoinLoc, LEK, LEK);
1057
}
1058
1059
void runAnalysis(AnalysisDeclContext &AC);
1060
1061
void warnIfMutexNotHeld(const FactSet &FSet, const NamedDecl *D,
1062
const Expr *Exp, AccessKind AK, Expr *MutexExp,
1063
ProtectedOperationKind POK, til::LiteralPtr *Self,
1064
SourceLocation Loc);
1065
void warnIfMutexHeld(const FactSet &FSet, const NamedDecl *D, const Expr *Exp,
1066
Expr *MutexExp, til::LiteralPtr *Self,
1067
SourceLocation Loc);
1068
1069
void checkAccess(const FactSet &FSet, const Expr *Exp, AccessKind AK,
1070
ProtectedOperationKind POK);
1071
void checkPtAccess(const FactSet &FSet, const Expr *Exp, AccessKind AK,
1072
ProtectedOperationKind POK);
1073
};
1074
1075
} // namespace
1076
1077
/// Process acquired_before and acquired_after attributes on Vd.
1078
BeforeSet::BeforeInfo* BeforeSet::insertAttrExprs(const ValueDecl* Vd,
1079
ThreadSafetyAnalyzer& Analyzer) {
1080
// Create a new entry for Vd.
1081
BeforeInfo *Info = nullptr;
1082
{
1083
// Keep InfoPtr in its own scope in case BMap is modified later and the
1084
// reference becomes invalid.
1085
std::unique_ptr<BeforeInfo> &InfoPtr = BMap[Vd];
1086
if (!InfoPtr)
1087
InfoPtr.reset(new BeforeInfo());
1088
Info = InfoPtr.get();
1089
}
1090
1091
for (const auto *At : Vd->attrs()) {
1092
switch (At->getKind()) {
1093
case attr::AcquiredBefore: {
1094
const auto *A = cast<AcquiredBeforeAttr>(At);
1095
1096
// Read exprs from the attribute, and add them to BeforeVect.
1097
for (const auto *Arg : A->args()) {
1098
CapabilityExpr Cp =
1099
Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
1100
if (const ValueDecl *Cpvd = Cp.valueDecl()) {
1101
Info->Vect.push_back(Cpvd);
1102
const auto It = BMap.find(Cpvd);
1103
if (It == BMap.end())
1104
insertAttrExprs(Cpvd, Analyzer);
1105
}
1106
}
1107
break;
1108
}
1109
case attr::AcquiredAfter: {
1110
const auto *A = cast<AcquiredAfterAttr>(At);
1111
1112
// Read exprs from the attribute, and add them to BeforeVect.
1113
for (const auto *Arg : A->args()) {
1114
CapabilityExpr Cp =
1115
Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
1116
if (const ValueDecl *ArgVd = Cp.valueDecl()) {
1117
// Get entry for mutex listed in attribute
1118
BeforeInfo *ArgInfo = getBeforeInfoForDecl(ArgVd, Analyzer);
1119
ArgInfo->Vect.push_back(Vd);
1120
}
1121
}
1122
break;
1123
}
1124
default:
1125
break;
1126
}
1127
}
1128
1129
return Info;
1130
}
1131
1132
BeforeSet::BeforeInfo *
1133
BeforeSet::getBeforeInfoForDecl(const ValueDecl *Vd,
1134
ThreadSafetyAnalyzer &Analyzer) {
1135
auto It = BMap.find(Vd);
1136
BeforeInfo *Info = nullptr;
1137
if (It == BMap.end())
1138
Info = insertAttrExprs(Vd, Analyzer);
1139
else
1140
Info = It->second.get();
1141
assert(Info && "BMap contained nullptr?");
1142
return Info;
1143
}
1144
1145
/// Warn if any mutexes in FSet are in the acquired_before set of StartVd.
1146
void BeforeSet::checkBeforeAfter(const ValueDecl* StartVd,
1147
const FactSet& FSet,
1148
ThreadSafetyAnalyzer& Analyzer,
1149
SourceLocation Loc, StringRef CapKind) {
1150
SmallVector<BeforeInfo*, 8> InfoVect;
1151
1152
// Do a depth-first traversal of Vd.
1153
// Return true if there are cycles.
1154
std::function<bool (const ValueDecl*)> traverse = [&](const ValueDecl* Vd) {
1155
if (!Vd)
1156
return false;
1157
1158
BeforeSet::BeforeInfo *Info = getBeforeInfoForDecl(Vd, Analyzer);
1159
1160
if (Info->Visited == 1)
1161
return true;
1162
1163
if (Info->Visited == 2)
1164
return false;
1165
1166
if (Info->Vect.empty())
1167
return false;
1168
1169
InfoVect.push_back(Info);
1170
Info->Visited = 1;
1171
for (const auto *Vdb : Info->Vect) {
1172
// Exclude mutexes in our immediate before set.
1173
if (FSet.containsMutexDecl(Analyzer.FactMan, Vdb)) {
1174
StringRef L1 = StartVd->getName();
1175
StringRef L2 = Vdb->getName();
1176
Analyzer.Handler.handleLockAcquiredBefore(CapKind, L1, L2, Loc);
1177
}
1178
// Transitively search other before sets, and warn on cycles.
1179
if (traverse(Vdb)) {
1180
if (!CycMap.contains(Vd)) {
1181
CycMap.insert(std::make_pair(Vd, true));
1182
StringRef L1 = Vd->getName();
1183
Analyzer.Handler.handleBeforeAfterCycle(L1, Vd->getLocation());
1184
}
1185
}
1186
}
1187
Info->Visited = 2;
1188
return false;
1189
};
1190
1191
traverse(StartVd);
1192
1193
for (auto *Info : InfoVect)
1194
Info->Visited = 0;
1195
}
1196
1197
/// Gets the value decl pointer from DeclRefExprs or MemberExprs.
1198
static const ValueDecl *getValueDecl(const Expr *Exp) {
1199
if (const auto *CE = dyn_cast<ImplicitCastExpr>(Exp))
1200
return getValueDecl(CE->getSubExpr());
1201
1202
if (const auto *DR = dyn_cast<DeclRefExpr>(Exp))
1203
return DR->getDecl();
1204
1205
if (const auto *ME = dyn_cast<MemberExpr>(Exp))
1206
return ME->getMemberDecl();
1207
1208
return nullptr;
1209
}
1210
1211
namespace {
1212
1213
template <typename Ty>
1214
class has_arg_iterator_range {
1215
using yes = char[1];
1216
using no = char[2];
1217
1218
template <typename Inner>
1219
static yes& test(Inner *I, decltype(I->args()) * = nullptr);
1220
1221
template <typename>
1222
static no& test(...);
1223
1224
public:
1225
static const bool value = sizeof(test<Ty>(nullptr)) == sizeof(yes);
1226
};
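
// For example (illustrative): the trait yields true for attribute classes
// such as RequiresCapabilityAttr, which provide an args() range, and false
// for types without args(), which select the variadic test overload.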
1227
1228
} // namespace
1229
1230
bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
1231
const threadSafety::til::SExpr *SExp = CapE.sexpr();
1232
assert(SExp && "Null expressions should be ignored");
1233
1234
if (const auto *LP = dyn_cast<til::LiteralPtr>(SExp)) {
1235
const ValueDecl *VD = LP->clangDecl();
1236
// Variables defined in a function are always inaccessible.
1237
if (!VD || !VD->isDefinedOutsideFunctionOrMethod())
1238
return false;
1239
// For now we consider static class members to be inaccessible.
1240
if (isa<CXXRecordDecl>(VD->getDeclContext()))
1241
return false;
1242
// Global variables are always in scope.
1243
return true;
1244
}
1245
1246
// Members are in scope from methods of the same class.
1247
if (const auto *P = dyn_cast<til::Project>(SExp)) {
1248
if (!isa_and_nonnull<CXXMethodDecl>(CurrentFunction))
1249
return false;
1250
const ValueDecl *VD = P->clangDecl();
1251
return VD->getDeclContext() == CurrentFunction->getDeclContext();
1252
}
1253
1254
return false;
1255
}
1256
1257
/// Add a new lock to the lockset, warning if the lock is already there.
1258
/// \param ReqAttr -- true if this is part of an initial Requires attribute.
1259
void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
1260
std::unique_ptr<FactEntry> Entry,
1261
bool ReqAttr) {
1262
if (Entry->shouldIgnore())
1263
return;
1264
1265
if (!ReqAttr && !Entry->negative()) {
1266
// look for the negative capability, and remove it from the fact set.
1267
CapabilityExpr NegC = !*Entry;
1268
const FactEntry *Nen = FSet.findLock(FactMan, NegC);
1269
if (Nen) {
1270
FSet.removeLock(FactMan, NegC);
1271
}
1272
else {
1273
if (inCurrentScope(*Entry) && !Entry->asserted())
1274
Handler.handleNegativeNotHeld(Entry->getKind(), Entry->toString(),
1275
NegC.toString(), Entry->loc());
1276
}
1277
}
1278
1279
// Check before/after constraints
1280
if (Handler.issueBetaWarnings() &&
1281
!Entry->asserted() && !Entry->declared()) {
1282
GlobalBeforeSet->checkBeforeAfter(Entry->valueDecl(), FSet, *this,
1283
Entry->loc(), Entry->getKind());
1284
}
1285
1286
// FIXME: Don't always warn when we have support for reentrant locks.
1287
if (const FactEntry *Cp = FSet.findLock(FactMan, *Entry)) {
1288
if (!Entry->asserted())
1289
Cp->handleLock(FSet, FactMan, *Entry, Handler);
1290
} else {
1291
FSet.addLock(FactMan, std::move(Entry));
1292
}
1293
}
1294
1295
/// Remove a lock from the lockset, warning if the lock is not there.
1296
/// \param UnlockLoc The source location of the unlock (only used in error msg)
1297
void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
1298
SourceLocation UnlockLoc,
1299
bool FullyRemove, LockKind ReceivedKind) {
1300
if (Cp.shouldIgnore())
1301
return;
1302
1303
const FactEntry *LDat = FSet.findLock(FactMan, Cp);
1304
if (!LDat) {
1305
SourceLocation PrevLoc;
1306
if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp))
1307
PrevLoc = Neg->loc();
1308
Handler.handleUnmatchedUnlock(Cp.getKind(), Cp.toString(), UnlockLoc,
1309
PrevLoc);
1310
return;
1311
}
1312
1313
// Generic lock removal doesn't care about lock kind mismatches, but
1314
// otherwise diagnose when the lock kinds are mismatched.
1315
if (ReceivedKind != LK_Generic && LDat->kind() != ReceivedKind) {
1316
Handler.handleIncorrectUnlockKind(Cp.getKind(), Cp.toString(), LDat->kind(),
1317
ReceivedKind, LDat->loc(), UnlockLoc);
1318
}
1319
1320
LDat->handleUnlock(FSet, FactMan, Cp, UnlockLoc, FullyRemove, Handler);
1321
}
1322
1323
/// Extract the list of mutexIDs from the attribute on an expression,
1324
/// and push them onto Mtxs, discarding any duplicates.
1325
template <typename AttrType>
1326
void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
1327
const Expr *Exp, const NamedDecl *D,
1328
til::SExpr *Self) {
1329
if (Attr->args_size() == 0) {
1330
// The mutex held is the "this" object.
1331
CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, Self);
1332
if (Cp.isInvalid()) {
1333
warnInvalidLock(Handler, nullptr, D, Exp, Cp.getKind());
1334
return;
1335
}
1336
//else
1337
if (!Cp.shouldIgnore())
1338
Mtxs.push_back_nodup(Cp);
1339
return;
1340
}
1341
1342
for (const auto *Arg : Attr->args()) {
1343
CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, Self);
1344
if (Cp.isInvalid()) {
1345
warnInvalidLock(Handler, nullptr, D, Exp, Cp.getKind());
1346
continue;
1347
}
1348
//else
1349
if (!Cp.shouldIgnore())
1350
Mtxs.push_back_nodup(Cp);
1351
}
1352
}
1353
1354
/// Extract the list of mutexIDs from a trylock attribute. If the
1355
/// trylock applies to the given edge, then push them onto Mtxs, discarding
1356
/// any duplicates.
1357
template <class AttrType>
1358
void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
1359
const Expr *Exp, const NamedDecl *D,
1360
const CFGBlock *PredBlock,
1361
const CFGBlock *CurrBlock,
1362
Expr *BrE, bool Neg) {
1363
// Find out which branch has the lock
1364
bool branch = false;
1365
if (const auto *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE))
1366
branch = BLE->getValue();
1367
else if (const auto *ILE = dyn_cast_or_null<IntegerLiteral>(BrE))
1368
branch = ILE->getValue().getBoolValue();
1369
1370
int branchnum = branch ? 0 : 1;
1371
if (Neg)
1372
branchnum = !branchnum;
1373
1374
// If we've taken the trylock branch, then add the lock
1375
int i = 0;
1376
for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
1377
SE = PredBlock->succ_end(); SI != SE && i < 2; ++SI, ++i) {
1378
if (*SI == CurrBlock && i == branchnum)
1379
getMutexIDs(Mtxs, Attr, Exp, D);
1380
}
1381
}
1382
1383
static bool getStaticBooleanValue(Expr *E, bool &TCond) {
1384
if (isa<CXXNullPtrLiteralExpr>(E) || isa<GNUNullExpr>(E)) {
1385
TCond = false;
1386
return true;
1387
} else if (const auto *BLE = dyn_cast<CXXBoolLiteralExpr>(E)) {
1388
TCond = BLE->getValue();
1389
return true;
1390
} else if (const auto *ILE = dyn_cast<IntegerLiteral>(E)) {
1391
TCond = ILE->getValue().getBoolValue();
1392
return true;
1393
} else if (auto *CE = dyn_cast<ImplicitCastExpr>(E))
1394
return getStaticBooleanValue(CE->getSubExpr(), TCond);
1395
return false;
1396
}
1397
1398
// If Cond can be traced back to a function call, return the call expression.
1399
// The Negate argument should be initialized to false; it will be set to true
1400
// if the function call is negated, e.g. if (!mu.tryLock(...))
1401
const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
1402
LocalVarContext C,
1403
bool &Negate) {
1404
if (!Cond)
1405
return nullptr;
1406
1407
if (const auto *CallExp = dyn_cast<CallExpr>(Cond)) {
1408
if (CallExp->getBuiltinCallee() == Builtin::BI__builtin_expect)
1409
return getTrylockCallExpr(CallExp->getArg(0), C, Negate);
1410
return CallExp;
1411
}
1412
else if (const auto *PE = dyn_cast<ParenExpr>(Cond))
1413
return getTrylockCallExpr(PE->getSubExpr(), C, Negate);
1414
else if (const auto *CE = dyn_cast<ImplicitCastExpr>(Cond))
1415
return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
1416
else if (const auto *FE = dyn_cast<FullExpr>(Cond))
1417
return getTrylockCallExpr(FE->getSubExpr(), C, Negate);
1418
else if (const auto *DRE = dyn_cast<DeclRefExpr>(Cond)) {
1419
const Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
1420
return getTrylockCallExpr(E, C, Negate);
1421
}
1422
else if (const auto *UOP = dyn_cast<UnaryOperator>(Cond)) {
1423
if (UOP->getOpcode() == UO_LNot) {
1424
Negate = !Negate;
1425
return getTrylockCallExpr(UOP->getSubExpr(), C, Negate);
1426
}
1427
return nullptr;
1428
}
1429
else if (const auto *BOP = dyn_cast<BinaryOperator>(Cond)) {
1430
if (BOP->getOpcode() == BO_EQ || BOP->getOpcode() == BO_NE) {
1431
if (BOP->getOpcode() == BO_NE)
1432
Negate = !Negate;
1433
1434
bool TCond = false;
1435
if (getStaticBooleanValue(BOP->getRHS(), TCond)) {
1436
if (!TCond) Negate = !Negate;
1437
return getTrylockCallExpr(BOP->getLHS(), C, Negate);
1438
}
1439
TCond = false;
1440
if (getStaticBooleanValue(BOP->getLHS(), TCond)) {
1441
if (!TCond) Negate = !Negate;
1442
return getTrylockCallExpr(BOP->getRHS(), C, Negate);
1443
}
1444
return nullptr;
1445
}
1446
if (BOP->getOpcode() == BO_LAnd) {
1447
// LHS must have been evaluated in a different block.
1448
return getTrylockCallExpr(BOP->getRHS(), C, Negate);
1449
}
1450
if (BOP->getOpcode() == BO_LOr)
1451
return getTrylockCallExpr(BOP->getRHS(), C, Negate);
1452
return nullptr;
1453
} else if (const auto *COP = dyn_cast<ConditionalOperator>(Cond)) {
1454
bool TCond, FCond;
1455
if (getStaticBooleanValue(COP->getTrueExpr(), TCond) &&
1456
getStaticBooleanValue(COP->getFalseExpr(), FCond)) {
1457
if (TCond && !FCond)
1458
return getTrylockCallExpr(COP->getCond(), C, Negate);
1459
if (!TCond && FCond) {
1460
Negate = !Negate;
1461
return getTrylockCallExpr(COP->getCond(), C, Negate);
1462
}
1463
}
1464
}
1465
return nullptr;
1466
}
1467
1468
/// Find the lockset that holds on the edge between PredBlock
1469
/// and CurrBlock. The edge set is the exit set of PredBlock (passed
1470
/// as the ExitSet parameter) plus any trylocks, which are conditionally held.
1471
void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
1472
const FactSet &ExitSet,
1473
const CFGBlock *PredBlock,
1474
const CFGBlock *CurrBlock) {
1475
Result = ExitSet;
1476
1477
const Stmt *Cond = PredBlock->getTerminatorCondition();
1478
// We don't acquire try-locks on ?: branches, only when its result is used.
1479
if (!Cond || isa<ConditionalOperator>(PredBlock->getTerminatorStmt()))
1480
return;
1481
1482
bool Negate = false;
1483
const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()];
1484
const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext;
1485
1486
const auto *Exp = getTrylockCallExpr(Cond, LVarCtx, Negate);
1487
if (!Exp)
1488
return;
1489
1490
auto *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
1491
if (!FunDecl || !FunDecl->hasAttrs())
1492
return;
1493
1494
CapExprSet ExclusiveLocksToAdd;
1495
CapExprSet SharedLocksToAdd;
1496
1497
// If the condition is a call to a Trylock function, then grab the attributes
1498
for (const auto *Attr : FunDecl->attrs()) {
1499
switch (Attr->getKind()) {
1500
case attr::TryAcquireCapability: {
1501
auto *A = cast<TryAcquireCapabilityAttr>(Attr);
1502
getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
1503
Exp, FunDecl, PredBlock, CurrBlock, A->getSuccessValue(),
1504
Negate);
1505
break;
1506
}
1507
case attr::ExclusiveTrylockFunction: {
1508
const auto *A = cast<ExclusiveTrylockFunctionAttr>(Attr);
1509
getMutexIDs(ExclusiveLocksToAdd, A, Exp, FunDecl, PredBlock, CurrBlock,
1510
A->getSuccessValue(), Negate);
1511
break;
1512
}
1513
case attr::SharedTrylockFunction: {
1514
const auto *A = cast<SharedTrylockFunctionAttr>(Attr);
1515
getMutexIDs(SharedLocksToAdd, A, Exp, FunDecl, PredBlock, CurrBlock,
1516
A->getSuccessValue(), Negate);
1517
break;
1518
}
1519
default:
1520
break;
1521
}
1522
}
1523
1524
// Add and remove locks.
1525
SourceLocation Loc = Exp->getExprLoc();
1526
for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd)
1527
addLock(Result, std::make_unique<LockableFactEntry>(ExclusiveLockToAdd,
1528
LK_Exclusive, Loc));
1529
for (const auto &SharedLockToAdd : SharedLocksToAdd)
1530
addLock(Result, std::make_unique<LockableFactEntry>(SharedLockToAdd,
1531
LK_Shared, Loc));
1532
}
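
// For illustration, a sketch of the pattern getEdgeLockset handles, assuming
// a TryLock() annotated with TRY_ACQUIRE(true) as in the documentation:
//
//   if (Mu.TryLock()) {
//     Data = 1;      // ok: Mu is in the lockset on this branch only
//     Mu.Unlock();
//   }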
1533
1534
namespace {
1535
1536
/// We use this class to visit different types of expressions in
1537
/// CFGBlocks, and build up the lockset.
1538
/// An expression may cause us to add or remove locks from the lockset, or else
1539
/// output error messages related to missing locks.
1540
/// FIXME: In future, we may be able to not inherit from a visitor.
1541
class BuildLockset : public ConstStmtVisitor<BuildLockset> {
1542
friend class ThreadSafetyAnalyzer;
1543
1544
ThreadSafetyAnalyzer *Analyzer;
1545
FactSet FSet;
1546
// The fact set for the function on exit.
1547
const FactSet &FunctionExitFSet;
1548
LocalVariableMap::Context LVarCtx;
1549
unsigned CtxIndex;
1550
1551
// helper functions
1552
1553
void checkAccess(const Expr *Exp, AccessKind AK,
1554
ProtectedOperationKind POK = POK_VarAccess) {
1555
Analyzer->checkAccess(FSet, Exp, AK, POK);
1556
}
1557
void checkPtAccess(const Expr *Exp, AccessKind AK,
1558
ProtectedOperationKind POK = POK_VarAccess) {
1559
Analyzer->checkPtAccess(FSet, Exp, AK, POK);
1560
}
1561
1562
void handleCall(const Expr *Exp, const NamedDecl *D,
1563
til::LiteralPtr *Self = nullptr,
1564
SourceLocation Loc = SourceLocation());
1565
void examineArguments(const FunctionDecl *FD,
1566
CallExpr::const_arg_iterator ArgBegin,
1567
CallExpr::const_arg_iterator ArgEnd,
1568
bool SkipFirstParam = false);
1569
1570
public:
1571
BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info,
1572
const FactSet &FunctionExitFSet)
1573
: ConstStmtVisitor<BuildLockset>(), Analyzer(Anlzr), FSet(Info.EntrySet),
1574
FunctionExitFSet(FunctionExitFSet), LVarCtx(Info.EntryContext),
1575
CtxIndex(Info.EntryIndex) {}
1576
1577
void VisitUnaryOperator(const UnaryOperator *UO);
1578
void VisitBinaryOperator(const BinaryOperator *BO);
1579
void VisitCastExpr(const CastExpr *CE);
1580
void VisitCallExpr(const CallExpr *Exp);
1581
void VisitCXXConstructExpr(const CXXConstructExpr *Exp);
1582
void VisitDeclStmt(const DeclStmt *S);
1583
void VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *Exp);
1584
void VisitReturnStmt(const ReturnStmt *S);
1585
};
1586
1587
} // namespace
1588
1589
/// Warn if the LSet does not contain a lock sufficient to protect access
1590
/// of at least the passed in AccessKind.
1591
void ThreadSafetyAnalyzer::warnIfMutexNotHeld(
1592
const FactSet &FSet, const NamedDecl *D, const Expr *Exp, AccessKind AK,
1593
Expr *MutexExp, ProtectedOperationKind POK, til::LiteralPtr *Self,
1594
SourceLocation Loc) {
1595
LockKind LK = getLockKindFromAccessKind(AK);
1596
CapabilityExpr Cp = SxBuilder.translateAttrExpr(MutexExp, D, Exp, Self);
1597
if (Cp.isInvalid()) {
1598
warnInvalidLock(Handler, MutexExp, D, Exp, Cp.getKind());
1599
return;
1600
} else if (Cp.shouldIgnore()) {
1601
return;
1602
}
1603
1604
if (Cp.negative()) {
1605
// Negative capabilities act like locks that are excluded.
1606
const FactEntry *LDat = FSet.findLock(FactMan, !Cp);
1607
if (LDat) {
1608
Handler.handleFunExcludesLock(Cp.getKind(), D->getNameAsString(),
1609
(!Cp).toString(), Loc);
1610
return;
1611
}
1612
1613
// If this does not refer to a negative capability in the same class,
1614
// then stop here.
1615
if (!inCurrentScope(Cp))
1616
return;
1617
1618
// Otherwise the negative requirement must be propagated to the caller.
1619
LDat = FSet.findLock(FactMan, Cp);
1620
if (!LDat) {
1621
Handler.handleNegativeNotHeld(D, Cp.toString(), Loc);
1622
}
1623
return;
1624
}
1625
1626
const FactEntry *LDat = FSet.findLockUniv(FactMan, Cp);
1627
bool NoError = true;
1628
if (!LDat) {
1629
// No exact match found. Look for a partial match.
1630
LDat = FSet.findPartialMatch(FactMan, Cp);
1631
if (LDat) {
1632
// Warn that there's no precise match.
1633
std::string PartMatchStr = LDat->toString();
1634
StringRef PartMatchName(PartMatchStr);
1635
Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(), LK, Loc,
1636
&PartMatchName);
1637
} else {
1638
// Warn that there's no match at all.
1639
Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(), LK, Loc);
1640
}
1641
NoError = false;
1642
}
1643
// Make sure the mutex we found is the right kind.
1644
if (NoError && LDat && !LDat->isAtLeast(LK)) {
1645
Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(), LK, Loc);
1646
}
1647
}
1648
1649
/// Warn if the LSet contains the given lock.
void ThreadSafetyAnalyzer::warnIfMutexHeld(const FactSet &FSet,
                                           const NamedDecl *D, const Expr *Exp,
                                           Expr *MutexExp,
                                           til::LiteralPtr *Self,
                                           SourceLocation Loc) {
  CapabilityExpr Cp = SxBuilder.translateAttrExpr(MutexExp, D, Exp, Self);
  if (Cp.isInvalid()) {
    warnInvalidLock(Handler, MutexExp, D, Exp, Cp.getKind());
    return;
  } else if (Cp.shouldIgnore()) {
    return;
  }

  const FactEntry *LDat = FSet.findLock(FactMan, Cp);
  if (LDat) {
    Handler.handleFunExcludesLock(Cp.getKind(), D->getNameAsString(),
                                  Cp.toString(), Loc);
  }
}
/// Checks guarded_by and pt_guarded_by attributes.
/// Whenever we identify an access (read or write) to a DeclRefExpr that is
/// marked with guarded_by, we must ensure the appropriate mutexes are held.
/// Similarly, we check if the access is to an expression that dereferences
/// a pointer marked with pt_guarded_by.
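///
/// For example (an illustrative sketch using the documented annotation
/// macros; `Mutex` stands for any CAPABILITY type):
/// \code
///   Mutex Mu;
///   int A  GUARDED_BY(Mu);
///   int *P PT_GUARDED_BY(Mu);
///
///   void F() {
///     A = 1;   // warns: writing variable 'A' requires holding 'Mu'
///     *P = 2;  // warns: writing the value pointed to by 'P' requires
///              // holding 'Mu'
///   }
/// \endcode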
void ThreadSafetyAnalyzer::checkAccess(const FactSet &FSet, const Expr *Exp,
                                       AccessKind AK,
                                       ProtectedOperationKind POK) {
  Exp = Exp->IgnoreImplicit()->IgnoreParenCasts();

  SourceLocation Loc = Exp->getExprLoc();

  // Local variables of reference type cannot be re-assigned;
  // map them to their initializer.
  while (const auto *DRE = dyn_cast<DeclRefExpr>(Exp)) {
    const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()->getCanonicalDecl());
    if (VD && VD->isLocalVarDecl() && VD->getType()->isReferenceType()) {
      if (const auto *E = VD->getInit()) {
        // Guard against self-initialization. e.g., int &i = i;
        if (E == Exp)
          break;
        Exp = E;
        continue;
      }
    }
    break;
  }

  if (const auto *UO = dyn_cast<UnaryOperator>(Exp)) {
    // For dereferences
    if (UO->getOpcode() == UO_Deref)
      checkPtAccess(FSet, UO->getSubExpr(), AK, POK);
    return;
  }

  if (const auto *BO = dyn_cast<BinaryOperator>(Exp)) {
    switch (BO->getOpcode()) {
    case BO_PtrMemD: // .*
      return checkAccess(FSet, BO->getLHS(), AK, POK);
    case BO_PtrMemI: // ->*
      return checkPtAccess(FSet, BO->getLHS(), AK, POK);
    default:
      return;
    }
  }

  if (const auto *AE = dyn_cast<ArraySubscriptExpr>(Exp)) {
    checkPtAccess(FSet, AE->getLHS(), AK, POK);
    return;
  }

  if (const auto *ME = dyn_cast<MemberExpr>(Exp)) {
    if (ME->isArrow())
      checkPtAccess(FSet, ME->getBase(), AK, POK);
    else
      checkAccess(FSet, ME->getBase(), AK, POK);
  }

  const ValueDecl *D = getValueDecl(Exp);
  if (!D || !D->hasAttrs())
    return;

  if (D->hasAttr<GuardedVarAttr>() && FSet.isEmpty(FactMan)) {
    Handler.handleNoMutexHeld(D, POK, AK, Loc);
  }

  for (const auto *I : D->specific_attrs<GuardedByAttr>())
    warnIfMutexNotHeld(FSet, D, Exp, AK, I->getArg(), POK, nullptr, Loc);
}
/// Checks pt_guarded_by and pt_guarded_var attributes.
/// POK is the same ProtectedOperationKind that was passed to checkAccess.
void ThreadSafetyAnalyzer::checkPtAccess(const FactSet &FSet, const Expr *Exp,
                                         AccessKind AK,
                                         ProtectedOperationKind POK) {
  while (true) {
    if (const auto *PE = dyn_cast<ParenExpr>(Exp)) {
      Exp = PE->getSubExpr();
      continue;
    }
    if (const auto *CE = dyn_cast<CastExpr>(Exp)) {
      if (CE->getCastKind() == CK_ArrayToPointerDecay) {
        // If it's an actual array, and not a pointer, then its elements
        // are protected by GUARDED_BY, not PT_GUARDED_BY.
        checkAccess(FSet, CE->getSubExpr(), AK, POK);
        return;
      }
      Exp = CE->getSubExpr();
      continue;
    }
    break;
  }

  // Pass by reference warnings are under a different flag.
  ProtectedOperationKind PtPOK = POK_VarDereference;
  if (POK == POK_PassByRef) PtPOK = POK_PtPassByRef;
  if (POK == POK_ReturnByRef)
    PtPOK = POK_PtReturnByRef;

  const ValueDecl *D = getValueDecl(Exp);
  if (!D || !D->hasAttrs())
    return;

  if (D->hasAttr<PtGuardedVarAttr>() && FSet.isEmpty(FactMan))
    Handler.handleNoMutexHeld(D, PtPOK, AK, Exp->getExprLoc());

  for (auto const *I : D->specific_attrs<PtGuardedByAttr>())
    warnIfMutexNotHeld(FSet, D, Exp, AK, I->getArg(), PtPOK, nullptr,
                       Exp->getExprLoc());
}
/// Process a function call, method call, constructor call,
/// or destructor call. This involves looking at the attributes on the
/// corresponding function/method/constructor/destructor, issuing warnings,
/// and updating the locksets accordingly.
///
/// FIXME: For classes annotated with one of the guarded annotations, we need
/// to treat const method calls as reads and non-const method calls as writes,
/// and check that the appropriate locks are held. Non-const method calls with
/// the same signature as const method calls can also be treated as reads.
///
/// \param Exp The call expression.
/// \param D The callee declaration.
/// \param Self If \p Exp = nullptr, the implicit this argument or the argument
/// of an implicitly called cleanup function.
/// \param Loc If \p Exp = nullptr, the location.
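///
/// For example (an illustrative sketch using the documented annotation
/// macros; `Mutex` stands for any CAPABILITY type):
/// \code
///   Mutex Mu;
///   void LockMu()   ACQUIRE(Mu);   // a call adds 'Mu' to the lockset
///   void UnlockMu() RELEASE(Mu);   // a call removes 'Mu', warning if absent
///   void DoWork()   REQUIRES(Mu);  // a call warns unless 'Mu' is held
/// \endcode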
void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
                              til::LiteralPtr *Self, SourceLocation Loc) {
  CapExprSet ExclusiveLocksToAdd, SharedLocksToAdd;
  CapExprSet ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove;
  CapExprSet ScopedReqsAndExcludes;

  // Figure out if we're constructing an object of scoped lockable class
  CapabilityExpr Scp;
  if (Exp) {
    assert(!Self);
    const auto *TagT = Exp->getType()->getAs<TagType>();
    if (TagT && Exp->isPRValue()) {
      std::pair<til::LiteralPtr *, StringRef> Placeholder =
          Analyzer->SxBuilder.createThisPlaceholder(Exp);
      [[maybe_unused]] auto inserted =
          Analyzer->ConstructedObjects.insert({Exp, Placeholder.first});
      assert(inserted.second && "Are we visiting the same expression again?");
      if (isa<CXXConstructExpr>(Exp))
        Self = Placeholder.first;
      if (TagT->getDecl()->hasAttr<ScopedLockableAttr>())
        Scp = CapabilityExpr(Placeholder.first, Placeholder.second, false);
    }

    assert(Loc.isInvalid());
    Loc = Exp->getExprLoc();
  }

  for (const Attr *At : D->attrs()) {
    switch (At->getKind()) {
    // When we encounter a lock function, we need to add the lock to our
    // lockset.
    case attr::AcquireCapability: {
      const auto *A = cast<AcquireCapabilityAttr>(At);
      Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd
                                          : ExclusiveLocksToAdd,
                            A, Exp, D, Self);
      break;
    }

    // An assert will add a lock to the lockset, but will not generate
    // a warning if it is already there, and will not generate a warning
    // if it is not removed.
    case attr::AssertExclusiveLock: {
      const auto *A = cast<AssertExclusiveLockAttr>(At);

      CapExprSet AssertLocks;
      Analyzer->getMutexIDs(AssertLocks, A, Exp, D, Self);
      for (const auto &AssertLock : AssertLocks)
        Analyzer->addLock(
            FSet, std::make_unique<LockableFactEntry>(
                      AssertLock, LK_Exclusive, Loc, FactEntry::Asserted));
      break;
    }
    case attr::AssertSharedLock: {
      const auto *A = cast<AssertSharedLockAttr>(At);

      CapExprSet AssertLocks;
      Analyzer->getMutexIDs(AssertLocks, A, Exp, D, Self);
      for (const auto &AssertLock : AssertLocks)
        Analyzer->addLock(
            FSet, std::make_unique<LockableFactEntry>(
                      AssertLock, LK_Shared, Loc, FactEntry::Asserted));
      break;
    }

    case attr::AssertCapability: {
      const auto *A = cast<AssertCapabilityAttr>(At);
      CapExprSet AssertLocks;
      Analyzer->getMutexIDs(AssertLocks, A, Exp, D, Self);
      for (const auto &AssertLock : AssertLocks)
        Analyzer->addLock(FSet, std::make_unique<LockableFactEntry>(
                                    AssertLock,
                                    A->isShared() ? LK_Shared : LK_Exclusive,
                                    Loc, FactEntry::Asserted));
      break;
    }

    // When we encounter an unlock function, we need to remove unlocked
    // mutexes from the lockset, and flag a warning if they are not there.
    case attr::ReleaseCapability: {
      const auto *A = cast<ReleaseCapabilityAttr>(At);
      if (A->isGeneric())
        Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, Self);
      else if (A->isShared())
        Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, Self);
      else
        Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, Self);
      break;
    }

    case attr::RequiresCapability: {
      const auto *A = cast<RequiresCapabilityAttr>(At);
      for (auto *Arg : A->args()) {
        Analyzer->warnIfMutexNotHeld(FSet, D, Exp,
                                     A->isShared() ? AK_Read : AK_Written,
                                     Arg, POK_FunctionCall, Self, Loc);
        // use for adopting a lock
        if (!Scp.shouldIgnore())
          Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, Self);
      }
      break;
    }

    case attr::LocksExcluded: {
      const auto *A = cast<LocksExcludedAttr>(At);
      for (auto *Arg : A->args()) {
        Analyzer->warnIfMutexHeld(FSet, D, Exp, Arg, Self, Loc);
        // use for deferring a lock
        if (!Scp.shouldIgnore())
          Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, Self);
      }
      break;
    }

    // Ignore attributes unrelated to thread-safety
    default:
      break;
    }
  }

  // Remove locks first to allow lock upgrading/downgrading.
  // FIXME -- should only fully remove if the attribute refers to 'this'.
  bool Dtor = isa<CXXDestructorDecl>(D);
  for (const auto &M : ExclusiveLocksToRemove)
    Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Exclusive);
  for (const auto &M : SharedLocksToRemove)
    Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Shared);
  for (const auto &M : GenericLocksToRemove)
    Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Generic);

  // Add locks.
  FactEntry::SourceKind Source =
      !Scp.shouldIgnore() ? FactEntry::Managed : FactEntry::Acquired;
  for (const auto &M : ExclusiveLocksToAdd)
    Analyzer->addLock(FSet, std::make_unique<LockableFactEntry>(M, LK_Exclusive,
                                                                Loc, Source));
  for (const auto &M : SharedLocksToAdd)
    Analyzer->addLock(
        FSet, std::make_unique<LockableFactEntry>(M, LK_Shared, Loc, Source));

  if (!Scp.shouldIgnore()) {
    // Add the managing object as a dummy mutex, mapped to the underlying mutex.
    auto ScopedEntry = std::make_unique<ScopedLockableFactEntry>(Scp, Loc);
    for (const auto &M : ExclusiveLocksToAdd)
      ScopedEntry->addLock(M);
    for (const auto &M : SharedLocksToAdd)
      ScopedEntry->addLock(M);
    for (const auto &M : ScopedReqsAndExcludes)
      ScopedEntry->addLock(M);
    for (const auto &M : ExclusiveLocksToRemove)
      ScopedEntry->addExclusiveUnlock(M);
    for (const auto &M : SharedLocksToRemove)
      ScopedEntry->addSharedUnlock(M);
    Analyzer->addLock(FSet, std::move(ScopedEntry));
  }
}
/// For unary operations which read and write a variable, we need to
/// check whether we hold any required mutexes. Reads are checked in
/// VisitCastExpr.
void BuildLockset::VisitUnaryOperator(const UnaryOperator *UO) {
  switch (UO->getOpcode()) {
  case UO_PostDec:
  case UO_PostInc:
  case UO_PreDec:
  case UO_PreInc:
    checkAccess(UO->getSubExpr(), AK_Written);
    break;
  default:
    break;
  }
}
/// For binary operations which assign to a variable (writes), we need to check
/// whether we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
void BuildLockset::VisitBinaryOperator(const BinaryOperator *BO) {
  if (!BO->isAssignmentOp())
    return;

  // adjust the context
  LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, BO, LVarCtx);

  checkAccess(BO->getLHS(), AK_Written);
}
/// Whenever we do an lvalue-to-rvalue cast, we are reading a variable and
/// need to ensure we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
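///
/// For example (illustrative, using the documented annotation macros):
/// \code
///   Mutex Mu;
///   int A GUARDED_BY(Mu);
///   int ReadA() { return A; }  // the lvalue-to-rvalue read of 'A' warns
///                              // unless 'Mu' is held
/// \endcode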
void BuildLockset::VisitCastExpr(const CastExpr *CE) {
  if (CE->getCastKind() != CK_LValueToRValue)
    return;
  checkAccess(CE->getSubExpr(), AK_Read);
}
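/// Check call arguments that are bound to reference parameters; such an
/// argument is treated as a pass-by-reference read of the underlying
/// variable. For example (illustrative, using the documented macros):
/// \code
///   Mutex Mu;
///   int A GUARDED_BY(Mu);
///   void TakesRef(const int &R);
///   void F() { TakesRef(A); }  // warns: passing 'A' by reference
///                              // requires holding 'Mu'
/// \endcode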
void BuildLockset::examineArguments(const FunctionDecl *FD,
                                    CallExpr::const_arg_iterator ArgBegin,
                                    CallExpr::const_arg_iterator ArgEnd,
                                    bool SkipFirstParam) {
  // Currently we can't do anything if we don't know the function declaration.
  if (!FD)
    return;

  // NO_THREAD_SAFETY_ANALYSIS does double duty here. Normally it
  // only turns off checking within the body of a function, but we also
  // use it to turn off checking in arguments to the function. This
  // could result in some false negatives, but the alternative is to
  // create yet another attribute.
  if (FD->hasAttr<NoThreadSafetyAnalysisAttr>())
    return;

  const ArrayRef<ParmVarDecl *> Params = FD->parameters();
  auto Param = Params.begin();
  if (SkipFirstParam)
    ++Param;

  // There can be default arguments, so we stop when one iterator is at end().
  for (auto Arg = ArgBegin; Param != Params.end() && Arg != ArgEnd;
       ++Param, ++Arg) {
    QualType Qt = (*Param)->getType();
    if (Qt->isReferenceType())
      checkAccess(*Arg, AK_Read, POK_PassByRef);
  }
}
void BuildLockset::VisitCallExpr(const CallExpr *Exp) {
  if (const auto *CE = dyn_cast<CXXMemberCallExpr>(Exp)) {
    const auto *ME = dyn_cast<MemberExpr>(CE->getCallee());
    // ME can be null when calling a method pointer
    const CXXMethodDecl *MD = CE->getMethodDecl();

    if (ME && MD) {
      if (ME->isArrow()) {
        // Should perhaps be AK_Written if !MD->isConst().
        checkPtAccess(CE->getImplicitObjectArgument(), AK_Read);
      } else {
        // Should perhaps be AK_Written if !MD->isConst().
        checkAccess(CE->getImplicitObjectArgument(), AK_Read);
      }
    }

    examineArguments(CE->getDirectCallee(), CE->arg_begin(), CE->arg_end());
  } else if (const auto *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) {
    OverloadedOperatorKind OEop = OE->getOperator();
    switch (OEop) {
    case OO_Equal:
    case OO_PlusEqual:
    case OO_MinusEqual:
    case OO_StarEqual:
    case OO_SlashEqual:
    case OO_PercentEqual:
    case OO_CaretEqual:
    case OO_AmpEqual:
    case OO_PipeEqual:
    case OO_LessLessEqual:
    case OO_GreaterGreaterEqual:
      checkAccess(OE->getArg(1), AK_Read);
      [[fallthrough]];
    case OO_PlusPlus:
    case OO_MinusMinus:
      checkAccess(OE->getArg(0), AK_Written);
      break;
    case OO_Star:
    case OO_ArrowStar:
    case OO_Arrow:
    case OO_Subscript:
      if (!(OEop == OO_Star && OE->getNumArgs() > 1)) {
        // Grrr. operator* can be multiplication...
        checkPtAccess(OE->getArg(0), AK_Read);
      }
      [[fallthrough]];
    default: {
      // TODO: get rid of this, and rely on pass-by-ref instead.
      const Expr *Obj = OE->getArg(0);
      checkAccess(Obj, AK_Read);
      // Check the remaining arguments. For method operators, the first
      // argument is the implicit self argument, and doesn't appear in the
      // FunctionDecl, but for non-methods it does.
      const FunctionDecl *FD = OE->getDirectCallee();
      examineArguments(FD, std::next(OE->arg_begin()), OE->arg_end(),
                       /*SkipFirstParam*/ !isa<CXXMethodDecl>(FD));
      break;
    }
    }
  } else {
    examineArguments(Exp->getDirectCallee(), Exp->arg_begin(), Exp->arg_end());
  }

  auto *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
  if (!D || !D->hasAttrs())
    return;
  handleCall(Exp, D);
}
void BuildLockset::VisitCXXConstructExpr(const CXXConstructExpr *Exp) {
  const CXXConstructorDecl *D = Exp->getConstructor();
  if (D && D->isCopyConstructor()) {
    const Expr *Source = Exp->getArg(0);
    checkAccess(Source, AK_Read);
  } else {
    examineArguments(D, Exp->arg_begin(), Exp->arg_end());
  }
  if (D && D->hasAttrs())
    handleCall(Exp, D);
}
static const Expr *UnpackConstruction(const Expr *E) {
  if (auto *CE = dyn_cast<CastExpr>(E))
    if (CE->getCastKind() == CK_NoOp)
      E = CE->getSubExpr()->IgnoreParens();
  if (auto *CE = dyn_cast<CastExpr>(E))
    if (CE->getCastKind() == CK_ConstructorConversion ||
        CE->getCastKind() == CK_UserDefinedConversion)
      E = CE->getSubExpr();
  if (auto *BTE = dyn_cast<CXXBindTemporaryExpr>(E))
    E = BTE->getSubExpr();
  return E;
}
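/// Bind a constructed object to the variable that it initializes, so that
/// facts attached to the object (e.g. a scoped lock) can later be looked up
/// through the variable. For example (illustrative, assuming a
/// SCOPED_CAPABILITY type named MutexLock):
/// \code
///   MutexLock Guard(&Mu);  // the scoped fact is now associated with 'Guard'
/// \endcode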
void BuildLockset::VisitDeclStmt(const DeclStmt *S) {
  // adjust the context
  LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, LVarCtx);

  for (auto *D : S->getDeclGroup()) {
    if (auto *VD = dyn_cast_or_null<VarDecl>(D)) {
      const Expr *E = VD->getInit();
      if (!E)
        continue;
      E = E->IgnoreParens();

      // handle constructors that involve temporaries
      if (auto *EWC = dyn_cast<ExprWithCleanups>(E))
        E = EWC->getSubExpr()->IgnoreParens();
      E = UnpackConstruction(E);

      if (auto Object = Analyzer->ConstructedObjects.find(E);
          Object != Analyzer->ConstructedObjects.end()) {
        Object->second->setClangDecl(VD);
        Analyzer->ConstructedObjects.erase(Object);
      }
    }
  }
}
void BuildLockset::VisitMaterializeTemporaryExpr(
    const MaterializeTemporaryExpr *Exp) {
  if (const ValueDecl *ExtD = Exp->getExtendingDecl()) {
    if (auto Object = Analyzer->ConstructedObjects.find(
            UnpackConstruction(Exp->getSubExpr()));
        Object != Analyzer->ConstructedObjects.end()) {
      Object->second->setClangDecl(ExtD);
      Analyzer->ConstructedObjects.erase(Object);
    }
  }
}
void BuildLockset::VisitReturnStmt(const ReturnStmt *S) {
  if (Analyzer->CurrentFunction == nullptr)
    return;
  const Expr *RetVal = S->getRetValue();
  if (!RetVal)
    return;

  // If returning by reference, check that the function requires the
  // appropriate capabilities.
  const QualType ReturnType =
      Analyzer->CurrentFunction->getReturnType().getCanonicalType();
  if (ReturnType->isLValueReferenceType()) {
    Analyzer->checkAccess(
        FunctionExitFSet, RetVal,
        ReturnType->getPointeeType().isConstQualified() ? AK_Read : AK_Written,
        POK_ReturnByRef);
  }
}
/// Given two facts merging on a join point, possibly warn and decide whether to
/// keep or replace.
///
/// \param CanModify Whether we can replace \p A by \p B.
/// \return false if we should keep \p A, true if we should take \p B.
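///
/// For example (illustrative; ReaderLock/Lock stand for methods annotated
/// ACQUIRE_SHARED/ACQUIRE): if one branch acquires a capability in shared
/// mode and the other in exclusive mode,
/// \code
///   if (B) Mu.ReaderLock(); else Mu.Lock();
/// \endcode
/// the join warns that the mutex is acquired exclusively and shared, and
/// prefers the exclusive fact to reduce follow-on warnings.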
bool ThreadSafetyAnalyzer::join(const FactEntry &A, const FactEntry &B,
                                bool CanModify) {
  if (A.kind() != B.kind()) {
    // For managed capabilities, the destructor should unlock in the right mode
    // anyway. For asserted capabilities no unlocking is needed.
    if ((A.managed() || A.asserted()) && (B.managed() || B.asserted())) {
      // The shared capability subsumes the exclusive capability, if possible.
      bool ShouldTakeB = B.kind() == LK_Shared;
      if (CanModify || !ShouldTakeB)
        return ShouldTakeB;
    }
    Handler.handleExclusiveAndShared(B.getKind(), B.toString(), B.loc(),
                                     A.loc());
    // Take the exclusive capability to reduce further warnings.
    return CanModify && B.kind() == LK_Exclusive;
  } else {
    // The non-asserted capability is the one we want to track.
    return CanModify && A.asserted() && !B.asserted();
  }
}
/// Compute the intersection of two locksets and issue warnings for any
/// locks in the symmetric difference.
///
/// This function is used at a merge point in the CFG when comparing the lockset
/// of each branch being merged. For example, given the following sequence:
/// A; if () then B; else C; D; we need to check that the locksets after B and C
/// are the same. In the event of a difference, we use the intersection of these
/// two locksets at the start of D.
///
/// \param EntrySet A lockset for entry into a (possibly new) block.
/// \param ExitSet The lockset on exiting a preceding block.
/// \param JoinLoc The location of the join point for error reporting.
/// \param EntryLEK The warning if a mutex is missing from \p EntrySet.
/// \param ExitLEK The warning if a mutex is missing from \p ExitSet.
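///
/// For example (an illustrative sketch; Lock() stands for a method annotated
/// ACQUIRE(Mu)):
/// \code
///   Mutex Mu;
///   void F(bool B) {
///     if (B)
///       Mu.Lock();
///     // join point: 'Mu' is held after the then-branch but not the
///     // else-branch, so a warning is issued and the analysis continues
///     // with the intersection, i.e. without 'Mu'.
///   }
/// \endcode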
void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &EntrySet,
                                            const FactSet &ExitSet,
                                            SourceLocation JoinLoc,
                                            LockErrorKind EntryLEK,
                                            LockErrorKind ExitLEK) {
  FactSet EntrySetOrig = EntrySet;

  // Find locks in ExitSet that conflict or are not in EntrySet, and warn.
  for (const auto &Fact : ExitSet) {
    const FactEntry &ExitFact = FactMan[Fact];

    FactSet::iterator EntryIt = EntrySet.findLockIter(FactMan, ExitFact);
    if (EntryIt != EntrySet.end()) {
      if (join(FactMan[*EntryIt], ExitFact,
               EntryLEK != LEK_LockedSomeLoopIterations))
        *EntryIt = Fact;
    } else if (!ExitFact.managed()) {
      ExitFact.handleRemovalFromIntersection(ExitSet, FactMan, JoinLoc,
                                             EntryLEK, Handler);
    }
  }

  // Find locks in EntrySet that are not in ExitSet, and remove them.
  for (const auto &Fact : EntrySetOrig) {
    const FactEntry *EntryFact = &FactMan[Fact];
    const FactEntry *ExitFact = ExitSet.findLock(FactMan, *EntryFact);

    if (!ExitFact) {
      if (!EntryFact->managed() || ExitLEK == LEK_LockedSomeLoopIterations)
        EntryFact->handleRemovalFromIntersection(EntrySetOrig, FactMan, JoinLoc,
                                                 ExitLEK, Handler);
      if (ExitLEK == LEK_LockedSomePredecessors)
        EntrySet.removeLock(FactMan, *EntryFact);
    }
  }
}
// Return true if block B never continues to its successors.
static bool neverReturns(const CFGBlock *B) {
  if (B->hasNoReturnElement())
    return true;
  if (B->empty())
    return false;

  CFGElement Last = B->back();
  if (std::optional<CFGStmt> S = Last.getAs<CFGStmt>()) {
    if (isa<CXXThrowExpr>(S->getStmt()))
      return true;
  }
  return false;
}
/// Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
/// Each block in the CFG is traversed exactly once.
void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
  // TODO: this whole function needs to be rewritten as a visitor for CFGWalker.
  // For now, we just use the walker to set things up.
  threadSafety::CFGWalker walker;
  if (!walker.init(AC))
    return;

  // AC.dumpCFG(true);
  // threadSafety::printSCFG(walker);

  CFG *CFGraph = walker.getGraph();
  const NamedDecl *D = walker.getDecl();
  CurrentFunction = dyn_cast<FunctionDecl>(D);

  if (D->hasAttr<NoThreadSafetyAnalysisAttr>())
    return;

  // FIXME: Do something a bit more intelligent inside constructor and
  // destructor code. Constructors and destructors must assume unique access
  // to 'this', so checks on member variable accesses are disabled, but we
  // should still enable checks on other objects.
  if (isa<CXXConstructorDecl>(D))
    return; // Don't check inside constructors.
  if (isa<CXXDestructorDecl>(D))
    return; // Don't check inside destructors.

  Handler.enterFunction(CurrentFunction);

  BlockInfo.resize(CFGraph->getNumBlockIDs(),
                   CFGBlockInfo::getEmptyBlockInfo(LocalVarMap));

  // We need to explore the CFG via a "topological" ordering.
  // That way, we will be guaranteed to have information about required
  // predecessor locksets when exploring a new block.
  const PostOrderCFGView *SortedGraph = walker.getSortedGraph();
  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);

  CFGBlockInfo &Initial = BlockInfo[CFGraph->getEntry().getBlockID()];
  CFGBlockInfo &Final = BlockInfo[CFGraph->getExit().getBlockID()];

  // Mark entry block as reachable
  Initial.Reachable = true;

  // Compute SSA names for local variables
  LocalVarMap.traverseCFG(CFGraph, SortedGraph, BlockInfo);

  // Fill in source locations for all CFGBlocks.
  findBlockLocations(CFGraph, SortedGraph, BlockInfo);

  CapExprSet ExclusiveLocksAcquired;
  CapExprSet SharedLocksAcquired;
  CapExprSet LocksReleased;

  // Add locks from exclusive_locks_required and shared_locks_required
  // to initial lockset. Also turn off checking for lock and unlock functions.
  // FIXME: is there a more intelligent way to check lock/unlock functions?
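  // For example (illustrative): given "void F() REQUIRES(Mu);", the entry
  // lockset built below starts with 'Mu' recorded as a declared, held
  // capability, and because the expected exit set defaults to the entry set,
  // 'Mu' must still be held when F returns.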
  if (!SortedGraph->empty() && D->hasAttrs()) {
    assert(*SortedGraph->begin() == &CFGraph->getEntry());
    FactSet &InitialLockset = Initial.EntrySet;

    CapExprSet ExclusiveLocksToAdd;
    CapExprSet SharedLocksToAdd;

    SourceLocation Loc = D->getLocation();
    for (const auto *Attr : D->attrs()) {
      Loc = Attr->getLocation();
      if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) {
        getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
                    nullptr, D);
      } else if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) {
        // UNLOCK_FUNCTION() is used to hide the underlying lock implementation.
        // We must ignore such methods.
        if (A->args_size() == 0)
          return;
        getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
                    nullptr, D);
        getMutexIDs(LocksReleased, A, nullptr, D);
      } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) {
        if (A->args_size() == 0)
          return;
        getMutexIDs(A->isShared() ? SharedLocksAcquired
                                  : ExclusiveLocksAcquired,
                    A, nullptr, D);
      } else if (isa<ExclusiveTrylockFunctionAttr>(Attr)) {
        // Don't try to check trylock functions for now.
        return;
      } else if (isa<SharedTrylockFunctionAttr>(Attr)) {
        // Don't try to check trylock functions for now.
        return;
      } else if (isa<TryAcquireCapabilityAttr>(Attr)) {
        // Don't try to check trylock functions for now.
        return;
      }
    }

    // FIXME -- Loc can be wrong here.
    for (const auto &Mu : ExclusiveLocksToAdd) {
      auto Entry = std::make_unique<LockableFactEntry>(Mu, LK_Exclusive, Loc,
                                                       FactEntry::Declared);
      addLock(InitialLockset, std::move(Entry), true);
    }
    for (const auto &Mu : SharedLocksToAdd) {
      auto Entry = std::make_unique<LockableFactEntry>(Mu, LK_Shared, Loc,
                                                       FactEntry::Declared);
      addLock(InitialLockset, std::move(Entry), true);
    }
  }

  // Compute the expected exit set.
  // By default, we expect all locks held on entry to be held on exit.
  FactSet ExpectedFunctionExitSet = Initial.EntrySet;

  // Adjust the expected exit set by adding or removing locks, as declared
  // by *-LOCK_FUNCTION and UNLOCK_FUNCTION. The intersect below will then
  // issue the appropriate warning.
  // FIXME: the location here is not quite right.
  for (const auto &Lock : ExclusiveLocksAcquired)
    ExpectedFunctionExitSet.addLock(
        FactMan, std::make_unique<LockableFactEntry>(Lock, LK_Exclusive,
                                                     D->getLocation()));
  for (const auto &Lock : SharedLocksAcquired)
    ExpectedFunctionExitSet.addLock(
        FactMan,
        std::make_unique<LockableFactEntry>(Lock, LK_Shared, D->getLocation()));
  for (const auto &Lock : LocksReleased)
    ExpectedFunctionExitSet.removeLock(FactMan, Lock);

  for (const auto *CurrBlock : *SortedGraph) {
    unsigned CurrBlockID = CurrBlock->getBlockID();
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];

    // Use the default initial lockset in case there are no predecessors.
    VisitedBlocks.insert(CurrBlock);

    // Iterate through the predecessor blocks and warn if the lockset for all
    // predecessors is not the same. We take the entry lockset of the current
    // block to be the intersection of all previous locksets.
    // FIXME: By keeping the intersection, we may output more errors in future
    // for a lock which is not in the intersection, but was in the union. We
    // may want to also keep the union in future. As an example, let's say
    // the intersection contains Mutex L, and the union contains L and M.
    // Later we unlock M. At this point, we would output an error because we
    // never locked M; although the real error is probably that we forgot to
    // lock M on all code paths. Conversely, let's say that later we lock M.
    // In this case, we should compare against the intersection instead of the
    // union because the real error is probably that we forgot to unlock M on
    // all code paths.
    bool LocksetInitialized = false;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE = CurrBlock->pred_end(); PI != PE; ++PI) {
      // if *PI -> CurrBlock is a back edge
      if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI))
        continue;

      unsigned PrevBlockID = (*PI)->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      // Ignore edges from blocks that can't return.
      if (neverReturns(*PI) || !PrevBlockInfo->Reachable)
        continue;

      // Okay, we can reach this block from the entry.
      CurrBlockInfo->Reachable = true;

      FactSet PrevLockset;
      getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet, *PI, CurrBlock);

      if (!LocksetInitialized) {
        CurrBlockInfo->EntrySet = PrevLockset;
        LocksetInitialized = true;
      } else {
        // Surprisingly 'continue' doesn't always produce back edges, because
        // the CFG has empty "transition" blocks where they meet with the end
        // of the regular loop body. We still want to diagnose them as loops.
        intersectAndWarn(
            CurrBlockInfo->EntrySet, PrevLockset, CurrBlockInfo->EntryLoc,
            isa_and_nonnull<ContinueStmt>((*PI)->getTerminatorStmt())
                ? LEK_LockedSomeLoopIterations
                : LEK_LockedSomePredecessors);
      }
    }

    // Skip rest of block if it's not reachable.
    if (!CurrBlockInfo->Reachable)
      continue;

    BuildLockset LocksetBuilder(this, *CurrBlockInfo, ExpectedFunctionExitSet);

    // Visit all the statements in the basic block.
    for (const auto &BI : *CurrBlock) {
      switch (BI.getKind()) {
      case CFGElement::Statement: {
        CFGStmt CS = BI.castAs<CFGStmt>();
        LocksetBuilder.Visit(CS.getStmt());
        break;
      }
      // Ignore BaseDtor and MemberDtor for now.
      case CFGElement::AutomaticObjectDtor: {
        CFGAutomaticObjDtor AD = BI.castAs<CFGAutomaticObjDtor>();
        const auto *DD = AD.getDestructorDecl(AC.getASTContext());
        if (!DD->hasAttrs())
          break;

        LocksetBuilder.handleCall(nullptr, DD,
                                  SxBuilder.createVariable(AD.getVarDecl()),
                                  AD.getTriggerStmt()->getEndLoc());
        break;
      }

      case CFGElement::CleanupFunction: {
        const CFGCleanupFunction &CF = BI.castAs<CFGCleanupFunction>();
        LocksetBuilder.handleCall(/*Exp=*/nullptr, CF.getFunctionDecl(),
                                  SxBuilder.createVariable(CF.getVarDecl()),
                                  CF.getVarDecl()->getLocation());
        break;
      }

      case CFGElement::TemporaryDtor: {
        auto TD = BI.castAs<CFGTemporaryDtor>();

        // Clean up constructed object even if there are no attributes to
        // keep the number of objects in limbo as small as possible.
        if (auto Object = ConstructedObjects.find(
                TD.getBindTemporaryExpr()->getSubExpr());
            Object != ConstructedObjects.end()) {
          const auto *DD = TD.getDestructorDecl(AC.getASTContext());
          if (DD->hasAttrs())
            // TODO: the location here isn't quite correct.
            LocksetBuilder.handleCall(nullptr, DD, Object->second,
                                      TD.getBindTemporaryExpr()->getEndLoc());
          ConstructedObjects.erase(Object);
        }
        break;
      }
      default:
        break;
      }
    }
    CurrBlockInfo->ExitSet = LocksetBuilder.FSet;

    // For every back edge from CurrBlock (the end of the loop) to another block
    // (FirstLoopBlock) we need to check that the Lockset of Block is equal to
    // the one held at the beginning of FirstLoopBlock. We can look up the
    // Lockset held at the beginning of FirstLoopBlock in the EntryLockSets map.
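    // For example (illustrative): in
    //   while (Cond) { Mu.Lock(); DoWork(); if (Flaky) Mu.Unlock(); }
    // the lockset at the end of the loop body may still contain 'Mu' while
    // the entry lockset of the loop header does not, so the back-edge check
    // below warns that 'Mu' is not held consistently across loop iterations.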
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE = CurrBlock->succ_end(); SI != SE; ++SI) {
      // if CurrBlock -> *SI is *not* a back edge
      if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      CFGBlockInfo *PreLoop = &BlockInfo[FirstLoopBlock->getBlockID()];
      CFGBlockInfo *LoopEnd = &BlockInfo[CurrBlockID];
      intersectAndWarn(PreLoop->EntrySet, LoopEnd->ExitSet, PreLoop->EntryLoc,
                       LEK_LockedSomeLoopIterations);
    }
  }

  // Skip the final check if the exit block is unreachable.
  if (!Final.Reachable)
    return;

  // FIXME: Should we call this function for all blocks which exit the function?
  intersectAndWarn(ExpectedFunctionExitSet, Final.ExitSet, Final.ExitLoc,
                   LEK_LockedAtEndOfFunction, LEK_NotLockedAtEndOfFunction);

  Handler.leaveFunction(CurrentFunction);
}
/// Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
/// Each block in the CFG is traversed exactly once.
void threadSafety::runThreadSafetyAnalysis(AnalysisDeclContext &AC,
                                           ThreadSafetyHandler &Handler,
                                           BeforeSet **BSet) {
  if (!*BSet)
    *BSet = new BeforeSet;
  ThreadSafetyAnalyzer Analyzer(Handler, *BSet);
  Analyzer.runAnalysis(AC);
}

void threadSafety::threadSafetyCleanup(BeforeSet *Cache) { delete Cache; }

/// Helper function that returns a LockKind required for the given level
/// of access.
LockKind threadSafety::getLockKindFromAccessKind(AccessKind AK) {
  switch (AK) {
  case AK_Read:
    return LK_Shared;
  case AK_Written:
    return LK_Exclusive;
  }
  llvm_unreachable("Unknown AccessKind");
}