Path: blob/main/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.h
//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main internal TSan header file.
//
// Ground rules:
//   - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
//     function-scope locals)
//   - All functions/classes/etc reside in namespace __tsan, except for those
//     declared in tsan_interface.h.
//   - Platform-specific files should be used instead of ifdefs (*).
//   - No system headers included in header files (*).
//   - Platform-specific headers included only into platform-specific files (*).
//
//   (*) Except when inlining is critical for performance.
//===----------------------------------------------------------------------===//

#ifndef TSAN_RTL_H
#define TSAN_RTL_H

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_asm.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common/sanitizer_libignore.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_vector.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
#include "tsan_ignoreset.h"
#include "tsan_ilist.h"
#include "tsan_mman.h"
#include "tsan_mutexset.h"
#include "tsan_platform.h"
#include "tsan_report.h"
#include "tsan_shadow.h"
#include "tsan_stack_trace.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
#include "tsan_vector_clock.h"

#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
#endif

namespace __tsan {

#if !SANITIZER_GO
struct MapUnmapCallback;
# if defined(__mips64) || defined(__aarch64__) || defined(__loongarch__) || \
     defined(__powerpc__) || SANITIZER_RISCV64

struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = 0;
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = 20;
  using AddressSpaceView = LocalAddressSpaceView;
  typedef __tsan::MapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#else
struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
# if defined(__s390x__)
  typedef MappingS390x Mapping;
# else
  typedef Mapping48AddressSpace Mapping;
# endif
  static const uptr kSpaceBeg = Mapping::kHeapMemBeg;
  static const uptr kSpaceSize = Mapping::kHeapMemEnd - Mapping::kHeapMemBeg;
  static const uptr kMetadataSize = 0;
  typedef DefaultSizeClassMap SizeClassMap;
  typedef __tsan::MapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif
typedef CombinedAllocator<PrimaryAllocator> Allocator;
typedef Allocator::AllocatorCache AllocatorCache;
Allocator *allocator();
#endif
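
// Illustrative usage (editorial sketch, not part of the original header):
// internal TSan allocations are expected to flow through the combined
// allocator together with the per-Processor cache declared further below,
// roughly along these lines (the helper name is hypothetical):
//
//   void *ExampleAlloc(Processor *proc, uptr size) {
//     return allocator()->Allocate(&proc->alloc_cache, size, /*alignment=*/8);
//   }
//
// The user-facing allocation entry points are declared in tsan_mman.h,
// which is included above.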

struct ThreadSignalContext;

struct JmpBuf {
  uptr sp;
  int int_signal_send;
  bool in_blocking_func;
  uptr in_signal_handler;
  uptr *shadow_stack_pos;
};

// A Processor represents a physical thread, or a P for Go.
// It is used to store internal resources like allocator cache, and does not
// participate in race-detection logic (invisible to end user).
// In C++ it is tied to an OS thread just like ThreadState, however ideally
// it should be tied to a CPU (this way we will have fewer allocator caches).
// In Go it is tied to a P, so there are significantly fewer Processor's than
// ThreadState's (which are tied to Gs).
// A ThreadState must be wired with a Processor to handle events.
struct Processor {
  ThreadState *thr;  // currently wired thread, or nullptr
#if !SANITIZER_GO
  AllocatorCache alloc_cache;
  InternalAllocatorCache internal_alloc_cache;
#endif
  DenseSlabAllocCache block_cache;
  DenseSlabAllocCache sync_cache;
  DDPhysicalThread *dd_pt;
};

#if !SANITIZER_GO
// ScopedGlobalProcessor temporarily sets up a global processor for the current
// thread, if it does not have one. Intended for interceptors that can run
// at the very thread end, when we already destroyed the thread processor.
struct ScopedGlobalProcessor {
  ScopedGlobalProcessor();
  ~ScopedGlobalProcessor();
};
#endif
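
// Illustrative usage (editorial sketch, hypothetical interceptor): calls that
// can arrive after the thread's own Processor has been destroyed wrap their
// body in a ScopedGlobalProcessor so the allocator and sync machinery still
// have a Processor to work with:
//
//   TSAN_INTERCEPTOR(void, some_late_libc_call, void *p) {
//     ScopedGlobalProcessor sgp;
//     // ... handle the call ...
//   }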

struct TidEpoch {
  Tid tid;
  Epoch epoch;
};

struct alignas(SANITIZER_CACHE_LINE_SIZE) TidSlot {
  Mutex mtx;
  Sid sid;
  atomic_uint32_t raw_epoch;
  ThreadState *thr;
  Vector<TidEpoch> journal;
  INode node;

  Epoch epoch() const {
    return static_cast<Epoch>(atomic_load(&raw_epoch, memory_order_relaxed));
  }

  void SetEpoch(Epoch v) {
    atomic_store(&raw_epoch, static_cast<u32>(v), memory_order_relaxed);
  }

  TidSlot();
};

// This struct is stored in TLS.
struct alignas(SANITIZER_CACHE_LINE_SIZE) ThreadState {
  FastState fast_state;
  int ignore_sync;
#if !SANITIZER_GO
  int ignore_interceptors;
#endif
  uptr *shadow_stack_pos;

  // Current position in tctx->trace.Back()->events (Event*).
  atomic_uintptr_t trace_pos;
  // PC of the last memory access, used to compute PC deltas in the trace.
  uptr trace_prev_pc;

  // Technically `current` should be a separate THREADLOCAL variable;
  // but it is placed here in order to share cache line with previous fields.
  ThreadState* current;

  atomic_sint32_t pending_signals;

  VectorClock clock;

  // This is a slow path flag. On fast path, fast_state.GetIgnoreBit() is read.
  // We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  int suppress_reports;
  // Go does not support ignores.
#if !SANITIZER_GO
  IgnoreSet mop_ignore_set;
  IgnoreSet sync_ignore_set;
#endif
  uptr *shadow_stack;
  uptr *shadow_stack_end;
#if !SANITIZER_GO
  Vector<JmpBuf> jmp_bufs;
  int in_symbolizer;
  atomic_uintptr_t in_blocking_func;
  bool in_ignored_lib;
  bool is_inited;
#endif
  MutexSet mset;
  bool is_dead;
  const Tid tid;
  uptr stk_addr;
  uptr stk_size;
  uptr tls_addr;
  uptr tls_size;
  ThreadContext *tctx;

  DDLogicalThread *dd_lt;

  TidSlot *slot;
  uptr slot_epoch;
  bool slot_locked;

  // Current wired Processor, or nullptr. Required to handle any events.
  Processor *proc1;
#if !SANITIZER_GO
  Processor *proc() { return proc1; }
#else
  Processor *proc();
#endif

  atomic_uintptr_t in_signal_handler;
  atomic_uintptr_t signal_ctx;

#if !SANITIZER_GO
  StackID last_sleep_stack_id;
  VectorClock last_sleep_clock;
#endif

  // Set in regions of runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.
  int nomalloc;

  const ReportDesc *current_report;

  explicit ThreadState(Tid tid);
};

#if !SANITIZER_GO
#if SANITIZER_APPLE || SANITIZER_ANDROID
ThreadState *cur_thread();
void set_cur_thread(ThreadState *thr);
void cur_thread_finalize();
inline ThreadState *cur_thread_init() { return cur_thread(); }
# else
__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
inline ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current;
}
inline ThreadState *cur_thread_init() {
  ThreadState *thr = reinterpret_cast<ThreadState *>(cur_thread_placeholder);
  if (UNLIKELY(!thr->current))
    thr->current = thr;
  return thr->current;
}
inline void set_cur_thread(ThreadState *thr) {
  reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current = thr;
}
inline void cur_thread_finalize() { }
# endif  // SANITIZER_APPLE || SANITIZER_ANDROID
#endif  // SANITIZER_GO
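
// Editorial note (not part of the original header): cur_thread() returns the
// ThreadState referenced by the placeholder's `current` pointer rather than
// the placeholder itself. This extra indirection is presumably what allows
// set_cur_thread() to re-point the calling OS thread at a different
// ThreadState (for example when the fiber API switches contexts) without
// touching the TLS slot itself.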

class ThreadContext final : public ThreadContextBase {
 public:
  explicit ThreadContext(Tid tid);
  ~ThreadContext();
  ThreadState *thr;
  StackID creation_stack_id;
  VectorClock *sync;
  uptr sync_epoch;
  Trace trace;

  // Override superclass callbacks.
  void OnDead() override;
  void OnJoined(void *arg) override;
  void OnFinished() override;
  void OnStarted(void *arg) override;
  void OnCreated(void *arg) override;
  void OnReset() override;
  void OnDetached(void *arg) override;
};

struct RacyStacks {
  MD5Hash hash[2];
  bool operator==(const RacyStacks &other) const;
};

struct RacyAddress {
  uptr addr_min;
  uptr addr_max;
};

struct FiredSuppression {
  ReportType type;
  uptr pc_or_addr;
  Suppression *supp;
};

struct Context {
  Context();

  bool initialized;
#if !SANITIZER_GO
  bool after_multithreaded_fork;
#endif

  MetaMap metamap;

  Mutex report_mtx;
  int nreported;
  atomic_uint64_t last_symbolize_time_ns;

  void *background_thread;
  atomic_uint32_t stop_background_thread;

  ThreadRegistry thread_registry;

  // This is used to prevent a very unlikely but very pathological behavior.
  // Since memory access handling is not synchronized with DoReset,
  // a thread running concurrently with DoReset can leave a bogus shadow value
  // that will be later falsely detected as a race. For such false races
  // RestoreStack will return false and we will not report it.
  // However, consider that a thread leaves a whole lot of such bogus values
  // and these values are later read by a whole lot of threads.
  // This will cause massive amounts of ReportRace calls and lots of
  // serialization. In very pathological cases the resulting slowdown
  // can be >100x. This is very unlikely, but it was presumably observed
  // in practice: https://github.com/google/sanitizers/issues/1552
  // If this happens, previous access sid+epoch will be the same for all of
  // these false races b/c if the thread will try to increment epoch, it will
  // notice that DoReset has happened and will stop producing bogus shadow
  // values. So, last_spurious_race is used to remember the last sid+epoch
  // for which RestoreStack returned false. Then it is used to filter out
  // races with the same sid+epoch very early and quickly.
  // It is of course possible that multiple threads left multiple bogus shadow
  // values and all of them are read by lots of threads at the same time.
  // In such case last_spurious_race will only be able to deduplicate a few
  // races from one thread, then few from another and so on. An alternative
  // would be to hold an array of such sid+epoch, but we consider such scenario
  // as even less likely.
  // Note: this can lead to some rare false negatives as well:
  // 1. When a legit access with the same sid+epoch participates in a race
  // as the "previous" memory access, it will be wrongly filtered out.
  // 2. When RestoreStack returns false for a legit memory access because it
  // was already evicted from the thread trace, we will still remember it in
  // last_spurious_race. Then if there is another racing memory access from
  // the same thread that happened in the same epoch, but was stored in the
  // next thread trace part (which is still preserved in the thread trace),
  // we will also wrongly filter it out while RestoreStack would actually
  // succeed for that second memory access.
  RawShadow last_spurious_race;

  Mutex racy_mtx;
  Vector<RacyStacks> racy_stacks;
  // Number of fired suppressions may be large enough.
  Mutex fired_suppressions_mtx;
  InternalMmapVector<FiredSuppression> fired_suppressions;
  DDetector *dd;

  Flags flags;
  fd_t memprof_fd;

  // The last slot index (kFreeSid) is used to denote freed memory.
  TidSlot slots[kThreadSlotCount - 1];

  // Protects global_epoch, slot_queue, trace_part_recycle.
  Mutex slot_mtx;
  uptr global_epoch;  // guarded by slot_mtx and by all slot mutexes
  bool resetting;     // global reset is in progress
  IList<TidSlot, &TidSlot::node> slot_queue SANITIZER_GUARDED_BY(slot_mtx);
  IList<TraceHeader, &TraceHeader::global, TracePart> trace_part_recycle
      SANITIZER_GUARDED_BY(slot_mtx);
  uptr trace_part_total_allocated SANITIZER_GUARDED_BY(slot_mtx);
  uptr trace_part_recycle_finished SANITIZER_GUARDED_BY(slot_mtx);
  uptr trace_part_finished_excess SANITIZER_GUARDED_BY(slot_mtx);
#if SANITIZER_GO
  uptr mapped_shadow_begin;
  uptr mapped_shadow_end;
#endif
};

extern Context *ctx;  // The one and the only global runtime context.

ALWAYS_INLINE Flags *flags() {
  return &ctx->flags;
}

struct ScopedIgnoreInterceptors {
  ScopedIgnoreInterceptors() {
#if !SANITIZER_GO
    cur_thread()->ignore_interceptors++;
#endif
  }

  ~ScopedIgnoreInterceptors() {
#if !SANITIZER_GO
    cur_thread()->ignore_interceptors--;
#endif
  }
};
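
// Illustrative usage (editorial sketch, hypothetical call site): runtime code
// that is about to call into functions that may themselves be intercepted can
// suppress interceptor processing for the current scope:
//
//   {
//     ScopedIgnoreInterceptors ignore;
//     CallThatMayReenterInterceptors();  // hypothetical helper
//   }
//
// ScopedReportBase below embeds one such guard for the duration of report
// construction, since symbolization makes many intercepted calls.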

const char *GetObjectTypeFromTag(uptr tag);
const char *GetReportHeaderFromTag(uptr tag);
uptr TagFromShadowStackFrame(uptr pc);

class ScopedReportBase {
 public:
  void AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, Tid tid,
                       StackTrace stack, const MutexSet *mset);
  void AddStack(StackTrace stack, bool suppressable = false);
  void AddThread(const ThreadContext *tctx, bool suppressable = false);
  void AddThread(Tid tid, bool suppressable = false);
  void AddUniqueTid(Tid unique_tid);
  int AddMutex(uptr addr, StackID creation_stack_id);
  void AddLocation(uptr addr, uptr size);
  void AddSleep(StackID stack_id);
  void SetCount(int count);
  void SetSigNum(int sig);

  const ReportDesc *GetReport() const;

 protected:
  ScopedReportBase(ReportType typ, uptr tag);
  ~ScopedReportBase();

 private:
  ReportDesc *rep_;
  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore_interceptors_;

  ScopedReportBase(const ScopedReportBase &) = delete;
  void operator=(const ScopedReportBase &) = delete;
};

class ScopedReport : public ScopedReportBase {
 public:
  explicit ScopedReport(ReportType typ, uptr tag = kExternalTagNone);
  ~ScopedReport();

 private:
  ScopedErrorReportLock lock_;
};

bool ShouldReport(ThreadState *thr, ReportType typ);
ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack);

// The stack could look like:
//   <start> | <main> | <foo> | tag | <bar>
// This will extract the tag and keep:
//   <start> | <main> | <foo> | <bar>
template<typename StackTraceTy>
void ExtractTagFromStack(StackTraceTy *stack, uptr *tag = nullptr) {
  if (stack->size < 2) return;
  uptr possible_tag_pc = stack->trace[stack->size - 2];
  uptr possible_tag = TagFromShadowStackFrame(possible_tag_pc);
  if (possible_tag == kExternalTagNone) return;
  stack->trace_buffer[stack->size - 2] = stack->trace_buffer[stack->size - 1];
  stack->size -= 1;
  if (tag) *tag = possible_tag;
}
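
// Worked example (editorial, derived from the code above): for a captured
// stack {pc_start, pc_main, pc_foo, tag_pc, pc_bar} of size 5, the
// second-to-last frame tag_pc is looked up via TagFromShadowStackFrame();
// if it yields a real tag, pc_bar is copied over the tag frame and the size
// drops to 4, leaving {pc_start, pc_main, pc_foo, pc_bar}.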

template<typename StackTraceTy>
void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack,
                        uptr *tag = nullptr) {
  uptr size = thr->shadow_stack_pos - thr->shadow_stack;
  uptr start = 0;
  if (size + !!toppc > kStackTraceMax) {
    start = size + !!toppc - kStackTraceMax;
    size = kStackTraceMax - !!toppc;
  }
  stack->Init(&thr->shadow_stack[start], size, toppc);
  ExtractTagFromStack(stack, tag);
}

#define GET_STACK_TRACE_FATAL(thr, pc) \
  VarSizeStackTrace stack;             \
  ObtainCurrentStack(thr, pc, &stack); \
  stack.ReverseOrder();
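
// Illustrative usage (editorial sketch, hypothetical function): reporting
// paths expand the macro to declare and fill a local `stack` variable in one
// step:
//
//   void ExampleReportPath(ThreadState *thr, uptr pc) {
//     GET_STACK_TRACE_FATAL(thr, pc);
//     // `stack` is now a VarSizeStackTrace, reversed via ReverseOrder().
//   }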

void MapShadow(uptr addr, uptr size);
void MapThreadTrace(uptr addr, uptr size, const char *name);
void DontNeedShadowFor(uptr addr, uptr size);
void UnmapShadow(ThreadState *thr, uptr addr, uptr size);
void InitializeShadowMemory();
void DontDumpShadow(uptr addr, uptr size);
void InitializeInterceptors();
void InitializeLibIgnore();
void InitializeDynamicAnnotations();

void ForkBefore(ThreadState *thr, uptr pc);
void ForkParentAfter(ThreadState *thr, uptr pc);
void ForkChildAfter(ThreadState *thr, uptr pc, bool start_thread);

void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
                AccessType typ);
bool OutputReport(ThreadState *thr, const ScopedReport &srep);
bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
bool IsExpectedReport(uptr addr, uptr size);

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
#else
# define DPrintf(...)
#endif

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
# define DPrintf2 Printf
#else
# define DPrintf2(...)
#endif

StackID CurrentStackId(ThreadState *thr, uptr pc);
ReportStack *SymbolizeStackId(StackID stack_id);
void PrintCurrentStack(ThreadState *thr, uptr pc);
void PrintCurrentStackSlow(uptr pc);  // uses libunwind
MBlock *JavaHeapBlock(uptr addr, uptr *start);

void Initialize(ThreadState *thr);
void MaybeSpawnBackgroundThread();
int Finalize(ThreadState *thr);

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);

void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
                  AccessType typ);
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           AccessType typ);
// This creates 2 non-inlined specialized versions of MemoryAccessRange.
template <bool is_read>
void MemoryAccessRangeT(ThreadState *thr, uptr pc, uptr addr, uptr size);

ALWAYS_INLINE
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
                       bool is_write) {
  if (size == 0)
    return;
  if (is_write)
    MemoryAccessRangeT<false>(thr, pc, addr, size);
  else
    MemoryAccessRangeT<true>(thr, pc, addr, size);
}

void ShadowSet(RawShadow *p, RawShadow *end, RawShadow v);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
                                         uptr size);

void ThreadIgnoreBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreEnd(ThreadState *thr);
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncEnd(ThreadState *thr);

Tid ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id,
                 ThreadType thread_type);
void ThreadFinish(ThreadState *thr);
Tid ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, Tid tid);
void ThreadDetach(ThreadState *thr, uptr pc, Tid tid);
void ThreadFinalize(ThreadState *thr);
void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
void ProcessPendingSignalsImpl(ThreadState *thr);
void ThreadNotJoined(ThreadState *thr, uptr pc, Tid tid, uptr uid);

Processor *ProcCreate();
void ProcDestroy(Processor *proc);
void ProcWire(Processor *proc, ThreadState *thr);
void ProcUnwire(Processor *proc, ThreadState *thr);

// Note: the parameter is called flagz, because flags is already taken
// by the global function that returns flags.
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0,
                   int rec = 1);
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr);  // call on EOWNERDEAD
void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr);

void Acquire(ThreadState *thr, uptr pc, uptr addr);
// AcquireGlobal synchronizes the current thread with all other threads.
// In terms of happens-before relation, it draws a HB edge from all threads
// (where they happen to execute right now) to the current thread. We use it to
// handle Go finalizers. Namely, finalizer goroutine executes AcquireGlobal
// right before executing finalizers. This provides a coarse, but simple
// approximation of the actual required synchronization.
void AcquireGlobal(ThreadState *thr);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);
void IncrementEpoch(ThreadState *thr);

#if !SANITIZER_GO
uptr ALWAYS_INLINE HeapEnd() {
  return HeapMemEnd() + PrimaryAllocator::AdditionalSize();
}
#endif

void SlotAttachAndLock(ThreadState *thr) SANITIZER_ACQUIRE(thr->slot->mtx);
void SlotDetach(ThreadState *thr);
void SlotLock(ThreadState *thr) SANITIZER_ACQUIRE(thr->slot->mtx);
void SlotUnlock(ThreadState *thr) SANITIZER_RELEASE(thr->slot->mtx);
void DoReset(ThreadState *thr, uptr epoch);
void FlushShadowMemory();

ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags);
void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber);
void FiberSwitch(ThreadState *thr, uptr pc, ThreadState *fiber, unsigned flags);

// These need to match __tsan_switch_to_fiber_* flags defined in
// tsan_interface.h. See documentation there as well.
enum FiberSwitchFlags {
  FiberSwitchFlagNoSync = 1 << 0,  // __tsan_switch_to_fiber_no_sync
};
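
// Illustrative usage (editorial sketch): a user-level scheduler built on the
// public fiber API from tsan_interface.h would pass the matching flag when it
// does not want the switch to establish synchronization, e.g.:
//
//   __tsan_switch_to_fiber(next_fiber, __tsan_switch_to_fiber_no_sync);
//
// where next_fiber was previously obtained from __tsan_create_fiber().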

class SlotLocker {
 public:
  ALWAYS_INLINE
  SlotLocker(ThreadState *thr, bool recursive = false)
      : thr_(thr), locked_(recursive ? thr->slot_locked : false) {
#if !SANITIZER_GO
    // We are in trouble if we are here with in_blocking_func set.
    // If in_blocking_func is set, all signals will be delivered synchronously,
    // which means we can't lock slots since the signal handler will try
    // to lock it recursively and deadlock.
    DCHECK(!atomic_load(&thr->in_blocking_func, memory_order_relaxed));
#endif
    if (!locked_)
      SlotLock(thr_);
  }

  ALWAYS_INLINE
  ~SlotLocker() {
    if (!locked_)
      SlotUnlock(thr_);
  }

 private:
  ThreadState *thr_;
  bool locked_;
};

class SlotUnlocker {
 public:
  SlotUnlocker(ThreadState *thr) : thr_(thr), locked_(thr->slot_locked) {
    if (locked_)
      SlotUnlock(thr_);
  }

  ~SlotUnlocker() {
    if (locked_)
      SlotLock(thr_);
  }

 private:
  ThreadState *thr_;
  bool locked_;
};
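
// Illustrative usage (editorial sketch, hypothetical function): runtime paths
// that need the thread's slot mutex held use the RAII helpers above, e.g.:
//
//   void ExampleSlotOperation(ThreadState *thr) {
//     SlotLocker locker(thr);
//     // ... the slot lock is held for the rest of this scope ...
//   }
//
// SlotUnlocker is the inverse: it temporarily drops an already-held slot lock
// (for example around a blocking operation) and re-acquires it on scope exit.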

ALWAYS_INLINE void ProcessPendingSignals(ThreadState *thr) {
  if (UNLIKELY(atomic_load_relaxed(&thr->pending_signals)))
    ProcessPendingSignalsImpl(thr);
}

extern bool is_initialized;

ALWAYS_INLINE
void LazyInitialize(ThreadState *thr) {
  // If we can use .preinit_array, assume that __tsan_init
  // called from .preinit_array initializes runtime before
  // any instrumented code except when tsan is used as a
  // shared library.
#if (!SANITIZER_CAN_USE_PREINIT_ARRAY || defined(SANITIZER_SHARED))
  if (UNLIKELY(!is_initialized))
    Initialize(thr);
#endif
}

void TraceResetForTesting();
void TraceSwitchPart(ThreadState *thr);
void TraceSwitchPartImpl(ThreadState *thr);
bool RestoreStack(EventType type, Sid sid, Epoch epoch, uptr addr, uptr size,
                  AccessType typ, Tid *ptid, VarSizeStackTrace *pstk,
                  MutexSet *pmset, uptr *ptag);

template <typename EventT>
ALWAYS_INLINE WARN_UNUSED_RESULT bool TraceAcquire(ThreadState *thr,
                                                   EventT **ev) {
  // TraceSwitchPart accesses shadow_stack, but it's called infrequently,
  // so we check it here proactively.
  DCHECK(thr->shadow_stack);
  Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
#if SANITIZER_DEBUG
  // TraceSwitch acquires these mutexes,
  // so we lock them here to detect deadlocks more reliably.
  { Lock lock(&ctx->slot_mtx); }
  { Lock lock(&thr->tctx->trace.mtx); }
  TracePart *current = thr->tctx->trace.parts.Back();
  if (current) {
    DCHECK_GE(pos, &current->events[0]);
    DCHECK_LE(pos, &current->events[TracePart::kSize]);
  } else {
    DCHECK_EQ(pos, nullptr);
  }
#endif
  // TracePart is allocated with mmap and is at least 4K aligned.
  // So the following check is a faster way to check for part end.
  // It may have false positives in the middle of the trace,
  // they are filtered out in TraceSwitch.
  if (UNLIKELY(((uptr)(pos + 1) & TracePart::kAlignment) == 0))
    return false;
  *ev = reinterpret_cast<EventT *>(pos);
  return true;
}

template <typename EventT>
ALWAYS_INLINE void TraceRelease(ThreadState *thr, EventT *evp) {
  DCHECK_LE(evp + 1, &thr->tctx->trace.parts.Back()->events[TracePart::kSize]);
  atomic_store_relaxed(&thr->trace_pos, (uptr)(evp + 1));
}

template <typename EventT>
void TraceEvent(ThreadState *thr, EventT ev) {
  EventT *evp;
  if (!TraceAcquire(thr, &evp)) {
    TraceSwitchPart(thr);
    UNUSED bool res = TraceAcquire(thr, &evp);
    DCHECK(res);
  }
  *evp = ev;
  TraceRelease(thr, evp);
}

ALWAYS_INLINE WARN_UNUSED_RESULT bool TryTraceFunc(ThreadState *thr,
                                                   uptr pc = 0) {
  if (!kCollectHistory)
    return true;
  EventFunc *ev;
  if (UNLIKELY(!TraceAcquire(thr, &ev)))
    return false;
  ev->is_access = 0;
  ev->is_func = 1;
  ev->pc = pc;
  TraceRelease(thr, ev);
  return true;
}

WARN_UNUSED_RESULT
bool TryTraceMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
                          AccessType typ);
WARN_UNUSED_RESULT
bool TryTraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
                               AccessType typ);
void TraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
                            AccessType typ);
void TraceFunc(ThreadState *thr, uptr pc = 0);
void TraceMutexLock(ThreadState *thr, EventType type, uptr pc, uptr addr,
                    StackID stk);
void TraceMutexUnlock(ThreadState *thr, uptr addr);
void TraceTime(ThreadState *thr);

void TraceRestartFuncExit(ThreadState *thr);
void TraceRestartFuncEntry(ThreadState *thr, uptr pc);

void GrowShadowStack(ThreadState *thr);

ALWAYS_INLINE
void FuncEntry(ThreadState *thr, uptr pc) {
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.sid(), (void *)pc);
  if (UNLIKELY(!TryTraceFunc(thr, pc)))
    return TraceRestartFuncEntry(thr, pc);
  DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
  if (thr->shadow_stack_pos == thr->shadow_stack_end)
    GrowShadowStack(thr);
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

ALWAYS_INLINE
void FuncExit(ThreadState *thr) {
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.sid());
  if (UNLIKELY(!TryTraceFunc(thr, 0)))
    return TraceRestartFuncExit(thr);
  DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#endif
  thr->shadow_stack_pos--;
}

#if !SANITIZER_GO
extern void (*on_initialize)(void);
extern int (*on_finalize)(int);
#endif

}  // namespace __tsan

#endif  // TSAN_RTL_H