Path: contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
//===-- tsan_rtl.cpp ------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "tsan_rtl.h"

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_interface.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "ubsan/ubsan_init.h"

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

#if SANITIZER_APPLE
SANITIZER_WEAK_DEFAULT_IMPL
void __tsan_test_only_on_fork() {}
#endif

namespace __tsan {

#if !SANITIZER_GO
void (*on_initialize)(void);
int (*on_finalize)(int);
#endif

#if !SANITIZER_GO && !SANITIZER_APPLE
alignas(SANITIZER_CACHE_LINE_SIZE) THREADLOCAL __attribute__((tls_model(
    "initial-exec"))) char cur_thread_placeholder[sizeof(ThreadState)];
#endif
alignas(SANITIZER_CACHE_LINE_SIZE) static char ctx_placeholder[sizeof(Context)];
Context *ctx;

// Can be overridden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
void OnInitialize();
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnFinalize(bool failed) {
#  if !SANITIZER_GO
  if (on_finalize)
    return on_finalize(failed);
#  endif
  return failed;
}

SANITIZER_WEAK_CXX_DEFAULT_IMPL
void OnInitialize() {
#  if !SANITIZER_GO
  if (on_initialize)
    on_initialize();
#  endif
}
#endif

static TracePart* TracePartAlloc(ThreadState* thr) {
  TracePart* part = nullptr;
  {
    Lock lock(&ctx->slot_mtx);
    uptr max_parts = Trace::kMinParts + flags()->history_size;
    Trace* trace = &thr->tctx->trace;
    if (trace->parts_allocated == max_parts ||
        ctx->trace_part_finished_excess) {
      part = ctx->trace_part_recycle.PopFront();
      DPrintf("#%d: TracePartAlloc: part=%p\n", thr->tid, part);
      if (part && part->trace) {
        Trace* trace1 = part->trace;
        Lock trace_lock(&trace1->mtx);
        part->trace = nullptr;
        TracePart* part1 = trace1->parts.PopFront();
        CHECK_EQ(part, part1);
        if (trace1->parts_allocated > trace1->parts.Size()) {
          ctx->trace_part_finished_excess +=
              trace1->parts_allocated - trace1->parts.Size();
          trace1->parts_allocated = trace1->parts.Size();
        }
      }
    }
    if (trace->parts_allocated < max_parts) {
      trace->parts_allocated++;
      if (ctx->trace_part_finished_excess)
        ctx->trace_part_finished_excess--;
    }
    if (!part)
      ctx->trace_part_total_allocated++;
    else if (ctx->trace_part_recycle_finished)
      ctx->trace_part_recycle_finished--;
  }
  if (!part)
    part = new (MmapOrDie(sizeof(*part), "TracePart")) TracePart();
  return part;
}

static void TracePartFree(TracePart* part) SANITIZER_REQUIRES(ctx->slot_mtx) {
  DCHECK(part->trace);
  part->trace = nullptr;
  ctx->trace_part_recycle.PushFront(part);
}

void TraceResetForTesting() {
  Lock lock(&ctx->slot_mtx);
  while (auto* part = ctx->trace_part_recycle.PopFront()) {
    if (auto trace = part->trace)
      CHECK_EQ(trace->parts.PopFront(), part);
    UnmapOrDie(part, sizeof(*part));
  }
  ctx->trace_part_total_allocated = 0;
  ctx->trace_part_recycle_finished = 0;
  ctx->trace_part_finished_excess = 0;
}

static void DoResetImpl(uptr epoch) {
  ThreadRegistryLock lock0(&ctx->thread_registry);
  Lock lock1(&ctx->slot_mtx);
  CHECK_EQ(ctx->global_epoch, epoch);
  ctx->global_epoch++;
  CHECK(!ctx->resetting);
  ctx->resetting = true;
  for (u32 i = ctx->thread_registry.NumThreadsLocked(); i--;) {
    ThreadContext* tctx = (ThreadContext*)ctx->thread_registry.GetThreadLocked(
        static_cast<Tid>(i));
    // Potentially we could purge all ThreadStatusDead threads from the
    // registry. Since we reset all shadow, they can't race with anything
    // anymore. However, their tid's can still be stored in some aux places
    // (e.g. tid of thread that created something).
    auto trace = &tctx->trace;
    Lock lock(&trace->mtx);
    bool attached = tctx->thr && tctx->thr->slot;
    auto parts = &trace->parts;
    bool local = false;
    while (!parts->Empty()) {
      auto part = parts->Front();
      local = local || part == trace->local_head;
      if (local)
        CHECK(!ctx->trace_part_recycle.Queued(part));
      else
        ctx->trace_part_recycle.Remove(part);
      if (attached && parts->Size() == 1) {
        // The thread is running and this is the last/current part.
        // Set the trace position to the end of the current part
        // to force the thread to call SwitchTracePart and re-attach
        // to a new slot and allocate a new trace part.
        // Note: the thread is concurrently modifying the position as well,
        // so this is only best-effort. The thread can only modify position
        // within this part, because switching parts is protected by
        // slot/trace mutexes that we hold here.
        atomic_store_relaxed(
            &tctx->thr->trace_pos,
            reinterpret_cast<uptr>(&part->events[TracePart::kSize]));
        break;
      }
      parts->Remove(part);
      TracePartFree(part);
    }
    CHECK_LE(parts->Size(), 1);
    trace->local_head = parts->Front();
    if (tctx->thr && !tctx->thr->slot) {
      atomic_store_relaxed(&tctx->thr->trace_pos, 0);
      tctx->thr->trace_prev_pc = 0;
    }
    if (trace->parts_allocated > trace->parts.Size()) {
      ctx->trace_part_finished_excess +=
          trace->parts_allocated - trace->parts.Size();
      trace->parts_allocated = trace->parts.Size();
    }
  }
  while (ctx->slot_queue.PopFront()) {
  }
  for (auto& slot : ctx->slots) {
    slot.SetEpoch(kEpochZero);
    slot.journal.Reset();
    slot.thr = nullptr;
    ctx->slot_queue.PushBack(&slot);
  }

  DPrintf("Resetting shadow...\n");
  auto shadow_begin = ShadowBeg();
  auto shadow_end = ShadowEnd();
#if SANITIZER_GO
  CHECK_NE(0, ctx->mapped_shadow_begin);
  shadow_begin = ctx->mapped_shadow_begin;
  shadow_end = ctx->mapped_shadow_end;
  VPrintf(2, "shadow_begin-shadow_end: (0x%zx-0x%zx)\n",
          shadow_begin, shadow_end);
#endif

#if SANITIZER_WINDOWS
  auto resetFailed =
      !ZeroMmapFixedRegion(shadow_begin, shadow_end - shadow_begin);
#else
  auto resetFailed =
      !MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow");
#  if !SANITIZER_GO
  DontDumpShadow(shadow_begin, shadow_end - shadow_begin);
#  endif
#endif
  if (resetFailed) {
    Printf("failed to reset shadow memory\n");
    Die();
  }
  DPrintf("Resetting meta shadow...\n");
  ctx->metamap.ResetClocks();
  StoreShadow(&ctx->last_spurious_race, Shadow::kEmpty);
  ctx->resetting = false;
}

// Clang does not understand locking all slots in the loop:
// error: expecting mutex 'slot.mtx' to be held at start of each loop
void DoReset(ThreadState* thr, uptr epoch) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  for (auto& slot : ctx->slots) {
    slot.mtx.Lock();
    if (UNLIKELY(epoch == 0))
      epoch = ctx->global_epoch;
    if (UNLIKELY(epoch != ctx->global_epoch)) {
      // Epoch can't change once we've locked the first slot.
      CHECK_EQ(slot.sid, 0);
      slot.mtx.Unlock();
      return;
    }
  }
  DPrintf("#%d: DoReset epoch=%lu\n", thr ? thr->tid : -1, epoch);
  DoResetImpl(epoch);
  for (auto& slot : ctx->slots) slot.mtx.Unlock();
}

void FlushShadowMemory() { DoReset(nullptr, 0); }

static TidSlot* FindSlotAndLock(ThreadState* thr)
    SANITIZER_ACQUIRE(thr->slot->mtx) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  CHECK(!thr->slot);
  TidSlot* slot = nullptr;
  for (;;) {
    uptr epoch;
    {
      Lock lock(&ctx->slot_mtx);
      epoch = ctx->global_epoch;
      if (slot) {
        // This is an exhausted slot from the previous iteration.
        if (ctx->slot_queue.Queued(slot))
          ctx->slot_queue.Remove(slot);
        thr->slot_locked = false;
        slot->mtx.Unlock();
      }
      for (;;) {
        slot = ctx->slot_queue.PopFront();
        if (!slot)
          break;
        if (slot->epoch() != kEpochLast) {
          ctx->slot_queue.PushBack(slot);
          break;
        }
      }
    }
    if (!slot) {
      DoReset(thr, epoch);
      continue;
    }
    slot->mtx.Lock();
    CHECK(!thr->slot_locked);
    thr->slot_locked = true;
    if (slot->thr) {
      DPrintf("#%d: preempting sid=%d tid=%d\n", thr->tid, (u32)slot->sid,
              slot->thr->tid);
      slot->SetEpoch(slot->thr->fast_state.epoch());
      slot->thr = nullptr;
    }
    if (slot->epoch() != kEpochLast)
      return slot;
  }
}

void SlotAttachAndLock(ThreadState* thr) {
  TidSlot* slot = FindSlotAndLock(thr);
  DPrintf("#%d: SlotAttach: slot=%u\n", thr->tid, static_cast<int>(slot->sid));
  CHECK(!slot->thr);
  CHECK(!thr->slot);
  slot->thr = thr;
  thr->slot = slot;
  Epoch epoch = EpochInc(slot->epoch());
  CHECK(!EpochOverflow(epoch));
  slot->SetEpoch(epoch);
  thr->fast_state.SetSid(slot->sid);
  thr->fast_state.SetEpoch(epoch);
  if (thr->slot_epoch != ctx->global_epoch) {
    thr->slot_epoch = ctx->global_epoch;
    thr->clock.Reset();
#if !SANITIZER_GO
    thr->last_sleep_stack_id = kInvalidStackID;
    thr->last_sleep_clock.Reset();
#endif
  }
  thr->clock.Set(slot->sid, epoch);
  slot->journal.PushBack({thr->tid, epoch});
}

static void SlotDetachImpl(ThreadState* thr, bool exiting) {
  TidSlot* slot = thr->slot;
  thr->slot = nullptr;
  if (thr != slot->thr) {
    slot = nullptr;  // we don't own the slot anymore
    if (thr->slot_epoch != ctx->global_epoch) {
      TracePart* part = nullptr;
      auto* trace = &thr->tctx->trace;
      {
        Lock l(&trace->mtx);
        auto* parts = &trace->parts;
        // The trace can be completely empty in the unlikely event that
        // the thread is preempted right after it acquired the slot
        // in ThreadStart and did not trace any events yet.
        CHECK_LE(parts->Size(), 1);
        part = parts->PopFront();
        thr->tctx->trace.local_head = nullptr;
        atomic_store_relaxed(&thr->trace_pos, 0);
        thr->trace_prev_pc = 0;
      }
      if (part) {
        Lock l(&ctx->slot_mtx);
        TracePartFree(part);
      }
    }
    return;
  }
  CHECK(exiting || thr->fast_state.epoch() == kEpochLast);
  slot->SetEpoch(thr->fast_state.epoch());
  slot->thr = nullptr;
}

void SlotDetach(ThreadState* thr) {
  Lock lock(&thr->slot->mtx);
  SlotDetachImpl(thr, true);
}

void SlotLock(ThreadState* thr) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  DCHECK(!thr->slot_locked);
#if SANITIZER_DEBUG
  // Check these mutexes are not locked.
  // We can call DoReset from SlotAttachAndLock, which will lock
  // these mutexes, but it happens only every once in a while.
  { ThreadRegistryLock lock(&ctx->thread_registry); }
  { Lock lock(&ctx->slot_mtx); }
#endif
  TidSlot* slot = thr->slot;
  slot->mtx.Lock();
  thr->slot_locked = true;
  if (LIKELY(thr == slot->thr && thr->fast_state.epoch() != kEpochLast))
    return;
  SlotDetachImpl(thr, false);
  thr->slot_locked = false;
  slot->mtx.Unlock();
  SlotAttachAndLock(thr);
}

void SlotUnlock(ThreadState* thr) {
  DCHECK(thr->slot_locked);
  thr->slot_locked = false;
  thr->slot->mtx.Unlock();
}

Context::Context()
    : initialized(),
      report_mtx(MutexTypeReport),
      nreported(),
      thread_registry([](Tid tid) -> ThreadContextBase* {
        return new (Alloc(sizeof(ThreadContext))) ThreadContext(tid);
      }),
      racy_mtx(MutexTypeRacy),
      racy_stacks(),
      fired_suppressions_mtx(MutexTypeFired),
      slot_mtx(MutexTypeSlots),
      resetting() {
  fired_suppressions.reserve(8);
  for (uptr i = 0; i < ARRAY_SIZE(slots); i++) {
    TidSlot* slot = &slots[i];
    slot->sid = static_cast<Sid>(i);
    slot_queue.PushBack(slot);
  }
  global_epoch = 1;
}

TidSlot::TidSlot() : mtx(MutexTypeSlot) {}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Tid tid)
    // Do not touch these, rely on zero initialization,
    // they may be accessed before the ctor.
    // ignore_reads_and_writes()
    // ignore_interceptors()
    : tid(tid) {
  CHECK_EQ(reinterpret_cast<uptr>(this) % SANITIZER_CACHE_LINE_SIZE, 0);
#if !SANITIZER_GO
  // C/C++ uses fixed size shadow stack.
  const int kInitStackSize = kShadowStackSize;
  shadow_stack = static_cast<uptr*>(
      MmapNoReserveOrDie(kInitStackSize * sizeof(uptr), "shadow stack"));
  SetShadowRegionHugePageMode(reinterpret_cast<uptr>(shadow_stack),
                              kInitStackSize * sizeof(uptr));
#else
  // Go uses malloc-allocated shadow stack with dynamic size.
  const int kInitStackSize = 8;
  shadow_stack = static_cast<uptr*>(Alloc(kInitStackSize * sizeof(uptr)));
#endif
  shadow_stack_pos = shadow_stack;
  shadow_stack_end = shadow_stack + kInitStackSize;
}

#if !SANITIZER_GO
void MemoryProfiler(u64 uptime) {
  if (ctx->memprof_fd == kInvalidFd)
    return;
  InternalMmapVector<char> buf(4096);
  WriteMemoryProfile(buf.data(), buf.size(), uptime);
  WriteToFile(ctx->memprof_fd, buf.data(), internal_strlen(buf.data()));
}

static bool InitializeMemoryProfiler() {
  ctx->memprof_fd = kInvalidFd;
  const char *fname = flags()->profile_memory;
  if (!fname || !fname[0])
    return false;
  if (internal_strcmp(fname, "stdout") == 0) {
    ctx->memprof_fd = 1;
  } else if (internal_strcmp(fname, "stderr") == 0) {
    ctx->memprof_fd = 2;
  } else {
    InternalScopedString filename;
    filename.AppendF("%s.%d", fname, (int)internal_getpid());
    ctx->memprof_fd = OpenFile(filename.data(), WrOnly);
    if (ctx->memprof_fd == kInvalidFd) {
      Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
             filename.data());
      return false;
    }
  }
  MemoryProfiler(0);
  return true;
}

static void *BackgroundThread(void *arg) {
  // This is a non-initialized non-user thread, nothing to see here.
  // We don't use ScopedIgnoreInterceptors, because we want ignores to be
  // enabled even when the thread function exits (e.g. during pthread thread
  // shutdown code).
  cur_thread_init()->ignore_interceptors++;
  const u64 kMs2Ns = 1000 * 1000;
  const u64 start = NanoTime();

  u64 last_flush = start;
  uptr last_rss = 0;
  while (!atomic_load_relaxed(&ctx->stop_background_thread)) {
    SleepForMillis(100);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        VReport(1, "ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        now = last_flush = NanoTime();
      }
    }
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      VReport(1,
              "ThreadSanitizer: memory flush check"
              " RSS=%llu LAST=%llu LIMIT=%llu\n",
              (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
      if (2 * rss > limit + last_rss) {
        VReport(1, "ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        now = NanoTime();
        VReport(1, "ThreadSanitizer: memory flushed RSS=%llu\n",
                (u64)rss >> 20);
      }
      last_rss = rss;
    }

    MemoryProfiler(now - start);

    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        ScopedErrorReportLock l2;
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
  }
  return nullptr;
}

static void StartBackgroundThread() {
  ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
}

#ifndef __mips__
static void StopBackgroundThread() {
  atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
  internal_join_thread(ctx->background_thread);
  ctx->background_thread = 0;
}
#endif
#endif

void DontNeedShadowFor(uptr addr, uptr size) {
  ReleaseMemoryPagesToOS(reinterpret_cast<uptr>(MemToShadow(addr)),
                         reinterpret_cast<uptr>(MemToShadow(addr + size)));
}

#if !SANITIZER_GO
// We call UnmapShadow before the actual munmap, at that point we don't yet
// know if the provided address/size are sane. We can't call UnmapShadow
// after the actual munmap because at that point the memory range can
// already be reused for something else, so we can't rely on the munmap
// return value to understand if the values are sane.
// While calling munmap with insane values (non-canonical address, negative
// size, etc) is an error, the kernel won't crash. We must also try to not
// crash as the failure mode is very confusing (paging fault inside of the
// runtime on some derived shadow address).
static bool IsValidMmapRange(uptr addr, uptr size) {
  if (size == 0)
    return true;
  if (static_cast<sptr>(size) < 0)
    return false;
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return false;
  // Check that if the start of the region belongs to one of the app ranges,
  // the end of the region belongs to the same range.
  const uptr ranges[][2] = {
      {LoAppMemBeg(), LoAppMemEnd()},
      {MidAppMemBeg(), MidAppMemEnd()},
      {HiAppMemBeg(), HiAppMemEnd()},
  };
  for (auto range : ranges) {
    if (addr >= range[0] && addr < range[1])
      return addr + size <= range[1];
  }
  return false;
}

void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
  if (size == 0 || !IsValidMmapRange(addr, size))
    return;
  DontNeedShadowFor(addr, size);
  ScopedGlobalProcessor sgp;
  SlotLocker locker(thr, true);
  ctx->metamap.ResetRange(thr->proc(), addr, size, true);
}
#endif

void MapShadow(uptr addr, uptr size) {
  // Ensure the thread registry lock is held, so as to synchronize
  // with DoReset, which also accesses the mapped_shadow_* ctx fields.
  ThreadRegistryLock lock0(&ctx->thread_registry);
  static bool data_mapped = false;

#if !SANITIZER_GO
  // Global data is not 64K aligned, but there are no adjacent mappings,
  // so we can get away with unaligned mapping.
  // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  const uptr kPageSize = GetPageSizeCached();
  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
  if (!MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
    Die();
#else
  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), (64 << 10));
  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), (64 << 10));
  VPrintf(2, "MapShadow for (0x%zx-0x%zx), begin/end: (0x%zx-0x%zx)\n",
          addr, addr + size, shadow_begin, shadow_end);

  if (!data_mapped) {
    // First call maps data+bss.
    if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
      Die();
  } else {
    VPrintf(2, "ctx->mapped_shadow_{begin,end} = (0x%zx-0x%zx)\n",
            ctx->mapped_shadow_begin, ctx->mapped_shadow_end);
    // Second and subsequent calls map heap.
    if (shadow_end <= ctx->mapped_shadow_end)
      return;
    if (!ctx->mapped_shadow_begin || ctx->mapped_shadow_begin > shadow_begin)
      ctx->mapped_shadow_begin = shadow_begin;
    if (shadow_begin < ctx->mapped_shadow_end)
      shadow_begin = ctx->mapped_shadow_end;
    VPrintf(2, "MapShadow begin/end = (0x%zx-0x%zx)\n",
            shadow_begin, shadow_end);
    if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
                                 "shadow"))
      Die();
    ctx->mapped_shadow_end = shadow_end;
  }
#endif

  // Meta shadow is 2:1, so tread carefully.
  static uptr mapped_meta_end = 0;
  uptr meta_begin = (uptr)MemToMeta(addr);
  uptr meta_end = (uptr)MemToMeta(addr + size);
  meta_begin = RoundDownTo(meta_begin, 64 << 10);
  meta_end = RoundUpTo(meta_end, 64 << 10);
  if (!data_mapped) {
    // First call maps data+bss.
    data_mapped = true;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
  } else {
    // Mapping continuous heap.
    // Windows wants 64K alignment.
    meta_begin = RoundDownTo(meta_begin, 64 << 10);
    meta_end = RoundUpTo(meta_end, 64 << 10);
    CHECK_GT(meta_end, mapped_meta_end);
    if (meta_begin < mapped_meta_end)
      meta_begin = mapped_meta_end;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
    mapped_meta_end = meta_end;
  }
  VPrintf(2, "mapped meta shadow for (0x%zx-0x%zx) at (0x%zx-0x%zx)\n", addr,
          addr + size, meta_begin, meta_end);
}

#if !SANITIZER_GO
static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

static void TsanOnDeadlySignal(int signo, void *siginfo, void *context) {
  HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
}
#endif

void CheckUnwind() {
  // There is a high probability that interceptors will check-fail as well;
  // on the other hand, there is no sense in processing interceptors
  // since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
#if !SANITIZER_GO
  ThreadState* thr = cur_thread();
  thr->nomalloc = false;
  thr->ignore_sync++;
  thr->ignore_reads_and_writes++;
  atomic_store_relaxed(&thr->in_signal_handler, 0);
#endif
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}

bool is_initialized;

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  if (is_initialized)
    return;
  is_initialized = true;
  // We are not ready to handle interceptors yet.
  ScopedIgnoreInterceptors ignore;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckUnwindCallback(CheckUnwind);

  ctx = new (ctx_placeholder) Context;
  const char *env_name = SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS";
  const char *options = GetEnv(env_name);
  CacheBinaryName();
  CheckASLR();
  InitializeFlags(&ctx->flags, options, env_name);
  AvoidCVE_2016_2143();
  __sanitizer::InitializePlatformEarly();
  __tsan::InitializePlatformEarly();

#if !SANITIZER_GO
  InitializeAllocator();
  ReplaceSystemMalloc();
#endif
  if (common_flags()->detect_deadlocks)
    ctx->dd = DDetector::Create(flags());
  Processor *proc = ProcCreate();
  ProcWire(proc, thr);
  InitializeInterceptors();
  InitializePlatform();
  InitializeDynamicAnnotations();
#if !SANITIZER_GO
  InitializeShadowMemory();
  InitializeAllocatorLate();
  InstallDeadlySignalHandlers(TsanOnDeadlySignal);
#endif
  // Set up the correct file descriptor for error reports.
  __sanitizer_set_report_path(common_flags()->log_path);
  InitializeSuppressions();
#if !SANITIZER_GO
  InitializeLibIgnore();
  Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
#endif

  VPrintf(1, "***** Running under ThreadSanitizer v3 (pid %d) *****\n",
          (int)internal_getpid());

  // Initialize thread 0.
  Tid tid = ThreadCreate(nullptr, 0, 0, true);
  CHECK_EQ(tid, kMainTid);
  ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
#if TSAN_CONTAINS_UBSAN
  __ubsan::InitAsPlugin();
#endif

#if !SANITIZER_GO
  Symbolizer::LateInitialize();
  if (InitializeMemoryProfiler() || flags()->force_background_thread)
    MaybeSpawnBackgroundThread();
#endif
  ctx->initialized = true;

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }

  OnInitialize();
}

void MaybeSpawnBackgroundThread() {
  // On MIPS, TSan initialization is run before
  // __pthread_initialize_minimal_internal() is finished, so we cannot spawn
  // new threads.
#if !SANITIZER_GO && !defined(__mips__)
  static atomic_uint32_t bg_thread = {};
  if (atomic_load(&bg_thread, memory_order_relaxed) == 0 &&
      atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) {
    StartBackgroundThread();
    SetSandboxingCallback(StopBackgroundThread);
  }
#endif
}

int Finalize(ThreadState *thr) {
  bool failed = false;

#if !SANITIZER_GO
  if (common_flags()->print_module_map == 1)
    DumpProcessMap();
#endif

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    internal_usleep(u64(flags()->atexit_sleep_ms) * 1000);

  {
    // Wait for pending reports.
    ScopedErrorReportLock lock;
  }

#if !SANITIZER_GO
  if (Verbosity()) AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#if !SANITIZER_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();

  failed = OnFinalize(failed);

  return failed ? common_flags()->exitcode : 0;
}

#if !SANITIZER_GO
void ForkBefore(ThreadState* thr, uptr pc) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  GlobalProcessorLock();
  // Detaching from the slot makes OnUserFree skip writing to the shadow.
  // The slot will be locked so any attempts to use it will deadlock anyway.
  SlotDetach(thr);
  for (auto& slot : ctx->slots) slot.mtx.Lock();
  ctx->thread_registry.Lock();
  ctx->slot_mtx.Lock();
  ScopedErrorReportLock::Lock();
  AllocatorLockBeforeFork();
  // Suppress all reports in the pthread_atfork callbacks.
  // Reports will deadlock on the report_mtx.
  // We could ignore sync operations as well,
  // but so far it's unclear if it will do more good or harm.
  // Unnecessarily ignoring things can lead to false positives later.
  thr->suppress_reports++;
  // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
  // we'll assert in CheckNoLocks() unless we ignore interceptors.
  // On OS X libSystem_atfork_prepare/parent/child callbacks are called
  // after/before our callbacks and they call free.
  thr->ignore_interceptors++;
  // Disables memory write in OnUserAlloc/Free.
  thr->ignore_reads_and_writes++;

#  if SANITIZER_APPLE
  __tsan_test_only_on_fork();
#  endif
}

static void ForkAfter(ThreadState* thr,
                      bool child) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  thr->suppress_reports--;  // Enabled in ForkBefore.
  thr->ignore_interceptors--;
  thr->ignore_reads_and_writes--;
  AllocatorUnlockAfterFork(child);
  ScopedErrorReportLock::Unlock();
  ctx->slot_mtx.Unlock();
  ctx->thread_registry.Unlock();
  for (auto& slot : ctx->slots) slot.mtx.Unlock();
  SlotAttachAndLock(thr);
  SlotUnlock(thr);
  GlobalProcessorUnlock();
}

void ForkParentAfter(ThreadState* thr, uptr pc) { ForkAfter(thr, false); }

void ForkChildAfter(ThreadState* thr, uptr pc, bool start_thread) {
  ForkAfter(thr, true);
  u32 nthread = ctx->thread_registry.OnFork(thr->tid);
  VPrintf(1,
          "ThreadSanitizer: forked new process with pid %d,"
          " parent had %d threads\n",
          (int)internal_getpid(), (int)nthread);
  if (nthread == 1) {
    if (start_thread)
      StartBackgroundThread();
  } else {
    // We've just forked a multi-threaded process. We cannot reasonably function
    // after that (some mutexes may be locked before fork). So just enable
    // ignores for everything in the hope that we will exec soon.
    ctx->after_multithreaded_fork = true;
    thr->ignore_interceptors++;
    thr->suppress_reports++;
    ThreadIgnoreBegin(thr, pc);
    ThreadIgnoreSyncBegin(thr, pc);
  }
}
#endif

#if SANITIZER_GO
NOINLINE
void GrowShadowStack(ThreadState *thr) {
  const int sz = thr->shadow_stack_end - thr->shadow_stack;
  const int newsz = 2 * sz;
  auto *newstack = (uptr *)Alloc(newsz * sizeof(uptr));
  internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
  Free(thr->shadow_stack);
  thr->shadow_stack = newstack;
  thr->shadow_stack_pos = newstack + sz;
  thr->shadow_stack_end = newstack + newsz;
}
#endif

StackID CurrentStackId(ThreadState *thr, uptr pc) {
#if !SANITIZER_GO
  if (!thr->is_inited)  // May happen during bootstrap.
    return kInvalidStackID;
#endif
  if (pc != 0) {
#if !SANITIZER_GO
    DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
    if (thr->shadow_stack_pos == thr->shadow_stack_end)
      GrowShadowStack(thr);
#endif
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  StackID id = StackDepotPut(
      StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
  if (pc != 0)
    thr->shadow_stack_pos--;
  return id;
}

static bool TraceSkipGap(ThreadState* thr) {
  Trace *trace = &thr->tctx->trace;
  Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
  DCHECK_EQ(reinterpret_cast<uptr>(pos + 1) & TracePart::kAlignment, 0);
  auto *part = trace->parts.Back();
  DPrintf("#%d: TraceSwitchPart enter trace=%p parts=%p-%p pos=%p\n", thr->tid,
          trace, trace->parts.Front(), part, pos);
  if (!part)
    return false;
  // We can get here when we still have space in the current trace part.
  // The fast-path check in TraceAcquire has false positives in the middle of
  // the part. Check if we are indeed at the end of the current part or not,
  // and fill any gaps with NopEvent's.
  Event* end = &part->events[TracePart::kSize];
  DCHECK_GE(pos, &part->events[0]);
  DCHECK_LE(pos, end);
  if (pos + 1 < end) {
    if ((reinterpret_cast<uptr>(pos) & TracePart::kAlignment) ==
        TracePart::kAlignment)
      *pos++ = NopEvent;
    *pos++ = NopEvent;
    DCHECK_LE(pos + 2, end);
    atomic_store_relaxed(&thr->trace_pos, reinterpret_cast<uptr>(pos));
    return true;
  }
  // We are indeed at the end.
  for (; pos < end; pos++) *pos = NopEvent;
  return false;
}

NOINLINE
void TraceSwitchPart(ThreadState* thr) {
  if (TraceSkipGap(thr))
    return;
#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork) {
    // We just need to survive till exec.
    TracePart* part = thr->tctx->trace.parts.Back();
    if (part) {
      atomic_store_relaxed(&thr->trace_pos,
                           reinterpret_cast<uptr>(&part->events[0]));
      return;
    }
  }
#endif
  TraceSwitchPartImpl(thr);
}

void TraceSwitchPartImpl(ThreadState* thr) {
  SlotLocker locker(thr, true);
  Trace* trace = &thr->tctx->trace;
  TracePart* part = TracePartAlloc(thr);
  part->trace = trace;
  thr->trace_prev_pc = 0;
  TracePart* recycle = nullptr;
  // Keep roughly half of parts local to the thread
  // (not queued into the recycle queue).
  uptr local_parts = (Trace::kMinParts + flags()->history_size + 1) / 2;
  {
    Lock lock(&trace->mtx);
    if (trace->parts.Empty())
      trace->local_head = part;
    if (trace->parts.Size() >= local_parts) {
      recycle = trace->local_head;
      trace->local_head = trace->parts.Next(recycle);
    }
    trace->parts.PushBack(part);
    atomic_store_relaxed(&thr->trace_pos,
                         reinterpret_cast<uptr>(&part->events[0]));
  }
  // Make this part self-sufficient by restoring the current stack
  // and mutex set in the beginning of the trace.
  TraceTime(thr);
  {
    // Pathologically large stacks may not fit into the part.
    // In these cases we log only a fixed number of top frames.
    const uptr kMaxFrames = 1000;
    // Check that kMaxFrames won't consume the whole part.
    static_assert(kMaxFrames < TracePart::kSize / 2, "kMaxFrames is too big");
    uptr* pos = Max(&thr->shadow_stack[0], thr->shadow_stack_pos - kMaxFrames);
    for (; pos < thr->shadow_stack_pos; pos++) {
      if (TryTraceFunc(thr, *pos))
        continue;
      CHECK(TraceSkipGap(thr));
      CHECK(TryTraceFunc(thr, *pos));
    }
  }
  for (uptr i = 0; i < thr->mset.Size(); i++) {
    MutexSet::Desc d = thr->mset.Get(i);
    for (uptr i = 0; i < d.count; i++)
      TraceMutexLock(thr, d.write ? EventType::kLock : EventType::kRLock, 0,
                     d.addr, d.stack_id);
  }
  // Callers of TraceSwitchPart expect that TraceAcquire will always succeed
  // after the call. It's possible that TryTraceFunc/TraceMutexLock above
  // filled the trace part exactly up to the TracePart::kAlignment gap
  // and the next TraceAcquire won't succeed. Skip the gap to avoid that.
  EventFunc *ev;
  if (!TraceAcquire(thr, &ev)) {
    CHECK(TraceSkipGap(thr));
    CHECK(TraceAcquire(thr, &ev));
  }
  {
    Lock lock(&ctx->slot_mtx);
    // There is a small chance that the slot may be not queued at this point.
    // This can happen if the slot has kEpochLast epoch and another thread
    // in FindSlotAndLock discovered that it's exhausted and removed it from
    // the slot queue. kEpochLast can happen in 2 cases: (1) if TraceSwitchPart
    // was called with the slot locked and epoch already at kEpochLast,
    // or (2) if we've acquired a new slot in SlotLock in the beginning
    // of the function and the slot was at kEpochLast - 1, so after increment
    // in SlotAttachAndLock it became kEpochLast.
    if (ctx->slot_queue.Queued(thr->slot)) {
      ctx->slot_queue.Remove(thr->slot);
      ctx->slot_queue.PushBack(thr->slot);
    }
    if (recycle)
      ctx->trace_part_recycle.PushBack(recycle);
  }
  DPrintf("#%d: TraceSwitchPart exit parts=%p-%p pos=0x%zx\n", thr->tid,
          trace->parts.Front(), trace->parts.Back(),
          atomic_load_relaxed(&thr->trace_pos));
}

void ThreadIgnoreBegin(ThreadState* thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
#if !SANITIZER_GO
  if (pc && !ctx->after_multithreaded_fork)
    thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->ignore_reads_and_writes--;
  if (thr->ignore_reads_and_writes == 0) {
    thr->fast_state.ClearIgnoreBit();
#if !SANITIZER_GO
    thr->mop_ignore_set.Reset();
#endif
  }
}

#if !SANITIZER_GO
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __tsan_testonly_shadow_stack_current_size() {
  ThreadState *thr = cur_thread();
  return thr->shadow_stack_pos - thr->shadow_stack;
}
#endif

void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
#if !SANITIZER_GO
  if (pc && !ctx->after_multithreaded_fork)
    thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreSyncEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  CHECK_GT(thr->ignore_sync, 0);
  thr->ignore_sync--;
#if !SANITIZER_GO
  if (thr->ignore_sync == 0)
    thr->sync_ignore_set.Reset();
#endif
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

#if SANITIZER_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif
}  // namespace __tsan

#if SANITIZER_CHECK_DEADLOCKS
namespace __sanitizer {
using namespace __tsan;
MutexMeta mutex_meta[] = {
    {MutexInvalid, "Invalid", {}},
    {MutexThreadRegistry,
     "ThreadRegistry",
     {MutexTypeSlots, MutexTypeTrace, MutexTypeReport}},
    {MutexTypeReport, "Report", {MutexTypeTrace}},
    {MutexTypeSyncVar, "SyncVar", {MutexTypeReport, MutexTypeTrace}},
    {MutexTypeAnnotations, "Annotations", {}},
    {MutexTypeAtExit, "AtExit", {}},
    {MutexTypeFired, "Fired", {MutexLeaf}},
    {MutexTypeRacy, "Racy", {MutexLeaf}},
    {MutexTypeGlobalProc, "GlobalProc", {MutexTypeSlot, MutexTypeSlots}},
    {MutexTypeInternalAlloc, "InternalAlloc", {MutexLeaf}},
    {MutexTypeTrace, "Trace", {}},
    {MutexTypeSlot,
     "Slot",
     {MutexMulti, MutexTypeTrace, MutexTypeSyncVar, MutexThreadRegistry,
      MutexTypeSlots}},
    {MutexTypeSlots, "Slots", {MutexTypeTrace, MutexTypeReport}},
    {},
};

void PrintMutexPC(uptr pc) { StackTrace(&pc, 1).Print(); }

}  // namespace __sanitizer
#endif