Path: blob/main/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
//===-- tsan_mman.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_mman.h"

#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "tsan_flags.h"
#include "tsan_interface.h"
#include "tsan_report.h"
#include "tsan_rtl.h"

namespace __tsan {

struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnMapSecondary(uptr p, uptr size, uptr user_begin,
                      uptr user_size) const {};
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
    // Mark the corresponding meta shadow memory as not needed.
    // Note the block does not contain any meta info at this point
    // (this happens after free).
    const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
    const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
    // Block came from LargeMmapAllocator, so must be large.
    // We rely on this in the calculations below.
    CHECK_GE(size, 2 * kPageSize);
    uptr diff = RoundUp(p, kPageSize) - p;
    if (diff != 0) {
      p += diff;
      size -= diff;
    }
    diff = p + size - RoundDown(p + size, kPageSize);
    if (diff != 0)
      size -= diff;
    uptr p_meta = (uptr)MemToMeta(p);
    ReleaseMemoryPagesToOS(p_meta, p_meta + size / kMetaRatio);
  }
};
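
// Worked example of the trimming in OnUnmap above (illustrative numbers only;
// this assumes kMetaShadowCell == 8, kMetaShadowSize == 4 and a 4K OS page,
// so kMetaRatio == 2 and kPageSize == 8K):
//   p = 0x7000, size = 0x20000
//   RoundUp(p, 8K)          -> p = 0x8000, size = 0x1f000
//   RoundDown(p + size, 8K) -> size = 0x1e000
// The trimmed [p, p + size) is 8K-aligned, so the corresponding meta range
// [MemToMeta(p), MemToMeta(p) + size / kMetaRatio) covers whole 4K pages and
// can safely be handed to ReleaseMemoryPagesToOS().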

alignas(64) static char allocator_placeholder[sizeof(Allocator)];
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

struct GlobalProc {
  Mutex mtx;
  Processor *proc;
  // This mutex represents the internal allocator combined for
  // the purposes of deadlock detection. The internal allocator
  // uses multiple mutexes, moreover they are locked only occasionally
  // and they are spin mutexes which don't support deadlock detection.
  // So we use this fake mutex to serve as a substitute for these mutexes.
  CheckedMutex internal_alloc_mtx;

  GlobalProc()
      : mtx(MutexTypeGlobalProc),
        proc(ProcCreate()),
        internal_alloc_mtx(MutexTypeInternalAlloc) {}
};

alignas(64) static char global_proc_placeholder[sizeof(GlobalProc)];
GlobalProc *global_proc() {
  return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
}

static void InternalAllocAccess() {
  global_proc()->internal_alloc_mtx.Lock();
  global_proc()->internal_alloc_mtx.Unlock();
}
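
// The Lock()/Unlock() pair in InternalAllocAccess() above is intentionally
// empty-bodied but not a no-op: CheckedMutex only performs lock-order
// checking (in debug builds with deadlock checking compiled in), so taking
// and immediately releasing internal_alloc_mtx records every entry into the
// internal allocator as an acquisition of MutexTypeInternalAlloc. That is
// what allows the checker to order internal allocations against the other
// runtime mutexes, e.g. the ones taken around fork in
// AllocatorLockBeforeFork() below, even though the allocator's real spin
// mutexes are invisible to it.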

ScopedGlobalProcessor::ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc())
    return;
  // If we don't have a proc, use the global one.
  // There are currently only two known cases where this path is triggered:
  //   __interceptor_free
  //   __nptl_deallocate_tsd
  //   start_thread
  //   clone
  // and:
  //   ResetRange
  //   __interceptor_munmap
  //   __deallocate_stack
  //   start_thread
  //   clone
  // Ideally, we destroy thread state (and unwire proc) when a thread actually
  // exits (i.e. when we join/wait it). Then we would not need the global proc.
  gp->mtx.Lock();
  ProcWire(gp->proc, thr);
}

ScopedGlobalProcessor::~ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc() != gp->proc)
    return;
  ProcUnwire(gp->proc, thr);
  gp->mtx.Unlock();
}

void AllocatorLockBeforeFork() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->internal_alloc_mtx.Lock();
  InternalAllocatorLock();
#if !SANITIZER_APPLE
  // OS X allocates from hooks, see 6a3958247a.
  allocator()->ForceLock();
  StackDepotLockBeforeFork();
#endif
}

void AllocatorUnlockAfterFork(bool child) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
#if !SANITIZER_APPLE
  StackDepotUnlockAfterFork(child);
  allocator()->ForceUnlock();
#endif
  InternalAllocatorUnlock();
  global_proc()->internal_alloc_mtx.Unlock();
}

void GlobalProcessorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->mtx.Lock();
}

void GlobalProcessorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->mtx.Unlock();
}

static constexpr uptr kMaxAllowedMallocSize = 1ull << 40;
static uptr max_user_defined_malloc_size;

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
  max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
                                     ? common_flags()->max_allocation_size_mb
                                           << 20
                                     : kMaxAllowedMallocSize;
}
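
// The hard cap kMaxAllowedMallocSize is 1ull << 40 bytes (1 TiB). When the
// user sets max_allocation_size_mb, the value is converted from MiB to bytes
// by the << 20 shift above; e.g. max_allocation_size_mb=4096 would yield a
// cap of 4096 << 20 == 4 GiB. user_alloc_internal() below rejects requests
// that exceed either limit.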

void InitializeAllocatorLate() {
  new(global_proc()) GlobalProc();
}

void AllocatorProcStart(Processor *proc) {
  allocator()->InitCache(&proc->alloc_cache);
  internal_allocator()->InitCache(&proc->internal_alloc_cache);
}

void AllocatorProcFinish(Processor *proc) {
  allocator()->DestroyCache(&proc->alloc_cache);
  internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
      !ShouldReport(thr, ReportTypeSignalUnsafe))
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
    return;
  ThreadRegistryLock l(&ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(stack, true);
  OutputReport(thr, rep);
}

void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
                          bool signal) {
  if (sz >= kMaxAllowedMallocSize || align >= kMaxAllowedMallocSize ||
      sz > max_user_defined_malloc_size) {
    if (AllocatorMayReturnNull())
      return nullptr;
    uptr malloc_limit =
        Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportAllocationSizeTooBig(sz, malloc_limit, &stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportRssLimitExceeded(&stack);
  }
  void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportOutOfMemory(sz, &stack);
  }
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}

void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  ScopedGlobalProcessor sgp;
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->proc()->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}

void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
}

void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportCallocOverflow(n, size, &stack);
  }
  void *p = user_alloc_internal(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return SetErrnoOnNull(p);
}

void *user_reallocarray(ThreadState *thr, uptr pc, void *p, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportReallocArrayOverflow(size, n, &stack);
  }
  return user_realloc(thr, pc, p, size * n);
}

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = 0x%zx\n", thr->tid, sz, p);
  // Note: this can run before thread initialization/after finalization.
  // As a result this is not necessarily synchronized with DoReset,
  // which iterates over and resets all sync objects,
  // but it is fine to create new MBlocks in this context.
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  // If this runs before thread initialization/after finalization
  // and we don't have trace initialized, we can't imitate writes.
  // In such a case just reset the shadow range; it is fine since
  // it affects only a small fraction of special objects.
  if (write && thr->ignore_reads_and_writes == 0 &&
      atomic_load_relaxed(&thr->trace_pos))
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}

void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  if (!thr->slot) {
    // Very early/late in thread lifetime, or during fork.
    UNUSED uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, false);
    DPrintf("#%d: free(0x%zx, %zu) (no slot)\n", thr->tid, p, sz);
    return;
  }
  SlotLocker locker(thr);
  uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, true);
  DPrintf("#%d: free(0x%zx, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}

void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (!p)
    return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
  if (!sz) {
    user_free(thr, pc, p);
    return nullptr;
  }
  void *new_p = user_alloc_internal(thr, pc, sz);
  if (new_p) {
    uptr old_sz = user_alloc_usable_size(p);
    internal_memcpy(new_p, p, min(old_sz, sz));
    user_free(thr, pc, p);
  }
  return SetErrnoOnNull(new_p);
}
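
// Note on user_realloc() above: the new block is allocated first, min(old
// size, new size) bytes are copied, and only then is the old block freed, so
// on allocation failure the original block is left untouched (matching C
// realloc semantics). realloc(p, 0) is treated as free(p) and returns
// nullptr.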

void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!IsPowerOfTwo(align))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAllocationAlignment(align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
                        uptr sz) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidPosixMemalignAlignment(align, &stack);
  }
  void *ptr = user_alloc_internal(thr, pc, sz, align);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by user_alloc_internal.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, align));
  *memptr = ptr;
  return 0;
}

void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAlignedAllocAlignment(sz, align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
}

void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportPvallocOverflow(sz, &stack);
  }
  // pvalloc(0) should allocate one page.
  sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
}

static const void *user_alloc_begin(const void *p) {
  if (p == nullptr || !IsAppMem((uptr)p))
    return nullptr;
  void *beg = allocator()->GetBlockBegin(p);
  if (!beg)
    return nullptr;

  MBlock *b = ctx->metamap.GetBlock((uptr)beg);
  if (!b)
    return nullptr;  // Not a valid pointer.

  return (const void *)beg;
}

uptr user_alloc_usable_size(const void *p) {
  if (p == 0 || !IsAppMem((uptr)p))
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}

uptr user_alloc_usable_size_fast(const void *p) {
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  // Static objects may have malloc'd before tsan completes
  // initialization, and may believe returned ptrs to be valid.
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}

void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  RunMallocHooks(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  RunFreeHooks(ptr);
}

void *Alloc(uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalAllocAccess();
  return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}

void FreeImpl(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalAllocAccess();
  InternalFree(p, &thr->proc()->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

const void *__sanitizer_get_allocated_begin(const void *p) {
  return user_alloc_begin(p);
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}

uptr __sanitizer_get_allocated_size_fast(const void *p) {
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = user_alloc_usable_size_fast(p);
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}

void __sanitizer_purge_allocator() {
  allocator()->ForceReleaseToOS();
}

void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  allocator()->SwallowCache(&thr->proc()->alloc_cache);
  internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
  ctx->metamap.OnProcIdle(thr->proc());
}
}  // extern "C"
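
// Sketch of how a program built with -fsanitize=thread might query the
// introspection entry points defined in the extern "C" block above. This is
// only an illustrative example, not part of the runtime; it assumes the
// declarations from the public <sanitizer/allocator_interface.h> header.
//
//   #include <sanitizer/allocator_interface.h>
//   #include <stdio.h>
//   #include <stdlib.h>
//
//   int main() {
//     void *p = malloc(100);
//     printf("owned=%d size=%zu heap=%zu\n",
//            __sanitizer_get_ownership(p),
//            __sanitizer_get_allocated_size(p),
//            __sanitizer_get_heap_size());
//     free(p);
//     __sanitizer_purge_allocator();  // ask the allocator to return pages to the OS
//     return 0;
//   }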