Path: contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.cpp
//=-- lsan_common.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

#if CAN_SANITIZE_LEAKS

#  if SANITIZER_APPLE
// https://github.com/apple-oss-distributions/objc4/blob/8701d5672d3fd3cd817aeb84db1077aafe1a1604/runtime/objc-runtime-new.h#L127
#    if SANITIZER_IOS && !SANITIZER_IOSSIM
#      define OBJC_DATA_MASK 0x0000007ffffffff8UL
#    else
#      define OBJC_DATA_MASK 0x00007ffffffffff8UL
#    endif
#  endif

namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
// also to protect the global list of root regions.
static Mutex global_mutex;

void LockGlobal() SANITIZER_ACQUIRE(global_mutex) { global_mutex.Lock(); }
void UnlockGlobal() SANITIZER_RELEASE(global_mutex) { global_mutex.Unlock(); }

Flags lsan_flags;

void DisableCounterUnderflow() {
  if (common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
}

void Flags::SetDefaults() {
#  define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#  include "lsan_flags.inc"
#  undef LSAN_FLAG
}

void RegisterLsanFlags(FlagParser *parser, Flags *f) {
#  define LSAN_FLAG(Type, Name, DefaultValue, Description) \
    RegisterFlag(parser, #Name, Description, &f->Name);
#  include "lsan_flags.inc"
#  undef LSAN_FLAG
}
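// Illustration of the X-macro pattern above (a sketch, not part of the
// build): for a hypothetical entry in lsan_flags.inc such as
//   LSAN_FLAG(bool, use_stacks, true, "Use stacks as a source of roots.")
// Flags::SetDefaults() expands to
//   use_stacks = true;
// and RegisterLsanFlags() expands to
//   RegisterFlag(parser, "use_stacks", "Use stacks as a source of roots.",
//                &f->use_stacks);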
#  define LOG_POINTERS(...)      \
    do {                         \
      if (flags()->log_pointers) \
        Report(__VA_ARGS__);     \
    } while (0)

#  define LOG_THREADS(...)      \
    do {                        \
      if (flags()->log_threads) \
        Report(__VA_ARGS__);    \
    } while (0)

class LeakSuppressionContext {
  bool parsed = false;
  SuppressionContext context;
  bool suppressed_stacks_sorted = true;
  InternalMmapVector<u32> suppressed_stacks;
  const LoadedModule *suppress_module = nullptr;

  void LazyInit();
  Suppression *GetSuppressionForAddr(uptr addr);
  bool SuppressInvalid(const StackTrace &stack);
  bool SuppressByRule(const StackTrace &stack, uptr hit_count, uptr total_size);

 public:
  LeakSuppressionContext(const char *suppression_types[],
                         int suppression_types_num)
      : context(suppression_types, suppression_types_num) {}

  bool Suppress(u32 stack_trace_id, uptr hit_count, uptr total_size);

  const InternalMmapVector<u32> &GetSortedSuppressedStacks() {
    if (!suppressed_stacks_sorted) {
      suppressed_stacks_sorted = true;
      SortAndDedup(suppressed_stacks);
    }
    return suppressed_stacks;
  }
  void PrintMatchedSuppressions();
};

alignas(64) static char suppression_placeholder[sizeof(LeakSuppressionContext)];
static LeakSuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = {kSuppressionLeak};
static const char kStdSuppressions[] =
#  if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // definition.
    "leak:*pthread_exit*\n"
#  endif  // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
#  if SANITIZER_APPLE
    // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
    "leak:*_os_trace*\n"
#  endif
    // TLS leak in some glibc versions, described in
    // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
    "leak:*tls_get_addr*\n";
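// User-supplied suppressions are parsed the same way (a usage sketch; the
// file name is hypothetical):
//   $ cat my.supp
//   leak:MyLeakyFunction
//   leak:libleaky.so
//   $ LSAN_OPTIONS=suppressions=my.supp ./a.out
// Each "leak:" pattern is matched against module, function and source file
// names, as implemented in GetSuppressionForAddr() below.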
void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);
  suppression_ctx = new (suppression_placeholder)
      LeakSuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
}

void LeakSuppressionContext::LazyInit() {
  if (!parsed) {
    parsed = true;
    context.ParseFromFile(flags()->suppressions);
    if (&__lsan_default_suppressions)
      context.Parse(__lsan_default_suppressions());
    context.Parse(kStdSuppressions);
    if (flags()->use_tls && flags()->use_ld_allocations)
      suppress_module = GetLinker();
  }
}

Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
  Suppression *s = nullptr;

  // Suppress by module name.
  const char *module_name = Symbolizer::GetOrInit()->GetModuleNameForPc(addr);
  if (!module_name)
    module_name = "<unknown module>";
  if (context.Match(module_name, kSuppressionLeak, &s))
    return s;

  // Suppress by file or function name.
  SymbolizedStackHolder symbolized_stack(
      Symbolizer::GetOrInit()->SymbolizePC(addr));
  const SymbolizedStack *frames = symbolized_stack.get();
  for (const SymbolizedStack *cur = frames; cur; cur = cur->next) {
    if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
        context.Match(cur->info.file, kSuppressionLeak, &s)) {
      break;
    }
  }
  return s;
}

static uptr GetCallerPC(const StackTrace &stack) {
  // The top frame is our malloc/calloc/etc. The next frame is the caller.
  if (stack.size >= 2)
    return stack.trace[1];
  return 0;
}

#  if SANITIZER_APPLE
// Several pointers in the Objective-C runtime (method cache and class_rw_t,
// for example) are tagged with additional bits we need to strip.
static inline void *TransformPointer(void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return reinterpret_cast<void *>(ptr & OBJC_DATA_MASK);
}
#  endif
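// Illustrative example (made-up value, macOS mask): a tagged runtime pointer
// such as
//   0x80007f12345678e9
// masked with OBJC_DATA_MASK (0x00007ffffffffff8UL) yields
//   0x00007f12345678e8,
// i.e. the flag bits stored in the low three and topmost bits are stripped
// and the real pointer remains.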
// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
// modules accounting etc.
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
// They are allocated with a __libc_memalign() call in allocate_and_init()
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
// blocks, but we can make sure they come from our own allocator by intercepting
// __libc_memalign(). On top of that, there is no easy way to reach them. Their
// addresses are stored in a dynamically allocated array (the DTV) which is
// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
// being reachable from the static TLS, and the dynamic TLS being reachable from
// the DTV. This is because the initial DTV is allocated before our interception
// mechanism kicks in, and thus we don't recognize it as allocated memory. We
// can't special-case it either, since we don't know its size.
// Our solution is to include in the root set all allocations made from
// ld-linux.so (which is where allocate_and_init() is implemented). This is
// guaranteed to include all dynamic TLS blocks (and possibly other allocations
// which we don't care about).
// On all other platforms, this simply checks to ensure that the caller pc is
// valid before reporting chunks as leaked.
bool LeakSuppressionContext::SuppressInvalid(const StackTrace &stack) {
  uptr caller_pc = GetCallerPC(stack);
  // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
  // it as reachable, as we can't properly report its allocation stack anyway.
  return !caller_pc ||
         (suppress_module && suppress_module->containsAddress(caller_pc));
}

bool LeakSuppressionContext::SuppressByRule(const StackTrace &stack,
                                            uptr hit_count, uptr total_size) {
  for (uptr i = 0; i < stack.size; i++) {
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) {
      s->weight += total_size;
      atomic_fetch_add(&s->hit_count, hit_count, memory_order_relaxed);
      return true;
    }
  }
  return false;
}

bool LeakSuppressionContext::Suppress(u32 stack_trace_id, uptr hit_count,
                                      uptr total_size) {
  LazyInit();
  StackTrace stack = StackDepotGet(stack_trace_id);
  if (!SuppressInvalid(stack) && !SuppressByRule(stack, hit_count, total_size))
    return false;
  suppressed_stacks_sorted = false;
  suppressed_stacks.push_back(stack_trace_id);
  return true;
}

static LeakSuppressionContext *GetSuppressionContext() {
  CHECK(suppression_ctx);
  return suppression_ctx;
}

void InitCommonLsan() {
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}

class Decorator : public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() {}
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
};

static inline bool MaybeUserPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress)
    return false;
#  if defined(__x86_64__)
  // TODO: support LAM48 and 5 level page tables.
  // LAM_U57 mask format
  //   * top byte: 0x81 because the format is: [0] [6-bit tag] [0]
  //   * top-1 byte: 0xff because it should be 0
  //   * top-2 byte: 0x80 because Linux uses 128 TB VMA ending at 0x7fffffffffff
  constexpr uptr kLAM_U57Mask = 0x81ff80;
  constexpr uptr kPointerMask = kLAM_U57Mask << 40;
  return ((p & kPointerMask) == 0);
#  elif defined(__mips64)
  return ((p >> 40) == 0);
#  elif defined(__aarch64__)
  // TBI (Top Byte Ignore) feature of AArch64: bits [63:56] are ignored in
  // address translation and can be used to store a tag.
  constexpr uptr kPointerMask = 255ULL << 48;
  // Accept up to 48 bit VMA.
  return ((p & kPointerMask) == 0);
#  elif defined(__loongarch_lp64)
  // Allow 47-bit user-space VMA for now.
  return ((p >> 47) == 0);
#  else
  return true;
#  endif
}
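// Worked example for the x86-64 mask above: kPointerMask is
// 0x81ff800000000000. A canonical user pointer such as 0x00007f0012345678
// shares no bits with the mask and is accepted; a kernel address
// (e.g. 0xffff800000000000) or anything at or above the 128 TB boundary
// (e.g. 0x0000800000000000) intersects the mask and is rejected.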
// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable chunks
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  CHECK(tag == kReachable || tag == kIndirectlyLeaked);
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, (void *)begin,
               (void *)end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {
    void *p = *reinterpret_cast<void **>(pp);
#  if SANITIZER_APPLE
    p = TransformPointer(p);
#  endif
    if (!MaybeUserPointer(reinterpret_cast<uptr>(p)))
      continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk)
      continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin)
      continue;
    LsanMetadata m(chunk);
    if (m.tag() == kReachable || m.tag() == kIgnored)
      continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          (void *)pp, p, (void *)chunk, (void *)(chunk + m.requested_size()),
          m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n",
                 (void *)pp, p, (void *)chunk,
                 (void *)(chunk + m.requested_size()), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}

// Scans a global range for pointers.
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
  uptr allocator_begin = 0, allocator_end = 0;
  GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
  if (begin <= allocator_begin && allocator_begin < end) {
    CHECK_LE(allocator_begin, allocator_end);
    CHECK_LE(allocator_end, end);
    if (begin < allocator_begin)
      ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
                           kReachable);
    if (allocator_end < end)
      ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
  } else {
    ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
  }
}

void ScanExtraStackRanges(const InternalMmapVector<Range> &ranges,
                          Frontier *frontier) {
  for (uptr i = 0; i < ranges.size(); i++) {
    ScanRangeForPointers(ranges[i].begin, ranges[i].end, frontier, "FAKE STACK",
                         kReachable);
  }
}
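// Worked example for the alignment logic in ScanRangeForPointers() above:
// with begin = 0x1003 and pointer_alignment() = 8, scanning starts at
// pp = 0x1008 and visits 0x1008, 0x1010, 0x1018, ... for as long as
// pp + sizeof(void *) <= end.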
#  if SANITIZER_FUCHSIA

// Fuchsia handles all threads together with its own callback.
static void ProcessThreads(SuspendedThreadsList const &, Frontier *, tid_t,
                           uptr) {}

#  else

#    if SANITIZER_ANDROID
// FIXME: Move this out into *libcdep.cpp
extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_iterate_dynamic_tls(
    pid_t, void (*cb)(void *, void *, uptr, void *), void *);
#    endif

static void ProcessThreadRegistry(Frontier *frontier) {
  InternalMmapVector<uptr> ptrs;
  GetAdditionalThreadContextPtrsLocked(&ptrs);

  for (uptr i = 0; i < ptrs.size(); ++i) {
    void *ptr = reinterpret_cast<void *>(ptrs[i]);
    uptr chunk = PointsIntoChunk(ptr);
    if (!chunk)
      continue;
    LsanMetadata m(chunk);
    if (!m.allocated())
      continue;

    // Mark as reachable and add to frontier.
    LOG_POINTERS("Treating pointer %p from ThreadContext as reachable\n", ptr);
    m.set_tag(kReachable);
    frontier->push_back(chunk);
  }
}

// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier, tid_t caller_tid,
                           uptr caller_sp) {
  InternalMmapVector<uptr> registers;
  InternalMmapVector<Range> extra_ranges;
  for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
    tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %llu.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    DTLS *dtls;
    bool thread_found =
        GetThreadRangesLocked(os_id, &stack_begin, &stack_end, &tls_begin,
                              &tls_end, &cache_begin, &cache_end, &dtls);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      LOG_THREADS("Thread %llu not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    PtraceRegistersStatus have_registers =
        suspended_threads.GetRegistersAndSP(i, &registers, &sp);
    if (have_registers != REGISTERS_AVAILABLE) {
      Report("Unable to get registers from thread %llu.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable unless
      // GetRegistersAndSP failed with ESRCH.
      if (have_registers == REGISTERS_UNAVAILABLE_FATAL)
        continue;
      sp = stack_begin;
    }
    if (suspended_threads.GetThreadID(i) == caller_tid) {
      sp = caller_sp;
    }

    if (flags()->use_registers && have_registers) {
      uptr registers_begin = reinterpret_cast<uptr>(registers.data());
      uptr registers_end =
          reinterpret_cast<uptr>(registers.data() + registers.size());
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);
    }
    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", (void *)stack_begin,
                  (void *)stack_end, (void *)sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack, or swapcontext was used).
        // Again, consider the entire stack range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
        uptr page_size = GetPageSizeCached();
        int skipped = 0;
        while (stack_begin < stack_end &&
               !IsAccessibleMemoryRange(stack_begin, 1)) {
          skipped++;
          stack_begin += page_size;
        }
        LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
                    skipped, (void *)stack_begin, (void *)stack_end);
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      extra_ranges.clear();
      GetThreadExtraStackRangesLocked(os_id, &extra_ranges);
      ScanExtraStackRanges(extra_ranges, frontier);
    }

    if (flags()->use_tls) {
      if (tls_begin) {
        LOG_THREADS("TLS at %p-%p.\n", (void *)tls_begin, (void *)tls_end);
        // If the tls and cache ranges don't overlap, scan the full tls range;
        // otherwise, only scan the non-overlapping portions.
        if (cache_begin == cache_end || tls_end < cache_begin ||
            tls_begin > cache_end) {
          ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
        } else {
          if (tls_begin < cache_begin)
            ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                                 kReachable);
          if (tls_end > cache_end)
            ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
                                 kReachable);
        }
      }
#    if SANITIZER_ANDROID
      auto *cb = +[](void *dtls_begin, void *dtls_end, uptr /*dso_id*/,
                     void *arg) -> void {
        ScanRangeForPointers(reinterpret_cast<uptr>(dtls_begin),
                             reinterpret_cast<uptr>(dtls_end),
                             reinterpret_cast<Frontier *>(arg), "DTLS",
                             kReachable);
      };

      // FIXME: There might be a race condition here (and in Bionic) if the
      // thread is suspended in the middle of updating its DTLS. IOWs, we
      // could scan already freed memory. (probably fine for now)
      __libc_iterate_dynamic_tls(os_id, cb, frontier);
#    else
      if (dtls && !DTLSInDestruction(dtls)) {
        ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) {
          uptr dtls_beg = dtv.beg;
          uptr dtls_end = dtls_beg + dtv.size;
          if (dtls_beg < dtls_end) {
            LOG_THREADS("DTLS %d at %p-%p.\n", id, (void *)dtls_beg,
                        (void *)dtls_end);
            ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
                                 kReachable);
          }
        });
      } else {
        // We are handling a thread with DTLS under destruction. Log about
        // this and continue.
        LOG_THREADS("Thread %llu has DTLS under destruction.\n", os_id);
      }
#    endif
    }
  }

  // Add pointers reachable from ThreadContexts.
  ProcessThreadRegistry(frontier);
}

#  endif  // SANITIZER_FUCHSIA

// A map that contains [region_begin, region_end) pairs.
using RootRegions = DenseMap<detail::DenseMapPair<uptr, uptr>, uptr>;

static RootRegions &GetRootRegionsLocked() {
  global_mutex.CheckLocked();
  static RootRegions *regions = nullptr;
  alignas(RootRegions) static char placeholder[sizeof(RootRegions)];
  if (!regions)
    regions = new (placeholder) RootRegions();
  return *regions;
}

bool HasRootRegions() { return !GetRootRegionsLocked().empty(); }

void ScanRootRegions(Frontier *frontier,
                     const InternalMmapVectorNoCtor<Region> &mapped_regions) {
  if (!flags()->use_root_regions)
    return;

  InternalMmapVector<Region> regions;
  GetRootRegionsLocked().forEach([&](const auto &kv) {
    regions.push_back({kv.first.first, kv.first.second});
    return true;
  });

  InternalMmapVector<Region> intersection;
  Intersect(mapped_regions, regions, intersection);

  for (const Region &r : intersection) {
    LOG_POINTERS("Root region intersects with mapped region at %p-%p\n",
                 (void *)r.begin, (void *)r.end);
    ScanRangeForPointers(r.begin, r.end, frontier, "ROOT", kReachable);
  }
}

// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions || !HasRootRegions())
    return;
  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
  MemoryMappedSegment segment;
  InternalMmapVector<Region> mapped_regions;
  while (proc_maps.Next(&segment))
    if (segment.IsReadable())
      mapped_regions.push_back({segment.start, segment.end});
  ScanRootRegions(frontier, mapped_regions);
}
static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
  }
}

static void IgnoredSuppressedCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated() || m.tag() == kIgnored)
    return;

  const InternalMmapVector<u32> &suppressed =
      *static_cast<const InternalMmapVector<u32> *>(arg);
  uptr idx = InternalLowerBound(suppressed, m.stack_trace_id());
  if (idx >= suppressed.size() || m.stack_trace_id() != suppressed[idx])
    return;

  LOG_POINTERS("Suppressed: chunk %p-%p of size %zu.\n", (void *)chunk,
               (void *)(chunk + m.requested_size()), m.requested_size());
  m.set_tag(kIgnored);
}

// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored) {
    LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n", (void *)chunk,
                 (void *)(chunk + m.requested_size()), m.requested_size());
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
  }
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
                              Frontier *frontier, tid_t caller_tid,
                              uptr caller_sp) {
  const InternalMmapVector<u32> &suppressed_stacks =
      GetSuppressionContext()->GetSortedSuppressedStacks();
  if (!suppressed_stacks.empty()) {
    ForEachChunk(IgnoredSuppressedCb,
                 const_cast<InternalMmapVector<u32> *>(&suppressed_stacks));
  }
  ForEachChunk(CollectIgnoredCb, frontier);
  ProcessGlobalRegions(frontier);
  ProcessThreads(suspended_threads, frontier, caller_tid, caller_sp);
  ProcessRootRegions(frontier);
  FloodFillTag(frontier, kReachable);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  ProcessPlatformSpecificAllocations(frontier);
  FloodFillTag(frontier, kReachable);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
}

// ForEachChunk callback. Resets the tags to pre-leak-check state.
static void ResetTagsCb(uptr chunk, void *arg) {
  (void)arg;
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kIgnored)
    m.set_tag(kDirectlyLeaked);
}
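// In effect, ClassifyAllChunks() is the mark phase of a mark-and-sweep pass:
// chunks default to kDirectlyLeaked (see ResetTagsCb() above), the root set
// (ignored chunks, globals, thread stacks/registers/TLS, root regions) seeds
// the frontier, FloodFillTag() propagates kReachable through the heap, and
// any still-unreachable chunk referenced by another leaked chunk is
// downgraded to kIndirectlyLeaked. CollectLeaksCb() below then sweeps up
// whatever remains tagged as leaked.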
// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakedChunks *leaks = reinterpret_cast<LeakedChunks *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated())
    return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked)
    leaks->push_back({chunk, m.stack_trace_id(), m.requested_size(), m.tag()});
}

void LeakSuppressionContext::PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched;
  context.GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++) {
    Printf("%7zu %10zu %s\n",
           static_cast<uptr>(atomic_load_relaxed(&matched[i]->hit_count)),
           matched[i]->weight, matched[i]->templ);
  }
  Printf("%s\n\n", line);
}

#  if SANITIZER_FUCHSIA

// Fuchsia provides a libc interface that guarantees all threads are
// covered, and SuspendedThreadsList is never really used.
static void ReportUnsuspendedThreads(const SuspendedThreadsList &) {}

#  else  // !SANITIZER_FUCHSIA

static void ReportUnsuspendedThreads(
    const SuspendedThreadsList &suspended_threads) {
  InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
  for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
    threads[i] = suspended_threads.GetThreadID(i);

  Sort(threads.data(), threads.size());

  InternalMmapVector<tid_t> unsuspended;
  GetRunningThreadsLocked(&unsuspended);

  for (auto os_id : unsuspended) {
    uptr i = InternalLowerBound(threads, os_id);
    if (i >= threads.size() || threads[i] != os_id)
      Report(
          "Running thread %zu was not suspended. False leaks are possible.\n",
          os_id);
  }
}

#  endif  // !SANITIZER_FUCHSIA

static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                  void *arg) {
  CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ReportUnsuspendedThreads(suspended_threads);
  ClassifyAllChunks(suspended_threads, &param->frontier, param->caller_tid,
                    param->caller_sp);
  ForEachChunk(CollectLeaksCb, &param->leaks);
  // Clean up for subsequent leak checks. This assumes we did not overwrite any
  // kIgnored tags.
  ForEachChunk(ResetTagsCb, nullptr);
  param->success = true;
}

static bool PrintResults(LeakReport &report) {
  uptr unsuppressed_count = report.UnsuppressedLeakCount();
  if (unsuppressed_count) {
    Decorator d;
    Printf(
        "\n"
        "================================================================="
        "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.Default());
    report.ReportTopLeaks(flags()->max_leaks);
  }
  if (common_flags()->print_suppressions)
    GetSuppressionContext()->PrintMatchedSuppressions();
  if (unsuppressed_count > 0) {
    report.PrintSummary();
    return true;
  }
  return false;
}
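// With print_suppressions=1, the block printed by PrintMatchedSuppressions()
// above looks roughly like this (illustrative values):
//   -----------------------------------------------------
//   Suppressions used:
//     count      bytes template
//         2       1024 MyLeakyFunction
//   -----------------------------------------------------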
static bool CheckForLeaks() {
  if (&__lsan_is_turned_off && __lsan_is_turned_off()) {
    VReport(1, "LeakSanitizer is disabled");
    return false;
  }
  VReport(1, "LeakSanitizer: checking for leaks");
  // Inside LockStuffAndStopTheWorld we can't run symbolizer, so we can't match
  // suppressions. However if a stack id was previously suppressed, it should be
  // suppressed in future checks as well.
  for (int i = 0;; ++i) {
    EnsureMainThreadIDIsCorrect();
    CheckForLeaksParam param;
    // Capture the calling thread's stack pointer early, to avoid false
    // negatives. An old frame with dead pointers might be overlapped by a new
    // frame inside CheckForLeaks which does not use the bytes with pointers
    // before the threads are suspended and stack pointers are captured.
    param.caller_tid = GetTid();
    param.caller_sp = reinterpret_cast<uptr>(__builtin_frame_address(0));
    LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);
    if (!param.success) {
      Report("LeakSanitizer has encountered a fatal error.\n");
      Report(
          "HINT: For debugging, try setting environment variable "
          "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
      Report(
          "HINT: LeakSanitizer does not work under ptrace (strace, gdb, "
          "etc)\n");
      Die();
    }
    LeakReport leak_report;
    leak_report.AddLeakedChunks(param.leaks);

    // No new suppressed stacks, so a rerun will not help and we can report.
    if (!leak_report.ApplySuppressions())
      return PrintResults(leak_report);

    // No indirect leaks to report, so we are done here.
    if (!leak_report.IndirectUnsuppressedLeakCount())
      return PrintResults(leak_report);

    if (i >= 8) {
      Report("WARNING: LeakSanitizer gave up on indirect leaks suppression.\n");
      return PrintResults(leak_report);
    }

    // We found a new, previously unseen suppressed call stack. Rerun to make
    // sure it does not hold indirect leaks.
    VReport(1, "Rerun with %zu suppressed stacks.",
            GetSuppressionContext()->GetSortedSuppressedStacks().size());
  }
}

static bool has_reported_leaks = false;
bool HasReportedLeaks() { return has_reported_leaks; }

void DoLeakCheck() {
  Lock l(&global_mutex);
  static bool already_done;
  if (already_done)
    return;
  already_done = true;
  has_reported_leaks = CheckForLeaks();
  if (has_reported_leaks)
    HandleLeaks();
}

static int DoRecoverableLeakCheck() {
  Lock l(&global_mutex);
  bool have_leaks = CheckForLeaks();
  return have_leaks ? 1 : 0;
}

void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunks(). We don't expect to ever see this many leaks
// in real-world applications.
// FIXME: Get rid of this limit by moving logic into DedupLeaks.
const uptr kMaxLeaksConsidered = 5000;
void LeakReport::AddLeakedChunks(const LeakedChunks &chunks) {
  for (const LeakedChunk &leak : chunks) {
    uptr chunk = leak.chunk;
    u32 stack_trace_id = leak.stack_trace_id;
    uptr leaked_size = leak.leaked_size;
    ChunkTag tag = leak.tag;
    CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);

    if (u32 resolution = flags()->resolution) {
      StackTrace stack = StackDepotGet(stack_trace_id);
      stack.size = Min(stack.size, resolution);
      stack_trace_id = StackDepotPut(stack);
    }

    bool is_directly_leaked = (tag == kDirectlyLeaked);
    uptr i;
    for (i = 0; i < leaks_.size(); i++) {
      if (leaks_[i].stack_trace_id == stack_trace_id &&
          leaks_[i].is_directly_leaked == is_directly_leaked) {
        leaks_[i].hit_count++;
        leaks_[i].total_size += leaked_size;
        break;
      }
    }
    if (i == leaks_.size()) {
      if (leaks_.size() == kMaxLeaksConsidered)
        return;
      Leak leak = {next_id_++,         /* hit_count */ 1,
                   leaked_size,        stack_trace_id,
                   is_directly_leaked, /* is_suppressed */ false};
      leaks_.push_back(leak);
    }
    if (flags()->report_objects) {
      LeakedObject obj = {leaks_[i].id, GetUserAddr(chunk), leaked_size};
      leaked_objects_.push_back(obj);
    }
  }
}

static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}

void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf(
        "Too many leaks! Only the first %zu leaks encountered will be "
        "reported.\n",
        kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  Sort(leaks_.data(), leaks_.size(), &LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed)
      continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report)
      break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}
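// Each entry printed by PrintReportForLeak() below has this shape
// (illustrative addresses and sizes):
//   Direct leak of 1024 byte(s) in 2 object(s) allocated from:
//       #0 0x55e4b8f3d1a2 in malloc
//       #1 0x55e4b8f412c4 in main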
"Direct" : "Indirect",924leaks_[index].total_size, leaks_[index].hit_count);925Printf("%s", d.Default());926927CHECK(leaks_[index].stack_trace_id);928StackDepotGet(leaks_[index].stack_trace_id).Print();929930if (flags()->report_objects) {931Printf("Objects leaked above:\n");932PrintLeakedObjectsForLeak(index);933Printf("\n");934}935}936937void LeakReport::PrintLeakedObjectsForLeak(uptr index) {938u32 leak_id = leaks_[index].id;939for (uptr j = 0; j < leaked_objects_.size(); j++) {940if (leaked_objects_[j].leak_id == leak_id)941Printf("%p (%zu bytes)\n", (void *)leaked_objects_[j].addr,942leaked_objects_[j].size);943}944}945946void LeakReport::PrintSummary() {947CHECK(leaks_.size() <= kMaxLeaksConsidered);948uptr bytes = 0, allocations = 0;949for (uptr i = 0; i < leaks_.size(); i++) {950if (leaks_[i].is_suppressed)951continue;952bytes += leaks_[i].total_size;953allocations += leaks_[i].hit_count;954}955InternalScopedString summary;956summary.AppendF("%zu byte(s) leaked in %zu allocation(s).", bytes,957allocations);958ReportErrorSummary(summary.data());959}960961uptr LeakReport::ApplySuppressions() {962LeakSuppressionContext *suppressions = GetSuppressionContext();963uptr new_suppressions = 0;964for (uptr i = 0; i < leaks_.size(); i++) {965if (suppressions->Suppress(leaks_[i].stack_trace_id, leaks_[i].hit_count,966leaks_[i].total_size)) {967leaks_[i].is_suppressed = true;968++new_suppressions;969}970}971return new_suppressions;972}973974uptr LeakReport::UnsuppressedLeakCount() {975uptr result = 0;976for (uptr i = 0; i < leaks_.size(); i++)977if (!leaks_[i].is_suppressed)978result++;979return result;980}981982uptr LeakReport::IndirectUnsuppressedLeakCount() {983uptr result = 0;984for (uptr i = 0; i < leaks_.size(); i++)985if (!leaks_[i].is_suppressed && !leaks_[i].is_directly_leaked)986result++;987return result;988}989990} // namespace __lsan991#else // CAN_SANITIZE_LEAKS992namespace __lsan {993void InitCommonLsan() {}994void DoLeakCheck() {}995void DoRecoverableLeakCheckVoid() {}996void DisableInThisThread() {}997void EnableInThisThread() {}998} // namespace __lsan999#endif // CAN_SANITIZE_LEAKS10001001using namespace __lsan;10021003extern "C" {1004SANITIZER_INTERFACE_ATTRIBUTE1005void __lsan_ignore_object(const void *p) {1006#if CAN_SANITIZE_LEAKS1007if (!common_flags()->detect_leaks)1008return;1009// Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not1010// locked.1011Lock l(&global_mutex);1012IgnoreObjectResult res = IgnoreObject(p);1013if (res == kIgnoreObjectInvalid)1014VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);1015if (res == kIgnoreObjectAlreadyIgnored)1016VReport(1,1017"__lsan_ignore_object(): "1018"heap object at %p is already being ignored\n",1019p);1020if (res == kIgnoreObjectSuccess)1021VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);1022#endif // CAN_SANITIZE_LEAKS1023}10241025SANITIZER_INTERFACE_ATTRIBUTE1026void __lsan_register_root_region(const void *begin, uptr size) {1027#if CAN_SANITIZE_LEAKS1028VReport(1, "Registered root region at %p of size %zu\n", begin, size);1029uptr b = reinterpret_cast<uptr>(begin);1030uptr e = b + size;1031CHECK_LT(b, e);10321033Lock l(&global_mutex);1034++GetRootRegionsLocked()[{b, e}];1035#endif // CAN_SANITIZE_LEAKS1036}10371038SANITIZER_INTERFACE_ATTRIBUTE1039void __lsan_unregister_root_region(const void *begin, uptr size) {1040#if CAN_SANITIZE_LEAKS1041uptr b = reinterpret_cast<uptr>(begin);1042uptr e = b + size;1043CHECK_LT(b, e);1044VReport(1, "Unregistered root region 
at %p of size %zu\n", begin, size);10451046{1047Lock l(&global_mutex);1048if (auto *f = GetRootRegionsLocked().find({b, e})) {1049if (--(f->second) == 0)1050GetRootRegionsLocked().erase(f);1051return;1052}1053}1054Report(1055"__lsan_unregister_root_region(): region at %p of size %zu has not "1056"been registered.\n",1057begin, size);1058Die();1059#endif // CAN_SANITIZE_LEAKS1060}10611062SANITIZER_INTERFACE_ATTRIBUTE1063void __lsan_disable() {1064#if CAN_SANITIZE_LEAKS1065__lsan::DisableInThisThread();1066#endif1067}10681069SANITIZER_INTERFACE_ATTRIBUTE1070void __lsan_enable() {1071#if CAN_SANITIZE_LEAKS1072__lsan::EnableInThisThread();1073#endif1074}10751076SANITIZER_INTERFACE_ATTRIBUTE1077void __lsan_do_leak_check() {1078#if CAN_SANITIZE_LEAKS1079if (common_flags()->detect_leaks)1080__lsan::DoLeakCheck();1081#endif // CAN_SANITIZE_LEAKS1082}10831084SANITIZER_INTERFACE_ATTRIBUTE1085int __lsan_do_recoverable_leak_check() {1086#if CAN_SANITIZE_LEAKS1087if (common_flags()->detect_leaks)1088return __lsan::DoRecoverableLeakCheck();1089#endif // CAN_SANITIZE_LEAKS1090return 0;1091}10921093SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_options, void) {1094return "";1095}10961097#if !SANITIZER_SUPPORTS_WEAK_HOOKS1098SANITIZER_INTERFACE_WEAK_DEF(int, __lsan_is_turned_off, void) {1099return 0;1100}11011102SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_suppressions, void) {1103return "";1104}1105#endif1106} // extern "C"110711081109