Path: blob/main/contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.cpp
//=-- lsan_allocator.cpp --------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {
#if defined(__i386__) || defined(__arm__)
static const uptr kMaxAllowedMallocSize = 1ULL << 30;
#elif defined(__mips64) || defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 4ULL << 30;
#else
static const uptr kMaxAllowedMallocSize = 1ULL << 40;
#endif

static Allocator allocator;

static uptr max_malloc_size;

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.InitLinkerInitialized(
      common_flags()->allocator_release_to_os_interval_ms);
  if (common_flags()->max_allocation_size_mb)
    max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
                          kMaxAllowedMallocSize);
  else
    max_malloc_size = kMaxAllowedMallocSize;
}

void AllocatorThreadStart() { allocator.InitCache(GetAllocatorCache()); }

void AllocatorThreadFinish() {
  allocator.SwallowCache(GetAllocatorCache());
  allocator.DestroyCache(GetAllocatorCache());
}

static ChunkMetadata *Metadata(const void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}
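// The first byte of ChunkMetadata is the `allocated` flag (see
// lsan_allocator.h). It is set last on allocation and cleared first on
// deallocation, via an atomic store, so a concurrent leak scan never sees a
// chunk whose tag, stack trace id, or size is only partially written.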
static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack);
  m->requested_size = size;
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
  RunMallocHooks(p, size);
}

static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  RunFreeHooks(p);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}

static void *ReportAllocationSizeTooBig(uptr size, const StackTrace &stack) {
  if (AllocatorMayReturnNull()) {
    Report("WARNING: LeakSanitizer failed to allocate 0x%zx bytes\n", size);
    return nullptr;
  }
  ReportAllocationSizeTooBig(size, max_malloc_size, &stack);
}
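// Zero-size requests are rounded up to one byte so that every successful
// allocation produces a real chunk with metadata. Chunks from the secondary
// allocator come directly from mmap and are therefore already zeroed; only
// primary chunks need the explicit memset below.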
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > max_malloc_size)
    return ReportAllocationSizeTooBig(size, stack);
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportRssLimitExceeded(&stack);
  }
  void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, &stack);
  }
  // Do not rely on the allocator to clear the memory (it's slow).
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
  RegisterAllocation(stack, p, size);
  return p;
}

static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, &stack);
  }
  size *= nmemb;
  return Allocate(stack, size, 1, true);
}

void Deallocate(void *p) {
  RegisterDeallocation(p);
  allocator.Deallocate(GetAllocatorCache(), p);
}

void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  if (new_size > max_malloc_size) {
    ReportAllocationSizeTooBig(new_size, stack);
    return nullptr;
  }
  RegisterDeallocation(p);
  void *new_p =
      allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
  if (new_p)
    RegisterAllocation(stack, new_p, new_size);
  else if (new_size != 0)
    RegisterAllocation(stack, p, new_size);
  return new_p;
}

void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)GetAllocatorCache();
  *end = *begin + sizeof(AllocatorCache);
}

static const void *GetMallocBegin(const void *p) {
  if (!p)
    return nullptr;
  void *beg = allocator.GetBlockBegin(p);
  if (!beg)
    return nullptr;
  ChunkMetadata *m = Metadata(beg);
  if (!m)
    return nullptr;
  if (!m->allocated)
    return nullptr;
  if (m->requested_size == 0)
    return nullptr;
  return (const void *)beg;
}

uptr GetMallocUsableSize(const void *p) {
  if (!p)
    return 0;
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

uptr GetMallocUsableSizeFast(const void *p) {
  return Metadata(p)->requested_size;
}

int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        const StackTrace &stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, &stack);
  }
  void *ptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
  }
  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}

void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, &stack);
  }
  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}

void *lsan_malloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Allocate(stack, size, 1, kAlwaysClearMemory));
}

void lsan_free(void *p) {
  Deallocate(p);
}

void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Reallocate(stack, p, size, 1));
}

void *lsan_reallocarray(void *ptr, uptr nmemb, uptr size,
                        const StackTrace &stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, &stack);
  }
  return lsan_realloc(ptr, nmemb * size, stack);
}

void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Calloc(nmemb, size, stack));
}

void *lsan_valloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(
      Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
}

void *lsan_pvalloc(uptr size, const StackTrace &stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, &stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(Allocate(stack, size, PageSize, kAlwaysClearMemory));
}

uptr lsan_mz_size(const void *p) {
  return GetMallocUsableSize(p);
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}
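// Used by the common LSan scanning code: returns the chunk start address if
// `p` points anywhere into a live user chunk (interior pointers count), or 0
// otherwise.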
uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (!m->allocated)
    return 0;
  if (addr < chunk + m->requested_size)
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  return chunk;
}

uptr GetUserAddr(uptr chunk) {
  return chunk;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObject(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}

} // namespace __lsan

using namespace __lsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_free_bytes() { return 1; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_unmapped_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_ownership(const void *p) {
  return GetMallocBegin(p) != nullptr;
}

SANITIZER_INTERFACE_ATTRIBUTE
const void * __sanitizer_get_allocated_begin(const void *p) {
  return GetMallocBegin(p);
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
  return GetMallocUsableSize(p);
}
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size_fast(const void *p) {
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = GetMallocUsableSizeFast(p);
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}

SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); }

} // extern "C"
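
A minimal usage sketch, not part of this file: the extern "C" functions above implement the public interface declared in sanitizer/allocator_interface.h, so a program built with -fsanitize=leak can query the LSan heap at run time. The main below is illustrative only; the header and the __sanitizer_* calls are the real compiler-rt API.

    #include <sanitizer/allocator_interface.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main() {
      void *p = malloc(100);  // intercepted; ends up in Allocate() above
      // Nonzero for pointers owned by the LSan allocator (GetMallocBegin).
      printf("owned: %d\n", __sanitizer_get_ownership(p));
      // Reports ChunkMetadata::requested_size, i.e. the 100 bytes asked for.
      printf("size:  %zu\n", __sanitizer_get_allocated_size(p));
      // Sum of live allocations (AllocatorStatAllocated).
      printf("live:  %zu\n", __sanitizer_get_current_allocated_bytes());
      free(p);
      return 0;
    }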