Path: blob/main/contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.cpp
//===-- memprof_allocator.cpp --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemProfiler, a memory profiler.
//
// Implementation of MemProf's memory allocator, which uses the allocator
// from sanitizer_common.
//
//===----------------------------------------------------------------------===//

#include "memprof_allocator.h"
#include "memprof_mapping.h"
#include "memprof_mibmap.h"
#include "memprof_rawprofile.h"
#include "memprof_stack.h"
#include "memprof_thread.h"
#include "profile/MemProfData.inc"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_array_ref.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"

#include <sched.h>
#include <time.h>

#define MAX_HISTOGRAM_PRINT_SIZE 32U

extern bool __memprof_histogram;

namespace __memprof {
namespace {
using ::llvm::memprof::MemInfoBlock;

void Print(const MemInfoBlock &M, const u64 id, bool print_terse) {
  u64 p;

  if (print_terse) {
    p = M.TotalSize * 100 / M.AllocCount;
    Printf("MIB:%llu/%u/%llu.%02llu/%u/%u/", id, M.AllocCount, p / 100,
           p % 100, M.MinSize, M.MaxSize);
    p = M.TotalAccessCount * 100 / M.AllocCount;
    Printf("%llu.%02llu/%llu/%llu/", p / 100, p % 100, M.MinAccessCount,
           M.MaxAccessCount);
    p = M.TotalLifetime * 100 / M.AllocCount;
    Printf("%llu.%02llu/%u/%u/", p / 100, p % 100, M.MinLifetime,
           M.MaxLifetime);
    Printf("%u/%u/%u/%u\n", M.NumMigratedCpu, M.NumLifetimeOverlaps,
           M.NumSameAllocCpu, M.NumSameDeallocCpu);
  } else {
    p = M.TotalSize * 100 / M.AllocCount;
    Printf("Memory allocation stack id = %llu\n", id);
    Printf("\talloc_count %u, size (ave/min/max) %llu.%02llu / %u / %u\n",
           M.AllocCount, p / 100, p % 100, M.MinSize, M.MaxSize);
    p = M.TotalAccessCount * 100 / M.AllocCount;
    Printf("\taccess_count (ave/min/max): %llu.%02llu / %llu / %llu\n", p / 100,
           p % 100, M.MinAccessCount, M.MaxAccessCount);
    p = M.TotalLifetime * 100 / M.AllocCount;
    Printf("\tlifetime (ave/min/max): %llu.%02llu / %u / %u\n", p / 100,
           p % 100, M.MinLifetime, M.MaxLifetime);
    Printf("\tnum migrated: %u, num lifetime overlaps: %u, num same alloc "
           "cpu: %u, num same dealloc_cpu: %u\n",
           M.NumMigratedCpu, M.NumLifetimeOverlaps, M.NumSameAllocCpu,
           M.NumSameDeallocCpu);
    Printf("AccessCountHistogram[%u]: ", M.AccessHistogramSize);
    uint32_t PrintSize = M.AccessHistogramSize > MAX_HISTOGRAM_PRINT_SIZE
                             ? MAX_HISTOGRAM_PRINT_SIZE
                             : M.AccessHistogramSize;
    for (size_t i = 0; i < PrintSize; ++i) {
      Printf("%llu ", ((uint64_t *)M.AccessHistogram)[i]);
    }
    Printf("\n");
  }
}
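
// For reference, a terse record as produced by the format strings in Print()
// above is a single slash-separated line per allocation stack id, with
// averages printed as <whole>.<two-digit fraction>:
//   MIB:<stack id>/<alloc count>/<ave size>/<min size>/<max size>/
//       <ave access count>/<min access count>/<max access count>/
//       <ave lifetime>/<min lifetime>/<max lifetime>/
//       <num migrated cpu>/<num lifetime overlaps>/<num same alloc cpu>/
//       <num same dealloc cpu>
// (The field names in angle brackets are descriptive only; they are not part
// of the emitted output.)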
} // namespace

static int GetCpuId(void) {
  // _memprof_preinit is called via the preinit_array, which subsequently calls
  // malloc. Since this is before _dl_init calls VDSO_SETUP, sched_getcpu
  // will seg fault as the address of __vdso_getcpu will be null.
  if (!memprof_inited)
    return -1;
  return sched_getcpu();
}

// Compute the timestamp in ms.
static int GetTimestamp(void) {
  // timespec_get will segfault if called from dl_init
  if (!memprof_timestamp_inited) {
    // By returning 0, this will be effectively treated as being
    // timestamped at memprof init time (when memprof_init_timestamp_s
    // is initialized).
    return 0;
  }
  timespec ts;
  clock_gettime(CLOCK_REALTIME, &ts);
  return (ts.tv_sec - memprof_init_timestamp_s) * 1000 + ts.tv_nsec / 1000000;
}

static MemprofAllocator &get_allocator();

// The memory chunk allocated from the underlying allocator looks like this:
// H H U U U U U U
//   H -- ChunkHeader (32 bytes)
//   U -- user memory.

// If there is left padding before the ChunkHeader (due to use of memalign),
// we store a magic value in the first uptr word of the memory block and
// store the address of ChunkHeader in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ---------------------|
//   M -- magic value kAllocBegMagic
//   B -- address of ChunkHeader pointing to the first 'H'

constexpr uptr kMaxAllowedMallocBits = 40;

// Should be no more than 32-bytes
struct ChunkHeader {
  // 1-st 4 bytes.
  u32 alloc_context_id;
  // 2-nd 4 bytes
  u32 cpu_id;
  // 3-rd 4 bytes
  u32 timestamp_ms;
  // 4-th 4 bytes
  // Note only 1 bit is needed for this flag if we need space in the future for
  // more fields.
  u32 from_memalign;
  // 5-th and 6-th 4 bytes
  // The max size of an allocation is 2^40 (kMaxAllowedMallocSize), so this
  // could be shrunk to kMaxAllowedMallocBits if we need space in the future
  // for more fields.
  atomic_uint64_t user_requested_size;
  // 23 bits available
  // 7-th and 8-th 4 bytes
  u64 data_type_id; // TODO: hash of type name
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
COMPILER_CHECK(kChunkHeaderSize == 32);

struct MemprofChunk : ChunkHeader {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize() {
    return atomic_load(&user_requested_size, memory_order_relaxed);
  }
  void *AllocBeg() {
    if (from_memalign)
      return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
    return reinterpret_cast<void *>(this);
  }
};

class LargeChunkHeader {
  static constexpr uptr kAllocBegMagic =
      FIRST_32_SECOND_64(0xCC6E96B9, 0xCC6E96B9CC6E96B9ULL);
  atomic_uintptr_t magic;
  MemprofChunk *chunk_header;

public:
  MemprofChunk *Get() const {
    return atomic_load(&magic, memory_order_acquire) == kAllocBegMagic
               ? chunk_header
               : nullptr;
  }

  void Set(MemprofChunk *p) {
    if (p) {
      chunk_header = p;
      atomic_store(&magic, kAllocBegMagic, memory_order_release);
      return;
    }

    uptr old = kAllocBegMagic;
    if (!atomic_compare_exchange_strong(&magic, &old, 0,
                                        memory_order_release)) {
      CHECK_EQ(old, kAllocBegMagic);
    }
  }
};

void FlushUnneededMemProfShadowMemory(uptr p, uptr size) {
  // Since memprof's mapping is compacting, the shadow chunk may be
  // not page-aligned, so we only flush the page-aligned portion.
  ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
}

void MemprofMapUnmapCallback::OnMap(uptr p, uptr size) const {
  // Statistics.
  MemprofStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}

void MemprofMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
  // We are about to unmap a chunk of user memory.
  // Mark the corresponding shadow memory as not needed.
  FlushUnneededMemProfShadowMemory(p, size);
  // Statistics.
  MemprofStats &thread_stats = GetCurrentThreadStats();
  thread_stats.munmaps++;
  thread_stats.munmaped += size;
}

AllocatorCache *GetAllocatorCache(MemprofThreadLocalMallocStorage *ms) {
  CHECK(ms);
  return &ms->allocator_cache;
}

// Accumulates the access count from the shadow for the given pointer and size.
u64 GetShadowCount(uptr p, u32 size) {
  u64 *shadow = (u64 *)MEM_TO_SHADOW(p);
  u64 *shadow_end = (u64 *)MEM_TO_SHADOW(p + size);
  u64 count = 0;
  for (; shadow <= shadow_end; shadow++)
    count += *shadow;
  return count;
}

// Accumulates the access count from the shadow for the given pointer and size.
// See memprof_mapping.h for an overview on histogram counters.
u64 GetShadowCountHistogram(uptr p, u32 size) {
  u8 *shadow = (u8 *)HISTOGRAM_MEM_TO_SHADOW(p);
  u8 *shadow_end = (u8 *)HISTOGRAM_MEM_TO_SHADOW(p + size);
  u64 count = 0;
  for (; shadow <= shadow_end; shadow++)
    count += *shadow;
  return count;
}

// Clears the shadow counters (when memory is allocated).
void ClearShadow(uptr addr, uptr size) {
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  CHECK(AddrIsAlignedByGranularity(addr + size));
  CHECK(AddrIsInMem(addr + size - SHADOW_GRANULARITY));
  CHECK(REAL(memset));
  uptr shadow_beg;
  uptr shadow_end;
  if (__memprof_histogram) {
    shadow_beg = HISTOGRAM_MEM_TO_SHADOW(addr);
    shadow_end = HISTOGRAM_MEM_TO_SHADOW(addr + size);
  } else {
    shadow_beg = MEM_TO_SHADOW(addr);
    shadow_end = MEM_TO_SHADOW(addr + size - SHADOW_GRANULARITY) + 1;
  }

  if (shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) {
    REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg);
  } else {
    uptr page_size = GetPageSizeCached();
    uptr page_beg = RoundUpTo(shadow_beg, page_size);
    uptr page_end = RoundDownTo(shadow_end, page_size);

    if (page_beg >= page_end) {
      REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg);
    } else {
      if (page_beg != shadow_beg) {
        REAL(memset)((void *)shadow_beg, 0, page_beg - shadow_beg);
      }
      if (page_end != shadow_end) {
        REAL(memset)((void *)page_end, 0, shadow_end - page_end);
      }
      ReserveShadowMemoryRange(page_beg, page_end - 1, nullptr);
    }
  }
}

struct Allocator {
  static const uptr kMaxAllowedMallocSize = 1ULL << kMaxAllowedMallocBits;

  MemprofAllocator allocator;
  StaticSpinMutex fallback_mutex;
  AllocatorCache fallback_allocator_cache;

  uptr max_user_defined_malloc_size;

  // Holds the mapping of stack ids to MemInfoBlocks.
  MIBMapTy MIBMap;

  atomic_uint8_t destructing;
  atomic_uint8_t constructed;
  bool print_text;

  // ------------------- Initialization ------------------------
  explicit Allocator(LinkerInitialized) : print_text(flags()->print_text) {
    atomic_store_relaxed(&destructing, 0);
    atomic_store_relaxed(&constructed, 1);
  }

  ~Allocator() {
    atomic_store_relaxed(&destructing, 1);
    FinishAndWrite();
  }

  static void PrintCallback(const uptr Key, LockedMemInfoBlock *const &Value,
                            void *Arg) {
    SpinMutexLock l(&Value->mutex);
    Print(Value->mib, Key, bool(Arg));
  }

  // See memprof_mapping.h for an overview on histogram counters.
  static MemInfoBlock CreateNewMIB(uptr p, MemprofChunk *m, u64 user_size) {
    if (__memprof_histogram) {
      return CreateNewMIBWithHistogram(p, m, user_size);
    } else {
      return CreateNewMIBWithoutHistogram(p, m, user_size);
    }
  }

  static MemInfoBlock CreateNewMIBWithHistogram(uptr p, MemprofChunk *m,
                                                u64 user_size) {

    u64 c = GetShadowCountHistogram(p, user_size);
    long curtime = GetTimestamp();
    uint32_t HistogramSize =
        RoundUpTo(user_size, HISTOGRAM_GRANULARITY) / HISTOGRAM_GRANULARITY;
    uintptr_t Histogram =
        (uintptr_t)InternalAlloc(HistogramSize * sizeof(uint64_t));
    memset((void *)Histogram, 0, HistogramSize * sizeof(uint64_t));
    for (size_t i = 0; i < HistogramSize; ++i) {
      u8 Counter =
          *((u8 *)HISTOGRAM_MEM_TO_SHADOW(p + HISTOGRAM_GRANULARITY * i));
      ((uint64_t *)Histogram)[i] = (uint64_t)Counter;
    }
    MemInfoBlock newMIB(user_size, c, m->timestamp_ms, curtime, m->cpu_id,
                        GetCpuId(), Histogram, HistogramSize);
    return newMIB;
  }

  static MemInfoBlock CreateNewMIBWithoutHistogram(uptr p, MemprofChunk *m,
                                                   u64 user_size) {
    u64 c = GetShadowCount(p, user_size);
    long curtime = GetTimestamp();
    MemInfoBlock newMIB(user_size, c, m->timestamp_ms, curtime, m->cpu_id,
                        GetCpuId(), 0, 0);
    return newMIB;
  }

  void FinishAndWrite() {
    if (print_text && common_flags()->print_module_map)
      DumpProcessMap();

    allocator.ForceLock();

    InsertLiveBlocks();
    if (print_text) {
      if (!flags()->print_terse)
        Printf("Recorded MIBs (incl. live on exit):\n");
      MIBMap.ForEach(PrintCallback,
                     reinterpret_cast<void *>(flags()->print_terse));
      StackDepotPrintAll();
    } else {
      // Serialize the contents to a raw profile. Format documented in
      // memprof_rawprofile.h.
      char *Buffer = nullptr;

      __sanitizer::ListOfModules List;
      List.init();
      ArrayRef<LoadedModule> Modules(List.begin(), List.end());
      u64 BytesSerialized = SerializeToRawProfile(MIBMap, Modules, Buffer);
      CHECK(Buffer && BytesSerialized && "could not serialize to buffer");
      report_file.Write(Buffer, BytesSerialized);
    }

    allocator.ForceUnlock();
  }

  // Inserts any blocks which have been allocated but not yet deallocated.
  void InsertLiveBlocks() {
    allocator.ForEachChunk(
        [](uptr chunk, void *alloc) {
          u64 user_requested_size;
          Allocator *A = (Allocator *)alloc;
          MemprofChunk *m =
              A->GetMemprofChunk((void *)chunk, user_requested_size);
          if (!m)
            return;
          uptr user_beg = ((uptr)m) + kChunkHeaderSize;
          MemInfoBlock newMIB = CreateNewMIB(user_beg, m, user_requested_size);
          InsertOrMerge(m->alloc_context_id, newMIB, A->MIBMap);
        },
        this);
  }

  void InitLinkerInitialized() {
    SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
    allocator.InitLinkerInitialized(
        common_flags()->allocator_release_to_os_interval_ms);
    max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
                                       ? common_flags()->max_allocation_size_mb
                                             << 20
                                       : kMaxAllowedMallocSize;
  }

  // -------------------- Allocation/Deallocation routines ---------------
  void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
                 AllocType alloc_type) {
    if (UNLIKELY(!memprof_inited))
      MemprofInitFromRtl();
    if (UNLIKELY(IsRssLimitExceeded())) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportRssLimitExceeded(stack);
    }
    CHECK(stack);
    const uptr min_alignment = MEMPROF_ALIGNMENT;
    if (alignment < min_alignment)
      alignment = min_alignment;
    if (size == 0) {
      // We'd be happy to avoid allocating memory for zero-size requests, but
      // some programs/tests depend on this behavior and assume that malloc
      // would not return NULL even for zero-size allocations.
      // Moreover, it looks like operator new should never return NULL, and
      // results of consecutive "new" calls must be different even if the
      // allocated size is zero.
      size = 1;
    }
    CHECK(IsPowerOfTwo(alignment));
    uptr rounded_size = RoundUpTo(size, alignment);
    uptr needed_size = rounded_size + kChunkHeaderSize;
    if (alignment > min_alignment)
      needed_size += alignment;
    CHECK(IsAligned(needed_size, min_alignment));
    if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
        size > max_user_defined_malloc_size) {
      if (AllocatorMayReturnNull()) {
        Report("WARNING: MemProfiler failed to allocate 0x%zx bytes\n", size);
        return nullptr;
      }
      uptr malloc_limit =
          Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
      ReportAllocationSizeTooBig(size, malloc_limit, stack);
    }

    MemprofThread *t = GetCurrentThread();
    void *allocated;
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocated = allocator.Allocate(cache, needed_size, 8);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocated = allocator.Allocate(cache, needed_size, 8);
    }
    if (UNLIKELY(!allocated)) {
      SetAllocatorOutOfMemory();
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportOutOfMemory(size, stack);
    }

    uptr alloc_beg = reinterpret_cast<uptr>(allocated);
    uptr alloc_end = alloc_beg + needed_size;
    uptr beg_plus_header = alloc_beg + kChunkHeaderSize;
    uptr user_beg = beg_plus_header;
    if (!IsAligned(user_beg, alignment))
      user_beg = RoundUpTo(user_beg, alignment);
    uptr user_end = user_beg + size;
    CHECK_LE(user_end, alloc_end);
    uptr chunk_beg = user_beg - kChunkHeaderSize;
    MemprofChunk *m = reinterpret_cast<MemprofChunk *>(chunk_beg);
    m->from_memalign = alloc_beg != chunk_beg;
    CHECK(size);

    m->cpu_id = GetCpuId();
    m->timestamp_ms = GetTimestamp();
    m->alloc_context_id = StackDepotPut(*stack);

    uptr size_rounded_down_to_granularity =
        RoundDownTo(size, SHADOW_GRANULARITY);
    if (size_rounded_down_to_granularity)
      ClearShadow(user_beg, size_rounded_down_to_granularity);

    MemprofStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mallocs++;
    thread_stats.malloced += size;
    thread_stats.malloced_overhead += needed_size - size;
    if (needed_size > SizeClassMap::kMaxSize)
      thread_stats.malloc_large++;
    else
      thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;

    void *res = reinterpret_cast<void *>(user_beg);
    atomic_store(&m->user_requested_size, size, memory_order_release);
    if (alloc_beg != chunk_beg) {
      CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg);
      reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(m);
    }
    RunMallocHooks(res, size);
    return res;
  }

  void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
                  BufferedStackTrace *stack, AllocType alloc_type) {
    uptr p = reinterpret_cast<uptr>(ptr);
    if (p == 0)
      return;

    RunFreeHooks(ptr);

    uptr chunk_beg = p - kChunkHeaderSize;
    MemprofChunk *m = reinterpret_cast<MemprofChunk *>(chunk_beg);

    u64 user_requested_size =
        atomic_exchange(&m->user_requested_size, 0, memory_order_acquire);
    if (memprof_inited && atomic_load_relaxed(&constructed) &&
        !atomic_load_relaxed(&destructing)) {
      MemInfoBlock newMIB = this->CreateNewMIB(p, m, user_requested_size);
      InsertOrMerge(m->alloc_context_id, newMIB, MIBMap);
    }

    MemprofStats &thread_stats = GetCurrentThreadStats();
    thread_stats.frees++;
    thread_stats.freed += user_requested_size;

    void *alloc_beg = m->AllocBeg();
    if (alloc_beg != m) {
      // Clear the magic value, as allocator internals may overwrite the
      // contents of deallocated chunk, confusing GetMemprofChunk lookup.
      reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(nullptr);
    }

    MemprofThread *t = GetCurrentThread();
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocator.Deallocate(cache, alloc_beg);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocator.Deallocate(cache, alloc_beg);
    }
  }

  void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
    CHECK(old_ptr && new_size);
    uptr p = reinterpret_cast<uptr>(old_ptr);
    uptr chunk_beg = p - kChunkHeaderSize;
    MemprofChunk *m = reinterpret_cast<MemprofChunk *>(chunk_beg);

    MemprofStats &thread_stats = GetCurrentThreadStats();
    thread_stats.reallocs++;
    thread_stats.realloced += new_size;

    void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC);
    if (new_ptr) {
      CHECK_NE(REAL(memcpy), nullptr);
      uptr memcpy_size = Min(new_size, m->UsedSize());
      REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
      Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
    }
    return new_ptr;
  }

  void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
    if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportCallocOverflow(nmemb, size, stack);
    }
    void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC);
    // If the memory comes from the secondary allocator no need to clear it
    // as it comes directly from mmap.
    if (ptr && allocator.FromPrimary(ptr))
      REAL(memset)(ptr, 0, nmemb * size);
    return ptr;
  }

  void CommitBack(MemprofThreadLocalMallocStorage *ms) {
    AllocatorCache *ac = GetAllocatorCache(ms);
    allocator.SwallowCache(ac);
  }

  // -------------------------- Chunk lookup ----------------------

  // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
  MemprofChunk *GetMemprofChunk(void *alloc_beg, u64 &user_requested_size) {
    if (!alloc_beg)
      return nullptr;
    MemprofChunk *p = reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Get();
    if (!p) {
      if (!allocator.FromPrimary(alloc_beg))
        return nullptr;
      p = reinterpret_cast<MemprofChunk *>(alloc_beg);
    }
    // The size is reset to 0 on deallocation (and a min of 1 on
    // allocation).
    user_requested_size =
        atomic_load(&p->user_requested_size, memory_order_acquire);
    if (user_requested_size)
      return p;
    return nullptr;
  }

  MemprofChunk *GetMemprofChunkByAddr(uptr p, u64 &user_requested_size) {
    void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
    return GetMemprofChunk(alloc_beg, user_requested_size);
  }

  uptr AllocationSize(uptr p) {
    u64 user_requested_size;
    MemprofChunk *m = GetMemprofChunkByAddr(p, user_requested_size);
    if (!m)
      return 0;
    if (m->Beg() != p)
      return 0;
    return user_requested_size;
  }

  uptr AllocationSizeFast(uptr p) {
    return reinterpret_cast<MemprofChunk *>(p - kChunkHeaderSize)->UsedSize();
  }

  void Purge() { allocator.ForceReleaseToOS(); }

  void PrintStats() { allocator.PrintStats(); }

  void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    allocator.ForceLock();
    fallback_mutex.Lock();
  }

  void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    fallback_mutex.Unlock();
    allocator.ForceUnlock();
  }
};

static Allocator instance(LINKER_INITIALIZED);

static MemprofAllocator &get_allocator() { return instance.allocator; }

void InitializeAllocator() { instance.InitLinkerInitialized(); }

void MemprofThreadLocalMallocStorage::CommitBack() {
  instance.CommitBack(this);
}

void PrintInternalAllocatorStats() { instance.PrintStats(); }

void memprof_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, 0, 0, stack, alloc_type);
}

void memprof_delete(void *ptr, uptr size, uptr alignment,
                    BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, size, alignment, stack, alloc_type);
}

void *memprof_malloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC));
}

void *memprof_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
}

void *memprof_reallocarray(void *p, uptr nmemb, uptr size,
                           BufferedStackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return memprof_realloc(p, nmemb * size, stack);
}

void *memprof_realloc(void *p, uptr size, BufferedStackTrace *stack) {
  if (!p)
    return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC));
  if (size == 0) {
    if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
      instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
      return nullptr;
    }
    // Allocate a size of 1 if we shouldn't free() on Realloc to 0
    size = 1;
  }
  return SetErrnoOnNull(instance.Reallocate(p, size, stack));
}

void *memprof_valloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(
      instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC));
}

void *memprof_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(instance.Allocate(size, PageSize, stack, FROM_MALLOC));
}

void *memprof_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                       AllocType alloc_type) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(instance.Allocate(size, alignment, stack, alloc_type));
}

void *memprof_aligned_alloc(uptr alignment, uptr size,
                            BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(instance.Allocate(size, alignment, stack, FROM_MALLOC));
}

int memprof_posix_memalign(void **memptr, uptr alignment, uptr size,
                           BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

static const void *memprof_malloc_begin(const void *p) {
  u64 user_requested_size;
  MemprofChunk *m =
      instance.GetMemprofChunkByAddr((uptr)p, user_requested_size);
  if (!m)
    return nullptr;
  if (user_requested_size == 0)
    return nullptr;

  return (const void *)m->Beg();
}

uptr memprof_malloc_usable_size(const void *ptr) {
  if (!ptr)
    return 0;
  uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
  return usable_size;
}

} // namespace __memprof

// ---------------------- Interface ---------------- {{{1
using namespace __memprof;

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) {
  return memprof_malloc_usable_size(p) != 0;
}

const void *__sanitizer_get_allocated_begin(const void *p) {
  return memprof_malloc_begin(p);
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return memprof_malloc_usable_size(p);
}

uptr __sanitizer_get_allocated_size_fast(const void *p) {
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = instance.AllocationSizeFast(reinterpret_cast<uptr>(p));
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}

void __sanitizer_purge_allocator() { instance.Purge(); }

int __memprof_profile_dump() {
  instance.FinishAndWrite();
  // In the future we may want to return non-zero if there are any errors
  // detected during the dumping process.
  return 0;
}

void __memprof_profile_reset() {
  if (report_file.fd != kInvalidFd && report_file.fd != kStdoutFd &&
      report_file.fd != kStderrFd) {
    CloseFile(report_file.fd);
    // Setting the file descriptor to kInvalidFd ensures that we will reopen
    // the file when invoking Write again.
    report_file.fd = kInvalidFd;
  }
}
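
// Illustrative usage sketch (not part of the runtime itself): a profiled
// program can flush the data accumulated above without exiting by calling the
// dump entry point. The forward declaration below is an assumption about how
// a client might spell it; the canonical declaration lives in the MemProf
// interface headers.
//
//   extern "C" int __memprof_profile_dump();
//
//   void FlushMemprofProfile() {
//     // Writes either the text report or the serialized raw profile
//     // (depending on the print_text flag) to the report file.
//     (void)__memprof_profile_dump();
//   }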