// Source: contrib/llvm-project/compiler-rt/lib/memprof/memprof_stats.cpp
//===-- memprof_stats.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemProfiler, a memory profiler.
//
// Code related to statistics collected by MemProfiler.
//===----------------------------------------------------------------------===//
#include "memprof_stats.h"
#include "memprof_interceptors.h"
#include "memprof_internal.h"
#include "memprof_thread.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_stackdepot.h"

namespace __memprof {

MemprofStats::MemprofStats() { Clear(); }

// Zero every counter. Prefer the intercepted (real) memset once the
// interceptor table is populated; fall back to internal_memset during early
// startup, before REAL(memset) has been resolved.
void MemprofStats::Clear() {
  if (REAL(memset))
    return (void)REAL(memset)(this, 0, sizeof(MemprofStats));
  internal_memset(this, 0, sizeof(MemprofStats));
}

// Print "index:count; " for each non-empty malloc size class, prefixed by
// |prefix| and terminated with a newline.
static void PrintMallocStatsArray(const char *prefix,
                                  uptr (&array)[kNumberOfSizeClasses]) {
  Printf("%s", prefix);
  for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
    if (!array[i])
      continue;
    Printf("%zu:%zu; ", i, array[i]);
  }
  Printf("\n");
}

// Dump all counters in human-readable form. Byte counters are reported in
// MiB (>> 20). Callers are expected to serialize output themselves (see
// print_lock below).
void MemprofStats::Print() {
  Printf("Stats: %zuM malloced (%zuM for overhead) by %zu calls\n",
         malloced >> 20, malloced_overhead >> 20, mallocs);
  Printf("Stats: %zuM realloced by %zu calls\n", realloced >> 20, reallocs);
  Printf("Stats: %zuM freed by %zu calls\n", freed >> 20, frees);
  Printf("Stats: %zuM really freed by %zu calls\n", really_freed >> 20,
         real_frees);
  Printf("Stats: %zuM (%zuM-%zuM) mmaped; %zu maps, %zu unmaps\n",
         (mmaped - munmaped) >> 20, mmaped >> 20, munmaped >> 20, mmaps,
         munmaps);

  PrintMallocStatsArray(" mallocs by size class: ", malloced_by_size);
  Printf("Stats: malloc large: %zu\n", malloc_large);
}

// Element-wise accumulation of |stats| into *this. Treats the object as a
// flat array of uptr counters; this assumes every data member of
// MemprofStats is a uptr (or array thereof) — confirm against
// memprof_stats.h if members are ever added.
void MemprofStats::MergeFrom(const MemprofStats *stats) {
  uptr *dst_ptr = reinterpret_cast<uptr *>(this);
  const uptr *src_ptr = reinterpret_cast<const uptr *>(stats);
  uptr num_fields = sizeof(*this) / sizeof(uptr);
  for (uptr i = 0; i < num_fields; i++)
    dst_ptr[i] += src_ptr[i];
}

// Serializes stat reports so concurrent dumps do not interleave.
static Mutex print_lock;

// Stats for allocations made before a MemprofThread exists for the caller.
static MemprofStats unknown_thread_stats(LINKER_INITIALIZED);
// Accumulated stats of threads that have already exited; guarded by
// dead_threads_stats_lock.
static MemprofStats dead_threads_stats(LINKER_INITIALIZED);
static Mutex dead_threads_stats_lock;
// Required for malloc_zone_statistics() on OS X. This can't be stored in
// per-thread MemprofStats.
static uptr max_malloced_memory;

// ThreadRegistry callback: fold one live thread's stats into the
// accumulator passed via |arg|.
static void MergeThreadStats(ThreadContextBase *tctx_base, void *arg) {
  MemprofStats *accumulated_stats = reinterpret_cast<MemprofStats *>(arg);
  MemprofThreadContext *tctx = static_cast<MemprofThreadContext *>(tctx_base);
  if (MemprofThread *t = tctx->thread)
    accumulated_stats->MergeFrom(&t->stats());
}

// Snapshot the process-wide totals into *stats: all live threads (under the
// registry lock), pre-thread allocations, and exited threads. Also tracks
// the high-water mark of malloced memory.
static void GetAccumulatedStats(MemprofStats *stats) {
  stats->Clear();
  {
    ThreadRegistryLock l(&memprofThreadRegistry());
    memprofThreadRegistry().RunCallbackForEachThreadLocked(MergeThreadStats,
                                                           stats);
  }
  stats->MergeFrom(&unknown_thread_stats);
  {
    Lock lock(&dead_threads_stats_lock);
    stats->MergeFrom(&dead_threads_stats);
  }
  // This is not very accurate: we may miss allocation peaks that happen
  // between two updates of accumulated_stats_. For more accurate bookkeeping
  // the maximum should be updated on every malloc(), which is unacceptable.
  if (max_malloced_memory < stats->malloced) {
    max_malloced_memory = stats->malloced;
  }
}

// Called when a thread dies: move its counters into dead_threads_stats so
// they survive the thread, then reset the per-thread object.
void FlushToDeadThreadStats(MemprofStats *stats) {
  Lock lock(&dead_threads_stats_lock);
  dead_threads_stats.MergeFrom(stats);
  stats->Clear();
}

// Returns the stats object allocations should be charged to: the current
// thread's, or the shared unknown_thread_stats before thread init.
MemprofStats &GetCurrentThreadStats() {
  MemprofThread *t = GetCurrentThread();
  return (t) ? t->stats() : unknown_thread_stats;
}

// Print a full accumulated-stats report, including stack depot and internal
// allocator statistics.
static void PrintAccumulatedStats() {
  MemprofStats stats;
  GetAccumulatedStats(&stats);
  // Use lock to keep reports from mixing up.
  Lock lock(&print_lock);
  stats.Print();
  StackDepotStats stack_depot_stats = StackDepotGetStats();
  Printf("Stats: StackDepot: %zd ids; %zdM allocated\n",
         stack_depot_stats.n_uniq_ids, stack_depot_stats.allocated >> 20);
  PrintInternalAllocatorStats();
}

} // namespace __memprof

// ---------------------- Interface ---------------- {{{1
using namespace __memprof;

uptr __sanitizer_get_current_allocated_bytes() {
  MemprofStats stats;
  GetAccumulatedStats(&stats);
  uptr malloced = stats.malloced;
  uptr freed = stats.freed;
  // Return sane value if malloced < freed due to racy
  // way we update accumulated stats.
  return (malloced > freed) ? malloced - freed : 1;
}

uptr __sanitizer_get_heap_size() {
  MemprofStats stats;
  GetAccumulatedStats(&stats);
  return stats.mmaped - stats.munmaped;
}

uptr __sanitizer_get_free_bytes() {
  MemprofStats stats;
  GetAccumulatedStats(&stats);
  uptr total_free = stats.mmaped - stats.munmaped + stats.really_freed;
  uptr total_used = stats.malloced;
  // Return sane value if total_free < total_used due to racy
  // way we update accumulated stats.
  return (total_free > total_used) ? total_free - total_used : 1;
}

// MemProf does not track unmapped bytes separately.
uptr __sanitizer_get_unmapped_bytes() { return 0; }

void __memprof_print_accumulated_stats() { PrintAccumulatedStats(); }