Path: blob/main/contrib/llvm-project/compiler-rt/lib/scudo/standalone/local_cache.h
35291 views
//===-- local_cache.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_LOCAL_CACHE_H_
#define SCUDO_LOCAL_CACHE_H_

#include "internal_defs.h"
#include "list.h"
#include "platform.h"
#include "report.h"
#include "stats.h"
#include "string_utils.h"

namespace scudo {

// A local (front-end) cache of free blocks sitting in front of a
// SizeClassAllocator. Blocks are held per size class as compact pointers;
// allocate()/deallocate() serve from the cache and only reach into the
// backing allocator (popBlocks()/pushBlocks()) when a class runs empty or
// full. Local malloc/free statistics are accumulated here and linked into a
// GlobalStats instance on init().
// NOTE(review): instances appear intended to be used by a single owner at a
// time (no internal locking) — confirm against the TSD layer that owns them.
template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
  typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
  typedef typename SizeClassAllocator::CompactPtrT CompactPtrT;

  // Initializes the local stats (linking them to the global ones if S is
  // non-null), records the backing allocator, and sizes each per-class cache.
  // Must be called on an empty cache.
  void init(GlobalStats *S, SizeClassAllocator *A) {
    DCHECK(isEmpty());
    Stats.init();
    if (LIKELY(S))
      S->link(&Stats);
    Allocator = A;
    initCache();
  }

  // Returns all cached blocks to the backing allocator, then unlinks the
  // local stats from the global ones (if S is non-null).
  void destroy(GlobalStats *S) {
    drain();
    if (LIKELY(S))
      S->unlink(&Stats);
  }

  // Returns a block of the given class, refilling the per-class cache from
  // the backing allocator if it is empty. Returns nullptr if the refill
  // yields no blocks (backing allocator exhausted).
  void *allocate(uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    PerClass *C = &PerClassArray[ClassId];
    if (C->Count == 0) {
      // Refill half of the number of max cached.
      DCHECK_GT(C->MaxCount / 2, 0U);
      if (UNLIKELY(!refill(C, ClassId, C->MaxCount / 2)))
        return nullptr;
      DCHECK_GT(C->Count, 0);
    }
    // We read ClassSize first before accessing Chunks because it's adjacent to
    // Count, while Chunks might be further off (depending on Count). That keeps
    // the memory accesses in close quarters.
    const uptr ClassSize = C->ClassSize;
    CompactPtrT CompactP = C->Chunks[--C->Count];
    Stats.add(StatAllocated, ClassSize);
    Stats.sub(StatFree, ClassSize);
    return Allocator->decompactPtr(ClassId, CompactP);
  }

  // Caches the block P under the given class. Returns true if the cache was
  // full and half of it had to be drained back to the backing allocator
  // before P could be stored.
  bool deallocate(uptr ClassId, void *P) {
    CHECK_LT(ClassId, NumClasses);
    PerClass *C = &PerClassArray[ClassId];

    // If the cache is full, drain half of blocks back to the main allocator.
    const bool NeedToDrainCache = C->Count == C->MaxCount;
    if (NeedToDrainCache)
      drain(C, ClassId);
    // See comment in allocate() about memory accesses.
    const uptr ClassSize = C->ClassSize;
    C->Chunks[C->Count++] =
        Allocator->compactPtr(ClassId, reinterpret_cast<uptr>(P));
    Stats.sub(StatAllocated, ClassSize);
    Stats.add(StatFree, ClassSize);

    return NeedToDrainCache;
  }

  // True iff no class currently holds any cached block.
  bool isEmpty() const {
    for (uptr I = 0; I < NumClasses; ++I)
      if (PerClassArray[I].Count)
        return false;
    return true;
  }

  // Flushes every cached block of every class back to the backing allocator.
  void drain() {
    // Drain BatchClassId last as it may be needed while draining normal blocks.
    for (uptr I = 0; I < NumClasses; ++I) {
      if (I == BatchClassId)
        continue;
      while (PerClassArray[I].Count > 0)
        drain(&PerClassArray[I], I);
    }
    while (PerClassArray[BatchClassId].Count > 0)
      drain(&PerClassArray[BatchClassId], BatchClassId);
    DCHECK(isEmpty());
  }

  // Allocates a block from the batch class for the allocator's internal use;
  // unlike allocate(), failure is fatal (reports out-of-memory).
  void *getBatchClassBlock() {
    void *B = allocate(BatchClassId);
    if (UNLIKELY(!B))
      reportOutOfMemory(SizeClassAllocator::getSizeByClassId(BatchClassId));
    return B;
  }

  // Accessor for the local stats, e.g. for linking/aggregation by callers.
  LocalStats &getStats() { return Stats; }

  // Appends a human-readable per-class summary (cached count / max) to Str,
  // skipping classes with nothing cached.
  void getStats(ScopedString *Str) {
    bool EmptyCache = true;
    for (uptr I = 0; I < NumClasses; ++I) {
      if (PerClassArray[I].Count == 0)
        continue;

      EmptyCache = false;
      // The size of BatchClass is set to 0 intentionally. See the comment in
      // initCache() for more details.
      const uptr ClassSize = I == BatchClassId
                                 ? SizeClassAllocator::getSizeByClassId(I)
                                 : PerClassArray[I].ClassSize;
      // Note that the string utils don't support printing u16 thus we cast it
      // to a common use type uptr.
      Str->append("    %02zu (%6zu): cached: %4zu max: %4zu\n", I, ClassSize,
                  static_cast<uptr>(PerClassArray[I].Count),
                  static_cast<uptr>(PerClassArray[I].MaxCount));
    }

    if (EmptyCache)
      Str->append("    No block is cached.\n");
  }

  // Upper bound on the number of blocks cached for a class of the given
  // size: the size-dependent hint, clamped to the global maximum hint.
  static u16 getMaxCached(uptr Size) {
    return Min(SizeClassMap::MaxNumCachedHint,
               SizeClassMap::getMaxCachedHint(Size));
  }

private:
  static const uptr NumClasses = SizeClassMap::NumClasses;
  static const uptr BatchClassId = SizeClassMap::BatchClassId;
  // Per-size-class cache slot, cache-line aligned to keep the hot Count/
  // MaxCount/ClassSize fields of different classes from sharing a line.
  struct alignas(SCUDO_CACHE_LINE_SIZE) PerClass {
    u16 Count;    // Number of blocks currently cached in Chunks.
    u16 MaxCount; // Capacity; set to 2 * getMaxCached(Size) in initCache().
    // Note: ClassSize is zero for the transfer batch.
    uptr ClassSize;
    CompactPtrT Chunks[2 * SizeClassMap::MaxNumCachedHint];
  };
  PerClass PerClassArray[NumClasses] = {};
  LocalStats Stats;
  SizeClassAllocator *Allocator = nullptr;

  // Fills in MaxCount and ClassSize for every class. Kept out of line: only
  // runs once per cache, at init().
  NOINLINE void initCache() {
    for (uptr I = 0; I < NumClasses; I++) {
      PerClass *P = &PerClassArray[I];
      const uptr Size = SizeClassAllocator::getSizeByClassId(I);
      P->MaxCount = static_cast<u16>(2 * getMaxCached(Size));
      if (I != BatchClassId) {
        P->ClassSize = Size;
      } else {
        // ClassSize in this struct is only used for malloc/free stats, which
        // should only track user allocations, not internal movements.
        P->ClassSize = 0;
      }
    }
  }

  // Releases a batch block B back into the batch-class cache.
  // NOTE(review): when ClassId == BatchClassId the block is deliberately NOT
  // recycled here — presumably it is reclaimed by the backing allocator
  // itself; confirm against the SizeClassAllocator implementation.
  void destroyBatch(uptr ClassId, void *B) {
    if (ClassId != BatchClassId)
      deallocate(BatchClassId, B);
  }

  // Pulls up to MaxRefill blocks for ClassId from the backing allocator into
  // C->Chunks. Returns false iff no block could be obtained. Out of line to
  // keep the allocate() fast path small.
  NOINLINE bool refill(PerClass *C, uptr ClassId, u16 MaxRefill) {
    const u16 NumBlocksRefilled =
        Allocator->popBlocks(this, ClassId, C->Chunks, MaxRefill);
    DCHECK_LE(NumBlocksRefilled, MaxRefill);
    C->Count = static_cast<u16>(C->Count + NumBlocksRefilled);
    return NumBlocksRefilled != 0;
  }

  // Pushes up to half of C's capacity (at most its current Count) back to the
  // backing allocator, taking blocks from the front of Chunks and shifting
  // the remainder down — so the most recently cached blocks are kept.
  NOINLINE void drain(PerClass *C, uptr ClassId) {
    const u16 Count = Min(static_cast<u16>(C->MaxCount / 2), C->Count);
    Allocator->pushBlocks(this, ClassId, &C->Chunks[0], Count);
    // u16 will be promoted to int by arithmetic type conversion.
    C->Count = static_cast<u16>(C->Count - Count);
    for (u16 I = 0; I < C->Count; I++)
      C->Chunks[I] = C->Chunks[I + Count];
  }
};

} // namespace scudo

#endif // SCUDO_LOCAL_CACHE_H_