Path: blob/main/contrib/llvm-project/compiler-rt/lib/scudo/standalone/quarantine.h
35292 views
//===-- quarantine.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_QUARANTINE_H_
#define SCUDO_QUARANTINE_H_

#include "list.h"
#include "mutex.h"
#include "string_utils.h"
#include "thread_annotations.h"

namespace scudo {

// A fixed-capacity batch of quarantined pointers, doubling as a singly linked
// list node (via Next). `Size` accounts for the quarantined chunks recorded in
// this batch PLUS sizeof(QuarantineBatch) itself, so that the memory used by
// the batches is counted against the quarantine limit as well.
struct QuarantineBatch {
  // With the following count, a batch (and the header that protects it) occupy
  // 4096 bytes on 32-bit platforms, and 8192 bytes on 64-bit.
  static const u32 MaxCount = 1019;
  QuarantineBatch *Next;
  uptr Size;
  u32 Count;
  void *Batch[MaxCount];

  // Initializes the batch with a single quarantined pointer of size `Size`.
  void init(void *Ptr, uptr Size) {
    Count = 1;
    Batch[0] = Ptr;
    this->Size = Size + sizeof(QuarantineBatch); // Account for the Batch Size.
  }

  // The total size of quarantined nodes recorded in this batch.
  uptr getQuarantinedSize() const { return Size - sizeof(QuarantineBatch); }

  // Appends one pointer; the caller must ensure the batch is not full.
  void push_back(void *Ptr, uptr Size) {
    DCHECK_LT(Count, MaxCount);
    Batch[Count++] = Ptr;
    this->Size += Size;
  }

  // True when every pointer in `From` fits into this batch's free slots.
  bool canMerge(const QuarantineBatch *const From) const {
    return Count + From->Count <= MaxCount;
  }

  // Moves all pointers (and their accounted size) from `From` into this batch,
  // leaving `From` empty (its Size reset to just its own header overhead).
  void merge(QuarantineBatch *const From) {
    DCHECK_LE(Count + From->Count, MaxCount);
    DCHECK_GE(Size, sizeof(QuarantineBatch));

    for (uptr I = 0; I < From->Count; ++I)
      Batch[Count + I] = From->Batch[I];
    Count += From->Count;
    Size += From->getQuarantinedSize();

    From->Count = 0;
    From->Size = sizeof(QuarantineBatch);
  }

  // Randomly permutes the recorded pointers, seeded with `State`.
  void shuffle(u32 State) { ::scudo::shuffle(Batch, Count, &State); }
};

static_assert(sizeof(QuarantineBatch) <= (1U << 13), ""); // 8Kb.

// Per-thread cache of memory blocks.
//
// Holds a singly linked list of QuarantineBatches plus a running byte total.
// NOTE(review): Size updates use relaxed plain load+store rather than an
// atomic RMW, so concurrent mutators are not supported; synchronization is
// presumably provided externally (the cache is per-thread, and the global
// instance in GlobalQuarantine below is GUARDED_BY(CacheMutex)).
template <typename Callback> class QuarantineCache {
public:
  void init() { DCHECK_EQ(atomic_load_relaxed(&Size), 0U); }

  // Total memory used, including internal accounting.
  uptr getSize() const { return atomic_load_relaxed(&Size); }
  // Memory used for internal accounting.
  uptr getOverheadSize() const { return List.size() * sizeof(QuarantineBatch); }

  // Quarantines `Ptr`: appends it to the last batch if there is room,
  // otherwise allocates a fresh batch through `Cb.allocate`.
  void enqueue(Callback Cb, void *Ptr, uptr Size) {
    if (List.empty() || List.back()->Count == QuarantineBatch::MaxCount) {
      QuarantineBatch *B =
          reinterpret_cast<QuarantineBatch *>(Cb.allocate(sizeof(*B)));
      DCHECK(B);
      B->init(Ptr, Size);
      enqueueBatch(B);
    } else {
      List.back()->push_back(Ptr, Size);
      addToSize(Size);
    }
  }

  // Moves every batch (and its accounted size) from `From` into this cache,
  // leaving `From` empty.
  void transfer(QuarantineCache *From) {
    List.append_back(&From->List);
    addToSize(From->getSize());
    atomic_store_relaxed(&From->Size, 0);
  }

  // Appends an already-built batch and accounts for its size.
  void enqueueBatch(QuarantineBatch *B) {
    List.push_back(B);
    addToSize(B->Size);
  }

  // Pops the oldest batch, or returns nullptr when the cache is empty.
  QuarantineBatch *dequeueBatch() {
    if (List.empty())
      return nullptr;
    QuarantineBatch *B = List.front();
    List.pop_front();
    subFromSize(B->Size);
    return B;
  }

  // Walks the list merging adjacent batches that fit into one another; each
  // batch emptied by a merge is moved onto `ToDeallocate` for the caller to
  // free, and its (header-only) size is subtracted from this cache.
  void mergeBatches(QuarantineCache *ToDeallocate) {
    uptr ExtractedSize = 0;
    QuarantineBatch *Current = List.front();
    while (Current && Current->Next) {
      if (Current->canMerge(Current->Next)) {
        QuarantineBatch *Extracted = Current->Next;
        // Move all the chunks into the current batch.
        Current->merge(Extracted);
        DCHECK_EQ(Extracted->Count, 0);
        DCHECK_EQ(Extracted->Size, sizeof(QuarantineBatch));
        // Remove the next batch from the list and account for its Size.
        List.extract(Current, Extracted);
        ExtractedSize += Extracted->Size;
        // Add it to deallocation list.
        ToDeallocate->enqueueBatch(Extracted);
      } else {
        // Not mergeable: advance. Note that on a successful merge we stay on
        // `Current`, so several successive batches can be folded into it.
        Current = Current->Next;
      }
    }
    subFromSize(ExtractedSize);
  }

  // Appends batch/byte/chunk statistics for this cache to `Str`.
  void getStats(ScopedString *Str) const {
    uptr BatchCount = 0;
    uptr TotalOverheadBytes = 0;
    uptr TotalBytes = 0;
    uptr TotalQuarantineChunks = 0;
    for (const QuarantineBatch &Batch : List) {
      BatchCount++;
      TotalBytes += Batch.Size;
      TotalOverheadBytes += Batch.Size - Batch.getQuarantinedSize();
      TotalQuarantineChunks += Batch.Count;
    }
    const uptr QuarantineChunksCapacity =
        BatchCount * QuarantineBatch::MaxCount;
    const uptr ChunksUsagePercent =
        (QuarantineChunksCapacity == 0)
            ? 0
            : TotalQuarantineChunks * 100 / QuarantineChunksCapacity;
    const uptr TotalQuarantinedBytes = TotalBytes - TotalOverheadBytes;
    const uptr MemoryOverheadPercent =
        (TotalQuarantinedBytes == 0)
            ? 0
            : TotalOverheadBytes * 100 / TotalQuarantinedBytes;
    Str->append(
        "Stats: Quarantine: batches: %zu; bytes: %zu (user: %zu); chunks: %zu "
        "(capacity: %zu); %zu%% chunks used; %zu%% memory overhead\n",
        BatchCount, TotalBytes, TotalQuarantinedBytes, TotalQuarantineChunks,
        QuarantineChunksCapacity, ChunksUsagePercent, MemoryOverheadPercent);
  }

private:
  SinglyLinkedList<QuarantineBatch> List;
  atomic_uptr Size = {};

  // Plain load-then-store updates (no atomic RMW): see the class-level note on
  // external synchronization.
  void addToSize(uptr add) { atomic_store_relaxed(&Size, getSize() + add); }
  void subFromSize(uptr sub) { atomic_store_relaxed(&Size, getSize() - sub); }
};

// The callback interface is:
// void Callback::recycle(Node *Ptr);
// void *Callback::allocate(uptr Size);
// void Callback::deallocate(void *Ptr);
template <typename Callback, typename Node> class GlobalQuarantine {
public:
  typedef QuarantineCache<Callback> CacheT;
  using ThisT = GlobalQuarantine<Callback, Node>;

  // Sets the global quarantine limit (`Size`) and the per-thread cache limit
  // (`CacheSize`). Recycling drains down to 90% of `Size` to leave leeway.
  void init(uptr Size, uptr CacheSize) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
    DCHECK_EQ(atomic_load_relaxed(&MaxSize), 0U);
    DCHECK_EQ(atomic_load_relaxed(&MinSize), 0U);
    DCHECK_EQ(atomic_load_relaxed(&MaxCacheSize), 0U);
    // Thread local quarantine size can be zero only when global quarantine size
    // is zero (it allows us to perform just one atomic read per put() call).
    CHECK((Size == 0 && CacheSize == 0) || CacheSize != 0);

    atomic_store_relaxed(&MaxSize, Size);
    atomic_store_relaxed(&MinSize, Size / 10 * 9); // 90% of max size.
    atomic_store_relaxed(&MaxCacheSize, CacheSize);

    Cache.init();
  }

  uptr getMaxSize() const { return atomic_load_relaxed(&MaxSize); }
  uptr getCacheSize() const { return atomic_load_relaxed(&MaxCacheSize); }

  // This is supposed to be used in test only.
  bool isEmpty() {
    ScopedLock L(CacheMutex);
    return Cache.getSize() == 0U;
  }

  // Quarantines `Ptr` in the caller's local cache `C`; drains into the global
  // cache once the local size limit is exceeded.
  void put(CacheT *C, Callback Cb, Node *Ptr, uptr Size) {
    C->enqueue(Cb, Ptr, Size);
    if (C->getSize() > getCacheSize())
      drain(C, Cb);
  }

  // Moves C's batches into the global cache, then starts recycling if the
  // global limit is exceeded. tryLock ensures only one thread recycles at a
  // time; others simply skip (their excess is picked up by a later drain).
  void NOINLINE drain(CacheT *C, Callback Cb) EXCLUDES(CacheMutex) {
    bool needRecycle = false;
    {
      ScopedLock L(CacheMutex);
      Cache.transfer(C);
      needRecycle = Cache.getSize() > getMaxSize();
    }

    if (needRecycle && RecycleMutex.tryLock())
      recycle(atomic_load_relaxed(&MinSize), Cb);
  }

  // Like drain(), but unconditionally recycles everything (target size 0),
  // blocking on RecycleMutex instead of trying it.
  void NOINLINE drainAndRecycle(CacheT *C, Callback Cb) EXCLUDES(CacheMutex) {
    {
      ScopedLock L(CacheMutex);
      Cache.transfer(C);
    }
    RecycleMutex.lock();
    recycle(0, Cb);
  }

  void getStats(ScopedString *Str) EXCLUDES(CacheMutex) {
    ScopedLock L(CacheMutex);
    // It assumes that the world is stopped, just as the allocator's printStats.
    Cache.getStats(Str);
    Str->append("Quarantine limits: global: %zuK; thread local: %zuK\n",
                getMaxSize() >> 10, getCacheSize() >> 10);
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS {
    // RecycleMutex must be locked 1st since we grab CacheMutex within recycle.
    RecycleMutex.lock();
    CacheMutex.lock();
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    CacheMutex.unlock();
    RecycleMutex.unlock();
  }

private:
  // Read-only data.
  alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex CacheMutex;
  CacheT Cache GUARDED_BY(CacheMutex);
  alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex RecycleMutex;
  atomic_uptr MinSize = {};
  atomic_uptr MaxSize = {};
  alignas(SCUDO_CACHE_LINE_SIZE) atomic_uptr MaxCacheSize = {};

  // Shrinks the global cache below `MinSize` bytes, moving the extracted
  // batches into a temporary cache under CacheMutex, then releases both locks
  // (RELEASE(RecycleMutex): caller holds it on entry, we unlock here) before
  // running the recycle callbacks, so no lock is held during Cb calls.
  void NOINLINE recycle(uptr MinSize, Callback Cb) RELEASE(RecycleMutex)
      EXCLUDES(CacheMutex) {
    CacheT Tmp;
    Tmp.init();
    {
      ScopedLock L(CacheMutex);
      // Go over the batches and merge partially filled ones to
      // save some memory, otherwise batches themselves (since the memory used
      // by them is counted against quarantine limit) can overcome the actual
      // user's quarantined chunks, which diminishes the purpose of the
      // quarantine.
      const uptr CacheSize = Cache.getSize();
      const uptr OverheadSize = Cache.getOverheadSize();
      DCHECK_GE(CacheSize, OverheadSize);
      // Do the merge only when overhead exceeds this predefined limit (might
      // require some tuning). It saves us merge attempt when the batch list
      // quarantine is unlikely to contain batches suitable for merge.
      constexpr uptr OverheadThresholdPercents = 100;
      if (CacheSize > OverheadSize &&
          OverheadSize * (100 + OverheadThresholdPercents) >
              CacheSize * OverheadThresholdPercents) {
        Cache.mergeBatches(&Tmp);
      }
      // Extract enough chunks from the quarantine to get below the max
      // quarantine size and leave some leeway for the newly quarantined chunks.
      while (Cache.getSize() > MinSize)
        Tmp.enqueueBatch(Cache.dequeueBatch());
    }
    RecycleMutex.unlock();
    doRecycle(&Tmp, Cb);
  }

  // Shuffles each extracted batch (to randomize reuse order) and hands every
  // chunk to Cb.recycle(), then frees the batch itself via Cb.deallocate().
  void NOINLINE doRecycle(CacheT *C, Callback Cb) {
    while (QuarantineBatch *B = C->dequeueBatch()) {
      // Seed derived from the batch/cache addresses; >> 4 drops the low
      // alignment bits that carry little entropy.
      const u32 Seed = static_cast<u32>(
          (reinterpret_cast<uptr>(B) ^ reinterpret_cast<uptr>(C)) >> 4);
      B->shuffle(Seed);
      constexpr uptr NumberOfPrefetch = 8UL;
      CHECK(NumberOfPrefetch <= ARRAY_SIZE(B->Batch));
      // Prefetch ahead of the recycle loop to hide cache-miss latency.
      for (uptr I = 0; I < NumberOfPrefetch; I++)
        PREFETCH(B->Batch[I]);
      for (uptr I = 0, Count = B->Count; I < Count; I++) {
        if (I + NumberOfPrefetch < Count)
          PREFETCH(B->Batch[I + NumberOfPrefetch]);
        Cb.recycle(reinterpret_cast<Node *>(B->Batch[I]));
      }
      Cb.deallocate(B);
    }
  }
};

} // namespace scudo

#endif // SCUDO_QUARANTINE_H_