Path: blob/main/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h
//===-- tsan_dense_alloc.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// A DenseSlabAlloc is a freelist-based allocator of fixed-size objects.
// DenseSlabAllocCache is a thread-local cache for DenseSlabAlloc.
// The only difference with traditional slab allocators is that DenseSlabAlloc
// allocates/frees indices of objects and provides functionality to map
// the index onto the real pointer. The index is u32, that is, 2 times smaller
// than uptr (hence the Dense prefix).
//===----------------------------------------------------------------------===//
#ifndef TSAN_DENSE_ALLOC_H
#define TSAN_DENSE_ALLOC_H

#include "sanitizer_common/sanitizer_common.h"
#include "tsan_defs.h"

namespace __tsan {

class DenseSlabAllocCache {
  static const uptr kSize = 128;
  typedef u32 IndexT;
  uptr pos;
  IndexT cache[kSize];
  template <typename, uptr, uptr, u64>
  friend class DenseSlabAlloc;
};

template <typename T, uptr kL1Size, uptr kL2Size, u64 kReserved = 0>
class DenseSlabAlloc {
 public:
  typedef DenseSlabAllocCache Cache;
  typedef typename Cache::IndexT IndexT;

  static_assert((kL1Size & (kL1Size - 1)) == 0,
                "kL1Size must be a power-of-two");
  static_assert((kL2Size & (kL2Size - 1)) == 0,
                "kL2Size must be a power-of-two");
  static_assert((kL1Size * kL2Size) <= (1ull << (sizeof(IndexT) * 8)),
                "kL1Size/kL2Size are too large");
  static_assert(((kL1Size * kL2Size - 1) & kReserved) == 0,
                "reserved bits don't fit");
  static_assert(sizeof(T) > sizeof(IndexT),
                "it doesn't make sense to use dense alloc");

  DenseSlabAlloc(LinkerInitialized, const char *name) : name_(name) {}

  explicit DenseSlabAlloc(const char *name)
      : DenseSlabAlloc(LINKER_INITIALIZED, name) {
    // It can be very large.
    // Don't page it in for linker initialized objects.
    internal_memset(map_, 0, sizeof(map_));
  }

  ~DenseSlabAlloc() {
    for (uptr i = 0; i < kL1Size; i++) {
      if (map_[i] != 0)
        UnmapOrDie(map_[i], kL2Size * sizeof(T));
    }
  }

  IndexT Alloc(Cache *c) {
    if (c->pos == 0)
      Refill(c);
    return c->cache[--c->pos];
  }

  void Free(Cache *c, IndexT idx) {
    DCHECK_NE(idx, 0);
    if (c->pos == Cache::kSize)
      Drain(c);
    c->cache[c->pos++] = idx;
  }

  T *Map(IndexT idx) {
    DCHECK_NE(idx, 0);
    DCHECK_LE(idx, kL1Size * kL2Size);
    return &map_[idx / kL2Size][idx % kL2Size];
  }

  void FlushCache(Cache *c) {
    while (c->pos) Drain(c);
  }

  void InitCache(Cache *c) {
    c->pos = 0;
    internal_memset(c->cache, 0, sizeof(c->cache));
  }

  uptr AllocatedMemory() const {
    return atomic_load_relaxed(&fillpos_) * kL2Size * sizeof(T);
  }

  template <typename Func>
  void ForEach(Func func) {
    Lock lock(&mtx_);
    uptr fillpos = atomic_load_relaxed(&fillpos_);
    for (uptr l1 = 0; l1 < fillpos; l1++) {
      for (IndexT l2 = l1 == 0 ? 1 : 0; l2 < kL2Size; l2++) func(&map_[l1][l2]);
    }
  }

 private:
  T *map_[kL1Size];
  Mutex mtx_;
  // The freelist is organized as a lock-free stack of batches of nodes.
  // The stack itself uses Block::next links, while the batch within each
  // stack node uses Block::batch links.
  // Low 32-bits of freelist_ is the node index, top 32-bits is ABA-counter.
  atomic_uint64_t freelist_ = {0};
  atomic_uintptr_t fillpos_ = {0};
  const char *const name_;

  struct Block {
    IndexT next;
    IndexT batch;
  };

  Block *MapBlock(IndexT idx) { return reinterpret_cast<Block *>(Map(idx)); }

  static constexpr u64 kCounterInc = 1ull << 32;
  static constexpr u64 kCounterMask = ~(kCounterInc - 1);

  NOINLINE void Refill(Cache *c) {
    // Pop 1 batch of nodes from the freelist.
    IndexT idx;
    u64 xchg;
    u64 cmp = atomic_load(&freelist_, memory_order_acquire);
    do {
      idx = static_cast<IndexT>(cmp);
      if (!idx)
        return AllocSuperBlock(c);
      Block *ptr = MapBlock(idx);
      xchg = ptr->next | (cmp & kCounterMask);
    } while (!atomic_compare_exchange_weak(&freelist_, &cmp, xchg,
                                           memory_order_acq_rel));
    // Unpack it into c->cache.
    while (idx) {
      c->cache[c->pos++] = idx;
      idx = MapBlock(idx)->batch;
    }
  }

  NOINLINE void Drain(Cache *c) {
    // Build a batch of at most Cache::kSize / 2 nodes linked by Block::batch.
    IndexT head_idx = 0;
    for (uptr i = 0; i < Cache::kSize / 2 && c->pos; i++) {
      IndexT idx = c->cache[--c->pos];
      Block *ptr = MapBlock(idx);
      ptr->batch = head_idx;
      head_idx = idx;
    }
    // Push it onto the freelist stack.
    Block *head = MapBlock(head_idx);
    u64 xchg;
    u64 cmp = atomic_load(&freelist_, memory_order_acquire);
    do {
      head->next = static_cast<IndexT>(cmp);
      xchg = head_idx | (cmp & kCounterMask) + kCounterInc;
    } while (!atomic_compare_exchange_weak(&freelist_, &cmp, xchg,
                                           memory_order_acq_rel));
  }

  NOINLINE void AllocSuperBlock(Cache *c) {
    Lock lock(&mtx_);
    uptr fillpos = atomic_load_relaxed(&fillpos_);
    if (fillpos == kL1Size) {
      Printf("ThreadSanitizer: %s overflow (%zu*%zu). Dying.\n", name_, kL1Size,
             kL2Size);
      Die();
    }
    VPrintf(2, "ThreadSanitizer: growing %s: %zu out of %zu*%zu\n", name_,
            fillpos, kL1Size, kL2Size);
    T *batch = (T *)MmapOrDie(kL2Size * sizeof(T), name_);
    map_[fillpos] = batch;
    // Reserve 0 as invalid index.
    for (IndexT i = fillpos ? 0 : 1; i < kL2Size; i++) {
      new (batch + i) T;
      c->cache[c->pos++] = i + fillpos * kL2Size;
      if (c->pos == Cache::kSize)
        Drain(c);
    }
    atomic_store_relaxed(&fillpos_, fillpos + 1);
    CHECK(c->pos);
  }
};

}  // namespace __tsan

#endif  // TSAN_DENSE_ALLOC_H
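
The sketch below illustrates the calling pattern this header is designed for: a per-thread DenseSlabAllocCache combined with index-based Alloc/Map/Free. It is an illustrative sketch only, not code from this file; the ExampleNode type, the level sizes, and the example_alloc/ExampleUse names are hypothetical, and it assumes compilation inside the TSan runtime, where this header and the sanitizer_common primitives are available.

// Illustrative-only sketch (hypothetical ExampleNode, sizes, and names).
#include "tsan_dense_alloc.h"

namespace __tsan {

// Hypothetical payload type; DenseSlabAlloc requires sizeof(T) > sizeof(u32).
struct ExampleNode {
  u64 payload;
};

// Both level sizes must be powers of two and their product must fit into the
// 32-bit index space (here 2^10 * 2^10 = 2^20 indices).
typedef DenseSlabAlloc<ExampleNode, /*kL1Size=*/1 << 10, /*kL2Size=*/1 << 10>
    ExampleAlloc;

static ExampleAlloc example_alloc("example_alloc");

void ExampleUse() {
  // Each thread keeps its own cache; the shared lock-free freelist is touched
  // only when the cache runs empty (Refill) or full (Drain).
  ExampleAlloc::Cache cache;
  example_alloc.InitCache(&cache);

  // Alloc hands out a dense 32-bit index rather than a pointer; index 0 is
  // reserved as the invalid value.
  ExampleAlloc::IndexT idx = example_alloc.Alloc(&cache);
  ExampleNode *node = example_alloc.Map(idx);  // translate index -> pointer
  node->payload = 42;

  example_alloc.Free(&cache, idx);
  example_alloc.FlushCache(&cache);  // return cached indices to the freelist
}

}  // namespace __tsan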