Path: blob/main/contrib/llvm-project/compiler-rt/lib/xray/xray_buffer_queue.cpp
//===-- xray_buffer_queue.cpp ----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// Defines the interface for a buffer queue implementation.
//
//===----------------------------------------------------------------------===//
#include "xray_buffer_queue.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#if !SANITIZER_FUCHSIA
#include "sanitizer_common/sanitizer_posix.h"
#endif
#include "xray_allocator.h"
#include "xray_defs.h"
#include <memory>
#include <sys/mman.h>

using namespace __xray;

namespace {

BufferQueue::ControlBlock *allocControlBlock(size_t Size, size_t Count) {
  auto B =
      allocateBuffer((sizeof(BufferQueue::ControlBlock) - 1) + (Size * Count));
  return B == nullptr ? nullptr
                      : reinterpret_cast<BufferQueue::ControlBlock *>(B);
}

void deallocControlBlock(BufferQueue::ControlBlock *C, size_t Size,
                         size_t Count) {
  deallocateBuffer(reinterpret_cast<unsigned char *>(C),
                   (sizeof(BufferQueue::ControlBlock) - 1) + (Size * Count));
}

void decRefCount(BufferQueue::ControlBlock *C, size_t Size, size_t Count) {
  if (C == nullptr)
    return;
  if (atomic_fetch_sub(&C->RefCount, 1, memory_order_acq_rel) == 1)
    deallocControlBlock(C, Size, Count);
}

void incRefCount(BufferQueue::ControlBlock *C) {
  if (C == nullptr)
    return;
  atomic_fetch_add(&C->RefCount, 1, memory_order_acq_rel);
}

// We use a struct to ensure that we are allocating one atomic_uint64_t per
// cache line. This allows us to not worry about false-sharing among atomic
// objects being updated (constantly) by different threads.
struct ExtentsPadded {
  union {
    atomic_uint64_t Extents;
    unsigned char Storage[kCacheLineSize];
  };
};

constexpr size_t kExtentsSize = sizeof(ExtentsPadded);
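
// A compile-time check of the padding intent above (editorial sketch, not
// part of the upstream file): the union pads each instance to at least one
// cache line, so neighboring extents records never share a line. This
// assumes kCacheLineSize, already used in Storage above, is a compile-time
// constant, as it is in sanitizer_common.
static_assert(sizeof(ExtentsPadded) >= kCacheLineSize,
              "ExtentsPadded instances should never share a cache line");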

} // namespace

BufferQueue::ErrorCode BufferQueue::init(size_t BS, size_t BC) {
  SpinMutexLock Guard(&Mutex);

  if (!finalizing())
    return BufferQueue::ErrorCode::AlreadyInitialized;

  cleanupBuffers();

  bool Success = false;
  BufferSize = BS;
  BufferCount = BC;

  BackingStore = allocControlBlock(BufferSize, BufferCount);
  if (BackingStore == nullptr)
    return BufferQueue::ErrorCode::NotEnoughMemory;

  auto CleanupBackingStore = at_scope_exit([&, this] {
    if (Success)
      return;
    deallocControlBlock(BackingStore, BufferSize, BufferCount);
    BackingStore = nullptr;
  });

  // Initialize enough atomic_uint64_t instances, each padded out to a cache
  // line (see ExtentsPadded above), to track the extents of every buffer.
  ExtentsBackingStore = allocControlBlock(kExtentsSize, BufferCount);
  if (ExtentsBackingStore == nullptr)
    return BufferQueue::ErrorCode::NotEnoughMemory;

  auto CleanupExtentsBackingStore = at_scope_exit([&, this] {
    if (Success)
      return;
    deallocControlBlock(ExtentsBackingStore, kExtentsSize, BufferCount);
    ExtentsBackingStore = nullptr;
  });

  Buffers = initArray<BufferRep>(BufferCount);
  if (Buffers == nullptr)
    return BufferQueue::ErrorCode::NotEnoughMemory;

  // At this point we increment the generation number to associate the buffers
  // with the new generation.
  atomic_fetch_add(&Generation, 1, memory_order_acq_rel);

  // First, we initialize the refcount in the ControlBlock, which we treat as
  // being at the start of the BackingStore pointer.
  atomic_store(&BackingStore->RefCount, 1, memory_order_release);
  atomic_store(&ExtentsBackingStore->RefCount, 1, memory_order_release);
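
  // For orientation, a sketch of the layout init() establishes (editorial
  // comment; the Data member is declared in BufferQueue::ControlBlock in
  // xray_buffer_queue.h):
  //
  //   BackingStore:        [ RefCount | Data: buffer 0 | buffer 1 | ... ]
  //   ExtentsBackingStore: [ RefCount | Data: extents 0 | extents 1 | ... ]
  //
  // Buffer i occupies BufferSize bytes starting at &Data + (BufferSize * i),
  // and extents slot i is one cache-line-sized ExtentsPadded record. This is
  // also why allocControlBlock reserves (sizeof(ControlBlock) - 1) +
  // (Size * Count) bytes: the -1 discounts the single byte of Data declared
  // in the struct itself.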

  // Then we initialize the individual buffers that sub-divide the whole
  // backing store. Each buffer starts at the `Data` member of the
  // ControlBlock, at an offset proportional to its index.
  for (size_t i = 0; i < BufferCount; ++i) {
    auto &T = Buffers[i];
    auto &Buf = T.Buff;
    auto *E = reinterpret_cast<ExtentsPadded *>(&ExtentsBackingStore->Data +
                                                (kExtentsSize * i));
    Buf.Extents = &E->Extents;
    atomic_store(Buf.Extents, 0, memory_order_release);
    Buf.Generation = generation();
    Buf.Data = &BackingStore->Data + (BufferSize * i);
    Buf.Size = BufferSize;
    Buf.BackingStore = BackingStore;
    Buf.ExtentsBackingStore = ExtentsBackingStore;
    Buf.Count = BufferCount;
    T.Used = false;
  }

  Next = Buffers;
  First = Buffers;
  LiveBuffers = 0;
  atomic_store(&Finalizing, 0, memory_order_release);
  Success = true;
  return BufferQueue::ErrorCode::Ok;
}

BufferQueue::BufferQueue(size_t B, size_t N,
                         bool &Success) XRAY_NEVER_INSTRUMENT
    : BufferSize(B),
      BufferCount(N),
      Mutex(),
      Finalizing{1},
      BackingStore(nullptr),
      ExtentsBackingStore(nullptr),
      Buffers(nullptr),
      Next(Buffers),
      First(Buffers),
      LiveBuffers(0),
      Generation{0} {
  Success = init(B, N) == BufferQueue::ErrorCode::Ok;
}

BufferQueue::ErrorCode BufferQueue::getBuffer(Buffer &Buf) {
  if (atomic_load(&Finalizing, memory_order_acquire))
    return ErrorCode::QueueFinalizing;

  BufferRep *B = nullptr;
  {
    SpinMutexLock Guard(&Mutex);
    if (LiveBuffers == BufferCount)
      return ErrorCode::NotEnoughMemory;
    B = Next++;
    if (Next == (Buffers + BufferCount))
      Next = Buffers;
    ++LiveBuffers;
  }

  incRefCount(BackingStore);
  incRefCount(ExtentsBackingStore);
  Buf = B->Buff;
  Buf.Generation = generation();
  B->Used = true;
  return ErrorCode::Ok;
}

BufferQueue::ErrorCode BufferQueue::releaseBuffer(Buffer &Buf) {
  // Check whether the buffer being referred to is within the bounds of the
  // backing store's range.
  BufferRep *B = nullptr;
  {
    SpinMutexLock Guard(&Mutex);
    if (Buf.Generation != generation() || LiveBuffers == 0) {
      // Drop the references this stale buffer holds before clearing it;
      // clearing first would null out the control block pointers and leak
      // the references taken in getBuffer().
      decRefCount(Buf.BackingStore, Buf.Size, Buf.Count);
      decRefCount(Buf.ExtentsBackingStore, kExtentsSize, Buf.Count);
      Buf = {};
      return BufferQueue::ErrorCode::Ok;
    }

    if (Buf.Data < &BackingStore->Data ||
        Buf.Data > &BackingStore->Data + (BufferCount * BufferSize))
      return BufferQueue::ErrorCode::UnrecognizedBuffer;

    --LiveBuffers;
    B = First++;
    if (First == (Buffers + BufferCount))
      First = Buffers;
  }

  // Now that the buffer has been released, we mark it as "used".
  B->Buff = Buf;
  B->Used = true;
  decRefCount(Buf.BackingStore, Buf.Size, Buf.Count);
  decRefCount(Buf.ExtentsBackingStore, kExtentsSize, Buf.Count);
  atomic_store(B->Buff.Extents, atomic_load(Buf.Extents, memory_order_acquire),
               memory_order_release);
  Buf = {};
  return ErrorCode::Ok;
}

BufferQueue::ErrorCode BufferQueue::finalize() {
  if (atomic_exchange(&Finalizing, 1, memory_order_acq_rel))
    return ErrorCode::QueueFinalizing;
  return ErrorCode::Ok;
}

void BufferQueue::cleanupBuffers() {
  for (auto B = Buffers, E = Buffers + BufferCount; B != E; ++B)
    B->~BufferRep();
  deallocateBuffer(Buffers, BufferCount);
  decRefCount(BackingStore, BufferSize, BufferCount);
  decRefCount(ExtentsBackingStore, kExtentsSize, BufferCount);
  BackingStore = nullptr;
  ExtentsBackingStore = nullptr;
  Buffers = nullptr;
  BufferCount = 0;
  BufferSize = 0;
}

BufferQueue::~BufferQueue() { cleanupBuffers(); }
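
// Illustrative usage (editorial sketch, not part of the upstream file): the
// expected lifecycle of the queue, using only the interfaces defined above.
// The buffer size and count here are arbitrary.
//
//   bool Success = false;
//   BufferQueue BQ(/*BufferSize=*/4096, /*BufferCount=*/8, Success);
//   if (Success) {
//     BufferQueue::Buffer Buf;
//     if (BQ.getBuffer(Buf) == BufferQueue::ErrorCode::Ok) {
//       // ... write records into Buf.Data, publishing the number of bytes
//       // used through the atomic pointed to by Buf.Extents ...
//       BQ.releaseBuffer(Buf);
//     }
//     BQ.finalize(); // subsequent getBuffer() calls will fail
//   }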