// Source: contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
//===-- sanitizer_allocator.cpp -------------------------------------------===//1//2// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.3// See https://llvm.org/LICENSE.txt for license information.4// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception5//6//===----------------------------------------------------------------------===//7//8// This file is shared between AddressSanitizer and ThreadSanitizer9// run-time libraries.10// This allocator is used inside run-times.11//===----------------------------------------------------------------------===//1213#include "sanitizer_allocator.h"1415#include "sanitizer_allocator_checks.h"16#include "sanitizer_allocator_internal.h"17#include "sanitizer_atomic.h"18#include "sanitizer_common.h"19#include "sanitizer_platform.h"2021namespace __sanitizer {2223// Default allocator names.24const char *PrimaryAllocatorName = "SizeClassAllocator";25const char *SecondaryAllocatorName = "LargeMmapAllocator";2627alignas(64) static char internal_alloc_placeholder[sizeof(InternalAllocator)];28static atomic_uint8_t internal_allocator_initialized;29static StaticSpinMutex internal_alloc_init_mu;3031static InternalAllocatorCache internal_allocator_cache;32static StaticSpinMutex internal_allocator_cache_mu;3334InternalAllocator *internal_allocator() {35InternalAllocator *internal_allocator_instance =36reinterpret_cast<InternalAllocator *>(&internal_alloc_placeholder);37if (atomic_load(&internal_allocator_initialized, memory_order_acquire) == 0) {38SpinMutexLock l(&internal_alloc_init_mu);39if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==400) {41internal_allocator_instance->Init(kReleaseToOSIntervalNever);42atomic_store(&internal_allocator_initialized, 1, memory_order_release);43}44}45return internal_allocator_instance;46}4748static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,49uptr alignment) {50if (alignment == 0) alignment = 8;51if (cache == 0) {52SpinMutexLock 
l(&internal_allocator_cache_mu);53return internal_allocator()->Allocate(&internal_allocator_cache, size,54alignment);55}56return internal_allocator()->Allocate(cache, size, alignment);57}5859static void *RawInternalRealloc(void *ptr, uptr size,60InternalAllocatorCache *cache) {61uptr alignment = 8;62if (cache == 0) {63SpinMutexLock l(&internal_allocator_cache_mu);64return internal_allocator()->Reallocate(&internal_allocator_cache, ptr,65size, alignment);66}67return internal_allocator()->Reallocate(cache, ptr, size, alignment);68}6970static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {71if (!cache) {72SpinMutexLock l(&internal_allocator_cache_mu);73return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);74}75internal_allocator()->Deallocate(cache, ptr);76}7778static void NORETURN ReportInternalAllocatorOutOfMemory(uptr requested_size) {79SetAllocatorOutOfMemory();80Report("FATAL: %s: internal allocator is out of memory trying to allocate "81"0x%zx bytes\n", SanitizerToolName, requested_size);82Die();83}8485void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {86void *p = RawInternalAlloc(size, cache, alignment);87if (UNLIKELY(!p))88ReportInternalAllocatorOutOfMemory(size);89return p;90}9192void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {93void *p = RawInternalRealloc(addr, size, cache);94if (UNLIKELY(!p))95ReportInternalAllocatorOutOfMemory(size);96return p;97}9899void *InternalReallocArray(void *addr, uptr count, uptr size,100InternalAllocatorCache *cache) {101if (UNLIKELY(CheckForCallocOverflow(count, size))) {102Report(103"FATAL: %s: reallocarray parameters overflow: count * size (%zd * %zd) "104"cannot be represented in type size_t\n",105SanitizerToolName, count, size);106Die();107}108return InternalRealloc(addr, count * size, cache);109}110111void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {112if (UNLIKELY(CheckForCallocOverflow(count, size))) 
{113Report("FATAL: %s: calloc parameters overflow: count * size (%zd * %zd) "114"cannot be represented in type size_t\n", SanitizerToolName, count,115size);116Die();117}118void *p = InternalAlloc(count * size, cache);119if (LIKELY(p))120internal_memset(p, 0, count * size);121return p;122}123124void InternalFree(void *addr, InternalAllocatorCache *cache) {125RawInternalFree(addr, cache);126}127128void InternalAllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {129internal_allocator_cache_mu.Lock();130internal_allocator()->ForceLock();131}132133void InternalAllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {134internal_allocator()->ForceUnlock();135internal_allocator_cache_mu.Unlock();136}137138// LowLevelAllocator139constexpr uptr kLowLevelAllocatorDefaultAlignment = 8;140constexpr uptr kMinNumPagesRounded = 16;141constexpr uptr kMinRoundedSize = 65536;142static uptr low_level_alloc_min_alignment = kLowLevelAllocatorDefaultAlignment;143static LowLevelAllocateCallback low_level_alloc_callback;144145static LowLevelAllocator Alloc;146LowLevelAllocator &GetGlobalLowLevelAllocator() { return Alloc; }147148void *LowLevelAllocator::Allocate(uptr size) {149// Align allocation size.150size = RoundUpTo(size, low_level_alloc_min_alignment);151if (allocated_end_ - allocated_current_ < (sptr)size) {152uptr size_to_allocate = RoundUpTo(153size, Min(GetPageSizeCached() * kMinNumPagesRounded, kMinRoundedSize));154allocated_current_ = (char *)MmapOrDie(size_to_allocate, __func__);155allocated_end_ = allocated_current_ + size_to_allocate;156if (low_level_alloc_callback) {157low_level_alloc_callback((uptr)allocated_current_, size_to_allocate);158}159}160CHECK(allocated_end_ - allocated_current_ >= (sptr)size);161void *res = allocated_current_;162allocated_current_ += size;163return res;164}165166void SetLowLevelAllocateMinAlignment(uptr alignment) {167CHECK(IsPowerOfTwo(alignment));168low_level_alloc_min_alignment = Max(alignment, low_level_alloc_min_alignment);169}170171void 
SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {172low_level_alloc_callback = callback;173}174175// Allocator's OOM and other errors handling support.176177static atomic_uint8_t allocator_out_of_memory = {0};178static atomic_uint8_t allocator_may_return_null = {0};179180bool IsAllocatorOutOfMemory() {181return atomic_load_relaxed(&allocator_out_of_memory);182}183184void SetAllocatorOutOfMemory() {185atomic_store_relaxed(&allocator_out_of_memory, 1);186}187188bool AllocatorMayReturnNull() {189return atomic_load(&allocator_may_return_null, memory_order_relaxed);190}191192void SetAllocatorMayReturnNull(bool may_return_null) {193atomic_store(&allocator_may_return_null, may_return_null,194memory_order_relaxed);195}196197void PrintHintAllocatorCannotReturnNull() {198Report("HINT: if you don't care about these errors you may set "199"allocator_may_return_null=1\n");200}201202static atomic_uint8_t rss_limit_exceeded;203204bool IsRssLimitExceeded() {205return atomic_load(&rss_limit_exceeded, memory_order_relaxed);206}207208void SetRssLimitExceeded(bool limit_exceeded) {209atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);210}211212} // namespace __sanitizer213214215