Path: blob/main/contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.h
//===-- asan_allocator.h ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header for asan_allocator.cpp.
//===----------------------------------------------------------------------===//

#ifndef ASAN_ALLOCATOR_H
#define ASAN_ALLOCATOR_H

#include "asan_flags.h"
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_platform.h"

namespace __asan {

enum AllocType {
  FROM_MALLOC = 1,  // Memory block came from malloc, calloc, realloc, etc.
  FROM_NEW = 2,     // Memory block came from operator new.
  FROM_NEW_BR = 3   // Memory block came from operator new [].
};

class AsanChunk;

struct AllocatorOptions {
  u32 quarantine_size_mb;
  u32 thread_local_quarantine_size_kb;
  u16 min_redzone;
  u16 max_redzone;
  u8 may_return_null;
  u8 alloc_dealloc_mismatch;
  s32 release_to_os_interval_ms;

  void SetFrom(const Flags *f, const CommonFlags *cf);
  void CopyTo(Flags *f, CommonFlags *cf);
};

void InitializeAllocator(const AllocatorOptions &options);
void ReInitializeAllocator(const AllocatorOptions &options);
void GetAllocatorOptions(AllocatorOptions *options);

class AsanChunkView {
 public:
  explicit AsanChunkView(AsanChunk *chunk) : chunk_(chunk) {}
  bool IsValid() const;        // Checks if AsanChunkView points to a valid
                               // allocated or quarantined chunk.
  bool IsAllocated() const;    // Checks if the memory is currently allocated.
  bool IsQuarantined() const;  // Checks if the memory is currently quarantined.
  uptr Beg() const;            // First byte of user memory.
  uptr End() const;            // Last byte of user memory.
  uptr UsedSize() const;       // Size requested by the user.
  u32 UserRequestedAlignment() const;  // Originally requested alignment.
  uptr AllocTid() const;
  uptr FreeTid() const;
  bool Eq(const AsanChunkView &c) const { return chunk_ == c.chunk_; }
  u32 GetAllocStackId() const;
  u32 GetFreeStackId() const;
  AllocType GetAllocType() const;
  bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) const {
    if (addr >= Beg() && (addr + access_size) <= End()) {
      *offset = addr - Beg();
      return true;
    }
    return false;
  }
  bool AddrIsAtLeft(uptr addr, uptr access_size, sptr *offset) const {
    (void)access_size;
    if (addr < Beg()) {
      *offset = Beg() - addr;
      return true;
    }
    return false;
  }
  bool AddrIsAtRight(uptr addr, uptr access_size, sptr *offset) const {
    if (addr + access_size > End()) {
      *offset = addr - End();
      return true;
    }
    return false;
  }

 private:
  AsanChunk *const chunk_;
};

AsanChunkView FindHeapChunkByAddress(uptr address);
AsanChunkView FindHeapChunkByAllocBeg(uptr address);
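
// Illustrative sketch (not part of the ASan interface): the Addr* predicates
// above let a report routine classify a faulting address relative to a chunk
// and compute the offset printed in diagnostics. A hypothetical helper could
// look roughly like this; the real reporting logic is more involved.
//
//   void DescribeAddrVsChunk(const AsanChunkView &chunk, uptr addr,
//                            uptr access_size) {
//     sptr offset = 0;
//     if (chunk.AddrIsInside(addr, access_size, &offset))
//       Printf("%zd bytes inside of the chunk\n", offset);
//     else if (chunk.AddrIsAtLeft(addr, access_size, &offset))
//       Printf("%zd bytes to the left of the chunk (underflow)\n", offset);
//     else if (chunk.AddrIsAtRight(addr, access_size, &offset))
//       Printf("%zd bytes to the right of the chunk (overflow)\n", offset);
//   }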

// List of AsanChunks with total size.
class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
 public:
  explicit AsanChunkFifoList(LinkerInitialized) { }
  AsanChunkFifoList() { clear(); }
  void Push(AsanChunk *n);
  void PushList(AsanChunkFifoList *q);
  AsanChunk *Pop();
  uptr size() { return size_; }
  void clear() {
    IntrusiveList<AsanChunk>::clear();
    size_ = 0;
  }
 private:
  uptr size_;
};

struct AsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const;
  void OnMapSecondary(uptr p, uptr size, uptr user_begin, uptr user_size) const;
  void OnUnmap(uptr p, uptr size) const;
};

#if SANITIZER_CAN_USE_ALLOCATOR64
# if SANITIZER_FUCHSIA
// This is a sentinel indicating we do not want the primary allocator arena to
// be placed at a fixed address. It will be anonymously mmap'd.
const uptr kAllocatorSpace = ~(uptr)0;
#  if SANITIZER_RISCV64

// These are sanitizer tunings that allow all bringup tests for RISCV-64 Sv39 +
// Fuchsia to run with ASan instrumentation. That is, we can run bringup, e2e,
// libc, and scudo tests with this configuration.
//
// TODO: This is specifically tuned for Sv39. 48/57 will likely require other
// tunings, or possibly use the same tunings Fuchsia uses for other archs. The
// VMA size isn't technically tied to the Fuchsia System ABI, so once 48/57 is
// supported, we'd need a way of dynamically checking what the VMA size is and
// determining the optimal configuration.

// This indicates the total amount of space dedicated to the primary allocator
// during initialization. This is roughly proportional to the size set by the
// FuchsiaConfig for scudo (~11.25GB == ~2^33.49). Requesting any more could
// lead to some failures in sanitized bringup tests where we can't allocate new
// vmars because there wouldn't be enough contiguous space. We could try 2^34 if
// we re-evaluate the SizeClassMap settings.
const uptr kAllocatorSize = UINT64_C(1) << 33;  // 8GB

// This is roughly equivalent to the configuration for the VeryDenseSizeClassMap
// but has fewer size classes (ideally at most 32). Fewer size classes means the
// region for each class is larger, and thus less chance of running out of
// space in each region. The main differences are the MidSizeLog (which is
// smaller) and the MaxSizeLog (which is larger).
//
// - The MaxSizeLog is higher to allow some of the largest allocations I've
//   observed to be placed in the primary allocator's arena as opposed to being
//   mmap'd by the secondary allocator. This helps reduce fragmentation from
//   large classes. A prominent example is the scudo allocator tests (and their
//   testing infrastructure), which malloc/new objects on the order of hundreds
//   of kilobytes that normally would not land in the primary allocator arena
//   with the default VeryDenseSizeClassMap.
// - The MidSizeLog is reduced to help shrink the number of size classes and
//   increase region size. Without this, we'd see ASan complain many times about
//   a region running out of available space.
//
// This differs a bit from the Fuchsia config in scudo, mainly in the NumBits,
// MaxSizeLog, and NumCachedHintT. This should place the number of size classes
// for scudo at 45, and some large objects allocated by this config would be
// placed in the arena whereas scudo would mmap them. The ASan allocator needs
// the number of classes to be a power of 2 for various internal things to
// work, so we can't match the scudo settings to a tee. The sanitizer
// allocator is slightly slower than scudo's, but this is enough to get
// memory-intensive scudo tests to run with ASan instrumentation.
typedef SizeClassMap</*kNumBits=*/2,
                     /*kMinSizeLog=*/5,
                     /*kMidSizeLog=*/8,
                     /*kMaxSizeLog=*/18,
                     /*kNumCachedHintT=*/8,
                     /*kMaxBytesCachedLog=*/10>
    SizeClassMap;
static_assert(SizeClassMap::kNumClassesRounded <= 32,
              "The above tunings were specifically selected to ensure there "
              "would be at most 32 size classes. This restriction could be "
              "loosened to 64 size classes if we can find a configuration of "
              "allocator size and SizeClassMap tunings that allows us to "
              "reliably run all bringup tests in a sanitized environment.");

#  else
// These are the default allocator tunings for non-RISCV environments where the
// VMA is usually 48 bits and we have lots of space.
const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
typedef DefaultSizeClassMap SizeClassMap;
#  endif
# elif defined(__powerpc64__)
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x20000000000ULL;  // 2T.
typedef DefaultSizeClassMap SizeClassMap;
# elif defined(__aarch64__) && SANITIZER_ANDROID
// Android needs to support 39, 42 and 48 bit VMA.
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x2000000000ULL;  // 128G.
typedef VeryCompactSizeClassMap SizeClassMap;
# elif SANITIZER_RISCV64
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x2000000000ULL;  // 128G.
typedef VeryDenseSizeClassMap SizeClassMap;
# elif defined(__sparc__)
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x20000000000ULL;  // 2T.
typedef DefaultSizeClassMap SizeClassMap;
# elif SANITIZER_WINDOWS
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x8000000000ULL;  // 512G.
typedef DefaultSizeClassMap SizeClassMap;
# elif SANITIZER_APPLE
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
typedef DefaultSizeClassMap SizeClassMap;
# else
const uptr kAllocatorSpace = 0x500000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
typedef DefaultSizeClassMap SizeClassMap;
# endif
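
// Illustrative note (added for exposition; see
// sanitizer_common/sanitizer_allocator_size_class_map.h for the exact class
// layout): these tunings mostly bound which requests the primary allocator can
// serve at all. For instance, under the Fuchsia RISC-V tuning above,
// kMaxSizeLog == 18 makes the largest primary size class 1 << 18 == 256 KiB,
// so a 1 MiB malloc falls through to the mmap-based secondary allocator, while
// the default 64-bit arena of 0x40000000000ULL bytes reserves 2^42 == 4 TiB of
// address space for the primary regions. Commented-out arithmetic checks of
// those two facts:
//
//   static_assert((1ULL << 18) == 256 * 1024, "largest Sv39/Fuchsia class");
//   static_assert(0x40000000000ULL == (1ULL << 42), "default 4 TiB arena");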
template <typename AddressSpaceViewTy>
struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = kAllocatorSize;
  static const uptr kMetadataSize = 0;
  typedef __asan::SizeClassMap SizeClassMap;
  typedef AsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

template <typename AddressSpaceView>
using PrimaryAllocatorASVT = SizeClassAllocator64<AP64<AddressSpaceView>>;
using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
#else  // Fallback to SizeClassAllocator32.
typedef CompactSizeClassMap SizeClassMap;
template <typename AddressSpaceViewTy>
struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = 0;
  typedef __asan::SizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = 20;
  using AddressSpaceView = AddressSpaceViewTy;
  typedef AsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
template <typename AddressSpaceView>
using PrimaryAllocatorASVT = SizeClassAllocator32<AP32<AddressSpaceView> >;
using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
#endif  // SANITIZER_CAN_USE_ALLOCATOR64

static const uptr kNumberOfSizeClasses = SizeClassMap::kNumClasses;

template <typename AddressSpaceView>
using AsanAllocatorASVT =
    CombinedAllocator<PrimaryAllocatorASVT<AddressSpaceView>>;
using AsanAllocator = AsanAllocatorASVT<LocalAddressSpaceView>;
using AllocatorCache = AsanAllocator::AllocatorCache;
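
// How these types fit together (informal summary of the sanitizer_common
// allocator design): AsanAllocator is a CombinedAllocator that serves small
// requests from a per-thread AllocatorCache backed by the PrimaryAllocator's
// size-class regions, and falls back to an mmap-based secondary allocator for
// requests larger than the biggest size class. A rough, hypothetical call-site
// sketch (GetCache() is an invented accessor, not part of this header):
//
//   AllocatorCache *cache = GetCache();  // per-thread cache
//   void *p = allocator.Allocate(cache, size, alignment);
//   ...
//   allocator.Deallocate(cache, p);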

struct AsanThreadLocalMallocStorage {
  uptr quarantine_cache[16];
  AllocatorCache allocator_cache;
  void CommitBack();
 private:
  // These objects are allocated via mmap() and are zero-initialized.
  AsanThreadLocalMallocStorage() {}
};

void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                    AllocType alloc_type);
void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type);
void asan_delete(void *ptr, uptr size, uptr alignment,
                 BufferedStackTrace *stack, AllocType alloc_type);

void *asan_malloc(uptr size, BufferedStackTrace *stack);
void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack);
void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack);
void *asan_reallocarray(void *p, uptr nmemb, uptr size,
                        BufferedStackTrace *stack);
void *asan_valloc(uptr size, BufferedStackTrace *stack);
void *asan_pvalloc(uptr size, BufferedStackTrace *stack);

void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack);
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack);
uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp);

uptr asan_mz_size(const void *ptr);
void asan_mz_force_lock();
void asan_mz_force_unlock();

void PrintInternalAllocatorStats();
void AsanSoftRssLimitExceededCallback(bool exceeded);

}  // namespace __asan
#endif  // ASAN_ALLOCATOR_H
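
// Typical use of the asan_* entry points declared above (simplified sketch of
// the interceptor pattern used elsewhere in ASan, e.g. the asan_malloc_*
// files; not part of this header): the public malloc interceptor captures a
// stack trace and forwards to asan_malloc.
//
//   INTERCEPTOR(void *, malloc, uptr size) {
//     GET_STACK_TRACE_MALLOC;  // defines `stack`
//     return asan_malloc(size, &stack);
//   }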