Path: blob/main/contrib/llvm-project/compiler-rt/lib/msan/msan_allocator.cpp
//===-- msan_allocator.cpp -------------------------- ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// MemorySanitizer allocator.
//===----------------------------------------------------------------------===//

#include "msan_allocator.h"

#include "msan.h"
#include "msan_interface_internal.h"
#include "msan_origin.h"
#include "msan_poisoning.h"
#include "msan_thread.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"

namespace __msan {

struct Metadata {
  uptr requested_size;
};

struct MsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {}
  void OnMapSecondary(uptr p, uptr size, uptr user_begin,
                      uptr user_size) const {}
  void OnUnmap(uptr p, uptr size) const {
    __msan_unpoison((void *)p, size);

    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    uptr shadow_p = MEM_TO_SHADOW(p);
    ReleaseMemoryPagesToOS(shadow_p, shadow_p + size);
    if (__msan_get_track_origins()) {
      uptr origin_p = MEM_TO_ORIGIN(p);
      ReleaseMemoryPagesToOS(origin_p, origin_p + size);
    }
  }
};
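
// Illustrative note (not in the upstream file): MEM_TO_SHADOW / MEM_TO_ORIGIN
// are the platform-specific mappings defined in msan.h. A minimal sketch of
// the arithmetic OnUnmap relies on, assuming the Linux/x86_64 layout (shadow
// is an XOR mask, origin sits at a fixed offset from shadow); the address
// below is hypothetical:
#if 0  // Editor's sketch, kept out of the build.
uptr p = 0x700000001000ULL;                    // user chunk being unmapped
uptr shadow_p = p ^ 0x500000000000ULL;         // MEM_TO_SHADOW -> 0x200000001000
uptr origin_p = shadow_p + 0x100000000000ULL;  // SHADOW_TO_ORIGIN -> 0x300000001000
// Both [shadow_p, shadow_p + size) and [origin_p, origin_p + size) are
// returned to the OS alongside the user pages.
#endif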

// Note: to ensure that the allocator is compatible with the application memory
// layout (especially with high-entropy ASLR), kSpaceBeg and kSpaceSize must be
// duplicated as MappingDesc::ALLOCATOR in msan.h.
#if defined(__mips64)
static const uptr kMaxAllowedMallocSize = 2UL << 30;

struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = 20;
  using AddressSpaceView = LocalAddressSpaceView;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#elif defined(__x86_64__)
#if SANITIZER_NETBSD || SANITIZER_LINUX
static const uptr kAllocatorSpace = 0x700000000000ULL;
#else
static const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
static const uptr kMaxAllowedMallocSize = 1ULL << 40;

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;

#elif defined(__loongarch_lp64)
const uptr kAllocatorSpace = 0x700000000000ULL;
const uptr kMaxAllowedMallocSize = 8UL << 30;

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;

#elif defined(__powerpc64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0x300000000000;
  static const uptr kSpaceSize = 0x020000000000;  // 2T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__s390x__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0x440000000000;
  static const uptr kSpaceSize = 0x020000000000;  // 2T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 8UL << 30;

struct AP64 {
  static const uptr kSpaceBeg = 0xE00000000000ULL;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif
typedef CombinedAllocator<PrimaryAllocator> Allocator;
typedef Allocator::AllocatorCache AllocatorCache;

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static StaticSpinMutex fallback_mutex;

static uptr max_malloc_size;

void MsanAllocatorInit() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
  if (common_flags()->max_allocation_size_mb)
    max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
                          kMaxAllowedMallocSize);
  else
    max_malloc_size = kMaxAllowedMallocSize;
}
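
// Illustrative note (not in the upstream file): a worked example of the clamp
// above, assuming an x86_64 target where kMaxAllowedMallocSize is 1ULL << 40
// (1 TiB):
#if 0  // Editor's sketch, kept out of the build.
// With MSAN_OPTIONS=max_allocation_size_mb=512:
uptr cap = Min((uptr)512 << 20, (uptr)1 << 40);  // == 512 MiB
// Requests above `cap` fail the size check at the top of MsanAllocate below:
// they return null under allocator_may_return_null=1, or abort with a report
// otherwise.
#endif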

void LockAllocator() { allocator.ForceLock(); }

void UnlockAllocator() { allocator.ForceUnlock(); }

AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
}

void MsanThreadLocalMallocStorage::Init() {
  allocator.InitCache(GetAllocatorCache(this));
}

void MsanThreadLocalMallocStorage::CommitBack() {
  allocator.SwallowCache(GetAllocatorCache(this));
  allocator.DestroyCache(GetAllocatorCache(this));
}

static void *MsanAllocate(BufferedStackTrace *stack, uptr size, uptr alignment,
                          bool zeroise) {
  if (UNLIKELY(size > max_malloc_size)) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: MemorySanitizer failed to allocate 0x%zx bytes\n",
             size);
      return nullptr;
    }
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportAllocationSizeTooBig(size, max_malloc_size, stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportRssLimitExceeded(stack);
  }
  MsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportOutOfMemory(size, stack);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = size;
  if (zeroise) {
    if (allocator.FromPrimary(allocated))
      __msan_clear_and_unpoison(allocated, size);
    else
      __msan_unpoison(allocated, size);  // Mem is already zeroed.
  } else if (flags()->poison_in_malloc) {
    __msan_poison(allocated, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_ALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(allocated, size, o.raw_id());
    }
  }
  UnpoisonParam(2);
  RunMallocHooks(allocated, size);
  return allocated;
}
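
// Illustrative note (not in the upstream file): with poison_in_malloc=1 (the
// default), the __msan_poison() call above is what makes reads of fresh heap
// memory report errors. A hypothetical user-level repro:
#if 0  // Editor's sketch; compile with -fsanitize=memory.
#include <stdlib.h>
int main() {
  int *p = (int *)malloc(sizeof(int));
  return *p;  // MSan: use-of-uninitialized-value; with origin tracking, the
              // report points at this heap allocation via TAG_ALLOC above.
}
#endif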

void MsanDeallocate(BufferedStackTrace *stack, void *p) {
  CHECK(p);
  UnpoisonParam(1);
  RunFreeHooks(p);

  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
  uptr size = meta->requested_size;
  meta->requested_size = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned. The secondary allocator will unmap and unpoison by
  // MsanMapUnmapCallback, no need to poison it here.
  if (flags()->poison_in_free && allocator.FromPrimary(p)) {
    __msan_poison(p, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_DEALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(p, size, o.raw_id());
    }
  }
  MsanThread *t = GetCurrentThread();
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocator.Deallocate(cache, p);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, p);
  }
}

static void *MsanReallocate(BufferedStackTrace *stack, void *old_p,
                            uptr new_size, uptr alignment) {
  Metadata *meta = reinterpret_cast<Metadata*>(allocator.GetMetaData(old_p));
  uptr old_size = meta->requested_size;
  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
  if (new_size <= actually_allocated_size) {
    // We are not reallocating here.
    meta->requested_size = new_size;
    if (new_size > old_size) {
      if (flags()->poison_in_malloc) {
        stack->tag = StackTrace::TAG_ALLOC;
        PoisonMemory((char *)old_p + old_size, new_size - old_size, stack);
      }
    }
    return old_p;
  }
  uptr memcpy_size = Min(new_size, old_size);
  void *new_p = MsanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (new_p) {
    CopyMemory(new_p, old_p, memcpy_size, stack);
    MsanDeallocate(stack, old_p);
  }
  return new_p;
}

static void *MsanCalloc(BufferedStackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportCallocOverflow(nmemb, size, stack);
  }
  return MsanAllocate(stack, nmemb * size, sizeof(u64), true);
}

static const void *AllocationBegin(const void *p) {
  if (!p)
    return nullptr;
  void *beg = allocator.GetBlockBegin(p);
  if (!beg)
    return nullptr;
  Metadata *b = (Metadata *)allocator.GetMetaData(beg);
  if (!b)
    return nullptr;
  if (b->requested_size == 0)
    return nullptr;

  return (const void *)beg;
}

static uptr AllocationSize(const void *p) {
  if (!p) return 0;
  const void *beg = allocator.GetBlockBegin(p);
  if (beg != p) return 0;
  Metadata *b = (Metadata *)allocator.GetMetaData(p);
  return b->requested_size;
}

static uptr AllocationSizeFast(const void *p) {
  return reinterpret_cast<Metadata *>(allocator.GetMetaData(p))->requested_size;
}

void *msan_malloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
}

void *msan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(MsanCalloc(stack, nmemb, size));
}

void *msan_realloc(void *ptr, uptr size, BufferedStackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    MsanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(MsanReallocate(stack, ptr, size, sizeof(u64)));
}

void *msan_reallocarray(void *ptr, uptr nmemb, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return msan_realloc(ptr, nmemb * size, stack);
}
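
// Illustrative note (not in the upstream file): msan_realloc above handles
// the C-standard edge cases before delegating to MsanReallocate. A sketch,
// assuming a BufferedStackTrace *stack as the interceptors pass in:
#if 0  // Editor's sketch, kept out of the build.
void *p = msan_realloc(nullptr, 8, stack);  // no old pointer: plain malloc
p = msan_realloc(p, 4096, stack);           // may move; copies Min(old, new) bytes
p = msan_realloc(p, 0, stack);              // size 0: frees p, returns nullptr
#endif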

void *msan_valloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, GetPageSizeCached(), false));
}

void *msan_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(MsanAllocate(stack, size, PageSize, false));
}

void *msan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}

void *msan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}

int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = MsanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by MsanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

}  // namespace __msan

using namespace __msan;

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

const void *__sanitizer_get_allocated_begin(const void *p) {
  return AllocationBegin(p);
}

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }

uptr __sanitizer_get_allocated_size_fast(const void *p) {
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = AllocationSizeFast(p);
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}

void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); }
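
// Illustrative note (not in the upstream file): the __sanitizer_* entry points
// above back the public API declared in <sanitizer/allocator_interface.h>
// (__sanitizer_get_allocated_begin and the _fast variant need a recent
// compiler-rt). A minimal usage sketch from MSan-instrumented user code:
#if 0  // Editor's sketch; build with -fsanitize=memory.
#include <sanitizer/allocator_interface.h>
#include <stdio.h>
#include <stdlib.h>

int main() {
  char *p = (char *)malloc(64);
  // Ownership and size queries expect the exact chunk pointer (AllocationSize
  // returns 0 unless GetBlockBegin(p) == p)...
  printf("owned: %d size: %zu\n", __sanitizer_get_ownership(p),
         __sanitizer_get_allocated_size(p));
  // ...while __sanitizer_get_allocated_begin also resolves interior pointers.
  printf("begin ok: %d\n", __sanitizer_get_allocated_begin(p + 8) == p);
  printf("heap bytes: %zu\n", __sanitizer_get_current_allocated_bytes());
  free(p);
  return 0;
}
#endif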