Path: blob/main/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.cpp
//===-- hwasan_allocator.cpp ------------------------ ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// HWAddressSanitizer allocator.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_checks.h"
#include "hwasan_mapping.h"
#include "hwasan_malloc_bisect.h"
#include "hwasan_thread.h"
#include "hwasan_report.h"
#include "lsan/lsan_common.h"

namespace __hwasan {

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
static atomic_uint8_t hwasan_allocator_tagging_enabled;

static constexpr tag_t kFallbackAllocTag = 0xBB & kTagMask;
static constexpr tag_t kFallbackFreeTag = 0xBC;

enum {
  // Either just allocated by underlying allocator, but AsanChunk is not yet
  // ready, or almost returned to underlying allocator and AsanChunk is already
  // meaningless.
  CHUNK_INVALID = 0,
  // The chunk is allocated and not yet freed.
  CHUNK_ALLOCATED = 1,
};


// Initialized in HwasanAllocatorInit, and never changed.
alignas(16) static u8 tail_magic[kShadowAlignment - 1];
static uptr max_malloc_size;

bool HwasanChunkView::IsAllocated() const {
  return metadata_ && metadata_->IsAllocated();
}

uptr HwasanChunkView::Beg() const {
  return block_;
}
uptr HwasanChunkView::End() const {
  return Beg() + UsedSize();
}
uptr HwasanChunkView::UsedSize() const {
  return metadata_->GetRequestedSize();
}
u32 HwasanChunkView::GetAllocStackId() const {
  return metadata_->GetAllocStackId();
}

u32 HwasanChunkView::GetAllocThreadId() const {
  return metadata_->GetAllocThreadId();
}

uptr HwasanChunkView::ActualSize() const {
  return allocator.GetActuallyAllocatedSize(reinterpret_cast<void *>(block_));
}

bool HwasanChunkView::FromSmallHeap() const {
  return allocator.FromPrimary(reinterpret_cast<void *>(block_));
}

bool HwasanChunkView::AddrIsInside(uptr addr) const {
  return (addr >= Beg()) && (addr < Beg() + UsedSize());
}

inline void Metadata::SetAllocated(u32 stack, u64 size) {
  Thread *t = GetCurrentThread();
  u64 context = t ? t->unique_id() : kMainTid;
  context <<= 32;
  context += stack;
  requested_size_low = size & ((1ul << 32) - 1);
  requested_size_high = size >> 32;
  atomic_store(&alloc_context_id, context, memory_order_relaxed);
  atomic_store(&chunk_state, CHUNK_ALLOCATED, memory_order_release);
}

inline void Metadata::SetUnallocated() {
  atomic_store(&chunk_state, CHUNK_INVALID, memory_order_release);
  requested_size_low = 0;
  requested_size_high = 0;
  atomic_store(&alloc_context_id, 0, memory_order_relaxed);
}

inline bool Metadata::IsAllocated() const {
  return atomic_load(&chunk_state, memory_order_relaxed) == CHUNK_ALLOCATED;
}

inline u64 Metadata::GetRequestedSize() const {
  return (static_cast<u64>(requested_size_high) << 32) + requested_size_low;
}

inline u32 Metadata::GetAllocStackId() const {
  return atomic_load(&alloc_context_id, memory_order_relaxed);
}

inline u32 Metadata::GetAllocThreadId() const {
  u64 context = atomic_load(&alloc_context_id, memory_order_relaxed);
  u32 tid = context >> 32;
  return tid;
}

void GetAllocatorStats(AllocatorStatCounters s) {
  allocator.GetStats(s);
}

inline void Metadata::SetLsanTag(__lsan::ChunkTag tag) {
  lsan_tag = tag;
}

inline __lsan::ChunkTag Metadata::GetLsanTag() const {
  return static_cast<__lsan::ChunkTag>(lsan_tag);
}

uptr GetAliasRegionStart() {
#if defined(HWASAN_ALIASING_MODE)
  constexpr uptr kAliasRegionOffset = 1ULL << (kTaggableRegionCheckShift - 1);
  uptr AliasRegionStart =
      __hwasan_shadow_memory_dynamic_address + kAliasRegionOffset;

  CHECK_EQ(AliasRegionStart >> kTaggableRegionCheckShift,
           __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
  CHECK_EQ(
      (AliasRegionStart + kAliasRegionOffset - 1) >> kTaggableRegionCheckShift,
      __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
  return AliasRegionStart;
#else
  return 0;
#endif
}

void HwasanAllocatorInit() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
                       !flags()->disable_allocator_tagging);
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.InitLinkerInitialized(
      common_flags()->allocator_release_to_os_interval_ms,
      GetAliasRegionStart());
  for (uptr i = 0; i < sizeof(tail_magic); i++)
    tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
  if (common_flags()->max_allocation_size_mb) {
    max_malloc_size = common_flags()->max_allocation_size_mb << 20;
    max_malloc_size = Min(max_malloc_size, kMaxAllowedMallocSize);
  } else {
    max_malloc_size = kMaxAllowedMallocSize;
  }
}

void HwasanAllocatorLock() { allocator.ForceLock(); }

void HwasanAllocatorUnlock() { allocator.ForceUnlock(); }

void AllocatorThreadStart(AllocatorCache *cache) { allocator.InitCache(cache); }

void AllocatorThreadFinish(AllocatorCache *cache) {
  allocator.SwallowCache(cache);
  allocator.DestroyCache(cache);
}

static uptr TaggedSize(uptr size) {
  if (!size) size = 1;
  uptr new_size = RoundUpTo(size, kShadowAlignment);
  CHECK_GE(new_size, size);
  return new_size;
}

static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
                            bool zeroise) {
  // Keep this consistent with LSAN and ASAN behavior.
  if (UNLIKELY(orig_size == 0))
    orig_size = 1;
  if (UNLIKELY(orig_size > max_malloc_size)) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
             orig_size);
      return nullptr;
    }
    ReportAllocationSizeTooBig(orig_size, max_malloc_size, stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportRssLimitExceeded(stack);
  }

  alignment = Max(alignment, kShadowAlignment);
  uptr size = TaggedSize(orig_size);
  Thread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    allocated = allocator.Allocate(t->allocator_cache(), size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
  if (zeroise) {
    // The secondary allocator mmaps memory, which should be zero-inited so we
    // don't need to explicitly clear it.
    if (allocator.FromPrimary(allocated))
      internal_memset(allocated, 0, size);
  } else if (flags()->max_malloc_fill_size > 0) {
    uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size);
    internal_memset(allocated, flags()->malloc_fill_byte, fill_size);
  }
  if (size != orig_size) {
    u8 *tail = reinterpret_cast<u8 *>(allocated) + orig_size;
    uptr tail_length = size - orig_size;
    internal_memcpy(tail, tail_magic, tail_length - 1);
    // Short granule is excluded from magic tail, so we explicitly untag.
    tail[tail_length - 1] = 0;
  }

  void *user_ptr = allocated;
  if (InTaggableRegion(reinterpret_cast<uptr>(user_ptr)) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled) &&
      flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
    tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
    uptr tag_size = orig_size ? orig_size : 1;
    uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
    user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
    if (full_granule_size != tag_size) {
      u8 *short_granule = reinterpret_cast<u8 *>(allocated) + full_granule_size;
      TagMemoryAligned((uptr)short_granule, kShadowAlignment,
                       tag_size % kShadowAlignment);
      short_granule[kShadowAlignment - 1] = tag;
    }
  } else {
    // Tagging can not be completely skipped. If it's disabled, we need to tag
    // with zeros.
    user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, 0);
  }

  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
#if CAN_SANITIZE_LEAKS
  meta->SetLsanTag(__lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                  : __lsan::kDirectlyLeaked);
#endif
  meta->SetAllocated(StackDepotPut(*stack), orig_size);
  RunMallocHooks(user_ptr, orig_size);
  return user_ptr;
}

static bool PointerAndMemoryTagsMatch(void *tagged_ptr) {
  CHECK(tagged_ptr);
  uptr tagged_uptr = reinterpret_cast<uptr>(tagged_ptr);
  if (!InTaggableRegion(tagged_uptr))
    return true;
  tag_t mem_tag = *reinterpret_cast<tag_t *>(
      MemToShadow(reinterpret_cast<uptr>(UntagPtr(tagged_ptr))));
  return PossiblyShortTagMatches(mem_tag, tagged_uptr, 1);
}

static bool CheckInvalidFree(StackTrace *stack, void *untagged_ptr,
                             void *tagged_ptr) {
  // This function can return true if halt_on_error is false.
  if (!MemIsApp(reinterpret_cast<uptr>(untagged_ptr)) ||
      !PointerAndMemoryTagsMatch(tagged_ptr)) {
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
    return true;
  }
  return false;
}

static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
  CHECK(tagged_ptr);
  void *untagged_ptr = UntagPtr(tagged_ptr);

  if (RunFreeHooks(tagged_ptr))
    return;

  if (CheckInvalidFree(stack, untagged_ptr, tagged_ptr))
    return;

  void *aligned_ptr = reinterpret_cast<void *>(
      RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
  tag_t pointer_tag = GetTagFromPointer(reinterpret_cast<uptr>(tagged_ptr));
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
  if (!meta) {
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
    return;
  }

  uptr orig_size = meta->GetRequestedSize();
  u32 free_context_id = StackDepotPut(*stack);
  u32 alloc_context_id = meta->GetAllocStackId();
  u32 alloc_thread_id = meta->GetAllocThreadId();

  bool in_taggable_region =
      InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr));

  // Check tail magic.
  uptr tagged_size = TaggedSize(orig_size);
  if (flags()->free_checks_tail_magic && orig_size &&
      tagged_size != orig_size) {
    uptr tail_size = tagged_size - orig_size - 1;
    CHECK_LT(tail_size, kShadowAlignment);
    void *tail_beg = reinterpret_cast<void *>(
        reinterpret_cast<uptr>(aligned_ptr) + orig_size);
    tag_t short_granule_memtag = *(reinterpret_cast<tag_t *>(
        reinterpret_cast<uptr>(tail_beg) + tail_size));
    if (tail_size &&
        (internal_memcmp(tail_beg, tail_magic, tail_size) ||
         (in_taggable_region && pointer_tag != short_granule_memtag)))
      ReportTailOverwritten(stack, reinterpret_cast<uptr>(tagged_ptr),
                            orig_size, tail_magic);
  }

  // TODO(kstoimenov): consider meta->SetUnallocated(free_context_id).
  meta->SetUnallocated();
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  Thread *t = GetCurrentThread();
  if (flags()->max_free_fill_size > 0) {
    uptr fill_size =
        Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
    internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
  }
  if (in_taggable_region && flags()->tag_in_free && malloc_bisect(stack, 0) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled) &&
      allocator.FromPrimary(untagged_ptr) /* Secondary 0-tag and unmap.*/) {
    // Always store full 8-bit tags on free to maximize UAF detection.
    tag_t tag;
    if (t) {
      // Make sure we are not using a short granule tag as a poison tag. This
      // would make us attempt to read the memory on a UaF.
      // The tag can be zero if tagging is disabled on this thread.
      do {
        tag = t->GenerateRandomTag(/*num_bits=*/8);
      } while (
          UNLIKELY((tag < kShadowAlignment || tag == pointer_tag) && tag != 0));
    } else {
      static_assert(kFallbackFreeTag >= kShadowAlignment,
                    "fallback tag must not be a short granule tag.");
      tag = kFallbackFreeTag;
    }
    TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
                     tag);
  }
  if (t) {
    allocator.Deallocate(t->allocator_cache(), aligned_ptr);
    if (auto *ha = t->heap_allocations())
      ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_thread_id,
                alloc_context_id, free_context_id,
                static_cast<u32>(orig_size)});
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, aligned_ptr);
  }
}

static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old,
                              uptr new_size, uptr alignment) {
  void *untagged_ptr_old = UntagPtr(tagged_ptr_old);
  if (CheckInvalidFree(stack, untagged_ptr_old, tagged_ptr_old))
    return nullptr;
  void *tagged_ptr_new =
      HwasanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (tagged_ptr_old && tagged_ptr_new) {
    Metadata *meta =
        reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
    void *untagged_ptr_new = UntagPtr(tagged_ptr_new);
    internal_memcpy(untagged_ptr_new, untagged_ptr_old,
                    Min(new_size, static_cast<uptr>(meta->GetRequestedSize())));
    HwasanDeallocate(stack, tagged_ptr_old);
  }
  return tagged_ptr_new;
}

static void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, stack);
  }
  return HwasanAllocate(stack, nmemb * size, sizeof(u64), true);
}

HwasanChunkView FindHeapChunkByAddress(uptr address) {
  if (!allocator.PointerIsMine(reinterpret_cast<void *>(address)))
    return HwasanChunkView();
  void *block = allocator.GetBlockBegin(reinterpret_cast<void*>(address));
  if (!block)
    return HwasanChunkView();
  Metadata *metadata =
      reinterpret_cast<Metadata*>(allocator.GetMetaData(block));
  return HwasanChunkView(reinterpret_cast<uptr>(block), metadata);
}

static const void *AllocationBegin(const void *p) {
  const void *untagged_ptr = UntagPtr(p);
  if (!untagged_ptr)
    return nullptr;

  const void *beg = allocator.GetBlockBegin(untagged_ptr);
  if (!beg)
    return nullptr;

  Metadata *b = (Metadata *)allocator.GetMetaData(beg);
  if (b->GetRequestedSize() == 0)
    return nullptr;

  tag_t tag = GetTagFromPointer((uptr)p);
  return (const void *)AddTagToPointer((uptr)beg, tag);
}

static uptr AllocationSize(const void *p) {
  const void *untagged_ptr = UntagPtr(p);
  if (!untagged_ptr) return 0;
  const void *beg = allocator.GetBlockBegin(untagged_ptr);
  if (!beg)
    return 0;
  Metadata *b = (Metadata *)allocator.GetMetaData(beg);
  return b->GetRequestedSize();
}

static uptr AllocationSizeFast(const void *p) {
  const void *untagged_ptr = UntagPtr(p);
  void *aligned_ptr = reinterpret_cast<void *>(
      RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
  return meta->GetRequestedSize();
}

void *hwasan_malloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
}

void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanCalloc(stack, nmemb, size));
}

void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    HwasanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(HwasanReallocate(stack, ptr, size, sizeof(u64)));
}

void *hwasan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return hwasan_realloc(ptr, nmemb * size, stack);
}

void *hwasan_valloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(
      HwasanAllocate(stack, size, GetPageSizeCached(), false));
}

void *hwasan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(HwasanAllocate(stack, size, PageSize, false));
}

void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
                          StackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = HwasanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by HwasanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

void hwasan_free(void *ptr, StackTrace *stack) {
  return HwasanDeallocate(stack, ptr);
}

}  // namespace __hwasan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {

void LockAllocator() {
  __hwasan::HwasanAllocatorLock();
}

void UnlockAllocator() {
  __hwasan::HwasanAllocatorUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__hwasan::allocator;
  *end = *begin + sizeof(__hwasan::allocator);
}

uptr PointsIntoChunk(void *p) {
  p = UntagPtr(p);
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk =
      reinterpret_cast<uptr>(__hwasan::allocator.GetBlockBeginFastLocked(p));
  if (!chunk)
    return 0;
  __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
      __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk)));
  if (!metadata || !metadata->IsAllocated())
    return 0;
  if (addr < chunk + metadata->GetRequestedSize())
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, metadata->GetRequestedSize(), addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  CHECK_EQ(UntagAddr(chunk), chunk);
  void *block = __hwasan::allocator.GetBlockBeginFastLocked(
      reinterpret_cast<void *>(chunk));
  if (!block)
    return 0;
  __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
      __hwasan::allocator.GetMetaData(block));
  if (!metadata || !metadata->IsAllocated())
    return 0;

  return reinterpret_cast<uptr>(block);
}

uptr GetUserAddr(uptr chunk) {
  if (!InTaggableRegion(chunk))
    return chunk;
  tag_t mem_tag = *(tag_t *)__hwasan::MemToShadow(chunk);
  return AddTagToPointer(chunk, mem_tag);
}

LsanMetadata::LsanMetadata(uptr chunk) {
  CHECK_EQ(UntagAddr(chunk), chunk);
  metadata_ =
      chunk ? __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk))
            : nullptr;
}

bool LsanMetadata::allocated() const {
  if (!metadata_)
    return false;
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->IsAllocated();
}

ChunkTag LsanMetadata::tag() const {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->GetLsanTag();
}

void LsanMetadata::set_tag(ChunkTag value) {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  m->SetLsanTag(value);
}

uptr LsanMetadata::requested_size() const {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->GetRequestedSize();
}

u32 LsanMetadata::stack_trace_id() const {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->GetAllocStackId();
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __hwasan::allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObject(const void *p) {
  p = UntagPtr(p);
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(__hwasan::allocator.GetBlockBegin(p));
  if (!chunk)
    return kIgnoreObjectInvalid;
  __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
      __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk)));
  if (!metadata || !metadata->IsAllocated())
    return kIgnoreObjectInvalid;
  if (addr >= chunk + metadata->GetRequestedSize())
    return kIgnoreObjectInvalid;
  if (metadata->GetLsanTag() == kIgnored)
    return kIgnoreObjectAlreadyIgnored;

  metadata->SetLsanTag(kIgnored);
  return kIgnoreObjectSuccess;
}

}  // namespace __lsan

using namespace __hwasan;

void __hwasan_enable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 1);
}

void __hwasan_disable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 0);
}

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

const void *__sanitizer_get_allocated_begin(const void *p) {
  return AllocationBegin(p);
}

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }

uptr __sanitizer_get_allocated_size_fast(const void *p) {
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = AllocationSizeFast(p);
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}

void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); }