Path: blob/main/contrib/llvm-project/compiler-rt/lib/asan/asan_poisoning.cpp
//===-- asan_poisoning.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Shadow memory poisoning by ASan RTL and by user application.
//===----------------------------------------------------------------------===//

#include "asan_poisoning.h"

#include "asan_report.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"

namespace __asan {

static atomic_uint8_t can_poison_memory;

void SetCanPoisonMemory(bool value) {
  atomic_store(&can_poison_memory, value, memory_order_release);
}

bool CanPoisonMemory() {
  return atomic_load(&can_poison_memory, memory_order_acquire);
}

void PoisonShadow(uptr addr, uptr size, u8 value) {
  if (value && !CanPoisonMemory()) return;
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  CHECK(AddrIsAlignedByGranularity(addr + size));
  CHECK(AddrIsInMem(addr + size - ASAN_SHADOW_GRANULARITY));
  CHECK(REAL(memset));
  FastPoisonShadow(addr, size, value);
}

void PoisonShadowPartialRightRedzone(uptr addr,
                                     uptr size,
                                     uptr redzone_size,
                                     u8 value) {
  if (!CanPoisonMemory()) return;
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  FastPoisonShadowPartialRightRedzone(addr, size, redzone_size, value);
}

struct ShadowSegmentEndpoint {
  u8 *chunk;
  s8 offset;  // in [0, ASAN_SHADOW_GRANULARITY)
  s8 value;   // = *chunk;

  explicit ShadowSegmentEndpoint(uptr address) {
    chunk = (u8 *)MemToShadow(address);
    offset = address & (ASAN_SHADOW_GRANULARITY - 1);
    value = *chunk;
  }
};
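
// Illustrative sketch (assumes the default 8-byte shadow granularity and the
// documented mapping Shadow = (Mem >> 3) + Offset, see
// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm):
//
//   ShadowSegmentEndpoint e(0x10007);
//   // e.chunk  -> shadow byte covering application bytes [0x10000, 0x10008)
//   // e.offset == 0x10007 & 7 == 7 (last byte of the granule)
//   // e.value  == 0 if all 8 bytes are addressable, k in [1,7] if only the
//   //             first k bytes are, negative if the granule is poisoned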
void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
  uptr end = ptr + size;
  if (Verbosity()) {
    Printf("__asan_%spoison_intra_object_redzone [%p,%p) %zd\n",
           poison ? "" : "un", (void *)ptr, (void *)end, size);
    if (Verbosity() >= 2)
      PRINT_CURRENT_STACK();
  }
  CHECK(size);
  CHECK_LE(size, 4096);
  CHECK(IsAligned(end, ASAN_SHADOW_GRANULARITY));
  if (!IsAligned(ptr, ASAN_SHADOW_GRANULARITY)) {
    *(u8 *)MemToShadow(ptr) =
        poison ? static_cast<u8>(ptr % ASAN_SHADOW_GRANULARITY) : 0;
    ptr |= ASAN_SHADOW_GRANULARITY - 1;
    ptr++;
  }
  for (; ptr < end; ptr += ASAN_SHADOW_GRANULARITY)
    *(u8 *)MemToShadow(ptr) = poison ? kAsanIntraObjectRedzone : 0;
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;

// The current implementation of __asan_(un)poison_memory_region doesn't check
// that the user program (un)poisons the memory it owns. It poisons memory
// conservatively, and unpoisons progressively, to make sure the ASan shadow
// mapping invariant is preserved (see the detailed mapping description at
// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm):
//
// * if the user asks to poison the region [left, right), the program poisons
//   at least [left, AlignDown(right)).
// * if the user asks to unpoison the region [left, right), the program
//   unpoisons at most [AlignDown(left), right).
void __asan_poison_memory_region(void const volatile *addr, uptr size) {
  if (!flags()->allow_user_poisoning || size == 0) return;
  uptr beg_addr = (uptr)addr;
  uptr end_addr = beg_addr + size;
  VPrintf(3, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr,
          (void *)end_addr);
  ShadowSegmentEndpoint beg(beg_addr);
  ShadowSegmentEndpoint end(end_addr);
  if (beg.chunk == end.chunk) {
    CHECK_LT(beg.offset, end.offset);
    s8 value = beg.value;
    CHECK_EQ(value, end.value);
    // We can only poison memory if the byte in end.offset is unaddressable.
    // No need to re-poison memory if it is poisoned already.
    if (value > 0 && value <= end.offset) {
      if (beg.offset > 0) {
        *beg.chunk = Min(value, beg.offset);
      } else {
        *beg.chunk = kAsanUserPoisonedMemoryMagic;
      }
    }
    return;
  }
  CHECK_LT(beg.chunk, end.chunk);
  if (beg.offset > 0) {
    // Mark bytes from beg.offset as unaddressable.
    if (beg.value == 0) {
      *beg.chunk = beg.offset;
    } else {
      *beg.chunk = Min(beg.value, beg.offset);
    }
    beg.chunk++;
  }
  REAL(memset)(beg.chunk, kAsanUserPoisonedMemoryMagic, end.chunk - beg.chunk);
  // Poison if the byte in end.offset is unaddressable.
  if (end.value > 0 && end.value <= end.offset) {
    *end.chunk = kAsanUserPoisonedMemoryMagic;
  }
}

void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
  if (!flags()->allow_user_poisoning || size == 0) return;
  uptr beg_addr = (uptr)addr;
  uptr end_addr = beg_addr + size;
  VPrintf(3, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr,
          (void *)end_addr);
  ShadowSegmentEndpoint beg(beg_addr);
  ShadowSegmentEndpoint end(end_addr);
  if (beg.chunk == end.chunk) {
    CHECK_LT(beg.offset, end.offset);
    s8 value = beg.value;
    CHECK_EQ(value, end.value);
    // We unpoison memory bytes up to end.offset if they are not
    // unpoisoned already.
    if (value != 0) {
      *beg.chunk = Max(value, end.offset);
    }
    return;
  }
  CHECK_LT(beg.chunk, end.chunk);
  REAL(memset)(beg.chunk, 0, end.chunk - beg.chunk);
  if (end.offset > 0 && end.value != 0) {
    *end.chunk = Max(end.value, end.offset);
  }
}
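
// Illustrative sketch of how user code typically drives these two entry
// points, e.g. from a custom allocator. Assumes the program is built with
// ASan, includes <sanitizer/asan_interface.h>, p is granule-aligned (heap
// allocations are), and allow_user_poisoning is enabled (the default):
//
//   char *p = static_cast<char *>(malloc(64));
//   __asan_poison_memory_region(p, 64);        // whole buffer unaddressable
//   // __asan_address_is_poisoned(p + 8) == 1 here
//   __asan_unpoison_memory_region(p + 8, 16);  // re-enable [p+8, p+24)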
int __asan_address_is_poisoned(void const volatile *addr) {
  return __asan::AddressIsPoisoned((uptr)addr);
}

uptr __asan_region_is_poisoned(uptr beg, uptr size) {
  if (!size)
    return 0;
  uptr end = beg + size;
  if (!AddrIsInMem(beg))
    return beg;
  if (!AddrIsInMem(end))
    return end;
  CHECK_LT(beg, end);
  uptr aligned_b = RoundUpTo(beg, ASAN_SHADOW_GRANULARITY);
  uptr aligned_e = RoundDownTo(end, ASAN_SHADOW_GRANULARITY);
  uptr shadow_beg = MemToShadow(aligned_b);
  uptr shadow_end = MemToShadow(aligned_e);
  // First check the first and the last application bytes,
  // then check the ASAN_SHADOW_GRANULARITY-aligned region by calling
  // mem_is_zero on the corresponding shadow.
  if (!__asan::AddressIsPoisoned(beg) && !__asan::AddressIsPoisoned(end - 1) &&
      (shadow_end <= shadow_beg ||
       __sanitizer::mem_is_zero((const char *)shadow_beg,
                                shadow_end - shadow_beg)))
    return 0;
  // The fast check failed, so we have a poisoned byte somewhere.
  // Find it slowly.
  for (; beg < end; beg++)
    if (__asan::AddressIsPoisoned(beg))
      return beg;
  UNREACHABLE("mem_is_zero returned false, but poisoned byte was not found");
  return 0;
}

#define CHECK_SMALL_REGION(p, size, isWrite)                        \
  do {                                                              \
    uptr __p = reinterpret_cast<uptr>(p);                           \
    uptr __size = size;                                             \
    if (UNLIKELY(__asan::AddressIsPoisoned(__p) ||                  \
                 __asan::AddressIsPoisoned(__p + __size - 1))) {    \
      GET_CURRENT_PC_BP_SP;                                         \
      uptr __bad = __asan_region_is_poisoned(__p, __size);          \
      __asan_report_error(pc, bp, sp, __bad, isWrite, __size, 0);   \
    }                                                               \
  } while (false)

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u16 __sanitizer_unaligned_load16(const uu16 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u32 __sanitizer_unaligned_load32(const uu32 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u64 __sanitizer_unaligned_load64(const uu64 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store16(uu16 *p, u16 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store32(uu32 *p, u32 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store64(uu64 *p, u64 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_poison_cxx_array_cookie(uptr p) {
  if (SANITIZER_WORDSIZE != 64) return;
  if (!flags()->poison_array_cookie) return;
  uptr s = MEM_TO_SHADOW(p);
  *reinterpret_cast<u8 *>(s) = kAsanArrayCookieMagic;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __asan_load_cxx_array_cookie(uptr *p) {
  if (SANITIZER_WORDSIZE != 64) return *p;
  if (!flags()->poison_array_cookie) return *p;
  uptr s = MEM_TO_SHADOW(reinterpret_cast<uptr>(p));
  u8 sval = *reinterpret_cast<u8 *>(s);
  if (sval == kAsanArrayCookieMagic) return *p;
  // If sval is not kAsanArrayCookieMagic it can only be freed memory,
  // which means that we are going to get a double-free. So, return 0 to avoid
  // an infinite loop of destructors. We don't want to report a double-free
  // here though, so print a warning just in case.
  // CHECK_EQ(sval, kAsanHeapFreeMagic);
  if (sval == kAsanHeapFreeMagic) {
    Report("AddressSanitizer: loaded array cookie from free-d memory; "
           "expect a double-free report\n");
    return 0;
  }
  // The cookie may remain unpoisoned if e.g. it comes from a custom
  // operator new defined inside a class.
  return *p;
}
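
// Illustrative sketch of the pattern these cookie hooks serve on 64-bit
// targets when the poison_array_cookie flag is on. For a type with a
// non-trivial destructor, the Itanium C++ ABI stores the element count (the
// "cookie") just before the array:
//
//   struct S { ~S(); };
//   S *a = new S[5];  // cookie 5 stored before a; its shadow byte becomes
//                     // kAsanArrayCookieMagic
//   delete[] a;       // count re-read via __asan_load_cxx_array_cookie
//
// A stray user-code load of the cookie word hits the poisoned shadow, while
// the runtime's own load goes through the hook above.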
// This is a simplified version of __asan_(un)poison_memory_region, which
// assumes that the left border of the region to be poisoned is properly
// aligned.
static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
  if (size == 0) return;
  uptr aligned_size = size & ~(ASAN_SHADOW_GRANULARITY - 1);
  PoisonShadow(addr, aligned_size,
               do_poison ? kAsanStackUseAfterScopeMagic : 0);
  if (size == aligned_size)
    return;
  s8 end_offset = (s8)(size - aligned_size);
  s8 *shadow_end = (s8 *)MemToShadow(addr + aligned_size);
  s8 end_value = *shadow_end;
  if (do_poison) {
    // If possible, mark all the bytes mapping to the last shadow byte as
    // unaddressable.
    if (end_value > 0 && end_value <= end_offset)
      *shadow_end = (s8)kAsanStackUseAfterScopeMagic;
  } else {
    // If necessary, mark the first few bytes mapping to the last shadow byte
    // as addressable.
    if (end_value != 0)
      *shadow_end = Max(end_value, end_offset);
  }
}

void __asan_set_shadow_00(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0, size);
}

void __asan_set_shadow_01(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x01, size);
}

void __asan_set_shadow_02(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x02, size);
}

void __asan_set_shadow_03(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x03, size);
}

void __asan_set_shadow_04(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x04, size);
}

void __asan_set_shadow_05(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x05, size);
}

void __asan_set_shadow_06(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x06, size);
}

void __asan_set_shadow_07(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x07, size);
}

void __asan_set_shadow_f1(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf1, size);
}

void __asan_set_shadow_f2(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf2, size);
}

void __asan_set_shadow_f3(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf3, size);
}

void __asan_set_shadow_f5(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf5, size);
}

void __asan_set_shadow_f8(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf8, size);
}

void __asan_poison_stack_memory(uptr addr, uptr size) {
  VReport(1, "poisoning: %p %zx\n", (void *)addr, size);
  PoisonAlignedStackMemory(addr, size, true);
}

void __asan_unpoison_stack_memory(uptr addr, uptr size) {
  VReport(1, "unpoisoning: %p %zx\n", (void *)addr, size);
  PoisonAlignedStackMemory(addr, size, false);
}
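
// Illustrative sketch of the source pattern whose instrumentation (with
// use-after-scope detection enabled) drives the stack entry points above.
// The 0xf1/0xf2/0xf3 values are the stack redzone magics, 0xf5 is
// stack-after-return, and 0xf8 is kAsanStackUseAfterScopeMagic:
//
//   int *p;
//   {
//     int x;  // lifetime begins: shadow for x is cleared to 0x00
//     p = &x;
//   }         // lifetime ends: shadow set to 0xf8
//   *p = 1;   // ASan reports a stack-use-after-scope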
static void FixUnalignedStorage(uptr storage_beg, uptr storage_end,
                                uptr &old_beg, uptr &old_end, uptr &new_beg,
                                uptr &new_end) {
  constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
  if (UNLIKELY(!AddrIsAlignedByGranularity(storage_end))) {
    uptr end_down = RoundDownTo(storage_end, granularity);
    // Ignore the last unaligned granule if the storage is followed by an
    // unpoisoned byte, because we can't poison the prefix anyway. Don't call
    // AddressIsPoisoned at all if the container changes do not affect the
    // last granule.
    if ((((old_end != new_end) && Max(old_end, new_end) > end_down) ||
         ((old_beg != new_beg) && Max(old_beg, new_beg) > end_down)) &&
        !AddressIsPoisoned(storage_end)) {
      old_beg = Min(end_down, old_beg);
      old_end = Min(end_down, old_end);
      new_beg = Min(end_down, new_beg);
      new_end = Min(end_down, new_end);
    }
  }

  // Handle a misaligned begin and cut it off.
  if (UNLIKELY(!AddrIsAlignedByGranularity(storage_beg))) {
    uptr beg_up = RoundUpTo(storage_beg, granularity);
    // The first unaligned granule needs special handling only if we had bytes
    // there before and will have none after.
    if ((new_beg == new_end || new_beg >= beg_up) && old_beg != old_end &&
        old_beg < beg_up) {
      // Keep the granule prefix outside of the storage unpoisoned.
      uptr beg_down = RoundDownTo(storage_beg, granularity);
      *(u8 *)MemToShadow(beg_down) = storage_beg - beg_down;
      old_beg = Max(beg_up, old_beg);
      old_end = Max(beg_up, old_end);
      new_beg = Max(beg_up, new_beg);
      new_end = Max(beg_up, new_end);
    }
  }
}

void __sanitizer_annotate_contiguous_container(const void *beg_p,
                                               const void *end_p,
                                               const void *old_mid_p,
                                               const void *new_mid_p) {
  if (!flags()->detect_container_overflow)
    return;
  VPrintf(2, "contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p,
          new_mid_p);
  uptr storage_beg = reinterpret_cast<uptr>(beg_p);
  uptr storage_end = reinterpret_cast<uptr>(end_p);
  uptr old_end = reinterpret_cast<uptr>(old_mid_p);
  uptr new_end = reinterpret_cast<uptr>(new_mid_p);
  uptr old_beg = storage_beg;
  uptr new_beg = storage_beg;
  uptr granularity = ASAN_SHADOW_GRANULARITY;
  if (!(storage_beg <= old_end && storage_beg <= new_end &&
        old_end <= storage_end && new_end <= storage_end)) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportBadParamsToAnnotateContiguousContainer(storage_beg, storage_end,
                                                 old_end, new_end, &stack);
  }
  CHECK_LE(storage_end - storage_beg,
           FIRST_32_SECOND_64(1UL << 30, 1ULL << 40));  // Sanity check.

  if (old_end == new_end)
    return;  // Nothing to do here.

  FixUnalignedStorage(storage_beg, storage_end, old_beg, old_end, new_beg,
                      new_end);

  uptr a = RoundDownTo(Min(old_end, new_end), granularity);
  uptr c = RoundUpTo(Max(old_end, new_end), granularity);
  uptr d1 = RoundDownTo(old_end, granularity);
  // uptr d2 = RoundUpTo(old_mid, granularity);
  // Currently we should be in this state:
  //   [a, d1) is good, [d2, c) is bad, [d1, d2) is partially good.
  // Make a quick sanity check that we are indeed in this state.
  //
  // FIXME: Two of these three checks are disabled until we fix
  // https://github.com/google/sanitizers/issues/258.
  // if (d1 != d2)
  //   DCHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
  //
  // NOTE: curly brackets for the "if" below to silence a MSVC warning.
  if (a + granularity <= d1) {
    DCHECK_EQ(*(u8 *)MemToShadow(a), 0);
  }
  // if (d2 + granularity <= c && c <= end)
  //   DCHECK_EQ(*(u8 *)MemToShadow(c - granularity),
  //             kAsanContiguousContainerOOBMagic);

  uptr b1 = RoundDownTo(new_end, granularity);
  uptr b2 = RoundUpTo(new_end, granularity);
  // New state:
  //   [a, b1) is good, [b2, c) is bad, [b1, b2) is partially good.
  if (b1 > a)
    PoisonShadow(a, b1 - a, 0);
  else if (c > b2)
    PoisonShadow(b2, c - b2, kAsanContiguousContainerOOBMagic);
  if (b1 != b2) {
    CHECK_EQ(b2 - b1, granularity);
    *(u8 *)MemToShadow(b1) = static_cast<u8>(new_end - b1);
  }
}
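
// Illustrative sketch of how a vector-like container announces a size change
// (hypothetical 8-byte-aligned buffer data of capacity 8, size growing from
// 4 to 6):
//
//   // before: [data, data+4) addressable, [data+4, data+8) poisoned
//   __sanitizer_annotate_contiguous_container(data, data + 8,
//                                             data + 4, data + 6);
//   // after:  [data, data+6) addressable, [data+6, data+8) poisoned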
// Annotates a double-ended contiguous memory area, like std::deque's chunk.
// It allows detecting buggy accesses to allocated but not yet used beginning
// or end items of such a container.
void __sanitizer_annotate_double_ended_contiguous_container(
    const void *storage_beg_p, const void *storage_end_p,
    const void *old_container_beg_p, const void *old_container_end_p,
    const void *new_container_beg_p, const void *new_container_end_p) {
  if (!flags()->detect_container_overflow)
    return;

  VPrintf(2, "contiguous_container: %p %p %p %p %p %p\n", storage_beg_p,
          storage_end_p, old_container_beg_p, old_container_end_p,
          new_container_beg_p, new_container_end_p);

  uptr storage_beg = reinterpret_cast<uptr>(storage_beg_p);
  uptr storage_end = reinterpret_cast<uptr>(storage_end_p);
  uptr old_beg = reinterpret_cast<uptr>(old_container_beg_p);
  uptr old_end = reinterpret_cast<uptr>(old_container_end_p);
  uptr new_beg = reinterpret_cast<uptr>(new_container_beg_p);
  uptr new_end = reinterpret_cast<uptr>(new_container_end_p);

  constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;

  if (!(old_beg <= old_end && new_beg <= new_end) ||
      !(storage_beg <= new_beg && new_end <= storage_end) ||
      !(storage_beg <= old_beg && old_end <= storage_end)) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportBadParamsToAnnotateDoubleEndedContiguousContainer(
        storage_beg, storage_end, old_beg, old_end, new_beg, new_end, &stack);
  }
  CHECK_LE(storage_end - storage_beg,
           FIRST_32_SECOND_64(1UL << 30, 1ULL << 40));  // Sanity check.

  if ((old_beg == old_end && new_beg == new_end) ||
      (old_beg == new_beg && old_end == new_end))
    return;  // Nothing to do here.

  FixUnalignedStorage(storage_beg, storage_end, old_beg, old_end, new_beg,
                      new_end);

  // Handle non-intersecting new/old containers separately to have a simpler
  // intersecting case.
  if (old_beg == old_end || new_beg == new_end || new_end <= old_beg ||
      old_end <= new_beg) {
    if (old_beg != old_end) {
      // Poison the old container.
      uptr a = RoundDownTo(old_beg, granularity);
      uptr b = RoundUpTo(old_end, granularity);
      PoisonShadow(a, b - a, kAsanContiguousContainerOOBMagic);
    }

    if (new_beg != new_end) {
      // Unpoison the new container.
      uptr a = RoundDownTo(new_beg, granularity);
      uptr b = RoundDownTo(new_end, granularity);
      PoisonShadow(a, b - a, 0);
      if (!AddrIsAlignedByGranularity(new_end))
        *(u8 *)MemToShadow(b) = static_cast<u8>(new_end - b);
    }

    return;
  }

  // The intersection of the old and new containers is not empty.
  CHECK_LT(new_beg, old_end);
  CHECK_GT(new_end, old_beg);

  if (new_beg < old_beg) {
    // Round down because we can't poison prefixes.
    uptr a = RoundDownTo(new_beg, granularity);
    // Round down and ignore [c, old_beg), as its state is defined by the
    // unchanged [old_beg, old_end).
    uptr c = RoundDownTo(old_beg, granularity);
    PoisonShadow(a, c - a, 0);
  } else if (new_beg > old_beg) {
    // Round down and poison [a, old_beg), because it was unpoisoned only as
    // a prefix.
    uptr a = RoundDownTo(old_beg, granularity);
    // Round down and ignore [c, new_beg), as its state is defined by the
    // unchanged [new_beg, old_end).
    uptr c = RoundDownTo(new_beg, granularity);

    PoisonShadow(a, c - a, kAsanContiguousContainerOOBMagic);
  }

  if (new_end > old_end) {
    // Round down to poison the prefix.
    uptr a = RoundDownTo(old_end, granularity);
    // Round down and handle the remainder below.
    uptr c = RoundDownTo(new_end, granularity);
    PoisonShadow(a, c - a, 0);
    if (!AddrIsAlignedByGranularity(new_end))
      *(u8 *)MemToShadow(c) = static_cast<u8>(new_end - c);
  } else if (new_end < old_end) {
    // Round up and handle the remainder below.
    uptr a2 = RoundUpTo(new_end, granularity);
    // Round up to poison the entire granule, as we had nothing in
    // [old_end, c2).
    uptr c2 = RoundUpTo(old_end, granularity);
    PoisonShadow(a2, c2 - a2, kAsanContiguousContainerOOBMagic);

    if (!AddrIsAlignedByGranularity(new_end)) {
      uptr a = RoundDownTo(new_end, granularity);
      *(u8 *)MemToShadow(a) = static_cast<u8>(new_end - a);
    }
  }
}
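
// Illustrative sketch (hypothetical 8-byte-aligned chunk of 16 char-sized
// slots): a deque-like container whose live range shifts from [0, 8) to
// [8, 16) would announce the change as:
//
//   __sanitizer_annotate_double_ended_contiguous_container(
//       chunk, chunk + 16,       // storage bounds
//       chunk + 0, chunk + 8,    // old live range
//       chunk + 8, chunk + 16);  // new live range
//   // afterwards [chunk, chunk+8) is poisoned, [chunk+8, chunk+16) is not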
static const void *FindBadAddress(uptr begin, uptr end, bool poisoned) {
  CHECK_LE(begin, end);
  constexpr uptr kMaxRangeToCheck = 32;
  if (end - begin > kMaxRangeToCheck * 2) {
    if (auto *bad = FindBadAddress(begin, begin + kMaxRangeToCheck, poisoned))
      return bad;
    if (auto *bad = FindBadAddress(end - kMaxRangeToCheck, end, poisoned))
      return bad;
  }

  for (uptr i = begin; i < end; ++i)
    if (AddressIsPoisoned(i) != poisoned)
      return reinterpret_cast<const void *>(i);
  return nullptr;
}

const void *__sanitizer_contiguous_container_find_bad_address(
    const void *beg_p, const void *mid_p, const void *end_p) {
  if (!flags()->detect_container_overflow)
    return nullptr;
  uptr granularity = ASAN_SHADOW_GRANULARITY;
  uptr beg = reinterpret_cast<uptr>(beg_p);
  uptr end = reinterpret_cast<uptr>(end_p);
  uptr mid = reinterpret_cast<uptr>(mid_p);
  CHECK_LE(beg, mid);
  CHECK_LE(mid, end);
  // If the byte after the storage is unpoisoned, everything in the granule
  // before must stay unpoisoned.
  uptr annotations_end =
      (!AddrIsAlignedByGranularity(end) && !AddressIsPoisoned(end))
          ? RoundDownTo(end, granularity)
          : end;
  beg = Min(beg, annotations_end);
  mid = Min(mid, annotations_end);
  if (auto *bad = FindBadAddress(beg, mid, false))
    return bad;
  if (auto *bad = FindBadAddress(mid, annotations_end, true))
    return bad;
  return FindBadAddress(annotations_end, end, false);
}

int __sanitizer_verify_contiguous_container(const void *beg_p,
                                            const void *mid_p,
                                            const void *end_p) {
  return __sanitizer_contiguous_container_find_bad_address(beg_p, mid_p,
                                                           end_p) == nullptr;
}
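
// Illustrative sketch (hypothetical annotated vector v): a container test can
// assert that the shadow annotations match the container's logical state:
//
//   assert(__sanitizer_verify_contiguous_container(
//       v.data(),                    // storage begin
//       v.data() + v.size(),         // end of addressable prefix
//       v.data() + v.capacity()));   // storage end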
const void *__sanitizer_double_ended_contiguous_container_find_bad_address(
    const void *storage_beg_p, const void *container_beg_p,
    const void *container_end_p, const void *storage_end_p) {
  if (!flags()->detect_container_overflow)
    return nullptr;
  uptr granularity = ASAN_SHADOW_GRANULARITY;
  uptr storage_beg = reinterpret_cast<uptr>(storage_beg_p);
  uptr storage_end = reinterpret_cast<uptr>(storage_end_p);
  uptr beg = reinterpret_cast<uptr>(container_beg_p);
  uptr end = reinterpret_cast<uptr>(container_end_p);

  // The prefix of the first granule of the container is unpoisoned.
  if (beg != end)
    beg = Max(storage_beg, RoundDownTo(beg, granularity));

  // If the byte after the storage is unpoisoned, the prefix of the last
  // granule is unpoisoned.
  uptr annotations_end = (!AddrIsAlignedByGranularity(storage_end) &&
                          !AddressIsPoisoned(storage_end))
                             ? RoundDownTo(storage_end, granularity)
                             : storage_end;
  storage_beg = Min(storage_beg, annotations_end);
  beg = Min(beg, annotations_end);
  end = Min(end, annotations_end);

  if (auto *bad = FindBadAddress(storage_beg, beg, true))
    return bad;
  if (auto *bad = FindBadAddress(beg, end, false))
    return bad;
  if (auto *bad = FindBadAddress(end, annotations_end, true))
    return bad;
  return FindBadAddress(annotations_end, storage_end, false);
}

int __sanitizer_verify_double_ended_contiguous_container(
    const void *storage_beg_p, const void *container_beg_p,
    const void *container_end_p, const void *storage_end_p) {
  return __sanitizer_double_ended_contiguous_container_find_bad_address(
             storage_beg_p, container_beg_p, container_end_p, storage_end_p) ==
         nullptr;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_poison_intra_object_redzone(uptr ptr, uptr size) {
  AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, true);
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_unpoison_intra_object_redzone(uptr ptr, uptr size) {
  AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, false);
}

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
bool WordIsPoisoned(uptr addr) {
  return (__asan_region_is_poisoned(addr, sizeof(uptr)) != 0);
}
}  // namespace __lsan