GitHub Repository: freebsd/freebsd-src
Path: blob/main/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.cpp
//===-- hwasan_allocator.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// HWAddressSanitizer allocator.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
15
#include "sanitizer_common/sanitizer_errno.h"
16
#include "sanitizer_common/sanitizer_stackdepot.h"
17
#include "hwasan.h"
18
#include "hwasan_allocator.h"
19
#include "hwasan_checks.h"
20
#include "hwasan_mapping.h"
21
#include "hwasan_malloc_bisect.h"
22
#include "hwasan_thread.h"
23
#include "hwasan_report.h"
24
#include "lsan/lsan_common.h"
25
26
namespace __hwasan {

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
static atomic_uint8_t hwasan_allocator_tagging_enabled;

static constexpr tag_t kFallbackAllocTag = 0xBB & kTagMask;
static constexpr tag_t kFallbackFreeTag = 0xBC;

enum {
  // Either just allocated by the underlying allocator, with the chunk
  // metadata not yet ready, or almost returned to the underlying allocator,
  // with the chunk metadata already meaningless.
  CHUNK_INVALID = 0,
  // The chunk is allocated and not yet freed.
  CHUNK_ALLOCATED = 1,
};

// Initialized in HwasanAllocatorInit, and never changed.
alignas(16) static u8 tail_magic[kShadowAlignment - 1];
static uptr max_malloc_size;
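// Note (illustrative): tail_magic holds kShadowAlignment - 1 random bytes.
// When a requested size is not a multiple of the granule size, the unused
// tail of the last granule is filled from tail_magic at allocation time, and
// HwasanDeallocate below compares it on free to detect small buffer
// overflows that stayed within the granule.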

bool HwasanChunkView::IsAllocated() const {
  return metadata_ && metadata_->IsAllocated();
}

uptr HwasanChunkView::Beg() const {
  return block_;
}
uptr HwasanChunkView::End() const {
  return Beg() + UsedSize();
}
uptr HwasanChunkView::UsedSize() const {
  return metadata_->GetRequestedSize();
}
u32 HwasanChunkView::GetAllocStackId() const {
  return metadata_->GetAllocStackId();
}

u32 HwasanChunkView::GetAllocThreadId() const {
  return metadata_->GetAllocThreadId();
}

uptr HwasanChunkView::ActualSize() const {
  return allocator.GetActuallyAllocatedSize(reinterpret_cast<void *>(block_));
}

bool HwasanChunkView::FromSmallHeap() const {
  return allocator.FromPrimary(reinterpret_cast<void *>(block_));
}

bool HwasanChunkView::AddrIsInside(uptr addr) const {
  return (addr >= Beg()) && (addr < Beg() + UsedSize());
}

inline void Metadata::SetAllocated(u32 stack, u64 size) {
  Thread *t = GetCurrentThread();
  u64 context = t ? t->unique_id() : kMainTid;
  context <<= 32;
  context += stack;
  requested_size_low = size & ((1ul << 32) - 1);
  requested_size_high = size >> 32;
  atomic_store(&alloc_context_id, context, memory_order_relaxed);
  atomic_store(&chunk_state, CHUNK_ALLOCATED, memory_order_release);
}
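// Note (illustrative): alloc_context_id packs the allocating thread's
// unique_id into the upper 32 bits and the stack depot id into the lower 32
// bits; e.g. thread id 5 with stack id 0x1234 stores 0x0000000500001234.
// GetAllocThreadId() and GetAllocStackId() below unpack the two halves.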

inline void Metadata::SetUnallocated() {
  atomic_store(&chunk_state, CHUNK_INVALID, memory_order_release);
  requested_size_low = 0;
  requested_size_high = 0;
  atomic_store(&alloc_context_id, 0, memory_order_relaxed);
}

inline bool Metadata::IsAllocated() const {
  return atomic_load(&chunk_state, memory_order_relaxed) == CHUNK_ALLOCATED;
}

inline u64 Metadata::GetRequestedSize() const {
  return (static_cast<u64>(requested_size_high) << 32) + requested_size_low;
}

inline u32 Metadata::GetAllocStackId() const {
  return atomic_load(&alloc_context_id, memory_order_relaxed);
}

inline u32 Metadata::GetAllocThreadId() const {
  u64 context = atomic_load(&alloc_context_id, memory_order_relaxed);
  u32 tid = context >> 32;
  return tid;
}

void GetAllocatorStats(AllocatorStatCounters s) {
  allocator.GetStats(s);
}

inline void Metadata::SetLsanTag(__lsan::ChunkTag tag) {
  lsan_tag = tag;
}

inline __lsan::ChunkTag Metadata::GetLsanTag() const {
  return static_cast<__lsan::ChunkTag>(lsan_tag);
}

uptr GetAliasRegionStart() {
#if defined(HWASAN_ALIASING_MODE)
  constexpr uptr kAliasRegionOffset = 1ULL << (kTaggableRegionCheckShift - 1);
  uptr AliasRegionStart =
      __hwasan_shadow_memory_dynamic_address + kAliasRegionOffset;

  CHECK_EQ(AliasRegionStart >> kTaggableRegionCheckShift,
           __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
  CHECK_EQ(
      (AliasRegionStart + kAliasRegionOffset - 1) >> kTaggableRegionCheckShift,
      __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
  return AliasRegionStart;
#else
  return 0;
#endif
}
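// Note (illustrative): HWASAN_ALIASING_MODE is the configuration used on
// targets without hardware top-byte-ignore (e.g. x86_64), where tags are
// expressed through aliased address ranges rather than pointer top bytes.
// The CHECKs above assert that the whole alias region falls within the same
// taggable region as the shadow base.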

void HwasanAllocatorInit() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
                       !flags()->disable_allocator_tagging);
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.InitLinkerInitialized(
      common_flags()->allocator_release_to_os_interval_ms,
      GetAliasRegionStart());
  for (uptr i = 0; i < sizeof(tail_magic); i++)
    tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
  if (common_flags()->max_allocation_size_mb) {
    max_malloc_size = common_flags()->max_allocation_size_mb << 20;
    max_malloc_size = Min(max_malloc_size, kMaxAllowedMallocSize);
  } else {
    max_malloc_size = kMaxAllowedMallocSize;
  }
}

void HwasanAllocatorLock() { allocator.ForceLock(); }

void HwasanAllocatorUnlock() { allocator.ForceUnlock(); }

void AllocatorThreadStart(AllocatorCache *cache) { allocator.InitCache(cache); }

void AllocatorThreadFinish(AllocatorCache *cache) {
  allocator.SwallowCache(cache);
  allocator.DestroyCache(cache);
}

static uptr TaggedSize(uptr size) {
  if (!size) size = 1;
  uptr new_size = RoundUpTo(size, kShadowAlignment);
  CHECK_GE(new_size, size);
  return new_size;
}
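// Note (illustrative): with kShadowAlignment == 16, TaggedSize(20) == 32 and
// TaggedSize(0) == TaggedSize(1) == 16: every allocation covers a whole
// number of granules, so each granule can carry exactly one shadow tag.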

static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
                            bool zeroise) {
  // Keep this consistent with LSAN and ASAN behavior.
  if (UNLIKELY(orig_size == 0))
    orig_size = 1;
  if (UNLIKELY(orig_size > max_malloc_size)) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
             orig_size);
      return nullptr;
    }
    ReportAllocationSizeTooBig(orig_size, max_malloc_size, stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportRssLimitExceeded(stack);
  }

  alignment = Max(alignment, kShadowAlignment);
  uptr size = TaggedSize(orig_size);
  Thread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    allocated = allocator.Allocate(t->allocator_cache(), size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
  if (zeroise) {
    // The secondary allocator mmaps memory, which should be zero-inited so we
    // don't need to explicitly clear it.
    if (allocator.FromPrimary(allocated))
      internal_memset(allocated, 0, size);
  } else if (flags()->max_malloc_fill_size > 0) {
    uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size);
    internal_memset(allocated, flags()->malloc_fill_byte, fill_size);
  }
  if (size != orig_size) {
    u8 *tail = reinterpret_cast<u8 *>(allocated) + orig_size;
    uptr tail_length = size - orig_size;
    internal_memcpy(tail, tail_magic, tail_length - 1);
    // Short granule is excluded from magic tail, so we explicitly untag.
    tail[tail_length - 1] = 0;
  }
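  // Note (illustrative): for orig_size == 20 with 16-byte granules, the block
  // spans two granules: the first 16 bytes are user data, and the second
  // granule holds 4 user bytes, 11 tail_magic bytes, and a final byte that is
  // zeroed here and overwritten with the pointer tag if short-granule tagging
  // is applied below.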

  void *user_ptr = allocated;
  if (InTaggableRegion(reinterpret_cast<uptr>(user_ptr)) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled) &&
      flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
    tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
    uptr tag_size = orig_size ? orig_size : 1;
    uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
    user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
    if (full_granule_size != tag_size) {
      u8 *short_granule = reinterpret_cast<u8 *>(allocated) + full_granule_size;
      TagMemoryAligned((uptr)short_granule, kShadowAlignment,
                       tag_size % kShadowAlignment);
      short_granule[kShadowAlignment - 1] = tag;
    }
  } else {
    // Tagging cannot be skipped completely. If it is disabled, we still need
    // to tag with zeros.
    user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, 0);
  }

  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
#if CAN_SANITIZE_LEAKS
  meta->SetLsanTag(__lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                  : __lsan::kDirectlyLeaked);
#endif
  meta->SetAllocated(StackDepotPut(*stack), orig_size);
  RunMallocHooks(user_ptr, orig_size);
  return user_ptr;
}

static bool PointerAndMemoryTagsMatch(void *tagged_ptr) {
  CHECK(tagged_ptr);
  uptr tagged_uptr = reinterpret_cast<uptr>(tagged_ptr);
  if (!InTaggableRegion(tagged_uptr))
    return true;
  tag_t mem_tag = *reinterpret_cast<tag_t *>(
      MemToShadow(reinterpret_cast<uptr>(UntagPtr(tagged_ptr))));
  return PossiblyShortTagMatches(mem_tag, tagged_uptr, 1);
}
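// Note (illustrative): a shadow value below kShadowAlignment denotes a short
// granule (the value is the number of valid bytes, with the real tag stored
// in the granule's last byte), which is why the comparison goes through
// PossiblyShortTagMatches rather than a plain equality check.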

static bool CheckInvalidFree(StackTrace *stack, void *untagged_ptr,
                             void *tagged_ptr) {
  // This function can return true if halt_on_error is false.
  if (!MemIsApp(reinterpret_cast<uptr>(untagged_ptr)) ||
      !PointerAndMemoryTagsMatch(tagged_ptr)) {
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
    return true;
  }
  return false;
}

static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
  CHECK(tagged_ptr);
  void *untagged_ptr = UntagPtr(tagged_ptr);

  if (RunFreeHooks(tagged_ptr))
    return;

  if (CheckInvalidFree(stack, untagged_ptr, tagged_ptr))
    return;

  void *aligned_ptr = reinterpret_cast<void *>(
      RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
  tag_t pointer_tag = GetTagFromPointer(reinterpret_cast<uptr>(tagged_ptr));
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
  if (!meta) {
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
    return;
  }

  uptr orig_size = meta->GetRequestedSize();
  u32 free_context_id = StackDepotPut(*stack);
  u32 alloc_context_id = meta->GetAllocStackId();
  u32 alloc_thread_id = meta->GetAllocThreadId();

  bool in_taggable_region =
      InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr));

  // Check tail magic.
  uptr tagged_size = TaggedSize(orig_size);
  if (flags()->free_checks_tail_magic && orig_size &&
      tagged_size != orig_size) {
    uptr tail_size = tagged_size - orig_size - 1;
    CHECK_LT(tail_size, kShadowAlignment);
    void *tail_beg = reinterpret_cast<void *>(
        reinterpret_cast<uptr>(aligned_ptr) + orig_size);
    tag_t short_granule_memtag = *(reinterpret_cast<tag_t *>(
        reinterpret_cast<uptr>(tail_beg) + tail_size));
    if (tail_size &&
        (internal_memcmp(tail_beg, tail_magic, tail_size) ||
         (in_taggable_region && pointer_tag != short_granule_memtag)))
      ReportTailOverwritten(stack, reinterpret_cast<uptr>(tagged_ptr),
                            orig_size, tail_magic);
  }

  // TODO(kstoimenov): consider meta->SetUnallocated(free_context_id).
  meta->SetUnallocated();
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  Thread *t = GetCurrentThread();
  if (flags()->max_free_fill_size > 0) {
    uptr fill_size =
        Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
    internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
  }
  if (in_taggable_region && flags()->tag_in_free && malloc_bisect(stack, 0) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled) &&
      allocator.FromPrimary(untagged_ptr) /* Secondary 0-tag and unmap.*/) {
    // Always store full 8-bit tags on free to maximize UAF detection.
    tag_t tag;
    if (t) {
      // Make sure we are not using a short granule tag as a poison tag. This
      // would make us attempt to read the memory on a UaF.
      // The tag can be zero if tagging is disabled on this thread.
      do {
        tag = t->GenerateRandomTag(/*num_bits=*/8);
      } while (
          UNLIKELY((tag < kShadowAlignment || tag == pointer_tag) && tag != 0));
    } else {
      static_assert(kFallbackFreeTag >= kShadowAlignment,
                    "fallback tag must not be a short granule tag.");
      tag = kFallbackFreeTag;
    }
    TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
                     tag);
  }
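  // Note (illustrative): this retagging is what turns a later access through
  // a stale pointer into a tag mismatch. If the block was tagged 0xA7 at
  // allocation, a dangling pointer still carries 0xA7 while the memory now
  // carries a different full-byte tag (>= kShadowAlignment and != 0xA7 per
  // the loop above), so a use-after-free cannot silently match.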
  if (t) {
    allocator.Deallocate(t->allocator_cache(), aligned_ptr);
    if (auto *ha = t->heap_allocations())
      ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_thread_id,
                alloc_context_id, free_context_id,
                static_cast<u32>(orig_size)});
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, aligned_ptr);
  }
}

static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old,
                              uptr new_size, uptr alignment) {
  void *untagged_ptr_old = UntagPtr(tagged_ptr_old);
  if (CheckInvalidFree(stack, untagged_ptr_old, tagged_ptr_old))
    return nullptr;
  void *tagged_ptr_new =
      HwasanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (tagged_ptr_old && tagged_ptr_new) {
    Metadata *meta =
        reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
    void *untagged_ptr_new = UntagPtr(tagged_ptr_new);
    internal_memcpy(untagged_ptr_new, untagged_ptr_old,
                    Min(new_size, static_cast<uptr>(meta->GetRequestedSize())));
    HwasanDeallocate(stack, tagged_ptr_old);
  }
  return tagged_ptr_new;
}

static void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, stack);
  }
  return HwasanAllocate(stack, nmemb * size, sizeof(u64), true);
}

HwasanChunkView FindHeapChunkByAddress(uptr address) {
  if (!allocator.PointerIsMine(reinterpret_cast<void *>(address)))
    return HwasanChunkView();
  void *block = allocator.GetBlockBegin(reinterpret_cast<void *>(address));
  if (!block)
    return HwasanChunkView();
  Metadata *metadata =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(block));
  return HwasanChunkView(reinterpret_cast<uptr>(block), metadata);
}

static const void *AllocationBegin(const void *p) {
  const void *untagged_ptr = UntagPtr(p);
  if (!untagged_ptr)
    return nullptr;

  const void *beg = allocator.GetBlockBegin(untagged_ptr);
  if (!beg)
    return nullptr;

  Metadata *b = (Metadata *)allocator.GetMetaData(beg);
  if (b->GetRequestedSize() == 0)
    return nullptr;

  tag_t tag = GetTagFromPointer((uptr)p);
  return (const void *)AddTagToPointer((uptr)beg, tag);
}

static uptr AllocationSize(const void *p) {
  const void *untagged_ptr = UntagPtr(p);
  if (!untagged_ptr) return 0;
  const void *beg = allocator.GetBlockBegin(untagged_ptr);
  if (!beg)
    return 0;
  Metadata *b = (Metadata *)allocator.GetMetaData(beg);
  return b->GetRequestedSize();
}

static uptr AllocationSizeFast(const void *p) {
  const void *untagged_ptr = UntagPtr(p);
  void *aligned_ptr = reinterpret_cast<void *>(
      RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
  return meta->GetRequestedSize();
}
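// Note (illustrative): unlike AllocationSize, the fast variant skips the
// GetBlockBegin lookup and assumes p already points at the start of a live
// allocation; __sanitizer_get_allocated_size_fast below documents that
// precondition with a DCHECK.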

void *hwasan_malloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
}

void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanCalloc(stack, nmemb, size));
}

void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    HwasanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(HwasanReallocate(stack, ptr, size, sizeof(u64)));
}
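// Note (illustrative): hwasan_realloc follows the historical realloc
// contract: realloc(nullptr, n) behaves like malloc(n), and realloc(p, 0)
// frees p and returns nullptr.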

void *hwasan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return hwasan_realloc(ptr, nmemb * size, stack);
}

void *hwasan_valloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(
      HwasanAllocate(stack, size, GetPageSizeCached(), false));
}

void *hwasan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(HwasanAllocate(stack, size, PageSize, false));
}

void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
                          StackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = HwasanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by HwasanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

void hwasan_free(void *ptr, StackTrace *stack) {
  return HwasanDeallocate(stack, ptr);
}

}  // namespace __hwasan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {

void LockAllocator() {
  __hwasan::HwasanAllocatorLock();
}

void UnlockAllocator() {
  __hwasan::HwasanAllocatorUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__hwasan::allocator;
  *end = *begin + sizeof(__hwasan::allocator);
}

uptr PointsIntoChunk(void *p) {
  p = UntagPtr(p);
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk =
      reinterpret_cast<uptr>(__hwasan::allocator.GetBlockBeginFastLocked(p));
  if (!chunk)
    return 0;
  __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
      __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk)));
  if (!metadata || !metadata->IsAllocated())
    return 0;
  if (addr < chunk + metadata->GetRequestedSize())
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, metadata->GetRequestedSize(), addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  CHECK_EQ(UntagAddr(chunk), chunk);
  void *block = __hwasan::allocator.GetBlockBeginFastLocked(
      reinterpret_cast<void *>(chunk));
  if (!block)
    return 0;
  __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
      __hwasan::allocator.GetMetaData(block));
  if (!metadata || !metadata->IsAllocated())
    return 0;

  return reinterpret_cast<uptr>(block);
}

uptr GetUserAddr(uptr chunk) {
  if (!InTaggableRegion(chunk))
    return chunk;
  tag_t mem_tag = *(tag_t *)__hwasan::MemToShadow(chunk);
  return AddTagToPointer(chunk, mem_tag);
}

LsanMetadata::LsanMetadata(uptr chunk) {
  CHECK_EQ(UntagAddr(chunk), chunk);
  metadata_ =
      chunk ? __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk))
            : nullptr;
}

bool LsanMetadata::allocated() const {
  if (!metadata_)
    return false;
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->IsAllocated();
}

ChunkTag LsanMetadata::tag() const {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->GetLsanTag();
}

void LsanMetadata::set_tag(ChunkTag value) {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  m->SetLsanTag(value);
}

uptr LsanMetadata::requested_size() const {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->GetRequestedSize();
}

u32 LsanMetadata::stack_trace_id() const {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->GetAllocStackId();
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __hwasan::allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObject(const void *p) {
  p = UntagPtr(p);
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(__hwasan::allocator.GetBlockBegin(p));
  if (!chunk)
    return kIgnoreObjectInvalid;
  __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
      __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk)));
  if (!metadata || !metadata->IsAllocated())
    return kIgnoreObjectInvalid;
  if (addr >= chunk + metadata->GetRequestedSize())
    return kIgnoreObjectInvalid;
  if (metadata->GetLsanTag() == kIgnored)
    return kIgnoreObjectAlreadyIgnored;

  metadata->SetLsanTag(kIgnored);
  return kIgnoreObjectSuccess;
}

}  // namespace __lsan

using namespace __hwasan;

void __hwasan_enable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 1);
}

void __hwasan_disable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 0);
}

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

const void *__sanitizer_get_allocated_begin(const void *p) {
  return AllocationBegin(p);
}

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }

uptr __sanitizer_get_allocated_size_fast(const void *p) {
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = AllocationSizeFast(p);
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}

void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); }