GitHub Repository: freebsd/freebsd-src
Path: blob/main/contrib/llvm-project/compiler-rt/lib/asan/asan_poisoning.cpp
//===-- asan_poisoning.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Shadow memory poisoning by ASan RTL and by user application.
//===----------------------------------------------------------------------===//

#include "asan_poisoning.h"

#include "asan_report.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"

namespace __asan {

static atomic_uint8_t can_poison_memory;

void SetCanPoisonMemory(bool value) {
  atomic_store(&can_poison_memory, value, memory_order_release);
}

bool CanPoisonMemory() {
  return atomic_load(&can_poison_memory, memory_order_acquire);
}
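
// Usage sketch (illustrative, not part of the upstream file): the RTL flips
// this flag around startup/shutdown, and poisoning paths are gated on it; the
// acquire load above pairs with the release store so a reader that observes
// "true" also observes fully initialized shadow. A hypothetical caller:
//
//   SetCanPoisonMemory(true);  // e.g. once shadow memory is ready
//   if (CanPoisonMemory())
//     PoisonShadow(addr, size, kAsanHeapLeftRedzoneMagic);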

void PoisonShadow(uptr addr, uptr size, u8 value) {
  if (value && !CanPoisonMemory()) return;
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  CHECK(AddrIsAlignedByGranularity(addr + size));
  CHECK(AddrIsInMem(addr + size - ASAN_SHADOW_GRANULARITY));
  CHECK(REAL(memset));
  FastPoisonShadow(addr, size, value);
}

void PoisonShadowPartialRightRedzone(uptr addr,
                                     uptr size,
                                     uptr redzone_size,
                                     u8 value) {
  if (!CanPoisonMemory()) return;
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  FastPoisonShadowPartialRightRedzone(addr, size, redzone_size, value);
}

struct ShadowSegmentEndpoint {
  u8 *chunk;
  s8 offset;  // in [0, ASAN_SHADOW_GRANULARITY)
  s8 value;   // = *chunk;

  explicit ShadowSegmentEndpoint(uptr address) {
    chunk = (u8*)MemToShadow(address);
    offset = address & (ASAN_SHADOW_GRANULARITY - 1);
    value = *chunk;
  }
};
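
// Worked example (a sketch; assumes the default 8-byte shadow granularity):
// for address 0x1003, `chunk` points at MemToShadow(0x1003) and
// offset = 0x1003 & 7 = 3, i.e. the endpoint sits 3 bytes into its granule.
// The cached shadow byte `value` encodes the granule's addressability:
// 0 means all 8 bytes are addressable, k in [1,7] means only the first k
// bytes are, and a negative value is a poison magic for the whole granule.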

void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
  uptr end = ptr + size;
  if (Verbosity()) {
    Printf("__asan_%spoison_intra_object_redzone [%p,%p) %zd\n",
           poison ? "" : "un", (void *)ptr, (void *)end, size);
    if (Verbosity() >= 2)
      PRINT_CURRENT_STACK();
  }
  CHECK(size);
  CHECK_LE(size, 4096);
  CHECK(IsAligned(end, ASAN_SHADOW_GRANULARITY));
  if (!IsAligned(ptr, ASAN_SHADOW_GRANULARITY)) {
    *(u8 *)MemToShadow(ptr) =
        poison ? static_cast<u8>(ptr % ASAN_SHADOW_GRANULARITY) : 0;
    ptr |= ASAN_SHADOW_GRANULARITY - 1;
    ptr++;
  }
  for (; ptr < end; ptr += ASAN_SHADOW_GRANULARITY)
    *(u8*)MemToShadow(ptr) = poison ? kAsanIntraObjectRedzone : 0;
}

} // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;

// The current implementation of __asan_(un)poison_memory_region doesn't check
// that the user program (un)poisons the memory it owns. It poisons memory
// conservatively, and unpoisons progressively, to make sure the asan shadow
// mapping invariant is preserved (see the detailed mapping description here:
// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm).
//
// * if the user asks to poison the region [left, right), the program poisons
//   at least [left, AlignDown(right)).
// * if the user asks to unpoison the region [left, right), the program
//   unpoisons at most [AlignDown(left), right).
void __asan_poison_memory_region(void const volatile *addr, uptr size) {
  if (!flags()->allow_user_poisoning || size == 0) return;
  uptr beg_addr = (uptr)addr;
  uptr end_addr = beg_addr + size;
  VPrintf(3, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr,
          (void *)end_addr);
  ShadowSegmentEndpoint beg(beg_addr);
  ShadowSegmentEndpoint end(end_addr);
  if (beg.chunk == end.chunk) {
    CHECK_LT(beg.offset, end.offset);
    s8 value = beg.value;
    CHECK_EQ(value, end.value);
    // We can only poison memory if the byte in end.offset is unaddressable.
    // No need to re-poison memory if it is poisoned already.
    if (value > 0 && value <= end.offset) {
      if (beg.offset > 0) {
        *beg.chunk = Min(value, beg.offset);
      } else {
        *beg.chunk = kAsanUserPoisonedMemoryMagic;
      }
    }
    return;
  }
  CHECK_LT(beg.chunk, end.chunk);
  if (beg.offset > 0) {
    // Mark bytes from beg.offset as unaddressable.
    if (beg.value == 0) {
      *beg.chunk = beg.offset;
    } else {
      *beg.chunk = Min(beg.value, beg.offset);
    }
    beg.chunk++;
  }
  REAL(memset)(beg.chunk, kAsanUserPoisonedMemoryMagic, end.chunk - beg.chunk);
  // Poison if byte in end.offset is unaddressable.
  if (end.value > 0 && end.value <= end.offset) {
    *end.chunk = kAsanUserPoisonedMemoryMagic;
  }
}

void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
  if (!flags()->allow_user_poisoning || size == 0) return;
  uptr beg_addr = (uptr)addr;
  uptr end_addr = beg_addr + size;
  VPrintf(3, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr,
          (void *)end_addr);
  ShadowSegmentEndpoint beg(beg_addr);
  ShadowSegmentEndpoint end(end_addr);
  if (beg.chunk == end.chunk) {
    CHECK_LT(beg.offset, end.offset);
    s8 value = beg.value;
    CHECK_EQ(value, end.value);
    // We unpoison memory bytes up to end.offset if it is not unpoisoned
    // already.
    if (value != 0) {
      *beg.chunk = Max(value, end.offset);
    }
    return;
  }
  CHECK_LT(beg.chunk, end.chunk);
  REAL(memset)(beg.chunk, 0, end.chunk - beg.chunk);
  if (end.offset > 0 && end.value != 0) {
    *end.chunk = Max(end.value, end.offset);
  }
}
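
// Usage sketch for the two functions above (illustrative, not part of this
// file; variable names are hypothetical): a program can carve a manual
// redzone out of memory it owns, then re-enable it later.
//
//   char *buf = (char *)malloc(16);
//   __asan_poison_memory_region(buf + 8, 8);    // buf[8..15] now trap;
//                                               // reads report use-after-poison
//   __asan_unpoison_memory_region(buf + 8, 8);  // accessible again
//   free(buf);
//
// Because poisoning is conservative and unpoisoning progressive, an unaligned
// tail of the requested region may stay addressable; client code should not
// rely on byte-exact poisoning boundaries.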

int __asan_address_is_poisoned(void const volatile *addr) {
  return __asan::AddressIsPoisoned((uptr)addr);
}

uptr __asan_region_is_poisoned(uptr beg, uptr size) {
  if (!size)
    return 0;
  uptr end = beg + size;
  if (!AddrIsInMem(beg))
    return beg;
  if (!AddrIsInMem(end))
    return end;
  CHECK_LT(beg, end);
  uptr aligned_b = RoundUpTo(beg, ASAN_SHADOW_GRANULARITY);
  uptr aligned_e = RoundDownTo(end, ASAN_SHADOW_GRANULARITY);
  uptr shadow_beg = MemToShadow(aligned_b);
  uptr shadow_end = MemToShadow(aligned_e);
  // First check the first and the last application bytes,
  // then check the ASAN_SHADOW_GRANULARITY-aligned region by calling
  // mem_is_zero on the corresponding shadow.
  if (!__asan::AddressIsPoisoned(beg) && !__asan::AddressIsPoisoned(end - 1) &&
      (shadow_end <= shadow_beg ||
       __sanitizer::mem_is_zero((const char *)shadow_beg,
                                shadow_end - shadow_beg)))
    return 0;
  // The fast check failed, so we have a poisoned byte somewhere.
  // Find it slowly.
  for (; beg < end; beg++)
    if (__asan::AddressIsPoisoned(beg))
      return beg;
  UNREACHABLE("mem_is_zero returned false, but poisoned byte was not found");
  return 0;
}
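
// Query sketch (illustrative): __asan_region_is_poisoned returns 0 when the
// whole region is addressable, otherwise the address of the first bad byte.
// A hypothetical diagnostic use:
//
//   uptr first_bad = __asan_region_is_poisoned((uptr)p, n);
//   if (first_bad)
//     Printf("byte at %p is poisoned\n", (void *)first_bad);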

#define CHECK_SMALL_REGION(p, size, isWrite)                        \
  do {                                                              \
    uptr __p = reinterpret_cast<uptr>(p);                           \
    uptr __size = size;                                             \
    if (UNLIKELY(__asan::AddressIsPoisoned(__p) ||                  \
                 __asan::AddressIsPoisoned(__p + __size - 1))) {    \
      GET_CURRENT_PC_BP_SP;                                         \
      uptr __bad = __asan_region_is_poisoned(__p, __size);          \
      __asan_report_error(pc, bp, sp, __bad, isWrite, __size, 0);   \
    }                                                               \
  } while (false)


extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u16 __sanitizer_unaligned_load16(const uu16 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u32 __sanitizer_unaligned_load32(const uu32 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u64 __sanitizer_unaligned_load64(const uu64 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store16(uu16 *p, u16 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store32(uu32 *p, u32 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store64(uu64 *p, u64 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}
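
// Usage sketch (illustrative; `pkt` and `flags` are hypothetical): these
// helpers let code read/write unaligned scalars with both endpoint bytes
// checked against shadow, e.g. when parsing a packed wire format:
//
//   u32 len = __sanitizer_unaligned_load32((const uu32 *)(pkt + 1));
//   __sanitizer_unaligned_store16((uu16 *)(pkt + 5), (u16)flags);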

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_poison_cxx_array_cookie(uptr p) {
  if (SANITIZER_WORDSIZE != 64) return;
  if (!flags()->poison_array_cookie) return;
  uptr s = MEM_TO_SHADOW(p);
  *reinterpret_cast<u8*>(s) = kAsanArrayCookieMagic;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __asan_load_cxx_array_cookie(uptr *p) {
  if (SANITIZER_WORDSIZE != 64) return *p;
  if (!flags()->poison_array_cookie) return *p;
  uptr s = MEM_TO_SHADOW(reinterpret_cast<uptr>(p));
  u8 sval = *reinterpret_cast<u8*>(s);
  if (sval == kAsanArrayCookieMagic) return *p;
  // If sval is not kAsanArrayCookieMagic it can only be freed memory,
  // which means that we are going to get a double-free. So, return 0 to avoid
  // an infinite loop of destructors. We don't want to report a double-free
  // here though, so print a warning just in case.
  // CHECK_EQ(sval, kAsanHeapFreeMagic);
  if (sval == kAsanHeapFreeMagic) {
    Report("AddressSanitizer: loaded array cookie from free-d memory; "
           "expect a double-free report\n");
    return 0;
  }
  // The cookie may remain unpoisoned if e.g. it comes from a custom
  // operator new defined inside a class.
  return *p;
}

// This is a simplified version of __asan_(un)poison_memory_region, which
// assumes that the left border of the region to be poisoned is properly
// aligned.
static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
  if (size == 0) return;
  uptr aligned_size = size & ~(ASAN_SHADOW_GRANULARITY - 1);
  PoisonShadow(addr, aligned_size,
               do_poison ? kAsanStackUseAfterScopeMagic : 0);
  if (size == aligned_size)
    return;
  s8 end_offset = (s8)(size - aligned_size);
  s8* shadow_end = (s8*)MemToShadow(addr + aligned_size);
  s8 end_value = *shadow_end;
  if (do_poison) {
    // If possible, mark all the bytes mapping to the last shadow byte as
    // unaddressable.
    if (end_value > 0 && end_value <= end_offset)
      *shadow_end = (s8)kAsanStackUseAfterScopeMagic;
  } else {
    // If necessary, mark the first few bytes mapping to the last shadow byte
    // as addressable.
    if (end_value != 0)
      *shadow_end = Max(end_value, end_offset);
  }
}

void __asan_set_shadow_00(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0, size);
}

void __asan_set_shadow_01(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x01, size);
}

void __asan_set_shadow_02(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x02, size);
}

void __asan_set_shadow_03(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x03, size);
}

void __asan_set_shadow_04(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x04, size);
}

void __asan_set_shadow_05(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x05, size);
}

void __asan_set_shadow_06(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x06, size);
}

void __asan_set_shadow_07(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x07, size);
}

void __asan_set_shadow_f1(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf1, size);
}

void __asan_set_shadow_f2(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf2, size);
}

void __asan_set_shadow_f3(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf3, size);
}

void __asan_set_shadow_f5(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf5, size);
}

void __asan_set_shadow_f8(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf8, size);
}

void __asan_poison_stack_memory(uptr addr, uptr size) {
  VReport(1, "poisoning: %p %zx\n", (void *)addr, size);
  PoisonAlignedStackMemory(addr, size, true);
}

void __asan_unpoison_stack_memory(uptr addr, uptr size) {
  VReport(1, "unpoisoning: %p %zx\n", (void *)addr, size);
  PoisonAlignedStackMemory(addr, size, false);
}
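
// Context sketch (illustrative, not a literal description of current compiler
// output): the two entry points above back stack use-after-scope detection.
// Conceptually, instrumentation brackets a scoped local like this
// (hypothetical address and size):
//
//   // { int x; ... }  -- x lives at x_addr in an 8-byte aligned slot
//   __asan_unpoison_stack_memory(x_addr, 8);  // on scope entry
//   // ... body of the scope ...
//   __asan_poison_stack_memory(x_addr, 8);    // on scope exit: later reads
//                                             // report stack-use-after-scope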

static void FixUnalignedStorage(uptr storage_beg, uptr storage_end,
                                uptr &old_beg, uptr &old_end, uptr &new_beg,
                                uptr &new_end) {
  constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
  if (UNLIKELY(!AddrIsAlignedByGranularity(storage_end))) {
    uptr end_down = RoundDownTo(storage_end, granularity);
    // Ignore the last unaligned granule if the storage is followed by an
    // unpoisoned byte, because we can't poison the prefix anyway. Don't call
    // AddressIsPoisoned at all if the container changes do not affect the
    // last granule.
    if ((((old_end != new_end) && Max(old_end, new_end) > end_down) ||
         ((old_beg != new_beg) && Max(old_beg, new_beg) > end_down)) &&
        !AddressIsPoisoned(storage_end)) {
      old_beg = Min(end_down, old_beg);
      old_end = Min(end_down, old_end);
      new_beg = Min(end_down, new_beg);
      new_end = Min(end_down, new_end);
    }
  }

  // Handle misaligned begin and cut it off.
  if (UNLIKELY(!AddrIsAlignedByGranularity(storage_beg))) {
    uptr beg_up = RoundUpTo(storage_beg, granularity);
    // The first unaligned granule needs special handling only if we had bytes
    // there before and will have none after.
    if ((new_beg == new_end || new_beg >= beg_up) && old_beg != old_end &&
        old_beg < beg_up) {
      // Keep the granule prefix outside of the storage unpoisoned.
      uptr beg_down = RoundDownTo(storage_beg, granularity);
      *(u8 *)MemToShadow(beg_down) = storage_beg - beg_down;
      old_beg = Max(beg_up, old_beg);
      old_end = Max(beg_up, old_end);
      new_beg = Max(beg_up, new_beg);
      new_end = Max(beg_up, new_end);
    }
  }
}

void __sanitizer_annotate_contiguous_container(const void *beg_p,
                                               const void *end_p,
                                               const void *old_mid_p,
                                               const void *new_mid_p) {
  if (!flags()->detect_container_overflow)
    return;
  VPrintf(2, "contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p,
          new_mid_p);
  uptr storage_beg = reinterpret_cast<uptr>(beg_p);
  uptr storage_end = reinterpret_cast<uptr>(end_p);
  uptr old_end = reinterpret_cast<uptr>(old_mid_p);
  uptr new_end = reinterpret_cast<uptr>(new_mid_p);
  uptr old_beg = storage_beg;
  uptr new_beg = storage_beg;
  uptr granularity = ASAN_SHADOW_GRANULARITY;
  if (!(storage_beg <= old_end && storage_beg <= new_end &&
        old_end <= storage_end && new_end <= storage_end)) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportBadParamsToAnnotateContiguousContainer(storage_beg, storage_end,
                                                 old_end, new_end, &stack);
  }
  CHECK_LE(storage_end - storage_beg,
           FIRST_32_SECOND_64(1UL << 30, 1ULL << 40));  // Sanity check.

  if (old_end == new_end)
    return;  // Nothing to do here.

  FixUnalignedStorage(storage_beg, storage_end, old_beg, old_end, new_beg,
                      new_end);

  uptr a = RoundDownTo(Min(old_end, new_end), granularity);
  uptr c = RoundUpTo(Max(old_end, new_end), granularity);
  uptr d1 = RoundDownTo(old_end, granularity);
  // uptr d2 = RoundUpTo(old_mid, granularity);
  // Currently we should be in this state:
  //   [a, d1) is good, [d2, c) is bad, [d1, d2) is partially good.
  // Make a quick sanity check that we are indeed in this state.
  //
  // FIXME: Two of these three checks are disabled until we fix
  // https://github.com/google/sanitizers/issues/258.
  // if (d1 != d2)
  //   DCHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
  //
  // NOTE: curly brackets for the "if" below to silence a MSVC warning.
  if (a + granularity <= d1) {
    DCHECK_EQ(*(u8 *)MemToShadow(a), 0);
  }
  // if (d2 + granularity <= c && c <= end)
  //   DCHECK_EQ(*(u8 *)MemToShadow(c - granularity),
  //             kAsanContiguousContainerOOBMagic);

  uptr b1 = RoundDownTo(new_end, granularity);
  uptr b2 = RoundUpTo(new_end, granularity);
  // New state:
  //   [a, b1) is good, [b2, c) is bad, [b1, b2) is partially good.
  if (b1 > a)
    PoisonShadow(a, b1 - a, 0);
  else if (c > b2)
    PoisonShadow(b2, c - b2, kAsanContiguousContainerOOBMagic);
  if (b1 != b2) {
    CHECK_EQ(b2 - b1, granularity);
    *(u8 *)MemToShadow(b1) = static_cast<u8>(new_end - b1);
  }
}
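
// Usage sketch (illustrative, hypothetical values; this mirrors how annotated
// containers such as libc++'s std::vector mark spare capacity): storage is
// [data, data + capacity) with char *data, and the container grows from 4 to
// 6 live bytes:
//
//   __sanitizer_annotate_contiguous_container(
//       data, data + capacity,  // storage bounds (fixed)
//       data + 4,               // old end of live elements
//       data + 6);              // new end: [4, 6) becomes addressable
//
// A later access to data[7] is then reported as container-overflow.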

// Annotates a double-ended contiguous memory area, like std::deque's chunk.
// It allows detecting buggy accesses to allocated but not yet used beginning
// or end items of such a container.
void __sanitizer_annotate_double_ended_contiguous_container(
    const void *storage_beg_p, const void *storage_end_p,
    const void *old_container_beg_p, const void *old_container_end_p,
    const void *new_container_beg_p, const void *new_container_end_p) {
  if (!flags()->detect_container_overflow)
    return;

  VPrintf(2, "contiguous_container: %p %p %p %p %p %p\n", storage_beg_p,
          storage_end_p, old_container_beg_p, old_container_end_p,
          new_container_beg_p, new_container_end_p);

  uptr storage_beg = reinterpret_cast<uptr>(storage_beg_p);
  uptr storage_end = reinterpret_cast<uptr>(storage_end_p);
  uptr old_beg = reinterpret_cast<uptr>(old_container_beg_p);
  uptr old_end = reinterpret_cast<uptr>(old_container_end_p);
  uptr new_beg = reinterpret_cast<uptr>(new_container_beg_p);
  uptr new_end = reinterpret_cast<uptr>(new_container_end_p);

  constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;

  if (!(old_beg <= old_end && new_beg <= new_end) ||
      !(storage_beg <= new_beg && new_end <= storage_end) ||
      !(storage_beg <= old_beg && old_end <= storage_end)) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportBadParamsToAnnotateDoubleEndedContiguousContainer(
        storage_beg, storage_end, old_beg, old_end, new_beg, new_end, &stack);
  }
  CHECK_LE(storage_end - storage_beg,
           FIRST_32_SECOND_64(1UL << 30, 1ULL << 40));  // Sanity check.

  if ((old_beg == old_end && new_beg == new_end) ||
      (old_beg == new_beg && old_end == new_end))
    return;  // Nothing to do here.

  FixUnalignedStorage(storage_beg, storage_end, old_beg, old_end, new_beg,
                      new_end);

  // Handle non-intersecting new/old containers separately to have a simpler
  // intersecting case.
  if (old_beg == old_end || new_beg == new_end || new_end <= old_beg ||
      old_end <= new_beg) {
    if (old_beg != old_end) {
      // Poisoning the old container.
      uptr a = RoundDownTo(old_beg, granularity);
      uptr b = RoundUpTo(old_end, granularity);
      PoisonShadow(a, b - a, kAsanContiguousContainerOOBMagic);
    }

    if (new_beg != new_end) {
      // Unpoisoning the new container.
      uptr a = RoundDownTo(new_beg, granularity);
      uptr b = RoundDownTo(new_end, granularity);
      PoisonShadow(a, b - a, 0);
      if (!AddrIsAlignedByGranularity(new_end))
        *(u8 *)MemToShadow(b) = static_cast<u8>(new_end - b);
    }

    return;
  }

  // The intersection of the old and new containers is not empty.
  CHECK_LT(new_beg, old_end);
  CHECK_GT(new_end, old_beg);

  if (new_beg < old_beg) {
    // Round down because we can't poison prefixes.
    uptr a = RoundDownTo(new_beg, granularity);
    // Round down and ignore [c, old_beg), as its state is defined by the
    // unchanged [old_beg, old_end).
    uptr c = RoundDownTo(old_beg, granularity);
    PoisonShadow(a, c - a, 0);
  } else if (new_beg > old_beg) {
    // Round down and poison [a, old_beg) because it was unpoisoned only as a
    // prefix.
    uptr a = RoundDownTo(old_beg, granularity);
    // Round down and ignore [c, new_beg), as its state is defined by the
    // unchanged [new_beg, old_end).
    uptr c = RoundDownTo(new_beg, granularity);

    PoisonShadow(a, c - a, kAsanContiguousContainerOOBMagic);
  }

  if (new_end > old_end) {
    // Round down to poison the prefix.
    uptr a = RoundDownTo(old_end, granularity);
    // Round down and handle the remainder below.
    uptr c = RoundDownTo(new_end, granularity);
    PoisonShadow(a, c - a, 0);
    if (!AddrIsAlignedByGranularity(new_end))
      *(u8 *)MemToShadow(c) = static_cast<u8>(new_end - c);
  } else if (new_end < old_end) {
    // Round up and handle the remainder below.
    uptr a2 = RoundUpTo(new_end, granularity);
    // Round up to poison the entire granule, as we had nothing in
    // [old_end, c2).
    uptr c2 = RoundUpTo(old_end, granularity);
    PoisonShadow(a2, c2 - a2, kAsanContiguousContainerOOBMagic);

    if (!AddrIsAlignedByGranularity(new_end)) {
      uptr a = RoundDownTo(new_end, granularity);
      *(u8 *)MemToShadow(a) = static_cast<u8>(new_end - a);
    }
  }
}
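
// Usage sketch (illustrative, hypothetical offsets): a deque-like container
// that owns the chunk [chunk, chunk + 64) and shifts its live byte range
// from [16, 48) to [8, 48) after a push_front:
//
//   __sanitizer_annotate_double_ended_contiguous_container(
//       chunk, chunk + 64,       // storage bounds (fixed)
//       chunk + 16, chunk + 48,  // old live range
//       chunk + 8,  chunk + 48); // new live range: [8, 16) becomes addressable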

static const void *FindBadAddress(uptr begin, uptr end, bool poisoned) {
  CHECK_LE(begin, end);
  constexpr uptr kMaxRangeToCheck = 32;
  if (end - begin > kMaxRangeToCheck * 2) {
    if (auto *bad = FindBadAddress(begin, begin + kMaxRangeToCheck, poisoned))
      return bad;
    if (auto *bad = FindBadAddress(end - kMaxRangeToCheck, end, poisoned))
      return bad;
  }

  for (uptr i = begin; i < end; ++i)
    if (AddressIsPoisoned(i) != poisoned)
      return reinterpret_cast<const void *>(i);
  return nullptr;
}

const void *__sanitizer_contiguous_container_find_bad_address(
    const void *beg_p, const void *mid_p, const void *end_p) {
  if (!flags()->detect_container_overflow)
    return nullptr;
  uptr granularity = ASAN_SHADOW_GRANULARITY;
  uptr beg = reinterpret_cast<uptr>(beg_p);
  uptr end = reinterpret_cast<uptr>(end_p);
  uptr mid = reinterpret_cast<uptr>(mid_p);
  CHECK_LE(beg, mid);
  CHECK_LE(mid, end);
  // If the byte after the storage is unpoisoned, everything in the granule
  // before must stay unpoisoned.
  uptr annotations_end =
      (!AddrIsAlignedByGranularity(end) && !AddressIsPoisoned(end))
          ? RoundDownTo(end, granularity)
          : end;
  beg = Min(beg, annotations_end);
  mid = Min(mid, annotations_end);
  if (auto *bad = FindBadAddress(beg, mid, false))
    return bad;
  if (auto *bad = FindBadAddress(mid, annotations_end, true))
    return bad;
  return FindBadAddress(annotations_end, end, false);
}

int __sanitizer_verify_contiguous_container(const void *beg_p,
                                            const void *mid_p,
                                            const void *end_p) {
  return __sanitizer_contiguous_container_find_bad_address(beg_p, mid_p,
                                                           end_p) == nullptr;
}
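
// Verification sketch (illustrative; typically used from tests): after
// annotating [data, data + 6) live within [data, data + capacity) storage,
// the shadow layout can be validated, e.g.:
//
//   CHECK(__sanitizer_verify_contiguous_container(data, data + 6,
//                                                 data + capacity));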

const void *__sanitizer_double_ended_contiguous_container_find_bad_address(
    const void *storage_beg_p, const void *container_beg_p,
    const void *container_end_p, const void *storage_end_p) {
  if (!flags()->detect_container_overflow)
    return nullptr;
  uptr granularity = ASAN_SHADOW_GRANULARITY;
  uptr storage_beg = reinterpret_cast<uptr>(storage_beg_p);
  uptr storage_end = reinterpret_cast<uptr>(storage_end_p);
  uptr beg = reinterpret_cast<uptr>(container_beg_p);
  uptr end = reinterpret_cast<uptr>(container_end_p);

  // The prefix of the first granule of the container is unpoisoned.
  if (beg != end)
    beg = Max(storage_beg, RoundDownTo(beg, granularity));

  // If the byte after the storage is unpoisoned, the prefix of the last
  // granule is unpoisoned.
  uptr annotations_end = (!AddrIsAlignedByGranularity(storage_end) &&
                          !AddressIsPoisoned(storage_end))
                             ? RoundDownTo(storage_end, granularity)
                             : storage_end;
  storage_beg = Min(storage_beg, annotations_end);
  beg = Min(beg, annotations_end);
  end = Min(end, annotations_end);

  if (auto *bad = FindBadAddress(storage_beg, beg, true))
    return bad;
  if (auto *bad = FindBadAddress(beg, end, false))
    return bad;
  if (auto *bad = FindBadAddress(end, annotations_end, true))
    return bad;
  return FindBadAddress(annotations_end, storage_end, false);
}

int __sanitizer_verify_double_ended_contiguous_container(
    const void *storage_beg_p, const void *container_beg_p,
    const void *container_end_p, const void *storage_end_p) {
  return __sanitizer_double_ended_contiguous_container_find_bad_address(
             storage_beg_p, container_beg_p, container_end_p, storage_end_p) ==
         nullptr;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_poison_intra_object_redzone(uptr ptr, uptr size) {
  AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, true);
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_unpoison_intra_object_redzone(uptr ptr, uptr size) {
  AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, false);
}
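
// Context sketch (illustrative, hypothetical layout): these entry points
// support intra-object redzone checking, where instrumentation poisons
// compiler-inserted padding between fields of an object, conceptually:
//
//   struct S { int a; char pad[8]; int b; };  // pad acts as a redzone
//   __asan_poison_intra_object_redzone((uptr)&s.pad, 8);    // on construction
//   __asan_unpoison_intra_object_redzone((uptr)&s.pad, 8);  // on destruction
//
// Note the constraints enforced above: size <= 4096 and the region's end must
// be granularity-aligned.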

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
bool WordIsPoisoned(uptr addr) {
  return (__asan_region_is_poisoned(addr, sizeof(uptr)) != 0);
}
}  // namespace __lsan