GitHub Repository: freebsd/freebsd-src
Path: blob/main/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.h

//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between run-time libraries of sanitizers.
//
// It declares common functions and classes that are used in both runtimes.
// Implementations of some functions are provided in sanitizer_common, while
// others must be defined by the run-time library itself.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_COMMON_H
#define SANITIZER_COMMON_H

#include "sanitizer_flags.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"

#if defined(_MSC_VER) && !defined(__clang__)
extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
#endif

namespace __sanitizer {

struct AddressInfo;
struct BufferedStackTrace;
struct SignalContext;
struct StackTrace;
struct SymbolizedStack;

// Constants.
const uptr kWordSize = SANITIZER_WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;

const uptr kCacheLineSize = SANITIZER_CACHE_LINE_SIZE;

const uptr kMaxPathLength = 4096;

const uptr kMaxThreadStackSize = 1 << 30;  // 1 GiB

const uptr kErrorMessageBufferSize = 1 << 16;

// Denotes fake PC values that come from JIT/Java/etc.
// For such PC values __tsan_symbolize_external_ex() will be called.
const u64 kExternalPCBit = 1ULL << 60;

extern const char *SanitizerToolName;  // Can be changed by the tool.

extern atomic_uint32_t current_verbosity;
inline void SetVerbosity(int verbosity) {
  atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
}
inline int Verbosity() {
  return atomic_load(&current_verbosity, memory_order_relaxed);
}

#if SANITIZER_ANDROID && !defined(__aarch64__)
// 32-bit Android only has 4k pages.
inline uptr GetPageSize() { return 4096; }
inline uptr GetPageSizeCached() { return 4096; }
#else
uptr GetPageSize();
extern uptr PageSizeCached;
inline uptr GetPageSizeCached() {
  if (!PageSizeCached)
    PageSizeCached = GetPageSize();
  return PageSizeCached;
}
#endif
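
// Illustrative pattern (not part of the original header): sizes handed to the
// mmap-based helpers below are typically rounded up to the page size first
// using RoundUpTo(), defined later in this file; `user_size` is hypothetical.
//   uptr rounded = RoundUpTo(user_size, GetPageSizeCached());
//   void *p = MmapOrDie(rounded, "example buffer");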

uptr GetMmapGranularity();
uptr GetMaxVirtualAddress();
uptr GetMaxUserVirtualAddress();
// Threads
tid_t GetTid();
int TgKill(pid_t pid, tid_t tid, int sig);
uptr GetThreadSelf();
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom);
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size);

// Memory management
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);

inline void *MmapOrDieQuietly(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
void UnmapOrDie(void *addr, uptr size, bool raw_report = false);
// Behaves just like MmapOrDie, but tolerates an out-of-memory condition; in
// that case it returns nullptr.
void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name = nullptr)
    WARN_UNUSED_RESULT;
bool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size,
                             const char *name = nullptr) WARN_UNUSED_RESULT;
void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
// Behaves just like MmapFixedOrDie, but tolerates an out-of-memory condition;
// in that case it returns nullptr.
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size,
                                 const char *name = nullptr);
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
void *MmapNoAccess(uptr size);
// Map an aligned chunk of address space; size and alignment are powers of two.
// Dies on all but out-of-memory errors; in the latter case returns nullptr.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type);
// Disallow access to a memory range. Use MmapFixedNoAccess to allocate an
// inaccessible memory region.
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);
bool MprotectReadWrite(uptr addr, uptr size);

void MprotectMallocZones(void *addr, int prot);

#if SANITIZER_WINDOWS
// Zero previously mmap'd memory. Currently used only on Windows.
bool ZeroMmapFixedRegion(uptr fixed_addr, uptr size) WARN_UNUSED_RESULT;
#endif
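
// Illustrative pattern (not from the original header): the *OnFatalError
// variants return nullptr instead of dying on OOM, so callers can degrade
// gracefully; `shadow_size` is hypothetical.
//   void *shadow = MmapOrDieOnFatalError(shadow_size, "shadow");
//   if (!shadow)
//     Report("WARNING: could not map shadow memory\n");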

#if SANITIZER_LINUX
// Unmap memory. Currently only used on Linux.
void UnmapFromTo(uptr from, uptr to);
#endif

// Maps shadow_size_bytes of shadow memory and returns the shadow address. It
// will be aligned to the mmap granularity * 2^shadow_scale, or to
// 2^min_shadow_base_alignment if that is larger. The returned address will
// have max(2^min_shadow_base_alignment, mmap granularity) on the left, and
// shadow_size_bytes bytes on the right, which on Linux is mapped with no
// access. The high_mem_end may be updated if the original shadow size doesn't
// fit.
uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
                      uptr min_shadow_base_alignment, uptr &high_mem_end,
                      uptr granularity);

// Let S = max(shadow_size, num_aliases * alias_size, ring_buffer_size).
// Reserves 2*S bytes of address space to the right of the returned address and
// ring_buffer_size bytes to the left. The returned address is aligned to 2*S.
// Also creates num_aliases regions of accessible memory starting at offset S
// from the returned address. Each region has size alias_size and is backed by
// the same physical memory.
uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
                                uptr num_aliases, uptr ring_buffer_size);

// Reserve the memory range [beg, end]. If madvise_shadow is true then apply
// the madvise settings (e.g. hugepages, core dumping) requested by options.
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,
                              bool madvise_shadow = true);

// Protect size bytes of memory starting at addr. Also try to protect
// several pages at the start of the address space as specified by
// zero_base_shadow_start, at most up to the size or zero_base_max_shadow_start.
void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
                uptr zero_base_max_shadow_start);

// Find an available address space.
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                              uptr *largest_gap_found, uptr *max_occupied_addr);

// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
// Releases memory pages entirely within the [beg, end] address range. No-op if
// the provided range does not contain at least one entire page.
void ReleaseMemoryPagesToOS(uptr beg, uptr end);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
uptr GetRSS();
void SetShadowRegionHugePageMode(uptr addr, uptr length);
bool DontDumpShadowMemory(uptr addr, uptr length);
// Check if the built VMA size matches the runtime one.
void CheckVMASize();
void RunMallocHooks(void *ptr, uptr size);
int RunFreeHooks(void *ptr);

class ReservedAddressRange {
 public:
  uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0);
  uptr InitAligned(uptr size, uptr align, const char *name = nullptr);
  uptr Map(uptr fixed_addr, uptr size, const char *name = nullptr);
  uptr MapOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
  void Unmap(uptr addr, uptr size);
  void *base() const { return base_; }
  uptr size() const { return size_; }

 private:
  void *base_;
  uptr size_;
  const char *name_;
  uptr os_handle_;
};
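
// Illustrative sketch (not from the original header): reserve address space
// up front, then commit and release pages on demand; `kReservedSize` is
// hypothetical.
//   ReservedAddressRange range;
//   uptr base = range.Init(kReservedSize, "demo range");
//   range.MapOrDie(base, GetPageSizeCached());  // commit the first page
//   range.Unmap(base, GetPageSizeCached());     // release it again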

typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
                               /*out*/ uptr *stats);

// Parse the contents of /proc/self/smaps and generate a memory profile.
// |cb| is a tool-specific callback that fills the |stats| array.
void GetMemoryProfile(fill_profile_f cb, uptr *stats);
void ParseUnixMemoryProfile(fill_profile_f cb, uptr *stats, char *smaps,
                            uptr smaps_len);

// Simple low-level (mmap-based) allocator for internal use. It doesn't have
// a constructor, so all instances of LowLevelAllocator should be
// linker initialized.
//
// NOTE: Users should instead use the singleton provided via
// `GetGlobalLowLevelAllocator()` rather than create a new one. This way, the
// number of mmap fragments can be reduced, and allocations can share the
// contiguous mmap provided by this singleton.
class LowLevelAllocator {
 public:
  // Requires an external lock.
  void *Allocate(uptr size);

 private:
  char *allocated_end_;
  char *allocated_current_;
};
// Set the min alignment of LowLevelAllocator to at least alignment.
void SetLowLevelAllocateMinAlignment(uptr alignment);
typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
// Allows registering tool-specific callbacks for LowLevelAllocator.
// Passing NULL removes the callback.
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);

LowLevelAllocator &GetGlobalLowLevelAllocator();
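
// Illustrative usage (assumption, not from the original header): POD objects
// can be carved out of the global low-level allocator via the placement
// operator new defined at the bottom of this file; callers must provide their
// own locking, and `MyPodType` is hypothetical.
//   MyPodType *p = new (GetGlobalLowLevelAllocator()) MyPodType();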

// IO
void CatastrophicErrorWrite(const char *buffer, uptr length);
void RawWrite(const char *buffer);
bool ColorizeReports();
void RemoveANSIEscapeSequencesFromString(char *buffer);
void Printf(const char *format, ...) FORMAT(1, 2);
void Report(const char *format, ...) FORMAT(1, 2);
void SetPrintfAndReportCallback(void (*callback)(const char *));
#define VReport(level, ...)                                \
  do {                                                     \
    if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \
  } while (0)
#define VPrintf(level, ...)                                \
  do {                                                     \
    if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \
  } while (0)
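
// Example (illustrative, not from the original header): print only when
// verbosity is at least 2; `n_modules` is hypothetical.
//   VReport(2, "loaded %zd modules\n", n_modules);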

// Locks sanitizer error reporting and protects against nested errors.
class ScopedErrorReportLock {
 public:
  ScopedErrorReportLock() SANITIZER_ACQUIRE(mutex_) { Lock(); }
  ~ScopedErrorReportLock() SANITIZER_RELEASE(mutex_) { Unlock(); }

  static void Lock() SANITIZER_ACQUIRE(mutex_);
  static void Unlock() SANITIZER_RELEASE(mutex_);
  static void CheckLocked() SANITIZER_CHECK_LOCKED(mutex_);

 private:
  static atomic_uintptr_t reporting_thread_;
  static StaticSpinMutex mutex_;
};

extern uptr stoptheworld_tracer_pid;
extern uptr stoptheworld_tracer_ppid;

bool IsAccessibleMemoryRange(uptr beg, uptr size);

// Error report formatting.
const char *StripPathPrefix(const char *filepath,
                            const char *strip_file_prefix);
// Strip the directories from the module name.
const char *StripModuleName(const char *module);

// OS
uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len);
uptr ReadBinaryNameCached(/*out*/ char *buf, uptr buf_len);
uptr ReadBinaryDir(/*out*/ char *buf, uptr buf_len);
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
const char *GetProcessName();
void UpdateProcessName();
void CacheBinaryName();
void DisableCoreDumperIfNecessary();
void DumpProcessMap();
const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value);

u32 GetUid();
void ReExec();
void CheckASLR();
void CheckMPROTECT();
char **GetArgv();
char **GetEnviron();
void PrintCmdline();
bool StackSizeIsUnlimited();
void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
void PlatformPrepareForSandboxing(void *args);
void SetSandboxingCallback(void (*f)());

void InitializeCoverage(bool enabled, const char *coverage_dir);

void InitTlsSize();
uptr GetTlsSize();

// Other
void WaitForDebugger(unsigned seconds, const char *label);
void SleepForSeconds(unsigned seconds);
void SleepForMillis(unsigned millis);
u64 NanoTime();
u64 MonotonicNanoTime();
int Atexit(void (*function)(void));
bool TemplateMatch(const char *templ, const char *str);

// Exit
void NORETURN Abort();
void NORETURN Die();
void NORETURN
CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
                                      const char *mmap_type, error_t err,
                                      bool raw_report = false);
void NORETURN ReportMunmapFailureAndDie(void *ptr, uptr size, error_t err,
                                        bool raw_report = false);

// Returns true if the platform-specific error reported is an OOM error.
bool ErrorIsOOM(error_t err);

// This reports an error in the form:
//
//   `ERROR: {{SanitizerToolName}}: out of memory: {{err_msg}}`
//
// Downstream tools that read sanitizer output will know that errors starting
// in this format are specifically OOM errors.
#define ERROR_OOM(err_msg, ...) \
  Report("ERROR: %s: out of memory: " err_msg, SanitizerToolName, __VA_ARGS__)

// Specific tools may override the behavior of the "Die" function to do
// tool-specific work.
typedef void (*DieCallbackType)(void);

// It's possible to add several callbacks that will be run when "Die" is
// called. The callbacks are run in reverse order of registration. Tools are
// strongly recommended to set up all callbacks during initialization, when
// there is only a single thread.
bool AddDieCallback(DieCallbackType callback);
bool RemoveDieCallback(DieCallbackType callback);

void SetUserDieCallback(DieCallbackType callback);

void SetCheckUnwindCallback(void (*callback)());

// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
HandleSignalMode GetHandleSignalMode(int signum);
void InstallDeadlySignalHandlers(SignalHandlerType handler);

// Signal reporting.
// Each sanitizer uses a slightly different implementation of stack unwinding.
typedef void (*UnwindSignalStackCallbackType)(const SignalContext &sig,
                                              const void *callback_context,
                                              BufferedStackTrace *stack);
// Print deadly signal report and die.
void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Part of HandleDeadlySignal, exposed for asan.
void StartReportDeadlySignal();
// Part of HandleDeadlySignal, exposed for asan.
void ReportDeadlySignal(const SignalContext &sig, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Alternative signal stack (POSIX-only).
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();

// Construct a one-line string:
//   SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
// If alt_tool_name is provided, it's used in place of SanitizerToolName.
void ReportErrorSummary(const char *error_message,
                        const char *alt_tool_name = nullptr);
// Same as above, but construct error_message as:
//   error_type file:line[:column][ function]
void ReportErrorSummary(const char *error_type, const AddressInfo &info,
                        const char *alt_tool_name = nullptr);
// Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
void ReportErrorSummary(const char *error_type, const StackTrace *trace,
                        const char *alt_tool_name = nullptr);
// Skips frames which we consider internal and not useful to users.
const SymbolizedStack *SkipInternalFrames(const SymbolizedStack *frames);

void ReportMmapWriteExec(int prot, int mflags);

// Math
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
extern "C" {
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
#if defined(_WIN64)
unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);
unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);
#endif
}
#endif

inline uptr MostSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
# else
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
# endif
#elif defined(_WIN64)
  _BitScanReverse64(&up, x);
#else
  _BitScanReverse(&up, x);
#endif
  return up;
}

inline uptr LeastSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = __builtin_ctzll(x);
# else
  up = __builtin_ctzl(x);
# endif
#elif defined(_WIN64)
  _BitScanForward64(&up, x);
#else
  _BitScanForward(&up, x);
#endif
  return up;
}

inline constexpr bool IsPowerOfTwo(uptr x) { return (x & (x - 1)) == 0; }

inline uptr RoundUpToPowerOfTwo(uptr size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  uptr up = MostSignificantSetBitIndex(size);
  CHECK_LT(size, (1ULL << (up + 1)));
  CHECK_GT(size, (1ULL << up));
  return 1ULL << (up + 1);
}

inline constexpr uptr RoundUpTo(uptr size, uptr boundary) {
  RAW_CHECK(IsPowerOfTwo(boundary));
  return (size + boundary - 1) & ~(boundary - 1);
}

inline constexpr uptr RoundDownTo(uptr x, uptr boundary) {
  return x & ~(boundary - 1);
}

inline constexpr bool IsAligned(uptr a, uptr alignment) {
  return (a & (alignment - 1)) == 0;
}

inline uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
  return LeastSignificantSetBitIndex(x);
}
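
// Worked examples (illustrative, not from the original header): for
// x == 40 (0b101000):
//   MostSignificantSetBitIndex(40) == 5, LeastSignificantSetBitIndex(40) == 3,
//   RoundUpToPowerOfTwo(40) == 64, RoundUpTo(40, 16) == 48,
//   RoundDownTo(40, 16) == 32, IsAligned(40, 8) == true, Log2(64) == 6.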

// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
template <class T>
constexpr T Min(T a, T b) {
  return a < b ? a : b;
}
template <class T>
constexpr T Max(T a, T b) {
  return a > b ? a : b;
}
template <class T>
constexpr T Abs(T a) {
  return a < 0 ? -a : a;
}
template <class T>
void Swap(T &a, T &b) {
  T tmp = a;
  a = b;
  b = tmp;
}

// Char handling
inline bool IsSpace(int c) {
  return (c == ' ') || (c == '\n') || (c == '\t') ||
         (c == '\f') || (c == '\r') || (c == '\v');
}
inline bool IsDigit(int c) {
  return (c >= '0') && (c <= '9');
}
inline int ToLower(int c) {
  return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
}

// A low-level vector based on mmap. May incur a significant memory overhead
// for small vectors.
// WARNING: The current implementation supports only POD types.
template <typename T, bool raw_report = false>
class InternalMmapVectorNoCtor {
 public:
  using value_type = T;
  void Initialize(uptr initial_capacity) {
    capacity_bytes_ = 0;
    size_ = 0;
    data_ = 0;
    reserve(initial_capacity);
  }
  void Destroy() { UnmapOrDie(data_, capacity_bytes_, raw_report); }
  T &operator[](uptr i) {
    CHECK_LT(i, size_);
    return data_[i];
  }
  const T &operator[](uptr i) const {
    CHECK_LT(i, size_);
    return data_[i];
  }
  void push_back(const T &element) {
    if (UNLIKELY(size_ >= capacity())) {
      CHECK_EQ(size_, capacity());
      uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
      Realloc(new_capacity);
    }
    internal_memcpy(&data_[size_++], &element, sizeof(T));
  }
  T &back() {
    CHECK_GT(size_, 0);
    return data_[size_ - 1];
  }
  void pop_back() {
    CHECK_GT(size_, 0);
    size_--;
  }
  uptr size() const {
    return size_;
  }
  const T *data() const {
    return data_;
  }
  T *data() {
    return data_;
  }
  uptr capacity() const { return capacity_bytes_ / sizeof(T); }
  void reserve(uptr new_size) {
    // Never downsize internal buffer.
    if (new_size > capacity())
      Realloc(new_size);
  }
  void resize(uptr new_size) {
    if (new_size > size_) {
      reserve(new_size);
      internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
    }
    size_ = new_size;
  }

  void clear() { size_ = 0; }
  bool empty() const { return size() == 0; }

  const T *begin() const {
    return data();
  }
  T *begin() {
    return data();
  }
  const T *end() const {
    return data() + size();
  }
  T *end() {
    return data() + size();
  }

  void swap(InternalMmapVectorNoCtor &other) {
    Swap(data_, other.data_);
    Swap(capacity_bytes_, other.capacity_bytes_);
    Swap(size_, other.size_);
  }

 private:
  NOINLINE void Realloc(uptr new_capacity) {
    CHECK_GT(new_capacity, 0);
    CHECK_LE(size_, new_capacity);
    uptr new_capacity_bytes =
        RoundUpTo(new_capacity * sizeof(T), GetPageSizeCached());
    T *new_data =
        (T *)MmapOrDie(new_capacity_bytes, "InternalMmapVector", raw_report);
    internal_memcpy(new_data, data_, size_ * sizeof(T));
    UnmapOrDie(data_, capacity_bytes_, raw_report);
    data_ = new_data;
    capacity_bytes_ = new_capacity_bytes;
  }

  T *data_;
  uptr capacity_bytes_;
  uptr size_;
};
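
// Illustrative usage (not from the original header), via the RAII wrapper
// InternalMmapVector defined below:
//   InternalMmapVector<uptr> v;
//   v.push_back(42);
//   v.push_back(7);
//   CHECK_EQ(v.size(), 2);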

template <typename T>
bool operator==(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  if (lhs.size() != rhs.size()) return false;
  return internal_memcmp(lhs.data(), rhs.data(), lhs.size() * sizeof(T)) == 0;
}

template <typename T>
bool operator!=(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  return !(lhs == rhs);
}

template <typename T>
class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
 public:
  InternalMmapVector() { InternalMmapVectorNoCtor<T>::Initialize(0); }
  explicit InternalMmapVector(uptr cnt) {
    InternalMmapVectorNoCtor<T>::Initialize(cnt);
    this->resize(cnt);
  }
  ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
  // Disallow copies and moves.
  InternalMmapVector(const InternalMmapVector &) = delete;
  InternalMmapVector &operator=(const InternalMmapVector &) = delete;
  InternalMmapVector(InternalMmapVector &&) = delete;
  InternalMmapVector &operator=(InternalMmapVector &&) = delete;
};

class InternalScopedString {
 public:
  InternalScopedString() : buffer_(1) { buffer_[0] = '\0'; }

  uptr length() const { return buffer_.size() - 1; }
  void clear() {
    buffer_.resize(1);
    buffer_[0] = '\0';
  }
  void Append(const char *str);
  void AppendF(const char *format, ...) FORMAT(2, 3);
  const char *data() const { return buffer_.data(); }
  char *data() { return buffer_.data(); }

 private:
  InternalMmapVector<char> buffer_;
};
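
// Illustrative usage (not from the original header); `n_errors` is
// hypothetical.
//   InternalScopedString s;
//   s.AppendF("found %zd errors\n", n_errors);
//   Printf("%s", s.data());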

template <class T>
struct CompareLess {
  bool operator()(const T &a, const T &b) const { return a < b; }
};

// HeapSort for arrays and InternalMmapVector.
template <class T, class Compare = CompareLess<T>>
void Sort(T *v, uptr size, Compare comp = {}) {
  if (size < 2)
    return;
  // Stage 1: insert elements into the heap.
  for (uptr i = 1; i < size; i++) {
    uptr j, p;
    for (j = i; j > 0; j = p) {
      p = (j - 1) / 2;
      if (comp(v[p], v[j]))
        Swap(v[j], v[p]);
      else
        break;
    }
  }
  // Stage 2: swap the largest element with the last one, and sink the new top.
  for (uptr i = size - 1; i > 0; i--) {
    Swap(v[0], v[i]);
    uptr j, max_ind;
    for (j = 0; j < i; j = max_ind) {
      uptr left = 2 * j + 1;
      uptr right = 2 * j + 2;
      max_ind = j;
      if (left < i && comp(v[max_ind], v[left]))
        max_ind = left;
      if (right < i && comp(v[max_ind], v[right]))
        max_ind = right;
      if (max_ind != j)
        Swap(v[j], v[max_ind]);
      else
        break;
    }
  }
}
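
// Example (illustrative, not from the original header): sorting in descending
// order with a custom comparator; `v` is a hypothetical
// InternalMmapVector<uptr>.
//   struct Greater {
//     bool operator()(uptr a, uptr b) const { return a > b; }
//   };
//   Sort(v.data(), v.size(), Greater());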

// Works like std::lower_bound: finds the first element that is not less
// than val.
template <class Container, class T,
          class Compare = CompareLess<typename Container::value_type>>
uptr InternalLowerBound(const Container &v, const T &val, Compare comp = {}) {
  uptr first = 0;
  uptr last = v.size();
  while (last > first) {
    uptr mid = (first + last) / 2;
    if (comp(v[mid], val))
      first = mid + 1;
    else
      last = mid;
  }
  return first;
}
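
// Worked example (illustrative): for a sorted container holding {1, 3, 5},
// InternalLowerBound(v, 3) returns 1 and InternalLowerBound(v, 4) returns 2,
// the index of the first element that is not less than the probe value.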

enum ModuleArch {
  kModuleArchUnknown,
  kModuleArchI386,
  kModuleArchX86_64,
  kModuleArchX86_64H,
  kModuleArchARMV6,
  kModuleArchARMV7,
  kModuleArchARMV7S,
  kModuleArchARMV7K,
  kModuleArchARM64,
  kModuleArchLoongArch64,
  kModuleArchRISCV64,
  kModuleArchHexagon
};

// Sorts and removes duplicates from the container.
template <class Container,
          class Compare = CompareLess<typename Container::value_type>>
void SortAndDedup(Container &v, Compare comp = {}) {
  Sort(v.data(), v.size(), comp);
  uptr size = v.size();
  if (size < 2)
    return;
  uptr last = 0;
  for (uptr i = 1; i < size; ++i) {
    if (comp(v[last], v[i])) {
      ++last;
      if (last != i)
        v[last] = v[i];
    } else {
      CHECK(!comp(v[i], v[last]));
    }
  }
  v.resize(last + 1);
}
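
// Worked example (illustrative): a container holding {3, 1, 3, 2} holds
// {1, 2, 3} after SortAndDedup(v).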

constexpr uptr kDefaultFileMaxSize = FIRST_32_SECOND_64(1 << 26, 1 << 28);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// Returns true if the file was successfully opened and read.
bool ReadFileToVector(const char *file_name,
                      InternalMmapVectorNoCtor<char> *buff,
                      uptr max_len = kDefaultFileMaxSize,
                      error_t *errno_p = nullptr);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// This function is less I/O efficient than ReadFileToVector, as it may reread
// the file multiple times to avoid mmap during read attempts. It's used to
// read procmaps, where short reads with mmap in between can produce an
// inconsistent result.
// The resulting buffer is mmaped and stored in '*buff'.
// The size of the mmaped region is stored in '*buff_size'.
// The total number of read bytes is stored in '*read_len'.
// Returns true if the file was successfully opened and read.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len = kDefaultFileMaxSize,
                      error_t *errno_p = nullptr);
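
// Illustrative usage (not from the original header): reading the process map
// on Linux; the buffer is released with UnmapOrDie when done.
//   char *buff;
//   uptr buff_size, read_len;
//   if (ReadFileToBuffer("/proc/self/maps", &buff, &buff_size, &read_len)) {
//     // ... use the first read_len bytes of buff ...
//     UnmapOrDie(buff, buff_size);
//   }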
774
775
int GetModuleAndOffsetForPc(uptr pc, char *module_name, uptr module_name_len,
776
uptr *pc_offset);
777
778
// When adding a new architecture, don't forget to also update
779
// script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cpp.
780
inline const char *ModuleArchToString(ModuleArch arch) {
781
switch (arch) {
782
case kModuleArchUnknown:
783
return "";
784
case kModuleArchI386:
785
return "i386";
786
case kModuleArchX86_64:
787
return "x86_64";
788
case kModuleArchX86_64H:
789
return "x86_64h";
790
case kModuleArchARMV6:
791
return "armv6";
792
case kModuleArchARMV7:
793
return "armv7";
794
case kModuleArchARMV7S:
795
return "armv7s";
796
case kModuleArchARMV7K:
797
return "armv7k";
798
case kModuleArchARM64:
799
return "arm64";
800
case kModuleArchLoongArch64:
801
return "loongarch64";
802
case kModuleArchRISCV64:
803
return "riscv64";
804
case kModuleArchHexagon:
805
return "hexagon";
806
}
807
CHECK(0 && "Invalid module arch");
808
return "";
809
}

#if SANITIZER_APPLE
const uptr kModuleUUIDSize = 16;
#else
const uptr kModuleUUIDSize = 32;
#endif
const uptr kMaxSegName = 16;

// Represents a binary loaded into virtual memory (e.g. this can be an
// executable or a shared object).
class LoadedModule {
 public:
  LoadedModule()
      : full_name_(nullptr),
        base_address_(0),
        max_address_(0),
        arch_(kModuleArchUnknown),
        uuid_size_(0),
        instrumented_(false) {
    internal_memset(uuid_, 0, kModuleUUIDSize);
    ranges_.clear();
  }
  void set(const char *module_name, uptr base_address);
  void set(const char *module_name, uptr base_address, ModuleArch arch,
           u8 uuid[kModuleUUIDSize], bool instrumented);
  void setUuid(const char *uuid, uptr size);
  void clear();
  void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
                       const char *name = nullptr);
  bool containsAddress(uptr address) const;

  const char *full_name() const { return full_name_; }
  uptr base_address() const { return base_address_; }
  uptr max_address() const { return max_address_; }
  ModuleArch arch() const { return arch_; }
  const u8 *uuid() const { return uuid_; }
  uptr uuid_size() const { return uuid_size_; }
  bool instrumented() const { return instrumented_; }

  struct AddressRange {
    AddressRange *next;
    uptr beg;
    uptr end;
    bool executable;
    bool writable;
    char name[kMaxSegName];

    AddressRange(uptr beg, uptr end, bool executable, bool writable,
                 const char *name)
        : next(nullptr),
          beg(beg),
          end(end),
          executable(executable),
          writable(writable) {
      internal_strncpy(this->name, (name ? name : ""), ARRAY_SIZE(this->name));
    }
  };

  const IntrusiveList<AddressRange> &ranges() const { return ranges_; }

 private:
  char *full_name_;  // Owned.
  uptr base_address_;
  uptr max_address_;
  ModuleArch arch_;
  uptr uuid_size_;
  u8 uuid_[kModuleUUIDSize];
  bool instrumented_;
  IntrusiveList<AddressRange> ranges_;
};

// List of LoadedModules. The OS-dependent implementation is responsible for
// filling this information.
class ListOfModules {
 public:
  ListOfModules() : initialized(false) {}
  ~ListOfModules() { clear(); }
  void init();
  void fallbackInit();  // Uses fallback init if available, otherwise clears.
  const LoadedModule *begin() const { return modules_.begin(); }
  LoadedModule *begin() { return modules_.begin(); }
  const LoadedModule *end() const { return modules_.end(); }
  LoadedModule *end() { return modules_.end(); }
  uptr size() const { return modules_.size(); }
  const LoadedModule &operator[](uptr i) const {
    CHECK_LT(i, modules_.size());
    return modules_[i];
  }

 private:
  void clear() {
    for (auto &module : modules_) module.clear();
    modules_.clear();
  }
  void clearOrInit() {
    initialized ? clear() : modules_.Initialize(kInitialCapacity);
    initialized = true;
  }

  InternalMmapVectorNoCtor<LoadedModule> modules_;
  // We rarely have more than 16K loaded modules.
  static const uptr kInitialCapacity = 1 << 14;
  bool initialized;
};

// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);

enum AndroidApiLevel {
  ANDROID_NOT_ANDROID = 0,
  ANDROID_KITKAT = 19,
  ANDROID_LOLLIPOP_MR1 = 22,
  ANDROID_POST_LOLLIPOP = 23
};

void WriteToSyslog(const char *buffer);

#if defined(SANITIZER_WINDOWS) && defined(_MSC_VER) && !defined(__clang__)
#define SANITIZER_WIN_TRACE 1
#else
#define SANITIZER_WIN_TRACE 0
#endif

#if SANITIZER_APPLE || SANITIZER_WIN_TRACE
void LogFullErrorReport(const char *buffer);
#else
inline void LogFullErrorReport(const char *buffer) {}
#endif

#if SANITIZER_LINUX || SANITIZER_APPLE
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
inline void WriteOneLineToSyslog(const char *s) {}
inline void LogMessageOnPrintf(const char *str) {}
#endif

#if SANITIZER_LINUX || SANITIZER_WIN_TRACE
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
void SetAbortMessage(const char *);
#else
inline void AndroidLogInit() {}
// FIXME: MacOS implementation could use CRSetCrashLogMessage.
inline void SetAbortMessage(const char *) {}
#endif

#if SANITIZER_ANDROID
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
inline void AndroidLogWrite(const char *buffer_unused) {}
inline void SanitizerInitializeUnwinder() {}
inline AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif

inline uptr GetPthreadDestructorIterations() {
#if SANITIZER_ANDROID
  return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
#elif SANITIZER_POSIX
  return 4;
#else
  // Unused on Windows.
  return 0;
#endif
}

void *internal_start_thread(void *(*func)(void *), void *arg);
void internal_join_thread(void *th);
void MaybeStartBackgroudThread();

// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc to prevent the
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if defined(_MSC_VER) && !defined(__clang__)
  _ReadWriteBarrier();
#else
  __asm__ __volatile__("" : : "r"(arg) : "memory");
#endif
}
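
// Illustrative usage (not from the original header): keep a hand-written
// zeroing loop from being pattern-matched into a real memset call; `p` and
// `size` are hypothetical.
//   for (uptr i = 0; i < size; i++) {
//     p[i] = 0;
//     SanitizerBreakOptimization(&p[i]);
//   }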

struct SignalContext {
  void *siginfo;
  void *context;
  uptr addr;
  uptr pc;
  uptr sp;
  uptr bp;
  bool is_memory_access;
  enum WriteFlag { Unknown, Read, Write } write_flag;

  // In some cases the kernel cannot provide the true faulting address; `addr`
  // will be zero then. This field allows distinguishing between these cases
  // and dereferences of null.
  bool is_true_faulting_addr;

  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor.
  SignalContext() = default;

  // Creates a signal context in a platform-specific manner.
  // SignalContext is going to keep pointers to siginfo and context without
  // owning them.
  SignalContext(void *siginfo, void *context)
      : siginfo(siginfo),
        context(context),
        addr(GetAddress()),
        is_memory_access(IsMemoryAccess()),
        write_flag(GetWriteFlag()),
        is_true_faulting_addr(IsTrueFaultingAddress()) {
    InitPcSpBp();
  }

  static void DumpAllRegisters(void *context);

  // Type of signal, e.g. SIGSEGV or EXCEPTION_ACCESS_VIOLATION.
  int GetType() const;

  // String description of the signal.
  const char *Describe() const;

  // Returns true if the signal is a stack overflow.
  bool IsStackOverflow() const;

 private:
  // Platform-specific initialization.
  void InitPcSpBp();
  uptr GetAddress() const;
  WriteFlag GetWriteFlag() const;
  bool IsMemoryAccess() const;
  bool IsTrueFaultingAddress() const;
};

void InitializePlatformEarly();

template <typename Fn>
class RunOnDestruction {
 public:
  explicit RunOnDestruction(Fn fn) : fn_(fn) {}
  ~RunOnDestruction() { fn_(); }

 private:
  Fn fn_;
};

// A simple scope guard. Usage:
//   auto cleanup = at_scope_exit([]{ do_cleanup; });
template <typename Fn>
RunOnDestruction<Fn> at_scope_exit(Fn fn) {
  return RunOnDestruction<Fn>(fn);
}

// Linux on 64-bit s390 had a nasty bug that crashed the whole machine
// if a process used virtual memory over 4TB (as many sanitizers like
// to do). This function will abort the process if running on a kernel
// that looks vulnerable.
#if SANITIZER_LINUX && SANITIZER_S390_64
void AvoidCVE_2016_2143();
#else
inline void AvoidCVE_2016_2143() {}
#endif

struct StackDepotStats {
  uptr n_uniq_ids;
  uptr allocated;
};

// The default value for the allocator_release_to_os_interval_ms common flag,
// indicating that the sanitizer allocator should not attempt to release
// memory to the OS.
const s32 kReleaseToOSIntervalNever = -1;

void CheckNoDeepBind(const char *filename, int flag);
1084
1085
// Returns the requested amount of random data (up to 256 bytes) that can then
1086
// be used to seed a PRNG. Defaults to blocking like the underlying syscall.
1087
bool GetRandom(void *buffer, uptr length, bool blocking = true);
1088
1089
// Returns the number of logical processors on the system.
1090
u32 GetNumberOfCPUs();
1091
extern u32 NumberOfCPUsCached;
1092
inline u32 GetNumberOfCPUsCached() {
1093
if (!NumberOfCPUsCached)
1094
NumberOfCPUsCached = GetNumberOfCPUs();
1095
return NumberOfCPUsCached;
1096
}

}  // namespace __sanitizer

inline void *operator new(__sanitizer::usize size,
                          __sanitizer::LowLevelAllocator &alloc) {
  return alloc.Allocate(size);
}

#endif  // SANITIZER_COMMON_H