GitHub Repository: freebsd/freebsd-src
Path: blob/main/contrib/llvm-project/compiler-rt/lib/msan/msan_allocator.cpp
//===-- msan_allocator.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// MemorySanitizer allocator.
//===----------------------------------------------------------------------===//

#include "msan_allocator.h"

#include "msan.h"
#include "msan_interface_internal.h"
#include "msan_origin.h"
#include "msan_poisoning.h"
#include "msan_thread.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"

namespace __msan {

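// Per-chunk metadata kept by the allocator: the size originally requested by
// the caller.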
struct Metadata {
  uptr requested_size;
};

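// Map/unmap callbacks for the allocator. On unmap, the user memory is
// unpoisoned and the corresponding shadow (and origin, when tracked) pages are
// released back to the OS.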
struct MsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {}
  void OnMapSecondary(uptr p, uptr size, uptr user_begin,
                      uptr user_size) const {}
  void OnUnmap(uptr p, uptr size) const {
    __msan_unpoison((void *)p, size);

    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    uptr shadow_p = MEM_TO_SHADOW(p);
    ReleaseMemoryPagesToOS(shadow_p, shadow_p + size);
    if (__msan_get_track_origins()) {
      uptr origin_p = MEM_TO_ORIGIN(p);
      ReleaseMemoryPagesToOS(origin_p, origin_p + size);
    }
  }
};

// Note: to ensure that the allocator is compatible with the application memory
// layout (especially with high-entropy ASLR), kSpaceBeg and kSpaceSize must be
// duplicated as MappingDesc::ALLOCATOR in msan.h.
#if defined(__mips64)
static const uptr kMaxAllowedMallocSize = 2UL << 30;

struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = 20;
  using AddressSpaceView = LocalAddressSpaceView;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#elif defined(__x86_64__)
#if SANITIZER_NETBSD || SANITIZER_LINUX
static const uptr kAllocatorSpace = 0x700000000000ULL;
#else
static const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
static const uptr kMaxAllowedMallocSize = 1ULL << 40;

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;

#elif defined(__loongarch_lp64)
const uptr kAllocatorSpace = 0x700000000000ULL;
const uptr kMaxAllowedMallocSize = 8UL << 30;

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;

#elif defined(__powerpc64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0x300000000000;
  static const uptr kSpaceSize = 0x020000000000;  // 2T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__s390x__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0x440000000000;
  static const uptr kSpaceSize = 0x020000000000;  // 2T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 8UL << 30;

struct AP64 {
  static const uptr kSpaceBeg = 0xE00000000000ULL;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif
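// Combine the size-class primary allocator with a secondary allocator that
// services requests the primary cannot handle (large allocations).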
typedef CombinedAllocator<PrimaryAllocator> Allocator;
typedef Allocator::AllocatorCache AllocatorCache;

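// Global allocator state: a single shared allocator plus a fallback cache,
// protected by fallback_mutex, for contexts where no MsanThread is available.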
static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static StaticSpinMutex fallback_mutex;

static uptr max_malloc_size;

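// Initializes the allocator from common sanitizer flags and caps the maximum
// allocation size at the platform limit.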
void MsanAllocatorInit() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
  if (common_flags()->max_allocation_size_mb)
    max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
                          kMaxAllowedMallocSize);
  else
    max_malloc_size = kMaxAllowedMallocSize;
}

void LockAllocator() { allocator.ForceLock(); }

void UnlockAllocator() { allocator.ForceUnlock(); }

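// Returns the per-thread allocator cache embedded in the thread's malloc
// storage.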
AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
}

void MsanThreadLocalMallocStorage::Init() {
  allocator.InitCache(GetAllocatorCache(this));
}

void MsanThreadLocalMallocStorage::CommitBack() {
  allocator.SwallowCache(GetAllocatorCache(this));
  allocator.DestroyCache(GetAllocatorCache(this));
}

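// Common allocation path: enforces the size and RSS limits, allocates from the
// current thread's cache (or the global fallback cache), records the requested
// size in the chunk metadata, and zeroes or poisons the new memory as
// requested, attaching an allocation origin when origin tracking is on.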
static void *MsanAllocate(BufferedStackTrace *stack, uptr size, uptr alignment,
                          bool zeroise) {
  if (UNLIKELY(size > max_malloc_size)) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: MemorySanitizer failed to allocate 0x%zx bytes\n", size);
      return nullptr;
    }
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportAllocationSizeTooBig(size, max_malloc_size, stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportRssLimitExceeded(stack);
  }
  MsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportOutOfMemory(size, stack);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = size;
  if (zeroise) {
    if (allocator.FromPrimary(allocated))
      __msan_clear_and_unpoison(allocated, size);
    else
      __msan_unpoison(allocated, size);  // Mem is already zeroed.
  } else if (flags()->poison_in_malloc) {
    __msan_poison(allocated, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_ALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(allocated, size, o.raw_id());
    }
  }
  UnpoisonParam(2);
  RunMallocHooks(allocated, size);
  return allocated;
}

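// Common deallocation path: optionally re-poisons the chunk (recording a
// deallocation origin when origin tracking is on) and returns it to the
// appropriate cache.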
void MsanDeallocate(BufferedStackTrace *stack, void *p) {
  CHECK(p);
  UnpoisonParam(1);
  RunFreeHooks(p);

  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
  uptr size = meta->requested_size;
  meta->requested_size = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned. The secondary allocator will unmap and unpoison it via
  // MsanMapUnmapCallback, so there is no need to poison it here.
  if (flags()->poison_in_free && allocator.FromPrimary(p)) {
    __msan_poison(p, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_DEALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(p, size, o.raw_id());
    }
  }
  MsanThread *t = GetCurrentThread();
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocator.Deallocate(cache, p);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, p);
  }
}

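// Reuses the existing chunk when it is already large enough (poisoning any
// newly exposed tail bytes); otherwise allocates a new chunk, copies the old
// contents together with their shadow and origin, and frees the old chunk.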
static void *MsanReallocate(BufferedStackTrace *stack, void *old_p,
                            uptr new_size, uptr alignment) {
  Metadata *meta = reinterpret_cast<Metadata*>(allocator.GetMetaData(old_p));
  uptr old_size = meta->requested_size;
  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
  if (new_size <= actually_allocated_size) {
    // We are not reallocating here.
    meta->requested_size = new_size;
    if (new_size > old_size) {
      if (flags()->poison_in_malloc) {
        stack->tag = StackTrace::TAG_ALLOC;
        PoisonMemory((char *)old_p + old_size, new_size - old_size, stack);
      }
    }
    return old_p;
  }
  uptr memcpy_size = Min(new_size, old_size);
  void *new_p = MsanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (new_p) {
    CopyMemory(new_p, old_p, memcpy_size, stack);
    MsanDeallocate(stack, old_p);
  }
  return new_p;
}

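// calloc() path: checks nmemb * size for overflow, then performs a
// zero-initialized allocation.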
static void *MsanCalloc(BufferedStackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportCallocOverflow(nmemb, size, stack);
  }
  return MsanAllocate(stack, nmemb * size, sizeof(u64), true);
}

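// Returns the start of the live chunk containing p, or null if p does not
// point into a live heap allocation.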
static const void *AllocationBegin(const void *p) {
  if (!p)
    return nullptr;
  void *beg = allocator.GetBlockBegin(p);
  if (!beg)
    return nullptr;
  Metadata *b = (Metadata *)allocator.GetMetaData(beg);
  if (!b)
    return nullptr;
  if (b->requested_size == 0)
    return nullptr;

  return (const void *)beg;
}

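// Returns the requested size of the chunk that starts exactly at p, or 0 if p
// is not the beginning of a chunk owned by this allocator.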
static uptr AllocationSize(const void *p) {
  if (!p) return 0;
  const void *beg = allocator.GetBlockBegin(p);
  if (beg != p) return 0;
  Metadata *b = (Metadata *)allocator.GetMetaData(p);
  return b->requested_size;
}

static uptr AllocationSizeFast(const void *p) {
  return reinterpret_cast<Metadata *>(allocator.GetMetaData(p))->requested_size;
}

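// Entry points called from the malloc/free interceptors. They wrap the
// internal routines above and set errno on failure.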
void *msan_malloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
}

void *msan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(MsanCalloc(stack, nmemb, size));
}

void *msan_realloc(void *ptr, uptr size, BufferedStackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    MsanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(MsanReallocate(stack, ptr, size, sizeof(u64)));
}

void *msan_reallocarray(void *ptr, uptr nmemb, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return msan_realloc(ptr, nmemb * size, stack);
}

void *msan_valloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, GetPageSizeCached(), false));
}

void *msan_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(MsanAllocate(stack, size, PageSize, false));
}

void *msan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}

void *msan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}

int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = MsanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by MsanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

}  // namespace __msan

using namespace __msan;

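// Public __sanitizer_* allocator interface.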
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

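// Free and unmapped byte counts are not tracked by the MSan allocator; these
// return placeholder values.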
uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

const void *__sanitizer_get_allocated_begin(const void *p) {
  return AllocationBegin(p);
}

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }

uptr __sanitizer_get_allocated_size_fast(const void *p) {
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = AllocationSizeFast(p);
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}

void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); }