Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
emscripten-core
GitHub Repository: emscripten-core/emscripten
Path: blob/main/system/lib/mimalloc/src/alloc.c
6175 views
1
/* ----------------------------------------------------------------------------
2
Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
3
This is free software; you can redistribute it and/or modify it under the
4
terms of the MIT license. A copy of the license can be found in the file
5
"LICENSE" at the root of this distribution.
6
-----------------------------------------------------------------------------*/
7
#ifndef _DEFAULT_SOURCE
8
#define _DEFAULT_SOURCE // for realpath() on Linux
9
#endif
10
11
#include "mimalloc.h"
12
#include "mimalloc/internal.h"
13
#include "mimalloc/atomic.h"
14
#include "mimalloc/prim.h" // _mi_prim_thread_id()
15
16
#include <string.h> // memset, strlen (for mi_strdup)
17
#include <stdlib.h> // malloc, abort
18
19
#define MI_IN_ALLOC_C
20
#include "alloc-override.c"
21
#include "free.c"
22
#undef MI_IN_ALLOC_C
23
24
// ------------------------------------------------------
25
// Allocation
26
// ------------------------------------------------------
27
28
// Fast allocation in a page: just pop from the free list.
// Fall back to generic allocation only if the list is empty.
// `size` must already include MI_PADDING_SIZE; `zero` requests zero-initialized memory.
// Note: in release mode the (inlined) routine is about 7 instructions with a single test.
extern inline void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept
{
  mi_assert_internal(page->block_size == 0 /* empty heap */ || mi_page_block_size(page) >= size);
  mi_block_t* const block = page->free;
  if mi_unlikely(block == NULL) {
    // free list exhausted: take the slow generic path
    return _mi_malloc_generic(heap, size, zero, 0);
  }
  mi_assert_internal(block != NULL && _mi_ptr_page(block) == page);
  // pop from the free list
  page->free = mi_block_next(page, block);
  page->used++;
  mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);
  #if MI_DEBUG>3
  if (page->free_is_zero) {
    mi_assert_expensive(mi_mem_is_zero(block+1,size - sizeof(*block)));
  }
  #endif

  // allow use of the block internally
  // note: when tracking we need to avoid ever touching the MI_PADDING since
  // that is tracked by valgrind etc. as non-accessible (through the red-zone, see `mimalloc/track.h`)
  mi_track_mem_undefined(block, mi_page_usable_block_size(page));

  // zero the block? note: we need to zero the full block size (issue #63)
  if mi_unlikely(zero) {
    mi_assert_internal(page->block_size != 0); // do not call with zero'ing for huge blocks (see _mi_malloc_generic)
    mi_assert_internal(page->block_size >= MI_PADDING_SIZE);
    if (page->free_is_zero) {
      // memory is already zero except for the `next` link just consumed above
      block->next = 0;
      mi_track_mem_defined(block, page->block_size - MI_PADDING_SIZE);
    }
    else {
      _mi_memzero_aligned(block, page->block_size - MI_PADDING_SIZE);
    }
  }

  #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN
  if (!zero && !mi_page_is_huge(page)) {
    // fill with a debug pattern so reads of uninitialized memory are visible
    memset(block, MI_DEBUG_UNINIT, mi_page_usable_block_size(page));
  }
  #elif (MI_SECURE!=0)
  if (!zero) { block->next = 0; } // don't leak internal data
  #endif

  #if (MI_STAT>0)
  const size_t bsize = mi_page_usable_block_size(page);
  if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) {
    mi_heap_stat_increase(heap, normal, bsize);
    mi_heap_stat_counter_increase(heap, normal_count, 1);
    #if (MI_STAT>1)
    const size_t bin = _mi_bin(bsize);
    mi_heap_stat_increase(heap, normal_bins[bin], 1);
    #endif
  }
  #endif

  #if MI_PADDING // && !MI_TRACK_ENABLED
  // write the trailing canary/delta padding used for heap-overflow detection
  mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + mi_page_usable_block_size(page));
  ptrdiff_t delta = ((uint8_t*)padding - (uint8_t*)block - (size - MI_PADDING_SIZE));
  #if (MI_DEBUG>=2)
  mi_assert_internal(delta >= 0 && mi_page_usable_block_size(page) >= (size - MI_PADDING_SIZE + delta));
  #endif
  mi_track_mem_defined(padding,sizeof(mi_padding_t)); // note: re-enable since mi_page_usable_block_size may set noaccess
  padding->canary = (uint32_t)(mi_ptr_encode(page,block,page->keys));
  padding->delta = (uint32_t)(delta);
  #if MI_PADDING_CHECK
  if (!mi_page_is_huge(page)) {
    uint8_t* fill = (uint8_t*)padding - delta;
    const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // set at most N initial padding bytes
    for (size_t i = 0; i < maxpad; i++) { fill[i] = MI_DEBUG_PADDING; }
  }
  #endif
  #endif

  return block;
}
107
108
// extra entries for improved efficiency in `alloc-aligned.c`.
109
extern void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept {
110
return _mi_page_malloc_zero(heap,page,size,false);
111
}
112
// Zeroing entry point for `alloc-aligned.c`.
extern void* _mi_page_malloc_zeroed(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept {
  const bool zero = true;
  return _mi_page_malloc_zero(heap, page, size, zero);
}
115
116
// Allocate a small block (size <= MI_SMALL_SIZE_MAX) from `heap` via the
// small-page direct lookup; optionally zero-initialized when `zero` is true.
static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
  mi_assert(heap != NULL);
  #if MI_DEBUG
  const uintptr_t tid = _mi_thread_id();
  mi_assert(heap->thread_id == 0 || heap->thread_id == tid); // heaps are thread local
  #endif
  mi_assert(size <= MI_SMALL_SIZE_MAX);
  #if (MI_PADDING)
  // with padding enabled, allocate at least `sizeof(void*)` bytes
  if (size == 0) { size = sizeof(void*); }
  #endif

  mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE);
  void* const p = _mi_page_malloc_zero(heap, page, size + MI_PADDING_SIZE, zero);
  mi_track_malloc(p,size,zero);

  #if MI_STAT>1
  if (p != NULL) {
    if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); }
    mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
  }
  #endif
  #if MI_DEBUG>3
  if (p != NULL && zero) {
    mi_assert_expensive(mi_mem_is_zero(p, size));
  }
  #endif
  return p;
}
144
145
// allocate a small block
146
mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept {
147
return mi_heap_malloc_small_zero(heap, size, false);
148
}
149
150
// Small-block allocation from the default (thread-local) heap.
mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexcept {
  mi_heap_t* const heap = mi_prim_get_default_heap();
  return mi_heap_malloc_small(heap, size);
}
153
154
// The main allocation function: dispatch to the small fast path when possible,
// otherwise to the generic path. `huge_alignment` is passed through to the
// generic path (zero except for huge aligned allocations).
extern inline void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept {
  if mi_likely(size <= MI_SMALL_SIZE_MAX) {
    mi_assert_internal(huge_alignment == 0);
    return mi_heap_malloc_small_zero(heap, size, zero);
  }
  else {
    mi_assert(heap!=NULL);
    mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
    void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero, huge_alignment); // note: size can overflow but it is detected in malloc_generic
    mi_track_malloc(p,size,zero);
    #if MI_STAT>1
    if (p != NULL) {
      if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); }
      mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
    }
    #endif
    #if MI_DEBUG>3
    if (p != NULL && zero) {
      mi_assert_expensive(mi_mem_is_zero(p, size));
    }
    #endif
    return p;
  }
}
179
180
// Allocation without a huge-alignment request.
extern inline void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
  const size_t no_huge_alignment = 0;
  return _mi_heap_malloc_zero_ex(heap, size, zero, no_huge_alignment);
}
183
184
// Allocate `size` bytes of uninitialized memory from `heap`.
mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
  const bool zero = false;
  return _mi_heap_malloc_zero(heap, size, zero);
}
187
188
// `malloc` replacement: allocate from the default (thread-local) heap.
mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept {
  mi_heap_t* const heap = mi_prim_get_default_heap();
  return mi_heap_malloc(heap, size);
}
191
192
// zero initialized small block
193
mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept {
194
return mi_heap_malloc_small_zero(mi_prim_get_default_heap(), size, true);
195
}
196
197
// Allocate `size` bytes of zero-initialized memory from `heap`.
mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
  const bool zero = true;
  return _mi_heap_malloc_zero(heap, size, zero);
}
200
201
// Zero-initialized allocation from the default heap.
mi_decl_nodiscard mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept {
  mi_heap_t* const heap = mi_prim_get_default_heap();
  return mi_heap_zalloc(heap, size);
}
204
205
206
// `calloc` on a specific heap: `count * size` zeroed bytes, NULL on overflow.
mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
  size_t total;
  const bool overflow = mi_count_size_overflow(count, size, &total);
  return (overflow ? NULL : mi_heap_zalloc(heap, total));
}
211
212
// `calloc` replacement on the default heap.
mi_decl_nodiscard mi_decl_restrict void* mi_calloc(size_t count, size_t size) mi_attr_noexcept {
  mi_heap_t* const heap = mi_prim_get_default_heap();
  return mi_heap_calloc(heap, count, size);
}
215
216
// Uninitialized `calloc`
217
mi_decl_nodiscard extern mi_decl_restrict void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
218
size_t total;
219
if (mi_count_size_overflow(count, size, &total)) return NULL;
220
return mi_heap_malloc(heap, total);
221
}
222
223
// Uninitialized `calloc` on the default heap.
mi_decl_nodiscard mi_decl_restrict void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept {
  mi_heap_t* const heap = mi_prim_get_default_heap();
  return mi_heap_mallocn(heap, count, size);
}
226
227
// Expand (or shrink) an allocation in place; returns `p` if `newsize` still
// fits in the usable size, NULL otherwise (or always NULL with padding enabled).
void* mi_expand(void* p, size_t newsize) mi_attr_noexcept {
  #if MI_PADDING
  // we do not shrink/expand with padding enabled (the trailing canary would move)
  MI_UNUSED(p); MI_UNUSED(newsize);
  return NULL;
  #else
  if (p == NULL) return NULL;
  const size_t size = _mi_usable_size(p,"mi_expand");
  if (newsize > size) return NULL;
  return p; // it fits
  #endif
}
240
241
// Core realloc implementation.
// - if p == NULL then behave as malloc.
// - else if size == 0 then reallocate to a zero-sized block (and don't return NULL, just as mi_malloc(0)).
// (this means that returning NULL always indicates an error, and `p` will not have been freed in that case.)
void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) mi_attr_noexcept {
  const size_t size = _mi_usable_size(p,"mi_realloc"); // also works if p == NULL (with size 0)
  if mi_unlikely(newsize <= size && newsize >= (size / 2) && newsize > 0) { // note: newsize must be > 0 or otherwise we return NULL for realloc(NULL,0)
    mi_assert_internal(p!=NULL);
    // todo: do not track as the usable size is still the same in the free; adjust potential padding?
    // mi_track_resize(p,size,newsize)
    // if (newsize < size) { mi_track_mem_noaccess((uint8_t*)p + newsize, size - newsize); }
    return p; // reallocation still fits and not more than 50% waste
  }
  void* newp = mi_heap_malloc(heap,newsize);
  if mi_likely(newp != NULL) {
    if (zero && newsize > size) {
      // also set last word in the previous allocation to zero to ensure any padding is zero-initialized
      const size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
      _mi_memzero((uint8_t*)newp + start, newsize - start);
    }
    else if (newsize == 0) {
      ((uint8_t*)newp)[0] = 0; // work around for applications that expect zero-reallocation to be zero initialized (issue #725)
    }
    if mi_likely(p != NULL) {
      const size_t copysize = (newsize > size ? size : newsize);
      mi_track_mem_defined(p,copysize); // _mi_useable_size may be too large for byte precise memory tracking..
      _mi_memcpy(newp, p, copysize);
      mi_free(p); // only free the original pointer if successful
    }
  }
  return newp;
}
272
273
// `realloc` on a specific heap (grown memory is left uninitialized).
mi_decl_nodiscard void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
  const bool zero = false;
  return _mi_heap_realloc_zero(heap, p, newsize, zero);
}
276
277
// `reallocn`: reallocate to `count * size` bytes with overflow checking.
mi_decl_nodiscard void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
  size_t total;
  const bool overflow = mi_count_size_overflow(count, size, &total);
  return (overflow ? NULL : mi_heap_realloc(heap, p, total));
}
282
283
284
// Reallocate but free `p` on errors
285
mi_decl_nodiscard void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
286
void* newp = mi_heap_realloc(heap, p, newsize);
287
if (newp==NULL && p!=NULL) mi_free(p);
288
return newp;
289
}
290
291
// Reallocate with any grown region zero-initialized.
mi_decl_nodiscard void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
  const bool zero = true;
  return _mi_heap_realloc_zero(heap, p, newsize, zero);
}
294
295
// `recalloc`: zero-reallocate to `count * size` bytes with overflow checking.
mi_decl_nodiscard void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
  size_t total;
  const bool overflow = mi_count_size_overflow(count, size, &total);
  return (overflow ? NULL : mi_heap_rezalloc(heap, p, total));
}
300
301
302
// `realloc` replacement on the default heap.
mi_decl_nodiscard void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept {
  mi_heap_t* const heap = mi_prim_get_default_heap();
  return mi_heap_realloc(heap, p, newsize);
}
305
306
// Overflow-checked `realloc` to `count * size` bytes on the default heap.
mi_decl_nodiscard void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept {
  mi_heap_t* const heap = mi_prim_get_default_heap();
  return mi_heap_reallocn(heap, p, count, size);
}
309
310
// Reallocate but free `p` on errors
311
mi_decl_nodiscard void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept {
312
return mi_heap_reallocf(mi_prim_get_default_heap(),p,newsize);
313
}
314
315
// Zero-reallocation on the default heap.
mi_decl_nodiscard void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept {
  mi_heap_t* const heap = mi_prim_get_default_heap();
  return mi_heap_rezalloc(heap, p, newsize);
}
318
319
// Overflow-checked zero-reallocation on the default heap.
mi_decl_nodiscard void* mi_recalloc(void* p, size_t count, size_t size) mi_attr_noexcept {
  mi_heap_t* const heap = mi_prim_get_default_heap();
  return mi_heap_recalloc(heap, p, count, size);
}
322
323
324
325
// ------------------------------------------------------
326
// strdup, strndup, and realpath
327
// ------------------------------------------------------
328
329
// `strdup` using mi_malloc
330
mi_decl_nodiscard mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept {
331
if (s == NULL) return NULL;
332
size_t len = _mi_strlen(s);
333
char* t = (char*)mi_heap_malloc(heap,len+1);
334
if (t == NULL) return NULL;
335
_mi_memcpy(t, s, len);
336
t[len] = 0;
337
return t;
338
}
339
340
// `strdup` replacement on the default heap.
mi_decl_nodiscard mi_decl_restrict char* mi_strdup(const char* s) mi_attr_noexcept {
  mi_heap_t* const heap = mi_prim_get_default_heap();
  return mi_heap_strdup(heap, s);
}
343
344
// `strndup` using mi_malloc
345
mi_decl_nodiscard mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept {
346
if (s == NULL) return NULL;
347
const size_t len = _mi_strnlen(s,n); // len <= n
348
char* t = (char*)mi_heap_malloc(heap, len+1);
349
if (t == NULL) return NULL;
350
_mi_memcpy(t, s, len);
351
t[len] = 0;
352
return t;
353
}
354
355
// `strndup` replacement on the default heap.
mi_decl_nodiscard mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept {
  mi_heap_t* const heap = mi_prim_get_default_heap();
  return mi_heap_strndup(heap, s, n);
}
358
359
#ifndef __wasi__
// `realpath` using mi_malloc
#ifdef _WIN32
#ifndef PATH_MAX
#define PATH_MAX MAX_PATH
#endif
#include <windows.h>
// Resolve `fname` to a full path. If `resolved_name` is non-NULL it is used
// as the output buffer (assumed to hold PATH_MAX bytes); otherwise the result
// is allocated from `heap` and must be freed by the caller.
mi_decl_nodiscard mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept {
  // todo: use GetFullPathNameW to allow longer file names
  char buf[PATH_MAX];
  DWORD res = GetFullPathNameA(fname, PATH_MAX, (resolved_name == NULL ? buf : resolved_name), NULL);
  if (res == 0) {
    errno = GetLastError(); return NULL;
  }
  else if (res > PATH_MAX) {
    // result did not fit in PATH_MAX bytes
    errno = EINVAL; return NULL;
  }
  else if (resolved_name != NULL) {
    return resolved_name;
  }
  else {
    return mi_heap_strndup(heap, buf, PATH_MAX);
  }
}
#else
/*
#include <unistd.h>  // pathconf
static size_t mi_path_max(void) {
  static size_t path_max = 0;
  if (path_max <= 0) {
    long m = pathconf("/",_PC_PATH_MAX);
    if (m <= 0) path_max = 4096;      // guess
    else if (m < 256) path_max = 256; // at least 256
    else path_max = m;
  }
  return path_max;
}
*/
// POSIX variant: delegate to the system `realpath`; when no caller buffer is
// provided, copy the malloc'd result into heap memory and free the original.
char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept {
  if (resolved_name != NULL) {
    return realpath(fname,resolved_name);
  }
  else {
    char* rname = realpath(fname, NULL);  // NULL buffer: realpath allocates (POSIX.1-2008)
    if (rname == NULL) return NULL;
    char* result = mi_heap_strdup(heap, rname);
    mi_cfree(rname);  // use checked free (which may be redirected to our free but that's ok)
    // note: with ASAN realpath is intercepted and mi_cfree may leak the returned pointer :-(
    return result;
  }
  /*
  const size_t n = mi_path_max();
  char* buf = (char*)mi_malloc(n+1);
  if (buf == NULL) {
    errno = ENOMEM;
    return NULL;
  }
  char* rname  = realpath(fname,buf);
  char* result = mi_heap_strndup(heap,rname,n); // ok if `rname==NULL`
  mi_free(buf);
  return result;
  }
  */
}
#endif

// `realpath` replacement on the default heap.
mi_decl_nodiscard mi_decl_restrict char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept {
  return mi_heap_realpath(mi_prim_get_default_heap(),fname,resolved_name);
}
#endif
429
430
/*-------------------------------------------------------
431
C++ new and new_aligned
432
The standard requires calling into `get_new_handler` and
433
throwing the bad_alloc exception on failure. If we compile
434
with a C++ compiler we can implement this precisely. If we
435
use a C compiler we cannot throw a `bad_alloc` exception
436
but we call `exit` instead (i.e. not returning).
437
-------------------------------------------------------*/
438
439
#ifdef __cplusplus
#include <new>
// Invoke the installed C++ `new` handler once. Returns true when a handler
// was installed and invoked (so the allocation should be retried); returns
// false when no handler is installed (after reporting out-of-memory, and
// throwing std::bad_alloc when exceptions are enabled and `nothrow` is false).
static bool mi_try_new_handler(bool nothrow) {
  #if defined(_MSC_VER) || (__cplusplus >= 201103L)
  std::new_handler h = std::get_new_handler();
  #else
  // pre-C++11 has no get_new_handler: read the handler via set + restore
  std::new_handler h = std::set_new_handler();
  std::set_new_handler(h);
  #endif
  if (h==NULL) {
    _mi_error_message(ENOMEM, "out of memory in 'new'");
    #if defined(_CPPUNWIND) || defined(__cpp_exceptions)  // exceptions are not always enabled
    if (!nothrow) {
      throw std::bad_alloc();
    }
    #else
    MI_UNUSED(nothrow);
    #endif
    return false;
  }
  else {
    h();
    return true;
  }
}
#else
typedef void (*std_new_handler_t)(void);

#if (defined(__GNUC__) || (defined(__clang__) && !defined(_MSC_VER)))  // exclude clang-cl, see issue #631
// Weak symbol: when linked together with a C++ runtime this resolves to the
// real `std::get_new_handler`; otherwise this NULL-returning stub is used.
std_new_handler_t __attribute__((weak)) _ZSt15get_new_handlerv(void) {
  return NULL;
}
static std_new_handler_t mi_get_new_handler(void) {
  return _ZSt15get_new_handlerv();
}
#else
// note: on windows we could dynamically link to `?get_new_handler@std@@YAP6AXXZXZ`.
static std_new_handler_t mi_get_new_handler() {
  return NULL;
}
#endif

// C version of the handler invocation: same contract as the C++ variant,
// but aborts (cannot throw) when no handler is installed and !nothrow.
static bool mi_try_new_handler(bool nothrow) {
  std_new_handler_t h = mi_get_new_handler();
  if (h==NULL) {
    _mi_error_message(ENOMEM, "out of memory in 'new'");
    if (!nothrow) {
      abort();  // cannot throw in plain C, use abort
    }
    return false;
  }
  else {
    h();
    return true;
  }
}
#endif
496
497
// Retry an allocation, invoking the `new` handler between attempts, until it
// succeeds or the handler gives up (returns false).
mi_decl_export mi_decl_noinline void* mi_heap_try_new(mi_heap_t* heap, size_t size, bool nothrow) {
  void* p = NULL;
  for (;;) {
    if (p != NULL) break;                       // allocation succeeded
    if (!mi_try_new_handler(nothrow)) break;    // no handler (may have thrown/aborted)
    p = mi_heap_malloc(heap, size);
  }
  return p;
}
504
505
// Retry allocation on the default heap via the `new` handler.
static mi_decl_noinline void* mi_try_new(size_t size, bool nothrow) {
  mi_heap_t* const heap = mi_prim_get_default_heap();
  return mi_heap_try_new(heap, size, nothrow);
}
508
509
510
// `operator new` semantics on a heap: on failure, engage the new-handler
// retry loop (which throws std::bad_alloc / aborts when no handler is set).
mi_decl_nodiscard mi_decl_restrict void* mi_heap_alloc_new(mi_heap_t* heap, size_t size) {
  void* const p = mi_heap_malloc(heap, size);
  if mi_likely(p != NULL) { return p; }
  return mi_heap_try_new(heap, size, false);
}
515
516
// `operator new` replacement on the default heap.
mi_decl_nodiscard mi_decl_restrict void* mi_new(size_t size) {
  mi_heap_t* const heap = mi_prim_get_default_heap();
  return mi_heap_alloc_new(heap, size);
}
519
520
521
// `operator new[]` semantics on a heap: `count * size` bytes with overflow
// checking; on overflow the new-handler is invoked once (may throw).
mi_decl_nodiscard mi_decl_restrict void* mi_heap_alloc_new_n(mi_heap_t* heap, size_t count, size_t size) {
  size_t total;
  if mi_likely(!mi_count_size_overflow(count, size, &total)) {
    return mi_heap_alloc_new(heap, total);
  }
  // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
  mi_try_new_handler(false);
  return NULL;
}
531
532
// `operator new[]` replacement on the default heap: `count` objects of
// `size` bytes each; invokes the new-handler (potentially throwing) on
// overflow or allocation failure.
mi_decl_nodiscard mi_decl_restrict void* mi_new_n(size_t count, size_t size) {
  // Pass arguments in declaration order (count, then size). The previous
  // swapped order `(size, count)` was behaviorally benign — both the overflow
  // check and the product are commutative — but misleading and inconsistent
  // with every other `count/size` wrapper in this file.
  return mi_heap_alloc_new_n(mi_prim_get_default_heap(), count, size);
}
535
536
537
// `operator new(nothrow)` replacement: retry via the handler but never throw.
mi_decl_nodiscard mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept {
  void* const p = mi_malloc(size);
  return (p != NULL ? p : mi_try_new(size, true));
}
542
543
// Aligned `operator new`: retry through the new-handler on failure (may throw).
mi_decl_nodiscard mi_decl_restrict void* mi_new_aligned(size_t size, size_t alignment) {
  void* p = mi_malloc_aligned(size, alignment);
  while (p == NULL && mi_try_new_handler(false)) {
    p = mi_malloc_aligned(size, alignment);
  }
  return p;
}
551
552
// Aligned `operator new(nothrow)`: retry through the new-handler, never throw.
mi_decl_nodiscard mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_noexcept {
  void* p = mi_malloc_aligned(size, alignment);
  while (p == NULL && mi_try_new_handler(true)) {
    p = mi_malloc_aligned(size, alignment);
  }
  return p;
}
560
561
// `new`-style realloc: retry through the new-handler on failure (may throw).
mi_decl_nodiscard void* mi_new_realloc(void* p, size_t newsize) {
  void* q = mi_realloc(p, newsize);
  while (q == NULL && mi_try_new_handler(false)) {
    q = mi_realloc(p, newsize);
  }
  return q;
}
568
569
// `new`-style overflow-checked realloc to `newcount * size` bytes.
mi_decl_nodiscard void* mi_new_reallocn(void* p, size_t newcount, size_t size) {
  size_t total;
  if mi_unlikely(mi_count_size_overflow(newcount, size, &total)) {
    // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
    mi_try_new_handler(false);
    return NULL;
  }
  return mi_new_realloc(p, total);
}
579
580
// ------------------------------------------------------
581
// ensure explicit external inline definitions are emitted!
582
// ------------------------------------------------------
583
584
#ifdef __cplusplus
585
void* _mi_externs[] = {
586
(void*)&_mi_page_malloc,
587
(void*)&_mi_heap_malloc_zero,
588
(void*)&_mi_heap_malloc_zero_ex,
589
(void*)&mi_malloc,
590
(void*)&mi_malloc_small,
591
(void*)&mi_zalloc_small,
592
(void*)&mi_heap_malloc,
593
(void*)&mi_heap_zalloc,
594
(void*)&mi_heap_malloc_small,
595
// (void*)&mi_heap_alloc_new,
596
// (void*)&mi_heap_alloc_new_n
597
};
598
#endif
599
600