GitHub Repository: emscripten-core/emscripten
Path: blob/main/system/lib/mimalloc/src/init.c
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2022, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "mimalloc/prim.h"

#include <string.h>  // memcpy, memset
#include <stdlib.h>  // atexit


// Empty page used to initialize the small free pages array
const mi_page_t _mi_page_empty = {
  0,
  false, false, false, false,
  0,         // capacity
  0,         // reserved capacity
  { 0 },     // flags
  false,     // is_zero
  0,         // retire_expire
  NULL,      // free
  NULL,      // local_free
  0,         // used
  0,         // block size shift
  0,         // heap tag
  0,         // block_size
  NULL,      // page_start
  #if (MI_PADDING || MI_ENCODE_FREELIST)
  { 0, 0 },
  #endif
  MI_ATOMIC_VAR_INIT(0),  // xthread_free
  MI_ATOMIC_VAR_INIT(0),  // xheap
  NULL, NULL,
  { 0 }      // padding
};

#define MI_PAGE_EMPTY() ((mi_page_t*)&_mi_page_empty)

#if (MI_SMALL_WSIZE_MAX==128)
#if (MI_PADDING>0) && (MI_INTPTR_SIZE >= 8)
#define MI_SMALL_PAGES_EMPTY  { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY(), MI_PAGE_EMPTY() }
#elif (MI_PADDING>0)
#define MI_SMALL_PAGES_EMPTY  { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY(), MI_PAGE_EMPTY(), MI_PAGE_EMPTY() }
#else
#define MI_SMALL_PAGES_EMPTY  { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY() }
#endif
#else
#error "define right initialization sizes corresponding to MI_SMALL_WSIZE_MAX"
#endif

// Empty page queues for every bin
#define QNULL(sz)  { NULL, NULL, (sz)*sizeof(uintptr_t) }
#define MI_PAGE_QUEUES_EMPTY \
  { QNULL(1), \
    QNULL(     1), QNULL(     2), QNULL(     3), QNULL(     4), QNULL(     5), QNULL(     6), QNULL(     7), QNULL(     8), /* 8 */ \
    QNULL(    10), QNULL(    12), QNULL(    14), QNULL(    16), QNULL(    20), QNULL(    24), QNULL(    28), QNULL(    32), /* 16 */ \
    QNULL(    40), QNULL(    48), QNULL(    56), QNULL(    64), QNULL(    80), QNULL(    96), QNULL(   112), QNULL(   128), /* 24 */ \
    QNULL(   160), QNULL(   192), QNULL(   224), QNULL(   256), QNULL(   320), QNULL(   384), QNULL(   448), QNULL(   512), /* 32 */ \
    QNULL(   640), QNULL(   768), QNULL(   896), QNULL(  1024), QNULL(  1280), QNULL(  1536), QNULL(  1792), QNULL(  2048), /* 40 */ \
    QNULL(  2560), QNULL(  3072), QNULL(  3584), QNULL(  4096), QNULL(  5120), QNULL(  6144), QNULL(  7168), QNULL(  8192), /* 48 */ \
    QNULL( 10240), QNULL( 12288), QNULL( 14336), QNULL( 16384), QNULL( 20480), QNULL( 24576), QNULL( 28672), QNULL( 32768), /* 56 */ \
    QNULL( 40960), QNULL( 49152), QNULL( 57344), QNULL( 65536), QNULL( 81920), QNULL( 98304), QNULL(114688), QNULL(131072), /* 64 */ \
    QNULL(163840), QNULL(196608), QNULL(229376), QNULL(262144), QNULL(327680), QNULL(393216), QNULL(458752), QNULL(524288), /* 72 */ \
    QNULL(MI_MEDIUM_OBJ_WSIZE_MAX + 1  /* 655360, Huge queue */), \
    QNULL(MI_MEDIUM_OBJ_WSIZE_MAX + 2) /* Full queue */ }

#define MI_STAT_COUNT_NULL()  {0,0,0,0}

// Empty statistics
#if MI_STAT>1
#define MI_STAT_COUNT_END_NULL()  , { MI_STAT_COUNT_NULL(), MI_INIT32(MI_STAT_COUNT_NULL) }
#else
#define MI_STAT_COUNT_END_NULL()
#endif

#define MI_STATS_NULL \
  MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
  MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
  MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
  MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
  MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
  MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
  MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
  MI_STAT_COUNT_NULL(), \
  { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
  { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
  { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
  { 0, 0 } \
  MI_STAT_COUNT_END_NULL()


// Empty slice span queues for every bin
#define SQNULL(sz)  { NULL, NULL, sz }
#define MI_SEGMENT_SPAN_QUEUES_EMPTY \
  { SQNULL(1), \
    SQNULL(  1), SQNULL(  2), SQNULL(  3), SQNULL(  4), SQNULL(  5), SQNULL(  6), SQNULL(  7), SQNULL( 10), /* 8 */ \
    SQNULL( 12), SQNULL( 14), SQNULL( 16), SQNULL( 20), SQNULL( 24), SQNULL( 28), SQNULL( 32), SQNULL( 40), /* 16 */ \
    SQNULL( 48), SQNULL( 56), SQNULL( 64), SQNULL( 80), SQNULL( 96), SQNULL(112), SQNULL(128), SQNULL(160), /* 24 */ \
    SQNULL(192), SQNULL(224), SQNULL(256), SQNULL(320), SQNULL(384), SQNULL(448), SQNULL(512), SQNULL(640), /* 32 */ \
    SQNULL(768), SQNULL(896), SQNULL(1024) /* 35 */ }


// --------------------------------------------------------
// Statically allocate an empty heap as the initial
// thread local value for the default heap,
// and statically allocate the backing heap for the main
// thread so it can function without doing any allocation
// itself (as accessing a thread local for the first time
// may lead to allocation itself on some platforms)
// --------------------------------------------------------

mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
  NULL,
  MI_ATOMIC_VAR_INIT(NULL),
  0,                // tid
  0,                // cookie
  0,                // arena id
  { 0, 0 },         // keys
  { {0}, {0}, 0, true },  // random
  0,                // page count
  MI_BIN_FULL, 0,   // page retired min/max
  NULL,             // next
  false,            // can reclaim
  0,                // tag
  MI_SMALL_PAGES_EMPTY,
  MI_PAGE_QUEUES_EMPTY
};
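
// A hedged sketch (illustrative only, not part of this file's logic) of how
// the empty heap enables lazy per-thread initialization: all of its page
// queues are empty, so a fresh thread's first allocation falls through to the
// generic slow path, which installs a real thread-local heap first:
//
//   void* p = mi_malloc(16);
//   // roughly, inside the slow path:
//   //   the heap is still &_mi_heap_empty -> mi_thread_init() installs a
//   //   real heap, and the allocation is then retried on that heap.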

#define tld_empty_stats  ((mi_stats_t*)((uint8_t*)&tld_empty + offsetof(mi_tld_t,stats)))
#define tld_empty_os     ((mi_os_tld_t*)((uint8_t*)&tld_empty + offsetof(mi_tld_t,os)))

mi_decl_cache_align static const mi_tld_t tld_empty = {
  0,
  false,
  NULL, NULL,
  { MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, 0, tld_empty_stats, tld_empty_os },  // segments
  { 0, tld_empty_stats },  // os
  { MI_STATS_NULL }        // stats
};

mi_threadid_t _mi_thread_id(void) mi_attr_noexcept {
  return _mi_prim_thread_id();
}

// the thread-local default heap for allocation
mi_decl_thread mi_heap_t* _mi_heap_default = (mi_heap_t*)&_mi_heap_empty;

extern mi_heap_t _mi_heap_main;

static mi_tld_t tld_main = {
  0, false,
  &_mi_heap_main, &_mi_heap_main,
  { MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, 0, &tld_main.stats, &tld_main.os },  // segments
  { 0, &tld_main.stats },  // os
  { MI_STATS_NULL }        // stats
};

mi_heap_t _mi_heap_main = {
  &tld_main,
  MI_ATOMIC_VAR_INIT(NULL),
  0,                // thread id
  0,                // initial cookie
  0,                // arena id
  { 0, 0 },         // the key of the main heap can be fixed (unlike page keys that need to be secure!)
  { {0x846ca68b}, {0}, 0, true },  // random
  0,                // page count
  MI_BIN_FULL, 0,   // page retired min/max
  NULL,             // next heap
  false,            // can reclaim
  0,                // tag
  MI_SMALL_PAGES_EMPTY,
  MI_PAGE_QUEUES_EMPTY
};

bool _mi_process_is_initialized = false;  // set to `true` in `mi_process_init`

mi_stats_t _mi_stats_main = { MI_STATS_NULL };


static void mi_heap_main_init(void) {
  if (_mi_heap_main.cookie == 0) {
    _mi_heap_main.thread_id = _mi_thread_id();
    _mi_heap_main.cookie = 1;
    #if defined(_WIN32) && !defined(MI_SHARED_LIB)
      _mi_random_init_weak(&_mi_heap_main.random);  // prevent allocation failure during bcrypt dll initialization with static linking
    #else
      _mi_random_init(&_mi_heap_main.random);
    #endif
    _mi_heap_main.cookie  = _mi_heap_random_next(&_mi_heap_main);
    _mi_heap_main.keys[0] = _mi_heap_random_next(&_mi_heap_main);
    _mi_heap_main.keys[1] = _mi_heap_random_next(&_mi_heap_main);
  }
}

mi_heap_t* _mi_heap_main_get(void) {
  mi_heap_main_init();
  return &_mi_heap_main;
}


/* -----------------------------------------------------------
  Initialization and freeing of the thread local heaps
----------------------------------------------------------- */

// note: in x64, in a release build, `sizeof(mi_thread_data_t)` is under 4KiB (= OS page size)
typedef struct mi_thread_data_s {
  mi_heap_t  heap;   // must come first due to cast in `_mi_heap_done`
  mi_tld_t   tld;
  mi_memid_t memid;  // must come last due to zero'ing
} mi_thread_data_t;


// Thread meta-data is allocated directly from the OS. For
// some programs that do not use thread pools and allocate and
// destroy many OS threads, this may cause too much overhead
// per thread, so we maintain a small cache of recently freed metadata.
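//
// The cache below is a fixed array of atomic slots. A hedged miniature of the
// same lock-free pattern (illustrative only; names are hypothetical):
//
//   _Atomic(void*) slots[N];
//   // release: for each i, if slots[i] == NULL, try CAS(&slots[i], NULL, p)
//   // acquire: for each i, p = atomic_exchange(&slots[i], NULL); reuse if non-NULL
//
// Claiming via exchange (rather than a plain load) ensures two threads can
// never take the same cached entry.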

#define TD_CACHE_SIZE (16)
static _Atomic(mi_thread_data_t*) td_cache[TD_CACHE_SIZE];

static mi_thread_data_t* mi_thread_data_zalloc(void) {
  // try to find thread metadata in the cache
  bool is_zero = false;
  mi_thread_data_t* td = NULL;
  for (int i = 0; i < TD_CACHE_SIZE; i++) {
    td = mi_atomic_load_ptr_relaxed(mi_thread_data_t, &td_cache[i]);
    if (td != NULL) {
      // found a cached allocation, try to claim it
      td = mi_atomic_exchange_ptr_acq_rel(mi_thread_data_t, &td_cache[i], NULL);
      if (td != NULL) {
        break;
      }
    }
  }

  // if that fails, allocate as metadata
  if (td == NULL) {
    mi_memid_t memid;
    td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &memid, &_mi_stats_main);
    if (td == NULL) {
      // if this fails, try once more. (issue #257)
      td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &memid, &_mi_stats_main);
      if (td == NULL) {
        // really out of memory
        _mi_error_message(ENOMEM, "unable to allocate thread local heap metadata (%zu bytes)\n", sizeof(mi_thread_data_t));
      }
    }
    if (td != NULL) {
      td->memid = memid;
      is_zero = memid.initially_zero;
    }
  }

  if (td != NULL && !is_zero) {
    _mi_memzero_aligned(td, offsetof(mi_thread_data_t, memid));
  }
  return td;
}

static void mi_thread_data_free( mi_thread_data_t* tdfree ) {
  // try to add the thread metadata to the cache
  for (int i = 0; i < TD_CACHE_SIZE; i++) {
    mi_thread_data_t* td = mi_atomic_load_ptr_relaxed(mi_thread_data_t, &td_cache[i]);
    if (td == NULL) {
      mi_thread_data_t* expected = NULL;
      if (mi_atomic_cas_ptr_weak_acq_rel(mi_thread_data_t, &td_cache[i], &expected, tdfree)) {
        return;
      }
    }
  }
  // if that fails, just free it directly
  _mi_os_free(tdfree, sizeof(mi_thread_data_t), tdfree->memid, &_mi_stats_main);
}

void _mi_thread_data_collect(void) {
  // free all thread metadata from the cache
  for (int i = 0; i < TD_CACHE_SIZE; i++) {
    mi_thread_data_t* td = mi_atomic_load_ptr_relaxed(mi_thread_data_t, &td_cache[i]);
    if (td != NULL) {
      td = mi_atomic_exchange_ptr_acq_rel(mi_thread_data_t, &td_cache[i], NULL);
      if (td != NULL) {
        _mi_os_free(td, sizeof(mi_thread_data_t), td->memid, &_mi_stats_main);
      }
    }
  }
}

// Initialize the thread local default heap, called from `mi_thread_init`
static bool _mi_thread_heap_init(void) {
  if (mi_heap_is_initialized(mi_prim_get_default_heap())) return true;
  if (_mi_is_main_thread()) {
    // mi_assert_internal(_mi_heap_main.thread_id != 0);  // can happen on FreeBSD where alloc is called before any initialization
    // the main heap is statically allocated
    mi_heap_main_init();
    _mi_heap_set_default_direct(&_mi_heap_main);
    //mi_assert_internal(_mi_heap_default->tld->heap_backing == mi_prim_get_default_heap());
  }
  else {
    // use `_mi_os_alloc` to allocate directly from the OS
    mi_thread_data_t* td = mi_thread_data_zalloc();
    if (td == NULL) return false;

    mi_tld_t*  tld  = &td->tld;
    mi_heap_t* heap = &td->heap;
    _mi_tld_init(tld, heap);  // must be before `_mi_heap_init`
    _mi_heap_init(heap, tld, _mi_arena_id_none(), false /* can reclaim */, 0 /* default tag */);
    _mi_heap_set_default_direct(heap);
  }
  return false;
}

// initialize thread local data
void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap) {
  _mi_memcpy_aligned(tld, &tld_empty, sizeof(mi_tld_t));
  tld->heap_backing = bheap;
  tld->heaps = NULL;
  tld->segments.stats = &tld->stats;
  tld->segments.os = &tld->os;
  tld->os.stats = &tld->stats;
}

// Free the thread local default heap (called from `mi_thread_done`)
static bool _mi_thread_heap_done(mi_heap_t* heap) {
  if (!mi_heap_is_initialized(heap)) return true;

  // reset default heap
  _mi_heap_set_default_direct(_mi_is_main_thread() ? &_mi_heap_main : (mi_heap_t*)&_mi_heap_empty);

  // switch to backing heap
  heap = heap->tld->heap_backing;
  if (!mi_heap_is_initialized(heap)) return false;

  // delete all non-backing heaps in this thread
  mi_heap_t* curr = heap->tld->heaps;
  while (curr != NULL) {
    mi_heap_t* next = curr->next;  // save `next` as `curr` will be freed
    if (curr != heap) {
      mi_assert_internal(!mi_heap_is_backing(curr));
      mi_heap_delete(curr);
    }
    curr = next;
  }
  mi_assert_internal(heap->tld->heaps == heap && heap->next == NULL);
  mi_assert_internal(mi_heap_is_backing(heap));

  // collect if not the main thread
  if (heap != &_mi_heap_main) {
    _mi_heap_collect_abandon(heap);
  }

  // merge stats
  _mi_stats_done(&heap->tld->stats);

  // free if not the main thread
  if (heap != &_mi_heap_main) {
    // the following assertion does not always hold for huge segments as those are always treated
    // as abandoned: one may be allocated in one thread but deallocated in another, in which case
    // the count can be too large or negative. todo: perhaps not count huge segments? see issue #363
    // mi_assert_internal(heap->tld->segments.count == 0 || heap->thread_id != _mi_thread_id());
    mi_thread_data_free((mi_thread_data_t*)heap);
  }
  else {
    #if 0
    // never free the main thread even in debug mode; if a DLL is linked statically with mimalloc,
    // there may still be delete/free calls after mi_fls_done is called. Issue #207
    _mi_heap_destroy_pages(heap);
    mi_assert_internal(heap->tld->heap_backing == &_mi_heap_main);
    #endif
  }
  return false;
}


// --------------------------------------------------------
// Try to run `mi_thread_done()` automatically so any memory
// owned by the thread but not yet released can be abandoned
// and re-owned by another thread.
//
// 1. windows dynamic library:
//     call from DllMain on DLL_THREAD_DETACH
// 2. windows static library:
//     use `FlsAlloc` to call a destructor when the thread is done
// 3. unix, pthreads:
//     use a pthread key to call a destructor when a pthread is done
//
// In the last two cases we also need to call `mi_process_init`
// to set up the thread local keys.
// --------------------------------------------------------
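
// A hedged sketch of approach 3 (simplified; the real implementation lives in
// the platform `prim` layer, so details here are illustrative): a pthread key
// destructor runs on thread exit for every thread whose value is non-NULL:
//
//   static pthread_key_t mi_pthread_key;
//
//   static void mi_pthread_done(void* value) {
//     if (value != NULL) { _mi_thread_done((mi_heap_t*)value); }
//   }
//
//   void _mi_prim_thread_init_auto_done(void) {
//     pthread_key_create(&mi_pthread_key, &mi_pthread_done);
//   }
//
//   void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
//     pthread_setspecific(mi_pthread_key, heap);  // non-NULL => destructor runs
//   }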

// Set up handlers so `mi_thread_done` is called automatically
static void mi_process_setup_auto_thread_done(void) {
  static bool tls_initialized = false;  // fine if it races
  if (tls_initialized) return;
  tls_initialized = true;
  _mi_prim_thread_init_auto_done();
  _mi_heap_set_default_direct(&_mi_heap_main);
}


bool _mi_is_main_thread(void) {
  return (_mi_heap_main.thread_id == 0 || _mi_heap_main.thread_id == _mi_thread_id());
}

static _Atomic(size_t) thread_count = MI_ATOMIC_VAR_INIT(1);

size_t _mi_current_thread_count(void) {
  return mi_atomic_load_relaxed(&thread_count);
}

// This is called from `mi_malloc_generic`
void mi_thread_init(void) mi_attr_noexcept
{
  // ensure our process has started already
  mi_process_init();

  // initialize the thread local default heap
  // (this will call `_mi_heap_set_default_direct` and thus set the
  //  fiber/pthread key to a non-zero value, ensuring `_mi_thread_done` is called)
  if (_mi_thread_heap_init()) return;  // returns true if already initialized

  _mi_stat_increase(&_mi_stats_main.threads, 1);
  mi_atomic_increment_relaxed(&thread_count);
  //_mi_verbose_message("thread init: 0x%zx\n", _mi_thread_id());
}

void mi_thread_done(void) mi_attr_noexcept {
  _mi_thread_done(NULL);
}

void _mi_thread_done(mi_heap_t* heap)
{
  // calling with NULL implies using the default heap
  if (heap == NULL) {
    heap = mi_prim_get_default_heap();
    if (heap == NULL) return;
  }

  // prevent re-entrancy through heap_done/heap_set_default_direct (issue #699)
  if (!mi_heap_is_initialized(heap)) {
    return;
  }

  // adjust stats
  mi_atomic_decrement_relaxed(&thread_count);
  _mi_stat_decrease(&_mi_stats_main.threads, 1);

  // check the thread-id as on Windows shutdown with FLS the main (exit) thread may call this on thread-local heaps
  if (heap->thread_id != _mi_thread_id()) return;

  // abandon the thread local heap
  if (_mi_thread_heap_done(heap)) return;  // returns true if already ran
}

void _mi_heap_set_default_direct(mi_heap_t* heap) {
  mi_assert_internal(heap != NULL);
  #if defined(MI_TLS_SLOT)
  mi_prim_tls_slot_set(MI_TLS_SLOT, heap);
  #elif defined(MI_TLS_PTHREAD_SLOT_OFS)
  *mi_prim_tls_pthread_heap_slot() = heap;
  #elif defined(MI_TLS_PTHREAD)
  // we use _mi_heap_default_key
  #else
  _mi_heap_default = heap;
  #endif

  // ensure the default heap is passed to `_mi_thread_done`;
  // setting to a non-NULL value also ensures `mi_thread_done` is called.
  _mi_prim_thread_associate_default_heap(heap);
}
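
// For reference, a hedged sketch of how the public API reaches this point
// (assuming the standard user-facing heap calls; `mi_heap_set_default` ends
// up calling the internal setter above):
//
//   mi_heap_t* h = mi_heap_new();   // create a fresh heap in this thread
//   mi_heap_set_default(h);         // subsequent allocations come from `h`
//   void* p = mi_malloc(64);
//   mi_heap_destroy(h);             // frees every block in `h` at once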


// --------------------------------------------------------
// Run functions on process init/done, and thread init/done
// --------------------------------------------------------
static void mi_cdecl mi_process_done(void);

static bool os_preloading = true;   // true until this module is initialized
static bool mi_redirected = false;  // true if malloc redirects to mi_malloc

// Returns true if this module has not been initialized; don't use C runtime routines until it returns false.
bool mi_decl_noinline _mi_preloading(void) {
  return os_preloading;
}

mi_decl_nodiscard bool mi_is_redirected(void) mi_attr_noexcept {
  return mi_redirected;
}

// Communicate with the redirection module on Windows
#if defined(_WIN32) && defined(MI_SHARED_LIB) && !defined(MI_WIN_NOREDIRECT)
#ifdef __cplusplus
extern "C" {
#endif
mi_decl_export void _mi_redirect_entry(DWORD reason) {
  // called on redirection; careful as this may be called before DllMain
  if (reason == DLL_PROCESS_ATTACH) {
    mi_redirected = true;
  }
  else if (reason == DLL_PROCESS_DETACH) {
    mi_redirected = false;
  }
  else if (reason == DLL_THREAD_DETACH) {
    mi_thread_done();
  }
}
__declspec(dllimport) bool mi_cdecl mi_allocator_init(const char** message);
__declspec(dllimport) void mi_cdecl mi_allocator_done(void);
#ifdef __cplusplus
}
#endif
#else
static bool mi_allocator_init(const char** message) {
  if (message != NULL) *message = NULL;
  return true;
}
static void mi_allocator_done(void) {
  // nothing to do
}
#endif

// Called once by the process loader
static void mi_process_load(void) {
  mi_heap_main_init();
  #if defined(__APPLE__) || defined(MI_TLS_RECURSE_GUARD)
  volatile mi_heap_t* dummy = _mi_heap_default;  // access TLS to allocate it before setting tls_initialized to true
  if (dummy == NULL) return;                     // use dummy, otherwise the access may get optimized away (issue #697)
  #endif
  os_preloading = false;
  mi_assert_internal(_mi_is_main_thread());
  #if !(defined(_WIN32) && defined(MI_SHARED_LIB))  // use Dll process detach (see below) instead of atexit (issue #521)
  atexit(&mi_process_done);
  #endif
  _mi_options_init();
  mi_process_setup_auto_thread_done();
  mi_process_init();
  if (mi_redirected) _mi_verbose_message("malloc is redirected.\n");

  // show message from the redirector (if present)
  const char* msg = NULL;
  mi_allocator_init(&msg);
  if (msg != NULL && (mi_option_is_enabled(mi_option_verbose) || mi_option_is_enabled(mi_option_show_errors))) {
    _mi_fputs(NULL, NULL, NULL, msg);
  }

  // reseed random
  _mi_random_reinit_if_weak(&_mi_heap_main.random);
}

#if defined(_WIN32) && (defined(_M_IX86) || defined(_M_X64))
#include <intrin.h>
mi_decl_cache_align bool _mi_cpu_has_fsrm = false;

static void mi_detect_cpu_features(void) {
  // FSRM for fast `rep movsb` support (AMD Zen3+ (~2020) or Intel Ice Lake+ (~2019))
  int32_t cpu_info[4];
  __cpuid(cpu_info, 7);
  _mi_cpu_has_fsrm = ((cpu_info[3] & (1 << 4)) != 0);  // bit 4 of EDX; see <https://en.wikipedia.org/wiki/CPUID#EAX=7,_ECX=0:_Extended_Features>
}
#else
static void mi_detect_cpu_features(void) {
  // nothing
}
#endif
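
// A hedged sketch (illustrative; the actual use lives in mimalloc's memcpy
// helpers) of how the detected flag is typically consumed: with FSRM, a plain
// `rep movsb` is fast even for small copies, so a copy routine can branch on it:
//
//   if (_mi_cpu_has_fsrm) { __movsb(dst, src, n); }  // MSVC intrinsic for `rep movsb`
//   else                  { memcpy(dst, src, n); }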

// Initialize the process; called by thread_init or the process loader
void mi_process_init(void) mi_attr_noexcept {
  // ensure we are called once
  static mi_atomic_once_t process_init;
  #if _MSC_VER < 1920
  mi_heap_main_init();  // vs2017 can dynamically re-initialize _mi_heap_main
  #endif
  if (!mi_atomic_once(&process_init)) return;
  _mi_process_is_initialized = true;
  _mi_verbose_message("process init: 0x%zx\n", _mi_thread_id());
  mi_process_setup_auto_thread_done();

  mi_detect_cpu_features();
  _mi_os_init();
  mi_heap_main_init();
  #if MI_DEBUG
  _mi_verbose_message("debug level : %d\n", MI_DEBUG);
  #endif
  _mi_verbose_message("secure level: %d\n", MI_SECURE);
  _mi_verbose_message("mem tracking: %s\n", MI_TRACK_TOOL);
  #if MI_TSAN
  _mi_verbose_message("thread sanitizer enabled\n");
  #endif
  mi_thread_init();

  #if defined(_WIN32)
  // On Windows, when building as a static lib the FLS cleanup happens too early for the main thread.
  // To avoid this, set the FLS value for the main thread to NULL so the FLS cleanup
  // will not call `_mi_thread_done` on the (still executing) main thread. See issue #508.
  _mi_prim_thread_associate_default_heap(NULL);
  #endif

  mi_stats_reset();  // only call stat reset *after* thread init (or the heap tld == NULL)
  mi_track_init();

  if (mi_option_is_enabled(mi_option_reserve_huge_os_pages)) {
    size_t pages = mi_option_get_clamp(mi_option_reserve_huge_os_pages, 0, 128*1024);
    long reserve_at = mi_option_get(mi_option_reserve_huge_os_pages_at);
    if (reserve_at != -1) {
      mi_reserve_huge_os_pages_at(pages, reserve_at, pages*500);
    } else {
      mi_reserve_huge_os_pages_interleave(pages, 0, pages*500);
    }
  }
  if (mi_option_is_enabled(mi_option_reserve_os_memory)) {
    long ksize = mi_option_get(mi_option_reserve_os_memory);
    if (ksize > 0) {
      mi_reserve_os_memory((size_t)ksize*MI_KiB, true /* commit? */, true /* allow large pages? */);
    }
  }
}
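
// These reservation options are usually driven through the environment, since
// mimalloc reads its options from `MIMALLOC_`-prefixed variables. For example
// (values are illustrative):
//
//   MIMALLOC_RESERVE_HUGE_OS_PAGES=4 ./myprogram   # reserve 4 (1 GiB) huge OS pages at startup
//   MIMALLOC_RESERVE_OS_MEMORY=1g    ./myprogram   # reserve 1 GiB of OS memory up front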

// Called when the process is done (through `at_exit`)
static void mi_cdecl mi_process_done(void) {
  // only shutdown if we were initialized
  if (!_mi_process_is_initialized) return;
  // ensure we are called once
  static bool process_done = false;
  if (process_done) return;
  process_done = true;

  // release any thread specific resources and ensure _mi_thread_done is called on all but the main thread
  _mi_prim_thread_done_auto_done();

  #ifndef MI_SKIP_COLLECT_ON_EXIT
  #if (MI_DEBUG || !defined(MI_SHARED_LIB))
  // free all memory if possible on process exit. This is not needed for a stand-alone process
  // but should be done if mimalloc is statically linked into another shared library which
  // is repeatedly loaded/unloaded, see issue #281.
  mi_collect(true /* force */);
  #endif
  #endif

  // Forcefully release all retained memory; this can be dangerous in general if overriding regular malloc/free,
  // since after process_done there might still be other code running that calls `free` (like at_exit routines,
  // or C-runtime termination code).
  if (mi_option_is_enabled(mi_option_destroy_on_exit)) {
    mi_collect(true /* force */);
    _mi_heap_unsafe_destroy_all();  // forcefully release all memory held by all heaps (of this thread only!)
    _mi_arena_unsafe_destroy_all(&_mi_heap_main_get()->tld->stats);
  }

  if (mi_option_is_enabled(mi_option_show_stats) || mi_option_is_enabled(mi_option_verbose)) {
    mi_stats_print(NULL);
  }
  mi_allocator_done();
  _mi_verbose_message("process done: 0x%zx\n", _mi_heap_main.thread_id);
  os_preloading = true;  // don't call the C runtime anymore
}


#if defined(_WIN32) && defined(MI_SHARED_LIB)
// Windows DLL: easy to hook into process_init and thread_done
__declspec(dllexport) BOOL WINAPI DllMain(HINSTANCE inst, DWORD reason, LPVOID reserved) {
  MI_UNUSED(reserved);
  MI_UNUSED(inst);
  if (reason == DLL_PROCESS_ATTACH) {
    mi_process_load();
  }
  else if (reason == DLL_PROCESS_DETACH) {
    mi_process_done();
  }
  else if (reason == DLL_THREAD_DETACH) {
    if (!mi_is_redirected()) {
      mi_thread_done();
    }
  }
  return TRUE;
}

#elif defined(_MSC_VER)
// MSVC: use data section magic for static libraries
// See <https://www.codeguru.com/cpp/misc/misc/applicationcontrol/article.php/c6945/Running-Code-Before-and-After-Main.htm>
static int _mi_process_init(void) {
  mi_process_load();
  return 0;
}
typedef int(*_mi_crt_callback_t)(void);
#if defined(_M_X64) || defined(_M_ARM64)
  __pragma(comment(linker, "/include:" "_mi_msvc_initu"))
  #pragma section(".CRT$XIU", long, read)
#else
  __pragma(comment(linker, "/include:" "__mi_msvc_initu"))
#endif
#pragma data_seg(".CRT$XIU")
mi_decl_externc _mi_crt_callback_t _mi_msvc_initu[] = { &_mi_process_init };
#pragma data_seg()
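
// Background note (not from this file, to the best of our understanding): the
// MSVC CRT invokes every function pointer placed in the `.CRT$XI*` sections
// during C initialization, walking them in lexicographic order from `XIA` to
// `XIZ`; the `XIU` ("user") slot therefore runs `_mi_process_init` before
// `main` without needing a constructor attribute.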

#elif defined(__cplusplus)
// C++: use static initialization to detect process start
static bool _mi_process_init(void) {
  mi_process_load();
  return (_mi_heap_main.thread_id != 0);
}
static bool mi_initialized = _mi_process_init();

#elif defined(__GNUC__) || defined(__clang__)
// GCC, Clang: use the constructor attribute
static void __attribute__((constructor)) _mi_process_init(void) {
  mi_process_load();
}

#else
#pragma message("define a way to call mi_process_load on your platform")
#endif