GitHub Repository: emscripten-core/emscripten
Path: blob/main/system/lib/mimalloc/src/page.c

/*----------------------------------------------------------------------------
Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/

/* -----------------------------------------------------------
The core of the allocator. Every segment contains
pages of a certain block size. The main function
exported is `mi_malloc_generic`.
----------------------------------------------------------- */

#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "mimalloc/atomic.h"

/* -----------------------------------------------------------
Definition of page queues for each block size
----------------------------------------------------------- */

#define MI_IN_PAGE_C
#include "page-queue.c"
#undef MI_IN_PAGE_C


/* -----------------------------------------------------------
Page helpers
----------------------------------------------------------- */

// Index a block in a page
static inline mi_block_t* mi_page_block_at(const mi_page_t* page, void* page_start, size_t block_size, size_t i) {
  MI_UNUSED(page);
  mi_assert_internal(page != NULL);
  mi_assert_internal(i <= page->reserved);
  return (mi_block_t*)((uint8_t*)page_start + (i * block_size));
}

static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t size, mi_tld_t* tld);
static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld);

#if (MI_DEBUG>=3)
static size_t mi_page_list_count(mi_page_t* page, mi_block_t* head) {
  size_t count = 0;
  while (head != NULL) {
    mi_assert_internal(page == _mi_ptr_page(head));
    count++;
    head = mi_block_next(page, head);
  }
  return count;
}

/*
// Start of the page available memory
static inline uint8_t* mi_page_area(const mi_page_t* page) {
  return _mi_page_start(_mi_page_segment(page), page, NULL);
}
*/

static bool mi_page_list_is_valid(mi_page_t* page, mi_block_t* p) {
  size_t psize;
  uint8_t* page_area = _mi_segment_page_start(_mi_page_segment(page), page, &psize);
  mi_block_t* start = (mi_block_t*)page_area;
  mi_block_t* end = (mi_block_t*)(page_area + psize);
  while(p != NULL) {
    if (p < start || p >= end) return false;
    p = mi_block_next(page, p);
  }
  #if MI_DEBUG>3 // generally too expensive to check this
  if (page->free_is_zero) {
    const size_t ubsize = mi_page_usable_block_size(page);
    for (mi_block_t* block = page->free; block != NULL; block = mi_block_next(page, block)) {
      mi_assert_expensive(mi_mem_is_zero(block + 1, ubsize - sizeof(mi_block_t)));
    }
  }
  #endif
  return true;
}

static bool mi_page_is_valid_init(mi_page_t* page) {
  mi_assert_internal(mi_page_block_size(page) > 0);
  mi_assert_internal(page->used <= page->capacity);
  mi_assert_internal(page->capacity <= page->reserved);

  uint8_t* start = mi_page_start(page);
  mi_assert_internal(start == _mi_segment_page_start(_mi_page_segment(page), page, NULL));
  mi_assert_internal(page->is_huge == (_mi_page_segment(page)->kind == MI_SEGMENT_HUGE));
  //mi_assert_internal(start + page->capacity*page->block_size == page->top);

  mi_assert_internal(mi_page_list_is_valid(page,page->free));
  mi_assert_internal(mi_page_list_is_valid(page,page->local_free));

  #if MI_DEBUG>3 // generally too expensive to check this
  if (page->free_is_zero) {
    const size_t ubsize = mi_page_usable_block_size(page);
    for(mi_block_t* block = page->free; block != NULL; block = mi_block_next(page,block)) {
      mi_assert_expensive(mi_mem_is_zero(block + 1, ubsize - sizeof(mi_block_t)));
    }
  }
  #endif

  #if !MI_TRACK_ENABLED && !MI_TSAN
  mi_block_t* tfree = mi_page_thread_free(page);
  mi_assert_internal(mi_page_list_is_valid(page, tfree));
  //size_t tfree_count = mi_page_list_count(page, tfree);
  //mi_assert_internal(tfree_count <= page->thread_freed + 1);
  #endif

  size_t free_count = mi_page_list_count(page, page->free) + mi_page_list_count(page, page->local_free);
  mi_assert_internal(page->used + free_count == page->capacity);

  return true;
}

extern bool _mi_process_is_initialized; // has mi_process_init been called?

bool _mi_page_is_valid(mi_page_t* page) {
  mi_assert_internal(mi_page_is_valid_init(page));
  #if MI_SECURE
  mi_assert_internal(page->keys[0] != 0);
  #endif
  if (mi_page_heap(page)!=NULL) {
    mi_segment_t* segment = _mi_page_segment(page);

    mi_assert_internal(!_mi_process_is_initialized || segment->thread_id==0 || segment->thread_id == mi_page_heap(page)->thread_id);
    #if MI_HUGE_PAGE_ABANDON
    if (segment->kind != MI_SEGMENT_HUGE)
    #endif
    {
      mi_page_queue_t* pq = mi_page_queue_of(page);
      mi_assert_internal(mi_page_queue_contains(pq, page));
      mi_assert_internal(pq->block_size==mi_page_block_size(page) || mi_page_block_size(page) > MI_MEDIUM_OBJ_SIZE_MAX || mi_page_is_in_full(page));
      mi_assert_internal(mi_heap_contains_queue(mi_page_heap(page),pq));
    }
  }
  return true;
}
#endif

void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never) {
  while (!_mi_page_try_use_delayed_free(page, delay, override_never)) {
    mi_atomic_yield();
  }
}
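
// Try to set the delayed-free state of a page. The state is packed together with
// the thread-free list head in `page->xthread_free` (see the `mi_tf_*` helpers), so
// the update is done with a CAS. Returns `false` only if another thread stays in the
// `MI_DELAYED_FREEING` state after a few yields; `_mi_page_use_delayed_free` retries in that case.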
bool _mi_page_try_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never) {
  mi_thread_free_t tfreex;
  mi_delayed_t old_delay;
  mi_thread_free_t tfree;
  size_t yield_count = 0;
  do {
    tfree = mi_atomic_load_acquire(&page->xthread_free); // note: must acquire as we can break/repeat this loop and not do a CAS;
    tfreex = mi_tf_set_delayed(tfree, delay);
    old_delay = mi_tf_delayed(tfree);
    if mi_unlikely(old_delay == MI_DELAYED_FREEING) {
      if (yield_count >= 4) return false; // give up after 4 tries
      yield_count++;
      mi_atomic_yield(); // delay until outstanding MI_DELAYED_FREEING are done.
      // tfree = mi_tf_set_delayed(tfree, MI_NO_DELAYED_FREE); // will cause CAS to busy fail
    }
    else if (delay == old_delay) {
      break; // avoid atomic operation if already equal
    }
    else if (!override_never && old_delay == MI_NEVER_DELAYED_FREE) {
      break; // leave never-delayed flag set
    }
  } while ((old_delay == MI_DELAYED_FREEING) ||
           !mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));

  return true; // success
}

/* -----------------------------------------------------------
Page collect the `local_free` and `thread_free` lists
----------------------------------------------------------- */

// Collect the local `thread_free` list using an atomic exchange.
// Note: The exchange must be done atomically as this is used right after
// moving to the full list in `mi_page_collect_ex` and we need to
// ensure that there was no race where the page became unfull just before the move.
static void _mi_page_thread_free_collect(mi_page_t* page)
{
  mi_block_t* head;
  mi_thread_free_t tfreex;
  mi_thread_free_t tfree = mi_atomic_load_relaxed(&page->xthread_free);
  do {
    head = mi_tf_block(tfree);
    tfreex = mi_tf_set_block(tfree,NULL);
  } while (!mi_atomic_cas_weak_acq_rel(&page->xthread_free, &tfree, tfreex));

  // return if the list is empty
  if (head == NULL) return;

  // find the tail -- also to get a proper count (without data races)
  size_t max_count = page->capacity; // cannot collect more than capacity
  size_t count = 1;
  mi_block_t* tail = head;
  mi_block_t* next;
  while ((next = mi_block_next(page,tail)) != NULL && count <= max_count) {
    count++;
    tail = next;
  }
  // if `count > max_count` there was a memory corruption (possibly infinite list due to double multi-threaded free)
  if (count > max_count) {
    _mi_error_message(EFAULT, "corrupted thread-free list\n");
    return; // the thread-free items cannot be freed
  }

  // and append the current local free list
  mi_block_set_next(page,tail, page->local_free);
  page->local_free = head;

  // update counts now
  page->used -= (uint16_t)count;
}
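
// Collect the thread-free list (into `local_free`) and then move the local free list
// into `page->free`. If `page->free` is not empty the lists are only merged when
// `force` is set (e.g. on shutdown) since that requires a linear scan to find the tail.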
void _mi_page_free_collect(mi_page_t* page, bool force) {
  mi_assert_internal(page!=NULL);

  // collect the thread free list
  if (force || mi_page_thread_free(page) != NULL) { // quick test to avoid an atomic operation
    _mi_page_thread_free_collect(page);
  }

  // and the local free list
  if (page->local_free != NULL) {
    if mi_likely(page->free == NULL) {
      // usual case
      page->free = page->local_free;
      page->local_free = NULL;
      page->free_is_zero = false;
    }
    else if (force) {
      // append -- only on shutdown (force) as this is a linear operation
      mi_block_t* tail = page->local_free;
      mi_block_t* next;
      while ((next = mi_block_next(page, tail)) != NULL) {
        tail = next;
      }
      mi_block_set_next(page, tail, page->free);
      page->free = page->local_free;
      page->local_free = NULL;
      page->free_is_zero = false;
    }
  }

  mi_assert_internal(!force || page->local_free == NULL);
}



/* -----------------------------------------------------------
Page fresh and retire
----------------------------------------------------------- */

// called from segments when reclaiming abandoned pages
void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
  mi_assert_expensive(mi_page_is_valid_init(page));

  mi_assert_internal(mi_page_heap(page) == heap);
  mi_assert_internal(mi_page_thread_free_flag(page) != MI_NEVER_DELAYED_FREE);
  #if MI_HUGE_PAGE_ABANDON
  mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
  #endif

  // TODO: push on full queue immediately if it is full?
  mi_page_queue_t* pq = mi_page_queue(heap, mi_page_block_size(page));
  mi_page_queue_push(heap, pq, page);
  mi_assert_expensive(_mi_page_is_valid(page));
}

// allocate a fresh page from a segment
static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size_t block_size, size_t page_alignment) {
  #if !MI_HUGE_PAGE_ABANDON
  mi_assert_internal(pq != NULL);
  mi_assert_internal(mi_heap_contains_queue(heap, pq));
  mi_assert_internal(page_alignment > 0 || block_size > MI_MEDIUM_OBJ_SIZE_MAX || block_size == pq->block_size);
  #endif
  mi_page_t* page = _mi_segment_page_alloc(heap, block_size, page_alignment, &heap->tld->segments, &heap->tld->os);
  if (page == NULL) {
    // this may be out-of-memory, or an abandoned page was reclaimed (and in our queue)
    return NULL;
  }
  #if MI_HUGE_PAGE_ABANDON
  mi_assert_internal(pq==NULL || _mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
  #endif
  mi_assert_internal(page_alignment >0 || block_size > MI_MEDIUM_OBJ_SIZE_MAX || _mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
  mi_assert_internal(pq!=NULL || mi_page_block_size(page) >= block_size);
  // a fresh page was found, initialize it
  const size_t full_block_size = (pq == NULL || mi_page_is_huge(page) ? mi_page_block_size(page) : block_size); // see also: mi_segment_huge_page_alloc
  mi_assert_internal(full_block_size >= block_size);
  mi_page_init(heap, page, full_block_size, heap->tld);
  mi_heap_stat_increase(heap, pages, 1);
  if (pq != NULL) { mi_page_queue_push(heap, pq, page); }
  mi_assert_expensive(_mi_page_is_valid(page));
  return page;
}

// Get a fresh page to use
static mi_page_t* mi_page_fresh(mi_heap_t* heap, mi_page_queue_t* pq) {
  mi_assert_internal(mi_heap_contains_queue(heap, pq));
  mi_page_t* page = mi_page_fresh_alloc(heap, pq, pq->block_size, 0);
  if (page==NULL) return NULL;
  mi_assert_internal(pq->block_size==mi_page_block_size(page));
  mi_assert_internal(pq==mi_page_queue(heap, mi_page_block_size(page)));
  return page;
}

/* -----------------------------------------------------------
Do any delayed frees
(put there by other threads if they deallocated in a full page)
----------------------------------------------------------- */
void _mi_heap_delayed_free_all(mi_heap_t* heap) {
  while (!_mi_heap_delayed_free_partial(heap)) {
    mi_atomic_yield();
  }
}

// returns true if all delayed frees were processed
bool _mi_heap_delayed_free_partial(mi_heap_t* heap) {
  // take over the list (note: no atomic exchange since it is often NULL)
  mi_block_t* block = mi_atomic_load_ptr_relaxed(mi_block_t, &heap->thread_delayed_free);
  while (block != NULL && !mi_atomic_cas_ptr_weak_acq_rel(mi_block_t, &heap->thread_delayed_free, &block, NULL)) { /* nothing */ };
  bool all_freed = true;

  // and free them all
  while(block != NULL) {
    mi_block_t* next = mi_block_nextx(heap,block, heap->keys);
    // use internal free instead of regular one to keep stats etc correct
    if (!_mi_free_delayed_block(block)) {
      // we might already start delayed freeing while another thread has not yet
      // reset the delayed_freeing flag; in that case delay it further by reinserting the current block
      // into the delayed free list
      all_freed = false;
      mi_block_t* dfree = mi_atomic_load_ptr_relaxed(mi_block_t, &heap->thread_delayed_free);
      do {
        mi_block_set_nextx(heap, block, dfree, heap->keys);
      } while (!mi_atomic_cas_ptr_weak_release(mi_block_t,&heap->thread_delayed_free, &dfree, block));
    }
    block = next;
  }
  return all_freed;
}

/* -----------------------------------------------------------
Unfull, abandon, free and retire
----------------------------------------------------------- */

// Move a page from the full list back to a regular list
void _mi_page_unfull(mi_page_t* page) {
  mi_assert_internal(page != NULL);
  mi_assert_expensive(_mi_page_is_valid(page));
  mi_assert_internal(mi_page_is_in_full(page));
  if (!mi_page_is_in_full(page)) return;

  mi_heap_t* heap = mi_page_heap(page);
  mi_page_queue_t* pqfull = &heap->pages[MI_BIN_FULL];
  mi_page_set_in_full(page, false); // to get the right queue
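  // (otherwise mi_heap_page_queue_of would map the page back to the full queue)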
  mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page);
  mi_page_set_in_full(page, true);
  mi_page_queue_enqueue_from(pq, pqfull, page);
}

static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) {
  mi_assert_internal(pq == mi_page_queue_of(page));
  mi_assert_internal(!mi_page_immediate_available(page));
  mi_assert_internal(!mi_page_is_in_full(page));

  if (mi_page_is_in_full(page)) return;
  mi_page_queue_enqueue_from(&mi_page_heap(page)->pages[MI_BIN_FULL], pq, page);
  _mi_page_free_collect(page,false); // try to collect right away in case another thread freed just before MI_USE_DELAYED_FREE was set
}


// Abandon a page with used blocks at the end of a thread.
// Note: only call if it is ensured that no references exist from
// the `page->heap->thread_delayed_free` into this page.
// Currently only called through `mi_heap_collect_ex` which ensures this.
void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) {
  mi_assert_internal(page != NULL);
  mi_assert_expensive(_mi_page_is_valid(page));
  mi_assert_internal(pq == mi_page_queue_of(page));
  mi_assert_internal(mi_page_heap(page) != NULL);

  mi_heap_t* pheap = mi_page_heap(page);

  // remove from our page list
  mi_segments_tld_t* segments_tld = &pheap->tld->segments;
  mi_page_queue_remove(pq, page);

  // page is no longer associated with our heap
  mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE);
  mi_page_set_heap(page, NULL);

  #if (MI_DEBUG>1) && !MI_TRACK_ENABLED
  // check there are no references left..
  for (mi_block_t* block = (mi_block_t*)pheap->thread_delayed_free; block != NULL; block = mi_block_nextx(pheap, block, pheap->keys)) {
    mi_assert_internal(_mi_ptr_page(block) != page);
  }
  #endif

  // and abandon it
  mi_assert_internal(mi_page_heap(page) == NULL);
  _mi_segment_page_abandon(page,segments_tld);
}


// Free a page with no more free blocks
void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
  mi_assert_internal(page != NULL);
  mi_assert_expensive(_mi_page_is_valid(page));
  mi_assert_internal(pq == mi_page_queue_of(page));
  mi_assert_internal(mi_page_all_free(page));
  mi_assert_internal(mi_page_thread_free_flag(page)!=MI_DELAYED_FREEING);

  // no more aligned blocks in here
  mi_page_set_has_aligned(page, false);

  mi_heap_t* heap = mi_page_heap(page);

  // remove from the page list
  // (no need to do _mi_heap_delayed_free first as all blocks are already free)
  mi_segments_tld_t* segments_tld = &heap->tld->segments;
  mi_page_queue_remove(pq, page);

  // and free it
  mi_page_set_heap(page,NULL);
  _mi_segment_page_free(page, force, segments_tld);
}

#define MI_MAX_RETIRE_SIZE MI_MEDIUM_OBJ_SIZE_MAX // should be less than size for MI_BIN_HUGE
#define MI_RETIRE_CYCLES (16)

// Retire a page with no more used blocks
// Important to not retire too quickly though as new
// allocations might be coming.
// Note: called from `mi_free` and benchmarks often
// trigger this due to freeing everything and then
// allocating again so be careful when changing this.
void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
  mi_assert_internal(page != NULL);
  mi_assert_expensive(_mi_page_is_valid(page));
  mi_assert_internal(mi_page_all_free(page));

  mi_page_set_has_aligned(page, false);

  // don't retire too often..
  // (or we end up retiring and re-allocating most of the time)
  // NOTE: refine this more: we should not retire if this
  // is the only page left with free blocks. It is not clear
  // how to check this efficiently though...
  // for now, we don't retire if it is the only page left of this size class.
  mi_page_queue_t* pq = mi_page_queue_of(page);
  const size_t bsize = mi_page_block_size(page);
  if mi_likely( /* bsize < MI_MAX_RETIRE_SIZE && */ !mi_page_queue_is_special(pq)) { // not full or huge queue?
    if (pq->last==page && pq->first==page) { // the only page in the queue?
      mi_stat_counter_increase(_mi_stats_main.page_no_retire,1);
      page->retire_expire = (bsize <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4);
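      // note: the page is not freed now; retire_expire is a countdown that
      // _mi_heap_collect_retired decrements, only freeing the page once it
      // reaches zero (or when collection is forced).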
      mi_heap_t* heap = mi_page_heap(page);
      mi_assert_internal(pq >= heap->pages);
      const size_t index = pq - heap->pages;
      mi_assert_internal(index < MI_BIN_FULL && index < MI_BIN_HUGE);
      if (index < heap->page_retired_min) heap->page_retired_min = index;
      if (index > heap->page_retired_max) heap->page_retired_max = index;
      mi_assert_internal(mi_page_all_free(page));
      return; // don't free after all
    }
  }
  _mi_page_free(page, pq, false);
}

// free retired pages: we don't need to look at the entire queues
// since we only retire pages that are at the head position in a queue.
void _mi_heap_collect_retired(mi_heap_t* heap, bool force) {
  size_t min = MI_BIN_FULL;
  size_t max = 0;
  for(size_t bin = heap->page_retired_min; bin <= heap->page_retired_max; bin++) {
    mi_page_queue_t* pq = &heap->pages[bin];
    mi_page_t* page = pq->first;
    if (page != NULL && page->retire_expire != 0) {
      if (mi_page_all_free(page)) {
        page->retire_expire--;
        if (force || page->retire_expire == 0) {
          _mi_page_free(pq->first, pq, force);
        }
        else {
          // keep retired, update min/max
          if (bin < min) min = bin;
          if (bin > max) max = bin;
        }
      }
      else {
        page->retire_expire = 0;
      }
    }
  }
  heap->page_retired_min = min;
  heap->page_retired_max = max;
}


/* -----------------------------------------------------------
Initialize the initial free list in a page.
In secure mode we initialize a randomized list by
alternating between slices.
----------------------------------------------------------- */

#define MI_MAX_SLICE_SHIFT (6) // at most 64 slices
#define MI_MAX_SLICES (1UL << MI_MAX_SLICE_SHIFT)
#define MI_MIN_SLICES (2)

static void mi_page_free_list_extend_secure(mi_heap_t* const heap, mi_page_t* const page, const size_t bsize, const size_t extend, mi_stats_t* const stats) {
  MI_UNUSED(stats);
  #if (MI_SECURE<=2)
  mi_assert_internal(page->free == NULL);
  mi_assert_internal(page->local_free == NULL);
  #endif
  mi_assert_internal(page->capacity + extend <= page->reserved);
  mi_assert_internal(bsize == mi_page_block_size(page));
  void* const page_area = mi_page_start(page);

  // initialize a randomized free list
  // set up `slice_count` slices to alternate between
  size_t shift = MI_MAX_SLICE_SHIFT;
  while ((extend >> shift) == 0) {
    shift--;
  }
  const size_t slice_count = (size_t)1U << shift;
  const size_t slice_extend = extend / slice_count;
  mi_assert_internal(slice_extend >= 1);
  mi_block_t* blocks[MI_MAX_SLICES]; // current start of the slice
  size_t counts[MI_MAX_SLICES]; // available objects in the slice
  for (size_t i = 0; i < slice_count; i++) {
    blocks[i] = mi_page_block_at(page, page_area, bsize, page->capacity + i*slice_extend);
    counts[i] = slice_extend;
  }
  counts[slice_count-1] += (extend % slice_count); // final slice holds the modulus too (todo: distribute evenly?)

  // and initialize the free list by randomly threading through them
  // set up first element
  const uintptr_t r = _mi_heap_random_next(heap);
  size_t current = r % slice_count;
  counts[current]--;
  mi_block_t* const free_start = blocks[current];
  // and iterate through the rest; use `random_shuffle` for performance
  uintptr_t rnd = _mi_random_shuffle(r|1); // ensure not 0
  for (size_t i = 1; i < extend; i++) {
    // call random_shuffle only every INTPTR_SIZE rounds
    const size_t round = i%MI_INTPTR_SIZE;
    if (round == 0) rnd = _mi_random_shuffle(rnd);
    // select a random next slice index
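    // (slice_count is a power of two, so masking with slice_count-1 is a cheap modulo)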
    size_t next = ((rnd >> 8*round) & (slice_count-1));
    while (counts[next]==0) { // ensure it still has space
      next++;
      if (next==slice_count) next = 0;
    }
    // and link the current block to it
    counts[next]--;
    mi_block_t* const block = blocks[current];
    blocks[current] = (mi_block_t*)((uint8_t*)block + bsize); // bump to the following block
    mi_block_set_next(page, block, blocks[next]); // and set next; note: we may have `current == next`
    current = next;
  }
  // prepend to the free list (usually NULL)
  mi_block_set_next(page, blocks[current], page->free); // end of the list
  page->free = free_start;
}

static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, const size_t bsize, const size_t extend, mi_stats_t* const stats)
{
  MI_UNUSED(stats);
  #if (MI_SECURE <= 2)
  mi_assert_internal(page->free == NULL);
  mi_assert_internal(page->local_free == NULL);
  #endif
  mi_assert_internal(page->capacity + extend <= page->reserved);
  mi_assert_internal(bsize == mi_page_block_size(page));
  void* const page_area = mi_page_start(page);

  mi_block_t* const start = mi_page_block_at(page, page_area, bsize, page->capacity);

  // initialize a sequential free list
  mi_block_t* const last = mi_page_block_at(page, page_area, bsize, page->capacity + extend - 1);
  mi_block_t* block = start;
  while(block <= last) {
    mi_block_t* next = (mi_block_t*)((uint8_t*)block + bsize);
    mi_block_set_next(page,block,next);
    block = next;
  }
  // prepend to free list (usually `NULL`)
  mi_block_set_next(page, last, page->free);
  page->free = start;
}

/* -----------------------------------------------------------
Page initialize and extend the capacity
----------------------------------------------------------- */

#define MI_MAX_EXTEND_SIZE (4*1024) // heuristic, one OS page seems to work well.
#if (MI_SECURE>0)
#define MI_MIN_EXTEND (8*MI_SECURE) // extend at least by this many
#else
#define MI_MIN_EXTEND (4)
#endif

// Extend the capacity (up to reserved) by initializing a free list
// We do at most `MI_MAX_EXTEND_SIZE` bytes at a time to avoid touching too much memory
// Note: we also experimented with "bump" allocation on the first
// allocations but this did not speed up any benchmark (due to an
// extra test in malloc? or cache effects?)
static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld) {
  MI_UNUSED(tld);
  mi_assert_expensive(mi_page_is_valid_init(page));
  #if (MI_SECURE<=2)
  mi_assert(page->free == NULL);
  mi_assert(page->local_free == NULL);
  if (page->free != NULL) return;
  #endif
  if (page->capacity >= page->reserved) return;

  mi_stat_counter_increase(tld->stats.pages_extended, 1);

  // calculate the extend count
  const size_t bsize = mi_page_block_size(page);
  size_t extend = page->reserved - page->capacity;
  mi_assert_internal(extend > 0);
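
  // touch at most MI_MAX_EXTEND_SIZE bytes of fresh blocks per extension (but always at least MI_MIN_EXTEND blocks)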
  size_t max_extend = (bsize >= MI_MAX_EXTEND_SIZE ? MI_MIN_EXTEND : MI_MAX_EXTEND_SIZE/bsize);
  if (max_extend < MI_MIN_EXTEND) { max_extend = MI_MIN_EXTEND; }
  mi_assert_internal(max_extend > 0);

  if (extend > max_extend) {
    // ensure we don't touch memory beyond the page to reduce page commit.
    // the `lean` benchmark tests this. Going from 1 to 8 increases rss by 50%.
    extend = max_extend;
  }

  mi_assert_internal(extend > 0 && extend + page->capacity <= page->reserved);
  mi_assert_internal(extend < (1UL<<16));

  // and extend the free list
  if (extend < MI_MIN_SLICES || MI_SECURE==0) { //!mi_option_is_enabled(mi_option_secure)) {
    mi_page_free_list_extend(page, bsize, extend, &tld->stats );
  }
  else {
    mi_page_free_list_extend_secure(heap, page, bsize, extend, &tld->stats);
  }
  // enable the new free list
  page->capacity += (uint16_t)extend;
  mi_stat_increase(tld->stats.page_committed, extend * bsize);
  mi_assert_expensive(mi_page_is_valid_init(page));
}

// Initialize a fresh page
static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi_tld_t* tld) {
  mi_assert(page != NULL);
  mi_segment_t* segment = _mi_page_segment(page);
  mi_assert(segment != NULL);
  mi_assert_internal(block_size > 0);
  // set fields
  mi_page_set_heap(page, heap);
  page->block_size = block_size;
  size_t page_size;
  page->page_start = _mi_segment_page_start(segment, page, &page_size);
  mi_track_mem_noaccess(page->page_start,page_size);
  mi_assert_internal(mi_page_block_size(page) <= page_size);
  mi_assert_internal(page_size <= page->slice_count*MI_SEGMENT_SLICE_SIZE);
  mi_assert_internal(page_size / block_size < (1L<<16));
  page->reserved = (uint16_t)(page_size / block_size);
  mi_assert_internal(page->reserved > 0);
  #if (MI_PADDING || MI_ENCODE_FREELIST)
  page->keys[0] = _mi_heap_random_next(heap);
  page->keys[1] = _mi_heap_random_next(heap);
  #endif
  page->free_is_zero = page->is_zero_init;
  #if MI_DEBUG>2
  if (page->is_zero_init) {
    mi_track_mem_defined(page->page_start, page_size);
    mi_assert_expensive(mi_mem_is_zero(page->page_start, page_size));
  }
  #endif
  mi_assert_internal(page->is_committed);
  if (block_size > 0 && _mi_is_power_of_two(block_size)) {
    page->block_size_shift = (uint8_t)(mi_ctz((uintptr_t)block_size));
  }
  else {
    page->block_size_shift = 0;
  }
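  // note: when the block size is a power of two, `block_size_shift` caches its log2
  // (so that `block_size == 1 << block_size_shift`); otherwise it stays 0.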

  mi_assert_internal(page->capacity == 0);
  mi_assert_internal(page->free == NULL);
  mi_assert_internal(page->used == 0);
  mi_assert_internal(page->xthread_free == 0);
  mi_assert_internal(page->next == NULL);
  mi_assert_internal(page->prev == NULL);
  mi_assert_internal(page->retire_expire == 0);
  mi_assert_internal(!mi_page_has_aligned(page));
  #if (MI_PADDING || MI_ENCODE_FREELIST)
  mi_assert_internal(page->keys[0] != 0);
  mi_assert_internal(page->keys[1] != 0);
  #endif
  mi_assert_internal(page->block_size_shift == 0 || (block_size == ((size_t)1 << page->block_size_shift)));
  mi_assert_expensive(mi_page_is_valid_init(page));

  // initialize an initial free list
  mi_page_extend_free(heap,page,tld);
  mi_assert(mi_page_immediate_available(page));
}


/* -----------------------------------------------------------
Find pages with free blocks
-------------------------------------------------------------*/

// Find a page with free blocks of `page->block_size`.
static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try)
{
  // search through the pages in "next fit" order
  #if MI_STAT
  size_t count = 0;
  #endif
  mi_page_t* page = pq->first;
  while (page != NULL)
  {
    mi_page_t* next = page->next; // remember next
    #if MI_STAT
    count++;
    #endif

    // 0. collect freed blocks by us and other threads
    _mi_page_free_collect(page, false);

    // 1. if the page contains free blocks, we are done
    if (mi_page_immediate_available(page)) {
      break; // pick this one
    }

    // 2. Try to extend
    if (page->capacity < page->reserved) {
      mi_page_extend_free(heap, page, heap->tld);
      mi_assert_internal(mi_page_immediate_available(page));
      break;
    }

    // 3. If the page is completely full, move it to the `mi_pages_full`
    // queue so we don't visit long-lived pages too often.
    mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page));
    mi_page_to_full(page, pq);

    page = next;
  } // for each page

  mi_heap_stat_counter_increase(heap, searches, count);

  if (page == NULL) {
    _mi_heap_collect_retired(heap, false); // perhaps make a page available?
    page = mi_page_fresh(heap, pq);
    if (page == NULL && first_try) {
      // out-of-memory _or_ an abandoned page with free blocks was reclaimed, try once again
      page = mi_page_queue_find_free_ex(heap, pq, false);
    }
  }
  else {
    mi_assert(pq->first == page);
    page->retire_expire = 0;
  }
  mi_assert_internal(page == NULL || mi_page_immediate_available(page));
  return page;
}



// Find a page with free blocks of `size`.
static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) {
  mi_page_queue_t* pq = mi_page_queue(heap,size);
  mi_page_t* page = pq->first;
  if (page != NULL) {
    #if (MI_SECURE>=3) // in secure mode, we extend half the time to increase randomness
    if (page->capacity < page->reserved && ((_mi_heap_random_next(heap) & 1) == 1)) {
      mi_page_extend_free(heap, page, heap->tld);
      mi_assert_internal(mi_page_immediate_available(page));
    }
    else
    #endif
    {
      _mi_page_free_collect(page,false);
    }

    if (mi_page_immediate_available(page)) {
      page->retire_expire = 0;
      return page; // fast path
    }
  }
  return mi_page_queue_find_free_ex(heap, pq, true);
}


/* -----------------------------------------------------------
Users can register a deferred free function called
when the `free` list is empty. Since the `local_free`
is separate this is deterministically called after
a certain number of allocations.
----------------------------------------------------------- */

static mi_deferred_free_fun* volatile deferred_free = NULL;
static _Atomic(void*) deferred_arg; // = NULL
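
// Invoke the registered deferred-free callback (if any), passing the heap's
// heartbeat counter; `heap->tld->recurse` guards against re-entrant invocation
// if the callback itself allocates or frees.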
void _mi_deferred_free(mi_heap_t* heap, bool force) {
  heap->tld->heartbeat++;
  if (deferred_free != NULL && !heap->tld->recurse) {
    heap->tld->recurse = true;
    deferred_free(force, heap->tld->heartbeat, mi_atomic_load_ptr_relaxed(void,&deferred_arg));
    heap->tld->recurse = false;
  }
}

void mi_register_deferred_free(mi_deferred_free_fun* fn, void* arg) mi_attr_noexcept {
  deferred_free = fn;
  mi_atomic_store_ptr_release(void,&deferred_arg, arg);
}


/* -----------------------------------------------------------
General allocation
----------------------------------------------------------- */

// Large and huge page allocation.
// Huge pages contain just one block, and the segment contains just that page (as `MI_SEGMENT_HUGE`).
// Huge pages are also used if the requested alignment is very large (> MI_BLOCK_ALIGNMENT_MAX)
// so their size is not always `> MI_LARGE_OBJ_SIZE_MAX`.
static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size, size_t page_alignment) {
  size_t block_size = _mi_os_good_alloc_size(size);
  mi_assert_internal(mi_bin(block_size) == MI_BIN_HUGE || page_alignment > 0);
  bool is_huge = (block_size > MI_LARGE_OBJ_SIZE_MAX || page_alignment > 0);
  #if MI_HUGE_PAGE_ABANDON
  mi_page_queue_t* pq = (is_huge ? NULL : mi_page_queue(heap, block_size));
  #else
  mi_page_queue_t* pq = mi_page_queue(heap, is_huge ? MI_LARGE_OBJ_SIZE_MAX+1 : block_size);
  mi_assert_internal(!is_huge || mi_page_queue_is_huge(pq));
  #endif
  mi_page_t* page = mi_page_fresh_alloc(heap, pq, block_size, page_alignment);
  if (page != NULL) {
    mi_assert_internal(mi_page_immediate_available(page));

    if (is_huge) {
      mi_assert_internal(mi_page_is_huge(page));
      mi_assert_internal(_mi_page_segment(page)->kind == MI_SEGMENT_HUGE);
      mi_assert_internal(_mi_page_segment(page)->used==1);
      #if MI_HUGE_PAGE_ABANDON
      mi_assert_internal(_mi_page_segment(page)->thread_id==0); // abandoned, not in the huge queue
      mi_page_set_heap(page, NULL);
      #endif
    }
    else {
      mi_assert_internal(!mi_page_is_huge(page));
    }

    const size_t bsize = mi_page_usable_block_size(page); // note: not `mi_page_block_size` to account for padding
    if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
      mi_heap_stat_increase(heap, large, bsize);
      mi_heap_stat_counter_increase(heap, large_count, 1);
    }
    else {
      mi_heap_stat_increase(heap, huge, bsize);
      mi_heap_stat_counter_increase(heap, huge_count, 1);
    }
  }
  return page;
}


// Allocate a page
// Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed.
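// Requests above MI_MEDIUM_OBJ_SIZE_MAX (or with a non-zero huge_alignment) are routed
// to mi_large_huge_page_alloc; all other sizes use the size-segregated page queues.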
static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size, size_t huge_alignment) mi_attr_noexcept {
  // huge allocation?
  const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size`
  if mi_unlikely(req_size > (MI_MEDIUM_OBJ_SIZE_MAX - MI_PADDING_SIZE) || huge_alignment > 0) {
    if mi_unlikely(req_size > MI_MAX_ALLOC_SIZE) {
      _mi_error_message(EOVERFLOW, "allocation request is too large (%zu bytes)\n", req_size);
      return NULL;
    }
    else {
      return mi_large_huge_page_alloc(heap,size,huge_alignment);
    }
  }
  else {
    // otherwise find a page with free blocks in our size segregated queues
    #if MI_PADDING
    mi_assert_internal(size >= MI_PADDING_SIZE);
    #endif
    return mi_find_free_page(heap, size);
  }
}

// Generic allocation routine if the fast path (`alloc.c:mi_page_malloc`) does not succeed.
// Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed.
// The `huge_alignment` is normally 0 but is set to a multiple of MI_SEGMENT_SIZE for
// very large requested alignments in which case we use a huge segment.
void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept
{
  mi_assert_internal(heap != NULL);

  // initialize if necessary
  if mi_unlikely(!mi_heap_is_initialized(heap)) {
    heap = mi_heap_get_default(); // calls mi_thread_init
    if mi_unlikely(!mi_heap_is_initialized(heap)) { return NULL; }
  }
  mi_assert_internal(mi_heap_is_initialized(heap));

  // call potential deferred free routines
  _mi_deferred_free(heap, false);

  // free delayed frees from other threads (but skip contended ones)
  _mi_heap_delayed_free_partial(heap);

  // find (or allocate) a page of the right size
  mi_page_t* page = mi_find_page(heap, size, huge_alignment);
  if mi_unlikely(page == NULL) { // first time out of memory, try to collect and retry the allocation once more
    mi_heap_collect(heap, true /* force */);
    page = mi_find_page(heap, size, huge_alignment);
  }

  if mi_unlikely(page == NULL) { // out of memory
    const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size`
    _mi_error_message(ENOMEM, "unable to allocate memory (%zu bytes)\n", req_size);
    return NULL;
  }

  mi_assert_internal(mi_page_immediate_available(page));
  mi_assert_internal(mi_page_block_size(page) >= size);

  // and try again, this time succeeding! (i.e. this should never recurse through _mi_page_malloc)
  if mi_unlikely(zero && page->block_size == 0) {
    // note: we cannot call _mi_page_malloc with zeroing for huge blocks; we zero it afterwards in that case.
    void* p = _mi_page_malloc(heap, page, size);
    mi_assert_internal(p != NULL);
    _mi_memzero_aligned(p, mi_page_usable_block_size(page));
    return p;
  }
  else {
    return _mi_page_malloc_zero(heap, page, size, zero);
  }
}