GitHub Repository: emscripten-core/emscripten
Path: blob/main/system/lib/mimalloc/src/heap.c
/*----------------------------------------------------------------------------
Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/

#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "mimalloc/atomic.h"
#include "mimalloc/prim.h" // mi_prim_get_default_heap

#include <string.h> // memset, memcpy

#if defined(_MSC_VER) && (_MSC_VER < 1920)
#pragma warning(disable:4204) // non-constant aggregate initializer
#endif

/* -----------------------------------------------------------
  Helpers
----------------------------------------------------------- */

// return `true` if ok, `false` to break
typedef bool (heap_page_visitor_fun)(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2);

// Visit all pages in a heap; returns `false` if break was called.
static bool mi_heap_visit_pages(mi_heap_t* heap, heap_page_visitor_fun* fn, void* arg1, void* arg2)
{
  if (heap==NULL || heap->page_count==0) return 0;

  // visit all pages
  #if MI_DEBUG>1
  size_t total = heap->page_count;
  size_t count = 0;
  #endif

  for (size_t i = 0; i <= MI_BIN_FULL; i++) {
    mi_page_queue_t* pq = &heap->pages[i];
    mi_page_t* page = pq->first;
    while(page != NULL) {
      mi_page_t* next = page->next; // save next in case the page gets removed from the queue
      mi_assert_internal(mi_page_heap(page) == heap);
      #if MI_DEBUG>1
      count++;
      #endif
      if (!fn(heap, pq, page, arg1, arg2)) return false;
      page = next; // and continue
    }
  }
  mi_assert_internal(count == total);
  return true;
}


#if MI_DEBUG>=2
static bool mi_heap_page_is_valid(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
  MI_UNUSED(arg1);
  MI_UNUSED(arg2);
  MI_UNUSED(pq);
  mi_assert_internal(mi_page_heap(page) == heap);
  mi_segment_t* segment = _mi_page_segment(page);
  mi_assert_internal(segment->thread_id == heap->thread_id);
  mi_assert_expensive(_mi_page_is_valid(page));
  return true;
}
#endif
#if MI_DEBUG>=3
static bool mi_heap_is_valid(mi_heap_t* heap) {
  mi_assert_internal(heap!=NULL);
  mi_heap_visit_pages(heap, &mi_heap_page_is_valid, NULL, NULL);
  return true;
}
#endif




/* -----------------------------------------------------------
  "Collect" pages by migrating `local_free` and `thread_free`
  lists and freeing empty pages. This is done when a thread
  stops (and in that case abandons pages if there are still
  blocks alive)
----------------------------------------------------------- */

typedef enum mi_collect_e {
  MI_NORMAL,
  MI_FORCE,
  MI_ABANDON
} mi_collect_t;
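
// Note (explanatory, not from the upstream source): as used below, `MI_NORMAL`
// and `MI_FORCE` correspond to `mi_heap_collect(heap, false/true)`, while
// `MI_ABANDON` is used via `_mi_heap_collect_abandon` when a thread stops and
// pages that still contain live blocks must be abandoned.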


static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg_collect, void* arg2 ) {
  MI_UNUSED(arg2);
  MI_UNUSED(heap);
  mi_assert_internal(mi_heap_page_is_valid(heap, pq, page, NULL, NULL));
  mi_collect_t collect = *((mi_collect_t*)arg_collect);
  _mi_page_free_collect(page, collect >= MI_FORCE);
  if (collect == MI_FORCE) {
    // note: call before a potential `_mi_page_free` as the segment may be freed if this was the last used page in that segment.
    mi_segment_t* segment = _mi_page_segment(page);
    _mi_segment_collect(segment, true /* force? */, &heap->tld->segments);
  }
  if (mi_page_all_free(page)) {
    // no more used blocks, free the page.
    // note: this will free retired pages as well.
    _mi_page_free(page, pq, collect >= MI_FORCE);
  }
  else if (collect == MI_ABANDON) {
    // still used blocks but the thread is done; abandon the page
    _mi_page_abandon(page, pq);
  }
  return true; // don't break
}

static bool mi_heap_page_never_delayed_free(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
  MI_UNUSED(arg1);
  MI_UNUSED(arg2);
  MI_UNUSED(heap);
  MI_UNUSED(pq);
  _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);
  return true; // don't break
}

static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
{
  if (heap==NULL || !mi_heap_is_initialized(heap)) return;

  const bool force = (collect >= MI_FORCE);
  _mi_deferred_free(heap, force);

  // python/cpython#112532: we may be called from a thread that is not the owner of the heap
  const bool is_main_thread = (_mi_is_main_thread() && heap->thread_id == _mi_thread_id());

  // note: never reclaim on collect but leave it to threads that need storage to reclaim
  const bool force_main =
    #ifdef NDEBUG
      collect == MI_FORCE
    #else
      collect >= MI_FORCE
    #endif
      && is_main_thread && mi_heap_is_backing(heap) && !heap->no_reclaim;

  if (force_main) {
    // the main thread is abandoned (end-of-program), try to reclaim all abandoned segments.
    // if all memory is freed by now, all segments should be freed.
    _mi_abandoned_reclaim_all(heap, &heap->tld->segments);
  }

  // if abandoning, mark all pages to no longer add to delayed_free
  if (collect == MI_ABANDON) {
    mi_heap_visit_pages(heap, &mi_heap_page_never_delayed_free, NULL, NULL);
  }

  // free all current thread delayed blocks.
  // (if abandoning, after this there are no more thread-delayed references into the pages.)
  _mi_heap_delayed_free_all(heap);

  // collect retired pages
  _mi_heap_collect_retired(heap, force);

  // collect all pages owned by this thread
  mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL);
  mi_assert_internal( collect != MI_ABANDON || mi_atomic_load_ptr_acquire(mi_block_t,&heap->thread_delayed_free) == NULL );

  // collect abandoned segments (in particular, purge expired parts of segments in the abandoned segment list)
  // note: forced purge can be quite expensive if many threads are created/destroyed so we do not force on abandonment
  _mi_abandoned_collect(heap, collect == MI_FORCE /* force? */, &heap->tld->segments);

  // if forced, collect thread data cache on program-exit (or shared library unload)
  if (force && is_main_thread && mi_heap_is_backing(heap)) {
    _mi_thread_data_collect(); // collect thread data cache
  }

  // collect arenas (this is program wide so don't force purges on abandonment of threads)
  _mi_arenas_collect(collect == MI_FORCE /* force purge? */, &heap->tld->stats);
}

void _mi_heap_collect_abandon(mi_heap_t* heap) {
  mi_heap_collect_ex(heap, MI_ABANDON);
}

void mi_heap_collect(mi_heap_t* heap, bool force) mi_attr_noexcept {
  mi_heap_collect_ex(heap, (force ? MI_FORCE : MI_NORMAL));
}

void mi_collect(bool force) mi_attr_noexcept {
  mi_heap_collect(mi_prim_get_default_heap(), force);
}
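
/* Usage sketch (illustrative; only the public `mi_collect`, `mi_heap_collect`,
   and `mi_heap_get_default` defined in this file are assumed):

     mi_collect(false);                              // collect the default heap, no forced purge
     mi_collect(true);                               // forced collect, e.g. before measuring memory use
     mi_heap_collect(mi_heap_get_default(), true);   // equivalent call on an explicit heap
*/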


/* -----------------------------------------------------------
  Heap new
----------------------------------------------------------- */

mi_heap_t* mi_heap_get_default(void) {
  mi_thread_init();
  return mi_prim_get_default_heap();
}

static bool mi_heap_is_default(const mi_heap_t* heap) {
  return (heap == mi_prim_get_default_heap());
}


mi_heap_t* mi_heap_get_backing(void) {
  mi_heap_t* heap = mi_heap_get_default();
  mi_assert_internal(heap!=NULL);
  mi_heap_t* bheap = heap->tld->heap_backing;
  mi_assert_internal(bheap!=NULL);
  mi_assert_internal(bheap->thread_id == _mi_thread_id());
  return bheap;
}

void _mi_heap_init(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool noreclaim, uint8_t tag) {
  _mi_memcpy_aligned(heap, &_mi_heap_empty, sizeof(mi_heap_t));
  heap->tld = tld;
  heap->thread_id = _mi_thread_id();
  heap->arena_id = arena_id;
  heap->no_reclaim = noreclaim;
  heap->tag = tag;
  if (heap == tld->heap_backing) {
    _mi_random_init(&heap->random);
  }
  else {
    _mi_random_split(&tld->heap_backing->random, &heap->random);
  }
  heap->cookie = _mi_heap_random_next(heap) | 1;
  heap->keys[0] = _mi_heap_random_next(heap);
  heap->keys[1] = _mi_heap_random_next(heap);
  // push on the thread local heaps list
  heap->next = heap->tld->heaps;
  heap->tld->heaps = heap;
}

mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id) {
  mi_heap_t* bheap = mi_heap_get_backing();
  mi_heap_t* heap = mi_heap_malloc_tp(bheap, mi_heap_t); // todo: OS allocate in secure mode?
  if (heap == NULL) return NULL;
  // don't reclaim abandoned pages, as otherwise `mi_heap_destroy` would be unsafe
  _mi_heap_init(heap, bheap->tld, arena_id, true /* no reclaim */, 0 /* default tag */);
  return heap;
}

mi_decl_nodiscard mi_heap_t* mi_heap_new(void) {
  return mi_heap_new_in_arena(_mi_arena_id_none());
}
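
/* Usage sketch (illustrative; `mi_heap_malloc`, `mi_free`, and `mi_heap_delete`
   are part of the public mimalloc API declared in mimalloc.h):

     mi_heap_t* h = mi_heap_new();         // first-class heap owned by this thread
     if (h != NULL) {
       void* p = mi_heap_malloc(h, 128);
       // ... use p ...
       mi_free(p);                         // individual frees are optional before delete
       mi_heap_delete(h);                  // migrates remaining pages to the backing heap
     }
*/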

bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid) {
  return _mi_arena_memid_is_suitable(memid, heap->arena_id);
}

uintptr_t _mi_heap_random_next(mi_heap_t* heap) {
  return _mi_random_next(&heap->random);
}

// zero out the page queues
static void mi_heap_reset_pages(mi_heap_t* heap) {
  mi_assert_internal(heap != NULL);
  mi_assert_internal(mi_heap_is_initialized(heap));
  // TODO: copy full empty heap instead?
  memset(&heap->pages_free_direct, 0, sizeof(heap->pages_free_direct));
  _mi_memcpy_aligned(&heap->pages, &_mi_heap_empty.pages, sizeof(heap->pages));
  heap->thread_delayed_free = NULL;
  heap->page_count = 0;
}

// called from `mi_heap_destroy` and `mi_heap_delete` to free the internal heap resources.
static void mi_heap_free(mi_heap_t* heap) {
  mi_assert(heap != NULL);
  mi_assert_internal(mi_heap_is_initialized(heap));
  if (heap==NULL || !mi_heap_is_initialized(heap)) return;
  if (mi_heap_is_backing(heap)) return; // don't free the backing heap

  // reset default
  if (mi_heap_is_default(heap)) {
    _mi_heap_set_default_direct(heap->tld->heap_backing);
  }

  // remove ourselves from the thread local heaps list
  // linear search but we expect the number of heaps to be relatively small
  mi_heap_t* prev = NULL;
  mi_heap_t* curr = heap->tld->heaps;
  while (curr != heap && curr != NULL) {
    prev = curr;
    curr = curr->next;
  }
  mi_assert_internal(curr == heap);
  if (curr == heap) {
    if (prev != NULL) { prev->next = heap->next; }
    else { heap->tld->heaps = heap->next; }
  }
  mi_assert_internal(heap->tld->heaps != NULL);

  // and free the used memory
  mi_free(heap);
}

// return a heap on the same thread as `heap` specialized for the specified tag (if it exists)
mi_heap_t* _mi_heap_by_tag(mi_heap_t* heap, uint8_t tag) {
  if (heap->tag == tag) {
    return heap;
  }
  for (mi_heap_t *curr = heap->tld->heaps; curr != NULL; curr = curr->next) {
    if (curr->tag == tag) {
      return curr;
    }
  }
  return NULL;
}

/* -----------------------------------------------------------
  Heap destroy
----------------------------------------------------------- */

static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
  MI_UNUSED(arg1);
  MI_UNUSED(arg2);
  MI_UNUSED(heap);
  MI_UNUSED(pq);

  // ensure no more thread_delayed_free will be added
  _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);

  // stats
  const size_t bsize = mi_page_block_size(page);
  if (bsize > MI_MEDIUM_OBJ_SIZE_MAX) {
    if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
      mi_heap_stat_decrease(heap, large, bsize);
    }
    else {
      mi_heap_stat_decrease(heap, huge, bsize);
    }
  }
  #if (MI_STAT)
  _mi_page_free_collect(page, false); // update used count
  const size_t inuse = page->used;
  if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
    mi_heap_stat_decrease(heap, normal, bsize * inuse);
    #if (MI_STAT>1)
    mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], inuse);
    #endif
  }
  mi_heap_stat_decrease(heap, malloc, bsize * inuse); // todo: off for aligned blocks...
  #endif

  // pretend it is all free now
  mi_assert_internal(mi_page_thread_free(page) == NULL);
  page->used = 0;

  // and free the page
  // mi_page_free(page,false);
  page->next = NULL;
  page->prev = NULL;
  _mi_segment_page_free(page,false /* no force? */, &heap->tld->segments);

  return true; // keep going
}

void _mi_heap_destroy_pages(mi_heap_t* heap) {
  mi_heap_visit_pages(heap, &_mi_heap_page_destroy, NULL, NULL);
  mi_heap_reset_pages(heap);
}

#if MI_TRACK_HEAP_DESTROY
static bool mi_cdecl mi_heap_track_block_free(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg) {
  MI_UNUSED(heap); MI_UNUSED(area); MI_UNUSED(arg); MI_UNUSED(block_size);
  mi_track_free_size(block,mi_usable_size(block));
  return true;
}
#endif

void mi_heap_destroy(mi_heap_t* heap) {
  mi_assert(heap != NULL);
  mi_assert(mi_heap_is_initialized(heap));
  mi_assert(heap->no_reclaim);
  mi_assert_expensive(mi_heap_is_valid(heap));
  if (heap==NULL || !mi_heap_is_initialized(heap)) return;
  if (!heap->no_reclaim) {
    // don't free in case it may contain reclaimed pages
    mi_heap_delete(heap);
  }
  else {
    // track all blocks as freed
    #if MI_TRACK_HEAP_DESTROY
    mi_heap_visit_blocks(heap, true, mi_heap_track_block_free, NULL);
    #endif
    // free all pages
    _mi_heap_destroy_pages(heap);
    mi_heap_free(heap);
  }
}
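
/* Usage sketch (illustrative): `mi_heap_destroy` frees every block still
   allocated in the heap at once, so pointers into it must no longer be used.
   It is only intended for heaps created with `mi_heap_new`/`mi_heap_new_in_arena`
   (which set `no_reclaim`); otherwise it falls back to `mi_heap_delete`.

     mi_heap_t* scratch = mi_heap_new();
     if (scratch != NULL) {
       char* buf = (char*)mi_heap_malloc(scratch, 4096);
       // ... temporary work with buf ...
       mi_heap_destroy(scratch);           // frees buf and all other blocks in one go
     }
*/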

// forcefully destroy all heaps in the current thread
void _mi_heap_unsafe_destroy_all(void) {
  mi_heap_t* bheap = mi_heap_get_backing();
  mi_heap_t* curr = bheap->tld->heaps;
  while (curr != NULL) {
    mi_heap_t* next = curr->next;
    if (curr->no_reclaim) {
      mi_heap_destroy(curr);
    }
    else {
      _mi_heap_destroy_pages(curr);
    }
    curr = next;
  }
}

/* -----------------------------------------------------------
  Safe Heap delete
----------------------------------------------------------- */

// Transfer the pages from one heap to the other
static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) {
  mi_assert_internal(heap!=NULL);
  if (from==NULL || from->page_count == 0) return;

  // reduce the size of the delayed frees
  _mi_heap_delayed_free_partial(from);

  // transfer all pages by appending the queues; this will set a new heap field
  // so threads may do delayed frees in either heap for a while.
  // note: appending waits for each page to not be in the `MI_DELAYED_FREEING` state
  // so after this only the new heap will get delayed frees
  for (size_t i = 0; i <= MI_BIN_FULL; i++) {
    mi_page_queue_t* pq = &heap->pages[i];
    mi_page_queue_t* append = &from->pages[i];
    size_t pcount = _mi_page_queue_append(heap, pq, append);
    heap->page_count += pcount;
    from->page_count -= pcount;
  }
  mi_assert_internal(from->page_count == 0);

  // and do outstanding delayed frees in the `from` heap
  // note: be careful here as the `heap` field in all those pages no longer points to `from`;
  // this turns out to be ok as `_mi_heap_delayed_free` only visits the list and calls
  // the regular `_mi_free_delayed_block`, which is safe.
  _mi_heap_delayed_free_all(from);
  #if !defined(_MSC_VER) || (_MSC_VER > 1900) // somehow the following line gives an error in VS2015, issue #353
  mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_block_t,&from->thread_delayed_free) == NULL);
  #endif

  // and reset the `from` heap
  mi_heap_reset_pages(from);
}

// Safely delete a heap without freeing any still allocated blocks in that heap.
void mi_heap_delete(mi_heap_t* heap)
{
  mi_assert(heap != NULL);
  mi_assert(mi_heap_is_initialized(heap));
  mi_assert_expensive(mi_heap_is_valid(heap));
  if (heap==NULL || !mi_heap_is_initialized(heap)) return;

  if (!mi_heap_is_backing(heap)) {
    // transfer still used pages to the backing heap
    mi_heap_absorb(heap->tld->heap_backing, heap);
  }
  else {
    // the backing heap abandons its pages
    _mi_heap_collect_abandon(heap);
  }
  mi_assert_internal(heap->page_count==0);
  mi_heap_free(heap);
}

mi_heap_t* mi_heap_set_default(mi_heap_t* heap) {
  mi_assert(heap != NULL);
  mi_assert(mi_heap_is_initialized(heap));
  if (heap==NULL || !mi_heap_is_initialized(heap)) return NULL;
  mi_assert_expensive(mi_heap_is_valid(heap));
  mi_heap_t* old = mi_prim_get_default_heap();
  _mi_heap_set_default_direct(heap);
  return old;
}
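
/* Usage sketch (illustrative; `mi_malloc` is the regular mimalloc entry point,
   which allocates from the thread's current default heap):

     mi_heap_t* h   = mi_heap_new();
     mi_heap_t* old = mi_heap_set_default(h);  // subsequent mi_malloc calls use `h`
     void* p = mi_malloc(64);
     mi_heap_set_default(old);                 // restore the previous default heap
*/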




/* -----------------------------------------------------------
  Analysis
----------------------------------------------------------- */

// static since it is not thread safe to access heaps from other threads.
static mi_heap_t* mi_heap_of_block(const void* p) {
  if (p == NULL) return NULL;
  mi_segment_t* segment = _mi_ptr_segment(p);
  bool valid = (_mi_ptr_cookie(segment) == segment->cookie);
  mi_assert_internal(valid);
  if mi_unlikely(!valid) return NULL;
  return mi_page_heap(_mi_segment_page_of(segment,p));
}

bool mi_heap_contains_block(mi_heap_t* heap, const void* p) {
  mi_assert(heap != NULL);
  if (heap==NULL || !mi_heap_is_initialized(heap)) return false;
  return (heap == mi_heap_of_block(p));
}


static bool mi_heap_page_check_owned(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* p, void* vfound) {
  MI_UNUSED(heap);
  MI_UNUSED(pq);
  bool* found = (bool*)vfound;
  void* start = mi_page_start(page);
  void* end = (uint8_t*)start + (page->capacity * mi_page_block_size(page));
  *found = (p >= start && p < end);
  return (!*found); // continue if not found
}

bool mi_heap_check_owned(mi_heap_t* heap, const void* p) {
  mi_assert(heap != NULL);
  if (heap==NULL || !mi_heap_is_initialized(heap)) return false;
  if (((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) return false; // only aligned pointers
  bool found = false;
  mi_heap_visit_pages(heap, &mi_heap_page_check_owned, (void*)p, &found);
  return found;
}

bool mi_check_owned(const void* p) {
  return mi_heap_check_owned(mi_prim_get_default_heap(), p);
}
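
/* Usage sketch (illustrative): the ownership checks above walk the heap's
   pages, so they are meant for debugging and assertions rather than hot paths.

     void* p = mi_heap_malloc(h, 32);            // `h` is a heap created with mi_heap_new
     bool owned = mi_heap_check_owned(h, p);     // true: p was allocated from `h`
     bool other = mi_heap_check_owned(h, &p);    // false: a stack address is not in any heap page
*/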

/* -----------------------------------------------------------
  Visit all heap blocks and areas
  Todo: enable visiting abandoned pages, and
        enable visiting all blocks of all heaps across threads
----------------------------------------------------------- */

// Separate struct to keep `mi_page_t` out of the public interface
typedef struct mi_heap_area_ex_s {
  mi_heap_area_t area;
  mi_page_t* page;
} mi_heap_area_ex_t;

static bool mi_heap_area_visit_blocks(const mi_heap_area_ex_t* xarea, mi_block_visit_fun* visitor, void* arg) {
  mi_assert(xarea != NULL);
  if (xarea==NULL) return true;
  const mi_heap_area_t* area = &xarea->area;
  mi_page_t* page = xarea->page;
  mi_assert(page != NULL);
  if (page == NULL) return true;

  _mi_page_free_collect(page,true);
  mi_assert_internal(page->local_free == NULL);
  if (page->used == 0) return true;

  const size_t bsize = mi_page_block_size(page);
  const size_t ubsize = mi_page_usable_block_size(page); // without padding
  size_t psize;
  uint8_t* pstart = _mi_segment_page_start(_mi_page_segment(page), page, &psize);

  if (page->capacity == 1) {
    // optimize page with one block
    mi_assert_internal(page->used == 1 && page->free == NULL);
    return visitor(mi_page_heap(page), area, pstart, ubsize, arg);
  }

  // create a bitmap of free blocks.
  #define MI_MAX_BLOCKS (MI_SMALL_PAGE_SIZE / sizeof(void*))
  uintptr_t free_map[MI_MAX_BLOCKS / sizeof(uintptr_t)];
  memset(free_map, 0, sizeof(free_map));

  #if MI_DEBUG>1
  size_t free_count = 0;
  #endif
  for (mi_block_t* block = page->free; block != NULL; block = mi_block_next(page,block)) {
    #if MI_DEBUG>1
    free_count++;
    #endif
    mi_assert_internal((uint8_t*)block >= pstart && (uint8_t*)block < (pstart + psize));
    size_t offset = (uint8_t*)block - pstart;
    mi_assert_internal(offset % bsize == 0);
    size_t blockidx = offset / bsize; // Todo: avoid division?
    mi_assert_internal( blockidx < MI_MAX_BLOCKS);
    size_t bitidx = (blockidx / sizeof(uintptr_t));
    size_t bit = blockidx - (bitidx * sizeof(uintptr_t));
    free_map[bitidx] |= ((uintptr_t)1 << bit);
  }
  mi_assert_internal(page->capacity == (free_count + page->used));

  // walk through all blocks skipping the free ones
  #if MI_DEBUG>1
  size_t used_count = 0;
  #endif
  for (size_t i = 0; i < page->capacity; i++) {
    size_t bitidx = (i / sizeof(uintptr_t));
    size_t bit = i - (bitidx * sizeof(uintptr_t));
    uintptr_t m = free_map[bitidx];
    if (bit == 0 && m == UINTPTR_MAX) {
      i += (sizeof(uintptr_t) - 1); // skip a run of free blocks
    }
    else if ((m & ((uintptr_t)1 << bit)) == 0) {
      #if MI_DEBUG>1
      used_count++;
      #endif
      uint8_t* block = pstart + (i * bsize);
      if (!visitor(mi_page_heap(page), area, block, ubsize, arg)) return false;
    }
  }
  mi_assert_internal(page->used == used_count);
  return true;
}

typedef bool (mi_heap_area_visit_fun)(const mi_heap_t* heap, const mi_heap_area_ex_t* area, void* arg);


static bool mi_heap_visit_areas_page(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* vfun, void* arg) {
  MI_UNUSED(heap);
  MI_UNUSED(pq);
  mi_heap_area_visit_fun* fun = (mi_heap_area_visit_fun*)vfun;
  mi_heap_area_ex_t xarea;
  const size_t bsize = mi_page_block_size(page);
  const size_t ubsize = mi_page_usable_block_size(page);
  xarea.page = page;
  xarea.area.reserved = page->reserved * bsize;
  xarea.area.committed = page->capacity * bsize;
  xarea.area.blocks = mi_page_start(page);
  xarea.area.used = page->used; // number of blocks in use (#553)
  xarea.area.block_size = ubsize;
  xarea.area.full_block_size = bsize;
  return fun(heap, &xarea, arg);
}

// Visit all heap pages as areas
static bool mi_heap_visit_areas(const mi_heap_t* heap, mi_heap_area_visit_fun* visitor, void* arg) {
  if (visitor == NULL) return false;
  return mi_heap_visit_pages((mi_heap_t*)heap, &mi_heap_visit_areas_page, (void*)(visitor), arg); // note: function pointer to void* :-{
}

// Just to pass arguments
typedef struct mi_visit_blocks_args_s {
  bool visit_blocks;
  mi_block_visit_fun* visitor;
  void* arg;
} mi_visit_blocks_args_t;

static bool mi_heap_area_visitor(const mi_heap_t* heap, const mi_heap_area_ex_t* xarea, void* arg) {
  mi_visit_blocks_args_t* args = (mi_visit_blocks_args_t*)arg;
  if (!args->visitor(heap, &xarea->area, NULL, xarea->area.block_size, args->arg)) return false;
  if (args->visit_blocks) {
    return mi_heap_area_visit_blocks(xarea, args->visitor, args->arg);
  }
  else {
    return true;
  }
}

// Visit all blocks in a heap
bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_blocks, mi_block_visit_fun* visitor, void* arg) {
  mi_visit_blocks_args_t args = { visit_blocks, visitor, arg };
  return mi_heap_visit_areas(heap, &mi_heap_area_visitor, &args);
}
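
/* Usage sketch (illustrative): a `mi_block_visit_fun` is first called once per
   area with `block == NULL`, and then once per live block when `visit_blocks`
   is true (see `mi_heap_area_visitor` above). A simple visitor that sums the
   bytes in use might look like this (the callback name and the `size_t`
   accumulator are hypothetical):

     static bool count_bytes(const mi_heap_t* heap, const mi_heap_area_t* area,
                             void* block, size_t block_size, void* arg) {
       MI_UNUSED(heap); MI_UNUSED(area);
       if (block != NULL) { *(size_t*)arg += block_size; }
       return true;                         // keep visiting
     }

     size_t total = 0;
     mi_heap_visit_blocks(h, true /* visit blocks */, &count_bytes, &total);
*/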