Path: blob/main/system/lib/mimalloc/src/heap.c
/*----------------------------------------------------------------------------
Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/

#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "mimalloc/atomic.h"
#include "mimalloc/prim.h"   // mi_prim_get_default_heap

#include <string.h>          // memset, memcpy

#if defined(_MSC_VER) && (_MSC_VER < 1920)
#pragma warning(disable:4204)  // non-constant aggregate initializer
#endif

/* -----------------------------------------------------------
  Helpers
----------------------------------------------------------- */

// return `true` if ok, `false` to break
typedef bool (heap_page_visitor_fun)(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2);

// Visit all pages in a heap; returns `false` if break was called.
static bool mi_heap_visit_pages(mi_heap_t* heap, heap_page_visitor_fun* fn, void* arg1, void* arg2)
{
  if (heap==NULL || heap->page_count==0) return 0;

  // visit all pages
  #if MI_DEBUG>1
  size_t total = heap->page_count;
  size_t count = 0;
  #endif

  for (size_t i = 0; i <= MI_BIN_FULL; i++) {
    mi_page_queue_t* pq = &heap->pages[i];
    mi_page_t* page = pq->first;
    while(page != NULL) {
      mi_page_t* next = page->next;  // save next in case the page gets removed from the queue
      mi_assert_internal(mi_page_heap(page) == heap);
      #if MI_DEBUG>1
      count++;
      #endif
      if (!fn(heap, pq, page, arg1, arg2)) return false;
      page = next;  // and continue
    }
  }
  mi_assert_internal(count == total);
  return true;
}


#if MI_DEBUG>=2
static bool mi_heap_page_is_valid(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
  MI_UNUSED(arg1);
  MI_UNUSED(arg2);
  MI_UNUSED(pq);
  mi_assert_internal(mi_page_heap(page) == heap);
  mi_segment_t* segment = _mi_page_segment(page);
  mi_assert_internal(segment->thread_id == heap->thread_id);
  mi_assert_expensive(_mi_page_is_valid(page));
  return true;
}
#endif
#if MI_DEBUG>=3
static bool mi_heap_is_valid(mi_heap_t* heap) {
  mi_assert_internal(heap!=NULL);
  mi_heap_visit_pages(heap, &mi_heap_page_is_valid, NULL, NULL);
  return true;
}
#endif


/* -----------------------------------------------------------
  "Collect" pages by migrating `local_free` and `thread_free`
  lists and freeing empty pages. This is done when a thread
  stops (and in that case abandons pages if there are still
  blocks alive)
----------------------------------------------------------- */

typedef enum mi_collect_e {
  MI_NORMAL,
  MI_FORCE,
  MI_ABANDON
} mi_collect_t;


static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg_collect, void* arg2) {
  MI_UNUSED(arg2);
  MI_UNUSED(heap);
  mi_assert_internal(mi_heap_page_is_valid(heap, pq, page, NULL, NULL));
  mi_collect_t collect = *((mi_collect_t*)arg_collect);
  _mi_page_free_collect(page, collect >= MI_FORCE);
  if (collect == MI_FORCE) {
    // note: call before a potential `_mi_page_free` as the segment may be freed if this was the last used page in that segment.
    mi_segment_t* segment = _mi_page_segment(page);
    _mi_segment_collect(segment, true /* force? */, &heap->tld->segments);
  }
  if (mi_page_all_free(page)) {
    // no more used blocks, free the page.
    // note: this will free retired pages as well.
    _mi_page_free(page, pq, collect >= MI_FORCE);
  }
  else if (collect == MI_ABANDON) {
    // still used blocks but the thread is done; abandon the page
    _mi_page_abandon(page, pq);
  }
  return true;  // don't break
}

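/* Usage sketch (illustrative only): page visitors like the one above thread two opaque
   arguments through `mi_heap_visit_pages` and return `true` to keep iterating.
   `mi_count_pages_visitor` and `count` below are hypothetical names, not library code:

     static bool mi_count_pages_visitor(mi_heap_t* heap, mi_page_queue_t* pq,
                                        mi_page_t* page, void* arg1, void* arg2) {
       MI_UNUSED(heap); MI_UNUSED(pq); MI_UNUSED(page); MI_UNUSED(arg1);
       (*(size_t*)arg2)++;   // arg2 points at a running page count
       return true;          // keep visiting
     }

     size_t count = 0;
     mi_heap_visit_pages(heap, &mi_count_pages_visitor, NULL, &count);
*/
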
static bool mi_heap_page_never_delayed_free(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
  MI_UNUSED(arg1);
  MI_UNUSED(arg2);
  MI_UNUSED(heap);
  MI_UNUSED(pq);
  _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);
  return true;  // don't break
}

static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
{
  if (heap==NULL || !mi_heap_is_initialized(heap)) return;

  const bool force = (collect >= MI_FORCE);
  _mi_deferred_free(heap, force);

  // python/cpython#112532: we may be called from a thread that is not the owner of the heap
  const bool is_main_thread = (_mi_is_main_thread() && heap->thread_id == _mi_thread_id());

  // note: never reclaim on collect but leave it to threads that need storage to reclaim
  const bool force_main =
    #ifdef NDEBUG
      collect == MI_FORCE
    #else
      collect >= MI_FORCE
    #endif
      && is_main_thread && mi_heap_is_backing(heap) && !heap->no_reclaim;

  if (force_main) {
    // the main thread is abandoned (end-of-program), try to reclaim all abandoned segments.
    // if all memory is freed by now, all segments should be freed.
    _mi_abandoned_reclaim_all(heap, &heap->tld->segments);
  }

  // if abandoning, mark all pages to no longer add to delayed_free
  if (collect == MI_ABANDON) {
    mi_heap_visit_pages(heap, &mi_heap_page_never_delayed_free, NULL, NULL);
  }

  // free all current thread delayed blocks.
  // (if abandoning, after this there are no more thread-delayed references into the pages.)
  _mi_heap_delayed_free_all(heap);

  // collect retired pages
  _mi_heap_collect_retired(heap, force);

  // collect all pages owned by this thread
  mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL);
  mi_assert_internal(collect != MI_ABANDON || mi_atomic_load_ptr_acquire(mi_block_t, &heap->thread_delayed_free) == NULL);

  // collect abandoned segments (in particular, purge expired parts of segments in the abandoned segment list)
  // note: forced purge can be quite expensive if many threads are created/destroyed so we do not force on abandonment
  _mi_abandoned_collect(heap, collect == MI_FORCE /* force? */, &heap->tld->segments);

  // if forced, collect thread data cache on program-exit (or shared library unload)
  if (force && is_main_thread && mi_heap_is_backing(heap)) {
    _mi_thread_data_collect();  // collect thread data cache
  }

  // collect arenas (this is program wide so don't force purges on abandonment of threads)
  _mi_arenas_collect(collect == MI_FORCE /* force purge? */, &heap->tld->stats);
}

void _mi_heap_collect_abandon(mi_heap_t* heap) {
  mi_heap_collect_ex(heap, MI_ABANDON);
}

void mi_heap_collect(mi_heap_t* heap, bool force) mi_attr_noexcept {
  mi_heap_collect_ex(heap, (force ? MI_FORCE : MI_NORMAL));
}

void mi_collect(bool force) mi_attr_noexcept {
  mi_heap_collect(mi_prim_get_default_heap(), force);
}

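/* Usage sketch (illustrative only): from application code a collection is typically
   triggered through the public entry points above, for example at a quiescent point
   after tearing down a subsystem:

     mi_collect(false);   // normal: migrate local/thread free lists and free empty pages
     mi_collect(true);    // forced: additionally collect segments, purge, and collect arenas
*/
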
/* -----------------------------------------------------------
  Heap new
----------------------------------------------------------- */

mi_heap_t* mi_heap_get_default(void) {
  mi_thread_init();
  return mi_prim_get_default_heap();
}

static bool mi_heap_is_default(const mi_heap_t* heap) {
  return (heap == mi_prim_get_default_heap());
}


mi_heap_t* mi_heap_get_backing(void) {
  mi_heap_t* heap = mi_heap_get_default();
  mi_assert_internal(heap!=NULL);
  mi_heap_t* bheap = heap->tld->heap_backing;
  mi_assert_internal(bheap!=NULL);
  mi_assert_internal(bheap->thread_id == _mi_thread_id());
  return bheap;
}

void _mi_heap_init(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool noreclaim, uint8_t tag) {
  _mi_memcpy_aligned(heap, &_mi_heap_empty, sizeof(mi_heap_t));
  heap->tld = tld;
  heap->thread_id = _mi_thread_id();
  heap->arena_id = arena_id;
  heap->no_reclaim = noreclaim;
  heap->tag = tag;
  if (heap == tld->heap_backing) {
    _mi_random_init(&heap->random);
  }
  else {
    _mi_random_split(&tld->heap_backing->random, &heap->random);
  }
  heap->cookie = _mi_heap_random_next(heap) | 1;
  heap->keys[0] = _mi_heap_random_next(heap);
  heap->keys[1] = _mi_heap_random_next(heap);
  // push on the thread local heaps list
  heap->next = heap->tld->heaps;
  heap->tld->heaps = heap;
}

mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id) {
  mi_heap_t* bheap = mi_heap_get_backing();
  mi_heap_t* heap = mi_heap_malloc_tp(bheap, mi_heap_t);  // todo: OS allocate in secure mode?
  if (heap == NULL) return NULL;
  // don't reclaim abandoned pages: otherwise `mi_heap_destroy` would be unsafe
  _mi_heap_init(heap, bheap->tld, arena_id, true /* no reclaim */, 0 /* default tag */);
  return heap;
}

mi_decl_nodiscard mi_heap_t* mi_heap_new(void) {
  return mi_heap_new_in_arena(_mi_arena_id_none());
}

bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid) {
  return _mi_arena_memid_is_suitable(memid, heap->arena_id);
}

uintptr_t _mi_heap_random_next(mi_heap_t* heap) {
  return _mi_random_next(&heap->random);
}

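/* Usage sketch (illustrative only): a first-class heap created with `mi_heap_new`
   belongs to the creating thread; blocks allocated from it are freed as usual, and the
   heap itself is released with `mi_heap_delete` (or `mi_heap_destroy`, see below):

     mi_heap_t* h = mi_heap_new();
     if (h != NULL) {
       void* p = mi_heap_malloc(h, 128);
       mi_free(p);          // individual blocks are freed as usual
       mi_heap_delete(h);   // any remaining pages migrate to the backing heap
     }
*/
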
// zero out the page queues
static void mi_heap_reset_pages(mi_heap_t* heap) {
  mi_assert_internal(heap != NULL);
  mi_assert_internal(mi_heap_is_initialized(heap));
  // TODO: copy full empty heap instead?
  memset(&heap->pages_free_direct, 0, sizeof(heap->pages_free_direct));
  _mi_memcpy_aligned(&heap->pages, &_mi_heap_empty.pages, sizeof(heap->pages));
  heap->thread_delayed_free = NULL;
  heap->page_count = 0;
}

// called from `mi_heap_destroy` and `mi_heap_delete` to free the internal heap resources.
static void mi_heap_free(mi_heap_t* heap) {
  mi_assert(heap != NULL);
  mi_assert_internal(mi_heap_is_initialized(heap));
  if (heap==NULL || !mi_heap_is_initialized(heap)) return;
  if (mi_heap_is_backing(heap)) return;  // don't free the backing heap

  // reset default
  if (mi_heap_is_default(heap)) {
    _mi_heap_set_default_direct(heap->tld->heap_backing);
  }

  // remove ourselves from the thread local heaps list
  // linear search but we expect the number of heaps to be relatively small
  mi_heap_t* prev = NULL;
  mi_heap_t* curr = heap->tld->heaps;
  while (curr != heap && curr != NULL) {
    prev = curr;
    curr = curr->next;
  }
  mi_assert_internal(curr == heap);
  if (curr == heap) {
    if (prev != NULL) { prev->next = heap->next; }
    else { heap->tld->heaps = heap->next; }
  }
  mi_assert_internal(heap->tld->heaps != NULL);

  // and free the used memory
  mi_free(heap);
}

// return a heap on the same thread as `heap` specialized for the specified tag (if it exists)
mi_heap_t* _mi_heap_by_tag(mi_heap_t* heap, uint8_t tag) {
  if (heap->tag == tag) {
    return heap;
  }
  for (mi_heap_t* curr = heap->tld->heaps; curr != NULL; curr = curr->next) {
    if (curr->tag == tag) {
      return curr;
    }
  }
  return NULL;
}

/* -----------------------------------------------------------
  Heap destroy
----------------------------------------------------------- */

static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
  MI_UNUSED(arg1);
  MI_UNUSED(arg2);
  MI_UNUSED(heap);
  MI_UNUSED(pq);

  // ensure no more thread_delayed_free will be added
  _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);

  // stats
  const size_t bsize = mi_page_block_size(page);
  if (bsize > MI_MEDIUM_OBJ_SIZE_MAX) {
    if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
      mi_heap_stat_decrease(heap, large, bsize);
    }
    else {
      mi_heap_stat_decrease(heap, huge, bsize);
    }
  }
  #if (MI_STAT)
  _mi_page_free_collect(page, false);  // update used count
  const size_t inuse = page->used;
  if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
    mi_heap_stat_decrease(heap, normal, bsize * inuse);
    #if (MI_STAT>1)
    mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], inuse);
    #endif
  }
  mi_heap_stat_decrease(heap, malloc, bsize * inuse);  // todo: off for aligned blocks...
  #endif

  // pretend it is all free now
  mi_assert_internal(mi_page_thread_free(page) == NULL);
  page->used = 0;

  // and free the page
  // mi_page_free(page,false);
  page->next = NULL;
  page->prev = NULL;
  _mi_segment_page_free(page, false /* no force? */, &heap->tld->segments);

  return true;  // keep going
}

void _mi_heap_destroy_pages(mi_heap_t* heap) {
  mi_heap_visit_pages(heap, &_mi_heap_page_destroy, NULL, NULL);
  mi_heap_reset_pages(heap);
}

#if MI_TRACK_HEAP_DESTROY
static bool mi_cdecl mi_heap_track_block_free(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg) {
  MI_UNUSED(heap); MI_UNUSED(area); MI_UNUSED(arg); MI_UNUSED(block_size);
  mi_track_free_size(block, mi_usable_size(block));
  return true;
}
#endif

void mi_heap_destroy(mi_heap_t* heap) {
  mi_assert(heap != NULL);
  mi_assert(mi_heap_is_initialized(heap));
  mi_assert(heap->no_reclaim);
  mi_assert_expensive(mi_heap_is_valid(heap));
  if (heap==NULL || !mi_heap_is_initialized(heap)) return;
  if (!heap->no_reclaim) {
    // don't free in case it may contain reclaimed pages
    mi_heap_delete(heap);
  }
  else {
    // track all blocks as freed
    #if MI_TRACK_HEAP_DESTROY
    mi_heap_visit_blocks(heap, true, mi_heap_track_block_free, NULL);
    #endif
    // free all pages
    _mi_heap_destroy_pages(heap);
    mi_heap_free(heap);
  }
}

// forcefully destroy all heaps in the current thread
void _mi_heap_unsafe_destroy_all(void) {
  mi_heap_t* bheap = mi_heap_get_backing();
  mi_heap_t* curr = bheap->tld->heaps;
  while (curr != NULL) {
    mi_heap_t* next = curr->next;
    if (curr->no_reclaim) {
      mi_heap_destroy(curr);
    }
    else {
      _mi_heap_destroy_pages(curr);
    }
    curr = next;
  }
}

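/* Usage sketch (illustrative only): `mi_heap_destroy` releases every block still
   allocated in the heap in one go, so it must only be used when no pointers into the
   heap remain, and only on heaps that do not reclaim abandoned pages (as created by
   `mi_heap_new`). The variables below are hypothetical:

     mi_heap_t* scratch = mi_heap_new();
     void* a = mi_heap_malloc(scratch, 64);
     void* b = mi_heap_malloc(scratch, 64);
     // ... use a and b ...
     mi_heap_destroy(scratch);   // a and b are released without individual mi_free calls
*/
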
/* -----------------------------------------------------------
  Safe Heap delete
----------------------------------------------------------- */

// Transfer the pages from one heap to the other
static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) {
  mi_assert_internal(heap!=NULL);
  if (from==NULL || from->page_count == 0) return;

  // reduce the size of the delayed frees
  _mi_heap_delayed_free_partial(from);

  // transfer all pages by appending the queues; this will set a new heap field
  // so threads may do delayed frees in either heap for a while.
  // note: appending waits for each page to not be in the `MI_DELAYED_FREEING` state
  // so after this only the new heap will get delayed frees
  for (size_t i = 0; i <= MI_BIN_FULL; i++) {
    mi_page_queue_t* pq = &heap->pages[i];
    mi_page_queue_t* append = &from->pages[i];
    size_t pcount = _mi_page_queue_append(heap, pq, append);
    heap->page_count += pcount;
    from->page_count -= pcount;
  }
  mi_assert_internal(from->page_count == 0);

  // and do outstanding delayed frees in the `from` heap
  // note: be careful here as the `heap` field in all those pages no longer points to `from`;
  // this turns out to be ok as `_mi_heap_delayed_free` only visits the list and calls
  // the regular `_mi_free_delayed_block` which is safe.
  _mi_heap_delayed_free_all(from);
  #if !defined(_MSC_VER) || (_MSC_VER > 1900)  // somehow the following line gives an error in VS2015, issue #353
  mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_block_t, &from->thread_delayed_free) == NULL);
  #endif

  // and reset the `from` heap
  mi_heap_reset_pages(from);
}

// Safely delete a heap without freeing any still allocated blocks in that heap.
void mi_heap_delete(mi_heap_t* heap)
{
  mi_assert(heap != NULL);
  mi_assert(mi_heap_is_initialized(heap));
  mi_assert_expensive(mi_heap_is_valid(heap));
  if (heap==NULL || !mi_heap_is_initialized(heap)) return;

  if (!mi_heap_is_backing(heap)) {
    // transfer still used pages to the backing heap
    mi_heap_absorb(heap->tld->heap_backing, heap);
  }
  else {
    // the backing heap abandons its pages
    _mi_heap_collect_abandon(heap);
  }
  mi_assert_internal(heap->page_count==0);
  mi_heap_free(heap);
}

mi_heap_t* mi_heap_set_default(mi_heap_t* heap) {
  mi_assert(heap != NULL);
  mi_assert(mi_heap_is_initialized(heap));
  if (heap==NULL || !mi_heap_is_initialized(heap)) return NULL;
  mi_assert_expensive(mi_heap_is_valid(heap));
  mi_heap_t* old = mi_prim_get_default_heap();
  _mi_heap_set_default_direct(heap);
  return old;
}


/* -----------------------------------------------------------
  Analysis
----------------------------------------------------------- */

// static since it is not thread safe to access heaps from other threads.
static mi_heap_t* mi_heap_of_block(const void* p) {
  if (p == NULL) return NULL;
  mi_segment_t* segment = _mi_ptr_segment(p);
  bool valid = (_mi_ptr_cookie(segment) == segment->cookie);
  mi_assert_internal(valid);
  if mi_unlikely(!valid) return NULL;
  return mi_page_heap(_mi_segment_page_of(segment, p));
}

bool mi_heap_contains_block(mi_heap_t* heap, const void* p) {
  mi_assert(heap != NULL);
  if (heap==NULL || !mi_heap_is_initialized(heap)) return false;
  return (heap == mi_heap_of_block(p));
}


static bool mi_heap_page_check_owned(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* p, void* vfound) {
  MI_UNUSED(heap);
  MI_UNUSED(pq);
  bool* found = (bool*)vfound;
  void* start = mi_page_start(page);
  void* end = (uint8_t*)start + (page->capacity * mi_page_block_size(page));
  *found = (p >= start && p < end);
  return (!*found);  // continue if not found
}

bool mi_heap_check_owned(mi_heap_t* heap, const void* p) {
  mi_assert(heap != NULL);
  if (heap==NULL || !mi_heap_is_initialized(heap)) return false;
  if (((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) return false;  // only aligned pointers
  bool found = false;
  mi_heap_visit_pages(heap, &mi_heap_page_check_owned, (void*)p, &found);
  return found;
}

bool mi_check_owned(const void* p) {
  return mi_heap_check_owned(mi_prim_get_default_heap(), p);
}

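/* Usage sketch (illustrative only): the analysis helpers above answer ownership
   questions about the calling thread's heaps; `heap` and `p` below are hypothetical:

     void* p = mi_heap_malloc(heap, 32);
     bool owned    = mi_heap_check_owned(heap, p);      // p lies within a page of `heap`
     bool contains = mi_heap_contains_block(heap, p);   // p's block belongs to `heap`
*/
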
/* -----------------------------------------------------------
  Visit all heap blocks and areas
  Todo: enable visiting abandoned pages, and
        enable visiting all blocks of all heaps across threads
----------------------------------------------------------- */

// Separate struct to keep `mi_page_t` out of the public interface
typedef struct mi_heap_area_ex_s {
  mi_heap_area_t area;
  mi_page_t* page;
} mi_heap_area_ex_t;

static bool mi_heap_area_visit_blocks(const mi_heap_area_ex_t* xarea, mi_block_visit_fun* visitor, void* arg) {
  mi_assert(xarea != NULL);
  if (xarea==NULL) return true;
  const mi_heap_area_t* area = &xarea->area;
  mi_page_t* page = xarea->page;
  mi_assert(page != NULL);
  if (page == NULL) return true;

  _mi_page_free_collect(page, true);
  mi_assert_internal(page->local_free == NULL);
  if (page->used == 0) return true;

  const size_t bsize = mi_page_block_size(page);
  const size_t ubsize = mi_page_usable_block_size(page);  // without padding
  size_t psize;
  uint8_t* pstart = _mi_segment_page_start(_mi_page_segment(page), page, &psize);

  if (page->capacity == 1) {
    // optimize page with one block
    mi_assert_internal(page->used == 1 && page->free == NULL);
    return visitor(mi_page_heap(page), area, pstart, ubsize, arg);
  }

  // create a bitmap of free blocks.
  #define MI_MAX_BLOCKS (MI_SMALL_PAGE_SIZE / sizeof(void*))
  uintptr_t free_map[MI_MAX_BLOCKS / sizeof(uintptr_t)];
  memset(free_map, 0, sizeof(free_map));

  #if MI_DEBUG>1
  size_t free_count = 0;
  #endif
  for (mi_block_t* block = page->free; block != NULL; block = mi_block_next(page, block)) {
    #if MI_DEBUG>1
    free_count++;
    #endif
    mi_assert_internal((uint8_t*)block >= pstart && (uint8_t*)block < (pstart + psize));
    size_t offset = (uint8_t*)block - pstart;
    mi_assert_internal(offset % bsize == 0);
    size_t blockidx = offset / bsize;  // Todo: avoid division?
    mi_assert_internal(blockidx < MI_MAX_BLOCKS);
    size_t bitidx = (blockidx / sizeof(uintptr_t));
    size_t bit = blockidx - (bitidx * sizeof(uintptr_t));
    free_map[bitidx] |= ((uintptr_t)1 << bit);
  }
  mi_assert_internal(page->capacity == (free_count + page->used));

  // walk through all blocks skipping the free ones
  #if MI_DEBUG>1
  size_t used_count = 0;
  #endif
  for (size_t i = 0; i < page->capacity; i++) {
    size_t bitidx = (i / sizeof(uintptr_t));
    size_t bit = i - (bitidx * sizeof(uintptr_t));
    uintptr_t m = free_map[bitidx];
    if (bit == 0 && m == UINTPTR_MAX) {
      i += (sizeof(uintptr_t) - 1);  // skip a run of free blocks
    }
    else if ((m & ((uintptr_t)1 << bit)) == 0) {
      #if MI_DEBUG>1
      used_count++;
      #endif
      uint8_t* block = pstart + (i * bsize);
      if (!visitor(mi_page_heap(page), area, block, ubsize, arg)) return false;
    }
  }
  mi_assert_internal(page->used == used_count);
  return true;
}

typedef bool (mi_heap_area_visit_fun)(const mi_heap_t* heap, const mi_heap_area_ex_t* area, void* arg);


static bool mi_heap_visit_areas_page(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* vfun, void* arg) {
  MI_UNUSED(heap);
  MI_UNUSED(pq);
  mi_heap_area_visit_fun* fun = (mi_heap_area_visit_fun*)vfun;
  mi_heap_area_ex_t xarea;
  const size_t bsize = mi_page_block_size(page);
  const size_t ubsize = mi_page_usable_block_size(page);
  xarea.page = page;
  xarea.area.reserved = page->reserved * bsize;
  xarea.area.committed = page->capacity * bsize;
  xarea.area.blocks = mi_page_start(page);
  xarea.area.used = page->used;  // number of blocks in use (#553)
  xarea.area.block_size = ubsize;
  xarea.area.full_block_size = bsize;
  return fun(heap, &xarea, arg);
}

// Visit all heap pages as areas
static bool mi_heap_visit_areas(const mi_heap_t* heap, mi_heap_area_visit_fun* visitor, void* arg) {
  if (visitor == NULL) return false;
  return mi_heap_visit_pages((mi_heap_t*)heap, &mi_heap_visit_areas_page, (void*)(visitor), arg);  // note: function pointer to void* :-{
}

// Just to pass arguments
typedef struct mi_visit_blocks_args_s {
  bool visit_blocks;
  mi_block_visit_fun* visitor;
  void* arg;
} mi_visit_blocks_args_t;

static bool mi_heap_area_visitor(const mi_heap_t* heap, const mi_heap_area_ex_t* xarea, void* arg) {
  mi_visit_blocks_args_t* args = (mi_visit_blocks_args_t*)arg;
  if (!args->visitor(heap, &xarea->area, NULL, xarea->area.block_size, args->arg)) return false;
  if (args->visit_blocks) {
    return mi_heap_area_visit_blocks(xarea, args->visitor, args->arg);
  }
  else {
    return true;
  }
}

// Visit all blocks in a heap
bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_blocks, mi_block_visit_fun* visitor, void* arg) {
  mi_visit_blocks_args_t args = { visit_blocks, visitor, arg };
  return mi_heap_visit_areas(heap, &mi_heap_area_visitor, &args);
}

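/* Usage sketch (illustrative only): `mi_heap_visit_blocks` invokes the visitor once per
   area with `block == NULL`, and then (if `visit_blocks` is true) once per live block.
   A hypothetical callback that sums the live usable bytes of the default heap:

     static bool mi_sum_live_bytes(const mi_heap_t* heap, const mi_heap_area_t* area,
                                   void* block, size_t block_size, void* arg) {
       MI_UNUSED(heap); MI_UNUSED(area);
       if (block != NULL) { *(size_t*)arg += block_size; }  // skip the per-area call
       return true;   // keep visiting
     }

     size_t live = 0;
     mi_heap_visit_blocks(mi_heap_get_default(), true, &mi_sum_live_bytes, &live);  // true: also visit blocks
*/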