/* ----------------------------------------------------------------------------
Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#if !defined(MI_IN_ALLOC_C)
#error "this file should be included from 'alloc.c' (so aliases can work from alloc-override)"
// add includes to help an IDE
#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "mimalloc/atomic.h"
#include "mimalloc/prim.h"   // _mi_prim_thread_id()
#endif

// forward declarations
static void   mi_check_padding(const mi_page_t* page, const mi_block_t* block);
static bool   mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block);
static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block);
static void   mi_stat_free(const mi_page_t* page, const mi_block_t* block);


// ------------------------------------------------------
// Free
// ------------------------------------------------------

// forward declaration of multi-threaded free (`_mt`) (or free in huge block if compiled with MI_HUGE_PAGE_ABANDON)
static mi_decl_noinline void mi_free_block_mt(mi_page_t* page, mi_segment_t* segment, mi_block_t* block);

// regular free of a (thread local) block pointer
// fast path written carefully to prevent register spilling on the stack
static inline void mi_free_block_local(mi_page_t* page, mi_block_t* block, bool track_stats, bool check_full)
{
  // checks
  if mi_unlikely(mi_check_is_double_free(page, block)) return;
  mi_check_padding(page, block);
  if (track_stats) { mi_stat_free(page, block); }
  #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN
  if (!mi_page_is_huge(page)) {   // huge page content may be already decommitted
    memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
  }
  #endif
  if (track_stats) { mi_track_free_size(block, mi_page_usable_size_of(page, block)); }  // faster than mi_usable_size as we already know the page and that the block pointer is already un-aligned

  // actual free: push on the local free list
  mi_block_set_next(page, block, page->local_free);
  page->local_free = block;
  if mi_unlikely(--page->used == 0) {
    _mi_page_retire(page);
  }
  else if mi_unlikely(check_full && mi_page_is_in_full(page)) {
    _mi_page_unfull(page);
  }
}
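
// Note: blocks are pushed on the page-local `local_free` list rather than directly
// on the allocation `free` list; they are moved over (roughly, in `_mi_page_free_collect`)
// only once the `free` list is exhausted, which keeps this fast path short.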

// Adjust a block that was allocated aligned, to the actual start of the block in the page.
// note: this can be called from `mi_free_generic_mt` where a non-owning thread accesses the
// `page_start` and `block_size` fields; however these are constant and the page won't be
// deallocated (as the block we are freeing keeps it alive) and thus safe to read concurrently.
mi_block_t* _mi_page_ptr_unalign(const mi_page_t* page, const void* p) {
  mi_assert_internal(page!=NULL && p!=NULL);

  size_t diff = (uint8_t*)p - page->page_start;
  size_t adjust;
  if mi_likely(page->block_size_shift != 0) {
    adjust = diff & (((size_t)1 << page->block_size_shift) - 1);
  }
  else {
    adjust = diff % mi_page_block_size(page);
  }

  return (mi_block_t*)((uintptr_t)p - adjust);
}
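
// For illustration: with `page_start = 0x1000` and 64-byte blocks
// (`block_size_shift == 6`), a pointer `p = 0x10A8` gives `diff = 0xA8`,
// `adjust = 0xA8 & 0x3F = 0x28`, and the returned block start is `0x1080`.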

// free a local pointer (page parameter comes first for better codegen)
static void mi_decl_noinline mi_free_generic_local(mi_page_t* page, mi_segment_t* segment, void* p) mi_attr_noexcept {
  MI_UNUSED(segment);
  mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(page, p) : (mi_block_t*)p);
  mi_free_block_local(page, block, true /* track stats */, true /* check for a full page */);
}

// free a pointer owned by another thread (page parameter comes first for better codegen)
static void mi_decl_noinline mi_free_generic_mt(mi_page_t* page, mi_segment_t* segment, void* p) mi_attr_noexcept {
  mi_block_t* const block = _mi_page_ptr_unalign(page, p); // don't check `has_aligned` flag to avoid a race (issue #865)
  mi_free_block_mt(page, segment, block);
}

// generic free (for runtime integration)
void mi_decl_noinline _mi_free_generic(mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept {
  if (is_local) mi_free_generic_local(page,segment,p);
  else mi_free_generic_mt(page,segment,p);
}

// Get the segment data belonging to a pointer
// This is just a single `and` in release mode but does further checks in debug mode
// (and secure mode) to see if this was a valid pointer.
static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* msg)
{
  MI_UNUSED(msg);

  #if (MI_DEBUG>0)
  if mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) {
    _mi_error_message(EINVAL, "%s: invalid (unaligned) pointer: %p\n", msg, p);
    return NULL;
  }
  #endif

  mi_segment_t* const segment = _mi_ptr_segment(p);
  if mi_unlikely(segment==NULL) return segment;

  #if (MI_DEBUG>0)
  if mi_unlikely(!mi_is_in_heap_region(p)) {
    #if (MI_INTPTR_SIZE == 8 && defined(__linux__))
    if (((uintptr_t)p >> 40) != 0x7F) { // linux tends to align large blocks above 0x7F000000000 (issue #640)
    #else
    {
    #endif
      _mi_warning_message("%s: pointer might not point to a valid heap region: %p\n"
                          "(this may still be a valid very large allocation (over 64MiB))\n", msg, p);
      if mi_likely(_mi_ptr_cookie(segment) == segment->cookie) {
        _mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p);
      }
    }
  }
  #endif
  #if (MI_DEBUG>0 || MI_SECURE>=4)
  if mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie) {
    _mi_error_message(EINVAL, "%s: pointer does not point to a valid heap space: %p\n", msg, p);
    return NULL;
  }
  #endif

  return segment;
}
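
// (The "single `and`" refers to `_mi_ptr_segment`: segments are aligned to
//  `MI_SEGMENT_SIZE`, so the segment of `p` is found by masking its low bits,
//  roughly `(mi_segment_t*)((uintptr_t)p & ~MI_SEGMENT_MASK)`.)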

// Free a block
// Fast path written carefully to prevent register spilling on the stack
void mi_free(void* p) mi_attr_noexcept
{
  mi_segment_t* const segment = mi_checked_ptr_segment(p,"mi_free");
  if mi_unlikely(segment==NULL) return;

  const bool is_local = (_mi_prim_thread_id() == mi_atomic_load_relaxed(&segment->thread_id));
  mi_page_t* const page = _mi_segment_page_of(segment, p);

  if mi_likely(is_local) {                        // thread-local free?
    if mi_likely(page->flags.full_aligned == 0) { // and it is not a full page (full pages need to move from the full bin), nor has aligned blocks (aligned blocks need to be unaligned)
      // thread-local, no aligned blocks, and not a full page
      mi_block_t* const block = (mi_block_t*)p;
      mi_free_block_local(page, block, true /* track stats */, false /* no need to check if the page is full */);
    }
    else {
      // page is full or contains (inner) aligned blocks; use generic path
      mi_free_generic_local(page, segment, p);
    }
  }
  else {
    // not thread-local; use generic path
    mi_free_generic_mt(page, segment, p);
  }
}
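
// Example usage (illustrative): `mi_free` accepts any pointer returned by the
// mi_malloc family, as well as NULL:
//
//   void* p = mi_malloc(32);
//   mi_free(p);                  // typically the fast thread-local path above
//   void* q = mi_malloc_aligned(100, 64);
//   mi_free(q);                  // may take the generic path if the page has aligned blocks
//   mi_free(NULL);               // no-op: the segment lookup yields NULL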

// return true if successful
bool _mi_free_delayed_block(mi_block_t* block) {
  // get segment and page
  mi_assert_internal(block!=NULL);
  const mi_segment_t* const segment = _mi_ptr_segment(block);
  mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie);
  mi_assert_internal(_mi_thread_id() == segment->thread_id);
  mi_page_t* const page = _mi_segment_page_of(segment, block);

  // Clear the no-delayed flag so delayed freeing is used again for this page.
  // This must be done before collecting the free lists on this page -- otherwise
  // some blocks may end up in the page `thread_free` list with no blocks in the
  // heap `thread_delayed_free` list which may cause the page to be never freed!
  // (it would only be freed if we happen to scan it in `mi_page_queue_find_free_ex`)
  if (!_mi_page_try_use_delayed_free(page, MI_USE_DELAYED_FREE, false /* don't overwrite never delayed */)) {
    return false;
  }

  // collect all other non-local frees (move from `thread_free` to `free`) to ensure up-to-date `used` count
  _mi_page_free_collect(page, false);

  // and free the block (possibly freeing the page as well since `used` is updated)
  mi_free_block_local(page, block, false /* stats have already been adjusted */, true /* check for a full page */);
  return true;
}

// ------------------------------------------------------
// Multi-threaded Free (`_mt`)
// ------------------------------------------------------

// Push a block that is owned by another thread on its page-local thread free
// list or its heap delayed free list. Such blocks are later collected by
// the owning thread in `_mi_free_delayed_block`.
static void mi_decl_noinline mi_free_block_delayed_mt( mi_page_t* page, mi_block_t* block )
{
  // Try to put the block on either the page-local thread free list,
  // or the heap delayed free list (if this is the first non-local free in that page)
  mi_thread_free_t tfreex;
  bool use_delayed;
  mi_thread_free_t tfree = mi_atomic_load_relaxed(&page->xthread_free);
  do {
    use_delayed = (mi_tf_delayed(tfree) == MI_USE_DELAYED_FREE);
    if mi_unlikely(use_delayed) {
      // unlikely: this only happens on the first concurrent free in a page that is in the full list
      tfreex = mi_tf_set_delayed(tfree,MI_DELAYED_FREEING);
    }
    else {
      // usual: directly add to page thread_free list
      mi_block_set_next(page, block, mi_tf_block(tfree));
      tfreex = mi_tf_set_block(tfree,block);
    }
  } while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));

  // If this was the first non-local free, we need to push it on the heap delayed free list instead
  if mi_unlikely(use_delayed) {
    // racy read on `heap`, but ok because MI_DELAYED_FREEING is set (see `mi_heap_delete` and `mi_heap_collect_abandon`)
    mi_heap_t* const heap = (mi_heap_t*)(mi_atomic_load_acquire(&page->xheap)); //mi_page_heap(page);
    mi_assert_internal(heap != NULL);
    if (heap != NULL) {
      // add to the delayed free list of this heap. (do this atomically as the lock only protects heap memory validity)
      mi_block_t* dfree = mi_atomic_load_ptr_relaxed(mi_block_t, &heap->thread_delayed_free);
      do {
        mi_block_set_nextx(heap,block,dfree, heap->keys);
      } while (!mi_atomic_cas_ptr_weak_release(mi_block_t,&heap->thread_delayed_free, &dfree, block));
    }

    // and reset the MI_DELAYED_FREEING flag
    tfree = mi_atomic_load_relaxed(&page->xthread_free);
    do {
      tfreex = tfree;
      mi_assert_internal(mi_tf_delayed(tfree) == MI_DELAYED_FREEING);
      tfreex = mi_tf_set_delayed(tfree,MI_NO_DELAYED_FREE);
    } while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));
  }
}
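
// Note: `mi_thread_free_t` packs the head of the `thread_free` list together with
// a two-bit `mi_delayed_t` state in a single word (see `mi_tf_block`/`mi_tf_delayed`),
// which is why both the list push and the state transitions above can each be done
// with one CAS on `page->xthread_free`.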

// Multi-threaded free (`_mt`) (or free in huge block if compiled with MI_HUGE_PAGE_ABANDON)
static void mi_decl_noinline mi_free_block_mt(mi_page_t* page, mi_segment_t* segment, mi_block_t* block)
{
  // first see if the segment was abandoned and if we can reclaim it into our thread
  if (mi_option_is_enabled(mi_option_abandoned_reclaim_on_free) &&
      #if MI_HUGE_PAGE_ABANDON
      segment->page_kind != MI_PAGE_HUGE &&
      #endif
      mi_atomic_load_relaxed(&segment->thread_id) == 0)
  {
    // the segment is abandoned, try to reclaim it into our heap
    if (_mi_segment_attempt_reclaim(mi_heap_get_default(), segment)) {
      mi_assert_internal(_mi_prim_thread_id() == mi_atomic_load_relaxed(&segment->thread_id));
      mi_free(block); // recursively free as now it will be a local free in our heap
      return;
    }
  }

  // The padding check may access the non-thread-owned page for the key values.
  // That is safe as these are constant and the page won't be freed (as the block is not freed yet).
  mi_check_padding(page, block);

  // adjust stats (after padding check and potentially recursive `mi_free` above)
  mi_stat_free(page, block);  // stat_free may access the padding
  mi_track_free_size(block, mi_page_usable_size_of(page,block));

  // for small sizes, ensure we can fit the delayed thread pointers without triggering overflow detection
  _mi_padding_shrink(page, block, sizeof(mi_block_t));

  if (segment->kind == MI_SEGMENT_HUGE) {
    #if MI_HUGE_PAGE_ABANDON
    // huge page segments are always abandoned and can be freed immediately
    _mi_segment_huge_page_free(segment, page, block);
    return;
    #else
    // huge pages are special as they occupy the entire segment
    // as these are large we reset the memory occupied by the page so it is available to other threads
    // (as the owning thread needs to actually free the memory later).
    _mi_segment_huge_page_reset(segment, page, block);
    #endif
  }
  else {
    #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN   // note: when tracking, cannot use mi_usable_size with multi-threading
    memset(block, MI_DEBUG_FREED, mi_usable_size(block));
    #endif
  }

  // and finally free the actual block by pushing it on the owning heap
  // thread_delayed free list (or heap delayed free list)
  mi_free_block_delayed_mt(page,block);
}


// ------------------------------------------------------
// Usable size
// ------------------------------------------------------

// Bytes available in a block
static size_t mi_decl_noinline mi_page_usable_aligned_size_of(const mi_page_t* page, const void* p) mi_attr_noexcept {
  const mi_block_t* block = _mi_page_ptr_unalign(page, p);
  const size_t size = mi_page_usable_size_of(page, block);
  const ptrdiff_t adjust = (uint8_t*)p - (uint8_t*)block;
  mi_assert_internal(adjust >= 0 && (size_t)adjust <= size);
  return (size - adjust);
}

static inline size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noexcept {
  const mi_segment_t* const segment = mi_checked_ptr_segment(p, msg);
  if mi_unlikely(segment==NULL) return 0;
  const mi_page_t* const page = _mi_segment_page_of(segment, p);
  if mi_likely(!mi_page_has_aligned(page)) {
    const mi_block_t* block = (const mi_block_t*)p;
    return mi_page_usable_size_of(page, block);
  }
  else {
    // split out to separate routine for improved code generation
    return mi_page_usable_aligned_size_of(page, p);
  }
}

mi_decl_nodiscard size_t mi_usable_size(const void* p) mi_attr_noexcept {
  return _mi_usable_size(p, "mi_usable_size");
}


// ------------------------------------------------------
// Free variants
// ------------------------------------------------------

void mi_free_size(void* p, size_t size) mi_attr_noexcept {
  MI_UNUSED_RELEASE(size);
  mi_assert(p == NULL || size <= _mi_usable_size(p,"mi_free_size"));
  mi_free(p);
}

void mi_free_size_aligned(void* p, size_t size, size_t alignment) mi_attr_noexcept {
  MI_UNUSED_RELEASE(alignment);
  mi_assert(((uintptr_t)p % alignment) == 0);
  mi_free_size(p,size);
}

void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept {
  MI_UNUSED_RELEASE(alignment);
  mi_assert(((uintptr_t)p % alignment) == 0);
  mi_free(p);
}


// ------------------------------------------------------
// Check for double free in secure and debug mode
// This is somewhat expensive so only enabled for secure mode 4
// ------------------------------------------------------

#if (MI_ENCODE_FREELIST && (MI_SECURE>=4 || MI_DEBUG!=0))
// linear check if the free list contains a specific element
static bool mi_list_contains(const mi_page_t* page, const mi_block_t* list, const mi_block_t* elem) {
  while (list != NULL) {
    if (elem==list) return true;
    list = mi_block_next(page, list);
  }
  return false;
}

static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, const mi_block_t* block) {
  // The decoded value is in the same page (or NULL).
  // Walk the free lists to verify positively if it is already freed
  if (mi_list_contains(page, page->free, block) ||
      mi_list_contains(page, page->local_free, block) ||
      mi_list_contains(page, mi_page_thread_free(page), block))
  {
    _mi_error_message(EAGAIN, "double free detected of block %p with size %zu\n", block, mi_page_block_size(page));
    return true;
  }
  return false;
}

#define mi_track_page(page,access)  { size_t psize; void* pstart = _mi_page_start(_mi_page_segment(page),page,&psize); mi_track_mem_##access( pstart, psize); }

static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
  bool is_double_free = false;
  mi_block_t* n = mi_block_nextx(page, block, page->keys); // pretend it is freed, and get the decoded first field
  if (((uintptr_t)n & (MI_INTPTR_SIZE-1))==0 &&   // quick check: aligned pointer?
      (n==NULL || mi_is_in_same_page(block, n)))  // quick check: in same page or NULL?
  {
    // Suspicious: the decoded value in the block is in the same page (or NULL) -- maybe a double free?
    // (continue in separate function to improve code generation)
    is_double_free = mi_check_is_double_freex(page, block);
  }
  return is_double_free;
}
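
// The quick checks above rely on the encoded free list: for a block that is already
// on a free list its first field decodes to the next free block (NULL or a pointer
// within the same page), whereas a block in use holds user data that only rarely
// decodes to such a value; only then is the more expensive list walk done.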
#else
static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
  MI_UNUSED(page);
  MI_UNUSED(block);
  return false;
}
#endif


// ---------------------------------------------------------------------------
// Check for heap block overflow by setting up padding at the end of the block
// ---------------------------------------------------------------------------

#if MI_PADDING // && !MI_TRACK_ENABLED
static bool mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) {
  *bsize = mi_page_usable_block_size(page);
  const mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize);
  mi_track_mem_defined(padding,sizeof(mi_padding_t));
  *delta = padding->delta;
  uint32_t canary = padding->canary;
  uintptr_t keys[2];
  keys[0] = page->keys[0];
  keys[1] = page->keys[1];
  bool ok = ((uint32_t)mi_ptr_encode(page,block,keys) == canary && *delta <= *bsize);
  mi_track_mem_noaccess(padding,sizeof(mi_padding_t));
  return ok;
}
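
// Rough block layout with padding enabled: the usable area of `bsize - delta`
// bytes is followed by `delta` fill bytes (MI_DEBUG_PADDING in debug builds) and
// a trailing `mi_padding_t` whose `canary` encodes the block pointer:
//
//   [ user data (bsize - delta) | fill (delta) | canary | delta ]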

// Return the exact usable size of a block.
static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
  size_t bsize;
  size_t delta;
  bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
  mi_assert_internal(ok); mi_assert_internal(delta <= bsize);
  return (ok ? bsize - delta : 0);
}

// When a non-thread-local block is freed, it becomes part of the thread delayed free
// list that is freed later by the owning heap. If the exact usable size is too small to
// contain the pointer for the delayed list, then shrink the padding (by decreasing delta)
// so it will later not trigger an overflow error in `mi_free_block`.
void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
  size_t bsize;
  size_t delta;
  bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
  mi_assert_internal(ok);
  if (!ok || (bsize - delta) >= min_size) return;  // usually already enough space
  mi_assert_internal(bsize >= min_size);
  if (bsize < min_size) return;  // should never happen
  size_t new_delta = (bsize - min_size);
  mi_assert_internal(new_delta < bsize);
  mi_padding_t* padding = (mi_padding_t*)((uint8_t*)block + bsize);
  mi_track_mem_defined(padding,sizeof(mi_padding_t));
  padding->delta = (uint32_t)new_delta;
  mi_track_mem_noaccess(padding,sizeof(mi_padding_t));
}
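
// After shrinking, `delta` becomes `bsize - min_size`, so the reported usable size
// `bsize - delta` is exactly `min_size`: just enough room to store the delayed-free
// list pointer inside the block.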
#else
static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
  MI_UNUSED(block);
  return mi_page_usable_block_size(page);
}

void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
  MI_UNUSED(page);
  MI_UNUSED(block);
  MI_UNUSED(min_size);
}
#endif

#if MI_PADDING && MI_PADDING_CHECK

static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, size_t* size, size_t* wrong) {
  size_t bsize;
  size_t delta;
  bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
  *size = *wrong = bsize;
  if (!ok) return false;
  mi_assert_internal(bsize >= delta);
  *size = bsize - delta;
  if (!mi_page_is_huge(page)) {
    uint8_t* fill = (uint8_t*)block + bsize - delta;
    const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // check at most the first N padding bytes
    mi_track_mem_defined(fill, maxpad);
    for (size_t i = 0; i < maxpad; i++) {
      if (fill[i] != MI_DEBUG_PADDING) {
        *wrong = bsize - delta + i;
        ok = false;
        break;
      }
    }
    mi_track_mem_noaccess(fill, maxpad);
  }
  return ok;
}

static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
  size_t size;
  size_t wrong;
  if (!mi_verify_padding(page,block,&size,&wrong)) {
    _mi_error_message(EFAULT, "buffer overflow in heap block %p of size %zu: write after %zu bytes\n", block, size, wrong );
  }
}

#else

static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
  MI_UNUSED(page);
  MI_UNUSED(block);
}

#endif

// only maintain stats for smaller objects if requested
#if (MI_STAT>0)
static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
  #if (MI_STAT < 2)
  MI_UNUSED(block);
  #endif
  mi_heap_t* const heap = mi_heap_get_default();
  const size_t bsize = mi_page_usable_block_size(page);
  #if (MI_STAT>1)
  const size_t usize = mi_page_usable_size_of(page, block);
  mi_heap_stat_decrease(heap, malloc, usize);
  #endif
  if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) {
    mi_heap_stat_decrease(heap, normal, bsize);
    #if (MI_STAT > 1)
    mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], 1);
    #endif
  }
  else if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
    mi_heap_stat_decrease(heap, large, bsize);
  }
  else {
    mi_heap_stat_decrease(heap, huge, bsize);
  }
}
#else
static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
  MI_UNUSED(page); MI_UNUSED(block);
}
#endif