GitHub Repository: emscripten-core/emscripten
Path: blob/main/system/lib/mimalloc/src/segment.c
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "mimalloc/atomic.h"

#include <string.h>  // memset
#include <stdio.h>

// -------------------------------------------------------------------
// Segments
// mimalloc pages reside in segments. See `mi_segment_is_valid` for invariants.
// -------------------------------------------------------------------

static void mi_segment_try_purge(mi_segment_t* segment, bool force, mi_stats_t* stats);

// -------------------------------------------------------------------
// commit mask
// -------------------------------------------------------------------
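
// Layout sketch (illustrative, not part of the allocator): a commit mask is a fixed
// bitset of MI_COMMIT_MASK_BITS bits stored in MI_COMMIT_MASK_FIELD_COUNT words; for
// a normal segment each set bit means one MI_COMMIT_SIZE sized chunk is committed
// (huge segments scale this, see `_mi_commit_mask_committed_size`). Assuming only
// that layout, testing a single bit looks like:
static inline bool mi_example_commit_bit_is_set(const mi_commit_mask_t* cm, size_t bitidx) {
  const size_t i   = bitidx / MI_COMMIT_MASK_FIELD_BITS;   // which word
  const size_t ofs = bitidx % MI_COMMIT_MASK_FIELD_BITS;   // which bit inside that word
  return ((cm->mask[i] >> ofs) & 1) != 0;
}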
static bool mi_commit_mask_all_set(const mi_commit_mask_t* commit, const mi_commit_mask_t* cm) {
28
for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
29
if ((commit->mask[i] & cm->mask[i]) != cm->mask[i]) return false;
30
}
31
return true;
32
}
33
34
static bool mi_commit_mask_any_set(const mi_commit_mask_t* commit, const mi_commit_mask_t* cm) {
35
for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
36
if ((commit->mask[i] & cm->mask[i]) != 0) return true;
37
}
38
return false;
39
}
40
41
static void mi_commit_mask_create_intersect(const mi_commit_mask_t* commit, const mi_commit_mask_t* cm, mi_commit_mask_t* res) {
42
for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
43
res->mask[i] = (commit->mask[i] & cm->mask[i]);
44
}
45
}
46
47
static void mi_commit_mask_clear(mi_commit_mask_t* res, const mi_commit_mask_t* cm) {
48
for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
49
res->mask[i] &= ~(cm->mask[i]);
50
}
51
}
52
53
static void mi_commit_mask_set(mi_commit_mask_t* res, const mi_commit_mask_t* cm) {
54
for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
55
res->mask[i] |= cm->mask[i];
56
}
57
}
58
59
static void mi_commit_mask_create(size_t bitidx, size_t bitcount, mi_commit_mask_t* cm) {
60
mi_assert_internal(bitidx < MI_COMMIT_MASK_BITS);
61
mi_assert_internal((bitidx + bitcount) <= MI_COMMIT_MASK_BITS);
62
if (bitcount == MI_COMMIT_MASK_BITS) {
63
mi_assert_internal(bitidx==0);
64
mi_commit_mask_create_full(cm);
65
}
66
else if (bitcount == 0) {
67
mi_commit_mask_create_empty(cm);
68
}
69
else {
70
mi_commit_mask_create_empty(cm);
71
size_t i = bitidx / MI_COMMIT_MASK_FIELD_BITS;
72
size_t ofs = bitidx % MI_COMMIT_MASK_FIELD_BITS;
73
while (bitcount > 0) {
74
mi_assert_internal(i < MI_COMMIT_MASK_FIELD_COUNT);
75
size_t avail = MI_COMMIT_MASK_FIELD_BITS - ofs;
76
size_t count = (bitcount > avail ? avail : bitcount);
77
size_t mask = (count >= MI_COMMIT_MASK_FIELD_BITS ? ~((size_t)0) : (((size_t)1 << count) - 1) << ofs);
78
cm->mask[i] = mask;
79
bitcount -= count;
80
ofs = 0;
81
i++;
82
}
83
}
84
}
85
86
size_t _mi_commit_mask_committed_size(const mi_commit_mask_t* cm, size_t total) {
87
mi_assert_internal((total%MI_COMMIT_MASK_BITS)==0);
88
size_t count = 0;
89
for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
90
size_t mask = cm->mask[i];
91
if (~mask == 0) {
92
count += MI_COMMIT_MASK_FIELD_BITS;
93
}
94
else {
95
for (; mask != 0; mask >>= 1) { // todo: use popcount
96
if ((mask&1)!=0) count++;
97
}
98
}
99
}
100
// we use total since for huge segments each commit bit may represent a larger size
101
return ((total / MI_COMMIT_MASK_BITS) * count);
102
}
103
104
105
size_t _mi_commit_mask_next_run(const mi_commit_mask_t* cm, size_t* idx) {
106
size_t i = (*idx) / MI_COMMIT_MASK_FIELD_BITS;
107
size_t ofs = (*idx) % MI_COMMIT_MASK_FIELD_BITS;
108
size_t mask = 0;
109
// find first ones
110
while (i < MI_COMMIT_MASK_FIELD_COUNT) {
111
mask = cm->mask[i];
112
mask >>= ofs;
113
if (mask != 0) {
114
while ((mask&1) == 0) {
115
mask >>= 1;
116
ofs++;
117
}
118
break;
119
}
120
i++;
121
ofs = 0;
122
}
123
if (i >= MI_COMMIT_MASK_FIELD_COUNT) {
124
// not found
125
*idx = MI_COMMIT_MASK_BITS;
126
return 0;
127
}
128
else {
129
// found, count ones
130
size_t count = 0;
131
*idx = (i*MI_COMMIT_MASK_FIELD_BITS) + ofs;
132
do {
133
mi_assert_internal(ofs < MI_COMMIT_MASK_FIELD_BITS && (mask&1) == 1);
134
do {
135
count++;
136
mask >>= 1;
137
} while ((mask&1) == 1);
138
if ((((*idx + count) % MI_COMMIT_MASK_FIELD_BITS) == 0)) {
139
i++;
140
if (i >= MI_COMMIT_MASK_FIELD_COUNT) break;
141
mask = cm->mask[i];
142
ofs = 0;
143
}
144
} while ((mask&1) == 1);
145
mi_assert_internal(count > 0);
146
return count;
147
}
148
}
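
// Usage sketch (illustrative, not part of the allocator): walking the committed runs
// of a mask, much like the `mi_commit_mask_foreach` macro used further below, and
// turning each run of set bits back into a byte range of the segment.
static inline void mi_example_visit_committed_runs(const mi_commit_mask_t* cm) {
  size_t idx = 0;
  while (idx < MI_COMMIT_MASK_BITS) {
    const size_t count = _mi_commit_mask_next_run(cm, &idx);  // first run at or after idx
    if (count == 0) break;                                    // no more set bits
    // for a normal segment, bytes [idx*MI_COMMIT_SIZE, (idx+count)*MI_COMMIT_SIZE) are committed
    idx += count;
  }
}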
149
150
151
/* --------------------------------------------------------------------------------
  Segment allocation
-------------------------------------------------------------------------------- */

/* -----------------------------------------------------------
  Slices
----------------------------------------------------------- */

static const mi_slice_t* mi_segment_slices_end(const mi_segment_t* segment) {
162
return &segment->slices[segment->slice_entries];
163
}
164
165
static uint8_t* mi_slice_start(const mi_slice_t* slice) {
166
mi_segment_t* segment = _mi_ptr_segment(slice);
167
mi_assert_internal(slice >= segment->slices && slice < mi_segment_slices_end(segment));
168
return ((uint8_t*)segment + ((slice - segment->slices)*MI_SEGMENT_SLICE_SIZE));
169
}
170
171
172
/* -----------------------------------------------------------
  Bins
----------------------------------------------------------- */
// Use a (reverse) bit scan to quickly map a slice count to its size bin.

static inline size_t mi_slice_bin8(size_t slice_count) {
178
if (slice_count<=1) return slice_count;
179
mi_assert_internal(slice_count <= MI_SLICES_PER_SEGMENT);
180
slice_count--;
181
size_t s = mi_bsr(slice_count); // slice_count > 1
182
if (s <= 2) return slice_count + 1;
183
size_t bin = ((s << 2) | ((slice_count >> (s - 2))&0x03)) - 4;
184
return bin;
185
}
186
187
static inline size_t mi_slice_bin(size_t slice_count) {
188
mi_assert_internal(slice_count*MI_SEGMENT_SLICE_SIZE <= MI_SEGMENT_SIZE);
189
mi_assert_internal(mi_slice_bin8(MI_SLICES_PER_SEGMENT) <= MI_SEGMENT_BIN_MAX);
190
size_t bin = mi_slice_bin8(slice_count);
191
mi_assert_internal(bin <= MI_SEGMENT_BIN_MAX);
192
return bin;
193
}
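
// Worked example (illustrative, not part of the allocator): small spans get a bin of
// their own, while larger spans share bins of geometrically growing width.
static inline void mi_example_slice_bins(void) {
  mi_assert_internal(mi_slice_bin8(1) == 1);    // tiny spans map to themselves
  mi_assert_internal(mi_slice_bin8(4) == 4);    // still one bin per size
  mi_assert_internal(mi_slice_bin8(10) == 8);   // 9 -> bsr 3 -> ((3<<2)|((9>>1)&3))-4 == 8
}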
194
195
static inline size_t mi_slice_index(const mi_slice_t* slice) {
196
mi_segment_t* segment = _mi_ptr_segment(slice);
197
ptrdiff_t index = slice - segment->slices;
198
mi_assert_internal(index >= 0 && index < (ptrdiff_t)segment->slice_entries);
199
return index;
200
}
201
202
203
/* -----------------------------------------------------------
  Slice span queues
----------------------------------------------------------- */
206
207
static void mi_span_queue_push(mi_span_queue_t* sq, mi_slice_t* slice) {
208
// todo: or push to the end?
209
mi_assert_internal(slice->prev == NULL && slice->next==NULL);
210
slice->prev = NULL; // paranoia
211
slice->next = sq->first;
212
sq->first = slice;
213
if (slice->next != NULL) slice->next->prev = slice;
214
else sq->last = slice;
215
slice->block_size = 0; // free
216
}
217
218
static mi_span_queue_t* mi_span_queue_for(size_t slice_count, mi_segments_tld_t* tld) {
219
size_t bin = mi_slice_bin(slice_count);
220
mi_span_queue_t* sq = &tld->spans[bin];
221
mi_assert_internal(sq->slice_count >= slice_count);
222
return sq;
223
}
224
225
static void mi_span_queue_delete(mi_span_queue_t* sq, mi_slice_t* slice) {
226
mi_assert_internal(slice->block_size==0 && slice->slice_count>0 && slice->slice_offset==0);
227
// should work too if the queue does not contain slice (which can happen during reclaim)
228
if (slice->prev != NULL) slice->prev->next = slice->next;
229
if (slice == sq->first) sq->first = slice->next;
230
if (slice->next != NULL) slice->next->prev = slice->prev;
231
if (slice == sq->last) sq->last = slice->prev;
232
slice->prev = NULL;
233
slice->next = NULL;
234
slice->block_size = 1; // no more free
235
}
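
// Usage sketch (illustrative, not part of the allocator): the span queues are indexed
// by size bin, so locating the queue where a free span of `n` slices would be filed is
// a single array lookup (the same lookup `mi_span_queue_for` performs).
static inline mi_span_queue_t* mi_example_span_queue_of(size_t n, mi_segments_tld_t* tld) {
  return &tld->spans[mi_slice_bin(n)];
}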
236
237
238
/* -----------------------------------------------------------
  Invariant checking
----------------------------------------------------------- */
241
242
static bool mi_slice_is_used(const mi_slice_t* slice) {
243
return (slice->block_size > 0);
244
}
245
246
247
#if (MI_DEBUG>=3)
248
static bool mi_span_queue_contains(mi_span_queue_t* sq, mi_slice_t* slice) {
249
for (mi_slice_t* s = sq->first; s != NULL; s = s->next) {
250
if (s==slice) return true;
251
}
252
return false;
253
}
254
255
static bool mi_segment_is_valid(mi_segment_t* segment, mi_segments_tld_t* tld) {
256
mi_assert_internal(segment != NULL);
257
mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie);
258
mi_assert_internal(segment->abandoned <= segment->used);
259
mi_assert_internal(segment->thread_id == 0 || segment->thread_id == _mi_thread_id());
260
mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->purge_mask)); // can only decommit committed blocks
261
//mi_assert_internal(segment->segment_info_size % MI_SEGMENT_SLICE_SIZE == 0);
262
mi_slice_t* slice = &segment->slices[0];
263
const mi_slice_t* end = mi_segment_slices_end(segment);
264
size_t used_count = 0;
265
mi_span_queue_t* sq;
266
while(slice < end) {
267
mi_assert_internal(slice->slice_count > 0);
268
mi_assert_internal(slice->slice_offset == 0);
269
size_t index = mi_slice_index(slice);
270
size_t maxindex = (index + slice->slice_count >= segment->slice_entries ? segment->slice_entries : index + slice->slice_count) - 1;
if (mi_slice_is_used(slice)) { // a page in use, we need at least MI_MAX_SLICE_OFFSET_COUNT valid back offsets
272
used_count++;
273
mi_assert_internal(slice->is_huge == (segment->kind == MI_SEGMENT_HUGE));
274
for (size_t i = 0; i <= MI_MAX_SLICE_OFFSET_COUNT && index + i <= maxindex; i++) {
275
mi_assert_internal(segment->slices[index + i].slice_offset == i*sizeof(mi_slice_t));
276
mi_assert_internal(i==0 || segment->slices[index + i].slice_count == 0);
277
mi_assert_internal(i==0 || segment->slices[index + i].block_size == 1);
278
}
279
// and the last entry as well (for coalescing)
280
const mi_slice_t* last = slice + slice->slice_count - 1;
281
if (last > slice && last < mi_segment_slices_end(segment)) {
282
mi_assert_internal(last->slice_offset == (slice->slice_count-1)*sizeof(mi_slice_t));
283
mi_assert_internal(last->slice_count == 0);
284
mi_assert_internal(last->block_size == 1);
285
}
286
}
287
else { // free range of slices; only last slice needs a valid back offset
288
mi_slice_t* last = &segment->slices[maxindex];
289
if (segment->kind != MI_SEGMENT_HUGE || slice->slice_count <= (segment->slice_entries - segment->segment_info_slices)) {
290
mi_assert_internal((uint8_t*)slice == (uint8_t*)last - last->slice_offset);
291
}
292
mi_assert_internal(slice == last || last->slice_count == 0 );
293
mi_assert_internal(last->block_size == 0 || (segment->kind==MI_SEGMENT_HUGE && last->block_size==1));
294
if (segment->kind != MI_SEGMENT_HUGE && segment->thread_id != 0) { // segment is not huge or abandoned
295
sq = mi_span_queue_for(slice->slice_count,tld);
296
mi_assert_internal(mi_span_queue_contains(sq,slice));
297
}
298
}
299
slice = &segment->slices[maxindex+1];
300
}
301
mi_assert_internal(slice == end);
302
mi_assert_internal(used_count == segment->used + 1);
303
return true;
304
}
305
#endif
306
307
/* -----------------------------------------------------------
  Segment size calculations
----------------------------------------------------------- */
310
311
static size_t mi_segment_info_size(mi_segment_t* segment) {
312
return segment->segment_info_slices * MI_SEGMENT_SLICE_SIZE;
313
}
314
315
static uint8_t* _mi_segment_page_start_from_slice(const mi_segment_t* segment, const mi_slice_t* slice, size_t block_size, size_t* page_size)
316
{
317
const ptrdiff_t idx = slice - segment->slices;
318
const size_t psize = (size_t)slice->slice_count * MI_SEGMENT_SLICE_SIZE;
319
uint8_t* const pstart = (uint8_t*)segment + (idx*MI_SEGMENT_SLICE_SIZE);
320
// make the start not OS page aligned for smaller blocks to avoid page/cache effects
321
// note: the offset must always be a block_size multiple since we assume small allocations
322
// are aligned (see `mi_heap_malloc_aligned`).
323
size_t start_offset = 0;
324
if (block_size > 0 && block_size <= MI_MAX_ALIGN_GUARANTEE) {
325
// for small objects, ensure the page start is aligned with the block size (PR#66 by kickunderscore)
326
const size_t adjust = block_size - ((uintptr_t)pstart % block_size);
327
if (adjust < block_size && psize >= block_size + adjust) {
328
start_offset += adjust;
329
}
330
}
331
if (block_size >= MI_INTPTR_SIZE) {
332
if (block_size <= 64) { start_offset += 3*block_size; }
333
else if (block_size <= 512) { start_offset += block_size; }
334
}
335
if (page_size != NULL) { *page_size = psize - start_offset; }
336
return (pstart + start_offset);
337
}
338
339
// Start of the page available memory; can be used on uninitialized pages
340
uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size)
341
{
342
const mi_slice_t* slice = mi_page_to_slice((mi_page_t*)page);
343
uint8_t* p = _mi_segment_page_start_from_slice(segment, slice, mi_page_block_size(page), page_size);
344
mi_assert_internal(mi_page_block_size(page) > 0 || _mi_ptr_page(p) == page);
345
mi_assert_internal(_mi_ptr_segment(p) == segment);
346
return p;
347
}
348
349
350
static size_t mi_segment_calculate_slices(size_t required, size_t* info_slices) {
351
size_t page_size = _mi_os_page_size();
352
size_t isize = _mi_align_up(sizeof(mi_segment_t), page_size);
353
size_t guardsize = 0;
354
355
if (MI_SECURE>0) {
356
// in secure mode, we set up a protected page in between the segment info
357
// and the page data (and one at the end of the segment)
358
guardsize = page_size;
359
if (required > 0) {
360
required = _mi_align_up(required, MI_SEGMENT_SLICE_SIZE) + page_size;
361
}
362
}
363
364
isize = _mi_align_up(isize + guardsize, MI_SEGMENT_SLICE_SIZE);
365
if (info_slices != NULL) *info_slices = isize / MI_SEGMENT_SLICE_SIZE;
366
size_t segment_size = (required==0 ? MI_SEGMENT_SIZE : _mi_align_up( required + isize + guardsize, MI_SEGMENT_SLICE_SIZE) );
367
mi_assert_internal(segment_size % MI_SEGMENT_SLICE_SIZE == 0);
368
return (segment_size / MI_SEGMENT_SLICE_SIZE);
369
}
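
// Usage sketch (illustrative, not part of the allocator): required == 0 requests a
// normal segment of exactly MI_SEGMENT_SIZE / MI_SEGMENT_SLICE_SIZE slices, while a
// huge request gets just enough slices to cover `required` plus the metadata slices
// (and the guard pages in secure mode).
static inline size_t mi_example_segment_slice_count(size_t required) {
  size_t info_slices;
  return mi_segment_calculate_slices(required, &info_slices);
}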
370
371
372
/* ----------------------------------------------------------------------------
  Segment caches
  We keep a small segment cache per thread to increase local
  reuse and avoid setting/clearing guard pages in secure mode.
---------------------------------------------------------------------------- */
377
378
static void mi_segments_track_size(long segment_size, mi_segments_tld_t* tld) {
379
if (segment_size>=0) _mi_stat_increase(&tld->stats->segments,1);
380
else _mi_stat_decrease(&tld->stats->segments,1);
381
tld->count += (segment_size >= 0 ? 1 : -1);
382
if (tld->count > tld->peak_count) tld->peak_count = tld->count;
383
tld->current_size += segment_size;
384
if (tld->current_size > tld->peak_size) tld->peak_size = tld->current_size;
385
}
386
387
static void mi_segment_os_free(mi_segment_t* segment, mi_segments_tld_t* tld) {
388
segment->thread_id = 0;
389
_mi_segment_map_freed_at(segment);
390
mi_segments_track_size(-((long)mi_segment_size(segment)),tld);
391
if (segment->was_reclaimed) {
392
tld->reclaim_count--;
393
segment->was_reclaimed = false;
394
}
395
if (MI_SECURE>0) {
396
// _mi_os_unprotect(segment, mi_segment_size(segment)); // ensure no more guard pages are set
397
// unprotect the guard pages; we cannot just unprotect the whole segment size as part may be decommitted
398
size_t os_pagesize = _mi_os_page_size();
399
_mi_os_unprotect((uint8_t*)segment + mi_segment_info_size(segment) - os_pagesize, os_pagesize);
400
uint8_t* end = (uint8_t*)segment + mi_segment_size(segment) - os_pagesize;
401
_mi_os_unprotect(end, os_pagesize);
402
}
403
404
// purge delayed decommits now? (no, leave it to the arena)
405
// mi_segment_try_purge(segment,true,tld->stats);
406
407
const size_t size = mi_segment_size(segment);
408
const size_t csize = _mi_commit_mask_committed_size(&segment->commit_mask, size);
409
410
_mi_abandoned_await_readers(); // wait until safe to free
411
_mi_arena_free(segment, mi_segment_size(segment), csize, segment->memid, tld->stats);
412
}
413
414
/* -----------------------------------------------------------
  Commit/Decommit ranges
----------------------------------------------------------- */
417
418
static void mi_segment_commit_mask(mi_segment_t* segment, bool conservative, uint8_t* p, size_t size, uint8_t** start_p, size_t* full_size, mi_commit_mask_t* cm) {
419
mi_assert_internal(_mi_ptr_segment(p + 1) == segment);
420
mi_assert_internal(segment->kind != MI_SEGMENT_HUGE);
421
mi_commit_mask_create_empty(cm);
422
if (size == 0 || size > MI_SEGMENT_SIZE || segment->kind == MI_SEGMENT_HUGE) return;
423
const size_t segstart = mi_segment_info_size(segment);
424
const size_t segsize = mi_segment_size(segment);
425
if (p >= (uint8_t*)segment + segsize) return;
426
427
size_t pstart = (p - (uint8_t*)segment);
428
mi_assert_internal(pstart + size <= segsize);
429
430
size_t start;
431
size_t end;
432
if (conservative) {
433
// decommit conservative
434
start = _mi_align_up(pstart, MI_COMMIT_SIZE);
435
end = _mi_align_down(pstart + size, MI_COMMIT_SIZE);
436
mi_assert_internal(start >= segstart);
437
mi_assert_internal(end <= segsize);
438
}
439
else {
440
// commit liberal
441
start = _mi_align_down(pstart, MI_MINIMAL_COMMIT_SIZE);
442
end = _mi_align_up(pstart + size, MI_MINIMAL_COMMIT_SIZE);
443
}
444
if (pstart >= segstart && start < segstart) { // note: the mask is also calculated for an initial commit of the info area
445
start = segstart;
446
}
447
if (end > segsize) {
448
end = segsize;
449
}
450
451
mi_assert_internal(start <= pstart && (pstart + size) <= end);
452
mi_assert_internal(start % MI_COMMIT_SIZE==0 && end % MI_COMMIT_SIZE == 0);
453
*start_p = (uint8_t*)segment + start;
454
*full_size = (end > start ? end - start : 0);
455
if (*full_size == 0) return;
456
457
size_t bitidx = start / MI_COMMIT_SIZE;
458
mi_assert_internal(bitidx < MI_COMMIT_MASK_BITS);
459
460
size_t bitcount = *full_size / MI_COMMIT_SIZE; // can be 0
461
if (bitidx + bitcount > MI_COMMIT_MASK_BITS) {
462
_mi_warning_message("commit mask overflow: idx=%zu count=%zu start=%zx end=%zx p=0x%p size=%zu fullsize=%zu\n", bitidx, bitcount, start, end, p, size, *full_size);
463
}
464
mi_assert_internal((bitidx + bitcount) <= MI_COMMIT_MASK_BITS);
465
mi_commit_mask_create(bitidx, bitcount, cm);
466
}
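
// Illustrative helper (not used by the allocator): the "conservative" and "liberal"
// cases above differ only in the rounding direction. A conservative range keeps just
// the whole commit blocks that lie inside [p, p+size) (safe to decommit), while a
// liberal range covers every block the range touches (needed when committing). The
// real code additionally rounds the liberal case to MI_MINIMAL_COMMIT_SIZE to batch
// commit calls.
static inline void mi_example_commit_range(size_t pstart, size_t size, bool conservative, size_t* start, size_t* end) {
  if (conservative) {
    *start = _mi_align_up(pstart, MI_COMMIT_SIZE);            // round inward
    *end   = _mi_align_down(pstart + size, MI_COMMIT_SIZE);
  }
  else {
    *start = _mi_align_down(pstart, MI_COMMIT_SIZE);          // round outward
    *end   = _mi_align_up(pstart + size, MI_COMMIT_SIZE);
  }
}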
467
468
static bool mi_segment_commit(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) {
469
mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->purge_mask));
470
471
// commit liberal
472
uint8_t* start = NULL;
473
size_t full_size = 0;
474
mi_commit_mask_t mask;
475
mi_segment_commit_mask(segment, false /* conservative? */, p, size, &start, &full_size, &mask);
476
if (mi_commit_mask_is_empty(&mask) || full_size == 0) return true;
477
478
if (!mi_commit_mask_all_set(&segment->commit_mask, &mask)) {
479
// committing
480
bool is_zero = false;
481
mi_commit_mask_t cmask;
482
mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask);
483
_mi_stat_decrease(&_mi_stats_main.committed, _mi_commit_mask_committed_size(&cmask, MI_SEGMENT_SIZE)); // adjust for overlap
484
if (!_mi_os_commit(start, full_size, &is_zero, stats)) return false;
485
mi_commit_mask_set(&segment->commit_mask, &mask);
486
}
487
488
// increase purge expiration when using part of delayed purges -- we assume more allocations are coming soon.
489
if (mi_commit_mask_any_set(&segment->purge_mask, &mask)) {
490
segment->purge_expire = _mi_clock_now() + mi_option_get(mi_option_purge_delay);
491
}
492
// always clear any delayed purges in our range (as they are committed now)
494
mi_commit_mask_clear(&segment->purge_mask, &mask);
495
return true;
496
}
497
498
static bool mi_segment_ensure_committed(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) {
499
mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->purge_mask));
500
// note: assumes commit_mask is always full for huge segments as otherwise the commit mask bits can overflow
501
if (mi_commit_mask_is_full(&segment->commit_mask) && mi_commit_mask_is_empty(&segment->purge_mask)) return true; // fully committed
502
mi_assert_internal(segment->kind != MI_SEGMENT_HUGE);
503
return mi_segment_commit(segment, p, size, stats);
504
}
505
506
static bool mi_segment_purge(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) {
507
mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->purge_mask));
508
if (!segment->allow_purge) return true;
509
510
// purge conservative
511
uint8_t* start = NULL;
512
size_t full_size = 0;
513
mi_commit_mask_t mask;
514
mi_segment_commit_mask(segment, true /* conservative? */, p, size, &start, &full_size, &mask);
515
if (mi_commit_mask_is_empty(&mask) || full_size==0) return true;
516
517
if (mi_commit_mask_any_set(&segment->commit_mask, &mask)) {
518
// purging
519
mi_assert_internal((void*)start != (void*)segment);
520
mi_assert_internal(segment->allow_decommit);
521
const bool decommitted = _mi_os_purge(start, full_size, stats); // reset or decommit
522
if (decommitted) {
523
mi_commit_mask_t cmask;
524
mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask);
525
_mi_stat_increase(&_mi_stats_main.committed, full_size - _mi_commit_mask_committed_size(&cmask, MI_SEGMENT_SIZE)); // adjust for double counting
526
mi_commit_mask_clear(&segment->commit_mask, &mask);
527
}
528
}
529
530
// always clear any scheduled purges in our range
531
mi_commit_mask_clear(&segment->purge_mask, &mask);
532
return true;
533
}
534
535
static void mi_segment_schedule_purge(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) {
536
if (!segment->allow_purge) return;
537
538
if (mi_option_get(mi_option_purge_delay) == 0) {
539
mi_segment_purge(segment, p, size, stats);
540
}
541
else {
542
// register for future purge in the purge mask
543
uint8_t* start = NULL;
544
size_t full_size = 0;
545
mi_commit_mask_t mask;
546
mi_segment_commit_mask(segment, true /*conservative*/, p, size, &start, &full_size, &mask);
547
if (mi_commit_mask_is_empty(&mask) || full_size==0) return;
548
549
// update delayed commit
550
mi_assert_internal(segment->purge_expire > 0 || mi_commit_mask_is_empty(&segment->purge_mask));
551
mi_commit_mask_t cmask;
552
mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask); // only purge what is committed; span_free may try to decommit more
553
mi_commit_mask_set(&segment->purge_mask, &cmask);
554
mi_msecs_t now = _mi_clock_now();
555
if (segment->purge_expire == 0) {
// no previous purges, initialize now
557
segment->purge_expire = now + mi_option_get(mi_option_purge_delay);
558
}
559
else if (segment->purge_expire <= now) {
560
// previous purge mask already expired
561
if (segment->purge_expire + mi_option_get(mi_option_purge_extend_delay) <= now) {
562
mi_segment_try_purge(segment, true, stats);
563
}
564
else {
segment->purge_expire = now + mi_option_get(mi_option_purge_extend_delay); // (mi_option_get(mi_option_purge_delay) / 8); // wait a tiny bit longer in case there is a series of frees
566
}
567
}
568
else {
569
// previous purge mask is not yet expired, increase the expiration by a bit.
570
segment->purge_expire += mi_option_get(mi_option_purge_extend_delay);
571
}
572
}
573
}
574
575
static void mi_segment_try_purge(mi_segment_t* segment, bool force, mi_stats_t* stats) {
576
if (!segment->allow_purge || segment->purge_expire == 0 || mi_commit_mask_is_empty(&segment->purge_mask)) return;
577
mi_msecs_t now = _mi_clock_now();
578
if (!force && now < segment->purge_expire) return;
579
580
mi_commit_mask_t mask = segment->purge_mask;
581
segment->purge_expire = 0;
582
mi_commit_mask_create_empty(&segment->purge_mask);
583
584
size_t idx;
585
size_t count;
586
mi_commit_mask_foreach(&mask, idx, count) {
587
// if found, decommit that sequence
588
if (count > 0) {
589
uint8_t* p = (uint8_t*)segment + (idx*MI_COMMIT_SIZE);
590
size_t size = count * MI_COMMIT_SIZE;
591
mi_segment_purge(segment, p, size, stats);
592
}
593
}
594
mi_commit_mask_foreach_end()
595
mi_assert_internal(mi_commit_mask_is_empty(&segment->purge_mask));
596
}
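
// Timing sketch (illustrative, not part of the allocator): scheduled purges are
// batched in `purge_mask` and only flushed once `purge_expire` has passed (or when
// forced); this mirrors the fast-exit condition at the top of `mi_segment_try_purge`.
static inline bool mi_example_purge_is_due(mi_segment_t* segment, bool force) {
  if (!segment->allow_purge || segment->purge_expire == 0 || mi_commit_mask_is_empty(&segment->purge_mask)) return false;
  return (force || _mi_clock_now() >= segment->purge_expire);
}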
597
598
// called from `mi_heap_collect_ex`
599
// this can be called per-page so it is important that try_purge has fast exit path
600
void _mi_segment_collect(mi_segment_t* segment, bool force, mi_segments_tld_t* tld) {
601
mi_segment_try_purge(segment, force, tld->stats);
602
}
603
604
/* -----------------------------------------------------------
  Span free
----------------------------------------------------------- */
607
608
static bool mi_segment_is_abandoned(mi_segment_t* segment) {
609
return (mi_atomic_load_relaxed(&segment->thread_id) == 0);
610
}
611
612
// note: can be called on abandoned segments
613
static void mi_segment_span_free(mi_segment_t* segment, size_t slice_index, size_t slice_count, bool allow_purge, mi_segments_tld_t* tld) {
614
mi_assert_internal(slice_index < segment->slice_entries);
615
mi_span_queue_t* sq = (segment->kind == MI_SEGMENT_HUGE || mi_segment_is_abandoned(segment)
616
? NULL : mi_span_queue_for(slice_count,tld));
617
if (slice_count==0) slice_count = 1;
618
mi_assert_internal(slice_index + slice_count - 1 < segment->slice_entries);
619
620
// set first and last slice (the intermediates can be undetermined)
621
mi_slice_t* slice = &segment->slices[slice_index];
622
slice->slice_count = (uint32_t)slice_count;
623
mi_assert_internal(slice->slice_count == slice_count); // no overflow?
624
slice->slice_offset = 0;
625
if (slice_count > 1) {
626
mi_slice_t* last = slice + slice_count - 1;
627
mi_slice_t* end = (mi_slice_t*)mi_segment_slices_end(segment);
628
if (last > end) { last = end; }
629
last->slice_count = 0;
630
last->slice_offset = (uint32_t)(sizeof(mi_page_t)*(slice_count - 1));
631
last->block_size = 0;
632
}
633
634
// perhaps decommit
635
if (allow_purge) {
636
mi_segment_schedule_purge(segment, mi_slice_start(slice), slice_count * MI_SEGMENT_SLICE_SIZE, tld->stats);
637
}
638
639
// and push it on the free page queue (if it was not a huge page)
640
if (sq != NULL) mi_span_queue_push( sq, slice );
641
else slice->block_size = 0; // mark huge page as free anyways
642
}
643
644
/*
645
// called from reclaim to add existing free spans
646
static void mi_segment_span_add_free(mi_slice_t* slice, mi_segments_tld_t* tld) {
647
mi_segment_t* segment = _mi_ptr_segment(slice);
648
mi_assert_internal(slice->xblock_size==0 && slice->slice_count>0 && slice->slice_offset==0);
649
size_t slice_index = mi_slice_index(slice);
650
mi_segment_span_free(segment,slice_index,slice->slice_count,tld);
651
}
652
*/
653
654
static void mi_segment_span_remove_from_queue(mi_slice_t* slice, mi_segments_tld_t* tld) {
655
mi_assert_internal(slice->slice_count > 0 && slice->slice_offset==0 && slice->block_size==0);
656
mi_assert_internal(_mi_ptr_segment(slice)->kind != MI_SEGMENT_HUGE);
657
mi_span_queue_t* sq = mi_span_queue_for(slice->slice_count, tld);
658
mi_span_queue_delete(sq, slice);
659
}
660
661
// note: can be called on abandoned segments
662
static mi_slice_t* mi_segment_span_free_coalesce(mi_slice_t* slice, mi_segments_tld_t* tld) {
663
mi_assert_internal(slice != NULL && slice->slice_count > 0 && slice->slice_offset == 0);
664
mi_segment_t* const segment = _mi_ptr_segment(slice);
665
const bool is_abandoned = (segment->thread_id == 0); // mi_segment_is_abandoned(segment);
666
667
// for huge pages, just mark as free but don't add to the queues
668
if (segment->kind == MI_SEGMENT_HUGE) {
669
// issue #691: segment->used can be 0 if the huge page block was freed while abandoned (reclaim will get here in that case)
670
mi_assert_internal((segment->used==0 && slice->block_size==0) || segment->used == 1); // decreased right after this call in `mi_segment_page_clear`
671
slice->block_size = 0; // mark as free anyways
672
// we should mark the last slice `xblock_size=0` now to maintain invariants but we skip it to
673
// avoid a possible cache miss (and the segment is about to be freed)
674
return slice;
675
}
676
677
// otherwise coalesce the span and add to the free span queues
678
size_t slice_count = slice->slice_count;
679
mi_slice_t* next = slice + slice->slice_count;
680
mi_assert_internal(next <= mi_segment_slices_end(segment));
681
if (next < mi_segment_slices_end(segment) && next->block_size==0) {
682
// free next block -- remove it from free and merge
683
mi_assert_internal(next->slice_count > 0 && next->slice_offset==0);
684
slice_count += next->slice_count; // extend
685
if (!is_abandoned) { mi_segment_span_remove_from_queue(next, tld); }
686
}
687
if (slice > segment->slices) {
688
mi_slice_t* prev = mi_slice_first(slice - 1);
689
mi_assert_internal(prev >= segment->slices);
690
if (prev->block_size==0) {
691
// free previous slice -- remove it from free and merge
692
mi_assert_internal(prev->slice_count > 0 && prev->slice_offset==0);
693
slice_count += prev->slice_count;
694
if (!is_abandoned) { mi_segment_span_remove_from_queue(prev, tld); }
695
slice = prev;
696
}
697
}
698
699
// and add the new free page
700
mi_segment_span_free(segment, mi_slice_index(slice), slice_count, true, tld);
701
return slice;
702
}
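
// Coalescing sketch (illustrative, not part of the allocator): the span that starts
// right after a free span is found by stepping over `slice_count` entries; if that
// neighbour is free as well (block_size == 0) the two are merged above, and the same
// is done for the span just before it (found via `mi_slice_first`).
static inline bool mi_example_next_span_is_free(mi_segment_t* segment, mi_slice_t* slice) {
  mi_slice_t* next = slice + slice->slice_count;
  return (next < mi_segment_slices_end(segment) && next->block_size == 0);
}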
703
704
705
706
/* -----------------------------------------------------------
  Page allocation
----------------------------------------------------------- */
709
710
// Note: may still return NULL if committing the memory failed
711
static mi_page_t* mi_segment_span_allocate(mi_segment_t* segment, size_t slice_index, size_t slice_count, mi_segments_tld_t* tld) {
712
mi_assert_internal(slice_index < segment->slice_entries);
713
mi_slice_t* const slice = &segment->slices[slice_index];
714
mi_assert_internal(slice->block_size==0 || slice->block_size==1);
715
716
// commit before changing the slice data
717
if (!mi_segment_ensure_committed(segment, _mi_segment_page_start_from_slice(segment, slice, 0, NULL), slice_count * MI_SEGMENT_SLICE_SIZE, tld->stats)) {
718
return NULL; // commit failed!
719
}
720
721
// convert the slices to a page
722
slice->slice_offset = 0;
723
slice->slice_count = (uint32_t)slice_count;
724
mi_assert_internal(slice->slice_count == slice_count);
725
const size_t bsize = slice_count * MI_SEGMENT_SLICE_SIZE;
726
slice->block_size = bsize;
727
mi_page_t* page = mi_slice_to_page(slice);
728
mi_assert_internal(mi_page_block_size(page) == bsize);
729
730
// set slice back pointers for the first MI_MAX_SLICE_OFFSET_COUNT entries
731
size_t extra = slice_count-1;
732
if (extra > MI_MAX_SLICE_OFFSET_COUNT) extra = MI_MAX_SLICE_OFFSET_COUNT;
if (slice_index + extra >= segment->slice_entries) extra = segment->slice_entries - slice_index - 1; // huge objects may have more slices than available entries in the segment->slices
734
735
mi_slice_t* slice_next = slice + 1;
736
for (size_t i = 1; i <= extra; i++, slice_next++) {
737
slice_next->slice_offset = (uint32_t)(sizeof(mi_slice_t)*i);
738
slice_next->slice_count = 0;
739
slice_next->block_size = 1;
740
}
741
742
// and also for the last one (if not set already) (the last one is needed for coalescing and for large alignments)
743
// note: the cast is needed for ubsan since the index can be larger than MI_SLICES_PER_SEGMENT for huge allocations (see #543)
744
mi_slice_t* last = slice + slice_count - 1;
745
mi_slice_t* end = (mi_slice_t*)mi_segment_slices_end(segment);
746
if (last > end) last = end;
747
if (last > slice) {
748
last->slice_offset = (uint32_t)(sizeof(mi_slice_t) * (last - slice));
749
last->slice_count = 0;
750
last->block_size = 1;
751
}
752
753
// and initialize the page
754
page->is_committed = true;
755
page->is_huge = (segment->kind == MI_SEGMENT_HUGE);
756
segment->used++;
757
return page;
758
}
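
// Back-pointer sketch (illustrative, not part of the allocator): interior slices of a
// page store in `slice_offset` the byte distance back to the head slice of their span,
// which is what lets an address inside a multi-slice page be mapped back to its page
// header in O(1) (compare `mi_slice_first` as used in mi_segment_span_free_coalesce).
static inline mi_slice_t* mi_example_span_head(mi_slice_t* s) {
  return (mi_slice_t*)((uint8_t*)s - s->slice_offset);
}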
759
760
static void mi_segment_slice_split(mi_segment_t* segment, mi_slice_t* slice, size_t slice_count, mi_segments_tld_t* tld) {
761
mi_assert_internal(_mi_ptr_segment(slice) == segment);
762
mi_assert_internal(slice->slice_count >= slice_count);
763
mi_assert_internal(slice->block_size > 0); // no more in free queue
764
if (slice->slice_count <= slice_count) return;
765
mi_assert_internal(segment->kind != MI_SEGMENT_HUGE);
766
size_t next_index = mi_slice_index(slice) + slice_count;
767
size_t next_count = slice->slice_count - slice_count;
768
mi_segment_span_free(segment, next_index, next_count, false /* don't purge left-over part */, tld);
769
slice->slice_count = (uint32_t)slice_count;
770
}
771
772
static mi_page_t* mi_segments_page_find_and_allocate(size_t slice_count, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld) {
773
mi_assert_internal(slice_count*MI_SEGMENT_SLICE_SIZE <= MI_LARGE_OBJ_SIZE_MAX);
774
// search from best fit up
775
mi_span_queue_t* sq = mi_span_queue_for(slice_count, tld);
776
if (slice_count == 0) slice_count = 1;
777
while (sq <= &tld->spans[MI_SEGMENT_BIN_MAX]) {
778
for (mi_slice_t* slice = sq->first; slice != NULL; slice = slice->next) {
779
if (slice->slice_count >= slice_count) {
780
// found one
781
mi_segment_t* segment = _mi_ptr_segment(slice);
782
if (_mi_arena_memid_is_suitable(segment->memid, req_arena_id)) {
783
// found a suitable page span
784
mi_span_queue_delete(sq, slice);
785
786
if (slice->slice_count > slice_count) {
787
mi_segment_slice_split(segment, slice, slice_count, tld);
788
}
789
mi_assert_internal(slice != NULL && slice->slice_count == slice_count && slice->block_size > 0);
790
mi_page_t* page = mi_segment_span_allocate(segment, mi_slice_index(slice), slice->slice_count, tld);
791
if (page == NULL) {
792
// commit failed; return NULL but first restore the slice
793
mi_segment_span_free_coalesce(slice, tld);
794
return NULL;
795
}
796
return page;
797
}
798
}
799
}
800
sq++;
801
}
802
// could not find a page..
803
return NULL;
804
}
805
806
807
/* -----------------------------------------------------------
  Segment allocation
----------------------------------------------------------- */
810
811
static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment, bool eager_delayed, mi_arena_id_t req_arena_id,
812
size_t* psegment_slices, size_t* pinfo_slices,
813
bool commit, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
814
815
{
816
mi_memid_t memid;
817
bool allow_large = (!eager_delayed && (MI_SECURE == 0)); // only allow large OS pages once we are no longer lazy
818
size_t align_offset = 0;
819
size_t alignment = MI_SEGMENT_ALIGN;
820
821
if (page_alignment > 0) {
822
// mi_assert_internal(huge_page != NULL);
823
mi_assert_internal(page_alignment >= MI_SEGMENT_ALIGN);
824
alignment = page_alignment;
825
const size_t info_size = (*pinfo_slices) * MI_SEGMENT_SLICE_SIZE;
826
align_offset = _mi_align_up( info_size, MI_SEGMENT_ALIGN );
827
const size_t extra = align_offset - info_size;
828
// recalculate due to potential guard pages
829
*psegment_slices = mi_segment_calculate_slices(required + extra, pinfo_slices);
830
mi_assert_internal(*psegment_slices > 0 && *psegment_slices <= UINT32_MAX);
831
}
832
833
const size_t segment_size = (*psegment_slices) * MI_SEGMENT_SLICE_SIZE;
834
mi_segment_t* segment = (mi_segment_t*)_mi_arena_alloc_aligned(segment_size, alignment, align_offset, commit, allow_large, req_arena_id, &memid, os_tld);
835
if (segment == NULL) {
836
return NULL; // failed to allocate
837
}
838
839
// ensure metadata part of the segment is committed
840
mi_commit_mask_t commit_mask;
841
if (memid.initially_committed) {
842
mi_commit_mask_create_full(&commit_mask);
843
}
844
else {
845
// at least commit the info slices
846
const size_t commit_needed = _mi_divide_up((*pinfo_slices)*MI_SEGMENT_SLICE_SIZE, MI_COMMIT_SIZE);
847
mi_assert_internal(commit_needed>0);
848
mi_commit_mask_create(0, commit_needed, &commit_mask);
849
mi_assert_internal(commit_needed*MI_COMMIT_SIZE >= (*pinfo_slices)*MI_SEGMENT_SLICE_SIZE);
850
if (!_mi_os_commit(segment, commit_needed*MI_COMMIT_SIZE, NULL, tld->stats)) {
851
_mi_arena_free(segment,segment_size,0,memid,tld->stats);
852
return NULL;
853
}
854
}
855
mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0);
856
857
segment->memid = memid;
858
segment->allow_decommit = !memid.is_pinned;
859
segment->allow_purge = segment->allow_decommit && (mi_option_get(mi_option_purge_delay) >= 0);
860
segment->segment_size = segment_size;
861
segment->commit_mask = commit_mask;
862
segment->purge_expire = 0;
863
mi_commit_mask_create_empty(&segment->purge_mask);
864
865
mi_segments_track_size((long)(segment_size), tld);
866
_mi_segment_map_allocated_at(segment);
867
return segment;
868
}
869
870
871
// Allocate a segment from the OS aligned to `MI_SEGMENT_SIZE` .
872
static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld, mi_page_t** huge_page)
873
{
874
mi_assert_internal((required==0 && huge_page==NULL) || (required>0 && huge_page != NULL));
875
876
// calculate needed sizes first
877
size_t info_slices;
878
size_t segment_slices = mi_segment_calculate_slices(required, &info_slices);
879
mi_assert_internal(segment_slices > 0 && segment_slices <= UINT32_MAX);
880
881
// Commit eagerly only if not the first N lazy segments (to reduce impact of many threads that allocate just a little)
882
const bool eager_delay = (// !_mi_os_has_overcommit() && // never delay on overcommit systems
883
_mi_current_thread_count() > 1 && // do not delay for the first N threads
884
tld->count < (size_t)mi_option_get(mi_option_eager_commit_delay));
885
const bool eager = !eager_delay && mi_option_is_enabled(mi_option_eager_commit);
886
bool commit = eager || (required > 0);
887
888
// Allocate the segment from the OS
889
mi_segment_t* segment = mi_segment_os_alloc(required, page_alignment, eager_delay, req_arena_id,
890
&segment_slices, &info_slices, commit, tld, os_tld);
891
if (segment == NULL) return NULL;
892
893
// zero the segment info? -- not always needed as it may be zero initialized from the OS
894
if (!segment->memid.initially_zero) {
895
ptrdiff_t ofs = offsetof(mi_segment_t, next);
896
size_t prefix = offsetof(mi_segment_t, slices) - ofs;
897
size_t zsize = prefix + (sizeof(mi_slice_t) * (segment_slices + 1)); // one more
898
_mi_memzero((uint8_t*)segment + ofs, zsize);
899
}
900
901
// initialize the rest of the segment info
902
const size_t slice_entries = (segment_slices > MI_SLICES_PER_SEGMENT ? MI_SLICES_PER_SEGMENT : segment_slices);
903
segment->segment_slices = segment_slices;
904
segment->segment_info_slices = info_slices;
905
segment->thread_id = _mi_thread_id();
906
segment->cookie = _mi_ptr_cookie(segment);
907
segment->slice_entries = slice_entries;
908
segment->kind = (required == 0 ? MI_SEGMENT_NORMAL : MI_SEGMENT_HUGE);
909
910
// _mi_memzero(segment->slices, sizeof(mi_slice_t)*(info_slices+1));
911
_mi_stat_increase(&tld->stats->page_committed, mi_segment_info_size(segment));
912
913
// set up guard pages
914
size_t guard_slices = 0;
915
if (MI_SECURE>0) {
916
// in secure mode, we set up a protected page in between the segment info
917
// and the page data, and at the end of the segment.
918
size_t os_pagesize = _mi_os_page_size();
919
_mi_os_protect((uint8_t*)segment + mi_segment_info_size(segment) - os_pagesize, os_pagesize);
920
uint8_t* end = (uint8_t*)segment + mi_segment_size(segment) - os_pagesize;
921
mi_segment_ensure_committed(segment, end, os_pagesize, tld->stats);
922
_mi_os_protect(end, os_pagesize);
923
if (slice_entries == segment_slices) segment->slice_entries--; // don't use the last slice :-(
924
guard_slices = 1;
925
}
926
927
// reserve first slices for segment info
928
mi_page_t* page0 = mi_segment_span_allocate(segment, 0, info_slices, tld);
929
mi_assert_internal(page0!=NULL); if (page0==NULL) return NULL; // cannot fail as we always commit in advance
930
mi_assert_internal(segment->used == 1);
931
segment->used = 0; // don't count our internal slices towards usage
932
933
// initialize initial free pages
934
if (segment->kind == MI_SEGMENT_NORMAL) { // not a huge page
935
mi_assert_internal(huge_page==NULL);
936
mi_segment_span_free(segment, info_slices, segment->slice_entries - info_slices, false /* don't purge */, tld);
937
}
938
else {
939
mi_assert_internal(huge_page!=NULL);
940
mi_assert_internal(mi_commit_mask_is_empty(&segment->purge_mask));
941
mi_assert_internal(mi_commit_mask_is_full(&segment->commit_mask));
942
*huge_page = mi_segment_span_allocate(segment, info_slices, segment_slices - info_slices - guard_slices, tld);
943
mi_assert_internal(*huge_page != NULL); // cannot fail as we commit in advance
944
}
945
946
mi_assert_expensive(mi_segment_is_valid(segment,tld));
947
return segment;
948
}
949
950
951
static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t* tld) {
952
MI_UNUSED(force);
953
mi_assert_internal(segment != NULL);
954
mi_assert_internal(segment->next == NULL);
955
mi_assert_internal(segment->used == 0);
956
957
// Remove the free pages
958
mi_slice_t* slice = &segment->slices[0];
959
const mi_slice_t* end = mi_segment_slices_end(segment);
960
#if MI_DEBUG>1
961
size_t page_count = 0;
962
#endif
963
while (slice < end) {
964
mi_assert_internal(slice->slice_count > 0);
965
mi_assert_internal(slice->slice_offset == 0);
966
mi_assert_internal(mi_slice_index(slice)==0 || slice->block_size == 0); // no more used pages ..
967
if (slice->block_size == 0 && segment->kind != MI_SEGMENT_HUGE) {
968
mi_segment_span_remove_from_queue(slice, tld);
969
}
970
#if MI_DEBUG>1
971
page_count++;
972
#endif
973
slice = slice + slice->slice_count;
974
}
975
mi_assert_internal(page_count == 2); // first page is allocated by the segment itself
976
977
// stats
978
_mi_stat_decrease(&tld->stats->page_committed, mi_segment_info_size(segment));
979
980
// return it to the OS
981
mi_segment_os_free(segment, tld);
982
}
983
984
985
/* -----------------------------------------------------------
  Page Free
----------------------------------------------------------- */
988
989
static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld);
990
991
// note: can be called on abandoned pages
992
static mi_slice_t* mi_segment_page_clear(mi_page_t* page, mi_segments_tld_t* tld) {
993
mi_assert_internal(page->block_size > 0);
994
mi_assert_internal(mi_page_all_free(page));
995
mi_segment_t* segment = _mi_ptr_segment(page);
996
mi_assert_internal(segment->used > 0);
997
998
size_t inuse = page->capacity * mi_page_block_size(page);
999
_mi_stat_decrease(&tld->stats->page_committed, inuse);
1000
_mi_stat_decrease(&tld->stats->pages, 1);
1001
1002
// reset the page memory to reduce memory pressure?
1003
if (segment->allow_decommit && mi_option_is_enabled(mi_option_deprecated_page_reset)) {
1004
size_t psize;
1005
uint8_t* start = _mi_segment_page_start(segment, page, &psize);
1006
_mi_os_reset(start, psize, tld->stats);
1007
}
1008
1009
// zero the page data, but not the segment fields and heap tag
1010
page->is_zero_init = false;
1011
uint8_t heap_tag = page->heap_tag;
1012
ptrdiff_t ofs = offsetof(mi_page_t, capacity);
1013
_mi_memzero((uint8_t*)page + ofs, sizeof(*page) - ofs);
1014
page->block_size = 1;
1015
page->heap_tag = heap_tag;
1016
1017
// and free it
1018
mi_slice_t* slice = mi_segment_span_free_coalesce(mi_page_to_slice(page), tld);
1019
segment->used--;
1020
// cannot assert segment valid as it is called during reclaim
1021
// mi_assert_expensive(mi_segment_is_valid(segment, tld));
1022
return slice;
1023
}
1024
1025
void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld)
1026
{
1027
mi_assert(page != NULL);
1028
1029
mi_segment_t* segment = _mi_page_segment(page);
1030
mi_assert_expensive(mi_segment_is_valid(segment,tld));
1031
1032
// mark it as free now
1033
mi_segment_page_clear(page, tld);
1034
mi_assert_expensive(mi_segment_is_valid(segment, tld));
1035
1036
if (segment->used == 0) {
1037
// no more used pages; remove from the free list and free the segment
1038
mi_segment_free(segment, force, tld);
1039
}
1040
else if (segment->used == segment->abandoned) {
1041
// only abandoned pages; remove from free list and abandon
1042
mi_segment_abandon(segment,tld);
1043
}
1044
else {
1045
// perform delayed purges
1046
mi_segment_try_purge(segment, false /* force? */, tld->stats);
1047
}
1048
}
1049
1050
1051
/* -----------------------------------------------------------
  Abandonment

  When threads terminate, they can leave segments with
  live blocks (reachable through other threads). Such segments
  are "abandoned" and will be reclaimed by other threads to
  reuse their pages and/or free them eventually. The
  `thread_id` of such segments is 0.

  When a block is freed in an abandoned segment, the segment
  is reclaimed into the heap of the freeing thread.

  Moreover, when threads are looking for a fresh segment, they
  first consider abandoned segments -- these can be found by
  scanning the arena memory (segments outside arena memory are
  only reclaimed by a free).
----------------------------------------------------------- */
1068
1069
// legacy: Wait until there are no more pending reads on segments that used to be in the abandoned list
1070
void _mi_abandoned_await_readers(void) {
1071
// nothing needed
1072
}
1073
1074
/* -----------------------------------------------------------
  Abandon segment/page
----------------------------------------------------------- */
1077
1078
static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) {
1079
mi_assert_internal(segment->used == segment->abandoned);
1080
mi_assert_internal(segment->used > 0);
1081
mi_assert_internal(segment->abandoned_visits == 0);
1082
mi_assert_expensive(mi_segment_is_valid(segment,tld));
1083
1084
// remove the free pages from the free page queues
1085
mi_slice_t* slice = &segment->slices[0];
1086
const mi_slice_t* end = mi_segment_slices_end(segment);
1087
while (slice < end) {
1088
mi_assert_internal(slice->slice_count > 0);
1089
mi_assert_internal(slice->slice_offset == 0);
1090
if (slice->block_size == 0) { // a free page
1091
mi_segment_span_remove_from_queue(slice,tld);
1092
slice->block_size = 0; // but keep it free
1093
}
1094
slice = slice + slice->slice_count;
1095
}
1096
1097
// perform delayed decommits (forcing is much slower on mstress)
1098
// Only abandoned segments in arena memory can be reclaimed without a free
1099
// so if a segment is not from an arena we force purge here to be conservative.
1100
const bool force_purge = (segment->memid.memkind != MI_MEM_ARENA) || mi_option_is_enabled(mi_option_abandoned_page_purge);
1101
mi_segment_try_purge(segment, force_purge, tld->stats);
1102
1103
// all pages in the segment are abandoned; add it to the abandoned list
1104
_mi_stat_increase(&tld->stats->segments_abandoned, 1);
1105
mi_segments_track_size(-((long)mi_segment_size(segment)), tld);
1106
segment->thread_id = 0;
1107
segment->abandoned_visits = 1; // from 0 to 1 to signify it is abandoned
1108
if (segment->was_reclaimed) {
1109
tld->reclaim_count--;
1110
segment->was_reclaimed = false;
1111
}
1112
_mi_arena_segment_mark_abandoned(segment);
1113
}
1114
1115
void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld) {
1116
mi_assert(page != NULL);
1117
mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE);
1118
mi_assert_internal(mi_page_heap(page) == NULL);
1119
mi_segment_t* segment = _mi_page_segment(page);
1120
1121
mi_assert_expensive(mi_segment_is_valid(segment,tld));
1122
segment->abandoned++;
1123
1124
_mi_stat_increase(&tld->stats->pages_abandoned, 1);
1125
mi_assert_internal(segment->abandoned <= segment->used);
1126
if (segment->used == segment->abandoned) {
1127
// all pages are abandoned, abandon the entire segment
1128
mi_segment_abandon(segment, tld);
1129
}
1130
}
1131
1132
/* -----------------------------------------------------------
  Reclaim abandoned pages
----------------------------------------------------------- */
1135
1136
static mi_slice_t* mi_slices_start_iterate(mi_segment_t* segment, const mi_slice_t** end) {
1137
mi_slice_t* slice = &segment->slices[0];
1138
*end = mi_segment_slices_end(segment);
1139
mi_assert_internal(slice->slice_count>0 && slice->block_size>0); // segment allocated page
1140
slice = slice + slice->slice_count; // skip the first segment allocated page
1141
return slice;
1142
}
1143
1144
// Possibly free pages and check if free space is available
1145
static bool mi_segment_check_free(mi_segment_t* segment, size_t slices_needed, size_t block_size, mi_segments_tld_t* tld)
1146
{
1147
mi_assert_internal(mi_segment_is_abandoned(segment));
1148
bool has_page = false;
1149
1150
// for all slices
1151
const mi_slice_t* end;
1152
mi_slice_t* slice = mi_slices_start_iterate(segment, &end);
1153
while (slice < end) {
1154
mi_assert_internal(slice->slice_count > 0);
1155
mi_assert_internal(slice->slice_offset == 0);
1156
if (mi_slice_is_used(slice)) { // used page
1157
// ensure used count is up to date and collect potential concurrent frees
1158
mi_page_t* const page = mi_slice_to_page(slice);
1159
_mi_page_free_collect(page, false);
1160
if (mi_page_all_free(page)) {
1161
// if this page is all free now, free it without adding to any queues (yet)
1162
mi_assert_internal(page->next == NULL && page->prev==NULL);
1163
_mi_stat_decrease(&tld->stats->pages_abandoned, 1);
1164
segment->abandoned--;
1165
slice = mi_segment_page_clear(page, tld); // re-assign slice due to coalesce!
1166
mi_assert_internal(!mi_slice_is_used(slice));
1167
if (slice->slice_count >= slices_needed) {
1168
has_page = true;
1169
}
1170
}
1171
else if (mi_page_block_size(page) == block_size && mi_page_has_any_available(page)) {
1172
// a page has available free blocks of the right size
1173
has_page = true;
1174
}
1175
}
1176
else {
1177
// empty span
1178
if (slice->slice_count >= slices_needed) {
1179
has_page = true;
1180
}
1181
}
1182
slice = slice + slice->slice_count;
1183
}
1184
return has_page;
1185
}
1186
1187
// Reclaim an abandoned segment; returns NULL if the segment was freed
1188
// set `right_page_reclaimed` to `true` if it reclaimed a page of the right `block_size` that was not full.
1189
static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap, size_t requested_block_size, bool* right_page_reclaimed, mi_segments_tld_t* tld) {
1190
if (right_page_reclaimed != NULL) { *right_page_reclaimed = false; }
1191
// can be 0 still with abandoned_next, or already a thread id for segments outside an arena that are reclaimed on a free.
1192
mi_assert_internal(mi_atomic_load_relaxed(&segment->thread_id) == 0 || mi_atomic_load_relaxed(&segment->thread_id) == _mi_thread_id());
1193
mi_atomic_store_release(&segment->thread_id, _mi_thread_id());
1194
segment->abandoned_visits = 0;
1195
segment->was_reclaimed = true;
1196
tld->reclaim_count++;
1197
mi_segments_track_size((long)mi_segment_size(segment), tld);
1198
mi_assert_internal(segment->next == NULL);
1199
_mi_stat_decrease(&tld->stats->segments_abandoned, 1);
1200
1201
// for all slices
1202
const mi_slice_t* end;
1203
mi_slice_t* slice = mi_slices_start_iterate(segment, &end);
1204
while (slice < end) {
1205
mi_assert_internal(slice->slice_count > 0);
1206
mi_assert_internal(slice->slice_offset == 0);
1207
if (mi_slice_is_used(slice)) {
1208
// in use: reclaim the page in our heap
1209
mi_page_t* page = mi_slice_to_page(slice);
1210
mi_assert_internal(page->is_committed);
1211
mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE);
1212
mi_assert_internal(mi_page_heap(page) == NULL);
1213
mi_assert_internal(page->next == NULL && page->prev==NULL);
1214
_mi_stat_decrease(&tld->stats->pages_abandoned, 1);
1215
segment->abandoned--;
1216
// set the heap again and allow heap thread delayed free again.
1217
mi_heap_t* target_heap = _mi_heap_by_tag(heap, page->heap_tag); // allow custom heaps to separate objects
1218
if (target_heap == NULL) {
1219
target_heap = heap;
1220
_mi_error_message(EINVAL, "page with tag %u cannot be reclaimed by a heap with the same tag (using %u instead)\n", page->heap_tag, heap->tag );
1221
}
1222
mi_page_set_heap(page, target_heap);
1223
_mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, true); // override never (after heap is set)
1224
_mi_page_free_collect(page, false); // ensure used count is up to date
1225
if (mi_page_all_free(page)) {
1226
// if everything free by now, free the page
slice = mi_segment_page_clear(page, tld); // set slice again due to coalescing
1228
}
1229
else {
1230
// otherwise reclaim it into the heap
1231
_mi_page_reclaim(target_heap, page);
1232
if (requested_block_size == mi_page_block_size(page) && mi_page_has_any_available(page) && heap == target_heap) {
1233
if (right_page_reclaimed != NULL) { *right_page_reclaimed = true; }
1234
}
1235
}
1236
}
1237
else {
1238
// the span is free, add it to our page queues
slice = mi_segment_span_free_coalesce(slice, tld); // set slice again due to coalescing
1240
}
1241
mi_assert_internal(slice->slice_count>0 && slice->slice_offset==0);
1242
slice = slice + slice->slice_count;
1243
}
1244
1245
mi_assert(segment->abandoned == 0);
1246
mi_assert_expensive(mi_segment_is_valid(segment, tld));
1247
if (segment->used == 0) { // due to page_clear
1248
mi_assert_internal(right_page_reclaimed == NULL || !(*right_page_reclaimed));
1249
mi_segment_free(segment, false, tld);
1250
return NULL;
1251
}
1252
else {
1253
return segment;
1254
}
1255
}
1256
1257
// attempt to reclaim a particular segment (called from multi threaded free `alloc.c:mi_free_block_mt`)
1258
bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment) {
1259
if (mi_atomic_load_relaxed(&segment->thread_id) != 0) return false; // it is not abandoned
1260
// don't reclaim more from a free than half the current segments
// this is to prevent a pure freeing thread from starting to own too many segments
1262
if (heap->tld->segments.reclaim_count * 2 > heap->tld->segments.count) return false;
1263
if (_mi_arena_segment_clear_abandoned(segment)) { // atomically unabandon
1264
mi_segment_t* res = mi_segment_reclaim(segment, heap, 0, NULL, &heap->tld->segments);
1265
mi_assert_internal(res == segment);
1266
return (res != NULL);
1267
}
1268
return false;
1269
}
1270
1271
void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld) {
1272
mi_segment_t* segment;
1273
mi_arena_field_cursor_t current; _mi_arena_field_cursor_init(heap, &current);
1274
while ((segment = _mi_arena_segment_clear_abandoned_next(&current)) != NULL) {
1275
mi_segment_reclaim(segment, heap, 0, NULL, tld);
1276
}
1277
}
1278
1279
static long mi_segment_get_reclaim_tries(void) {
1280
// limit the tries to 10% (default) of the abandoned segments with at least 8 and at most 1024 tries.
1281
const size_t perc = (size_t)mi_option_get_clamp(mi_option_max_segment_reclaim, 0, 100);
1282
if (perc <= 0) return 0;
1283
const size_t total_count = _mi_arena_segment_abandoned_count();
1284
if (total_count == 0) return 0;
1285
const size_t relative_count = (total_count > 10000 ? (total_count / 100) * perc : (total_count * perc) / 100); // avoid overflow
1286
long max_tries = (long)(relative_count <= 1 ? 1 : (relative_count > 1024 ? 1024 : relative_count));
1287
if (max_tries < 8 && total_count > 8) { max_tries = 8; }
1288
return max_tries;
1289
}
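
// Worked example (illustrative): with the default 10% and 500 abandoned segments this
// yields 50 tries; with 20 abandoned segments the 10% figure (2) is raised to the
// minimum of 8 tries; and the result is always capped at 1024 tries.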
1290
1291
static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t needed_slices, size_t block_size, bool* reclaimed, mi_segments_tld_t* tld)
1292
{
1293
*reclaimed = false;
1294
long max_tries = mi_segment_get_reclaim_tries();
1295
if (max_tries <= 0) return NULL;
1296
1297
mi_segment_t* segment;
1298
mi_arena_field_cursor_t current; _mi_arena_field_cursor_init(heap, &current);
1299
while ((max_tries-- > 0) && ((segment = _mi_arena_segment_clear_abandoned_next(&current)) != NULL))
1300
{
1301
segment->abandoned_visits++;
// todo: should we respect numa affinity for abandoned reclaim? perhaps only for the first visit?
1303
// todo: an arena exclusive heap will potentially visit many abandoned unsuitable segments and use many tries
1304
// Perhaps we can skip non-suitable ones in a better way?
1305
bool is_suitable = _mi_heap_memid_is_suitable(heap, segment->memid);
1306
bool has_page = mi_segment_check_free(segment,needed_slices,block_size,tld); // try to free up pages (due to concurrent frees)
1307
if (segment->used == 0) {
1308
// free the segment (by forced reclaim) to make it available to other threads.
1309
// note1: we prefer to free a segment as that might lead to reclaiming another
1310
// segment that is still partially used.
1311
// note2: we could in principle optimize this by skipping reclaim and directly
1312
// freeing but that would violate some invariants temporarily)
1313
mi_segment_reclaim(segment, heap, 0, NULL, tld);
1314
}
1315
else if (has_page && is_suitable) {
1316
// found a large enough free span, or a page of the right block_size with free space
1317
// we return the result of reclaim (which is usually `segment`) as it might free
1318
// the segment due to concurrent frees (in which case `NULL` is returned).
1319
return mi_segment_reclaim(segment, heap, block_size, reclaimed, tld);
1320
}
1321
else if (segment->abandoned_visits > 3 && is_suitable) {
1322
// always reclaim on 3rd visit to limit the abandoned queue length.
1323
mi_segment_reclaim(segment, heap, 0, NULL, tld);
1324
}
1325
else {
1326
// otherwise, push on the visited list so it gets not looked at too quickly again
1327
mi_segment_try_purge(segment, false /* true force? */, tld->stats); // force purge if needed as we may not visit soon again
1328
_mi_arena_segment_mark_abandoned(segment);
1329
}
1330
}
1331
return NULL;
1332
}
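
/* ---------------------------------------------------------------------------
  Summary of the per-segment decision in mi_segment_try_reclaim above:
    used == 0                              -> reclaim (and thereby free) the segment
    has a usable page/span and is suitable -> reclaim it and return it to the caller
    visited often enough and is suitable   -> reclaim it anyway to bound the abandoned list
    otherwise                              -> purge if needed and mark it abandoned again
--------------------------------------------------------------------------- */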


void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld)
{
  mi_segment_t* segment;
  mi_arena_field_cursor_t current; _mi_arena_field_cursor_init(heap, &current);
  long max_tries = (force ? (long)_mi_arena_segment_abandoned_count() : 1024); // limit latency
  while ((max_tries-- > 0) && ((segment = _mi_arena_segment_clear_abandoned_next(&current)) != NULL)) {
    mi_segment_check_free(segment,0,0,tld); // try to free up pages (due to concurrent frees)
    if (segment->used == 0) {
      // free the segment (by forced reclaim) to make it available to other threads.
      // note: we could in principle optimize this by skipping the reclaim and freeing
      //       directly, but that would violate some invariants temporarily.
      mi_segment_reclaim(segment, heap, 0, NULL, tld);
    }
    else {
      // otherwise, purge if needed and push it back on the visited list
      // note: a forced purge can be expensive if many threads are destroyed/created, as in mstress.
      mi_segment_try_purge(segment, force, tld->stats);
      _mi_arena_segment_mark_abandoned(segment);
    }
  }
}
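
/* ---------------------------------------------------------------------------
  Note on the `force` flag of _mi_abandoned_collect above: a forced collect walks
  every currently abandoned segment and purges aggressively, while a non-forced
  collect caps the walk at 1024 segments and avoids forced purges to bound latency.
--------------------------------------------------------------------------- */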

/* -----------------------------------------------------------
   Reclaim or allocate
----------------------------------------------------------- */

static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t needed_slices, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
{
  mi_assert_internal(block_size <= MI_LARGE_OBJ_SIZE_MAX);

  // 1. try to reclaim an abandoned segment
  bool reclaimed;
  mi_segment_t* segment = mi_segment_try_reclaim(heap, needed_slices, block_size, &reclaimed, tld);
  if (reclaimed) {
    // a page of the right block size was reclaimed directly into the heap
    mi_assert_internal(segment != NULL);
    return NULL; // pretend out-of-memory as the page will be in the page queue of the heap with available blocks
  }
  else if (segment != NULL) {
    // reclaimed a segment with a large enough empty span in it
    return segment;
  }
  // 2. otherwise allocate a fresh segment
  return mi_segment_alloc(0, 0, heap->arena_id, tld, os_tld, NULL);
}


/* -----------------------------------------------------------
   Page allocation
----------------------------------------------------------- */

static mi_page_t* mi_segments_page_alloc(mi_heap_t* heap, mi_page_kind_t page_kind, size_t required, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
{
  mi_assert_internal(required <= MI_LARGE_OBJ_SIZE_MAX && page_kind <= MI_PAGE_LARGE);

  // find a free page
  size_t page_size = _mi_align_up(required, (required > MI_MEDIUM_PAGE_SIZE ? MI_MEDIUM_PAGE_SIZE : MI_SEGMENT_SLICE_SIZE));
  size_t slices_needed = page_size / MI_SEGMENT_SLICE_SIZE;
  mi_assert_internal(slices_needed * MI_SEGMENT_SLICE_SIZE == page_size);
  mi_page_t* page = mi_segments_page_find_and_allocate(slices_needed, heap->arena_id, tld); //(required <= MI_SMALL_SIZE_MAX ? 0 : slices_needed), tld);
  if (page==NULL) {
    // no free page, allocate a new segment and try again
    if (mi_segment_reclaim_or_alloc(heap, slices_needed, block_size, tld, os_tld) == NULL) {
      // OOM or reclaimed a good page in the heap
      return NULL;
    }
    else {
      // otherwise try again
      return mi_segments_page_alloc(heap, page_kind, required, block_size, tld, os_tld);
    }
  }
  mi_assert_internal(page != NULL && page->slice_count*MI_SEGMENT_SLICE_SIZE == page_size);
  mi_assert_internal(_mi_ptr_segment(page)->thread_id == _mi_thread_id());
  mi_segment_try_purge(_mi_ptr_segment(page), false, tld->stats);
  return page;
}
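
/* ---------------------------------------------------------------------------
  Worked example for the sizing above, assuming the usual 64-bit defaults of
  MI_SEGMENT_SLICE_SIZE == 64 KiB and MI_MEDIUM_PAGE_SIZE == 512 KiB (these are
  configuration dependent):
    required == 200000 bytes  ->  page_size == 256 KiB  ->  slices_needed == 4
    required == 600 KiB       ->  rounded up to MI_MEDIUM_PAGE_SIZE granularity,
                                  page_size == 1 MiB    ->  slices_needed == 16
  The made-up helper below (not compiled) restates the same rounding.
--------------------------------------------------------------------------- */
#if 0
static size_t sketch_slices_needed(size_t required) {
  const size_t unit = (required > MI_MEDIUM_PAGE_SIZE ? MI_MEDIUM_PAGE_SIZE : MI_SEGMENT_SLICE_SIZE);
  const size_t page_size = _mi_align_up(required, unit);
  return page_size / MI_SEGMENT_SLICE_SIZE;
}
#endif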



/* -----------------------------------------------------------
   Huge page allocation
----------------------------------------------------------- */

static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
{
  mi_page_t* page = NULL;
  mi_segment_t* segment = mi_segment_alloc(size,page_alignment,req_arena_id,tld,os_tld,&page);
  if (segment == NULL || page==NULL) return NULL;
  mi_assert_internal(segment->used==1);
  mi_assert_internal(mi_page_block_size(page) >= size);
  #if MI_HUGE_PAGE_ABANDON
  segment->thread_id = 0; // huge segments are immediately abandoned
  #endif

  // for huge pages we initialize the block_size as we may
  // overallocate to accommodate large alignments.
  size_t psize;
  uint8_t* start = _mi_segment_page_start(segment, page, &psize);
  page->block_size = psize;
  mi_assert_internal(page->is_huge);

  // decommit the part of the prefix of a page that will not be used; this can be quite large (close to MI_SEGMENT_SIZE)
  if (page_alignment > 0 && segment->allow_decommit) {
    uint8_t* aligned_p = (uint8_t*)_mi_align_up((uintptr_t)start, page_alignment);
    mi_assert_internal(_mi_is_aligned(aligned_p, page_alignment));
    mi_assert_internal(psize - (aligned_p - start) >= size);
    uint8_t* decommit_start = start + sizeof(mi_block_t); // for the free list
    ptrdiff_t decommit_size = aligned_p - decommit_start;
    _mi_os_reset(decommit_start, decommit_size, &_mi_stats_main); // note: cannot use segment_decommit on huge segments
  }

  return page;
}
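
/* ---------------------------------------------------------------------------
  Layout sketch for the prefix decommit above: when a huge allocation requests a
  large alignment, the usable block starts at `aligned_p`, which may lie far past
  the page start. Everything between `start + sizeof(mi_block_t)` (kept committed
  for the free-list link) and `aligned_p` is never touched, so it is handed back
  to the OS with _mi_os_reset:

      start                                   aligned_p            start + psize
        |--[mi_block_t]--[ decommitted prefix ]--|---- usable block ----|
--------------------------------------------------------------------------- */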

#if MI_HUGE_PAGE_ABANDON
// free huge block from another thread
void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block) {
  // huge page segments are always abandoned and can be freed immediately by any thread
  mi_assert_internal(segment->kind==MI_SEGMENT_HUGE);
  mi_assert_internal(segment == _mi_page_segment(page));
  mi_assert_internal(mi_atomic_load_relaxed(&segment->thread_id)==0);

  // claim it and free
  mi_heap_t* heap = mi_heap_get_default(); // issue #221; don't use the internal get_default_heap as we need to ensure the thread is initialized.
  // paranoia: if this is the last reference, the cas should always succeed
  size_t expected_tid = 0;
  if (mi_atomic_cas_strong_acq_rel(&segment->thread_id, &expected_tid, heap->thread_id)) {
    mi_block_set_next(page, block, page->free);
    page->free = block;
    page->used--;
    page->is_zero_init = false;
    mi_assert(page->used == 0);
    mi_tld_t* tld = heap->tld;
    _mi_segment_page_free(page, true, &tld->segments);
  }
  #if (MI_DEBUG!=0)
  else {
    mi_assert_internal(false);
  }
  #endif
}

#else
// reset memory of a huge block from another thread
void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_block_t* block) {
  MI_UNUSED(page);
  mi_assert_internal(segment->kind == MI_SEGMENT_HUGE);
  mi_assert_internal(segment == _mi_page_segment(page));
  mi_assert_internal(page->used == 1); // this is called just before the free
  mi_assert_internal(page->free == NULL);
  if (segment->allow_decommit) {
    size_t csize = mi_usable_size(block);
    if (csize > sizeof(mi_block_t)) {
      csize = csize - sizeof(mi_block_t);
      uint8_t* p = (uint8_t*)block + sizeof(mi_block_t);
      _mi_os_reset(p, csize, &_mi_stats_main); // note: cannot use segment_decommit on huge segments
    }
  }
}
#endif
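
/* ---------------------------------------------------------------------------
  Note on the two compile-time strategies above: with MI_HUGE_PAGE_ABANDON, huge
  segments carry thread_id 0 and any thread may claim one with a compare-and-swap
  on thread_id before freeing it; without it, a cross-thread free only resets
  (decommits) the block's memory here, while the page itself is freed later by
  its owning thread.
--------------------------------------------------------------------------- */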

/* -----------------------------------------------------------
   Page allocation and free
----------------------------------------------------------- */
mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
  mi_page_t* page;
  if mi_unlikely(page_alignment > MI_BLOCK_ALIGNMENT_MAX) {
    mi_assert_internal(_mi_is_power_of_two(page_alignment));
    mi_assert_internal(page_alignment >= MI_SEGMENT_SIZE);
    if (page_alignment < MI_SEGMENT_SIZE) { page_alignment = MI_SEGMENT_SIZE; }
    page = mi_segment_huge_page_alloc(block_size,page_alignment,heap->arena_id,tld,os_tld);
  }
  else if (block_size <= MI_SMALL_OBJ_SIZE_MAX) {
    page = mi_segments_page_alloc(heap,MI_PAGE_SMALL,block_size,block_size,tld,os_tld);
  }
  else if (block_size <= MI_MEDIUM_OBJ_SIZE_MAX) {
    page = mi_segments_page_alloc(heap,MI_PAGE_MEDIUM,MI_MEDIUM_PAGE_SIZE,block_size,tld, os_tld);
  }
  else if (block_size <= MI_LARGE_OBJ_SIZE_MAX) {
    page = mi_segments_page_alloc(heap,MI_PAGE_LARGE,block_size,block_size,tld, os_tld);
  }
  else {
    page = mi_segment_huge_page_alloc(block_size,page_alignment,heap->arena_id,tld,os_tld);
  }
  mi_assert_internal(page == NULL || _mi_heap_memid_is_suitable(heap, _mi_page_segment(page)->memid));
  mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page),tld));
  return page;
}
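
/* ---------------------------------------------------------------------------
  Dispatch summary for _mi_segment_page_alloc above: an over-aligned request
  (page_alignment > MI_BLOCK_ALIGNMENT_MAX) always goes through the huge-page
  path in its own segment; otherwise the block size selects a small, medium, or
  large page, and anything beyond MI_LARGE_OBJ_SIZE_MAX falls through to a huge
  page as well.
--------------------------------------------------------------------------- */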