GitHub Repository: emscripten-core/emscripten
Path: blob/main/system/lib/mimalloc/src/page-queue.c
/*----------------------------------------------------------------------------
Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/

/* -----------------------------------------------------------
  Definition of page queues for each block size
----------------------------------------------------------- */

#ifndef MI_IN_PAGE_C
#error "this file should be included from 'page.c'"
// include to help an IDE
#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "mimalloc/atomic.h"
#endif

/* -----------------------------------------------------------
  Minimal alignment in machine words (i.e. `sizeof(void*)`)
----------------------------------------------------------- */

#if (MI_MAX_ALIGN_SIZE > 4*MI_INTPTR_SIZE)
  #error "define alignment for more than 4x word size for this platform"
#elif (MI_MAX_ALIGN_SIZE > 2*MI_INTPTR_SIZE)
  #define MI_ALIGN4W   // 4 machine words minimal alignment
#elif (MI_MAX_ALIGN_SIZE > MI_INTPTR_SIZE)
  #define MI_ALIGN2W   // 2 machine words minimal alignment
#else
  // ok, default alignment is 1 word
#endif
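
/* Worked example (illustrative, not from the upstream source): on a typical
   x86-64 target MI_INTPTR_SIZE is 8 and MI_MAX_ALIGN_SIZE is commonly 16, so
   the second branch applies and MI_ALIGN2W is defined; bin sizes are then
   kept at multiples of two machine words (16 bytes). */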

/* -----------------------------------------------------------
  Queue query
----------------------------------------------------------- */

static inline bool mi_page_queue_is_huge(const mi_page_queue_t* pq) {
  return (pq->block_size == (MI_MEDIUM_OBJ_SIZE_MAX+sizeof(uintptr_t)));
}

static inline bool mi_page_queue_is_full(const mi_page_queue_t* pq) {
  return (pq->block_size == (MI_MEDIUM_OBJ_SIZE_MAX+(2*sizeof(uintptr_t))));
}

static inline bool mi_page_queue_is_special(const mi_page_queue_t* pq) {
  return (pq->block_size > MI_MEDIUM_OBJ_SIZE_MAX);
}
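
/* Note (follows from the definitions above): the huge and full queues are
   encoded by sentinel block sizes just past MI_MEDIUM_OBJ_SIZE_MAX -- one
   extra word for the huge queue, two for the full queue -- so a queue is
   "special" exactly when it is one of these two. */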

/* -----------------------------------------------------------
  Bins
----------------------------------------------------------- */

// Return the bin for a given field size.
// Returns MI_BIN_HUGE if the size is too large.
// We use `wsize` for the size in "machine word sizes",
// i.e. byte size == `wsize*sizeof(void*)`.
static inline uint8_t mi_bin(size_t size) {
  size_t wsize = _mi_wsize_from_size(size);
  uint8_t bin;
  if (wsize <= 1) {
    bin = 1;
  }
  #if defined(MI_ALIGN4W)
  else if (wsize <= 4) {
    bin = (uint8_t)((wsize+1)&~1); // round to double word sizes
  }
  #elif defined(MI_ALIGN2W)
  else if (wsize <= 8) {
    bin = (uint8_t)((wsize+1)&~1); // round to double word sizes
  }
  #else
  else if (wsize <= 8) {
    bin = (uint8_t)wsize;
  }
  #endif
  else if (wsize > MI_MEDIUM_OBJ_WSIZE_MAX) {
    bin = MI_BIN_HUGE;
  }
  else {
    #if defined(MI_ALIGN4W)
    if (wsize <= 16) { wsize = (wsize+3)&~3; } // round to 4x word sizes
    #endif
    wsize--;
    // find the highest bit
    uint8_t b = (uint8_t)mi_bsr(wsize);  // note: wsize != 0
    // and use the top 3 bits to determine the bin (~12.5% worst internal fragmentation);
    // adjust with 3 because we do not round the first 8 sizes,
    // which each get an exact bin
    bin = ((b << 2) + (uint8_t)((wsize >> (b - 2)) & 0x03)) - 3;
    mi_assert_internal(bin < MI_BIN_HUGE);
  }
  mi_assert_internal(bin > 0 && bin <= MI_BIN_HUGE);
  return bin;
}
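
/* Worked example (illustrative; assumes 64-bit machine words, so that
   _mi_wsize_from_size(800) == 100, and that 800 <= MI_MEDIUM_OBJ_SIZE_MAX):
     wsize = 100; after `wsize--`: 99 == 0b1100011
     b   = mi_bsr(99) = 6
     bin = (6 << 2) + ((99 >> 4) & 0x03) - 3 = 24 + 2 - 3 = 23
   Each power-of-two size range is split over 4 bins, which is what bounds
   the worst-case internal fragmentation at ~12.5%. */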

/* -----------------------------------------------------------
  Queue of pages with free blocks
----------------------------------------------------------- */

uint8_t _mi_bin(size_t size) {
  return mi_bin(size);
}

size_t _mi_bin_size(uint8_t bin) {
  return _mi_heap_empty.pages[bin].block_size;
}

// Good size for allocation
size_t mi_good_size(size_t size) mi_attr_noexcept {
  if (size <= MI_MEDIUM_OBJ_SIZE_MAX) {
    return _mi_bin_size(mi_bin(size + MI_PADDING_SIZE));
  }
  else {
    return _mi_align_up(size + MI_PADDING_SIZE, _mi_os_page_size());
  }
}
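
/* Usage sketch (illustrative; assumes MI_PADDING_SIZE == 0): mi_good_size(n)
   returns the block size of the bin an n-byte allocation would use, so
   requesting mi_good_size(n) bytes wastes no more space than requesting n
   bytes. Above MI_MEDIUM_OBJ_SIZE_MAX the request is simply rounded up to a
   multiple of the OS page size. */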

#if (MI_DEBUG>1)
static bool mi_page_queue_contains(mi_page_queue_t* queue, const mi_page_t* page) {
  mi_assert_internal(page != NULL);
  mi_page_t* list = queue->first;
  while (list != NULL) {
    mi_assert_internal(list->next == NULL || list->next->prev == list);
    mi_assert_internal(list->prev == NULL || list->prev->next == list);
    if (list == page) break;
    list = list->next;
  }
  return (list == page);
}
#endif

#if (MI_DEBUG>1)
static bool mi_heap_contains_queue(const mi_heap_t* heap, const mi_page_queue_t* pq) {
  return (pq >= &heap->pages[0] && pq <= &heap->pages[MI_BIN_FULL]);
}
#endif

static inline bool mi_page_is_large_or_huge(const mi_page_t* page) {
  return (mi_page_block_size(page) > MI_MEDIUM_OBJ_SIZE_MAX || mi_page_is_huge(page));
}

static mi_page_queue_t* mi_heap_page_queue_of(mi_heap_t* heap, const mi_page_t* page) {
  mi_assert_internal(heap != NULL);
  uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : (mi_page_is_huge(page) ? MI_BIN_HUGE : mi_bin(mi_page_block_size(page))));
  mi_assert_internal(bin <= MI_BIN_FULL);
  mi_page_queue_t* pq = &heap->pages[bin];
  mi_assert_internal((mi_page_block_size(page) == pq->block_size) ||
                     (mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(pq)) ||
                     (mi_page_is_in_full(page) && mi_page_queue_is_full(pq)));
  return pq;
}
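
/* Note (follows from the code above): a page flagged "in full" always maps to
   the single pages[MI_BIN_FULL] queue and a huge page to pages[MI_BIN_HUGE],
   regardless of its block size; only ordinary pages are binned by size. */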

static mi_page_queue_t* mi_page_queue_of(const mi_page_t* page) {
  mi_heap_t* heap = mi_page_heap(page);
  mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page);
  mi_assert_expensive(mi_page_queue_contains(pq, page));
  return pq;
}

// The `pages_free_direct` array is for efficiency: for each
// small size (up to 256) it points directly to the page for that
// size without having to compute the bin. This means that when the
// first page of the free-page queue for a small bin changes, we need
// to update a range of entries in `heap->pages_free_direct`.
static inline void mi_heap_queue_first_update(mi_heap_t* heap, const mi_page_queue_t* pq) {
  mi_assert_internal(mi_heap_contains_queue(heap, pq));
  size_t size = pq->block_size;
  if (size > MI_SMALL_SIZE_MAX) return;

  mi_page_t* page = pq->first;
  if (pq->first == NULL) page = (mi_page_t*)&_mi_page_empty;

  // find index in the right direct page array
  size_t start;
  size_t idx = _mi_wsize_from_size(size);
  mi_page_t** pages_free = heap->pages_free_direct;

  if (pages_free[idx] == page) return;  // already set

  // find start slot
  if (idx <= 1) {
    start = 0;
  }
  else {
    // find previous size; due to minimal alignment up to 3 previous bins may need to be skipped
    uint8_t bin = mi_bin(size);
    const mi_page_queue_t* prev = pq - 1;
    while (bin == mi_bin(prev->block_size) && prev > &heap->pages[0]) {
      prev--;
    }
    start = 1 + _mi_wsize_from_size(prev->block_size);
    if (start > idx) start = idx;
  }

  // set size range to the right page
  mi_assert(start <= idx);
  for (size_t sz = start; sz <= idx; sz++) {
    pages_free[sz] = page;
  }
}
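
/* Worked example (illustrative; assumes 64-bit words and the default bin
   spacing): wsizes 11 and 12 both map to the bin whose queue holds 12-word
   blocks, so when that queue's first page changes, pages_free_direct[11] and
   pages_free_direct[12] must both be redirected: the previous queue holds
   10-word blocks, hence start = 1 + 10 = 11 and idx = 12. */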

/*
static bool mi_page_queue_is_empty(mi_page_queue_t* queue) {
  return (queue->first == NULL);
}
*/

static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) {
  mi_assert_internal(page != NULL);
  mi_assert_expensive(mi_page_queue_contains(queue, page));
  mi_assert_internal(mi_page_block_size(page) == queue->block_size ||
                     (mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(queue)) ||
                     (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
  mi_heap_t* heap = mi_page_heap(page);

  if (page->prev != NULL) page->prev->next = page->next;
  if (page->next != NULL) page->next->prev = page->prev;
  if (page == queue->last)  queue->last = page->prev;
  if (page == queue->first) {
    queue->first = page->next;
    // update first
    mi_assert_internal(mi_heap_contains_queue(heap, queue));
    mi_heap_queue_first_update(heap, queue);
  }
  heap->page_count--;
  page->next = NULL;
  page->prev = NULL;
  // mi_atomic_store_ptr_release(mi_atomic_cast(void*, &page->heap), NULL);
  mi_page_set_in_full(page, false);
}
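
/* Note (follows from the code above): removal is an O(1) unlink from the
   doubly-linked queue; only when the removed page was the queue head does the
   pages_free_direct table need refreshing via mi_heap_queue_first_update.
   The "in full" flag is always cleared so the page can be requeued normally. */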

static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_t* page) {
  mi_assert_internal(mi_page_heap(page) == heap);
  mi_assert_internal(!mi_page_queue_contains(queue, page));
  #if MI_HUGE_PAGE_ABANDON
  mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
  #endif
  mi_assert_internal(mi_page_block_size(page) == queue->block_size ||
                     (mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(queue)) ||
                     (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));

  mi_page_set_in_full(page, mi_page_queue_is_full(queue));
  // mi_atomic_store_ptr_release(mi_atomic_cast(void*, &page->heap), heap);
  page->next = queue->first;
  page->prev = NULL;
  if (queue->first != NULL) {
    mi_assert_internal(queue->first->prev == NULL);
    queue->first->prev = page;
    queue->first = page;
  }
  else {
    queue->first = queue->last = page;
  }

  // update direct
  mi_heap_queue_first_update(heap, queue);
  heap->page_count++;
}
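
/* Note (follows from the code above): pages are pushed at the *front* of the
   queue, so allocation (which starts at `first`) finds the most recently
   queued page for a size class first; compare mi_page_queue_enqueue_from
   below, which appends at the tail. */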

static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* from, mi_page_t* page) {
  mi_assert_internal(page != NULL);
  mi_assert_expensive(mi_page_queue_contains(from, page));
  mi_assert_expensive(!mi_page_queue_contains(to, page));
  const size_t bsize = mi_page_block_size(page);
  MI_UNUSED(bsize);
  mi_assert_internal((bsize == to->block_size && bsize == from->block_size) ||
                     (bsize == to->block_size && mi_page_queue_is_full(from)) ||
                     (bsize == from->block_size && mi_page_queue_is_full(to)) ||
                     (mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(to)) ||
                     (mi_page_is_large_or_huge(page) && mi_page_queue_is_full(to)));

  mi_heap_t* heap = mi_page_heap(page);
  if (page->prev != NULL) page->prev->next = page->next;
  if (page->next != NULL) page->next->prev = page->prev;
  if (page == from->last)  from->last = page->prev;
  if (page == from->first) {
    from->first = page->next;
    // update first
    mi_assert_internal(mi_heap_contains_queue(heap, from));
    mi_heap_queue_first_update(heap, from);
  }

  page->prev = to->last;
  page->next = NULL;
  if (to->last != NULL) {
    mi_assert_internal(heap == mi_page_heap(to->last));
    to->last->next = page;
    to->last = page;
  }
  else {
    to->first = page;
    to->last = page;
    mi_heap_queue_first_update(heap, to);
  }

  mi_page_set_in_full(page, mi_page_queue_is_full(to));
}
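
/* Note (follows from the code above): unlike mi_page_queue_push, a page moved
   between queues is appended at the tail of `to`, and its "in full" flag is
   set or cleared to match whether the destination is the full queue. */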

// Only called from `mi_heap_absorb`.
size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append) {
  mi_assert_internal(mi_heap_contains_queue(heap, pq));
  mi_assert_internal(pq->block_size == append->block_size);

  if (append->first == NULL) return 0;

  // set append pages to new heap and count
  size_t count = 0;
  for (mi_page_t* page = append->first; page != NULL; page = page->next) {
    // inline `mi_page_set_heap` to avoid a wrong assertion during absorption;
    // in this case delayed freeing is ok since both the "to" and "from" heaps are still alive.
    mi_atomic_store_release(&page->xheap, (uintptr_t)heap);
    // set the flag to delayed free (not overriding NEVER_DELAYED_FREE), which as a
    // side effect spins until any DELAYED_FREEING is finished. This ensures
    // that after appending only the new heap will be used for delayed free operations.
    _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, false);
    count++;
  }

  if (pq->last == NULL) {
    // take over afresh
    mi_assert_internal(pq->first == NULL);
    pq->first = append->first;
    pq->last = append->last;
    mi_heap_queue_first_update(heap, pq);
  }
  else {
    // append to end
    mi_assert_internal(pq->last != NULL);
    mi_assert_internal(append->first != NULL);
    pq->last->next = append->first;
    append->first->prev = pq->last;
    pq->last = append->last;
  }
  return count;
}
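
/* Note (follows from the code above): during heap absorption every appended
   page is re-pointed at the absorbing heap and switched to delayed free, then
   the two queues are spliced in O(1); the only O(n) cost is the per-page walk,
   which also yields the returned count. */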