Path: system/lib/mimalloc/src/page-queue.c
/*----------------------------------------------------------------------------
Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/

/* -----------------------------------------------------------
  Definition of page queues for each block size
----------------------------------------------------------- */

#ifndef MI_IN_PAGE_C
#error "this file should be included from 'page.c'"
// include to help an IDE
#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "mimalloc/atomic.h"
#endif

/* -----------------------------------------------------------
  Minimal alignment in machine words (i.e. `sizeof(void*)`)
----------------------------------------------------------- */

#if (MI_MAX_ALIGN_SIZE > 4*MI_INTPTR_SIZE)
#error "define alignment for more than 4x word size for this platform"
#elif (MI_MAX_ALIGN_SIZE > 2*MI_INTPTR_SIZE)
#define MI_ALIGN4W   // 4 machine words minimal alignment
#elif (MI_MAX_ALIGN_SIZE > MI_INTPTR_SIZE)
#define MI_ALIGN2W   // 2 machine words minimal alignment
#else
// ok, default alignment is 1 word
#endif


/* -----------------------------------------------------------
  Queue query
----------------------------------------------------------- */

static inline bool mi_page_queue_is_huge(const mi_page_queue_t* pq) {
  return (pq->block_size == (MI_MEDIUM_OBJ_SIZE_MAX+sizeof(uintptr_t)));
}

static inline bool mi_page_queue_is_full(const mi_page_queue_t* pq) {
  return (pq->block_size == (MI_MEDIUM_OBJ_SIZE_MAX+(2*sizeof(uintptr_t))));
}

static inline bool mi_page_queue_is_special(const mi_page_queue_t* pq) {
  return (pq->block_size > MI_MEDIUM_OBJ_SIZE_MAX);
}
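// The huge and full queues carry no real block size; they are identified
// purely by a sentinel `block_size` just past MI_MEDIUM_OBJ_SIZE_MAX, as the
// predicates above check. Below is a compiled-out sketch of this encoding
// (not part of the original source; it assumes the `{ first, last,
// block_size }` field order of `mi_page_queue_t`):
#if 0
static void mi_page_queue_sentinel_sketch(void) {
  mi_page_queue_t huge = { NULL, NULL, MI_MEDIUM_OBJ_SIZE_MAX + sizeof(uintptr_t) };
  mi_page_queue_t full = { NULL, NULL, MI_MEDIUM_OBJ_SIZE_MAX + 2*sizeof(uintptr_t) };
  mi_assert_internal(mi_page_queue_is_huge(&huge) && !mi_page_queue_is_full(&huge));
  mi_assert_internal(mi_page_queue_is_full(&full) && !mi_page_queue_is_huge(&full));
  mi_assert_internal(mi_page_queue_is_special(&huge) && mi_page_queue_is_special(&full));
}
#endif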
/* -----------------------------------------------------------
  Bins
----------------------------------------------------------- */

// Return the bin for a given block size.
// Returns MI_BIN_HUGE if the size is too large.
// We use `wsize` for the size in "machine word sizes",
// i.e. byte size == `wsize*sizeof(void*)`.
static inline uint8_t mi_bin(size_t size) {
  size_t wsize = _mi_wsize_from_size(size);
  uint8_t bin;
  if (wsize <= 1) {
    bin = 1;
  }
  #if defined(MI_ALIGN4W)
  else if (wsize <= 4) {
    bin = (uint8_t)((wsize+1)&~1); // round to double word sizes
  }
  #elif defined(MI_ALIGN2W)
  else if (wsize <= 8) {
    bin = (uint8_t)((wsize+1)&~1); // round to double word sizes
  }
  #else
  else if (wsize <= 8) {
    bin = (uint8_t)wsize;
  }
  #endif
  else if (wsize > MI_MEDIUM_OBJ_WSIZE_MAX) {
    bin = MI_BIN_HUGE;
  }
  else {
    #if defined(MI_ALIGN4W)
    if (wsize <= 16) { wsize = (wsize+3)&~3; } // round to 4x word sizes
    #endif
    wsize--;
    // find the highest bit
    uint8_t b = (uint8_t)mi_bsr(wsize);  // note: wsize != 0
    // and use the top 3 bits to determine the bin (~12.5% worst internal fragmentation).
    // - adjust with 3 because we do not round the first 8 sizes,
    //   which each get an exact bin
    bin = ((b << 2) + (uint8_t)((wsize >> (b - 2)) & 0x03)) - 3;
    mi_assert_internal(bin < MI_BIN_HUGE);
  }
  mi_assert_internal(bin > 0 && bin <= MI_BIN_HUGE);
  return bin;
}
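// A compiled-out worked example of the bin arithmetic above (not part of the
// original source; it assumes a 64-bit platform where MI_ALIGN4W is not
// defined, so any wsize > 8 takes the generic path). For a 100-byte request:
// wsize = 13, wsize-1 = 12, b = mi_bsr(12) = 3, and ((12 >> (b-2)) & 0x03) = 2,
// so bin = (3 << 2) + 2 - 3 = 11. A 112-byte request (wsize 14) lands in the
// same bin, which holds 14-word (112-byte) blocks; serving the 100-byte
// request from it wastes 12 bytes, in line with the ~12.5% worst-case
// internal fragmentation noted above.
#if 0
static void mi_bin_sketch(void) {
  mi_assert_internal(mi_bin(100) == 11);                     // wsize 13
  mi_assert_internal(mi_bin(112) == 11);                     // wsize 14: same bin
  mi_assert_internal(mi_bin(120) == 12);                     // wsize 15: next bin
  mi_assert_internal(_mi_bin_size(11) == 14*sizeof(void*));  // 112 bytes on 64-bit
}
#endif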


/* -----------------------------------------------------------
  Queue of pages with free blocks
----------------------------------------------------------- */

uint8_t _mi_bin(size_t size) {
  return mi_bin(size);
}

size_t _mi_bin_size(uint8_t bin) {
  return _mi_heap_empty.pages[bin].block_size;
}

// Good size for allocation
size_t mi_good_size(size_t size) mi_attr_noexcept {
  if (size <= MI_MEDIUM_OBJ_SIZE_MAX) {
    return _mi_bin_size(mi_bin(size + MI_PADDING_SIZE));
  }
  else {
    return _mi_align_up(size + MI_PADDING_SIZE, _mi_os_page_size());
  }
}

#if (MI_DEBUG>1)
static bool mi_page_queue_contains(mi_page_queue_t* queue, const mi_page_t* page) {
  mi_assert_internal(page != NULL);
  mi_page_t* list = queue->first;
  while (list != NULL) {
    mi_assert_internal(list->next == NULL || list->next->prev == list);
    mi_assert_internal(list->prev == NULL || list->prev->next == list);
    if (list == page) break;
    list = list->next;
  }
  return (list == page);
}
#endif

#if (MI_DEBUG>1)
static bool mi_heap_contains_queue(const mi_heap_t* heap, const mi_page_queue_t* pq) {
  return (pq >= &heap->pages[0] && pq <= &heap->pages[MI_BIN_FULL]);
}
#endif

static inline bool mi_page_is_large_or_huge(const mi_page_t* page) {
  return (mi_page_block_size(page) > MI_MEDIUM_OBJ_SIZE_MAX || mi_page_is_huge(page));
}

static mi_page_queue_t* mi_heap_page_queue_of(mi_heap_t* heap, const mi_page_t* page) {
  mi_assert_internal(heap!=NULL);
  uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : (mi_page_is_huge(page) ? MI_BIN_HUGE : mi_bin(mi_page_block_size(page))));
  mi_assert_internal(bin <= MI_BIN_FULL);
  mi_page_queue_t* pq = &heap->pages[bin];
  mi_assert_internal((mi_page_block_size(page) == pq->block_size) ||
                     (mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(pq)) ||
                     (mi_page_is_in_full(page) && mi_page_queue_is_full(pq)));
  return pq;
}

static mi_page_queue_t* mi_page_queue_of(const mi_page_t* page) {
  mi_heap_t* heap = mi_page_heap(page);
  mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page);
  mi_assert_expensive(mi_page_queue_contains(pq, page));
  return pq;
}

// The small page array `heap->pages_free_direct` is for efficiency: for each
// small size (up to MI_SMALL_SIZE_MAX) it points directly to the page for
// that size without having to compute the bin. This means that when the
// current free page queue is updated for a small bin, we need to update a
// range of entries in `heap->pages_free_direct`.
static inline void mi_heap_queue_first_update(mi_heap_t* heap, const mi_page_queue_t* pq) {
  mi_assert_internal(mi_heap_contains_queue(heap,pq));
  size_t size = pq->block_size;
  if (size > MI_SMALL_SIZE_MAX) return;

  mi_page_t* page = pq->first;
  if (pq->first == NULL) page = (mi_page_t*)&_mi_page_empty;

  // find the index in the right direct page array
  size_t start;
  size_t idx = _mi_wsize_from_size(size);
  mi_page_t** pages_free = heap->pages_free_direct;

  if (pages_free[idx] == page) return;  // already set

  // find the start slot
  if (idx<=1) {
    start = 0;
  }
  else {
    // find the previous size; due to minimal alignment, up to 3 previous bins may need to be skipped
    uint8_t bin = mi_bin(size);
    const mi_page_queue_t* prev = pq - 1;
    while (bin == mi_bin(prev->block_size) && prev > &heap->pages[0]) {
      prev--;
    }
    start = 1 + _mi_wsize_from_size(prev->block_size);
    if (start > idx) start = idx;
  }

  // set the size range to the right page
  mi_assert(start <= idx);
  for (size_t sz = start; sz <= idx; sz++) {
    pages_free[sz] = page;
  }
}
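// A compiled-out sketch of the invariant the updater above maintains (not
// part of the original source): every small word size must resolve through
// the direct array to the first page of its bin's queue, or to the static
// `_mi_page_empty` sentinel when that queue is empty.
#if 0
static void mi_pages_free_direct_sketch(const mi_heap_t* heap) {
  for (size_t wsize = 1; wsize <= MI_SMALL_WSIZE_MAX; wsize++) {
    mi_page_t* direct = heap->pages_free_direct[wsize];
    const mi_page_queue_t* pq = &heap->pages[mi_bin(wsize*sizeof(void*))];
    mi_assert_internal(direct == pq->first ||
                       (pq->first == NULL && direct == (mi_page_t*)&_mi_page_empty));
  }
}
#endif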
/*
static bool mi_page_queue_is_empty(mi_page_queue_t* queue) {
  return (queue->first == NULL);
}
*/

static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) {
  mi_assert_internal(page != NULL);
  mi_assert_expensive(mi_page_queue_contains(queue, page));
  mi_assert_internal(mi_page_block_size(page) == queue->block_size ||
                     (mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(queue)) ||
                     (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
  mi_heap_t* heap = mi_page_heap(page);

  if (page->prev != NULL) page->prev->next = page->next;
  if (page->next != NULL) page->next->prev = page->prev;
  if (page == queue->last)  queue->last = page->prev;
  if (page == queue->first) {
    queue->first = page->next;
    // update first
    mi_assert_internal(mi_heap_contains_queue(heap, queue));
    mi_heap_queue_first_update(heap,queue);
  }
  heap->page_count--;
  page->next = NULL;
  page->prev = NULL;
  // mi_atomic_store_ptr_release(mi_atomic_cast(void*, &page->heap), NULL);
  mi_page_set_in_full(page,false);
}


static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_t* page) {
  mi_assert_internal(mi_page_heap(page) == heap);
  mi_assert_internal(!mi_page_queue_contains(queue, page));
  #if MI_HUGE_PAGE_ABANDON
  mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
  #endif
  mi_assert_internal(mi_page_block_size(page) == queue->block_size ||
                     (mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(queue)) ||
                     (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));

  mi_page_set_in_full(page, mi_page_queue_is_full(queue));
  // mi_atomic_store_ptr_release(mi_atomic_cast(void*, &page->heap), heap);
  page->next = queue->first;
  page->prev = NULL;
  if (queue->first != NULL) {
    mi_assert_internal(queue->first->prev == NULL);
    queue->first->prev = page;
    queue->first = page;
  }
  else {
    queue->first = queue->last = page;
  }

  // update direct
  mi_heap_queue_first_update(heap, queue);
  heap->page_count++;
}


static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* from, mi_page_t* page) {
  mi_assert_internal(page != NULL);
  mi_assert_expensive(mi_page_queue_contains(from, page));
  mi_assert_expensive(!mi_page_queue_contains(to, page));
  const size_t bsize = mi_page_block_size(page);
  MI_UNUSED(bsize);
  mi_assert_internal((bsize == to->block_size && bsize == from->block_size) ||
                     (bsize == to->block_size && mi_page_queue_is_full(from)) ||
                     (bsize == from->block_size && mi_page_queue_is_full(to)) ||
                     (mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(to)) ||
                     (mi_page_is_large_or_huge(page) && mi_page_queue_is_full(to)));

  mi_heap_t* heap = mi_page_heap(page);
  if (page->prev != NULL) page->prev->next = page->next;
  if (page->next != NULL) page->next->prev = page->prev;
  if (page == from->last)  from->last = page->prev;
  if (page == from->first) {
    from->first = page->next;
    // update first
    mi_assert_internal(mi_heap_contains_queue(heap, from));
    mi_heap_queue_first_update(heap, from);
  }

  page->prev = to->last;
  page->next = NULL;
  if (to->last != NULL) {
    mi_assert_internal(heap == mi_page_heap(to->last));
    to->last->next = page;
    to->last = page;
  }
  else {
    to->first = page;
    to->last = page;
    mi_heap_queue_first_update(heap, to);
  }

  mi_page_set_in_full(page, mi_page_queue_is_full(to));
}

// Only called from `mi_heap_absorb`.
size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append) {
  mi_assert_internal(mi_heap_contains_queue(heap,pq));
  mi_assert_internal(pq->block_size == append->block_size);

  if (append->first==NULL) return 0;

  // set append pages to the new heap and count them
  size_t count = 0;
  for (mi_page_t* page = append->first; page != NULL; page = page->next) {
    // inline `mi_page_set_heap` to avoid a wrong assertion during absorption;
    // in this case it is ok to be delayed freeing since both the "to" and "from" heap are still alive.
    mi_atomic_store_release(&page->xheap, (uintptr_t)heap);
    // set the flag to delayed free (not overriding NEVER_DELAYED_FREE) which has as a
    // side effect that it spins until any DELAYED_FREEING is finished. This ensures
    // that after appending only the new heap will be used for delayed free operations.
    _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, false);
    count++;
  }

  if (pq->last==NULL) {
    // take over afresh
    mi_assert_internal(pq->first==NULL);
    pq->first = append->first;
    pq->last = append->last;
    mi_heap_queue_first_update(heap, pq);
  }
  else {
    // append to the end
    mi_assert_internal(pq->last!=NULL);
    mi_assert_internal(append->first!=NULL);
    pq->last->next = append->first;
    append->first->prev = pq->last;
    pq->last = append->last;
  }
  return count;
}
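// A compiled-out sketch of the caller side (not part of the original source;
// it loosely mirrors `mi_heap_absorb` in heap.c): the absorbing heap appends
// the victim heap's queues bin by bin. Note that `_mi_page_queue_append`
// itself does not adjust the heaps' page counts, so the caller transfers
// those itself.
#if 0
static void mi_heap_absorb_sketch(mi_heap_t* heap, mi_heap_t* from) {
  for (size_t bin = 0; bin <= MI_BIN_FULL; bin++) {
    const size_t pcount = _mi_page_queue_append(heap, &heap->pages[bin], &from->pages[bin]);
    heap->page_count += pcount;
    from->page_count -= pcount;
    from->pages[bin].first = NULL;   // detach the moved pages from `from`
    from->pages[bin].last  = NULL;
  }
}
#endif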