Path: blob/main/system/lib/mimalloc/src/free.c
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#if !defined(MI_IN_ALLOC_C)
#error "this file should be included from 'alloc.c' (so aliases can work from alloc-override)"
// add includes to help an IDE
#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "mimalloc/atomic.h"
#include "mimalloc/prim.h"   // _mi_prim_thread_id()
#endif

// forward declarations
static void   mi_check_padding(const mi_page_t* page, const mi_block_t* block);
static bool   mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block);
static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block);
static void   mi_stat_free(const mi_page_t* page, const mi_block_t* block);


// ------------------------------------------------------
// Free
// ------------------------------------------------------

// forward declaration of multi-threaded free (`_mt`) (or free in huge block if compiled with MI_HUGE_PAGE_ABANDON)
static mi_decl_noinline void mi_free_block_mt(mi_page_t* page, mi_segment_t* segment, mi_block_t* block);

// regular free of a (thread local) block pointer
// fast path written carefully to prevent spilling on the stack
static inline void mi_free_block_local(mi_page_t* page, mi_block_t* block, bool track_stats, bool check_full)
{
  // checks
  if mi_unlikely(mi_check_is_double_free(page, block)) return;
  mi_check_padding(page, block);
  if (track_stats) { mi_stat_free(page, block); }
  #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN
  if (!mi_page_is_huge(page)) {   // huge page content may be already decommitted
    memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
  }
  #endif
  if (track_stats) { mi_track_free_size(block, mi_page_usable_size_of(page, block)); } // faster than mi_usable_size as we already know the page and that p is unaligned

  // actual free: push on the local free list
  mi_block_set_next(page, block, page->local_free);
  page->local_free = block;
  if mi_unlikely(--page->used == 0) {
    _mi_page_retire(page);
  }
  else if mi_unlikely(check_full && mi_page_is_in_full(page)) {
    _mi_page_unfull(page);
  }
}

// Adjust a block that was allocated aligned, to the actual start of the block in the page.
// note: this can be called from `mi_free_generic_mt` where a non-owning thread accesses the
// `page_start` and `block_size` fields; however these are constant and the page won't be
// deallocated (as the block we are freeing keeps it alive) and thus safe to read concurrently.
mi_block_t* _mi_page_ptr_unalign(const mi_page_t* page, const void* p) {
  mi_assert_internal(page!=NULL && p!=NULL);

  size_t diff = (uint8_t*)p - page->page_start;
  size_t adjust;
  if mi_likely(page->block_size_shift != 0) {
    adjust = diff & (((size_t)1 << page->block_size_shift) - 1);
  }
  else {
    adjust = diff % mi_page_block_size(page);
  }

  return (mi_block_t*)((uintptr_t)p - adjust);
}

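// Illustrative example with hypothetical values: for a page with
// `block_size_shift == 6` (64-byte blocks) and `page_start == 0x1000`, an
// interior pointer `p == 0x10A8` gives `diff == 0xA8` (168) and
// `adjust == 168 & 63 == 40`, so the returned block start is
// `0x10A8 - 40 == 0x1080`. For block sizes that are not a power of two the
// modulo branch computes the same adjustment.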

// free a local pointer (page parameter comes first for better codegen)
static void mi_decl_noinline mi_free_generic_local(mi_page_t* page, mi_segment_t* segment, void* p) mi_attr_noexcept {
  MI_UNUSED(segment);
  mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(page, p) : (mi_block_t*)p);
  mi_free_block_local(page, block, true /* track stats */, true /* check for a full page */);
}

// free a pointer owned by another thread (page parameter comes first for better codegen)
static void mi_decl_noinline mi_free_generic_mt(mi_page_t* page, mi_segment_t* segment, void* p) mi_attr_noexcept {
  mi_block_t* const block = _mi_page_ptr_unalign(page, p); // don't check `has_aligned` flag to avoid a race (issue #865)
  mi_free_block_mt(page, segment, block);
}

// generic free (for runtime integration)
void mi_decl_noinline _mi_free_generic(mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept {
  if (is_local) mi_free_generic_local(page,segment,p);
  else mi_free_generic_mt(page,segment,p);
}

// Get the segment data belonging to a pointer
// This is just a single `and` in release mode but does further checks in debug mode
// (and secure mode) to see if this was a valid pointer.
static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* msg)
{
  MI_UNUSED(msg);

  #if (MI_DEBUG>0)
  if mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) {
    _mi_error_message(EINVAL, "%s: invalid (unaligned) pointer: %p\n", msg, p);
    return NULL;
  }
  #endif

  mi_segment_t* const segment = _mi_ptr_segment(p);
  if mi_unlikely(segment==NULL) return segment;

  #if (MI_DEBUG>0)
  if mi_unlikely(!mi_is_in_heap_region(p)) {
    #if (MI_INTPTR_SIZE == 8 && defined(__linux__))
    if (((uintptr_t)p >> 40) != 0x7F) { // linux tends to align large blocks above 0x7F000000000 (issue #640)
    #else
    {
    #endif
      _mi_warning_message("%s: pointer might not point to a valid heap region: %p\n"
        "(this may still be a valid very large allocation (over 64MiB))\n", msg, p);
      if mi_likely(_mi_ptr_cookie(segment) == segment->cookie) {
        _mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p);
      }
    }
  }
  #endif
  #if (MI_DEBUG>0 || MI_SECURE>=4)
  if mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie) {
    _mi_error_message(EINVAL, "%s: pointer does not point to a valid heap space: %p\n", msg, p);
    return NULL;
  }
  #endif

  return segment;
}

// Free a block
// Fast path written carefully to prevent register spilling on the stack
void mi_free(void* p) mi_attr_noexcept
{
  mi_segment_t* const segment = mi_checked_ptr_segment(p,"mi_free");
  if mi_unlikely(segment==NULL) return;

  const bool is_local = (_mi_prim_thread_id() == mi_atomic_load_relaxed(&segment->thread_id));
  mi_page_t* const page = _mi_segment_page_of(segment, p);

  if mi_likely(is_local) {                        // thread-local free?
    if mi_likely(page->flags.full_aligned == 0) { // and it is not a full page (full pages need to move from the full bin), nor has aligned blocks (aligned blocks need to be unaligned)
      // thread-local, aligned, and not a full page
      mi_block_t* const block = (mi_block_t*)p;
      mi_free_block_local(page, block, true /* track stats */, false /* no need to check if the page is full */);
    }
    else {
      // page is full or contains (inner) aligned blocks; use generic path
      mi_free_generic_local(page, segment, p);
    }
  }
  else {
    // not thread-local; use generic path
    mi_free_generic_mt(page, segment, p);
  }
}

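// Usage sketch (hypothetical call site): a free on the allocating thread takes
// the fast path above, while a free from another thread is routed through
// `mi_free_generic_mt`:
//
//   void* p = mi_malloc(32);   // allocated from a thread-local page
//   mi_free(p);                // same thread: pushed on `page->local_free`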

// return true if successful
bool _mi_free_delayed_block(mi_block_t* block) {
  // get segment and page
  mi_assert_internal(block!=NULL);
  const mi_segment_t* const segment = _mi_ptr_segment(block);
  mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie);
  mi_assert_internal(_mi_thread_id() == segment->thread_id);
  mi_page_t* const page = _mi_segment_page_of(segment, block);

  // Clear the no-delayed flag so delayed freeing is used again for this page.
  // This must be done before collecting the free lists on this page -- otherwise
  // some blocks may end up in the page `thread_free` list with no blocks in the
  // heap `thread_delayed_free` list which may cause the page to be never freed!
  // (it would only be freed if we happen to scan it in `mi_page_queue_find_free_ex`)
  if (!_mi_page_try_use_delayed_free(page, MI_USE_DELAYED_FREE, false /* don't overwrite never-delayed */)) {
    return false;
  }

  // collect all other non-local frees (move from `thread_free` to `free`) to ensure up-to-date `used` count
  _mi_page_free_collect(page, false);

  // and free the block (possibly freeing the page as well since `used` is updated)
  mi_free_block_local(page, block, false /* stats have already been adjusted */, true /* check for a full page */);
  return true;
}


// ------------------------------------------------------
// Multi-threaded Free (`_mt`)
// ------------------------------------------------------

// Push a block that is owned by another thread on its page-local thread free
// list or its heap delayed free list. Such blocks are later collected by
// the owning thread in `_mi_free_delayed_block`.
static void mi_decl_noinline mi_free_block_delayed_mt( mi_page_t* page, mi_block_t* block )
{
  // Try to put the block on either the page-local thread free list,
  // or the heap delayed free list (if this is the first non-local free in that page)
  mi_thread_free_t tfreex;
  bool use_delayed;
  mi_thread_free_t tfree = mi_atomic_load_relaxed(&page->xthread_free);
  do {
    use_delayed = (mi_tf_delayed(tfree) == MI_USE_DELAYED_FREE);
    if mi_unlikely(use_delayed) {
      // unlikely: this only happens on the first concurrent free in a page that is in the full list
      tfreex = mi_tf_set_delayed(tfree,MI_DELAYED_FREEING);
    }
    else {
      // usual: directly add to page thread_free list
      mi_block_set_next(page, block, mi_tf_block(tfree));
      tfreex = mi_tf_set_block(tfree,block);
    }
  } while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));

  // If this was the first non-local free, we need to push it on the heap delayed free list instead
  if mi_unlikely(use_delayed) {
    // racy read on `heap`, but ok because MI_DELAYED_FREEING is set (see `mi_heap_delete` and `mi_heap_collect_abandon`)
    mi_heap_t* const heap = (mi_heap_t*)(mi_atomic_load_acquire(&page->xheap)); //mi_page_heap(page);
    mi_assert_internal(heap != NULL);
    if (heap != NULL) {
      // add to the delayed free list of this heap. (do this atomically as the lock only protects heap memory validity)
      mi_block_t* dfree = mi_atomic_load_ptr_relaxed(mi_block_t, &heap->thread_delayed_free);
      do {
        mi_block_set_nextx(heap,block,dfree, heap->keys);
      } while (!mi_atomic_cas_ptr_weak_release(mi_block_t,&heap->thread_delayed_free, &dfree, block));
    }

    // and reset the MI_DELAYED_FREEING flag
    tfree = mi_atomic_load_relaxed(&page->xthread_free);
    do {
      tfreex = tfree;
      mi_assert_internal(mi_tf_delayed(tfree) == MI_DELAYED_FREEING);
      tfreex = mi_tf_set_delayed(tfree,MI_NO_DELAYED_FREE);
    } while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));
  }
}

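// Note: the loops above follow the usual lock-free update pattern: read the
// current value, compute the new value (linking the block in for the list
// pushes), and retry the weak compare-and-swap on contention. Blocks pushed on
// `heap->thread_delayed_free` eventually make the round trip back through
// `_mi_free_delayed_block` when the owning thread later collects them.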

// Multi-threaded free (`_mt`) (or free in huge block if compiled with MI_HUGE_PAGE_ABANDON)
static void mi_decl_noinline mi_free_block_mt(mi_page_t* page, mi_segment_t* segment, mi_block_t* block)
{
  // first see if the segment was abandoned and if we can reclaim it into our thread
  if (mi_option_is_enabled(mi_option_abandoned_reclaim_on_free) &&
      #if MI_HUGE_PAGE_ABANDON
      segment->page_kind != MI_PAGE_HUGE &&
      #endif
      mi_atomic_load_relaxed(&segment->thread_id) == 0)
  {
    // the segment is abandoned, try to reclaim it into our heap
    if (_mi_segment_attempt_reclaim(mi_heap_get_default(), segment)) {
      mi_assert_internal(_mi_prim_thread_id() == mi_atomic_load_relaxed(&segment->thread_id));
      mi_free(block);  // recursively free as now it will be a local free in our heap
      return;
    }
  }

  // The padding check may access the non-thread-owned page for the key values.
  // That is safe as these are constant and the page won't be freed (as the block is not freed yet).
  mi_check_padding(page, block);

  // adjust stats (after padding check and potentially recursive `mi_free` above)
  mi_stat_free(page, block);    // stat_free may access the padding
  mi_track_free_size(block, mi_page_usable_size_of(page,block));

  // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection
  _mi_padding_shrink(page, block, sizeof(mi_block_t));

  if (segment->kind == MI_SEGMENT_HUGE) {
    #if MI_HUGE_PAGE_ABANDON
    // huge page segments are always abandoned and can be freed immediately
    _mi_segment_huge_page_free(segment, page, block);
    return;
    #else
    // huge pages are special as they occupy the entire segment
    // as these are large we reset the memory occupied by the page so it is available to other threads
    // (as the owning thread needs to actually free the memory later).
    _mi_segment_huge_page_reset(segment, page, block);
    #endif
  }
  else {
    #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN  // note: when tracking, cannot use mi_usable_size with multi-threading
    memset(block, MI_DEBUG_FREED, mi_usable_size(block));
    #endif
  }

  // and finally free the actual block by pushing it on the owning heap
  // thread_delayed free list (or heap delayed free list)
  mi_free_block_delayed_mt(page,block);
}


// ------------------------------------------------------
// Usable size
// ------------------------------------------------------

// Bytes available in a block
static size_t mi_decl_noinline mi_page_usable_aligned_size_of(const mi_page_t* page, const void* p) mi_attr_noexcept {
  const mi_block_t* block = _mi_page_ptr_unalign(page, p);
  const size_t size = mi_page_usable_size_of(page, block);
  const ptrdiff_t adjust = (uint8_t*)p - (uint8_t*)block;
  mi_assert_internal(adjust >= 0 && (size_t)adjust <= size);
  return (size - adjust);
}

static inline size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noexcept {
  const mi_segment_t* const segment = mi_checked_ptr_segment(p, msg);
  if mi_unlikely(segment==NULL) return 0;
  const mi_page_t* const page = _mi_segment_page_of(segment, p);
  if mi_likely(!mi_page_has_aligned(page)) {
    const mi_block_t* block = (const mi_block_t*)p;
    return mi_page_usable_size_of(page, block);
  }
  else {
    // split out to separate routine for improved code generation
    return mi_page_usable_aligned_size_of(page, p);
  }
}

mi_decl_nodiscard size_t mi_usable_size(const void* p) mi_attr_noexcept {
  return _mi_usable_size(p, "mi_usable_size");
}

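// Usage sketch (hypothetical call site): `mi_usable_size` reports the full
// usable size of the block, which may be larger than the requested size:
//
//   void* p = mi_malloc(100);
//   size_t n = mi_usable_size(p);   // n >= 100
//   mi_free(p);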

// ------------------------------------------------------
// Free variants
// ------------------------------------------------------

void mi_free_size(void* p, size_t size) mi_attr_noexcept {
  MI_UNUSED_RELEASE(size);
  mi_assert(p == NULL || size <= _mi_usable_size(p,"mi_free_size"));
  mi_free(p);
}

void mi_free_size_aligned(void* p, size_t size, size_t alignment) mi_attr_noexcept {
  MI_UNUSED_RELEASE(alignment);
  mi_assert(((uintptr_t)p % alignment) == 0);
  mi_free_size(p,size);
}

void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept {
  MI_UNUSED_RELEASE(alignment);
  mi_assert(((uintptr_t)p % alignment) == 0);
  mi_free(p);
}


// ------------------------------------------------------
// Check for double free in secure and debug mode
// This is somewhat expensive so only enabled for secure mode 4 (and debug builds)
// ------------------------------------------------------

#if (MI_ENCODE_FREELIST && (MI_SECURE>=4 || MI_DEBUG!=0))
// linear check if the free list contains a specific element
static bool mi_list_contains(const mi_page_t* page, const mi_block_t* list, const mi_block_t* elem) {
  while (list != NULL) {
    if (elem==list) return true;
    list = mi_block_next(page, list);
  }
  return false;
}

static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, const mi_block_t* block) {
  // The decoded value is in the same page (or NULL).
  // Walk the free lists to verify positively if it is already freed
  if (mi_list_contains(page, page->free, block) ||
      mi_list_contains(page, page->local_free, block) ||
      mi_list_contains(page, mi_page_thread_free(page), block))
  {
    _mi_error_message(EAGAIN, "double free detected of block %p with size %zu\n", block, mi_page_block_size(page));
    return true;
  }
  return false;
}

#define mi_track_page(page,access)  { size_t psize; void* pstart = _mi_page_start(_mi_page_segment(page),page,&psize); mi_track_mem_##access( pstart, psize); }

static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
  bool is_double_free = false;
  mi_block_t* n = mi_block_nextx(page, block, page->keys); // pretend it is freed, and get the decoded first field
  if (((uintptr_t)n & (MI_INTPTR_SIZE-1))==0 &&  // quick check: aligned pointer?
      (n==NULL || mi_is_in_same_page(block, n))) // quick check: in same page or NULL?
  {
    // Suspicious: the decoded value in the block is in the same page (or NULL) -- maybe a double free?
    // (continue in separate function to improve code generation)
    is_double_free = mi_check_is_double_freex(page, block);
  }
  return is_double_free;
}
#else
static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
  MI_UNUSED(page);
  MI_UNUSED(block);
  return false;
}
#endif

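// Note: the check above is a heuristic: the free lists are only walked when the
// decoded `next` field of the block already looks like a pointer into the same
// page, which keeps the common (non-buggy) free path cheap. A double free is
// reported only when the block is still found on one of the page's free lists.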

// ---------------------------------------------------------------------------
// Check for heap block overflow by setting up padding at the end of the block
// ---------------------------------------------------------------------------

#if MI_PADDING // && !MI_TRACK_ENABLED
static bool mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) {
  *bsize = mi_page_usable_block_size(page);
  const mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize);
  mi_track_mem_defined(padding,sizeof(mi_padding_t));
  *delta = padding->delta;
  uint32_t canary = padding->canary;
  uintptr_t keys[2];
  keys[0] = page->keys[0];
  keys[1] = page->keys[1];
  bool ok = ((uint32_t)mi_ptr_encode(page,block,keys) == canary && *delta <= *bsize);
  mi_track_mem_noaccess(padding,sizeof(mi_padding_t));
  return ok;
}

// Return the exact usable size of a block.
static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
  size_t bsize;
  size_t delta;
  bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
  mi_assert_internal(ok); mi_assert_internal(delta <= bsize);
  return (ok ? bsize - delta : 0);
}

// When a non-thread-local block is freed, it becomes part of the thread delayed free
// list that is freed later by the owning heap. If the exact usable size is too small to
// contain the pointer for the delayed list, then shrink the padding (by decreasing delta)
// so it will later not trigger an overflow error in `mi_free_block`.
void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
  size_t bsize;
  size_t delta;
  bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
  mi_assert_internal(ok);
  if (!ok || (bsize - delta) >= min_size) return;  // usually already enough space
  mi_assert_internal(bsize >= min_size);
  if (bsize < min_size) return;  // should never happen
  size_t new_delta = (bsize - min_size);
  mi_assert_internal(new_delta < bsize);
  mi_padding_t* padding = (mi_padding_t*)((uint8_t*)block + bsize);
  mi_track_mem_defined(padding,sizeof(mi_padding_t));
  padding->delta = (uint32_t)new_delta;
  mi_track_mem_noaccess(padding,sizeof(mi_padding_t));
}
#else
static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
  MI_UNUSED(block);
  return mi_page_usable_block_size(page);
}

void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
  MI_UNUSED(page);
  MI_UNUSED(block);
  MI_UNUSED(min_size);
}
#endif

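// Padding layout (sketch, assuming MI_PADDING): each block is followed by a
// `mi_padding_t` trailer at offset `bsize`, whose `canary` encodes the block
// pointer with the page keys and whose `delta` records how many trailing bytes
// are padding, so the exact usable size is `bsize - delta`:
//
//   [ user data (bsize - delta bytes) | fill bytes (delta bytes) | canary | delta ]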

#if MI_PADDING && MI_PADDING_CHECK

static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, size_t* size, size_t* wrong) {
  size_t bsize;
  size_t delta;
  bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
  *size = *wrong = bsize;
  if (!ok) return false;
  mi_assert_internal(bsize >= delta);
  *size = bsize - delta;
  if (!mi_page_is_huge(page)) {
    uint8_t* fill = (uint8_t*)block + bsize - delta;
    const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // check at most the first N padding bytes
    mi_track_mem_defined(fill, maxpad);
    for (size_t i = 0; i < maxpad; i++) {
      if (fill[i] != MI_DEBUG_PADDING) {
        *wrong = bsize - delta + i;
        ok = false;
        break;
      }
    }
    mi_track_mem_noaccess(fill, maxpad);
  }
  return ok;
}

static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
  size_t size;
  size_t wrong;
  if (!mi_verify_padding(page,block,&size,&wrong)) {
    _mi_error_message(EFAULT, "buffer overflow in heap block %p of size %zu: write after %zu bytes\n", block, size, wrong );
  }
}

#else

static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
  MI_UNUSED(page);
  MI_UNUSED(block);
}

#endif

// only maintain stats for smaller objects if requested
#if (MI_STAT>0)
static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
  #if (MI_STAT < 2)
  MI_UNUSED(block);
  #endif
  mi_heap_t* const heap = mi_heap_get_default();
  const size_t bsize = mi_page_usable_block_size(page);
  #if (MI_STAT>1)
  const size_t usize = mi_page_usable_size_of(page, block);
  mi_heap_stat_decrease(heap, malloc, usize);
  #endif
  if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) {
    mi_heap_stat_decrease(heap, normal, bsize);
    #if (MI_STAT > 1)
    mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], 1);
    #endif
  }
  else if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
    mi_heap_stat_decrease(heap, large, bsize);
  }
  else {
    mi_heap_stat_decrease(heap, huge, bsize);
  }
}
#else
static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
  MI_UNUSED(page); MI_UNUSED(block);
}
#endif