Path: blob/main/contrib/jemalloc/src/edata_cache.c
#include "jemalloc/internal/jemalloc_preamble.h"1#include "jemalloc/internal/jemalloc_internal_includes.h"23bool4edata_cache_init(edata_cache_t *edata_cache, base_t *base) {5edata_avail_new(&edata_cache->avail);6/*7* This is not strictly necessary, since the edata_cache_t is only8* created inside an arena, which is zeroed on creation. But this is9* handy as a safety measure.10*/11atomic_store_zu(&edata_cache->count, 0, ATOMIC_RELAXED);12if (malloc_mutex_init(&edata_cache->mtx, "edata_cache",13WITNESS_RANK_EDATA_CACHE, malloc_mutex_rank_exclusive)) {14return true;15}16edata_cache->base = base;17return false;18}1920edata_t *21edata_cache_get(tsdn_t *tsdn, edata_cache_t *edata_cache) {22malloc_mutex_lock(tsdn, &edata_cache->mtx);23edata_t *edata = edata_avail_first(&edata_cache->avail);24if (edata == NULL) {25malloc_mutex_unlock(tsdn, &edata_cache->mtx);26return base_alloc_edata(tsdn, edata_cache->base);27}28edata_avail_remove(&edata_cache->avail, edata);29atomic_load_sub_store_zu(&edata_cache->count, 1);30malloc_mutex_unlock(tsdn, &edata_cache->mtx);31return edata;32}3334void35edata_cache_put(tsdn_t *tsdn, edata_cache_t *edata_cache, edata_t *edata) {36malloc_mutex_lock(tsdn, &edata_cache->mtx);37edata_avail_insert(&edata_cache->avail, edata);38atomic_load_add_store_zu(&edata_cache->count, 1);39malloc_mutex_unlock(tsdn, &edata_cache->mtx);40}4142void43edata_cache_prefork(tsdn_t *tsdn, edata_cache_t *edata_cache) {44malloc_mutex_prefork(tsdn, &edata_cache->mtx);45}4647void48edata_cache_postfork_parent(tsdn_t *tsdn, edata_cache_t *edata_cache) {49malloc_mutex_postfork_parent(tsdn, &edata_cache->mtx);50}5152void53edata_cache_postfork_child(tsdn_t *tsdn, edata_cache_t *edata_cache) {54malloc_mutex_postfork_child(tsdn, &edata_cache->mtx);55}5657void58edata_cache_fast_init(edata_cache_fast_t *ecs, edata_cache_t *fallback) {59edata_list_inactive_init(&ecs->list);60ecs->fallback = fallback;61ecs->disabled = false;62}6364static void65edata_cache_fast_try_fill_from_fallback(tsdn_t *tsdn,66edata_cache_fast_t *ecs) {67edata_t *edata;68malloc_mutex_lock(tsdn, &ecs->fallback->mtx);69for (int i = 0; i < EDATA_CACHE_FAST_FILL; i++) {70edata = edata_avail_remove_first(&ecs->fallback->avail);71if (edata == NULL) {72break;73}74edata_list_inactive_append(&ecs->list, edata);75atomic_load_sub_store_zu(&ecs->fallback->count, 1);76}77malloc_mutex_unlock(tsdn, &ecs->fallback->mtx);78}7980edata_t *81edata_cache_fast_get(tsdn_t *tsdn, edata_cache_fast_t *ecs) {82witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),83WITNESS_RANK_EDATA_CACHE, 0);8485if (ecs->disabled) {86assert(edata_list_inactive_first(&ecs->list) == NULL);87return edata_cache_get(tsdn, ecs->fallback);88}8990edata_t *edata = edata_list_inactive_first(&ecs->list);91if (edata != NULL) {92edata_list_inactive_remove(&ecs->list, edata);93return edata;94}95/* Slow path; requires synchronization. */96edata_cache_fast_try_fill_from_fallback(tsdn, ecs);97edata = edata_list_inactive_first(&ecs->list);98if (edata != NULL) {99edata_list_inactive_remove(&ecs->list, edata);100} else {101/*102* Slowest path (fallback was also empty); allocate something103* new.104*/105edata = base_alloc_edata(tsdn, ecs->fallback->base);106}107return edata;108}109110static void111edata_cache_fast_flush_all(tsdn_t *tsdn, edata_cache_fast_t *ecs) {112/*113* You could imagine smarter cache management policies (like114* only flushing down to some threshold in anticipation of115* future get requests). 
But just flushing everything provides116* a good opportunity to defrag too, and lets us share code between the117* flush and disable pathways.118*/119edata_t *edata;120size_t nflushed = 0;121malloc_mutex_lock(tsdn, &ecs->fallback->mtx);122while ((edata = edata_list_inactive_first(&ecs->list)) != NULL) {123edata_list_inactive_remove(&ecs->list, edata);124edata_avail_insert(&ecs->fallback->avail, edata);125nflushed++;126}127atomic_load_add_store_zu(&ecs->fallback->count, nflushed);128malloc_mutex_unlock(tsdn, &ecs->fallback->mtx);129}130131void132edata_cache_fast_put(tsdn_t *tsdn, edata_cache_fast_t *ecs, edata_t *edata) {133witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),134WITNESS_RANK_EDATA_CACHE, 0);135136if (ecs->disabled) {137assert(edata_list_inactive_first(&ecs->list) == NULL);138edata_cache_put(tsdn, ecs->fallback, edata);139return;140}141142/*143* Prepend rather than append, to do LIFO ordering in the hopes of some144* cache locality.145*/146edata_list_inactive_prepend(&ecs->list, edata);147}148149void150edata_cache_fast_disable(tsdn_t *tsdn, edata_cache_fast_t *ecs) {151edata_cache_fast_flush_all(tsdn, ecs);152ecs->disabled = true;153}154155156
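
/*
 * Illustrative usage sketch, not part of the upstream file: it shows how a
 * caller such as an arena might wire the two-level cache together, using only
 * the functions defined above. The tsdn_t and base_t values are assumed to be
 * supplied by the surrounding code, and the function name is hypothetical.
 * Guarded out so it is never compiled.
 */
#if 0
static bool
edata_cache_usage_example(tsdn_t *tsdn, base_t *base) {
	edata_cache_t cache;
	edata_cache_fast_t fast;

	/* Shared, mutex-protected cache; init returns true on failure. */
	if (edata_cache_init(&cache, base)) {
		return true;
	}
	/* Per-consumer front end that refills from the shared cache. */
	edata_cache_fast_init(&fast, &cache);

	/* Fast-path get/put: no locking while the local list has entries. */
	edata_t *edata = edata_cache_fast_get(tsdn, &fast);
	if (edata == NULL) {
		/* Fallback and base allocation both failed. */
		return true;
	}
	edata_cache_fast_put(tsdn, &fast, edata);

	/* Flush the local list to the fallback and bypass it afterwards. */
	edata_cache_fast_disable(tsdn, &fast);
	return false;
}
#endif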