Path: blob/main/contrib/jemalloc/src/extent_dss.c
104897 views
#include "jemalloc/internal/jemalloc_preamble.h"1#include "jemalloc/internal/jemalloc_internal_includes.h"23#include "jemalloc/internal/assert.h"4#include "jemalloc/internal/extent_dss.h"5#include "jemalloc/internal/spin.h"67/******************************************************************************/8/* Data. */910const char *opt_dss = DSS_DEFAULT;1112const char *dss_prec_names[] = {13"disabled",14"primary",15"secondary",16"N/A"17};1819/*20* Current dss precedence default, used when creating new arenas. NB: This is21* stored as unsigned rather than dss_prec_t because in principle there's no22* guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use23* atomic operations to synchronize the setting.24*/25static atomic_u_t dss_prec_default = ATOMIC_INIT(26(unsigned)DSS_PREC_DEFAULT);2728/* Base address of the DSS. */29static void *dss_base;30/* Atomic boolean indicating whether a thread is currently extending DSS. */31static atomic_b_t dss_extending;32/* Atomic boolean indicating whether the DSS is exhausted. */33static atomic_b_t dss_exhausted;34/* Atomic current upper limit on DSS addresses. */35static atomic_p_t dss_max;3637/******************************************************************************/3839static void *40extent_dss_sbrk(intptr_t increment) {41#ifdef JEMALLOC_DSS42return sbrk(increment);43#else44not_implemented();45return NULL;46#endif47}4849dss_prec_t50extent_dss_prec_get(void) {51dss_prec_t ret;5253if (!have_dss) {54return dss_prec_disabled;55}56ret = (dss_prec_t)atomic_load_u(&dss_prec_default, ATOMIC_ACQUIRE);57return ret;58}5960bool61extent_dss_prec_set(dss_prec_t dss_prec) {62if (!have_dss) {63return (dss_prec != dss_prec_disabled);64}65atomic_store_u(&dss_prec_default, (unsigned)dss_prec, ATOMIC_RELEASE);66return false;67}6869static void70extent_dss_extending_start(void) {71spin_t spinner = SPIN_INITIALIZER;72while (true) {73bool expected = false;74if (atomic_compare_exchange_weak_b(&dss_extending, &expected,75true, ATOMIC_ACQ_REL, ATOMIC_RELAXED)) {76break;77}78spin_adaptive(&spinner);79}80}8182static void83extent_dss_extending_finish(void) {84assert(atomic_load_b(&dss_extending, ATOMIC_RELAXED));8586atomic_store_b(&dss_extending, false, ATOMIC_RELEASE);87}8889static void *90extent_dss_max_update(void *new_addr) {91/*92* Get the current end of the DSS as max_cur and assure that dss_max is93* up to date.94*/95void *max_cur = extent_dss_sbrk(0);96if (max_cur == (void *)-1) {97return NULL;98}99atomic_store_p(&dss_max, max_cur, ATOMIC_RELEASE);100/* Fixed new_addr can only be supported if it is at the edge of DSS. */101if (new_addr != NULL && max_cur != new_addr) {102return NULL;103}104return max_cur;105}106107void *108extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,109size_t alignment, bool *zero, bool *commit) {110edata_t *gap;111112cassert(have_dss);113assert(size > 0);114assert(alignment == ALIGNMENT_CEILING(alignment, PAGE));115116/*117* sbrk() uses a signed increment argument, so take care not to118* interpret a large allocation request as a negative increment.119*/120if ((intptr_t)size < 0) {121return NULL;122}123124gap = edata_cache_get(tsdn, &arena->pa_shard.edata_cache);125if (gap == NULL) {126return NULL;127}128129extent_dss_extending_start();130if (!atomic_load_b(&dss_exhausted, ATOMIC_ACQUIRE)) {131/*132* The loop is necessary to recover from races with other133* threads that are using the DSS for something other than134* malloc.135*/136while (true) {137void *max_cur = extent_dss_max_update(new_addr);138if (max_cur == NULL) {139goto label_oom;140}141142bool head_state = opt_retain ? EXTENT_IS_HEAD :143EXTENT_NOT_HEAD;144/*145* Compute how much page-aligned gap space (if any) is146* necessary to satisfy alignment. This space can be147* recycled for later use.148*/149void *gap_addr_page = (void *)(PAGE_CEILING(150(uintptr_t)max_cur));151void *ret = (void *)ALIGNMENT_CEILING(152(uintptr_t)gap_addr_page, alignment);153size_t gap_size_page = (uintptr_t)ret -154(uintptr_t)gap_addr_page;155if (gap_size_page != 0) {156edata_init(gap, arena_ind_get(arena),157gap_addr_page, gap_size_page, false,158SC_NSIZES, extent_sn_next(159&arena->pa_shard.pac),160extent_state_active, false, true,161EXTENT_PAI_PAC, head_state);162}163/*164* Compute the address just past the end of the desired165* allocation space.166*/167void *dss_next = (void *)((uintptr_t)ret + size);168if ((uintptr_t)ret < (uintptr_t)max_cur ||169(uintptr_t)dss_next < (uintptr_t)max_cur) {170goto label_oom; /* Wrap-around. */171}172/* Compute the increment, including subpage bytes. */173void *gap_addr_subpage = max_cur;174size_t gap_size_subpage = (uintptr_t)ret -175(uintptr_t)gap_addr_subpage;176intptr_t incr = gap_size_subpage + size;177178assert((uintptr_t)max_cur + incr == (uintptr_t)ret +179size);180181/* Try to allocate. */182void *dss_prev = extent_dss_sbrk(incr);183if (dss_prev == max_cur) {184/* Success. */185atomic_store_p(&dss_max, dss_next,186ATOMIC_RELEASE);187extent_dss_extending_finish();188189if (gap_size_page != 0) {190ehooks_t *ehooks = arena_get_ehooks(191arena);192extent_dalloc_gap(tsdn,193&arena->pa_shard.pac, ehooks, gap);194} else {195edata_cache_put(tsdn,196&arena->pa_shard.edata_cache, gap);197}198if (!*commit) {199*commit = pages_decommit(ret, size);200}201if (*zero && *commit) {202edata_t edata = {0};203ehooks_t *ehooks = arena_get_ehooks(204arena);205206edata_init(&edata,207arena_ind_get(arena), ret, size,208size, false, SC_NSIZES,209extent_state_active, false, true,210EXTENT_PAI_PAC, head_state);211if (extent_purge_forced_wrapper(tsdn,212ehooks, &edata, 0, size)) {213memset(ret, 0, size);214}215}216return ret;217}218/*219* Failure, whether due to OOM or a race with a raw220* sbrk() call from outside the allocator.221*/222if (dss_prev == (void *)-1) {223/* OOM. */224atomic_store_b(&dss_exhausted, true,225ATOMIC_RELEASE);226goto label_oom;227}228}229}230label_oom:231extent_dss_extending_finish();232edata_cache_put(tsdn, &arena->pa_shard.edata_cache, gap);233return NULL;234}235236static bool237extent_in_dss_helper(void *addr, void *max) {238return ((uintptr_t)addr >= (uintptr_t)dss_base && (uintptr_t)addr <239(uintptr_t)max);240}241242bool243extent_in_dss(void *addr) {244cassert(have_dss);245246return extent_in_dss_helper(addr, atomic_load_p(&dss_max,247ATOMIC_ACQUIRE));248}249250bool251extent_dss_mergeable(void *addr_a, void *addr_b) {252void *max;253254cassert(have_dss);255256if ((uintptr_t)addr_a < (uintptr_t)dss_base && (uintptr_t)addr_b <257(uintptr_t)dss_base) {258return true;259}260261max = atomic_load_p(&dss_max, ATOMIC_ACQUIRE);262return (extent_in_dss_helper(addr_a, max) ==263extent_in_dss_helper(addr_b, max));264}265266void267extent_dss_boot(void) {268cassert(have_dss);269270dss_base = extent_dss_sbrk(0);271atomic_store_b(&dss_extending, false, ATOMIC_RELAXED);272atomic_store_b(&dss_exhausted, dss_base == (void *)-1, ATOMIC_RELAXED);273atomic_store_p(&dss_max, dss_base, ATOMIC_RELAXED);274}275276/******************************************************************************/277278279