Path: blob/main/contrib/jemalloc/src/background_thread.c
#include "jemalloc/internal/jemalloc_preamble.h"1#include "jemalloc/internal/jemalloc_internal_includes.h"23#include "jemalloc/internal/assert.h"45JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS67/******************************************************************************/8/* Data. */910/* This option should be opt-in only. */11#define BACKGROUND_THREAD_DEFAULT false12/* Read-only after initialization. */13bool opt_background_thread = BACKGROUND_THREAD_DEFAULT;14size_t opt_max_background_threads = MAX_BACKGROUND_THREAD_LIMIT + 1;1516/* Used for thread creation, termination and stats. */17malloc_mutex_t background_thread_lock;18/* Indicates global state. Atomic because decay reads this w/o locking. */19atomic_b_t background_thread_enabled_state;20size_t n_background_threads;21size_t max_background_threads;22/* Thread info per-index. */23background_thread_info_t *background_thread_info;2425/******************************************************************************/2627#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER2829static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,30void *(*)(void *), void *__restrict);3132static void33pthread_create_wrapper_init(void) {34#ifdef JEMALLOC_LAZY_LOCK35if (!isthreaded) {36isthreaded = true;37}38#endif39}4041int42pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr,43void *(*start_routine)(void *), void *__restrict arg) {44pthread_create_wrapper_init();4546return pthread_create_fptr(thread, attr, start_routine, arg);47}48#endif /* JEMALLOC_PTHREAD_CREATE_WRAPPER */4950#ifndef JEMALLOC_BACKGROUND_THREAD51#define NOT_REACHED { not_reached(); }52bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED53bool background_threads_enable(tsd_t *tsd) NOT_REACHED54bool background_threads_disable(tsd_t *tsd) NOT_REACHED55bool background_thread_is_started(background_thread_info_t *info) NOT_REACHED56void background_thread_wakeup_early(background_thread_info_t *info,57nstime_t *remaining_sleep) NOT_REACHED58void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED59void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED60void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED61void background_thread_postfork_child(tsdn_t *tsdn) NOT_REACHED62bool background_thread_stats_read(tsdn_t *tsdn,63background_thread_stats_t *stats) NOT_REACHED64void background_thread_ctl_init(tsdn_t *tsdn) NOT_REACHED65#undef NOT_REACHED66#else6768static bool background_thread_enabled_at_fork;6970static void71background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) {72background_thread_wakeup_time_set(tsdn, info, 0);73info->npages_to_purge_new = 0;74if (config_stats) {75info->tot_n_runs = 0;76nstime_init_zero(&info->tot_sleep_time);77}78}7980static inline bool81set_current_thread_affinity(int cpu) {82#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)83cpu_set_t cpuset;84#else85# ifndef __NetBSD__86cpuset_t cpuset;87# else88cpuset_t *cpuset;89# endif90#endif9192#ifndef __NetBSD__93CPU_ZERO(&cpuset);94CPU_SET(cpu, &cpuset);95#else96cpuset = cpuset_create();97#endif9899#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)100return (sched_setaffinity(0, sizeof(cpu_set_t), &cpuset) != 0);101#else102# ifndef __NetBSD__103int ret = pthread_setaffinity_np(pthread_self(), sizeof(cpuset_t),104&cpuset);105# else106int ret = pthread_setaffinity_np(pthread_self(), cpuset_size(cpuset),107cpuset);108cpuset_destroy(cpuset);109# endif110return ret != 0;111#endif112}113114#define BILLION UINT64_C(1000000000)115/* Minimal sleep interval 100 ms. 
*/116#define BACKGROUND_THREAD_MIN_INTERVAL_NS (BILLION / 10)117118static void119background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,120uint64_t interval) {121if (config_stats) {122info->tot_n_runs++;123}124info->npages_to_purge_new = 0;125126struct timeval tv;127/* Specific clock required by timedwait. */128gettimeofday(&tv, NULL);129nstime_t before_sleep;130nstime_init2(&before_sleep, tv.tv_sec, tv.tv_usec * 1000);131132int ret;133if (interval == BACKGROUND_THREAD_INDEFINITE_SLEEP) {134background_thread_wakeup_time_set(tsdn, info,135BACKGROUND_THREAD_INDEFINITE_SLEEP);136ret = pthread_cond_wait(&info->cond, &info->mtx.lock);137assert(ret == 0);138} else {139assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS &&140interval <= BACKGROUND_THREAD_INDEFINITE_SLEEP);141/* We need malloc clock (can be different from tv). */142nstime_t next_wakeup;143nstime_init_update(&next_wakeup);144nstime_iadd(&next_wakeup, interval);145assert(nstime_ns(&next_wakeup) <146BACKGROUND_THREAD_INDEFINITE_SLEEP);147background_thread_wakeup_time_set(tsdn, info,148nstime_ns(&next_wakeup));149150nstime_t ts_wakeup;151nstime_copy(&ts_wakeup, &before_sleep);152nstime_iadd(&ts_wakeup, interval);153struct timespec ts;154ts.tv_sec = (size_t)nstime_sec(&ts_wakeup);155ts.tv_nsec = (size_t)nstime_nsec(&ts_wakeup);156157assert(!background_thread_indefinite_sleep(info));158ret = pthread_cond_timedwait(&info->cond, &info->mtx.lock, &ts);159assert(ret == ETIMEDOUT || ret == 0);160}161if (config_stats) {162gettimeofday(&tv, NULL);163nstime_t after_sleep;164nstime_init2(&after_sleep, tv.tv_sec, tv.tv_usec * 1000);165if (nstime_compare(&after_sleep, &before_sleep) > 0) {166nstime_subtract(&after_sleep, &before_sleep);167nstime_add(&info->tot_sleep_time, &after_sleep);168}169}170}171172static bool173background_thread_pause_check(tsdn_t *tsdn, background_thread_info_t *info) {174if (unlikely(info->state == background_thread_paused)) {175malloc_mutex_unlock(tsdn, &info->mtx);176/* Wait on global lock to update status. */177malloc_mutex_lock(tsdn, &background_thread_lock);178malloc_mutex_unlock(tsdn, &background_thread_lock);179malloc_mutex_lock(tsdn, &info->mtx);180return true;181}182183return false;184}185186static inline void187background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info,188unsigned ind) {189uint64_t ns_until_deferred = BACKGROUND_THREAD_DEFERRED_MAX;190unsigned narenas = narenas_total_get();191bool slept_indefinitely = background_thread_indefinite_sleep(info);192193for (unsigned i = ind; i < narenas; i += max_background_threads) {194arena_t *arena = arena_get(tsdn, i, false);195if (!arena) {196continue;197}198/*199* If thread was woken up from the indefinite sleep, don't200* do the work instantly, but rather check when the deferred201* work that caused this thread to wake up is scheduled for.202*/203if (!slept_indefinitely) {204arena_do_deferred_work(tsdn, arena);205}206if (ns_until_deferred <= BACKGROUND_THREAD_MIN_INTERVAL_NS) {207/* Min interval will be used. */208continue;209}210uint64_t ns_arena_deferred = pa_shard_time_until_deferred_work(211tsdn, &arena->pa_shard);212if (ns_arena_deferred < ns_until_deferred) {213ns_until_deferred = ns_arena_deferred;214}215}216217uint64_t sleep_ns;218if (ns_until_deferred == BACKGROUND_THREAD_DEFERRED_MAX) {219sleep_ns = BACKGROUND_THREAD_INDEFINITE_SLEEP;220} else {221sleep_ns =222(ns_until_deferred < BACKGROUND_THREAD_MIN_INTERVAL_NS)223? 
BACKGROUND_THREAD_MIN_INTERVAL_NS224: ns_until_deferred;225226}227228background_thread_sleep(tsdn, info, sleep_ns);229}230231static bool232background_threads_disable_single(tsd_t *tsd, background_thread_info_t *info) {233if (info == &background_thread_info[0]) {234malloc_mutex_assert_owner(tsd_tsdn(tsd),235&background_thread_lock);236} else {237malloc_mutex_assert_not_owner(tsd_tsdn(tsd),238&background_thread_lock);239}240241pre_reentrancy(tsd, NULL);242malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);243bool has_thread;244assert(info->state != background_thread_paused);245if (info->state == background_thread_started) {246has_thread = true;247info->state = background_thread_stopped;248pthread_cond_signal(&info->cond);249} else {250has_thread = false;251}252malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);253254if (!has_thread) {255post_reentrancy(tsd);256return false;257}258void *ret;259if (pthread_join(info->thread, &ret)) {260post_reentrancy(tsd);261return true;262}263assert(ret == NULL);264n_background_threads--;265post_reentrancy(tsd);266267return false;268}269270static void *background_thread_entry(void *ind_arg);271272static int273background_thread_create_signals_masked(pthread_t *thread,274const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg) {275/*276* Mask signals during thread creation so that the thread inherits277* an empty signal set.278*/279sigset_t set;280sigfillset(&set);281sigset_t oldset;282int mask_err = pthread_sigmask(SIG_SETMASK, &set, &oldset);283if (mask_err != 0) {284return mask_err;285}286int create_err = pthread_create_wrapper(thread, attr, start_routine,287arg);288/*289* Restore the signal mask. Failure to restore the signal mask here290* changes program behavior.291*/292int restore_err = pthread_sigmask(SIG_SETMASK, &oldset, NULL);293if (restore_err != 0) {294malloc_printf("<jemalloc>: background thread creation "295"failed (%d), and signal mask restoration failed "296"(%d)\n", create_err, restore_err);297if (opt_abort) {298abort();299}300}301return create_err;302}303304static bool305check_background_thread_creation(tsd_t *tsd, unsigned *n_created,306bool *created_threads) {307bool ret = false;308if (likely(*n_created == n_background_threads)) {309return ret;310}311312tsdn_t *tsdn = tsd_tsdn(tsd);313malloc_mutex_unlock(tsdn, &background_thread_info[0].mtx);314for (unsigned i = 1; i < max_background_threads; i++) {315if (created_threads[i]) {316continue;317}318background_thread_info_t *info = &background_thread_info[i];319malloc_mutex_lock(tsdn, &info->mtx);320/*321* In case of the background_thread_paused state because of322* arena reset, delay the creation.323*/324bool create = (info->state == background_thread_started);325malloc_mutex_unlock(tsdn, &info->mtx);326if (!create) {327continue;328}329330pre_reentrancy(tsd, NULL);331int err = background_thread_create_signals_masked(&info->thread,332NULL, background_thread_entry, (void *)(uintptr_t)i);333post_reentrancy(tsd);334335if (err == 0) {336(*n_created)++;337created_threads[i] = true;338} else {339malloc_printf("<jemalloc>: background thread "340"creation failed (%d)\n", err);341if (opt_abort) {342abort();343}344}345/* Return to restart the loop since we unlocked. */346ret = true;347break;348}349malloc_mutex_lock(tsdn, &background_thread_info[0].mtx);350351return ret;352}353354static void355background_thread0_work(tsd_t *tsd) {356/* Thread0 is also responsible for launching / terminating threads. 
*/357VARIABLE_ARRAY(bool, created_threads, max_background_threads);358unsigned i;359for (i = 1; i < max_background_threads; i++) {360created_threads[i] = false;361}362/* Start working, and create more threads when asked. */363unsigned n_created = 1;364while (background_thread_info[0].state != background_thread_stopped) {365if (background_thread_pause_check(tsd_tsdn(tsd),366&background_thread_info[0])) {367continue;368}369if (check_background_thread_creation(tsd, &n_created,370(bool *)&created_threads)) {371continue;372}373background_work_sleep_once(tsd_tsdn(tsd),374&background_thread_info[0], 0);375}376377/*378* Shut down other threads at exit. Note that the ctl thread is holding379* the global background_thread mutex (and is waiting) for us.380*/381assert(!background_thread_enabled());382for (i = 1; i < max_background_threads; i++) {383background_thread_info_t *info = &background_thread_info[i];384assert(info->state != background_thread_paused);385if (created_threads[i]) {386background_threads_disable_single(tsd, info);387} else {388malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);389if (info->state != background_thread_stopped) {390/* The thread was not created. */391assert(info->state ==392background_thread_started);393n_background_threads--;394info->state = background_thread_stopped;395}396malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);397}398}399background_thread_info[0].state = background_thread_stopped;400assert(n_background_threads == 1);401}402403static void404background_work(tsd_t *tsd, unsigned ind) {405background_thread_info_t *info = &background_thread_info[ind];406407malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);408background_thread_wakeup_time_set(tsd_tsdn(tsd), info,409BACKGROUND_THREAD_INDEFINITE_SLEEP);410if (ind == 0) {411background_thread0_work(tsd);412} else {413while (info->state != background_thread_stopped) {414if (background_thread_pause_check(tsd_tsdn(tsd),415info)) {416continue;417}418background_work_sleep_once(tsd_tsdn(tsd), info, ind);419}420}421assert(info->state == background_thread_stopped);422background_thread_wakeup_time_set(tsd_tsdn(tsd), info, 0);423malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);424}425426static void *427background_thread_entry(void *ind_arg) {428unsigned thread_ind = (unsigned)(uintptr_t)ind_arg;429assert(thread_ind < max_background_threads);430#ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP431pthread_setname_np(pthread_self(), "jemalloc_bg_thd");432#elif defined(__FreeBSD__) || defined(__DragonFly__)433pthread_set_name_np(pthread_self(), "jemalloc_bg_thd");434#endif435if (opt_percpu_arena != percpu_arena_disabled) {436set_current_thread_affinity((int)thread_ind);437}438/*439* Start periodic background work. We use internal tsd which avoids440* side effects, for example triggering new arena creation (which in441* turn triggers another background thread creation).442*/443background_work(tsd_internal_fetch(), thread_ind);444assert(pthread_equal(pthread_self(),445background_thread_info[thread_ind].thread));446447return NULL;448}449450static void451background_thread_init(tsd_t *tsd, background_thread_info_t *info) {452malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);453info->state = background_thread_started;454background_thread_info_init(tsd_tsdn(tsd), info);455n_background_threads++;456}457458static bool459background_thread_create_locked(tsd_t *tsd, unsigned arena_ind) {460assert(have_background_thread);461malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);462463/* We create at most NCPUs threads. 
*/464size_t thread_ind = arena_ind % max_background_threads;465background_thread_info_t *info = &background_thread_info[thread_ind];466467bool need_new_thread;468malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);469need_new_thread = background_thread_enabled() &&470(info->state == background_thread_stopped);471if (need_new_thread) {472background_thread_init(tsd, info);473}474malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);475if (!need_new_thread) {476return false;477}478if (arena_ind != 0) {479/* Threads are created asynchronously by Thread 0. */480background_thread_info_t *t0 = &background_thread_info[0];481malloc_mutex_lock(tsd_tsdn(tsd), &t0->mtx);482assert(t0->state == background_thread_started);483pthread_cond_signal(&t0->cond);484malloc_mutex_unlock(tsd_tsdn(tsd), &t0->mtx);485486return false;487}488489pre_reentrancy(tsd, NULL);490/*491* To avoid complications (besides reentrancy), create internal492* background threads with the underlying pthread_create.493*/494int err = background_thread_create_signals_masked(&info->thread, NULL,495background_thread_entry, (void *)thread_ind);496post_reentrancy(tsd);497498if (err != 0) {499malloc_printf("<jemalloc>: arena 0 background thread creation "500"failed (%d)\n", err);501malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);502info->state = background_thread_stopped;503n_background_threads--;504malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);505506return true;507}508509return false;510}511512/* Create a new background thread if needed. */513bool514background_thread_create(tsd_t *tsd, unsigned arena_ind) {515assert(have_background_thread);516517bool ret;518malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);519ret = background_thread_create_locked(tsd, arena_ind);520malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);521522return ret;523}524525bool526background_threads_enable(tsd_t *tsd) {527assert(n_background_threads == 0);528assert(background_thread_enabled());529malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);530531VARIABLE_ARRAY(bool, marked, max_background_threads);532unsigned nmarked;533for (unsigned i = 0; i < max_background_threads; i++) {534marked[i] = false;535}536nmarked = 0;537/* Thread 0 is required and created at the end. */538marked[0] = true;539/* Mark the threads we need to create for thread 0. */540unsigned narenas = narenas_total_get();541for (unsigned i = 1; i < narenas; i++) {542if (marked[i % max_background_threads] ||543arena_get(tsd_tsdn(tsd), i, false) == NULL) {544continue;545}546background_thread_info_t *info = &background_thread_info[547i % max_background_threads];548malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);549assert(info->state == background_thread_stopped);550background_thread_init(tsd, info);551malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);552marked[i % max_background_threads] = true;553if (++nmarked == max_background_threads) {554break;555}556}557558bool err = background_thread_create_locked(tsd, 0);559if (err) {560return true;561}562for (unsigned i = 0; i < narenas; i++) {563arena_t *arena = arena_get(tsd_tsdn(tsd), i, false);564if (arena != NULL) {565pa_shard_set_deferral_allowed(tsd_tsdn(tsd),566&arena->pa_shard, true);567}568}569return false;570}571572bool573background_threads_disable(tsd_t *tsd) {574assert(!background_thread_enabled());575malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);576577/* Thread 0 will be responsible for terminating other threads. 
*/578if (background_threads_disable_single(tsd,579&background_thread_info[0])) {580return true;581}582assert(n_background_threads == 0);583unsigned narenas = narenas_total_get();584for (unsigned i = 0; i < narenas; i++) {585arena_t *arena = arena_get(tsd_tsdn(tsd), i, false);586if (arena != NULL) {587pa_shard_set_deferral_allowed(tsd_tsdn(tsd),588&arena->pa_shard, false);589}590}591592return false;593}594595bool596background_thread_is_started(background_thread_info_t *info) {597return info->state == background_thread_started;598}599600void601background_thread_wakeup_early(background_thread_info_t *info,602nstime_t *remaining_sleep) {603/*604* This is an optimization to increase batching. At this point605* we know that background thread wakes up soon, so the time to cache606* the just freed memory is bounded and low.607*/608if (remaining_sleep != NULL && nstime_ns(remaining_sleep) <609BACKGROUND_THREAD_MIN_INTERVAL_NS) {610return;611}612pthread_cond_signal(&info->cond);613}614615void616background_thread_prefork0(tsdn_t *tsdn) {617malloc_mutex_prefork(tsdn, &background_thread_lock);618background_thread_enabled_at_fork = background_thread_enabled();619}620621void622background_thread_prefork1(tsdn_t *tsdn) {623for (unsigned i = 0; i < max_background_threads; i++) {624malloc_mutex_prefork(tsdn, &background_thread_info[i].mtx);625}626}627628void629background_thread_postfork_parent(tsdn_t *tsdn) {630for (unsigned i = 0; i < max_background_threads; i++) {631malloc_mutex_postfork_parent(tsdn,632&background_thread_info[i].mtx);633}634malloc_mutex_postfork_parent(tsdn, &background_thread_lock);635}636637void638background_thread_postfork_child(tsdn_t *tsdn) {639for (unsigned i = 0; i < max_background_threads; i++) {640malloc_mutex_postfork_child(tsdn,641&background_thread_info[i].mtx);642}643malloc_mutex_postfork_child(tsdn, &background_thread_lock);644if (!background_thread_enabled_at_fork) {645return;646}647648/* Clear background_thread state (reset to disabled for child). 
*/649malloc_mutex_lock(tsdn, &background_thread_lock);650n_background_threads = 0;651background_thread_enabled_set(tsdn, false);652for (unsigned i = 0; i < max_background_threads; i++) {653background_thread_info_t *info = &background_thread_info[i];654malloc_mutex_lock(tsdn, &info->mtx);655info->state = background_thread_stopped;656int ret = pthread_cond_init(&info->cond, NULL);657assert(ret == 0);658background_thread_info_init(tsdn, info);659malloc_mutex_unlock(tsdn, &info->mtx);660}661malloc_mutex_unlock(tsdn, &background_thread_lock);662}663664bool665background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {666assert(config_stats);667malloc_mutex_lock(tsdn, &background_thread_lock);668if (!background_thread_enabled()) {669malloc_mutex_unlock(tsdn, &background_thread_lock);670return true;671}672673nstime_init_zero(&stats->run_interval);674memset(&stats->max_counter_per_bg_thd, 0, sizeof(mutex_prof_data_t));675676uint64_t num_runs = 0;677stats->num_threads = n_background_threads;678for (unsigned i = 0; i < max_background_threads; i++) {679background_thread_info_t *info = &background_thread_info[i];680if (malloc_mutex_trylock(tsdn, &info->mtx)) {681/*682* Each background thread run may take a long time;683* avoid waiting on the stats if the thread is active.684*/685continue;686}687if (info->state != background_thread_stopped) {688num_runs += info->tot_n_runs;689nstime_add(&stats->run_interval, &info->tot_sleep_time);690malloc_mutex_prof_max_update(tsdn,691&stats->max_counter_per_bg_thd, &info->mtx);692}693malloc_mutex_unlock(tsdn, &info->mtx);694}695stats->num_runs = num_runs;696if (num_runs > 0) {697nstime_idivide(&stats->run_interval, num_runs);698}699malloc_mutex_unlock(tsdn, &background_thread_lock);700701return false;702}703704#undef BACKGROUND_THREAD_NPAGES_THRESHOLD705#undef BILLION706#undef BACKGROUND_THREAD_MIN_INTERVAL_NS707708#ifdef JEMALLOC_HAVE_DLSYM709#include <dlfcn.h>710#endif711712static bool713pthread_create_fptr_init(void) {714if (pthread_create_fptr != NULL) {715return false;716}717/*718* Try the next symbol first, because 1) when use lazy_lock we have a719* wrapper for pthread_create; and 2) application may define its own720* wrapper as well (and can call malloc within the wrapper).721*/722#ifdef JEMALLOC_HAVE_DLSYM723pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");724#else725pthread_create_fptr = NULL;726#endif727if (pthread_create_fptr == NULL) {728if (config_lazy_lock) {729malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "730"\"pthread_create\")\n");731abort();732} else {733/* Fall back to the default symbol. */734pthread_create_fptr = pthread_create;735}736}737738return false;739}740741/*742* When lazy lock is enabled, we need to make sure setting isthreaded before743* taking any background_thread locks. 
This is called early in ctl (instead of744* wait for the pthread_create calls to trigger) because the mutex is required745* before creating background threads.746*/747void748background_thread_ctl_init(tsdn_t *tsdn) {749malloc_mutex_assert_not_owner(tsdn, &background_thread_lock);750#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER751pthread_create_fptr_init();752pthread_create_wrapper_init();753#endif754}755756#endif /* defined(JEMALLOC_BACKGROUND_THREAD) */757758bool759background_thread_boot0(void) {760if (!have_background_thread && opt_background_thread) {761malloc_printf("<jemalloc>: option background_thread currently "762"supports pthread only\n");763return true;764}765#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER766if ((config_lazy_lock || opt_background_thread) &&767pthread_create_fptr_init()) {768return true;769}770#endif771return false;772}773774bool775background_thread_boot1(tsdn_t *tsdn, base_t *base) {776#ifdef JEMALLOC_BACKGROUND_THREAD777assert(have_background_thread);778assert(narenas_total_get() > 0);779780if (opt_max_background_threads > MAX_BACKGROUND_THREAD_LIMIT) {781opt_max_background_threads = DEFAULT_NUM_BACKGROUND_THREAD;782}783max_background_threads = opt_max_background_threads;784785background_thread_enabled_set(tsdn, opt_background_thread);786if (malloc_mutex_init(&background_thread_lock,787"background_thread_global",788WITNESS_RANK_BACKGROUND_THREAD_GLOBAL,789malloc_mutex_rank_exclusive)) {790return true;791}792793background_thread_info = (background_thread_info_t *)base_alloc(tsdn,794base, opt_max_background_threads *795sizeof(background_thread_info_t), CACHELINE);796if (background_thread_info == NULL) {797return true;798}799800for (unsigned i = 0; i < max_background_threads; i++) {801background_thread_info_t *info = &background_thread_info[i];802/* Thread mutex is rank_inclusive because of thread0. */803if (malloc_mutex_init(&info->mtx, "background_thread",804WITNESS_RANK_BACKGROUND_THREAD,805malloc_mutex_address_ordered)) {806return true;807}808if (pthread_cond_init(&info->cond, NULL)) {809return true;810}811malloc_mutex_lock(tsdn, &info->mtx);812info->state = background_thread_stopped;813background_thread_info_init(tsdn, info);814malloc_mutex_unlock(tsdn, &info->mtx);815}816#endif817818return false;819}820821822