Path: blob/main/contrib/jemalloc/src/prof_recent.c

#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/buf_writer.h"
#include "jemalloc/internal/emitter.h"
#include "jemalloc/internal/prof_data.h"
#include "jemalloc/internal/prof_recent.h"

ssize_t opt_prof_recent_alloc_max = PROF_RECENT_ALLOC_MAX_DEFAULT;
malloc_mutex_t prof_recent_alloc_mtx; /* Protects the fields below */
static atomic_zd_t prof_recent_alloc_max;
static ssize_t prof_recent_alloc_count = 0;
prof_recent_list_t prof_recent_alloc_list;

malloc_mutex_t prof_recent_dump_mtx; /* Protects dumping. */

static void
prof_recent_alloc_max_init() {
	atomic_store_zd(&prof_recent_alloc_max, opt_prof_recent_alloc_max,
	    ATOMIC_RELAXED);
}

static inline ssize_t
prof_recent_alloc_max_get_no_lock() {
	return atomic_load_zd(&prof_recent_alloc_max, ATOMIC_RELAXED);
}

static inline ssize_t
prof_recent_alloc_max_get(tsd_t *tsd) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	return prof_recent_alloc_max_get_no_lock();
}

static inline ssize_t
prof_recent_alloc_max_update(tsd_t *tsd, ssize_t max) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	ssize_t old_max = prof_recent_alloc_max_get(tsd);
	atomic_store_zd(&prof_recent_alloc_max, max, ATOMIC_RELAXED);
	return old_max;
}

static prof_recent_t *
prof_recent_allocate_node(tsdn_t *tsdn) {
	return (prof_recent_t *)iallocztm(tsdn, sizeof(prof_recent_t),
	    sz_size2index(sizeof(prof_recent_t)), false, NULL, true,
	    arena_get(tsdn, 0, false), true);
}

static void
prof_recent_free_node(tsdn_t *tsdn, prof_recent_t *node) {
	assert(node != NULL);
	assert(isalloc(tsdn, node) == sz_s2u(sizeof(prof_recent_t)));
	idalloctm(tsdn, node, NULL, NULL, true, true);
}

static inline void
increment_recent_count(tsd_t *tsd, prof_tctx_t *tctx) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
	++tctx->recent_count;
	assert(tctx->recent_count > 0);
}
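
/*
 * Illustrative sketch, not compiled: the two-phase protocol that the
 * counting helpers above support.  prof_recent_alloc_prepare() pins the
 * tctx (via increment_recent_count()) while tctx->tdata->lock is held;
 * the actual recording happens later in prof_recent_alloc() once that
 * lock has been dropped.  The caller below is a simplified, hypothetical
 * stand-in for the real sampled-allocation call site in prof.c.
 */
#if 0
static void
sampled_alloc_path_sketch(tsd_t *tsd, edata_t *edata, prof_tctx_t *tctx,
    size_t size, size_t usize) {
	/* Phase 1: under tctx->tdata->lock, pin the tctx. */
	malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
	bool record = prof_recent_alloc_prepare(tsd, tctx);
	malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);

	/* Phase 2: record under prof_recent_alloc_mtx, without tdata->lock. */
	if (record) {
		prof_recent_alloc(tsd, edata, size, usize);
	}
}
#endif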

bool
prof_recent_alloc_prepare(tsd_t *tsd, prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(opt_prof && prof_booted);
	malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
	malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);

	/*
	 * Check whether last-N mode is turned on without trying to acquire the
	 * lock, so as to optimize for the following two scenarios:
	 * (1) Last-N mode is switched off;
	 * (2) Dumping, during which last-N mode is temporarily turned off so
	 *     as not to block sampled allocations.
	 */
	if (prof_recent_alloc_max_get_no_lock() == 0) {
		return false;
	}

	/*
	 * Increment recent_count to hold the tctx so that it won't be gone
	 * even after tctx->tdata->lock is released.  This acts as a
	 * "placeholder"; the real recording of the allocation requires a lock
	 * on prof_recent_alloc_mtx and is done in prof_recent_alloc (when
	 * tctx->tdata->lock has been released).
	 */
	increment_recent_count(tsd, tctx);
	return true;
}

static void
decrement_recent_count(tsd_t *tsd, prof_tctx_t *tctx) {
	malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	assert(tctx != NULL);
	malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
	assert(tctx->recent_count > 0);
	--tctx->recent_count;
	prof_tctx_try_destroy(tsd, tctx);
}

static inline edata_t *
prof_recent_alloc_edata_get_no_lock(const prof_recent_t *n) {
	return (edata_t *)atomic_load_p(&n->alloc_edata, ATOMIC_ACQUIRE);
}

edata_t *
prof_recent_alloc_edata_get_no_lock_test(const prof_recent_t *n) {
	cassert(config_prof);
	return prof_recent_alloc_edata_get_no_lock(n);
}

static inline edata_t *
prof_recent_alloc_edata_get(tsd_t *tsd, const prof_recent_t *n) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	return prof_recent_alloc_edata_get_no_lock(n);
}

static void
prof_recent_alloc_edata_set(tsd_t *tsd, prof_recent_t *n, edata_t *edata) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	atomic_store_p(&n->alloc_edata, edata, ATOMIC_RELEASE);
}

void
edata_prof_recent_alloc_init(edata_t *edata) {
	cassert(config_prof);
	edata_prof_recent_alloc_set_dont_call_directly(edata, NULL);
}

static inline prof_recent_t *
edata_prof_recent_alloc_get_no_lock(const edata_t *edata) {
	cassert(config_prof);
	return edata_prof_recent_alloc_get_dont_call_directly(edata);
}

prof_recent_t *
edata_prof_recent_alloc_get_no_lock_test(const edata_t *edata) {
	cassert(config_prof);
	return edata_prof_recent_alloc_get_no_lock(edata);
}

static inline prof_recent_t *
edata_prof_recent_alloc_get(tsd_t *tsd, const edata_t *edata) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	prof_recent_t *recent_alloc =
	    edata_prof_recent_alloc_get_no_lock(edata);
	assert(recent_alloc == NULL ||
	    prof_recent_alloc_edata_get(tsd, recent_alloc) == edata);
	return recent_alloc;
}

static prof_recent_t *
edata_prof_recent_alloc_update_internal(tsd_t *tsd, edata_t *edata,
    prof_recent_t *recent_alloc) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	prof_recent_t *old_recent_alloc =
	    edata_prof_recent_alloc_get(tsd, edata);
	edata_prof_recent_alloc_set_dont_call_directly(edata, recent_alloc);
	return old_recent_alloc;
}

static void
edata_prof_recent_alloc_set(tsd_t *tsd, edata_t *edata,
    prof_recent_t *recent_alloc) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	assert(recent_alloc != NULL);
	prof_recent_t *old_recent_alloc =
	    edata_prof_recent_alloc_update_internal(tsd, edata, recent_alloc);
	assert(old_recent_alloc == NULL);
	prof_recent_alloc_edata_set(tsd, recent_alloc, edata);
}

static void
edata_prof_recent_alloc_reset(tsd_t *tsd, edata_t *edata,
    prof_recent_t *recent_alloc) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	assert(recent_alloc != NULL);
	prof_recent_t *old_recent_alloc =
	    edata_prof_recent_alloc_update_internal(tsd, edata, NULL);
	assert(old_recent_alloc == recent_alloc);
	assert(edata == prof_recent_alloc_edata_get(tsd, recent_alloc));
	prof_recent_alloc_edata_set(tsd, recent_alloc, NULL);
}

/*
 * This function should be called right before an allocation is released, so
 * that the associated recent allocation record can contain the following
 * information:
 * (1) The allocation is released;
 * (2) The time of the deallocation; and
 * (3) The prof_tctx associated with the deallocation.
 */
void
prof_recent_alloc_reset(tsd_t *tsd, edata_t *edata) {
	cassert(config_prof);
	/*
	 * Check whether the recent allocation record still exists without
	 * trying to acquire the lock.
	 */
	if (edata_prof_recent_alloc_get_no_lock(edata) == NULL) {
		return;
	}

	prof_tctx_t *dalloc_tctx = prof_tctx_create(tsd);
	/*
	 * In case dalloc_tctx is NULL, e.g. due to OOM, we will not record the
	 * deallocation time / tctx, which is handled later, after we check
	 * again when holding the lock.
	 */

	if (dalloc_tctx != NULL) {
		malloc_mutex_lock(tsd_tsdn(tsd), dalloc_tctx->tdata->lock);
		increment_recent_count(tsd, dalloc_tctx);
		dalloc_tctx->prepared = false;
		malloc_mutex_unlock(tsd_tsdn(tsd), dalloc_tctx->tdata->lock);
	}

	malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	/* Check again after acquiring the lock. */
	prof_recent_t *recent = edata_prof_recent_alloc_get(tsd, edata);
	if (recent != NULL) {
		assert(nstime_equals_zero(&recent->dalloc_time));
		assert(recent->dalloc_tctx == NULL);
		if (dalloc_tctx != NULL) {
			nstime_prof_update(&recent->dalloc_time);
			recent->dalloc_tctx = dalloc_tctx;
			dalloc_tctx = NULL;
		}
		edata_prof_recent_alloc_reset(tsd, edata, recent);
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);

	if (dalloc_tctx != NULL) {
		/* We lost the race - the allocation record was just gone. */
		decrement_recent_count(tsd, dalloc_tctx);
	}
}
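
/*
 * Illustrative sketch, not compiled: the two-way association that
 * edata_prof_recent_alloc_set() / _reset() above maintain.  While a
 * recorded allocation is live, the edata and its record point at each
 * other, and both links are updated together under prof_recent_alloc_mtx,
 * so a holder of the mutex can safely follow either direction.  The
 * function name below is hypothetical.
 */
#if 0
static void
association_invariant_sketch(tsd_t *tsd, edata_t *edata) {
	malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	prof_recent_t *rec = edata_prof_recent_alloc_get(tsd, edata);
	if (rec != NULL) {
		/* The backlink always agrees with the forward link. */
		assert(prof_recent_alloc_edata_get(tsd, rec) == edata);
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
}
#endif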

static void
prof_recent_alloc_evict_edata(tsd_t *tsd, prof_recent_t *recent_alloc) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	edata_t *edata = prof_recent_alloc_edata_get(tsd, recent_alloc);
	if (edata != NULL) {
		edata_prof_recent_alloc_reset(tsd, edata, recent_alloc);
	}
}

static bool
prof_recent_alloc_is_empty(tsd_t *tsd) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	if (ql_empty(&prof_recent_alloc_list)) {
		assert(prof_recent_alloc_count == 0);
		return true;
	} else {
		assert(prof_recent_alloc_count > 0);
		return false;
	}
}

static void
prof_recent_alloc_assert_count(tsd_t *tsd) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	if (!config_debug) {
		return;
	}
	ssize_t count = 0;
	prof_recent_t *n;
	ql_foreach(n, &prof_recent_alloc_list, link) {
		++count;
	}
	assert(count == prof_recent_alloc_count);
	assert(prof_recent_alloc_max_get(tsd) == -1 ||
	    count <= prof_recent_alloc_max_get(tsd));
}
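
/*
 * Illustrative sketch, not compiled: the max setting checked by the
 * helpers above doubles as the mode switch for the whole module,
 * following the conventions used throughout this file: -1 means
 * unlimited history, 0 means last-N mode is off, and N > 0 keeps the N
 * most recent sampled allocations.  The helper name below is
 * hypothetical.
 */
#if 0
static const char *
last_n_mode_sketch(void) {
	ssize_t max = prof_recent_alloc_max_get_no_lock();
	if (max == 0) {
		return "off";
	}
	return max == -1 ? "unlimited" : "last-N";
}
#endif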

void
prof_recent_alloc(tsd_t *tsd, edata_t *edata, size_t size, size_t usize) {
	cassert(config_prof);
	assert(edata != NULL);
	prof_tctx_t *tctx = edata_prof_tctx_get(edata);

	malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock);
	malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	prof_recent_alloc_assert_count(tsd);

	/*
	 * Reserve a new prof_recent_t node if needed.  If needed, we release
	 * the prof_recent_alloc_mtx lock and allocate.  Then, rather than
	 * immediately checking for OOM, we regain the lock and try to make use
	 * of the reserve node if needed.  There are six scenarios:
	 *
	 *          \ now | no need | need but OOMed | need and allocated
	 *  later   \     |         |                |
	 * ------------------------------------------------------------
	 * no need        | (1)     | (2)            | (3)
	 * ------------------------------------------------------------
	 * need           | (4)     | (5)            | (6)
	 *
	 * First, "(4)" never happens, because we don't release the lock in the
	 * middle if there's no need for a new node; in such cases "(1)" always
	 * takes place, which is trivial.
	 *
	 * Out of the remaining four scenarios, "(6)" is the common case and is
	 * trivial.  "(5)" is also trivial, in which case we'll roll back the
	 * effect of prof_recent_alloc_prepare() as expected.
	 *
	 * "(2)" / "(3)" occur when the need for a new node is gone after we
	 * regain the lock.  If the new node is successfully allocated, i.e. in
	 * the case of "(3)", we'll release it in the end; otherwise, i.e. in
	 * the case of "(2)", we do nothing - we're lucky that the OOM ends up
	 * doing no harm at all.
	 *
	 * Therefore, the only performance cost of the "release lock" ->
	 * "allocate" -> "regain lock" design is the "(3)" case, but it happens
	 * very rarely, so the cost is relatively small compared to the gain of
	 * not having to rank prof_recent_alloc_mtx above all the allocation
	 * locks.
	 */
	prof_recent_t *reserve = NULL;
	if (prof_recent_alloc_max_get(tsd) == -1 ||
	    prof_recent_alloc_count < prof_recent_alloc_max_get(tsd)) {
		assert(prof_recent_alloc_max_get(tsd) != 0);
		malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
		reserve = prof_recent_allocate_node(tsd_tsdn(tsd));
		malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
		prof_recent_alloc_assert_count(tsd);
	}

	if (prof_recent_alloc_max_get(tsd) == 0) {
		assert(prof_recent_alloc_is_empty(tsd));
		goto label_rollback;
	}

	prof_tctx_t *old_alloc_tctx, *old_dalloc_tctx;
	if (prof_recent_alloc_count == prof_recent_alloc_max_get(tsd)) {
		/* If the upper limit is reached, rotate the head. */
		assert(prof_recent_alloc_max_get(tsd) != -1);
		assert(!prof_recent_alloc_is_empty(tsd));
		prof_recent_t *head = ql_first(&prof_recent_alloc_list);
		old_alloc_tctx = head->alloc_tctx;
		assert(old_alloc_tctx != NULL);
		old_dalloc_tctx = head->dalloc_tctx;
		prof_recent_alloc_evict_edata(tsd, head);
		ql_rotate(&prof_recent_alloc_list, link);
	} else {
		/* Otherwise make use of the new node. */
		assert(prof_recent_alloc_max_get(tsd) == -1 ||
		    prof_recent_alloc_count < prof_recent_alloc_max_get(tsd));
		if (reserve == NULL) {
			goto label_rollback;
		}
		ql_elm_new(reserve, link);
		ql_tail_insert(&prof_recent_alloc_list, reserve, link);
		reserve = NULL;
		old_alloc_tctx = NULL;
		old_dalloc_tctx = NULL;
		++prof_recent_alloc_count;
	}

	/* Fill content into the tail node. */
	prof_recent_t *tail = ql_last(&prof_recent_alloc_list, link);
	assert(tail != NULL);
	tail->size = size;
	tail->usize = usize;
	nstime_copy(&tail->alloc_time, edata_prof_alloc_time_get(edata));
	tail->alloc_tctx = tctx;
	nstime_init_zero(&tail->dalloc_time);
	tail->dalloc_tctx = NULL;
	edata_prof_recent_alloc_set(tsd, edata, tail);

	assert(!prof_recent_alloc_is_empty(tsd));
	prof_recent_alloc_assert_count(tsd);
	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);

	if (reserve != NULL) {
		prof_recent_free_node(tsd_tsdn(tsd), reserve);
	}

	/*
	 * Asynchronously handle the tctx of the old node, so that there's no
	 * simultaneous holding of prof_recent_alloc_mtx and tdata->lock.
	 * In the worst case this may delay the tctx release but it's better
	 * than holding prof_recent_alloc_mtx for longer.
	 */
	if (old_alloc_tctx != NULL) {
		decrement_recent_count(tsd, old_alloc_tctx);
	}
	if (old_dalloc_tctx != NULL) {
		decrement_recent_count(tsd, old_dalloc_tctx);
	}
	return;

label_rollback:
	assert(edata_prof_recent_alloc_get(tsd, edata) == NULL);
	prof_recent_alloc_assert_count(tsd);
	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	if (reserve != NULL) {
		prof_recent_free_node(tsd_tsdn(tsd), reserve);
	}
	decrement_recent_count(tsd, tctx);
}
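
/*
 * Illustrative sketch, not compiled: what the rotation branch in
 * prof_recent_alloc() does when the limit is already reached.  With
 * max == 3 and records [A, B, C] (oldest at head), recording a new
 * allocation D detaches the head from its edata and recycles the head
 * node as the tail, so no allocation is needed on this path.  The
 * function name below is hypothetical.
 */
#if 0
static void
rotation_sketch(tsd_t *tsd) {
	prof_recent_t *head = ql_first(&prof_recent_alloc_list); /* A */
	prof_recent_alloc_evict_edata(tsd, head);
	ql_rotate(&prof_recent_alloc_list, link); /* Now [B, C, A]. */
	prof_recent_t *tail = ql_last(&prof_recent_alloc_list, link);
	/* A's node is then overwritten with D's contents: [B, C, D]. */
	assert(tail == head);
}
#endif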

ssize_t
prof_recent_alloc_max_ctl_read() {
	cassert(config_prof);
	/* Don't bother to acquire the lock. */
	return prof_recent_alloc_max_get_no_lock();
}

static void
prof_recent_alloc_restore_locked(tsd_t *tsd, prof_recent_list_t *to_delete) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	ssize_t max = prof_recent_alloc_max_get(tsd);
	if (max == -1 || prof_recent_alloc_count <= max) {
		/* Easy case - no need to alter the list. */
		ql_new(to_delete);
		prof_recent_alloc_assert_count(tsd);
		return;
	}

	prof_recent_t *node;
	ql_foreach(node, &prof_recent_alloc_list, link) {
		if (prof_recent_alloc_count == max) {
			break;
		}
		prof_recent_alloc_evict_edata(tsd, node);
		--prof_recent_alloc_count;
	}
	assert(prof_recent_alloc_count == max);

	ql_move(to_delete, &prof_recent_alloc_list);
	if (max == 0) {
		assert(node == NULL);
	} else {
		assert(node != NULL);
		ql_split(to_delete, node, &prof_recent_alloc_list, link);
	}
	assert(!ql_empty(to_delete));
	prof_recent_alloc_assert_count(tsd);
}
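
/*
 * Worked example for the trimming above, under the stated list order
 * (oldest at head): with records [A, B, C, D, E] and max lowered to 2,
 * the eviction loop detaches the edata links of A, B and C and exits
 * with node == D.  ql_move() then hands the entire list to to_delete,
 * and ql_split() gives the tail [D, E] back to prof_recent_alloc_list,
 * leaving [A, B, C] in to_delete for the caller to release outside the
 * mutex.
 */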

static void
prof_recent_alloc_async_cleanup(tsd_t *tsd, prof_recent_list_t *to_delete) {
	malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &prof_recent_dump_mtx);
	malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	while (!ql_empty(to_delete)) {
		prof_recent_t *node = ql_first(to_delete);
		ql_remove(to_delete, node, link);
		decrement_recent_count(tsd, node->alloc_tctx);
		if (node->dalloc_tctx != NULL) {
			decrement_recent_count(tsd, node->dalloc_tctx);
		}
		prof_recent_free_node(tsd_tsdn(tsd), node);
	}
}

ssize_t
prof_recent_alloc_max_ctl_write(tsd_t *tsd, ssize_t max) {
	cassert(config_prof);
	assert(max >= -1);
	malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	prof_recent_alloc_assert_count(tsd);
	const ssize_t old_max = prof_recent_alloc_max_update(tsd, max);
	prof_recent_list_t to_delete;
	prof_recent_alloc_restore_locked(tsd, &to_delete);
	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	prof_recent_alloc_async_cleanup(tsd, &to_delete);
	return old_max;
}

static void
prof_recent_alloc_dump_bt(emitter_t *emitter, prof_tctx_t *tctx) {
	char bt_buf[2 * sizeof(intptr_t) + 3];
	char *s = bt_buf;
	assert(tctx != NULL);
	prof_bt_t *bt = &tctx->gctx->bt;
	for (size_t i = 0; i < bt->len; ++i) {
		malloc_snprintf(bt_buf, sizeof(bt_buf), "%p", bt->vec[i]);
		emitter_json_value(emitter, emitter_type_string, &s);
	}
}

static void
prof_recent_alloc_dump_node(emitter_t *emitter, prof_recent_t *node) {
	emitter_json_object_begin(emitter);

	emitter_json_kv(emitter, "size", emitter_type_size, &node->size);
	emitter_json_kv(emitter, "usize", emitter_type_size, &node->usize);
	bool released = prof_recent_alloc_edata_get_no_lock(node) == NULL;
	emitter_json_kv(emitter, "released", emitter_type_bool, &released);

	emitter_json_kv(emitter, "alloc_thread_uid", emitter_type_uint64,
	    &node->alloc_tctx->thr_uid);
	prof_tdata_t *alloc_tdata = node->alloc_tctx->tdata;
	assert(alloc_tdata != NULL);
	if (alloc_tdata->thread_name != NULL) {
		emitter_json_kv(emitter, "alloc_thread_name",
		    emitter_type_string, &alloc_tdata->thread_name);
	}
	uint64_t alloc_time_ns = nstime_ns(&node->alloc_time);
	emitter_json_kv(emitter, "alloc_time", emitter_type_uint64,
	    &alloc_time_ns);
	emitter_json_array_kv_begin(emitter, "alloc_trace");
	prof_recent_alloc_dump_bt(emitter, node->alloc_tctx);
	emitter_json_array_end(emitter);

	if (released && node->dalloc_tctx != NULL) {
		emitter_json_kv(emitter, "dalloc_thread_uid",
		    emitter_type_uint64, &node->dalloc_tctx->thr_uid);
		prof_tdata_t *dalloc_tdata = node->dalloc_tctx->tdata;
		assert(dalloc_tdata != NULL);
		if (dalloc_tdata->thread_name != NULL) {
			emitter_json_kv(emitter, "dalloc_thread_name",
			    emitter_type_string, &dalloc_tdata->thread_name);
		}
		assert(!nstime_equals_zero(&node->dalloc_time));
		uint64_t dalloc_time_ns = nstime_ns(&node->dalloc_time);
		emitter_json_kv(emitter, "dalloc_time", emitter_type_uint64,
		    &dalloc_time_ns);
		emitter_json_array_kv_begin(emitter, "dalloc_trace");
		prof_recent_alloc_dump_bt(emitter, node->dalloc_tctx);
		emitter_json_array_end(emitter);
	}

	emitter_json_object_end(emitter);
}
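
/*
 * Illustrative output, shape only (all values below are placeholders):
 * one record as emitted by prof_recent_alloc_dump_node() above, in
 * compact JSON.  The "dalloc_*" fields appear only when the allocation
 * has been released and a deallocation tctx was recorded; the
 * "*_thread_name" fields appear only when a thread name was set.
 *
 * {"size": 4096, "usize": 4096, "released": true,
 *  "alloc_thread_uid": 1, "alloc_thread_name": "worker",
 *  "alloc_time": 123456789, "alloc_trace": ["0x...", "0x..."],
 *  "dalloc_thread_uid": 2, "dalloc_time": 123459999,
 *  "dalloc_trace": ["0x...", "0x..."]}
 */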

#define PROF_RECENT_PRINT_BUFSIZE 65536
JEMALLOC_COLD
void
prof_recent_alloc_dump(tsd_t *tsd, write_cb_t *write_cb, void *cbopaque) {
	cassert(config_prof);
	malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_dump_mtx);
	buf_writer_t buf_writer;
	buf_writer_init(tsd_tsdn(tsd), &buf_writer, write_cb, cbopaque, NULL,
	    PROF_RECENT_PRINT_BUFSIZE);
	emitter_t emitter;
	emitter_init(&emitter, emitter_output_json_compact, buf_writer_cb,
	    &buf_writer);
	prof_recent_list_t temp_list;

	malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	prof_recent_alloc_assert_count(tsd);
	ssize_t dump_max = prof_recent_alloc_max_get(tsd);
	ql_move(&temp_list, &prof_recent_alloc_list);
	ssize_t dump_count = prof_recent_alloc_count;
	prof_recent_alloc_count = 0;
	prof_recent_alloc_assert_count(tsd);
	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);

	emitter_begin(&emitter);
	uint64_t sample_interval = (uint64_t)1U << lg_prof_sample;
	emitter_json_kv(&emitter, "sample_interval", emitter_type_uint64,
	    &sample_interval);
	emitter_json_kv(&emitter, "recent_alloc_max", emitter_type_ssize,
	    &dump_max);
	emitter_json_array_kv_begin(&emitter, "recent_alloc");
	prof_recent_t *node;
	ql_foreach(node, &temp_list, link) {
		prof_recent_alloc_dump_node(&emitter, node);
	}
	emitter_json_array_end(&emitter);
	emitter_end(&emitter);

	malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	prof_recent_alloc_assert_count(tsd);
	ql_concat(&temp_list, &prof_recent_alloc_list, link);
	ql_move(&prof_recent_alloc_list, &temp_list);
	prof_recent_alloc_count += dump_count;
	prof_recent_alloc_restore_locked(tsd, &temp_list);
	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);

	buf_writer_terminate(tsd_tsdn(tsd), &buf_writer);
	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_dump_mtx);

	prof_recent_alloc_async_cleanup(tsd, &temp_list);
}
#undef PROF_RECENT_PRINT_BUFSIZE

bool
prof_recent_init() {
	cassert(config_prof);
	prof_recent_alloc_max_init();

	if (malloc_mutex_init(&prof_recent_alloc_mtx, "prof_recent_alloc",
	    WITNESS_RANK_PROF_RECENT_ALLOC, malloc_mutex_rank_exclusive)) {
		return true;
	}

	if (malloc_mutex_init(&prof_recent_dump_mtx, "prof_recent_dump",
	    WITNESS_RANK_PROF_RECENT_DUMP, malloc_mutex_rank_exclusive)) {
		return true;
	}

	ql_new(&prof_recent_alloc_list);

	return false;
}
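
/*
 * Illustrative usage, not compiled: the external lifecycle of this
 * module.  prof_recent_init() runs during jemalloc boot; the ctl entry
 * points above are reached through jemalloc's experimental mallctl
 * namespace (the exact mallctl names live in ctl.c, not here), and the
 * dump entry point streams compact JSON through the given write
 * callback.  The wrapper name below is hypothetical.
 */
#if 0
static void
module_lifecycle_sketch(tsd_t *tsd, write_cb_t *write_cb, void *cbopaque) {
	/* Enable last-N mode, keeping the 100 most recent records. */
	ssize_t old_max = prof_recent_alloc_max_ctl_write(tsd, 100);

	/* ... sampled allocations get recorded via prof_recent_alloc() ... */

	/* Dump all current records as JSON. */
	prof_recent_alloc_dump(tsd, write_cb, cbopaque);

	/* Restore the previous limit; any excess records get freed. */
	prof_recent_alloc_max_ctl_write(tsd, old_max);
}
#endif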