GitHub Repository: freebsd/freebsd-src
Path: blob/main/contrib/jemalloc/src/edata_cache.c
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

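/*
 * Initialize an edata_cache tied to the given base allocator.  Returns true
 * on error (mutex initialization failure) and false on success, following
 * the usual jemalloc convention.
 */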
bool
edata_cache_init(edata_cache_t *edata_cache, base_t *base) {
	edata_avail_new(&edata_cache->avail);
	/*
	 * This is not strictly necessary, since the edata_cache_t is only
	 * created inside an arena, which is zeroed on creation. But this is
	 * handy as a safety measure.
	 */
	atomic_store_zu(&edata_cache->count, 0, ATOMIC_RELAXED);
	if (malloc_mutex_init(&edata_cache->mtx, "edata_cache",
	    WITNESS_RANK_EDATA_CACHE, malloc_mutex_rank_exclusive)) {
		return true;
	}
	edata_cache->base = base;
	return false;
}

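/*
 * Pop a cached edata_t if one is available; otherwise fall through to the
 * base allocator.  The cache mutex is held only for the lookup itself.
 */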
edata_t *
edata_cache_get(tsdn_t *tsdn, edata_cache_t *edata_cache) {
	malloc_mutex_lock(tsdn, &edata_cache->mtx);
	edata_t *edata = edata_avail_first(&edata_cache->avail);
	if (edata == NULL) {
		malloc_mutex_unlock(tsdn, &edata_cache->mtx);
		return base_alloc_edata(tsdn, edata_cache->base);
	}
	edata_avail_remove(&edata_cache->avail, edata);
	atomic_load_sub_store_zu(&edata_cache->count, 1);
	malloc_mutex_unlock(tsdn, &edata_cache->mtx);
	return edata;
}

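/*
 * Return an edata_t to the cache.  Insertion into the available set and the
 * count update both happen under the cache mutex.
 */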
void
edata_cache_put(tsdn_t *tsdn, edata_cache_t *edata_cache, edata_t *edata) {
	malloc_mutex_lock(tsdn, &edata_cache->mtx);
	edata_avail_insert(&edata_cache->avail, edata);
	atomic_load_add_store_zu(&edata_cache->count, 1);
	malloc_mutex_unlock(tsdn, &edata_cache->mtx);
}

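/*
 * Fork handling just forwards to the cache mutex, so that the lock is left in
 * a consistent state in both the parent and the child after fork().
 */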
void
edata_cache_prefork(tsdn_t *tsdn, edata_cache_t *edata_cache) {
	malloc_mutex_prefork(tsdn, &edata_cache->mtx);
}

void
edata_cache_postfork_parent(tsdn_t *tsdn, edata_cache_t *edata_cache) {
	malloc_mutex_postfork_parent(tsdn, &edata_cache->mtx);
}

void
edata_cache_postfork_child(tsdn_t *tsdn, edata_cache_t *edata_cache) {
	malloc_mutex_postfork_child(tsdn, &edata_cache->mtx);
}

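/*
 * The "fast" cache layers an unsynchronized inactive list on top of a shared
 * edata_cache_t.  Gets and puts against the list take no locks; the fallback
 * cache (and its mutex) is touched only when the list runs dry or is flushed.
 */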
void
edata_cache_fast_init(edata_cache_fast_t *ecs, edata_cache_t *fallback) {
	edata_list_inactive_init(&ecs->list);
	ecs->fallback = fallback;
	ecs->disabled = false;
}

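/*
 * Grab up to EDATA_CACHE_FAST_FILL entries from the fallback cache in a
 * single mutex acquisition, so that subsequent gets can be served locally.
 */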
static void
edata_cache_fast_try_fill_from_fallback(tsdn_t *tsdn,
    edata_cache_fast_t *ecs) {
	edata_t *edata;
	malloc_mutex_lock(tsdn, &ecs->fallback->mtx);
	for (int i = 0; i < EDATA_CACHE_FAST_FILL; i++) {
		edata = edata_avail_remove_first(&ecs->fallback->avail);
		if (edata == NULL) {
			break;
		}
		edata_list_inactive_append(&ecs->list, edata);
		atomic_load_sub_store_zu(&ecs->fallback->count, 1);
	}
	malloc_mutex_unlock(tsdn, &ecs->fallback->mtx);
}

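/*
 * Three tiers: the local list (no locking), a refill from the fallback cache
 * (one mutex acquisition), and finally a fresh base allocation.  When the
 * cache is disabled, everything goes straight to the fallback.
 */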
edata_t *
edata_cache_fast_get(tsdn_t *tsdn, edata_cache_fast_t *ecs) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_EDATA_CACHE, 0);

	if (ecs->disabled) {
		assert(edata_list_inactive_first(&ecs->list) == NULL);
		return edata_cache_get(tsdn, ecs->fallback);
	}

	edata_t *edata = edata_list_inactive_first(&ecs->list);
	if (edata != NULL) {
		edata_list_inactive_remove(&ecs->list, edata);
		return edata;
	}
	/* Slow path; requires synchronization. */
	edata_cache_fast_try_fill_from_fallback(tsdn, ecs);
	edata = edata_list_inactive_first(&ecs->list);
	if (edata != NULL) {
		edata_list_inactive_remove(&ecs->list, edata);
	} else {
		/*
		 * Slowest path (fallback was also empty); allocate something
		 * new.
		 */
		edata = base_alloc_edata(tsdn, ecs->fallback->base);
	}
	return edata;
}

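/*
 * Move every locally cached edata_t back to the fallback, updating the
 * fallback's count once at the end rather than per entry.
 */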
static void
edata_cache_fast_flush_all(tsdn_t *tsdn, edata_cache_fast_t *ecs) {
	/*
	 * You could imagine smarter cache management policies (like
	 * only flushing down to some threshold in anticipation of
	 * future get requests). But just flushing everything provides
	 * a good opportunity to defrag too, and lets us share code between the
	 * flush and disable pathways.
	 */
	edata_t *edata;
	size_t nflushed = 0;
	malloc_mutex_lock(tsdn, &ecs->fallback->mtx);
	while ((edata = edata_list_inactive_first(&ecs->list)) != NULL) {
		edata_list_inactive_remove(&ecs->list, edata);
		edata_avail_insert(&ecs->fallback->avail, edata);
		nflushed++;
	}
	atomic_load_add_store_zu(&ecs->fallback->count, nflushed);
	malloc_mutex_unlock(tsdn, &ecs->fallback->mtx);
}

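/*
 * Return an edata_t through the fast cache.  With the cache disabled, the
 * entry goes directly to the fallback; otherwise it is kept on the local
 * list without any locking.
 */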
void
edata_cache_fast_put(tsdn_t *tsdn, edata_cache_fast_t *ecs, edata_t *edata) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_EDATA_CACHE, 0);

	if (ecs->disabled) {
		assert(edata_list_inactive_first(&ecs->list) == NULL);
		edata_cache_put(tsdn, ecs->fallback, edata);
		return;
	}

	/*
	 * Prepend rather than append, to do LIFO ordering in the hopes of some
	 * cache locality.
	 */
	edata_list_inactive_prepend(&ecs->list, edata);
}

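/*
 * Flush everything back to the fallback and route all future gets and puts
 * there as well; the local list stays empty from this point on.
 */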
void
edata_cache_fast_disable(tsdn_t *tsdn, edata_cache_fast_t *ecs) {
	edata_cache_fast_flush_all(tsdn, ecs);
	ecs->disabled = true;
}