GitHub Repository: freebsd/freebsd-src
Path: blob/main/contrib/jemalloc/src/pa.c
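
/*
 * Annotation (not in the original source): pa.c is the page allocator
 * (PA) layer of jemalloc.  A pa_shard_t fronts two backing page
 * allocators behind a common interface (pai_t): the PAC ("page
 * allocator classic"), which always exists, and the optional HPA (huge
 * page allocator) with its SEC (small extent cache).  This file routes
 * allocation, resize, and deallocation between the two and keeps
 * shard-wide accounting.
 */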
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/san.h"
#include "jemalloc/internal/hpa.h"
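
/*
 * Shard-wide count of active pages, maintained with relaxed atomics.
 * The count is kept in pages, not bytes; callers pass size >> LG_PAGE.
 */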
static void
pa_nactive_add(pa_shard_t *shard, size_t add_pages) {
	atomic_fetch_add_zu(&shard->nactive, add_pages, ATOMIC_RELAXED);
}

static void
pa_nactive_sub(pa_shard_t *shard, size_t sub_pages) {
	assert(atomic_load_zu(&shard->nactive, ATOMIC_RELAXED) >= sub_pages);
	atomic_fetch_sub_zu(&shard->nactive, sub_pages, ATOMIC_RELAXED);
}
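
/*
 * Initialize state shared by all shards.  The central HPA state is only
 * set up when the HPA is enabled.  Returns true on error, following the
 * convention used throughout this file.
 */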
bool
pa_central_init(pa_central_t *central, base_t *base, bool hpa,
    hpa_hooks_t *hpa_hooks) {
	bool err;
	if (hpa) {
		err = hpa_central_init(&central->hpa, base, hpa_hooks);
		if (err) {
			return true;
		}
	}
	return false;
}
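
/*
 * Initialize a single page-allocator shard: its edata cache and its PAC.
 * The HPA pieces are not touched here; they are set up later by
 * pa_shard_enable_hpa(), and only if huge-page support is turned on.
 */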
bool
pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, pa_central_t *central,
    emap_t *emap, base_t *base, unsigned ind, pa_shard_stats_t *stats,
    malloc_mutex_t *stats_mtx, nstime_t *cur_time,
    size_t pac_oversize_threshold, ssize_t dirty_decay_ms,
    ssize_t muzzy_decay_ms) {
	/* This will change eventually, but for now it should hold. */
	assert(base_ind_get(base) == ind);
	if (edata_cache_init(&shard->edata_cache, base)) {
		return true;
	}

	if (pac_init(tsdn, &shard->pac, base, emap, &shard->edata_cache,
	    cur_time, pac_oversize_threshold, dirty_decay_ms, muzzy_decay_ms,
	    &stats->pac_stats, stats_mtx)) {
		return true;
	}

	shard->ind = ind;

	shard->ever_used_hpa = false;
	atomic_store_b(&shard->use_hpa, false, ATOMIC_RELAXED);

	atomic_store_zu(&shard->nactive, 0, ATOMIC_RELAXED);

	shard->stats_mtx = stats_mtx;
	shard->stats = stats;
	memset(shard->stats, 0, sizeof(*shard->stats));

	shard->central = central;
	shard->emap = emap;
	shard->base = base;

	return false;
}
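
/*
 * Turn on the HPA for this shard: initialize the shard's HPA state and
 * the SEC that fronts it, then flip use_hpa so that subsequent
 * pa_alloc() calls try the HPA path first.
 */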
bool
pa_shard_enable_hpa(tsdn_t *tsdn, pa_shard_t *shard,
    const hpa_shard_opts_t *hpa_opts, const sec_opts_t *hpa_sec_opts) {
	if (hpa_shard_init(&shard->hpa_shard, &shard->central->hpa, shard->emap,
	    shard->base, &shard->edata_cache, shard->ind, hpa_opts)) {
		return true;
	}
	if (sec_init(tsdn, &shard->hpa_sec, shard->base, &shard->hpa_shard.pai,
	    hpa_sec_opts)) {
		return true;
	}
	shard->ever_used_hpa = true;
	atomic_store_b(&shard->use_hpa, true, ATOMIC_RELAXED);

	return false;
}
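
/*
 * Stop routing new allocations to the HPA.  ever_used_hpa stays true, so
 * extents already backed by the HPA can still be handled (and flushed at
 * teardown) even after the switch is turned off.
 */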
void
pa_shard_disable_hpa(tsdn_t *tsdn, pa_shard_t *shard) {
	atomic_store_b(&shard->use_hpa, false, ATOMIC_RELAXED);
	if (shard->ever_used_hpa) {
		sec_disable(tsdn, &shard->hpa_sec);
		hpa_shard_disable(tsdn, &shard->hpa_shard);
	}
}
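
/*
 * Reset the shard's active-page count and drop anything cached in the
 * SEC, without tearing down the allocators themselves.
 */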
void
pa_shard_reset(tsdn_t *tsdn, pa_shard_t *shard) {
	atomic_store_zu(&shard->nactive, 0, ATOMIC_RELAXED);
	if (shard->ever_used_hpa) {
		sec_flush(tsdn, &shard->hpa_sec);
	}
}

static bool
pa_shard_uses_hpa(pa_shard_t *shard) {
	return atomic_load_b(&shard->use_hpa, ATOMIC_RELAXED);
}
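
/*
 * Tear down the shard: destroy the PAC unconditionally, and, if the HPA
 * was ever enabled, flush the SEC and disable the HPA shard as well.
 */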
void
pa_shard_destroy(tsdn_t *tsdn, pa_shard_t *shard) {
	pac_destroy(tsdn, &shard->pac);
	if (shard->ever_used_hpa) {
		sec_flush(tsdn, &shard->hpa_sec);
		hpa_shard_disable(tsdn, &shard->hpa_shard);
	}
}
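
/*
 * Map an extent back to the page allocator interface (pai_t) that owns
 * it, based on the PAI tag in the extent's metadata.  HPA-owned extents
 * are routed through the SEC that sits in front of the HPA.
 */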
static pai_t *
pa_get_pai(pa_shard_t *shard, edata_t *edata) {
	return (edata_pai_get(edata) == EXTENT_PAI_PAC
	    ? &shard->pac.pai : &shard->hpa_sec.pai);
}
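
/*
 * Allocate a page-aligned extent of the given size.  Guarded allocations
 * always come from the PAC (the HPA path is skipped for them); everything
 * else tries the HPA first when it is enabled and falls back to the PAC.
 * Returns NULL on failure.
 *
 * A hypothetical caller (the real callers live in the arena code) might
 * look like the following sketch; `szind` is assumed to be in scope:
 *
 *	bool deferred;
 *	edata_t *e = pa_alloc(tsdn, shard, 4 * PAGE, PAGE,
 *	    / * slab * / false, szind, / * zero * / false,
 *	    / * guarded * / false, &deferred);
 *	if (e == NULL) { ... handle OOM ... }
 */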
edata_t *
pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
    bool slab, szind_t szind, bool zero, bool guarded,
    bool *deferred_work_generated) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);
	assert(!guarded || alignment <= PAGE);

	edata_t *edata = NULL;
	if (!guarded && pa_shard_uses_hpa(shard)) {
		edata = pai_alloc(tsdn, &shard->hpa_sec.pai, size, alignment,
		    zero, /* guarded */ false, slab, deferred_work_generated);
	}
	/*
	 * Fall back to the PAC if the HPA is off or couldn't serve the given
	 * allocation request.
	 */
	if (edata == NULL) {
		edata = pai_alloc(tsdn, &shard->pac.pai, size, alignment, zero,
		    guarded, slab, deferred_work_generated);
	}
	if (edata != NULL) {
		assert(edata_size_get(edata) == size);
		pa_nactive_add(shard, size >> LG_PAGE);
		emap_remap(tsdn, shard->emap, edata, szind, slab);
		edata_szind_set(edata, szind);
		edata_slab_set(edata, slab);
		if (slab && (size > 2 * PAGE)) {
			emap_register_interior(tsdn, shard->emap, edata, szind);
		}
		assert(edata_arena_ind_get(edata) == shard->ind);
	}
	return edata;
}
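
/*
 * Grow an extent in place from old_size to new_size.  Returns true on
 * failure, leaving the extent untouched; guarded extents are never
 * resized in place.
 */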
bool
pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
    size_t new_size, szind_t szind, bool zero, bool *deferred_work_generated) {
	assert(new_size > old_size);
	assert(edata_size_get(edata) == old_size);
	assert((new_size & PAGE_MASK) == 0);
	if (edata_guarded_get(edata)) {
		return true;
	}
	size_t expand_amount = new_size - old_size;

	pai_t *pai = pa_get_pai(shard, edata);

	bool error = pai_expand(tsdn, pai, edata, old_size, new_size, zero,
	    deferred_work_generated);
	if (error) {
		return true;
	}

	pa_nactive_add(shard, expand_amount >> LG_PAGE);
	edata_szind_set(edata, szind);
	emap_remap(tsdn, shard->emap, edata, szind, /* slab */ false);
	return false;
}
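
/*
 * Shrink an extent in place from old_size to new_size; the mirror image
 * of pa_expand().  Returns true on failure, and likewise refuses to
 * resize guarded extents.
 */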
bool
pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
    size_t new_size, szind_t szind, bool *deferred_work_generated) {
	assert(new_size < old_size);
	assert(edata_size_get(edata) == old_size);
	assert((new_size & PAGE_MASK) == 0);
	if (edata_guarded_get(edata)) {
		return true;
	}
	size_t shrink_amount = old_size - new_size;

	pai_t *pai = pa_get_pai(shard, edata);
	bool error = pai_shrink(tsdn, pai, edata, old_size, new_size,
	    deferred_work_generated);
	if (error) {
		return true;
	}
	pa_nactive_sub(shard, shrink_amount >> LG_PAGE);

	edata_szind_set(edata, szind);
	emap_remap(tsdn, shard->emap, edata, szind, /* slab */ false);
	return false;
}
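
/*
 * Return an extent to whichever page allocator it came from, undoing the
 * emap registration and the szind/addr bookkeeping done at allocation
 * time before handing it to the owning pai_t.
 */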
void
pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
    bool *deferred_work_generated) {
	emap_remap(tsdn, shard->emap, edata, SC_NSIZES, /* slab */ false);
	if (edata_slab_get(edata)) {
		emap_deregister_interior(tsdn, shard->emap, edata);
		/*
		 * The slab state of the extent isn't cleared.  It may be used
		 * by the pai implementation, e.g. to make caching decisions.
		 */
	}
	edata_addr_set(edata, edata_base_get(edata));
	edata_szind_set(edata, SC_NSIZES);
	pa_nactive_sub(shard, edata_size_get(edata) >> LG_PAGE);
	pai_t *pai = pa_get_pai(shard, edata);
	pai_dalloc(tsdn, pai, edata, deferred_work_generated);
}
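
/*
 * Thin wrappers that forward tuning knobs (the retained-extent grow
 * limit and the dirty/muzzy decay times) to the PAC.
 */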
bool
pa_shard_retain_grow_limit_get_set(tsdn_t *tsdn, pa_shard_t *shard,
    size_t *old_limit, size_t *new_limit) {
	return pac_retain_grow_limit_get_set(tsdn, &shard->pac, old_limit,
	    new_limit);
}

bool
pa_decay_ms_set(tsdn_t *tsdn, pa_shard_t *shard, extent_state_t state,
    ssize_t decay_ms, pac_purge_eagerness_t eagerness) {
	return pac_decay_ms_set(tsdn, &shard->pac, state, decay_ms, eagerness);
}

ssize_t
pa_decay_ms_get(pa_shard_t *shard, extent_state_t state) {
	return pac_decay_ms_get(&shard->pac, state);
}
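
/*
 * Deferred-work plumbing.  These two entry points forward to the HPA
 * shard and do nothing when the shard isn't using huge pages.
 */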
void
pa_shard_set_deferral_allowed(tsdn_t *tsdn, pa_shard_t *shard,
    bool deferral_allowed) {
	if (pa_shard_uses_hpa(shard)) {
		hpa_shard_set_deferral_allowed(tsdn, &shard->hpa_shard,
		    deferral_allowed);
	}
}

void
pa_shard_do_deferred_work(tsdn_t *tsdn, pa_shard_t *shard) {
	if (pa_shard_uses_hpa(shard)) {
		hpa_shard_do_deferred_work(tsdn, &shard->hpa_shard);
	}
}
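
/*
 * Note on the early return below: BACKGROUND_THREAD_DEFERRED_MIN is the
 * smallest possible wait (defined as 0 in jemalloc, i.e. "work is due
 * now"), so once the PAC reports it, consulting the HPA cannot produce a
 * sooner deadline.
 */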
/*
 * Get time until next deferred work ought to happen.  If there are
 * multiple things that have been deferred, this function calculates the
 * time until the soonest of those things.
 */
uint64_t
pa_shard_time_until_deferred_work(tsdn_t *tsdn, pa_shard_t *shard) {
	uint64_t time = pai_time_until_deferred_work(tsdn, &shard->pac.pai);
	if (time == BACKGROUND_THREAD_DEFERRED_MIN) {
		return time;
	}

	if (pa_shard_uses_hpa(shard)) {
		uint64_t hpa =
		    pai_time_until_deferred_work(tsdn, &shard->hpa_shard.pai);
		if (hpa < time) {
			time = hpa;
		}
	}
	return time;
}