GitHub Repository: freebsd/freebsd-src
Path: blob/main/contrib/jemalloc/src/pa_extra.c

#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

/*
 * This file is logically part of the PA module. While pa.c contains the core
 * allocator functionality, this file contains boring integration
 * functionality; things like the pre- and post- fork handlers, and stats
 * merging for CTL refreshes.
 */

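/*
 * Fork integration. Before fork(), each shard's mutexes are acquired in a
 * fixed global order via the numbered prefork phases below; after fork(),
 * the parent releases them, and the child restores them to a usable state
 * (only the forking thread survives there). A minimal sketch of how such
 * handlers are typically registered (illustrative only; the actual
 * registration lives in jemalloc's init path, not in this file):
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 *	    jemalloc_postfork_child);
 *
 * where jemalloc_prefork runs each prefork phase across all arenas before
 * starting the next phase, preserving the lock-ordering discipline.
 */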
void
pa_shard_prefork0(tsdn_t *tsdn, pa_shard_t *shard) {
	malloc_mutex_prefork(tsdn, &shard->pac.decay_dirty.mtx);
	malloc_mutex_prefork(tsdn, &shard->pac.decay_muzzy.mtx);
}

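/*
 * Note that there is no pa_shard_prefork1: the phase numbers here mirror the
 * arena-wide prefork phases, and phase 1 is presumably taken by locks outside
 * the PA module, keeping the global acquisition order consistent.
 */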
void
pa_shard_prefork2(tsdn_t *tsdn, pa_shard_t *shard) {
	if (shard->ever_used_hpa) {
		sec_prefork2(tsdn, &shard->hpa_sec);
	}
}

void
pa_shard_prefork3(tsdn_t *tsdn, pa_shard_t *shard) {
	malloc_mutex_prefork(tsdn, &shard->pac.grow_mtx);
	if (shard->ever_used_hpa) {
		hpa_shard_prefork3(tsdn, &shard->hpa_shard);
	}
}

void
pa_shard_prefork4(tsdn_t *tsdn, pa_shard_t *shard) {
	ecache_prefork(tsdn, &shard->pac.ecache_dirty);
	ecache_prefork(tsdn, &shard->pac.ecache_muzzy);
	ecache_prefork(tsdn, &shard->pac.ecache_retained);
	if (shard->ever_used_hpa) {
		hpa_shard_prefork4(tsdn, &shard->hpa_shard);
	}
}

void
pa_shard_prefork5(tsdn_t *tsdn, pa_shard_t *shard) {
	edata_cache_prefork(tsdn, &shard->edata_cache);
}

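/*
 * The parent still holds every lock taken in the prefork phases above, so
 * the postfork-parent path simply releases them (note the rough reversal of
 * the acquisition order).
 */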
void
pa_shard_postfork_parent(tsdn_t *tsdn, pa_shard_t *shard) {
	edata_cache_postfork_parent(tsdn, &shard->edata_cache);
	ecache_postfork_parent(tsdn, &shard->pac.ecache_dirty);
	ecache_postfork_parent(tsdn, &shard->pac.ecache_muzzy);
	ecache_postfork_parent(tsdn, &shard->pac.ecache_retained);
	malloc_mutex_postfork_parent(tsdn, &shard->pac.grow_mtx);
	malloc_mutex_postfork_parent(tsdn, &shard->pac.decay_dirty.mtx);
	malloc_mutex_postfork_parent(tsdn, &shard->pac.decay_muzzy.mtx);
	if (shard->ever_used_hpa) {
		sec_postfork_parent(tsdn, &shard->hpa_sec);
		hpa_shard_postfork_parent(tsdn, &shard->hpa_shard);
	}
}

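/*
 * In the child, only the thread that called fork() exists, so lock state
 * inherited from other threads cannot be trusted; the postfork-child
 * variants therefore restore each mutex to a usable state (on most platforms
 * by reinitializing it) rather than simply unlocking.
 */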
void
pa_shard_postfork_child(tsdn_t *tsdn, pa_shard_t *shard) {
	edata_cache_postfork_child(tsdn, &shard->edata_cache);
	ecache_postfork_child(tsdn, &shard->pac.ecache_dirty);
	ecache_postfork_child(tsdn, &shard->pac.ecache_muzzy);
	ecache_postfork_child(tsdn, &shard->pac.ecache_retained);
	malloc_mutex_postfork_child(tsdn, &shard->pac.grow_mtx);
	malloc_mutex_postfork_child(tsdn, &shard->pac.decay_dirty.mtx);
	malloc_mutex_postfork_child(tsdn, &shard->pac.decay_muzzy.mtx);
	if (shard->ever_used_hpa) {
		sec_postfork_child(tsdn, &shard->hpa_sec);
		hpa_shard_postfork_child(tsdn, &shard->hpa_shard);
	}
}

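/*
 * Cheap summary counters for the shard. The outputs are accumulators (+=),
 * so a caller can merge several shards into one total, e.g.:
 *
 *	size_t nactive = 0, ndirty = 0, nmuzzy = 0;
 *	pa_shard_basic_stats_merge(shard, &nactive, &ndirty, &nmuzzy);
 *
 * Relaxed atomic loads suffice here; these are monitoring values, and a
 * slightly stale read is acceptable.
 */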
void
pa_shard_basic_stats_merge(pa_shard_t *shard, size_t *nactive, size_t *ndirty,
    size_t *nmuzzy) {
	*nactive += atomic_load_zu(&shard->nactive, ATOMIC_RELAXED);
	*ndirty += ecache_npages_get(&shard->pac.ecache_dirty);
	*nmuzzy += ecache_npages_get(&shard->pac.ecache_muzzy);
}

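/*
 * Full stats merge for a CTL refresh. Page counts are converted to bytes by
 * shifting by LG_PAGE (npages << LG_PAGE == npages * PAGE; with 4 KiB pages,
 * LG_PAGE is 12, so e.g. 3 pages -> 12288 bytes).
 */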
void
pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard,
    pa_shard_stats_t *pa_shard_stats_out, pac_estats_t *estats_out,
    hpa_shard_stats_t *hpa_stats_out, sec_stats_t *sec_stats_out,
    size_t *resident) {
	cassert(config_stats);

	pa_shard_stats_out->pac_stats.retained +=
	    ecache_npages_get(&shard->pac.ecache_retained) << LG_PAGE;
	pa_shard_stats_out->edata_avail += atomic_load_zu(
	    &shard->edata_cache.count, ATOMIC_RELAXED);

	size_t resident_pgs = 0;
	resident_pgs += atomic_load_zu(&shard->nactive, ATOMIC_RELAXED);
	resident_pgs += ecache_npages_get(&shard->pac.ecache_dirty);
	*resident += (resident_pgs << LG_PAGE);

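	/*
	 * The source counters are read consistently under the shard's stats
	 * mutex; the *_unsynchronized increments are safe because the output
	 * struct is private to this call.
	 */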
	/* Dirty decay stats */
	locked_inc_u64_unsynchronized(
	    &pa_shard_stats_out->pac_stats.decay_dirty.npurge,
	    locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
	    &shard->pac.stats->decay_dirty.npurge));
	locked_inc_u64_unsynchronized(
	    &pa_shard_stats_out->pac_stats.decay_dirty.nmadvise,
	    locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
	    &shard->pac.stats->decay_dirty.nmadvise));
	locked_inc_u64_unsynchronized(
	    &pa_shard_stats_out->pac_stats.decay_dirty.purged,
	    locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
	    &shard->pac.stats->decay_dirty.purged));

	/* Muzzy decay stats */
	locked_inc_u64_unsynchronized(
	    &pa_shard_stats_out->pac_stats.decay_muzzy.npurge,
	    locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
	    &shard->pac.stats->decay_muzzy.npurge));
	locked_inc_u64_unsynchronized(
	    &pa_shard_stats_out->pac_stats.decay_muzzy.nmadvise,
	    locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
	    &shard->pac.stats->decay_muzzy.nmadvise));
	locked_inc_u64_unsynchronized(
	    &pa_shard_stats_out->pac_stats.decay_muzzy.purged,
	    locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
	    &shard->pac.stats->decay_muzzy.purged));

	atomic_load_add_store_zu(&pa_shard_stats_out->pac_stats.abandoned_vm,
	    atomic_load_zu(&shard->pac.stats->abandoned_vm, ATOMIC_RELAXED));

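	/*
	 * Per page-size-class extent stats: for each of the SC_NPSIZES
	 * classes, snapshot extent counts and byte totals from the dirty,
	 * muzzy, and retained ecaches. Unlike the counters above, these are
	 * direct stores into estats_out[i], not accumulations.
	 */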
	for (pszind_t i = 0; i < SC_NPSIZES; i++) {
		size_t dirty, muzzy, retained, dirty_bytes, muzzy_bytes,
		    retained_bytes;
		dirty = ecache_nextents_get(&shard->pac.ecache_dirty, i);
		muzzy = ecache_nextents_get(&shard->pac.ecache_muzzy, i);
		retained = ecache_nextents_get(&shard->pac.ecache_retained, i);
		dirty_bytes = ecache_nbytes_get(&shard->pac.ecache_dirty, i);
		muzzy_bytes = ecache_nbytes_get(&shard->pac.ecache_muzzy, i);
		retained_bytes = ecache_nbytes_get(&shard->pac.ecache_retained,
		    i);

		estats_out[i].ndirty = dirty;
		estats_out[i].nmuzzy = muzzy;
		estats_out[i].nretained = retained;
		estats_out[i].dirty_bytes = dirty_bytes;
		estats_out[i].muzzy_bytes = muzzy_bytes;
		estats_out[i].retained_bytes = retained_bytes;
	}

	if (shard->ever_used_hpa) {
		hpa_shard_stats_merge(tsdn, &shard->hpa_shard, hpa_stats_out);
		sec_stats_merge(tsdn, &shard->hpa_sec, sec_stats_out);
	}
}

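/*
 * Snapshot one mutex's profiling data. The mutex is held only for the
 * duration of the copy, so stats collection can briefly contend with the
 * paths that normally own these locks.
 */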
static void
pa_shard_mtx_stats_read_single(tsdn_t *tsdn, mutex_prof_data_t *mutex_prof_data,
    malloc_mutex_t *mtx, int ind) {
	malloc_mutex_lock(tsdn, mtx);
	malloc_mutex_prof_read(tsdn, &mutex_prof_data[ind], mtx);
	malloc_mutex_unlock(tsdn, mtx);
}

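/*
 * Fill the caller's per-arena mutex profiling array, one slot per
 * arena_prof_mutex_* index; the HPA mutexes are reported only if this shard
 * has ever used the HPA. A hypothetical caller (with tsdn and shard in
 * scope) might do:
 *
 *	mutex_prof_data_t data[mutex_prof_num_arena_mutexes];
 *	pa_shard_mtx_stats_read(tsdn, shard, data);
 */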
void
pa_shard_mtx_stats_read(tsdn_t *tsdn, pa_shard_t *shard,
    mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes]) {
	pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
	    &shard->edata_cache.mtx, arena_prof_mutex_extent_avail);
	pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
	    &shard->pac.ecache_dirty.mtx, arena_prof_mutex_extents_dirty);
	pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
	    &shard->pac.ecache_muzzy.mtx, arena_prof_mutex_extents_muzzy);
	pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
	    &shard->pac.ecache_retained.mtx, arena_prof_mutex_extents_retained);
	pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
	    &shard->pac.decay_dirty.mtx, arena_prof_mutex_decay_dirty);
	pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
	    &shard->pac.decay_muzzy.mtx, arena_prof_mutex_decay_muzzy);

	if (shard->ever_used_hpa) {
		pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
		    &shard->hpa_shard.mtx, arena_prof_mutex_hpa_shard);
		pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
		    &shard->hpa_shard.grow_mtx,
		    arena_prof_mutex_hpa_shard_grow);
		sec_mutex_stats_read(tsdn, &shard->hpa_sec,
		    &mutex_prof_data[arena_prof_mutex_hpa_sec]);
	}
}