Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/services/mallocTracker.hpp
32285 views
1
/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
24
25
#ifndef SHARE_VM_SERVICES_MALLOC_TRACKER_HPP
26
#define SHARE_VM_SERVICES_MALLOC_TRACKER_HPP
27
28
#if INCLUDE_NMT
29
30
#include "memory/allocation.hpp"
31
#include "runtime/atomic.hpp"
32
#include "services/nmtCommon.hpp"
33
#include "utilities/nativeCallStack.hpp"
34
35
/*
 * This counter class counts memory allocation and deallocation,
 * records total memory allocation size and number of allocations.
 * The counters are updated atomically.
 */
class MemoryCounter VALUE_OBJ_CLASS_SPEC {
 private:
  size_t _count;   // number of live allocations
  size_t _size;    // total bytes currently allocated

  // Debug-only high-water marks. They are updated with a plain
  // read-modify-write (no atomics), so a concurrent update can miss a
  // peak -- presumably acceptable for debug-only statistics.
  DEBUG_ONLY(size_t _peak_count;)
  DEBUG_ONLY(size_t _peak_size; )

 public:
  MemoryCounter() : _count(0), _size(0) {
    DEBUG_ONLY(_peak_count = 0;)
    DEBUG_ONLY(_peak_size = 0;)
  }

  // Record one allocation of sz bytes. sz may be 0, in which case only
  // the allocation count is bumped (used for arena counting, see
  // MallocMemory::record_new_arena()).
  // NOTE(review): MemoryCounterType is a typedef declared elsewhere
  // (presumably nmtCommon.hpp); the casts reinterpret the size_t fields
  // for Atomic::add -- assumes matching width, confirm per platform.
  inline void allocate(size_t sz) {
    Atomic::add(1, (volatile MemoryCounterType*)&_count);
    if (sz > 0) {
      Atomic::add((MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
      DEBUG_ONLY(_peak_size = MAX2(_peak_size, _size));
    }
    DEBUG_ONLY(_peak_count = MAX2(_peak_count, _count);)
  }

  // Record one deallocation of sz bytes. Must pair with an earlier
  // allocate(); the asserts catch counters that would go negative.
  inline void deallocate(size_t sz) {
    assert(_count > 0, "Negative counter");
    assert(_size >= sz, "Negative size");
    Atomic::add(-1, (volatile MemoryCounterType*)&_count);
    if (sz > 0) {
      Atomic::add(-(MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
    }
  }

  // Adjust the byte total by a signed delta without touching the
  // allocation count (arena growth/shrink). A negative delta must not
  // exceed the current total (asserted).
  inline void resize(ssize_t sz) {
    if (sz != 0) {
      assert(sz >= 0 || _size >= size_t(-sz), "Must be");
      Atomic::add((MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
      DEBUG_ONLY(_peak_size = MAX2(_size, _peak_size);)
    }
  }

  inline size_t count() const { return _count; }   // live allocation count
  inline size_t size() const { return _size; }     // live byte total
  DEBUG_ONLY(inline size_t peak_count() const { return _peak_count; })
  DEBUG_ONLY(inline size_t peak_size() const { return _peak_size; })

};
86
87
/*
88
* Malloc memory used by a particular subsystem.
89
* It includes the memory acquired through os::malloc()
90
* call and arena's backing memory.
91
*/
92
class MallocMemory VALUE_OBJ_CLASS_SPEC {
93
private:
94
MemoryCounter _malloc;
95
MemoryCounter _arena;
96
97
public:
98
MallocMemory() { }
99
100
inline void record_malloc(size_t sz) {
101
_malloc.allocate(sz);
102
}
103
104
inline void record_free(size_t sz) {
105
_malloc.deallocate(sz);
106
}
107
108
inline void record_new_arena() {
109
_arena.allocate(0);
110
}
111
112
inline void record_arena_free() {
113
_arena.deallocate(0);
114
}
115
116
inline void record_arena_size_change(ssize_t sz) {
117
_arena.resize(sz);
118
}
119
120
inline size_t malloc_size() const { return _malloc.size(); }
121
inline size_t malloc_count() const { return _malloc.count();}
122
inline size_t arena_size() const { return _arena.size(); }
123
inline size_t arena_count() const { return _arena.count(); }
124
125
DEBUG_ONLY(inline const MemoryCounter& malloc_counter() const { return _malloc; })
126
DEBUG_ONLY(inline const MemoryCounter& arena_counter() const { return _arena; })
127
};
128
129
class MallocMemorySummary;
130
131
// A snapshot of malloc'd memory, includes malloc memory
132
// usage by types and memory used by tracking itself.
133
class MallocMemorySnapshot : public ResourceObj {
134
friend class MallocMemorySummary;
135
136
private:
137
MallocMemory _malloc[mt_number_of_types];
138
MemoryCounter _tracking_header;
139
140
141
public:
142
inline MallocMemory* by_type(MEMFLAGS flags) {
143
int index = NMTUtil::flag_to_index(flags);
144
return &_malloc[index];
145
}
146
147
inline MallocMemory* by_index(int index) {
148
assert(index >= 0, "Index out of bound");
149
assert(index < mt_number_of_types, "Index out of bound");
150
return &_malloc[index];
151
}
152
153
inline MemoryCounter* malloc_overhead() {
154
return &_tracking_header;
155
}
156
157
// Total malloc'd memory amount
158
size_t total() const;
159
// Total malloc'd memory used by arenas
160
size_t total_arena() const;
161
162
inline size_t thread_count() const {
163
MallocMemorySnapshot* s = const_cast<MallocMemorySnapshot*>(this);
164
return s->by_type(mtThreadStack)->malloc_count();
165
}
166
167
void copy_to(MallocMemorySnapshot* s) {
168
s->_tracking_header = _tracking_header;
169
for (int index = 0; index < mt_number_of_types; index ++) {
170
s->_malloc[index] = _malloc[index];
171
}
172
}
173
174
// Make adjustment by subtracting chunks used by arenas
175
// from total chunks to get total free chunk size
176
void make_adjustment();
177
};
178
179
/*
180
* This class is for collecting malloc statistics at summary level
181
*/
182
class MallocMemorySummary : AllStatic {
183
private:
184
// Reserve memory for placement of MallocMemorySnapshot object
185
static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(MallocMemorySnapshot, size_t)];
186
187
public:
188
static void initialize();
189
190
static inline void record_malloc(size_t size, MEMFLAGS flag) {
191
as_snapshot()->by_type(flag)->record_malloc(size);
192
}
193
194
static inline void record_free(size_t size, MEMFLAGS flag) {
195
as_snapshot()->by_type(flag)->record_free(size);
196
}
197
198
static inline void record_new_arena(MEMFLAGS flag) {
199
as_snapshot()->by_type(flag)->record_new_arena();
200
}
201
202
static inline void record_arena_free(MEMFLAGS flag) {
203
as_snapshot()->by_type(flag)->record_arena_free();
204
}
205
206
static inline void record_arena_size_change(ssize_t size, MEMFLAGS flag) {
207
as_snapshot()->by_type(flag)->record_arena_size_change(size);
208
}
209
210
static void snapshot(MallocMemorySnapshot* s) {
211
as_snapshot()->copy_to(s);
212
s->make_adjustment();
213
}
214
215
// Record memory used by malloc tracking header
216
static inline void record_new_malloc_header(size_t sz) {
217
as_snapshot()->malloc_overhead()->allocate(sz);
218
}
219
220
static inline void record_free_malloc_header(size_t sz) {
221
as_snapshot()->malloc_overhead()->deallocate(sz);
222
}
223
224
// The memory used by malloc tracking headers
225
static inline size_t tracking_overhead() {
226
return as_snapshot()->malloc_overhead()->size();
227
}
228
229
static MallocMemorySnapshot* as_snapshot() {
230
return (MallocMemorySnapshot*)_snapshot;
231
}
232
};
233
234
235
/*
 * Malloc tracking header.
 * To satisfy malloc alignment requirement, NMT uses 2 machine words for tracking purpose,
 * which ensures 8-bytes alignment on 32-bit systems and 16-bytes on 64-bit systems (Product build).
 */

class MallocHeader VALUE_OBJ_CLASS_SPEC {
#ifdef _LP64
  // 64-bit layout: 64 + 8 + 16 + 40 bits = two 64-bit words
  // (size enforced by the assert in the constructor).
  size_t _size : 64;       // user-visible size of the malloc'd block
  size_t _flags : 8;       // MEMFLAGS memory type
  size_t _pos_idx : 16;    // position within the malloc-site bucket
  size_t _bucket_idx: 40;  // malloc-site hashtable bucket index
  // NOTE: these #defines are not class-scoped; they leak into every
  // includer of this header.
#define MAX_MALLOCSITE_TABLE_SIZE right_n_bits(40)
#define MAX_BUCKET_LENGTH right_n_bits(16)
#else
  // 32-bit layout: 32 + 8 + 8 + 16 bits = two 32-bit words.
  size_t _size : 32;
  size_t _flags : 8;
  size_t _pos_idx : 8;
  size_t _bucket_idx: 16;
#define MAX_MALLOCSITE_TABLE_SIZE right_n_bits(16)
#define MAX_BUCKET_LENGTH right_n_bits(8)
#endif // _LP64

 public:
  // Initialize the tracking header for a block of 'size' user bytes of
  // memory type 'flags', allocated at 'stack', under tracking level 'level'.
  MallocHeader(size_t size, MEMFLAGS flags, const NativeCallStack& stack, NMT_TrackingLevel level) {
    assert(sizeof(MallocHeader) == sizeof(void*) * 2,
      "Wrong header size");

    // Minimal tracking: return before writing any field, so the header
    // content is left unset -- presumably nothing reads it at this
    // level; TODO(review) confirm against callers.
    if (level == NMT_minimal) {
      return;
    }

    _flags = flags;
    set_size(size);
    // Detail tracking additionally records the allocation call stack;
    // the site indices are stored only if recording succeeded.
    if (level == NMT_detail) {
      size_t bucket_idx;
      size_t pos_idx;
      if (record_malloc_site(stack, size, &bucket_idx, &pos_idx, flags)) {
        assert(bucket_idx <= MAX_MALLOCSITE_TABLE_SIZE, "Overflow bucket index");
        assert(pos_idx <= MAX_BUCKET_LENGTH, "Overflow bucket position index");
        _bucket_idx = bucket_idx;
        _pos_idx = pos_idx;
      }
    }

    // Summary accounting: the user block plus this header's own footprint.
    MallocMemorySummary::record_malloc(size, flags);
    MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
  }

  inline size_t size() const { return _size; }                   // user-visible block size
  inline MEMFLAGS flags() const { return (MEMFLAGS)_flags; }     // memory type
  // Retrieve the recorded allocation stack (detail tracking only).
  bool get_stack(NativeCallStack& stack) const;

  // Cleanup tracking information before the memory is released.
  void release() const;

 private:
  inline void set_size(size_t size) {
    _size = size;
  }
  // Register this allocation in the malloc-site table; on success the
  // bucket/position indices identify the site record.
  bool record_malloc_site(const NativeCallStack& stack, size_t size,
    size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags) const;
};
298
299
300
// Main class called from MemTracker to track malloc activities
301
class MallocTracker : AllStatic {
302
public:
303
// Initialize malloc tracker for specific tracking level
304
static bool initialize(NMT_TrackingLevel level);
305
306
static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);
307
308
// malloc tracking header size for specific tracking level
309
static inline size_t malloc_header_size(NMT_TrackingLevel level) {
310
return (level == NMT_off) ? 0 : sizeof(MallocHeader);
311
}
312
313
// Parameter name convention:
314
// memblock : the beginning address for user data
315
// malloc_base: the beginning address that includes malloc tracking header
316
//
317
// The relationship:
318
// memblock = (char*)malloc_base + sizeof(nmt header)
319
//
320
321
// Record malloc on specified memory block
322
static void* record_malloc(void* malloc_base, size_t size, MEMFLAGS flags,
323
const NativeCallStack& stack, NMT_TrackingLevel level);
324
325
// Record free on specified memory block
326
static void* record_free(void* memblock);
327
328
// Offset memory address to header address
329
static inline void* get_base(void* memblock);
330
static inline void* get_base(void* memblock, NMT_TrackingLevel level) {
331
if (memblock == NULL || level == NMT_off) return memblock;
332
return (char*)memblock - malloc_header_size(level);
333
}
334
335
// Get memory size
336
static inline size_t get_size(void* memblock) {
337
MallocHeader* header = malloc_header(memblock);
338
return header->size();
339
}
340
341
// Get memory type
342
static inline MEMFLAGS get_flags(void* memblock) {
343
MallocHeader* header = malloc_header(memblock);
344
return header->flags();
345
}
346
347
// Get header size
348
static inline size_t get_header_size(void* memblock) {
349
return (memblock == NULL) ? 0 : sizeof(MallocHeader);
350
}
351
352
static inline void record_new_arena(MEMFLAGS flags) {
353
MallocMemorySummary::record_new_arena(flags);
354
}
355
356
static inline void record_arena_free(MEMFLAGS flags) {
357
MallocMemorySummary::record_arena_free(flags);
358
}
359
360
static inline void record_arena_size_change(ssize_t size, MEMFLAGS flags) {
361
MallocMemorySummary::record_arena_size_change(size, flags);
362
}
363
private:
364
static inline MallocHeader* malloc_header(void *memblock) {
365
assert(memblock != NULL, "NULL pointer");
366
MallocHeader* header = (MallocHeader*)((char*)memblock - sizeof(MallocHeader));
367
return header;
368
}
369
};
370
371
#endif // INCLUDE_NMT
372
373
374
#endif //SHARE_VM_SERVICES_MALLOC_TRACKER_HPP
375
376