Book a Demo!
CoCalc Logo Icon
Store · Features · Docs · Share · Support · News · About · Policies · Sign Up · Sign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/jdk17u
Path: blob/master/src/hotspot/share/services/memTracker.hpp
64441 views
1
/*
 * Copyright (c) 2013, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
24
25
#ifndef SHARE_SERVICES_MEMTRACKER_HPP
26
#define SHARE_SERVICES_MEMTRACKER_HPP
27
28
#include "services/nmtCommon.hpp"
29
#include "utilities/nativeCallStack.hpp"
30
31
32
#if !INCLUDE_NMT
33
34
#define CURRENT_PC NativeCallStack::empty_stack()
35
#define CALLER_PC NativeCallStack::empty_stack()
36
37
class Tracker : public StackObj {
38
public:
39
enum TrackerType {
40
uncommit,
41
release
42
};
43
Tracker(enum TrackerType type) : _type(type) { }
44
void record(address addr, size_t size) { }
45
private:
46
enum TrackerType _type;
47
};
48
49
class MemTracker : AllStatic {
50
public:
51
static inline NMT_TrackingLevel tracking_level() { return NMT_off; }
52
static inline void shutdown() { }
53
static inline void init() { }
54
static bool check_launcher_nmt_support(const char* value) { return true; }
55
static bool verify_nmt_option() { return true; }
56
57
static inline void* record_malloc(void* mem_base, size_t size, MEMFLAGS flag,
58
const NativeCallStack& stack, NMT_TrackingLevel level) { return mem_base; }
59
static inline size_t malloc_header_size(NMT_TrackingLevel level) { return 0; }
60
static inline size_t malloc_header_size(void* memblock) { return 0; }
61
static inline void* malloc_base(void* memblock) { return memblock; }
62
static inline void* record_free(void* memblock, NMT_TrackingLevel level) { return memblock; }
63
64
static inline void record_new_arena(MEMFLAGS flag) { }
65
static inline void record_arena_free(MEMFLAGS flag) { }
66
static inline void record_arena_size_change(ssize_t diff, MEMFLAGS flag) { }
67
static inline void record_virtual_memory_reserve(void* addr, size_t size, const NativeCallStack& stack,
68
MEMFLAGS flag = mtNone) { }
69
static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size,
70
const NativeCallStack& stack, MEMFLAGS flag = mtNone) { }
71
static inline void record_virtual_memory_split_reserved(void* addr, size_t size, size_t split) { }
72
static inline void record_virtual_memory_commit(void* addr, size_t size, const NativeCallStack& stack) { }
73
static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) { }
74
static inline void record_thread_stack(void* addr, size_t size) { }
75
static inline void release_thread_stack(void* addr, size_t size) { }
76
77
static void final_report(outputStream*) { }
78
static void error_report(outputStream*) { }
79
};
80
81
#else
82
83
#include "runtime/mutexLocker.hpp"
84
#include "runtime/threadCritical.hpp"
85
#include "services/mallocTracker.hpp"
86
#include "services/threadStackTracker.hpp"
87
#include "services/virtualMemoryTracker.hpp"
88
89
#define CURRENT_PC ((MemTracker::tracking_level() == NMT_detail) ? \
90
NativeCallStack(0) : NativeCallStack::empty_stack())
91
#define CALLER_PC ((MemTracker::tracking_level() == NMT_detail) ? \
92
NativeCallStack(1) : NativeCallStack::empty_stack())
93
94
class MemBaseline;
95
96
// Tracker is used for guarding 'release' semantics of virtual memory operation, to avoid
97
// the other thread obtains and records the same region that is just 'released' by current
98
// thread but before it can record the operation.
99
class Tracker : public StackObj {
100
public:
101
enum TrackerType {
102
uncommit,
103
release
104
};
105
106
public:
107
Tracker(enum TrackerType type) : _type(type) { }
108
void record(address addr, size_t size);
109
private:
110
enum TrackerType _type;
111
// Virtual memory tracking data structures are protected by ThreadCritical lock.
112
ThreadCritical _tc;
113
};
114
115
// Central entry point for Native Memory Tracking (NMT): dispatches malloc,
// arena, virtual memory and thread stack recording to the dedicated trackers.
class MemTracker : AllStatic {
  friend class VirtualMemoryTrackerTest;

  // Helper; asserts that we are in post-NMT-init phase
  static void assert_post_init() {
    assert(is_initialized(), "NMT not yet initialized.");
  }

 public:

  // Initializes NMT to whatever -XX:NativeMemoryTracking says.
  // - Can only be called once.
  // - NativeMemoryTracking must be validated beforehand.
  static void initialize();

  // Returns true if NMT had been initialized.
  static bool is_initialized() {
    return _tracking_level != NMT_unknown;
  }

  // Current tracking level (may drop to minimal after shutdown()).
  static inline NMT_TrackingLevel tracking_level() {
    return _tracking_level;
  }

  // Shutdown native memory tracking.
  // This transitions the tracking level:
  // summary -> minimal
  // detail -> minimal
  static void shutdown();

  // Transition the tracking level to specified level
  static bool transition_to(NMT_TrackingLevel level);

  // Record a malloc; returns the user pointer (past the tracking header
  // when tracking is on, mem_base unchanged when it is off).
  static inline void* record_malloc(void* mem_base, size_t size, MEMFLAGS flag,
    const NativeCallStack& stack, NMT_TrackingLevel level) {
    if (level != NMT_off) {
      return MallocTracker::record_malloc(mem_base, size, flag, stack, level);
    }
    return mem_base;
  }

  // Size of the tracking header that would be prepended at the given level.
  static inline size_t malloc_header_size(NMT_TrackingLevel level) {
    return MallocTracker::malloc_header_size(level);
  }

  // Size of the tracking header of an existing block (0 when tracking is off).
  static size_t malloc_header_size(void* memblock) {
    if (tracking_level() != NMT_off) {
      return MallocTracker::get_header_size(memblock);
    }
    return 0;
  }

  // To malloc base address, which is the starting address
  // of malloc tracking header if tracking is enabled.
  // Otherwise, it returns the same address.
  static void* malloc_base(void* memblock);

  // Record malloc free and return malloc base address
  static inline void* record_free(void* memblock, NMT_TrackingLevel level) {
    // Never turned on
    if (level == NMT_off || memblock == NULL) {
      return memblock;
    }
    return MallocTracker::record_free(memblock);
  }


  // Record creation of an arena
  static inline void record_new_arena(MEMFLAGS flag) {
    if (tracking_level() < NMT_summary) return;
    MallocTracker::record_new_arena(flag);
  }

  // Record destruction of an arena
  static inline void record_arena_free(MEMFLAGS flag) {
    if (tracking_level() < NMT_summary) return;
    MallocTracker::record_arena_free(flag);
  }

  // Record arena size change. Arena size is the size of all arena
  // chunks that back the arena.
  static inline void record_arena_size_change(ssize_t diff, MEMFLAGS flag) {
    if (tracking_level() < NMT_summary) return;
    MallocTracker::record_arena_size_change(diff, flag);
  }

  // Note: virtual memory operations should only ever be called after NMT initialization
  // (we do not do any reservations before that).

  static inline void record_virtual_memory_reserve(void* addr, size_t size, const NativeCallStack& stack,
    MEMFLAGS flag = mtNone) {
    assert_post_init();
    if (tracking_level() < NMT_summary) return;
    if (addr != NULL) {
      ThreadCritical tc;
      // Recheck to avoid potential racing during NMT shutdown
      if (tracking_level() < NMT_summary) return;
      VirtualMemoryTracker::add_reserved_region((address)addr, size, stack, flag);
    }
  }

  static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size,
    const NativeCallStack& stack, MEMFLAGS flag = mtNone) {
    assert_post_init();
    if (tracking_level() < NMT_summary) return;
    if (addr != NULL) {
      ThreadCritical tc;
      // Recheck under the lock to avoid racing with NMT shutdown
      if (tracking_level() < NMT_summary) return;
      VirtualMemoryTracker::add_reserved_region((address)addr, size, stack, flag);
      VirtualMemoryTracker::add_committed_region((address)addr, size, stack);
    }
  }

  static inline void record_virtual_memory_commit(void* addr, size_t size,
    const NativeCallStack& stack) {
    assert_post_init();
    if (tracking_level() < NMT_summary) return;
    if (addr != NULL) {
      ThreadCritical tc;
      // Recheck under the lock to avoid racing with NMT shutdown
      if (tracking_level() < NMT_summary) return;
      VirtualMemoryTracker::add_committed_region((address)addr, size, stack);
    }
  }

  // Given an existing memory mapping registered with NMT and a splitting
  // address, split the mapping in two. The memory region is supposed to
  // be fully uncommitted.
  //
  // The two new memory regions will be both registered under stack and
  // memory flags of the original region.
  static inline void record_virtual_memory_split_reserved(void* addr, size_t size, size_t split) {
    assert_post_init();
    if (tracking_level() < NMT_summary) return;
    if (addr != NULL) {
      ThreadCritical tc;
      // Recheck to avoid potential racing during NMT shutdown
      if (tracking_level() < NMT_summary) return;
      VirtualMemoryTracker::split_reserved_region((address)addr, size, split);
    }
  }

  // Re-tag an already-reserved region with a new memory flag.
  static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) {
    assert_post_init();
    if (tracking_level() < NMT_summary) return;
    if (addr != NULL) {
      ThreadCritical tc;
      // Recheck under the lock to avoid racing with NMT shutdown
      if (tracking_level() < NMT_summary) return;
      VirtualMemoryTracker::set_reserved_region_type((address)addr, flag);
    }
  }

  // Record creation of a thread stack (tracked separately from other VM regions).
  static void record_thread_stack(void* addr, size_t size) {
    assert_post_init();
    if (tracking_level() < NMT_summary) return;
    if (addr != NULL) {
      ThreadStackTracker::new_thread_stack((address)addr, size, CALLER_PC);
    }
  }

  // Record destruction of a thread stack.
  static inline void release_thread_stack(void* addr, size_t size) {
    assert_post_init();
    if (tracking_level() < NMT_summary) return;
    if (addr != NULL) {
      ThreadStackTracker::delete_thread_stack((address)addr, size);
    }
  }

  // Query lock is used to synchronize the access to tracking data.
  // So far, it is only used by JCmd query, but it may be used by
  // other tools.
  static inline Mutex* query_lock() {
    assert(NMTQuery_lock != NULL, "not initialized!");
    return NMTQuery_lock;
  }

  // Report during error reporting.
  static void error_report(outputStream* output);

  // Report when handling PrintNMTStatistics before VM shutdown.
  static void final_report(outputStream* output);

  // Stored baseline
  static inline MemBaseline& get_baseline() {
    return _baseline;
  }

  // Tracking level requested on the command line (before any transitions).
  static NMT_TrackingLevel cmdline_tracking_level() {
    return _cmdline_tracking_level;
  }

  // Print NMT internal tuning/diagnostic statistics.
  static void tuning_statistics(outputStream* out);

 private:
  static NMT_TrackingLevel init_tracking_level();
  static void report(bool summary_only, outputStream* output, size_t scale);

 private:
  // Tracking level
  static volatile NMT_TrackingLevel _tracking_level;
  // If NMT option value passed by launcher through environment
  // variable is valid
  static bool _is_nmt_env_valid;
  // command line tracking level
  static NMT_TrackingLevel _cmdline_tracking_level;
  // Stored baseline
  static MemBaseline _baseline;
  // Query lock
  static Mutex* _query_lock;
};
324
325
#endif // INCLUDE_NMT
326
327
#endif // SHARE_SERVICES_MEMTRACKER_HPP
328
329