GitHub Repository: PojavLauncherTeam/jdk17u
Path: blob/master/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp
/*
 * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
26
#include "jfr/jfrEvents.hpp"
27
#include "jfr/jni/jfrJavaSupport.hpp"
28
#include "jfr/leakprofiler/chains/edgeStore.hpp"
29
#include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
30
#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
31
#include "jfr/leakprofiler/checkpoint/objectSampleWriter.hpp"
32
#include "jfr/leakprofiler/leakProfiler.hpp"
33
#include "jfr/leakprofiler/sampling/objectSample.hpp"
34
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
35
#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
36
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
37
#include "jfr/recorder/service/jfrOptionSet.hpp"
38
#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
39
#include "jfr/support/jfrKlassUnloading.hpp"
40
#include "jfr/support/jfrMethodLookup.hpp"
41
#include "jfr/utilities/jfrHashtable.hpp"
42
#include "jfr/utilities/jfrPredicate.hpp"
43
#include "jfr/utilities/jfrRelation.hpp"
44
#include "memory/resourceArea.inline.hpp"
45
#include "oops/instanceKlass.inline.hpp"
46
#include "runtime/interfaceSupport.inline.hpp"
47
#include "runtime/mutexLocker.hpp"
48
#include "runtime/safepoint.hpp"
49
#include "runtime/thread.inline.hpp"
50
51
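// Allocates a C-heap backed GrowableArray (accounted as mtTracing); used for the
// unloaded thread id set below, presumably so the set survives across resource
// scopes and recording epochs.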
const int initial_array_size = 64;

template <typename T>
static GrowableArray<T>* c_heap_allocate_array(int size = initial_array_size) {
  return new (ResourceObj::C_HEAP, mtTracing) GrowableArray<T>(size, mtTracing);
}

static GrowableArray<traceid>* unloaded_thread_id_set = NULL;

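// RAII guard that serializes access to unloaded_thread_id_set via a binary semaphore.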
class ThreadIdExclusiveAccess : public StackObj {
 private:
  static Semaphore _mutex_semaphore;
 public:
  ThreadIdExclusiveAccess() { _mutex_semaphore.wait(); }
  ~ThreadIdExclusiveAccess() { _mutex_semaphore.signal(); }
};

Semaphore ThreadIdExclusiveAccess::_mutex_semaphore(1);

static bool has_thread_exited(traceid tid) {
  assert(tid != 0, "invariant");
  if (unloaded_thread_id_set == NULL) {
    return false;
  }
  ThreadIdExclusiveAccess lock;
  return JfrPredicate<traceid, compare_traceid>::test(unloaded_thread_id_set, tid);
}

static void add_to_unloaded_thread_set(traceid tid) {
  ThreadIdExclusiveAccess lock;
  if (unloaded_thread_id_set == NULL) {
    unloaded_thread_id_set = c_heap_allocate_array<traceid>();
  }
  JfrMutablePredicate<traceid, compare_traceid>::test(unloaded_thread_id_set, tid);
}

void ObjectSampleCheckpoint::on_thread_exit(JavaThread* jt) {
  assert(jt != NULL, "invariant");
  if (LeakProfiler::is_running()) {
    add_to_unloaded_thread_set(jt->jfr_thread_local()->thread_id());
  }
}

void ObjectSampleCheckpoint::clear() {
  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
  if (unloaded_thread_id_set != NULL) {
    delete unloaded_thread_id_set;
    unloaded_thread_id_set = NULL;
  }
  assert(unloaded_thread_id_set == NULL, "invariant");
}

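// Walks the sampler's list of samples, applying the processor to each one.
// With all == false, iteration stops at the last sample already resolved, so
// only samples added since the previous resolution are visited.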
template <typename Processor>
static void do_samples(ObjectSample* sample, const ObjectSample* end, Processor& processor) {
  assert(sample != NULL, "invariant");
  while (sample != end) {
    processor.sample_do(sample);
    sample = sample->next();
  }
}

template <typename Processor>
static void iterate_samples(Processor& processor, bool all = false) {
  ObjectSampler* const sampler = ObjectSampler::sampler();
  assert(sampler != NULL, "invariant");
  ObjectSample* const last = sampler->last();
  assert(last != NULL, "invariant");
  do_samples(last, all ? NULL : sampler->last_resolved(), processor);
}

class SampleMarker {
 private:
  ObjectSampleMarker& _marker;
  jlong _last_sweep;
  int _count;
 public:
  SampleMarker(ObjectSampleMarker& marker, jlong last_sweep) : _marker(marker), _last_sweep(last_sweep), _count(0) {}
  void sample_do(ObjectSample* sample) {
    if (sample->is_alive_and_older_than(_last_sweep)) {
      _marker.mark(sample->object());
      ++_count;
    }
  }
  int count() const {
    return _count;
  }
};

int ObjectSampleCheckpoint::save_mark_words(const ObjectSampler* sampler, ObjectSampleMarker& marker, bool emit_all) {
  assert(sampler != NULL, "invariant");
  if (sampler->last() == NULL) {
    return 0;
  }
  SampleMarker sample_marker(marker, emit_all ? max_jlong : ObjectSampler::last_sweep());
  iterate_samples(sample_marker, true);
  return sample_marker.count();
}

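// Caches serialized stack trace blobs keyed by stack trace hash and id, so
// samples that share a stack trace reuse a single blob.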
class BlobCache {
  typedef HashTableHost<JfrBlobHandle, traceid, JfrHashtableEntry, BlobCache> BlobTable;
  typedef BlobTable::HashEntry BlobEntry;
 private:
  BlobTable _table;
  traceid _lookup_id;
 public:
  BlobCache(size_t size) : _table(this, size), _lookup_id(0) {}
  JfrBlobHandle get(const ObjectSample* sample);
  void put(const ObjectSample* sample, const JfrBlobHandle& blob);
  // Hash table callbacks
  void on_link(const BlobEntry* entry) const;
  bool on_equals(uintptr_t hash, const BlobEntry* entry) const;
  void on_unlink(BlobEntry* entry) const;
};

JfrBlobHandle BlobCache::get(const ObjectSample* sample) {
  assert(sample != NULL, "invariant");
  _lookup_id = sample->stack_trace_id();
  assert(_lookup_id != 0, "invariant");
  BlobEntry* const entry = _table.lookup_only(sample->stack_trace_hash());
  return entry != NULL ? entry->literal() : JfrBlobHandle();
}

void BlobCache::put(const ObjectSample* sample, const JfrBlobHandle& blob) {
  assert(sample != NULL, "invariant");
  assert(_table.lookup_only(sample->stack_trace_hash()) == NULL, "invariant");
  _lookup_id = sample->stack_trace_id();
  assert(_lookup_id != 0, "invariant");
  _table.put(sample->stack_trace_hash(), blob);
}

inline void BlobCache::on_link(const BlobEntry* entry) const {
  assert(entry != NULL, "invariant");
  assert(entry->id() == 0, "invariant");
  entry->set_id(_lookup_id);
}

inline bool BlobCache::on_equals(uintptr_t hash, const BlobEntry* entry) const {
  assert(entry != NULL, "invariant");
  assert(entry->hash() == hash, "invariant");
  return entry->id() == _lookup_id;
}

inline void BlobCache::on_unlink(BlobEntry* entry) const {
  assert(entry != NULL, "invariant");
}

static GrowableArray<traceid>* id_set = NULL;

static void prepare_for_resolution() {
  id_set = new GrowableArray<traceid>(JfrOptionSet::old_object_queue_size());
}

static bool stack_trace_precondition(const ObjectSample* sample) {
  assert(sample != NULL, "invariant");
  return sample->has_stack_trace_id() && !sample->is_dead();
}

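// Resolves each live sample's stack trace from the leak profiler's stack trace
// repository, serializes it once (through the BlobCache) and attaches the blob
// to the sample. The destructor clears the repository's leak profiler entries.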
class StackTraceBlobInstaller {
 private:
  BlobCache _cache;
  void install(ObjectSample* sample);
  const JfrStackTrace* resolve(const ObjectSample* sample) const;
 public:
  StackTraceBlobInstaller() : _cache(JfrOptionSet::old_object_queue_size()) {
    prepare_for_resolution();
  }
  ~StackTraceBlobInstaller() {
    JfrStackTraceRepository::clear_leak_profiler();
  }
  void sample_do(ObjectSample* sample) {
    if (stack_trace_precondition(sample)) {
      install(sample);
    }
  }
};

#ifdef ASSERT
static void validate_stack_trace(const ObjectSample* sample, const JfrStackTrace* stack_trace) {
  assert(!sample->has_stacktrace(), "invariant");
  assert(stack_trace != NULL, "invariant");
  assert(stack_trace->hash() == sample->stack_trace_hash(), "invariant");
  assert(stack_trace->id() == sample->stack_trace_id(), "invariant");
}
#endif

inline const JfrStackTrace* StackTraceBlobInstaller::resolve(const ObjectSample* sample) const {
  return JfrStackTraceRepository::lookup_for_leak_profiler(sample->stack_trace_hash(), sample->stack_trace_id());
}

void StackTraceBlobInstaller::install(ObjectSample* sample) {
  JfrBlobHandle blob = _cache.get(sample);
  if (blob.valid()) {
    sample->set_stacktrace(blob);
    return;
  }
  const JfrStackTrace* const stack_trace = resolve(sample);
  DEBUG_ONLY(validate_stack_trace(sample, stack_trace));
  JfrCheckpointWriter writer;
  writer.write_type(TYPE_STACKTRACE);
  writer.write_count(1);
  ObjectSampleCheckpoint::write_stacktrace(stack_trace, writer);
  blob = writer.copy();
  _cache.put(sample, blob);
  sample->set_stacktrace(blob);
}

static void install_stack_traces(const ObjectSampler* sampler) {
  assert(sampler != NULL, "invariant");
  const ObjectSample* const last = sampler->last();
  if (last != sampler->last_resolved()) {
    ResourceMark rm;
    JfrKlassUnloading::sort();
    StackTraceBlobInstaller installer;
    iterate_samples(installer);
  }
}

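// Rotation hook: transitions to the VM, takes the ClassLoaderDataGraph_lock and
// installs stack trace blobs for any samples added since the last resolution.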
void ObjectSampleCheckpoint::on_rotation(const ObjectSampler* sampler) {
  assert(sampler != NULL, "invariant");
  assert(LeakProfiler::is_running(), "invariant");
  JavaThread* const thread = JavaThread::current();
  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(thread);)
  // can safepoint here
  ThreadInVMfromNative transition(thread);
  MutexLocker lock(ClassLoaderDataGraph_lock);
  // the lock is needed to ensure the unload lists do not grow in the middle of inspection.
  install_stack_traces(sampler);
}

static bool is_klass_unloaded(traceid klass_id) {
  assert(ClassLoaderDataGraph_lock->owned_by_self(), "invariant");
  return JfrKlassUnloading::is_unloaded(klass_id);
}

static bool is_processed(traceid method_id) {
  assert(method_id != 0, "invariant");
  assert(id_set != NULL, "invariant");
  return JfrMutablePredicate<traceid, compare_traceid>::test(id_set, method_id);
}

void ObjectSampleCheckpoint::add_to_leakp_set(const InstanceKlass* ik, traceid method_id) {
  assert(ik != NULL, "invariant");
  if (is_processed(method_id) || is_klass_unloaded(JfrMethodLookup::klass_id(method_id))) {
    return;
  }
  const Method* const method = JfrMethodLookup::lookup(ik, method_id);
  assert(method != NULL, "invariant");
  assert(method->method_holder() == ik, "invariant");
  JfrTraceId::load_leakp(ik, method);
}

void ObjectSampleCheckpoint::write_stacktrace(const JfrStackTrace* trace, JfrCheckpointWriter& writer) {
  assert(trace != NULL, "invariant");
  // JfrStackTrace
  writer.write(trace->id());
  writer.write((u1)!trace->_reached_root);
  writer.write(trace->_nr_of_frames);
  // JfrStackFrames
  for (u4 i = 0; i < trace->_nr_of_frames; ++i) {
    const JfrStackFrame& frame = trace->_frames[i];
    frame.write(writer);
    add_to_leakp_set(frame._klass, frame._methodid);
  }
}

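// The write_* helpers below either emit a blob's contents into the checkpoint
// writer or, when reset is true, clear the blob's write state so it can be
// written again later.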
static void write_blob(const JfrBlobHandle& blob, JfrCheckpointWriter& writer, bool reset) {
  if (reset) {
    blob->reset_write_state();
    return;
  }
  blob->exclusive_write(writer);
}

static void write_type_set_blob(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) {
  if (sample->has_type_set()) {
    write_blob(sample->type_set(), writer, reset);
  }
}

static void write_thread_blob(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) {
  assert(sample->has_thread(), "invariant");
  if (has_thread_exited(sample->thread_id())) {
    write_blob(sample->thread(), writer, reset);
  }
}

static void write_stacktrace_blob(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) {
  if (sample->has_stacktrace()) {
    write_blob(sample->stacktrace(), writer, reset);
  }
}

static void write_blobs(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) {
  assert(sample != NULL, "invariant");
  write_stacktrace_blob(sample, writer, reset);
  write_thread_blob(sample, writer, reset);
  write_type_set_blob(sample, writer, reset);
}

class BlobWriter {
 private:
  const ObjectSampler* _sampler;
  JfrCheckpointWriter& _writer;
  const jlong _last_sweep;
  bool _reset;
 public:
  BlobWriter(const ObjectSampler* sampler, JfrCheckpointWriter& writer, jlong last_sweep) :
    _sampler(sampler), _writer(writer), _last_sweep(last_sweep), _reset(false) {}
  void sample_do(ObjectSample* sample) {
    if (sample->is_alive_and_older_than(_last_sweep)) {
      write_blobs(sample, _writer, _reset);
    }
  }
  void set_reset() {
    _reset = true;
  }
};

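// Writes the blobs of every sample still alive and older than the last sweep,
// then makes a second pass to reset the blobs' write states.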
static void write_sample_blobs(const ObjectSampler* sampler, bool emit_all, Thread* thread) {
  // sample set is predicated on time of last sweep
  const jlong last_sweep = emit_all ? max_jlong : ObjectSampler::last_sweep();
  JfrCheckpointWriter writer(thread, false);
  BlobWriter cbw(sampler, writer, last_sweep);
  iterate_samples(cbw, true);
  // reset blob write states
  cbw.set_reset();
  iterate_samples(cbw, true);
}

void ObjectSampleCheckpoint::write(const ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread) {
  assert(sampler != NULL, "invariant");
  assert(edge_store != NULL, "invariant");
  assert(thread != NULL, "invariant");
  write_sample_blobs(sampler, emit_all, thread);
  // write reference chains
  if (!edge_store->is_empty()) {
    JfrCheckpointWriter writer(thread);
    ObjectSampleWriter osw(writer, edge_store);
    edge_store->iterate(osw);
  }
}

// A linked list of saved type set blobs for the epoch.
// Each link consists of a reference-counted handle.
static JfrBlobHandle saved_type_set_blobs;

static void release_state_for_previous_epoch() {
  // decrements the reference count and the list is reinitialized
  saved_type_set_blobs = JfrBlobHandle();
}

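// Installs the saved type set blob list on every live sample; the destructor
// releases the list saved for the previous epoch.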
class BlobInstaller {
 public:
  ~BlobInstaller() {
    release_state_for_previous_epoch();
  }
  void sample_do(ObjectSample* sample) {
    if (!sample->is_dead()) {
      sample->set_type_set(saved_type_set_blobs);
    }
  }
};

static void install_type_set_blobs() {
  BlobInstaller installer;
  iterate_samples(installer);
}

static void save_type_set_blob(JfrCheckpointWriter& writer, bool copy = false) {
  assert(writer.has_data(), "invariant");
  const JfrBlobHandle blob = copy ? writer.copy() : writer.move();
  if (saved_type_set_blobs.valid()) {
    saved_type_set_blobs->set_next(blob);
  } else {
    saved_type_set_blobs = blob;
  }
}

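// Called when a type set has been written for the current epoch: saves the type
// set blob, installs it on all live samples and advances the sampler's
// last-resolved marker.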
void ObjectSampleCheckpoint::on_type_set(JfrCheckpointWriter& writer) {
  assert(LeakProfiler::is_running(), "invariant");
  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(JavaThread::current());)
  const ObjectSample* last = ObjectSampler::sampler()->last();
  if (writer.has_data() && last != NULL) {
    save_type_set_blob(writer);
    install_type_set_blobs();
    ObjectSampler::sampler()->set_last_resolved(last);
  }
}

void ObjectSampleCheckpoint::on_type_set_unload(JfrCheckpointWriter& writer) {
  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
  assert(LeakProfiler::is_running(), "invariant");
  if (writer.has_data() && ObjectSampler::sampler()->last() != NULL) {
    save_type_set_blob(writer, true);
  }
}