Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/jdk17u
Path: blob/master/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp
66645 views
1
/*
 * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
24
25
#include "precompiled.hpp"
26
#include "gc/shared/collectedHeap.hpp"
27
#include "gc/shared/oopStorage.hpp"
28
#include "gc/shared/oopStorageSet.hpp"
29
#include "jfr/jfrEvents.hpp"
30
#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
31
#include "jfr/leakprofiler/sampling/objectSample.hpp"
32
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
33
#include "jfr/leakprofiler/sampling/sampleList.hpp"
34
#include "jfr/leakprofiler/sampling/samplePriorityQueue.hpp"
35
#include "jfr/recorder/jfrEventSetting.inline.hpp"
36
#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
37
#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
38
#include "jfr/support/jfrThreadLocal.hpp"
39
#include "jfr/utilities/jfrTime.hpp"
40
#include "jfr/utilities/jfrTryLock.hpp"
41
#include "logging/log.hpp"
42
#include "memory/universe.hpp"
43
#include "oops/oop.inline.hpp"
44
#include "runtime/atomic.hpp"
45
#include "runtime/orderAccess.hpp"
46
#include "runtime/safepoint.hpp"
47
#include "runtime/thread.hpp"
48
49
// Timestamp of when the gc last processed the set of sampled objects.
50
// Atomic access to prevent word tearing on 32-bit platforms.
51
static volatile int64_t _last_sweep;
52
53
// Condition variable to communicate that some sampled objects have been cleared by the gc
54
// and can therefore be removed from the sample priority queue.
55
static bool volatile _dead_samples = false;
56
57
// The OopStorage instance is used to hold weak references to sampled objects.
58
// It is constructed and registered during VM initialization. This is a singleton
59
// that persist independent of the state of the ObjectSampler.
60
static OopStorage* _oop_storage = NULL;
61
62
// Accessor for the singleton weak OopStorage backing the sampler.
OopStorage* ObjectSampler::oop_storage() {
  return _oop_storage;
}
// Callback invoked by the GC after an iteration over the oop storage
65
// that may have cleared dead referents. num_dead is the number of entries
66
// already NULL or cleared by the iteration.
67
void ObjectSampler::oop_storage_gc_notification(size_t num_dead) {
68
if (num_dead != 0) {
69
// The ObjectSampler instance may have already been cleaned or a new
70
// instance was created concurrently. This allows for a small race where cleaning
71
// could be done again.
72
Atomic::store(&_dead_samples, true);
73
Atomic::store(&_last_sweep, (int64_t)JfrTicks::now().value());
74
}
75
}
76
77
bool ObjectSampler::create_oop_storage() {
78
_oop_storage = OopStorageSet::create_weak("Weak JFR Old Object Samples", mtTracing);
79
assert(_oop_storage != NULL, "invariant");
80
_oop_storage->register_num_dead_callback(&oop_storage_gc_notification);
81
return true;
82
}
83
84
// The current sampler singleton; NULL until create() has run.
static ObjectSampler* _instance = NULL;

// Dereference the singleton; only valid after creation.
static ObjectSampler& instance() {
  assert(_instance != NULL, "invariant");
  return *_instance;
}
// Build a sampler holding at most 'size' samples, and reset the gc
// communication state (_dead_samples / _last_sweep) shared with the
// oop storage notification callback.
ObjectSampler::ObjectSampler(size_t size) :
  _priority_queue(new SamplePriorityQueue(size)),
  _list(new SampleList(size)),
  _total_allocated(0),
  _threshold(0),
  _size(size) {
  Atomic::store(&_dead_samples, false);
  Atomic::store(&_last_sweep, (int64_t)JfrTicks::now().value());
}
// Release the queue and list owned by this sampler instance.
ObjectSampler::~ObjectSampler() {
  delete _priority_queue;
  _priority_queue = NULL;
  delete _list;
  _list = NULL;
}
bool ObjectSampler::create(size_t size) {
109
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
110
assert(_oop_storage != NULL, "should be already created");
111
ObjectSampleCheckpoint::clear();
112
assert(_instance == NULL, "invariant");
113
_instance = new ObjectSampler(size);
114
return _instance != NULL;
115
}
116
117
bool ObjectSampler::is_created() {
118
return _instance != NULL;
119
}
120
121
// Raw access to the instance; callers must know it exists.
ObjectSampler* ObjectSampler::sampler() {
  assert(is_created(), "invariant");
  return _instance;
}
void ObjectSampler::destroy() {
127
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
128
if (_instance != NULL) {
129
ObjectSampler* const sampler = _instance;
130
_instance = NULL;
131
delete sampler;
132
}
133
}
134
135
// Spin lock serializing samplers against iteration; 0 == unlocked, 1 == held.
static volatile int _lock = 0;
// Spin until the sampler lock is held, then expose the instance.
// Paired with release(); sample() instead uses a non-blocking JfrTryLock
// on the same lock word.
ObjectSampler* ObjectSampler::acquire() {
  while (Atomic::cmpxchg(&_lock, 0, 1) == 1) {}
  return _instance;
}
void ObjectSampler::release() {
143
OrderAccess::fence();
144
_lock = 0;
145
}
146
147
// Resolve the JFR trace id for 'thread', materializing its thread blob if
// not yet present. Returns 0 when the thread cannot be sampled: either its
// java.lang.Thread object does not exist yet, or it is excluded from JFR.
static traceid get_thread_id(JavaThread* thread) {
  assert(thread != NULL, "invariant");
  if (thread->threadObj() == NULL) {
    return 0;
  }
  const JfrThreadLocal* const tl = thread->jfr_thread_local();
  assert(tl != NULL, "invariant");
  if (tl->is_excluded()) {
    return 0;
  }
  if (!tl->has_thread_blob()) {
    JfrCheckpointManager::create_thread_blob(thread);
  }
  assert(tl->has_thread_blob(), "invariant");
  return tl->thread_id();
}
class RecordStackTrace {
165
private:
166
JavaThread* _jt;
167
bool _enabled;
168
public:
169
RecordStackTrace(JavaThread* jt) : _jt(jt),
170
_enabled(JfrEventSetting::has_stacktrace(EventOldObjectSample::eventId)) {
171
if (_enabled) {
172
JfrStackTraceRepository::record_for_leak_profiler(jt);
173
}
174
}
175
~RecordStackTrace() {
176
if (_enabled) {
177
_jt->jfr_thread_local()->clear_cached_stack_trace();
178
}
179
}
180
};
181
182
// Entry point for sampling an allocation of 'allocated' bytes at 'obj' by
// 'thread'. Silently drops the sample if the thread cannot be identified or
// if the sampler lock is contended.
void ObjectSampler::sample(HeapWord* obj, size_t allocated, JavaThread* thread) {
  assert(thread != NULL, "invariant");
  assert(is_created(), "invariant");
  const traceid thread_id = get_thread_id(thread);
  if (thread_id == 0) {
    return;
  }
  RecordStackTrace rst(thread);
  // Non-blocking attempt to enter the critical section.
  JfrTryLock tryLock(&_lock);
  if (!tryLock.acquired()) {
    log_trace(jfr, oldobject, sampling)("Skipping old object sample due to lock contention");
    return;
  }
  instance().add(obj, allocated, thread_id, thread);
}
// Insert a sample for 'obj' into the priority queue, lazily scavenging dead
// samples first when the gc has signalled their presence. Called with the
// sampler lock held; the thread blob must already exist.
void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, JavaThread* thread) {
  assert(obj != NULL, "invariant");
  assert(thread_id != 0, "invariant");
  assert(thread != NULL, "invariant");
  assert(thread->jfr_thread_local()->has_thread_blob(), "invariant");

  if (Atomic::load(&_dead_samples)) {
    // A gc scan can race this store and raise the flag again, which at
    // worst causes a back-to-back scavenge.
    Atomic::store(&_dead_samples, false);
    scavenge();
  }

  _total_allocated += allocated;
  const size_t span = _total_allocated - _priority_queue->total();
  ObjectSample* slot;
  if ((size_t)_priority_queue->count() == _size) {
    assert(_list->count() == _size, "invariant");
    const ObjectSample* const head = _priority_queue->peek();
    if (head->span() > span) {
      // quick reject, will not fit
      return;
    }
    // Queue is full: evict the smallest-span sample and reuse its slot.
    slot = _list->reuse(_priority_queue->pop());
  } else {
    slot = _list->get();
  }

  assert(slot != NULL, "invariant");
  slot->set_thread_id(thread_id);

  const JfrThreadLocal* const tl = thread->jfr_thread_local();
  slot->set_thread(tl->thread_blob());

  // Attach the stack trace cached by RecordStackTrace, when one was recorded.
  const unsigned int trace_hash = tl->cached_stack_trace_hash();
  if (trace_hash != 0) {
    slot->set_stack_trace_id(tl->cached_stack_trace_id());
    slot->set_stack_trace_hash(trace_hash);
  }

  slot->set_span(allocated);
  slot->set_object(cast_to_oop(obj));
  slot->set_allocated(allocated);
  slot->set_allocation_time(JfrTicks::now());
  slot->set_heap_used_at_last_gc(Universe::heap()->used_at_last_gc());
  _priority_queue->push(slot);
}
void ObjectSampler::scavenge() {
248
ObjectSample* current = _list->last();
249
while (current != NULL) {
250
ObjectSample* next = current->next();
251
if (current->is_dead()) {
252
remove_dead(current);
253
}
254
current = next;
255
}
256
}
257
258
// Unlink a dead sample from queue and list, folding its span into its
// predecessor so the cumulative span accounting stays consistent.
void ObjectSampler::remove_dead(ObjectSample* sample) {
  assert(sample != NULL, "invariant");
  assert(sample->is_dead(), "invariant");
  sample->release();

  ObjectSample* const predecessor = sample->prev();
  if (predecessor != NULL) {
    // Push the dead sample's span onto the predecessor; it must be removed
    // and re-pushed so the queue re-establishes its ordering.
    _priority_queue->remove(predecessor);
    predecessor->add_span(sample->span());
    _priority_queue->push(predecessor);
  }
  _priority_queue->remove(sample);
  _list->release(sample);
}
// Most recently added sample, or NULL when empty.
ObjectSample* ObjectSampler::last() const {
  return _list->last();
}
// Oldest sample in the list, or NULL when empty.
const ObjectSample* ObjectSampler::first() const {
  return _list->first();
}
// Delegates to the list's bookmark of the last resolved sample.
const ObjectSample* ObjectSampler::last_resolved() const {
  return _list->last_resolved();
}
void ObjectSampler::set_last_resolved(const ObjectSample* sample) {
287
_list->set_last_resolved(sample);
288
}
289
290
int ObjectSampler::item_count() const {
291
return _priority_queue->count();
292
}
293
294
// Read-only access to the queue entry at 'index'.
const ObjectSample* ObjectSampler::item_at(int index) const {
  return _priority_queue->item_at(index);
}
// Mutable access, implemented in terms of the const overload to avoid
// duplicating the lookup.
ObjectSample* ObjectSampler::item_at(int index) {
  const ObjectSampler* const self = this;
  return const_cast<ObjectSample*>(self->item_at(index));
}
int64_t ObjectSampler::last_sweep() {
305
return Atomic::load(&_last_sweep);
306
}
307
308