Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp
40957 views
1
/*
 * Copyright (c) 2017, 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
24
25
#include "precompiled.hpp"
26
#include "code/codeCache.hpp"
27
#include "code/icBuffer.hpp"
28
#include "code/nmethod.hpp"
29
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
30
#include "gc/shenandoah/shenandoahEvacOOMHandler.inline.hpp"
31
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
32
#include "gc/shenandoah/shenandoahNMethod.inline.hpp"
33
#include "gc/shenandoah/shenandoahUtils.hpp"
34
#include "memory/resourceArea.hpp"
35
#include "memory/universe.hpp"
36
#include "runtime/atomic.hpp"
37
#include "utilities/powerOfTwo.hpp"
38
39
// Builds one per-heap parallel iterator for each code heap, so worker
// threads can later walk all heaps with parallel_blobs_do().
ShenandoahParallelCodeCacheIterator::ShenandoahParallelCodeCacheIterator(const GrowableArray<CodeHeap*>* heaps) {
  _length = heaps->length();
  // GC-tagged C-heap array; released in the destructor.
  _iters = NEW_C_HEAP_ARRAY(ShenandoahParallelCodeHeapIterator, _length, mtGC);
  for (int idx = 0; idx < _length; idx++) {
    _iters[idx] = ShenandoahParallelCodeHeapIterator(heaps->at(idx));
  }
}
46
47
ShenandoahParallelCodeCacheIterator::~ShenandoahParallelCodeCacheIterator() {
  // Free with the same element type that was used for allocation
  // (NEW_C_HEAP_ARRAY(ShenandoahParallelCodeHeapIterator, ...) in the
  // constructor). The previous type argument "ParallelCodeHeapIterator"
  // named no declared type; it only compiled because FREE_C_HEAP_ARRAY
  // ignores its type parameter.
  FREE_C_HEAP_ARRAY(ShenandoahParallelCodeHeapIterator, _iters);
}
50
51
// Walks every code heap by delegating to its per-heap iterator. Each
// per-heap iterator internally claims strides of blobs, so concurrent
// callers of this method cooperate rather than duplicate work.
void ShenandoahParallelCodeCacheIterator::parallel_blobs_do(CodeBlobClosure* f) {
  for (int i = 0; i < _length; i++) {
    _iters[i].parallel_blobs_do(f);
  }
}
56
57
// Per-heap parallel iterator: starts with no strides claimed (_claimed_idx = 0)
// and not finished; see parallel_blobs_do() for the claiming protocol.
ShenandoahParallelCodeHeapIterator::ShenandoahParallelCodeHeapIterator(CodeHeap* heap) :
        _heap(heap), _claimed_idx(0), _finished(false) {
}
60
61
void ShenandoahParallelCodeHeapIterator::parallel_blobs_do(CodeBlobClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");

  /*
   * Parallel code heap walk.
   *
   * This code makes all threads scan all code heaps, but only one thread would execute the
   * closure on given blob. This is achieved by recording the "claimed" blocks: if a thread
   * had claimed the block, it can process all blobs in it. Others have to fast-forward to
   * next attempt without processing.
   *
   * Late threads would return immediately if iterator is finished.
   */

  if (_finished) {
    return;
  }

  int stride = 256; // educated guess
  int stride_mask = stride - 1;
  assert (is_power_of_2(stride), "sanity"); // mask trick below requires a power of two

  int count = 0;
  bool process_block = true;

  for (CodeBlob *cb = CodeCache::first_blob(_heap); cb != NULL; cb = CodeCache::next_blob(_heap, cb)) {
    int current = count++;
    // At each stride boundary, race to claim the next stride with a CAS on
    // _claimed_idx. The winner (CAS observed `current`) processes the whole
    // stride; losers fast-forward with process_block == false.
    if ((current & stride_mask) == 0) {
      process_block = (current >= _claimed_idx) &&
                      (Atomic::cmpxchg(&_claimed_idx, current, current + stride) == current);
    }
    if (process_block) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);
#ifdef ASSERT
        if (cb->is_nmethod())
          Universe::heap()->verify_nmethod((nmethod*)cb);
#endif
      }
    }
  }

  // All blobs visited; late-arriving threads bail out at the top.
  _finished = true;
}
105
106
// Global table of registered nmethods; created in initialize().
ShenandoahNMethodTable* ShenandoahCodeRoots::_nmethod_table;
// Current "disarmed" epoch for nmethod entry barriers. 0 is reserved for
// freshly created nmethods, so the value starts at 1 (see arm_nmethods()).
int ShenandoahCodeRoots::_disarmed_value = 1;
108
109
// One-time setup: allocate the global nmethod table.
void ShenandoahCodeRoots::initialize() {
  _nmethod_table = new ShenandoahNMethodTable();
}
112
113
// Records a newly created/relocated nmethod in the code-roots table.
// Caller must hold CodeCache_lock or be at a safepoint.
void ShenandoahCodeRoots::register_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  _nmethod_table->register_nmethod(nm);
}
117
118
// Removes an nmethod from the code-roots table (e.g. when it dies).
// Caller must hold CodeCache_lock or be at a safepoint.
void ShenandoahCodeRoots::unregister_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  _nmethod_table->unregister_nmethod(nm);
}
122
123
// Final cleanup of table-side data when an nmethod is flushed from the
// code cache. Caller must hold CodeCache_lock or be at a safepoint.
void ShenandoahCodeRoots::flush_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  _nmethod_table->flush_nmethod(nm);
}
127
128
// Arms all nmethod entry barriers by advancing the global disarmed epoch
// and publishing it to every Java thread's thread-local copy. An nmethod
// whose stored value differs from the thread-local value will trap into
// the entry barrier on its next invocation.
void ShenandoahCodeRoots::arm_nmethods() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  _disarmed_value ++;
  // 0 is reserved for new nmethod
  if (_disarmed_value == 0) {
    _disarmed_value = 1;
  }

  // Safepoint guarantees threads are stopped, so a plain store per thread
  // is sufficient here.
  JavaThreadIteratorWithHandle jtiwh;
  for (JavaThread *thr = jtiwh.next(); thr != NULL; thr = jtiwh.next()) {
    ShenandoahThreadLocalData::set_disarmed_value(thr, _disarmed_value);
  }
}
141
142
// Closure that disarms the entry barrier of each visited nmethod, making
// subsequent calls into it proceed without trapping.
class ShenandoahDisarmNMethodClosure : public NMethodClosure {
private:
  BarrierSetNMethod* const _bs;

public:
  ShenandoahDisarmNMethodClosure() :
    _bs(BarrierSet::barrier_set()->barrier_set_nmethod()) {
  }

  virtual void do_nmethod(nmethod* nm) {
    _bs->disarm(nm);
  }
};
155
156
// Gang task that disarms all registered nmethods in parallel.
// The concurrent iterator must be bracketed by nmethods_do_begin()/end()
// under CodeCache_lock; the constructor/destructor pair does that, so the
// task object's lifetime delimits the iteration session.
class ShenandoahDisarmNMethodsTask : public AbstractGangTask {
private:
  ShenandoahDisarmNMethodClosure _cl;
  ShenandoahConcurrentNMethodIterator _iterator;

public:
  ShenandoahDisarmNMethodsTask() :
    AbstractGangTask("Shenandoah Disarm NMethods"),
    _iterator(ShenandoahCodeRoots::table()) {
    assert(SafepointSynchronize::is_at_safepoint(), "Only at a safepoint");
    // No safepoint check: we are already at a safepoint.
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_begin();
  }

  ~ShenandoahDisarmNMethodsTask() {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_end();
  }

  virtual void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    // Workers share the iterator; it hands out nmethods without duplication.
    _iterator.nmethods_do(&_cl);
  }
};
180
181
void ShenandoahCodeRoots::disarm_nmethods() {
182
if (ShenandoahNMethodBarrier) {
183
ShenandoahDisarmNMethodsTask task;
184
ShenandoahHeap::heap()->workers()->run_task(&task);
185
}
186
}
187
188
// Closure for the concurrent class-unloading phase: unlinks unloading
// nmethods and heals/disarms the live ones. Failure (running out of
// transitional IC stubs) is recorded in a shared flag so the whole pass
// can be retried; see ShenandoahCodeRoots::unlink().
class ShenandoahNMethodUnlinkClosure : public NMethodClosure {
private:
  bool _unloading_occurred;
  volatile bool _failed;          // set by any worker; read via Atomic
  ShenandoahHeap* const _heap;
  BarrierSetNMethod* const _bs;

  void set_failed() {
    Atomic::store(&_failed, true);
  }

  void unlink(nmethod* nm) {
    // Unlinking of the dependencies must happen before the
    // handshake separating unlink and purge.
    nm->flush_dependencies(false /* delete_immediately */);

    // unlink_from_method will take the CompiledMethod_lock.
    // In this case we don't strictly need it when unlinking nmethods from
    // the Method, because it is only concurrently unlinked by
    // the entry barrier, which acquires the per nmethod lock.
    nm->unlink_from_method();

    if (nm->is_osr_method()) {
      // Invalidate the osr nmethod only once
      nm->invalidate_osr_method();
    }
  }
public:
  ShenandoahNMethodUnlinkClosure(bool unloading_occurred) :
      _unloading_occurred(unloading_occurred),
      _failed(false),
      _heap(ShenandoahHeap::heap()),
      _bs(ShenandoahBarrierSet::barrier_set()->barrier_set_nmethod()) {}

  virtual void do_nmethod(nmethod* nm) {
    assert(_heap->is_concurrent_weak_root_in_progress(), "Only this phase");
    // Once any worker failed, stop doing work: the pass will be re-run.
    if (failed()) {
      return;
    }

    ShenandoahNMethod* nm_data = ShenandoahNMethod::gc_data(nm);
    assert(!nm_data->is_unregistered(), "Should not see unregistered entry");

    if (!nm->is_alive()) {
      return;
    }

    if (nm->is_unloading()) {
      // Unlink under the per-nmethod lock to exclude the entry barrier.
      ShenandoahReentrantLocker locker(nm_data->lock());
      unlink(nm);
      return;
    }

    ShenandoahReentrantLocker locker(nm_data->lock());

    // Heal oops and disarm
    if (_bs->is_armed(nm)) {
      ShenandoahEvacOOMScope oom_evac_scope;
      ShenandoahNMethod::heal_nmethod_metadata(nm_data);
      _bs->disarm(nm);
    }

    // Clear compiled ICs and exception caches
    if (!nm->unload_nmethod_caches(_unloading_occurred)) {
      // Ran out of transitional IC stubs; flag for retry after refill.
      set_failed();
    }
  }

  bool failed() const {
    return Atomic::load(&_failed);
  }
};
260
261
// Gang task driving ShenandoahNMethodUnlinkClosure over all registered
// nmethods. The constructor/destructor bracket the concurrent iteration
// session under CodeCache_lock; success() reports whether any worker ran
// out of IC stubs (in which case the caller refills and retries).
class ShenandoahUnlinkTask : public AbstractGangTask {
private:
  ShenandoahNMethodUnlinkClosure _cl;
  ICRefillVerifier* _verifier;
  ShenandoahConcurrentNMethodIterator _iterator;

public:
  ShenandoahUnlinkTask(bool unloading_occurred, ICRefillVerifier* verifier) :
    AbstractGangTask("Shenandoah Unlink NMethods"),
    _cl(unloading_occurred),
    _verifier(verifier),
    _iterator(ShenandoahCodeRoots::table()) {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_begin();
  }

  ~ShenandoahUnlinkTask() {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_end();
  }

  virtual void work(uint worker_id) {
    // Each worker registers with the IC refill verifier for this pass.
    ICRefillVerifierMark mark(_verifier);
    _iterator.nmethods_do(&_cl);
  }

  bool success() const {
    return !_cl.failed();
  }
};
291
292
// Unlinks unloading nmethods with the given worker gang. Retries the
// whole pass for as long as workers run out of transitional IC stubs,
// refilling the stub buffer (at a safepoint) between attempts.
void ShenandoahCodeRoots::unlink(WorkGang* workers, bool unloading_occurred) {
  assert(ShenandoahHeap::heap()->unload_classes(), "Only when running concurrent class unloading");

  for (;;) {
    ICRefillVerifier verifier;

    {
      // Scope the task so its destructor ends the iteration session
      // before we potentially leave the suspendible thread set below.
      ShenandoahUnlinkTask task(unloading_occurred, &verifier);
      workers->run_task(&task);
      if (task.success()) {
        return;
      }
    }

    // Cleaning failed because we ran out of transitional IC stubs,
    // so we have to refill and try again. Refilling requires taking
    // a safepoint, so we temporarily leave the suspendible thread set.
    SuspendibleThreadSetLeaver sts;
    InlineCacheBuffer::refill_ic_stubs();
  }
}
313
314
class ShenandoahNMethodPurgeClosure : public NMethodClosure {
315
public:
316
virtual void do_nmethod(nmethod* nm) {
317
if (nm->is_alive() && nm->is_unloading()) {
318
nm->make_unloaded();
319
}
320
}
321
};
322
323
// Gang task applying ShenandoahNMethodPurgeClosure to all registered
// nmethods in parallel. As with the other tasks in this file, the
// constructor/destructor bracket the concurrent iteration session under
// CodeCache_lock.
class ShenandoahNMethodPurgeTask : public AbstractGangTask {
private:
  ShenandoahNMethodPurgeClosure _cl;
  ShenandoahConcurrentNMethodIterator _iterator;

public:
  ShenandoahNMethodPurgeTask() :
    AbstractGangTask("Shenandoah Purge NMethods"),
    _cl(),
    _iterator(ShenandoahCodeRoots::table()) {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_begin();
  }

  ~ShenandoahNMethodPurgeTask() {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_end();
  }

  virtual void work(uint worker_id) {
    _iterator.nmethods_do(&_cl);
  }
};
346
347
// Second half of concurrent class unloading: makes previously unlinked,
// unloading nmethods unloaded, using the given worker gang.
void ShenandoahCodeRoots::purge(WorkGang* workers) {
  assert(ShenandoahHeap::heap()->unload_classes(), "Only when running concurrent class unloading");

  ShenandoahNMethodPurgeTask task;
  workers->run_task(&task);
}
353
354
// Safepoint-only iterator over code roots. Takes CodeCache_lock for the
// iterator's whole lifetime and captures a table snapshot under it; the
// destructor releases both in reverse order.
ShenandoahCodeRootsIterator::ShenandoahCodeRootsIterator() :
        _par_iterator(CodeCache::heaps()),
        _table_snapshot(NULL) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
  // Acquired by the VM/coordinator thread, not workers (see assert);
  // workers only call possibly_parallel_blobs_do() afterwards.
  assert(!Thread::current()->is_Worker_thread(), "Should not be acquired by workers");
  CodeCache_lock->lock_without_safepoint_check();
  _table_snapshot = ShenandoahCodeRoots::table()->snapshot_for_iteration();
}
362
363
// Releases the table snapshot before dropping CodeCache_lock (reverse of
// the acquisition order in the constructor).
ShenandoahCodeRootsIterator::~ShenandoahCodeRootsIterator() {
  ShenandoahCodeRoots::table()->finish_iteration(_table_snapshot);
  _table_snapshot = NULL;
  CodeCache_lock->unlock();
}
368
369
// Applies the closure to all blobs in the snapshot. Multiple workers may
// call this concurrently; the snapshot's parallel_blobs_do divides the
// work between them.
void ShenandoahCodeRootsIterator::possibly_parallel_blobs_do(CodeBlobClosure *f) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
  assert(_table_snapshot != NULL, "Sanity");
  _table_snapshot->parallel_blobs_do(f);
}
374
375