GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentMark.cpp
/*
 * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"

#include "gc_implementation/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahClosures.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahParallelCleaning.hpp"
#include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc_implementation/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahWorkGroup.hpp"
#include "gc_implementation/shenandoah/shenandoahUtils.hpp"
#include "gc_implementation/shenandoah/shenandoah_specialized_oop_closures.hpp"

#include "memory/referenceProcessor.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"

template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahStrDedupQueue* _dedup_queue;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, NO_DEDUP>(p, _heap, _queue, _mark_context, _dedup_queue);
  }

public:
  ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q, ShenandoahStrDedupQueue* dq) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _dedup_queue(dq),
    _mark_context(_heap->marking_context()) {};

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p) { do_oop_nv(p); }
};
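
// Usage sketch (illustrative, not part of the original sources): the virtual
// do_oop overrides funnel into do_oop_nv, so UPDATE_REFS is resolved at
// compile time rather than per-oop. mark_roots() below instantiates
// ShenandoahInitMarkRootsClosure<RESOLVE> when forwarded objects may exist,
// and ShenandoahInitMarkRootsClosure<NONE> on a stable heap, e.g.:
//
//   ShenandoahObjToScanQueue* q = queues->queue(worker_id);
//   ShenandoahInitMarkRootsClosure<NONE> mark_cl(q, NULL);
//   _rp->roots_do(worker_id, &mark_cl);  // marks through every visited slot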

ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
  MetadataAwareOopClosure(rp),
  _queue(q),
  _dedup_queue(NULL),
  _heap(ShenandoahHeap::heap()),
  _mark_context(_heap->marking_context())
{ }

ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahStrDedupQueue* dq, ReferenceProcessor* rp) :
  MetadataAwareOopClosure(rp),
  _queue(q),
  _dedup_queue(dq),
  _heap(ShenandoahHeap::heap()),
  _mark_context(_heap->marking_context())
{ }

template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsTask : public AbstractGangTask {
private:
  ShenandoahAllRootScanner* _rp;
public:
  ShenandoahInitMarkRootsTask(ShenandoahAllRootScanner* rp) :
    AbstractGangTask("Shenandoah init mark roots task"),
    _rp(rp) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahObjToScanQueueSet* queues = heap->concurrent_mark()->task_queues();
    assert(queues->get_reserved() > worker_id, err_msg("Queue has not been reserved for worker id: %d", worker_id));

    ShenandoahObjToScanQueue* q = queues->queue(worker_id);
    ShenandoahInitMarkRootsClosure<UPDATE_REFS> mark_cl(q, NULL);
    do_work(heap, &mark_cl, worker_id);
  }

private:
  void do_work(ShenandoahHeap* heap, OopClosure* oops, uint worker_id) {
    // The rationale for selecting the roots to scan is as follows:
    //   a. With unload_classes = true, we only want to scan the actual strong roots from the
    //      code cache. This allows us to identify the dead classes, unload them, *and*
    //      invalidate the relevant code cache blobs. This can only be done together with
    //      class unloading.
    //   b. With unload_classes = false, we have to nominally retain all the references from the
    //      code cache, because there could be embedded classes/oops in the generated code,
    //      which we would never visit during mark. Without code cache invalidation, as in (a),
    //      we risk executing that code cache blob, and crashing.
    //   c. With ShenandoahConcurrentScanCodeRoots, we avoid scanning the entire code cache here,
    //      and instead do that in the concurrent phase under the relevant lock. This saves init mark
    //      pause time.
    ResourceMark m;
    if (heap->unload_classes()) {
      _rp->strong_roots_do(worker_id, oops);
    } else {
      _rp->roots_do(worker_id, oops);
    }
  }
};

class ShenandoahUpdateRootsTask : public AbstractGangTask {
private:
  ShenandoahRootUpdater* _root_updater;
public:
  ShenandoahUpdateRootsTask(ShenandoahRootUpdater* _root_updater) :
    AbstractGangTask("Shenandoah update roots task"),
    _root_updater(_root_updater) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahUpdateRefsClosure cl;
    ShenandoahIsAliveSelector is_alive;
    _root_updater->roots_do(worker_id, is_alive.is_alive_closure(), &cl);
  }
};

class ShenandoahConcurrentMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ShenandoahTaskTerminator* _terminator;

public:
  ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator) :
    AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = heap->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    _cm->concurrent_scan_code_roots(worker_id, rp);
    _cm->mark_loop(worker_id, _terminator, rp,
                   true, // cancellable
                   ShenandoahStringDedup::is_enabled()); // perform string dedup
  }
};

class ShenandoahSATBAndRemarkCodeRootsThreadsClosure : public ThreadClosure {
private:
  ShenandoahSATBBufferClosure* _satb_cl;
  OopClosure* const _cl;
  MarkingCodeBlobClosure* _code_cl;
  int _thread_parity;

public:
  ShenandoahSATBAndRemarkCodeRootsThreadsClosure(ShenandoahSATBBufferClosure* satb_cl, OopClosure* cl, MarkingCodeBlobClosure* code_cl) :
    _satb_cl(satb_cl), _cl(cl), _code_cl(code_cl),
    _thread_parity(SharedHeap::heap()->strong_roots_parity()) {}

  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;
        jt->satb_mark_queue().apply_closure_and_empty(_satb_cl);
        if (_cl != NULL) {
          ResourceMark rm;
          jt->oops_do(_cl, NULL, _code_cl);
        } else if (_code_cl != NULL) {
          // In theory it should not be necessary to explicitly walk the nmethods to find roots
          // for concurrent marking; however, oops reachable from nmethods have very complex
          // lifecycles:
          //  * Alive if on the stack of an executing method
          //  * Weakly reachable otherwise
          // Some objects reachable from nmethods, such as the class loader (or klass_holder)
          // of the receiver, should be live by the SATB invariant, but other oops recorded in
          // nmethods may behave differently.
          jt->nmethods_do(_code_cl);
        }
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
      }
    }
  }
};
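
// Usage sketch (illustrative): ShenandoahFinalMarkingTask below constructs this
// closure with the SATB drain closure plus optional mark and code-blob closures,
// and hands it to Threads::threads_do(). claim_oops_do() with the current
// strong-roots parity ensures each thread is claimed by exactly one worker.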

class ShenandoahFinalMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ShenandoahTaskTerminator* _terminator;
  bool _dedup_string;
  ShenandoahSharedFlag _claimed_syncroots;

public:
  ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator, bool dedup_string) :
    AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _dedup_string(dedup_string) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = heap->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    // First drain the remaining SATB buffers.
    // Note that this is not strictly necessary for mark-compact. But since it
    // requires a StrongRootsScope around the task, we need to claim the
    // threads, and performance-wise it doesn't really matter: it adds about
    // 1 ms to a full GC.
    {
      ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
      ShenandoahStrDedupQueue* dq = NULL;
      if (ShenandoahStringDedup::is_enabled()) {
        dq = ShenandoahStringDedup::queue(worker_id);
      }
      ShenandoahSATBBufferClosure cl(q, dq);
      SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&cl));
      bool do_nmethods = heap->unload_classes();
      if (heap->has_forwarded_objects()) {
        ShenandoahMarkResolveRefsClosure resolve_mark_cl(q, rp);
        MarkingCodeBlobClosure blobsCl(&resolve_mark_cl, !CodeBlobToOopClosure::FixRelocations);
        ShenandoahSATBAndRemarkCodeRootsThreadsClosure tc(&cl,
                                                          ShenandoahStoreValEnqueueBarrier ? &resolve_mark_cl : NULL,
                                                          do_nmethods ? &blobsCl : NULL);
        Threads::threads_do(&tc);
        if (ShenandoahStoreValEnqueueBarrier && _claimed_syncroots.try_set()) {
          ObjectSynchronizer::oops_do(&resolve_mark_cl);
        }
      } else {
        ShenandoahMarkRefsClosure mark_cl(q, rp);
        MarkingCodeBlobClosure blobsCl(&mark_cl, !CodeBlobToOopClosure::FixRelocations);
        ShenandoahSATBAndRemarkCodeRootsThreadsClosure tc(&cl,
                                                          ShenandoahStoreValEnqueueBarrier ? &mark_cl : NULL,
                                                          do_nmethods ? &blobsCl : NULL);
        Threads::threads_do(&tc);
        if (ShenandoahStoreValEnqueueBarrier && _claimed_syncroots.try_set()) {
          ObjectSynchronizer::oops_do(&mark_cl);
        }
      }
    }

    if (heap->is_degenerated_gc_in_progress() || heap->is_full_gc_in_progress()) {
      // Full GC does not execute the concurrent cycle, and a degenerated cycle
      // may bypass it. So code roots might not have been scanned; scan them here.
      _cm->concurrent_scan_code_roots(worker_id, rp);
    }

    _cm->mark_loop(worker_id, _terminator, rp,
                   false, // not cancellable
                   _dedup_string);

    assert(_cm->task_queues()->is_empty(), "Should be empty");
  }
};

void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahGCPhase phase(root_phase);

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  assert(nworkers <= task_queues()->size(), "Just check");

  ShenandoahAllRootScanner root_proc(root_phase);
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
  task_queues()->reserve(nworkers);

  if (_heap->has_forwarded_objects()) {
    ShenandoahInitMarkRootsTask<RESOLVE> mark_roots(&root_proc);
    workers->run_task(&mark_roots);
  } else {
    // No need to update references, which means the heap is stable.
    // Can save time not walking through forwarding pointers.
    ShenandoahInitMarkRootsTask<NONE> mark_roots(&root_proc);
    workers->run_task(&mark_roots);
  }

  clear_claim_codecache();
}

void ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(root_phase == ShenandoahPhaseTimings::full_gc_update_roots ||
         root_phase == ShenandoahPhaseTimings::degen_gc_update_roots,
         "Only for these phases");

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahGCPhase phase(root_phase);

  COMPILER2_PRESENT(DerivedPointerTable::clear());

  uint nworkers = heap->workers()->active_workers();

  ShenandoahRootUpdater root_updater(root_phase);
  ShenandoahUpdateRootsTask update_roots(&root_updater);
  _heap->workers()->run_task(&update_roots);

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
}

class ShenandoahUpdateThreadRootsTask : public AbstractGangTask {
private:
  SharedHeap::StrongRootsScope _srs;
  ShenandoahPhaseTimings::Phase _phase;
  ShenandoahGCWorkerPhase _worker_phase;
public:
  ShenandoahUpdateThreadRootsTask(bool is_par, ShenandoahPhaseTimings::Phase phase) :
    AbstractGangTask("Shenandoah Update Thread Roots"),
    _srs(ShenandoahHeap::heap(), true),
    _phase(phase),
    _worker_phase(phase) {}

  void work(uint worker_id) {
    ShenandoahUpdateRefsClosure cl;
    ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::ThreadRoots, worker_id);
    ResourceMark rm;
    Threads::possibly_parallel_oops_do(&cl, NULL, NULL);
  }
};

void ShenandoahConcurrentMark::update_thread_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahGCPhase phase(root_phase);

  COMPILER2_PRESENT(DerivedPointerTable::clear());

  WorkGang* workers = _heap->workers();
  bool is_par = workers->active_workers() > 1;

  ShenandoahUpdateThreadRootsTask task(is_par, root_phase);
  workers->run_task(&task);

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
}

void ShenandoahConcurrentMark::initialize(uint workers) {
  _heap = ShenandoahHeap::heap();

  uint num_queues = MAX2(workers, 1U);

  _task_queues = new ShenandoahObjToScanQueueSet((int) num_queues);

  for (uint i = 0; i < num_queues; ++i) {
    ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);
  }

  JavaThread::satb_mark_queue_set().set_buffer_size(ShenandoahSATBBufferSize);
}
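
// Note (illustrative): MAX2(workers, 1U) guarantees at least one queue even if
// the heap is configured with zero marking workers, so single-threaded paths
// that use get_queue(0), such as serial reference processing below, stay valid.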

void ShenandoahConcurrentMark::concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp) {
  if (claim_codecache()) {
    ShenandoahObjToScanQueue* q = task_queues()->queue(worker_id);
    if (!_heap->unload_classes()) {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      // TODO: We cannot honor StringDeduplication here, due to lock ranking
      // inversion. So, we may miss some deduplication candidates.
      if (_heap->has_forwarded_objects()) {
        ShenandoahMarkResolveRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      }
    }
  }
}
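
// Note (illustrative): claim_codecache() is backed by a ShenandoahSharedFlag
// try_set(), so only the first worker to arrive scans the code cache; the flag
// is re-armed for the next cycle by clear_claim_codecache() in mark_roots().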

void ShenandoahConcurrentMark::mark_from_roots() {
  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  ShenandoahGCPhase conc_mark_phase(ShenandoahPhaseTimings::conc_mark);

  if (_heap->process_references()) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->set_active_mt_degree(nworkers);

    // enable ("weak") refs discovery
    rp->enable_discovery(true /*verify_no_refs*/, true);
    rp->setup_policy(_heap->collector_policy()->should_clear_all_soft_refs());
  }

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

  task_queues()->reserve(nworkers);

  {
    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahConcurrentMarkingTask task(this, &terminator);
    workers->run_task(&task);
  }

  assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty when not cancelled");
  if (!_heap->cancelled_gc()) {
    TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
  }

  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
}

void ShenandoahConcurrentMark::finish_mark_from_roots(bool full_gc) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  uint nworkers = _heap->workers()->active_workers();

  // Finally mark everything else we've got in our queues during the previous steps.
  // This does two different things for concurrent vs. mark-compact GC:
  // - For concurrent GC, it starts with empty task queues, drains the remaining
  //   SATB buffers, and then completes the marking closure.
  // - For mark-compact GC, it starts out with the task queues seeded by initial
  //   root scan, and completes the closure, thus marking through all live objects.
  // The implementation is the same, so it's shared here.
  {
    ShenandoahGCPhase phase(full_gc ?
                            ShenandoahPhaseTimings::full_gc_mark_finish_queues :
                            ShenandoahPhaseTimings::finish_queues);
    task_queues()->reserve(nworkers);

    shenandoah_assert_rp_isalive_not_installed();
    ShenandoahIsAliveSelector is_alive;
    ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

    SharedHeap::StrongRootsScope scope(_heap, true);
    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled());
    _heap->workers()->run_task(&task);
  }

  assert(task_queues()->is_empty(), "Should be empty");

  // Marking is complete; deactivate the SATB barrier if it is active.
  _heap->complete_marking();

  // When we're done marking everything, we process weak references.
  // It is not obvious, but reference processing actually calls
  // JNIHandle::weak_oops_do() to clean up JNI and JVMTI weak oops.
  if (_heap->process_references()) {
    weak_refs_work(full_gc);
  } else {
    weak_roots_work(full_gc);
  }

  // And finally finish class unloading
  if (_heap->unload_classes()) {
    _heap->unload_classes_and_cleanup_tables(full_gc);
  } else if (ShenandoahStringDedup::is_enabled()) {
    ShenandoahStringDedup::parallel_cleanup();
  }
  assert(task_queues()->is_empty(), "Should be empty");
  TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());

  // Resize Metaspace
  MetaspaceGC::compute_new_size();
}

// Weak Reference Closures
class ShenandoahCMDrainMarkingStackClosure : public VoidClosure {
  uint _worker_id;
  ShenandoahTaskTerminator* _terminator;
  bool _reset_terminator;

public:
  ShenandoahCMDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
    _worker_id(worker_id),
    _terminator(t),
    _reset_terminator(reset_terminator) {
  }

  void do_void() {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
    assert(sh->process_references(), "why else would we be here?");
    ReferenceProcessor* rp = sh->ref_processor();

    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(_worker_id, _terminator, rp,
                   false, // not cancellable
                   false); // do not do strdedup

    if (_reset_terminator) {
      _terminator->reset_for_reuse();
    }
  }
};

class ShenandoahCMKeepAliveClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, NONE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p) { do_oop_nv(p); }
};

class ShenandoahCMKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p) { do_oop_nv(p); }
};
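
// Note (illustrative): the two keep-alive closures differ only in the
// UpdateRefsMode template argument (NONE vs. SIMPLE). The reference-processing
// paths below pick the SIMPLE variant when heap->has_forwarded_objects() is
// true, so that visited slots are also updated past forwarded objects
// (an assumption based on the mode names and their usage in this file).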

class ShenandoahRefProcTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
  ShenandoahTaskTerminator* _terminator;

public:
  ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
                             ShenandoahTaskTerminator* t) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task),
    _terminator(t) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id, _terminator);
    if (heap->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahCMKeepAliveUpdateClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    }
  }
};

class ShenandoahRefEnqueueTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::EnqueueTask& _enqueue_task;

public:
  ShenandoahRefEnqueueTaskProxy(AbstractRefProcTaskExecutor::EnqueueTask& enqueue_task) :
    AbstractGangTask("Enqueue reference objects in parallel"),
    _enqueue_task(enqueue_task) {
  }

  void work(uint worker_id) {
    _enqueue_task.work(worker_id);
  }
};

class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  WorkGang* _workers;

public:
  ShenandoahRefProcTaskExecutor(WorkGang* workers) :
    _workers(workers) {
  }

  // Executes a task using worker threads.
  void execute(ProcessTask& task) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    // Shortcut execution if task is empty.
    // This should be replaced with the generic ReferenceProcessor shortcut,
    // see JDK-8181214, JDK-8043575, JDK-6938732.
    if (task.is_empty()) {
      return;
    }

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* cm = heap->concurrent_mark();
    uint nworkers = _workers->active_workers();
    cm->task_queues()->reserve(nworkers);

    ShenandoahTaskTerminator terminator(nworkers, cm->task_queues());
    ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
    _workers->run_task(&proc_task_proxy);
  }

  void execute(EnqueueTask& task) {
    ShenandoahRefEnqueueTaskProxy enqueue_task_proxy(task);
    _workers->run_task(&enqueue_task_proxy);
  }
};

void ShenandoahConcurrentMark::weak_refs_work(bool full_gc) {
  assert(_heap->process_references(), "sanity");

  ShenandoahPhaseTimings::Phase phase_root =
    full_gc ?
    ShenandoahPhaseTimings::full_gc_weakrefs :
    ShenandoahPhaseTimings::weakrefs;

  ShenandoahGCPhase phase(phase_root);

  ReferenceProcessor* rp = _heap->ref_processor();
  weak_refs_work_doit(full_gc);

  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "Post condition");
}

void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) {
  ReferenceProcessor* rp = _heap->ref_processor();

  ShenandoahPhaseTimings::Phase phase_process =
    full_gc ?
    ShenandoahPhaseTimings::full_gc_weakrefs_process :
    ShenandoahPhaseTimings::weakrefs_process;

  ShenandoahPhaseTimings::Phase phase_enqueue =
    full_gc ?
    ShenandoahPhaseTimings::full_gc_weakrefs_enqueue :
    ShenandoahPhaseTimings::weakrefs_enqueue;

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  rp->setup_policy(_heap->collector_policy()->should_clear_all_soft_refs());
  rp->set_active_mt_degree(nworkers);

  assert(task_queues()->is_empty(), "Should be empty");

  // The complete_gc and keep_alive closures instantiated here are only needed for
  // the single-threaded path in RP. They share queue 0 for tracking work, which
  // simplifies implementation. Since RP may decide to call complete_gc several
  // times, we need to be able to reuse the terminator.
  uint serial_worker_id = 0;
  ShenandoahTaskTerminator terminator(1, task_queues());
  ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);

  ShenandoahRefProcTaskExecutor executor(workers);

  {
    ShenandoahGCPhase phase(phase_process);

    if (_heap->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
      rp->process_discovered_references(&is_alive, &keep_alive,
                                        &complete_gc, &executor,
                                        NULL, _heap->shenandoah_policy()->tracer()->gc_id());
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
      rp->process_discovered_references(&is_alive, &keep_alive,
                                        &complete_gc, &executor,
                                        NULL, _heap->shenandoah_policy()->tracer()->gc_id());
    }

    assert(task_queues()->is_empty(), "Should be empty");
  }

  {
    ShenandoahGCPhase phase(phase_enqueue);
    rp->enqueue_discovered_references(&executor);
  }
}

class DoNothingClosure : public OopClosure {
public:
  void do_oop(oop* p) {}
  void do_oop(narrowOop* p) {}
};

class ShenandoahWeakUpdateClosure : public OopClosure {
private:
  ShenandoahHeap* const _heap;

  template <class T>
  inline void do_oop_work(T* p) {
    oop o = _heap->maybe_update_with_forwarded(p);
    shenandoah_assert_marked_except(p, o, o == NULL);
  }

public:
  ShenandoahWeakUpdateClosure() : _heap(ShenandoahHeap::heap()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p) { do_oop_work(p); }
};

void ShenandoahConcurrentMark::weak_roots_work(bool full_gc) {
  ShenandoahPhaseTimings::Phase phase = full_gc ?
                                        ShenandoahPhaseTimings::full_gc_weak_roots :
                                        ShenandoahPhaseTimings::weak_roots;
  ShenandoahGCPhase root_phase(phase);
  ShenandoahGCWorkerPhase worker_phase(phase);

  ShenandoahIsAliveSelector is_alive;
  DoNothingClosure cl;
  ShenandoahWeakRoots weak_roots(phase);
  weak_roots.weak_oops_do(is_alive.is_alive_closure(), &cl, 0);
}

class ShenandoahCancelledGCYieldClosure : public YieldClosure {
private:
  ShenandoahHeap* const _heap;
public:
  ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
  virtual bool should_return() { return _heap->cancelled_gc(); }
};

class ShenandoahPrecleanCompleteGCClosure : public VoidClosure {
public:
  void do_void() {
    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
    assert(sh->process_references(), "why else would we be here?");
    ShenandoahTaskTerminator terminator(1, scm->task_queues());

    ReferenceProcessor* rp = sh->ref_processor();
    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(0, &terminator, rp,
                   false, // not cancellable
                   false); // do not do strdedup
  }
};

class ShenandoahPrecleanTask : public AbstractGangTask {
private:
  ReferenceProcessor* _rp;

public:
  ShenandoahPrecleanTask(ReferenceProcessor* rp) :
    AbstractGangTask("Precleaning task"),
    _rp(rp) {}

  void work(uint worker_id) {
    assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    assert(!sh->has_forwarded_objects(), "No forwarded objects expected here");

    ShenandoahObjToScanQueue* q = sh->concurrent_mark()->get_queue(worker_id);

    ShenandoahCancelledGCYieldClosure yield;
    ShenandoahPrecleanCompleteGCClosure complete_gc;

    ShenandoahIsAliveClosure is_alive;
    ShenandoahCMKeepAliveClosure keep_alive(q);
    ResourceMark rm;
    _rp->preclean_discovered_references(&is_alive, &keep_alive,
                                        &complete_gc, &yield,
                                        NULL, sh->shenandoah_policy()->tracer()->gc_id());
  }
};

void ShenandoahConcurrentMark::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This will filter out the references whose referents
  // are alive. Note that ReferenceProcessor already filters these out during
  // reference discovery, and the bulk of work is done here. This phase processes
  // the leftovers that missed the initial filtering, i.e. when the referent was
  // marked alive after the reference was discovered by the RP.

  assert(_heap->process_references(), "sanity");

  ReferenceProcessor* rp = _heap->ref_processor();

  assert(task_queues()->is_empty(), "Should be empty");

  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
  // queues and other goodies. When upstream ReferenceProcessor starts supporting
  // parallel precleans, we can extend this to more threads.
  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();
  assert(nworkers == 1, "This code uses only a single worker");
  task_queues()->reserve(nworkers);

  ShenandoahPrecleanTask task(rp);
  workers->run_task(&task);

  assert(task_queues()->is_empty(), "Should be empty");
}

void ShenandoahConcurrentMark::cancel() {
  // Clean up marking stacks.
  ShenandoahObjToScanQueueSet* queues = task_queues();
  queues->clear();

  // Cancel SATB buffers.
  JavaThread::satb_mark_queue_set().abandon_partial_marking();
}

ShenandoahObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
  assert(task_queues()->get_reserved() > worker_id, err_msg("No reserved queue for worker id: %d", worker_id));
  return _task_queues->queue(worker_id);
}

template <bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_prework(uint w, ShenandoahTaskTerminator *t, ReferenceProcessor *rp,
                                                 bool strdedup) {
  ShenandoahObjToScanQueue* q = get_queue(w);

  ShenandoahLiveData* ld = _heap->get_liveness_cache(w);

  // TODO: We can clean this up if we figure out how to do templated oop closures that
  // play nicely with specialized_oop_iterators.
  if (_heap->unload_classes()) {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
        ShenandoahMarkUpdateRefsMetadataDedupClosure cl(q, dq, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
        ShenandoahMarkRefsMetadataDedupClosure cl(q, dq, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  } else {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
        ShenandoahMarkUpdateRefsDedupClosure cl(q, dq, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
        ShenandoahMarkRefsDedupClosure cl(q, dq, rp);
        mark_loop_work<ShenandoahMarkRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  }

  _heap->flush_liveness_cache(w);
}
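
// Note (illustrative): the nested branches above select one of eight closure
// types along three independent axes: class unloading (Metadata-aware
// variants), forwarded objects (MarkUpdateRefs vs. MarkRefs), and string
// deduplication (Dedup variants fed by a per-worker ShenandoahStrDedupQueue).
// Each arm passes the closure type to mark_loop_work as a template argument,
// so the hot marking loop is specialized at compile time instead of making a
// virtual call per object.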

template <class T, bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint worker_id, ShenandoahTaskTerminator *terminator) {
  int seed = 17;
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q;
  ShenandoahMarkTask t;

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
         "Need to reserve proper number of queues");

  q = queues->claim_next();
  while (q != NULL) {
    if (CANCELLABLE && heap->cancelled_gc()) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(t)) {
        do_task<T>(q, cl, live_data, &t);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }

  q = get_queue(worker_id);

  ShenandoahStrDedupQueue* dq = NULL;
  if (ShenandoahStringDedup::is_enabled()) {
    dq = ShenandoahStringDedup::queue(worker_id);
  }

  ShenandoahSATBBufferClosure drain_satb(q, dq);
  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->cancelled_gc()) {
      return;
    }

    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      if (q->pop(t) ||
          queues->steal(worker_id, &seed, t)) {
        do_task<T>(q, cl, live_data, &t);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No work encountered in current stride, try to terminate.
      ShenandoahTerminatorTerminator tt(heap);
      if (terminator->offer_termination(&tt)) return;
    }
  }
}
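
// Note (illustrative): each stride of up to ShenandoahMarkLoopStride tasks is
// fed first from the worker's own queue and then by stealing from sibling
// queues (queues->steal with a per-worker seed), with completed SATB buffers
// drained between strides. Only a stride that finds no work at all leads to an
// offer_termination; passing ShenandoahTerminatorTerminator presumably lets
// the protocol exit early once the GC is cancelled, matching the CANCELLABLE
// checks above.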

bool ShenandoahConcurrentMark::claim_codecache() {
  return _claimed_codecache.try_set();
}

void ShenandoahConcurrentMark::clear_claim_codecache() {
  _claimed_codecache.unset();
}