GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp

/*
 * Copyright (c) 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shenandoah/shenandoahBreakpoint.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahStackWatermark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/allocation.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"

// Breakpoint support
class ShenandoahBreakpointGCScope : public StackObj {
public:
  ShenandoahBreakpointGCScope() {
    ShenandoahBreakpoint::at_before_gc();
  }

  ~ShenandoahBreakpointGCScope() {
    ShenandoahBreakpoint::at_after_gc();
  }
};

class ShenandoahBreakpointMarkScope : public StackObj {
public:
  ShenandoahBreakpointMarkScope() {
    ShenandoahBreakpoint::at_after_marking_started();
  }

  ~ShenandoahBreakpointMarkScope() {
    ShenandoahBreakpoint::at_before_marking_completed();
  }
};

ShenandoahConcurrentGC::ShenandoahConcurrentGC() :
  _mark(),
  _degen_point(ShenandoahDegenPoint::_degenerated_unset) {
}

ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
  return _degen_point;
}

void ShenandoahConcurrentGC::cancel() {
  ShenandoahConcurrentMark::cancel();
}
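
// Editorial note (summarizing the control flow below): collect() drives one
// full concurrent cycle in the usual Shenandoah order: concurrent reset,
// STW init mark, concurrent marking, STW final mark, concurrent weak/strong
// root processing, evacuation, and update-refs. A false return means the
// cycle was cancelled at the recorded _degen_point, where a degenerated
// (STW) cycle is expected to pick up the remaining work.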
bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (cause == GCCause::_wb_breakpoint) {
    ShenandoahBreakpoint::start_gc();
  }
  ShenandoahBreakpointGCScope breakpoint_gc_scope;

  // Reset for upcoming marking
  entry_reset();

  // Start initial mark under STW
  vmop_entry_init_mark();

  {
    ShenandoahBreakpointMarkScope breakpoint_mark_scope;
    // Concurrent mark roots
    entry_mark_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_outside_cycle)) return false;

    // Continue concurrent mark
    entry_mark();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) return false;
  }

  // Complete marking under STW, and start evacuation
  vmop_entry_final_mark();

  // Concurrent stack processing
  if (heap->is_evacuation_in_progress()) {
    entry_thread_roots();
  }

  // Process weak roots that might still point to regions that would be broken by cleanup
  if (heap->is_concurrent_weak_root_in_progress()) {
    entry_weak_refs();
    entry_weak_roots();
  }

  // Final mark might have reclaimed some immediate garbage; kick cleanup to reclaim
  // the space. This would be the last action if there is nothing to evacuate.
  entry_cleanup_early();

  {
    ShenandoahHeapLocker locker(heap->lock());
    heap->free_set()->log_status();
  }

  // Perform concurrent class unloading
  if (heap->unload_classes() &&
      heap->is_concurrent_weak_root_in_progress()) {
    entry_class_unloading();
  }

  // Process strong roots.
  // This may be skipped if there is nothing to update/evacuate.
  // If so, strong_root_in_progress would be unset.
  if (heap->is_concurrent_strong_root_in_progress()) {
    entry_strong_roots();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    entry_evacuate();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) return false;

    // Perform update-refs phase.
    vmop_entry_init_updaterefs();
    entry_updaterefs();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;

    // Concurrent update thread roots
    entry_update_thread_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;

    vmop_entry_final_updaterefs();

    // Update-refs has freed up the collection set; kick the cleanup to reclaim the space.
    entry_cleanup_complete();
  } else {
    vmop_entry_final_roots();
  }

  return true;
}
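
// Editorial note: each vmop_entry_* method below hands a VM operation to the
// VM thread, which runs the matching entry_* method at a safepoint; that
// entry_* method in turn sets up timing/workers and calls the op_* method
// that does the actual work.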
void ShenandoahConcurrentGC::vmop_entry_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahInitMark op(this);
  VMThread::execute(&op); // jump to entry_init_mark() under safepoint
}

void ShenandoahConcurrentGC::vmop_entry_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalMarkStartEvac op(this);
  VMThread::execute(&op); // jump to entry_final_mark() under safepoint
}

void ShenandoahConcurrentGC::vmop_entry_init_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahInitUpdateRefs op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::vmop_entry_final_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalUpdateRefs op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::vmop_entry_final_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalRoots op(this);
  VMThread::execute(&op);
}
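
// Editorial note: the pause-side entries below each wrap their op_* call in a
// pause phase timing, a JFR event mark, and (where workers are used) a worker
// scope sized by ShenandoahWorkerPolicy.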
void ShenandoahConcurrentGC::entry_init_mark() {
  const char* msg = init_mark_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
                              "init marking");

  op_init_mark();
}

void ShenandoahConcurrentGC::entry_final_mark() {
  const char* msg = final_mark_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
                              "final marking");

  op_final_mark();
}

void ShenandoahConcurrentGC::entry_init_updaterefs() {
  static const char* msg = "Pause Init Update Refs";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required
  op_init_updaterefs();
}

void ShenandoahConcurrentGC::entry_final_updaterefs() {
  static const char* msg = "Pause Final Update Refs";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
                              "final reference update");

  op_final_updaterefs();
}

void ShenandoahConcurrentGC::entry_final_roots() {
  static const char* msg = "Pause Final Roots";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
  EventMark em("%s", msg);

  op_final_roots();
}
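
// Editorial note: the concurrent-side entries below follow the same pattern
// as the pause entries, but are tracked under the concurrent collection
// counters, and most call try_inject_alloc_failure() to support allocation
// failure injection during testing.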
void ShenandoahConcurrentGC::entry_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent reset";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                              "concurrent reset");

  heap->try_inject_alloc_failure();
  op_reset();
}

void ShenandoahConcurrentGC::entry_mark_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = "Concurrent marking roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking roots");

  heap->try_inject_alloc_failure();
  op_mark_roots();
}

void ShenandoahConcurrentGC::entry_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_mark_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking");

  heap->try_inject_alloc_failure();
  op_mark();
}

void ShenandoahConcurrentGC::entry_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  static const char* msg = "Concurrent thread roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              msg);

  heap->try_inject_alloc_failure();
  op_thread_roots();
}

void ShenandoahConcurrentGC::entry_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  static const char* msg = "Concurrent weak references";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing(),
                              "concurrent weak references");

  heap->try_inject_alloc_failure();
  op_weak_refs();
}

void ShenandoahConcurrentGC::entry_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent weak roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent weak root");

  heap->try_inject_alloc_failure();
  op_weak_roots();
}

void ShenandoahConcurrentGC::entry_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent class unloading";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent class unloading");

  heap->try_inject_alloc_failure();
  op_class_unloading();
}

void ShenandoahConcurrentGC::entry_strong_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent strong roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots);
  EventMark em("%s", msg);

  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent strong root");

  heap->try_inject_alloc_failure();
  op_strong_roots();
}

void ShenandoahConcurrentGC::entry_cleanup_early() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent cleanup";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  op_cleanup_early();
}

void ShenandoahConcurrentGC::entry_evacuate() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent evacuation";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
                              "concurrent evacuation");

  heap->try_inject_alloc_failure();
  op_evacuate();
}

void ShenandoahConcurrentGC::entry_update_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent update thread roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_thread_roots);
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required
  heap->try_inject_alloc_failure();
  op_update_thread_roots();
}

void ShenandoahConcurrentGC::entry_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent update references";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
                              "concurrent reference update");

  heap->try_inject_alloc_failure();
  op_updaterefs();
}

void ShenandoahConcurrentGC::entry_cleanup_complete() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent cleanup";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  op_cleanup_complete();
}
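
// Editorial note: the op_* methods below do the real work of each phase,
// running either inside a safepoint (for the pauses) or concurrently with
// mutators.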
void ShenandoahConcurrentGC::op_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_reset();
  }

  heap->prepare_gc();
}

class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
    if (r->is_active()) {
      // Check if region needs updating its TAMS. We have updated it already during concurrent
      // reset, so it is very likely we don't need to do another write here.
      if (_ctx->top_at_mark_start(r) != r->top()) {
        _ctx->capture_top_at_mark_start(r);
      }
    } else {
      assert(_ctx->top_at_mark_start(r) == r->top(),
             "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
    }
  }

  bool is_thread_safe() { return true; }
};
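
// Editorial note: STW init mark, as seen below, captures TAMS for active
// regions, resets reference-processor state, and arms nmethods and stack
// watermarks so that marking can then proceed concurrently with mutators.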
void ShenandoahConcurrentGC::op_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");

  assert(heap->marking_context()->is_bitmap_clear(), "need clear marking bitmap");
  assert(!heap->marking_context()->is_complete(), "should not be complete");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_concmark();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  heap->set_concurrent_mark_in_progress(true);

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    heap->parallel_heap_region_iterate(&cl);
  }

  // Weak reference processing
  ShenandoahReferenceProcessor* rp = heap->ref_processor();
  rp->reset_thread_locals();
  rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());

  // Make above changes visible to worker threads
  OrderAccess::fence();
  // Arm nmethods for concurrent marking. When an nmethod is about to be executed,
  // we need to make sure that all its metadata are marked. The alternative is to
  // re-mark thread roots at the final mark pause, but that can be a potential
  // latency killer.
  if (heap->unload_classes()) {
    ShenandoahCodeRoots::arm_nmethods();
  }

  ShenandoahStackWatermark::change_epoch_id();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_mark();
  }
}

void ShenandoahConcurrentGC::op_mark_roots() {
  _mark.mark_concurrent_roots();
}

void ShenandoahConcurrentGC::op_mark() {
  _mark.concurrent_mark();
}
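
// Editorial note: STW final mark, below, finishes marking, selects the
// collection set, and either starts evacuation or, if the collection set
// turns out to be empty, winds the cycle down.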
void ShenandoahConcurrentGC::op_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded();
  }

  if (!heap->cancelled_gc()) {
    _mark.finish_mark();
    assert(!heap->cancelled_gc(), "STW mark cannot OOM");

    // Notify JVMTI that the tagmap table will need cleaning.
    JvmtiTagMap::set_needs_cleaning();

    heap->prepare_regions_and_collection_set(true /*concurrent*/);

    // Has to be done after cset selection
    heap->prepare_concurrent_roots();

    if (!heap->collection_set()->is_empty()) {
      if (ShenandoahVerify) {
        heap->verifier()->verify_before_evacuation();
      }

      heap->set_evacuation_in_progress(true);
      // From here on, we need to update references.
      heap->set_has_forwarded_objects(true);

      // Verify before arming for concurrent processing.
      // Otherwise, verification can trigger stack processing.
      if (ShenandoahVerify) {
        heap->verifier()->verify_during_evacuation();
      }

      // Arm nmethods/stack for concurrent processing
      ShenandoahCodeRoots::arm_nmethods();
      ShenandoahStackWatermark::change_epoch_id();

      // Notify JVMTI that oops are changed.
      JvmtiTagMap::set_needs_rehashing();

      if (ShenandoahPacing) {
        heap->pacer()->setup_for_evac();
      }
    } else {
      if (ShenandoahVerify) {
        heap->verifier()->verify_after_concmark();
      }

      if (VerifyAfterGC) {
        Universe::verify();
      }
    }
  }
}
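
// Editorial note: the classes below implement concurrent Java thread stack
// processing on top of stack watermarks; StackWatermarkSet::finish_processing
// applies the evacuate/update closure to any frames the thread has not yet
// processed on its own.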
class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
private:
  OopClosure* const _oops;

public:
  ShenandoahConcurrentEvacThreadClosure(OopClosure* oops);
  void do_thread(Thread* thread);
};

ShenandoahConcurrentEvacThreadClosure::ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) :
  _oops(oops) {
}

void ShenandoahConcurrentEvacThreadClosure::do_thread(Thread* thread) {
  JavaThread* const jt = thread->as_Java_thread();
  StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
}

class ShenandoahConcurrentEvacUpdateThreadTask : public AbstractGangTask {
private:
  ShenandoahJavaThreadsIterator _java_threads;

public:
  ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
    AbstractGangTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
    _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
  }

  void work(uint worker_id) {
    // ShenandoahEvacOOMScope has to be set up by ShenandoahContextEvacuateUpdateRootsClosure.
    // Otherwise, it may deadlock with the watermark lock.
    ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
    ShenandoahConcurrentEvacThreadClosure thr_cl(&oops_cl);
    _java_threads.threads_do(&thr_cl, worker_id);
  }
};

void ShenandoahConcurrentGC::op_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_evacuation_in_progress(), "Checked by caller");
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
  ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
  heap->workers()->run_task(&task);
}

void ShenandoahConcurrentGC::op_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak refs processing
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
  ShenandoahBreakpoint::at_after_reference_processing_started();
  heap->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
}
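
// Editorial note: the closure below handles OopStorage-backed weak roots.
// Live referents are evacuated or updated; unmarked (dead) referents are
// cleared to NULL with a CAS, since this iteration can race against
// concurrent OopStorage::release() calls.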
class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _mark_context;
  bool _evac_in_progress;
  Thread* const _thread;

public:
  ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
  void do_oop(oop* p);
  void do_oop(narrowOop* p);
};

ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
  _heap(ShenandoahHeap::heap()),
  _mark_context(ShenandoahHeap::heap()->marking_context()),
  _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
  _thread(Thread::current()) {
}

void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
  const oop obj = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(obj)) {
    if (!_mark_context->is_marked(obj)) {
      shenandoah_assert_correct(p, obj);
      Atomic::cmpxchg(p, obj, oop(NULL));
    } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
      oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      if (resolved == obj) {
        resolved = _heap->evacuate_object(obj, _thread);
      }
      Atomic::cmpxchg(p, obj, resolved);
      assert(_heap->cancelled_gc() ||
             (_mark_context->is_marked(resolved) && !_heap->in_collection_set(resolved)),
             "Sanity");
    }
  }
}

void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
  ShouldNotReachHere();
}

class ShenandoahIsCLDAliveClosure : public CLDClosure {
public:
  void do_cld(ClassLoaderData* cld) {
    cld->is_alive();
  }
};

class ShenandoahIsNMethodAliveClosure : public NMethodClosure {
public:
  void do_nmethod(nmethod* n) {
    n->is_unloading();
  }
};

// This task not only evacuates/updates marked weak roots, but also "NULL"s
// dead weak roots.
class ShenandoahConcurrentWeakRootsEvacUpdateTask : public AbstractGangTask {
private:
  ShenandoahVMWeakRoots<true /*concurrent*/> _vm_roots;

  // Roots related to concurrent class unloading
  ShenandoahClassLoaderDataRoots<true /* concurrent */, true /* single thread */> _cld_roots;
  ShenandoahConcurrentNMethodIterator _nmethod_itr;
  ShenandoahPhaseTimings::Phase _phase;

public:
  ShenandoahConcurrentWeakRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
    AbstractGangTask("Shenandoah Evacuate/Update Concurrent Weak Roots"),
    _vm_roots(phase),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers()),
    _nmethod_itr(ShenandoahCodeRoots::table()),
    _phase(phase) {
    if (ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_begin();
    }
  }

  ~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
    if (ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_end();
    }
    // Notify runtime data structures of potentially dead oops
    _vm_roots.report_num_dead();
  }

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    {
      ShenandoahEvacOOMScope oom;
      // jni_roots and weak_roots are OopStorage backed roots, concurrent iteration
      // may race against OopStorage::release() calls.
      ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
      _vm_roots.oops_do(&cl, worker_id);
    }

    // If we are going to perform concurrent class unloading later on, we need to
    // clean up the weak oops in the CLDs and determine the nmethods' unloading
    // state, so that we can clean up the immediate garbage sooner.
    if (ShenandoahHeap::heap()->unload_classes()) {
      // Applies the ShenandoahIsCLDAliveClosure to CLDs; the native barrier will
      // either NULL the CLD's holder or evacuate it.
      {
        ShenandoahIsCLDAliveClosure is_cld_alive;
        _cld_roots.cld_do(&is_cld_alive, worker_id);
      }

      // Applies the ShenandoahIsNMethodAliveClosure to registered nmethods.
      // The closure calls nmethod->is_unloading(). The is_unloading state is
      // cached; therefore, during the concurrent class unloading phase, we
      // will not touch the metadata of unloading nmethods.
      {
        ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
        ShenandoahIsNMethodAliveClosure is_nmethod_alive;
        _nmethod_itr.nmethods_do(&is_nmethod_alive);
      }
    }
  }
};

void ShenandoahConcurrentGC::op_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak root processing
  {
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
    heap->workers()->run_task(&task);
  }

  // Perform handshake to flush out dead oops
  {
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
    heap->rendezvous_threads();
  }
}

void ShenandoahConcurrentGC::op_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress() &&
         heap->unload_classes(),
         "Checked by caller");
  heap->do_class_unloading();
}
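
// Editorial note: the closure below evacuates/updates the oops embedded in an
// nmethod under its GC-data lock, then disarms the nmethod's entry barrier so
// Java threads can enter it without re-triggering the barrier.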
class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
private:
  BarrierSetNMethod* const _bs;
  ShenandoahEvacuateUpdateMetadataClosure<> _cl;

public:
  ShenandoahEvacUpdateCodeCacheClosure() :
    _bs(BarrierSet::barrier_set()->barrier_set_nmethod()),
    _cl() {
  }

  void do_nmethod(nmethod* n) {
    ShenandoahNMethod* data = ShenandoahNMethod::gc_data(n);
    ShenandoahReentrantLocker locker(data->lock());
    // Set up the EvacOOM scope below the reentrant lock to avoid deadlock with
    // nmethod_entry_barrier.
    ShenandoahEvacOOMScope oom;
    data->oops_do(&_cl, true /*fix relocation*/);
    _bs->disarm(n);
  }
};

class ShenandoahConcurrentRootsEvacUpdateTask : public AbstractGangTask {
private:
  ShenandoahPhaseTimings::Phase _phase;
  ShenandoahVMRoots<true /*concurrent*/> _vm_roots;
  ShenandoahClassLoaderDataRoots<true /*concurrent*/, false /*single threaded*/> _cld_roots;
  ShenandoahConcurrentNMethodIterator _nmethod_itr;

public:
  ShenandoahConcurrentRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
    AbstractGangTask("Shenandoah Evacuate/Update Concurrent Strong Roots"),
    _phase(phase),
    _vm_roots(phase),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers()),
    _nmethod_itr(ShenandoahCodeRoots::table()) {
    if (!ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_begin();
    }
  }

  ~ShenandoahConcurrentRootsEvacUpdateTask() {
    if (!ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_end();
    }
  }

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    {
      ShenandoahEvacOOMScope oom;
      {
        // vm_roots and weak_roots are OopStorage backed roots, concurrent iteration
        // may race against OopStorage::release() calls.
        ShenandoahContextEvacuateUpdateRootsClosure cl;
        _vm_roots.oops_do<ShenandoahContextEvacuateUpdateRootsClosure>(&cl, worker_id);
      }

      {
        ShenandoahEvacuateUpdateMetadataClosure<> cl;
        CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
        _cld_roots.cld_do(&clds, worker_id);
      }
    }

    // Cannot set up ShenandoahEvacOOMScope here, due to potential deadlock with nmethod_entry_barrier.
    if (!ShenandoahHeap::heap()->unload_classes()) {
      ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
      ShenandoahEvacUpdateCodeCacheClosure cl;
      _nmethod_itr.nmethods_do(&cl);
    }
  }
};

void ShenandoahConcurrentGC::op_strong_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
  ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
  heap->workers()->run_task(&task);
  heap->set_concurrent_strong_root_in_progress(false);
}

void ShenandoahConcurrentGC::op_cleanup_early() {
  ShenandoahHeap::heap()->free_set()->recycle_trash();
}

void ShenandoahConcurrentGC::op_evacuate() {
  ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
}

void ShenandoahConcurrentGC::op_init_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->set_evacuation_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);
  heap->prepare_update_heap_references(true /*concurrent*/);
  heap->set_update_refs_in_progress(true);

  if (ShenandoahPacing) {
    heap->pacer()->setup_for_updaterefs();
  }
}

void ShenandoahConcurrentGC::op_updaterefs() {
  ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
}
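
// Editorial note: thread roots are updated below via a handshake rather than
// a global safepoint; each Java thread is stopped individually while the
// closure walks its oops.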
class ShenandoahUpdateThreadClosure : public HandshakeClosure {
private:
  ShenandoahUpdateRefsClosure _cl;
public:
  ShenandoahUpdateThreadClosure();
  void do_thread(Thread* thread);
};

ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
  HandshakeClosure("Shenandoah Update Thread Roots") {
}

void ShenandoahUpdateThreadClosure::do_thread(Thread* thread) {
  if (thread->is_Java_thread()) {
    JavaThread* jt = thread->as_Java_thread();
    ResourceMark rm;
    jt->oops_do(&_cl, NULL);
  }
}

void ShenandoahConcurrentGC::op_update_thread_roots() {
  ShenandoahUpdateThreadClosure cl;
  Handshake::execute(&cl);
}

void ShenandoahConcurrentGC::op_final_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
  assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");

  heap->finish_concurrent_roots();

  // Clear the cancelled-GC flag, if set. On the cancellation path, the block
  // before would handle everything.
  if (heap->cancelled_gc()) {
    heap->clear_cancelled_gc();
  }

  // Has to be done before the cset is cleared
  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_in_to_space();
  }

  heap->update_heap_region_states(true /*concurrent*/);

  heap->set_update_refs_in_progress(false);
  heap->set_has_forwarded_objects(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_updaterefs();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  heap->rebuild_free_set(true /*concurrent*/);
}

void ShenandoahConcurrentGC::op_final_roots() {
  ShenandoahHeap::heap()->set_concurrent_weak_root_in_progress(false);
}

void ShenandoahConcurrentGC::op_cleanup_complete() {
  ShenandoahHeap::heap()->free_set()->recycle_trash();
}

bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
  if (ShenandoahHeap::heap()->cancelled_gc()) {
    _degen_point = point;
    return true;
  }
  return false;
}

const char* ShenandoahConcurrentGC::init_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
  if (heap->unload_classes()) {
    return "Pause Init Mark (unload classes)";
  } else {
    return "Pause Init Mark";
  }
}

const char* ShenandoahConcurrentGC::final_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
  if (heap->unload_classes()) {
    return "Pause Final Mark (unload classes)";
  } else {
    return "Pause Final Mark";
  }
}

const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
  if (heap->unload_classes()) {
    return "Concurrent marking (unload classes)";
  } else {
    return "Concurrent marking";
  }
}