GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
/*
 * Copyright (c) 2014, 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "compiler/oopMap.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "gc/shared/workgroup.hpp"
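// Overview: Shenandoah Full GC is the fully stop-the-world, sliding ("mark-compact")
// collection, used as the recovery path when concurrent collection cannot complete
// (for example, when a degenerated cycle upgrades to full GC) or when a full
// collection is otherwise requested. It runs as a single "Pause Full" safepoint
// operation with four phases:
//   Phase 1: mark all live objects from roots (STW mark);
//   Phase 2: compute new (forwarding) addresses for regular and humongous objects;
//   Phase 3: adjust all heap and root references to the new addresses;
//   Phase 4: copy objects to their new locations and rebuild region states.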
ShenandoahFullGC::ShenandoahFullGC() :
  _gc_timer(ShenandoahHeap::heap()->gc_timer()),
  _preserved_marks(new PreservedMarksSet(true)) {}

bool ShenandoahFullGC::collect(GCCause::Cause cause) {
  vmop_entry_full(cause);
  // Always succeeds
  return true;
}

void ShenandoahFullGC::vmop_entry_full(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->full_stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFullGC op(cause, this);
  VMThread::execute(&op);
}

void ShenandoahFullGC::entry_full(GCCause::Cause cause) {
  static const char* msg = "Pause Full";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::full_gc, true /* log_heap_usage */);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
                              "full gc");

  op_full(cause);
}

void ShenandoahFullGC::op_full(GCCause::Cause cause) {
  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  // Perform full GC
  do_it(cause);

  metrics.snap_after();

  if (metrics.is_good_progress()) {
    ShenandoahHeap::heap()->notify_gc_progress();
  } else {
    // Nothing to do. Tell the allocation path that we have failed to make
    // progress, and it can finally fail.
    ShenandoahHeap::heap()->notify_gc_no_progress();
  }
}
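// do_it() runs the whole collection at a safepoint on the VM thread: it first cancels
// whatever concurrent phase was in flight and normalizes the GC state, then executes
// the four phases back to back, and finally restores preserved marks and region/heap
// bookkeeping before mutators resume.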
void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_fullgc();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  // Degenerated GC may carry concurrent root flags when upgrading to
  // full GC. We need to reset them before mutators resume.
  heap->set_concurrent_strong_root_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);

  heap->set_full_gc_in_progress(true);

  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_pre);
    heap->pre_full_gc_dump(_gc_timer);
  }

  {
    ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
    // Full GC is supposed to recover from any GC state:

    // a0. Remember if we have forwarded objects
    bool has_forwarded_objects = heap->has_forwarded_objects();

    // a1. Cancel evacuation, if in progress
    if (heap->is_evacuation_in_progress()) {
      heap->set_evacuation_in_progress(false);
    }
    assert(!heap->is_evacuation_in_progress(), "sanity");

    // a2. Cancel update-refs, if in progress
    if (heap->is_update_refs_in_progress()) {
      heap->set_update_refs_in_progress(false);
    }
    assert(!heap->is_update_refs_in_progress(), "sanity");

    // b. Cancel concurrent mark, if in progress
    if (heap->is_concurrent_mark_in_progress()) {
      ShenandoahConcurrentGC::cancel();
      heap->set_concurrent_mark_in_progress(false);
    }
    assert(!heap->is_concurrent_mark_in_progress(), "sanity");

    // c. Update roots if this full GC is due to evac-oom, which may carry from-space pointers in roots.
    if (has_forwarded_objects) {
      update_roots(true /*full_gc*/);
    }

    // d. Reset the bitmaps for new marking
    heap->reset_mark_bitmap();
    assert(heap->marking_context()->is_bitmap_clear(), "sanity");
    assert(!heap->marking_context()->is_complete(), "sanity");

    // e. Abandon reference discovery and clear all discovered references.
    ShenandoahReferenceProcessor* rp = heap->ref_processor();
    rp->abandon_partial_discovery();

    // f. Sync pinned region status from the CP marks
    heap->sync_pinned_region_status();

    // The rest of prologue:
    BiasedLocking::preserve_marks();
    _preserved_marks->init(heap->workers()->active_workers());

    assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
  }

  if (UseTLAB) {
    heap->gclabs_retire(ResizeTLAB);
    heap->tlabs_retire(ResizeTLAB);
  }

  OrderAccess::fence();

  phase1_mark_heap();

  // Once marking is done, which may have fixed up forwarded objects, we can drop it.
  // Coming out of Full GC, we would not have any forwarded objects.
  // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
  heap->set_has_forwarded_objects(false);

  heap->set_full_gc_move_in_progress(true);

  // Setup workers for the rest
  OrderAccess::fence();

  // Initialize worker slices
  ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
  for (uint i = 0; i < heap->max_workers(); i++) {
    worker_slices[i] = new ShenandoahHeapRegionSet();
  }

  {
    // The rest of code performs region moves, where region status is undefined
    // until all phases run together.
    ShenandoahHeapLocker lock(heap->lock());

    phase2_calculate_target_addresses(worker_slices);

    OrderAccess::fence();

    phase3_update_references();

    phase4_compact_objects(worker_slices);
  }

  {
    // Epilogue
    _preserved_marks->restore(heap->workers());
    BiasedLocking::restore_marks();
    _preserved_marks->reclaim();
  }

  // Resize metaspace
  MetaspaceGC::compute_new_size();

  // Free worker slices
  for (uint i = 0; i < heap->max_workers(); i++) {
    delete worker_slices[i];
  }
  FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);

  heap->set_full_gc_move_in_progress(false);
  heap->set_full_gc_in_progress(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_fullgc();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_post);
    heap->post_full_gc_dump(_gc_timer);
  }
}

class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion *r) {
    _ctx->capture_top_at_mark_start(r);
    r->clear_live_data();
  }
};

void ShenandoahFullGC::phase1_mark_heap() {
  GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
  ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahPrepareForMarkClosure cl;
  heap->heap_region_iterate(&cl);

  heap->set_unload_classes(heap->heuristics()->can_unload_classes());

  ShenandoahReferenceProcessor* rp = heap->ref_processor();
  // enable ("weak") refs discovery
  rp->set_soft_reference_policy(true); // forcefully purge all soft references

  ShenandoahSTWMark mark(true /*full_gc*/);
  mark.mark();
  heap->parallel_cleaning(true /* full_gc */);
}
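// Phase 2 helper: walks the marked objects of one worker slice in address order and
// installs forwarding pointers that slide each object towards the bottom of the slice.
// When the current target region fills up, compaction continues in the next remembered
// empty region, or within the from-region itself if no empty region is available.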
class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
private:
  PreservedMarks* const _preserved_marks;
  ShenandoahHeap* const _heap;
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion* _to_region;
  ShenandoahHeapRegion* _from_region;
  HeapWord* _compact_point;

public:
  ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
                                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                              ShenandoahHeapRegion* to_region) :
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _to_region(to_region),
    _from_region(NULL),
    _compact_point(to_region->bottom()) {}

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  void finish_region() {
    assert(_to_region != NULL, "should not happen");
    _to_region->set_new_top(_compact_point);
  }

  bool is_compact_same_region() {
    return _from_region == _to_region;
  }

  int empty_regions_pos() {
    return _empty_regions_pos;
  }

  void do_object(oop p) {
    assert(_from_region != NULL, "must set before work");
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");

    size_t obj_size = p->size();
    if (_compact_point + obj_size > _to_region->end()) {
      finish_region();

      // Object doesn't fit. Pick next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty regions? Compact within the same region.
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != NULL, "must not be NULL");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }

    // Object fits into current region, record new location:
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(NULL, p);
    _preserved_marks->push_if_necessary(p, p->mark());
    p->forward_to(cast_to_oop(_compact_point));
    _compact_point += obj_size;
  }
};

class ShenandoahPrepareForCompactionTask : public AbstractGangTask {
private:
  PreservedMarksSet* const _preserved_marks;
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices) :
    AbstractGangTask("Shenandoah Prepare For Compaction"),
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
  }

  static bool is_candidate_region(ShenandoahHeapRegion* r) {
    // Empty region: get it into the slice to defragment the slice itself.
    // We could have skipped this without violating correctness, but we really
    // want to compact all live regions to the start of the heap, which sometimes
    // means moving them into the fully empty regions.
    if (r->is_empty()) return true;

    // Can move the region, and this is not a humongous region. Humongous
    // moves are handled separately.
    return r->is_stw_move_allowed() && !r->is_humongous();
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
    ShenandoahHeapRegionSetIterator it(slice);
    ShenandoahHeapRegion* from_region = it.next();
    // No work?
    if (from_region == NULL) {
      return;
    }

    // Sliding compaction. Walk all regions in the slice, and compact them.
    // Remember empty regions and reuse them as needed.
    ResourceMark rm;

    GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());

    ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);

    while (from_region != NULL) {
      assert(is_candidate_region(from_region), "Sanity");

      cl.set_from_region(from_region);
      if (from_region->has_live()) {
        _heap->marked_object_iterate(from_region, &cl);
      }

      // Compacted the region to somewhere else? From-region is empty then.
      if (!cl.is_compact_same_region()) {
        empty_regions.append(from_region);
      }
      from_region = it.next();
    }
    cl.finish_region();

    // Mark all remaining regions as empty
    for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
      ShenandoahHeapRegion* r = empty_regions.at(pos);
      r->set_new_top(r->bottom());
    }
  }
};

void ShenandoahFullGC::calculate_target_humongous_objects() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compute the new addresses for humongous objects. We need to do this after addresses
  // for regular objects are calculated, and we know what regions in the heap suffix are
  // available for humongous moves.
  //
  // Scan the heap backwards, because we are compacting humongous regions towards the end.
  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  // a humongous start there.
  //
  // The complication is potential non-movable regions during the scan. If such a region is
  // detected, then sliding restarts towards that non-movable region.

  size_t to_begin = heap->num_regions();
  size_t to_end = heap->num_regions();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion *r = heap->get_region(c - 1);
    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
      // To-region candidate: record this, and continue scan
      to_begin = r->index();
      continue;
    }

    if (r->is_humongous_start() && r->is_stw_move_allowed()) {
      // From-region candidate: movable humongous region
      oop old_obj = cast_to_oop(r->bottom());
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t start = to_end - num_regions;

      if (start >= to_begin && start != r->index()) {
        // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
        _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
        old_obj->forward_to(cast_to_oop(heap->get_region(start)->bottom()));
        to_end = start;
        continue;
      }
    }

    // Failed to fit. Scan starting from current region.
    to_begin = r->index();
    to_end = r->index();
  }
}
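// Helper closures for the phase 2 prologue: ShenandoahEnsureHeapActiveClosure brings every
// region into a committed, usable state (recycle trash, demote cset regions, commit empty
// uncommitted regions) and records current occupancy in new_top, so that empty regions are
// visible to the rest of the Full GC code. ShenandoahTrashImmediateGarbageClosure turns
// regular and humongous regions with no live data into trash so they need not be compacted.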
class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_trash()) {
      r->recycle();
    }
    if (r->is_cset()) {
      r->make_regular_bypass();
    }
    if (r->is_empty_uncommitted()) {
      r->make_committed_bypass();
    }
    assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->index());

    // Record current region occupancy: this communicates empty regions are free
    // to the rest of Full GC code.
    r->set_new_top(r->top());
  }
};

class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahTrashImmediateGarbageClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_humongous_start()) {
      oop humongous_obj = cast_to_oop(r->bottom());
      if (!_ctx->is_marked(humongous_obj)) {
        assert(!r->has_live(),
               "Region " SIZE_FORMAT " is not marked, should not have live", r->index());
        _heap->trash_humongous_region_at(r);
      } else {
        assert(r->has_live(),
               "Region " SIZE_FORMAT " should have live", r->index());
      }
    } else if (r->is_humongous_continuation()) {
      // If we hit continuation, the non-live humongous starts should have been trashed already
      assert(r->humongous_start_region()->has_live(),
             "Region " SIZE_FORMAT " should have live", r->index());
    } else if (r->is_regular()) {
      if (!r->has_live()) {
        r->make_trash_immediate();
      }
    }
  }
};

void ShenandoahFullGC::distribute_slices(ShenandoahHeapRegionSet** worker_slices) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  uint n_workers = heap->workers()->active_workers();
  size_t n_regions = heap->num_regions();

  // What we want to accomplish: have the dense prefix of data, while still balancing
  // out the parallel work.
  //
  // Assuming the amount of work is driven by the live data that needs moving, we can slice
  // the entire heap into equal-live-sized prefix slices, and compact into them. So, each
  // thread takes all regions in its prefix subset, and then it takes some regions from
  // the tail.
  //
  // Tail region selection becomes interesting.
  //
  // First, we want to distribute the regions fairly between the workers, and those regions
  // might have different amounts of live data. So, until we are sure no workers need live
  // data, we only take what each worker needs.
  //
  // Second, since we slide everything to the left in each slice, the busiest regions
  // would be the ones on the left, which means we want all workers to have their after-tail
  // regions as close to the left as possible.
  //
  // The easiest way to do this is to distribute after-tail regions in round-robin fashion
  // among workers that still need live data.
  //
  // Consider parallel workers A, B, C, then the target slice layout would be:
  //
  // AAAAAAAABBBBBBBBCCCCCCCC|ABCABCABCABCABCABCABCABABABABABABABABABABAAAAA
  //
  // (.....dense-prefix.....) (.....................tail...................)
  // [all regions fully live] [left-most regions are fuller than right-most]
  //

  // Compute how much live data is there. This would approximate the size of dense prefix
  // we target to create.
  size_t total_live = 0;
  for (size_t idx = 0; idx < n_regions; idx++) {
    ShenandoahHeapRegion *r = heap->get_region(idx);
    if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      total_live += r->get_live_data_words();
    }
  }

  // Estimate the size for the dense prefix. Note that we specifically count only the
  // "full" regions, so there would be some non-full regions in the slice tail.
  size_t live_per_worker = total_live / n_workers;
  size_t prefix_regions_per_worker = live_per_worker / ShenandoahHeapRegion::region_size_words();
  size_t prefix_regions_total = prefix_regions_per_worker * n_workers;
  prefix_regions_total = MIN2(prefix_regions_total, n_regions);
  assert(prefix_regions_total <= n_regions, "Sanity");

  // There might be non-candidate regions in the prefix. To compute where the tail actually
  // ends up being, we need to account for those as well.
  size_t prefix_end = prefix_regions_total;
  for (size_t idx = 0; idx < prefix_regions_total; idx++) {
    ShenandoahHeapRegion *r = heap->get_region(idx);
    if (!ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      prefix_end++;
    }
  }
  prefix_end = MIN2(prefix_end, n_regions);
  assert(prefix_end <= n_regions, "Sanity");

  // Distribute prefix regions per worker: each thread definitely gets its own same-sized
  // subset of dense prefix.
  size_t prefix_idx = 0;

  size_t* live = NEW_C_HEAP_ARRAY(size_t, n_workers, mtGC);

  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSet* slice = worker_slices[wid];

    live[wid] = 0;
    size_t regs = 0;

    // Add all prefix regions for this worker
    while (prefix_idx < prefix_end && regs < prefix_regions_per_worker) {
      ShenandoahHeapRegion *r = heap->get_region(prefix_idx);
      if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
        slice->add_region(r);
        live[wid] += r->get_live_data_words();
        regs++;
      }
      prefix_idx++;
    }
  }

  // Distribute the tail among workers in round-robin fashion.
  size_t wid = n_workers - 1;

  for (size_t tail_idx = prefix_end; tail_idx < n_regions; tail_idx++) {
    ShenandoahHeapRegion *r = heap->get_region(tail_idx);
    if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      assert(wid < n_workers, "Sanity");

      size_t live_region = r->get_live_data_words();

      // Select next worker that still needs live data.
      size_t old_wid = wid;
      do {
        wid++;
        if (wid == n_workers) wid = 0;
      } while (live[wid] + live_region >= live_per_worker && old_wid != wid);

      if (old_wid == wid) {
        // Circled back to the same worker? This means liveness data was
        // miscalculated. Bump the live_per_worker limit so that
        // everyone gets a piece of the leftover work.
        live_per_worker += ShenandoahHeapRegion::region_size_words();
      }

      worker_slices[wid]->add_region(r);
      live[wid] += live_region;
    }
  }

  FREE_C_HEAP_ARRAY(size_t, live);

#ifdef ASSERT
  ResourceBitMap map(n_regions);
  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSetIterator it(worker_slices[wid]);
    ShenandoahHeapRegion* r = it.next();
    while (r != NULL) {
      size_t idx = r->index();
      assert(ShenandoahPrepareForCompactionTask::is_candidate_region(r), "Sanity: " SIZE_FORMAT, idx);
      assert(!map.at(idx), "No region distributed twice: " SIZE_FORMAT, idx);
      map.at_put(idx, true);
      r = it.next();
    }
  }

  for (size_t rid = 0; rid < n_regions; rid++) {
    bool is_candidate = ShenandoahPrepareForCompactionTask::is_candidate_region(heap->get_region(rid));
    bool is_distributed = map.at(rid);
    assert(is_distributed || !is_candidate, "All candidates are distributed: " SIZE_FORMAT, rid);
  }
#endif
}

void ShenandoahFullGC::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
  ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // We are about to figure out which regions can be compacted; make sure the pinning
  // status has been updated in the GC prologue.
  heap->assert_pinned_region_status();

  {
    // Trash the immediately collectible regions before computing addresses
    ShenandoahTrashImmediateGarbageClosure tigcl;
    heap->heap_region_iterate(&tigcl);

    // Make sure regions are in good state: committed, active, clean.
    // This is needed because we are potentially sliding the data through them.
    ShenandoahEnsureHeapActiveClosure ecl;
    heap->heap_region_iterate(&ecl);
  }

  // Compute the new addresses for regular objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);

    distribute_slices(worker_slices);

    ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
    heap->workers()->run_task(&task);
  }

  // Compute the new addresses for humongous objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
    calculate_target_humongous_objects();
  }
}
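// Phase 3 helpers: with forwarding pointers installed by phase 2, every reference in the
// heap and in the roots is rewritten to point to the forwardee address. Objects themselves
// are not moved yet; preserved mark words are adjusted alongside the roots.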
class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      assert(_ctx->is_marked(obj), "must be marked");
      if (obj->is_forwarded()) {
        oop forw = obj->forwardee();
        RawAccess<IS_NOT_NULL>::oop_store(p, forw);
      }
    }
  }

public:
  ShenandoahAdjustPointersClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void do_oop(oop* p) { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahAdjustPointersClosure _cl;

public:
  ShenandoahAdjustPointersObjectClosure() :
    _heap(ShenandoahHeap::heap()) {
  }
  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    p->oop_iterate(&_cl);
  }
};

class ShenandoahAdjustPointersTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahRegionIterator _regions;

public:
  ShenandoahAdjustPointersTask() :
    AbstractGangTask("Shenandoah Adjust Pointers"),
    _heap(ShenandoahHeap::heap()) {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersObjectClosure obj_cl;
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      if (!r->is_humongous_continuation() && r->has_live()) {
        _heap->marked_object_iterate(r, &obj_cl);
      }
      r = _regions.next();
    }
  }
};

class ShenandoahAdjustRootPointersTask : public AbstractGangTask {
private:
  ShenandoahRootAdjuster* _rp;
  PreservedMarksSet* _preserved_marks;
public:
  ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
    AbstractGangTask("Shenandoah Adjust Root Pointers"),
    _rp(rp),
    _preserved_marks(preserved_marks) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersClosure cl;
    _rp->roots_do(worker_id, &cl);
    _preserved_marks->get(worker_id)->adjust_during_full_gc();
  }
};

void ShenandoahFullGC::phase3_update_references() {
  GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
  ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();
  {
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_adjust_roots);
    ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
    workers->run_task(&task);
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  ShenandoahAdjustPointersTask adjust_pointers_task;
  workers->run_task(&adjust_pointers_task);
}
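// Phase 4 helpers: copy each marked, forwarded object to its forwardee address and
// reinitialize its mark word, then reset each region's top to the new_top computed
// in phase 2. Humongous objects are handled separately in compact_humongous_objects().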
class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  uint const _worker_id;

public:
  ShenandoahCompactObjectsClosure(uint worker_id) :
    _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}

  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    size_t size = (size_t)p->size();
    if (p->is_forwarded()) {
      HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
      HeapWord* compact_to = cast_from_oop<HeapWord*>(p->forwardee());
      Copy::aligned_conjoint_words(compact_from, compact_to, size);
      oop new_obj = cast_to_oop(compact_to);
      new_obj->init_mark();
    }
  }
};

class ShenandoahCompactObjectsTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
    AbstractGangTask("Shenandoah Compact Objects"),
    _heap(ShenandoahHeap::heap()),
    _worker_slices(worker_slices) {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);

    ShenandoahCompactObjectsClosure cl(worker_id);
    ShenandoahHeapRegion* r = slice.next();
    while (r != NULL) {
      assert(!r->is_humongous(), "must not get humongous regions here");
      if (r->has_live()) {
        _heap->marked_object_iterate(r, &cl);
      }
      r->set_top(r->new_top());
      r = slice.next();
    }
  }
};
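// Post-compaction bookkeeping: walk all regions once to reset TAMS on unpinned regions,
// recompute per-region live data from the new occupancy, turn newly emptied regions into
// trash and recycle it, and accumulate the total live size that becomes the new heap usage.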
class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  size_t _live;

public:
  ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()), _live(0) {
    _heap->free_set()->clear();
  }

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert (!r->is_cset(), "cset regions should have been demoted already");

    // Need to reset the complete-top-at-mark-start pointer here because
    // the complete marking bitmap is no longer valid. This ensures
    // size-based iteration in marked_object_iterate().
    // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
    // pinned regions.
    if (!r->is_pinned()) {
      _heap->complete_marking_context()->reset_top_at_mark_start(r);
    }

    size_t live = r->used();

    // Make empty regions that have been allocated into regular regions
    if (r->is_empty() && live > 0) {
      r->make_regular_bypass();
    }

    // Reclaim regular regions that became empty
    if (r->is_regular() && live == 0) {
      r->make_trash();
    }

    // Recycle all trash regions
    if (r->is_trash()) {
      live = 0;
      r->recycle();
    }

    r->set_live_data(live);
    r->reset_alloc_metadata();
    _live += live;
  }

  size_t get_live() {
    return _live;
  }
};

void ShenandoahFullGC::compact_humongous_objects() {
  // Compact humongous regions, based on their fwdptr objects.
  //
  // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
  // humongous regions are already compacted, and do not require further moves, which alleviates
  // sliding costs. We may consider doing this in parallel in the future.

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_start()) {
      oop old_obj = cast_to_oop(r->bottom());
      if (!old_obj->is_forwarded()) {
        // No need to move the object, it stays at the same slot
        continue;
      }
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t old_start = r->index();
      size_t old_end = old_start + num_regions - 1;
      size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
      size_t new_end = new_start + num_regions - 1;
      assert(old_start != new_start, "must be real move");
      assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());

      Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(),
                                   heap->get_region(new_start)->bottom(),
                                   words_size);

      oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
      new_obj->init_mark();

      {
        for (size_t c = old_start; c <= old_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          r->make_regular_bypass();
          r->set_top(r->bottom());
        }

        for (size_t c = new_start; c <= new_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          if (c == new_start) {
            r->make_humongous_start_bypass();
          } else {
            r->make_humongous_cont_bypass();
          }

          // Trailing region may be non-full, record the remainder there
          size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
          if ((c == new_end) && (remainder != 0)) {
            r->set_top(r->bottom() + remainder);
          } else {
            r->set_top(r->end());
          }

          r->reset_alloc_metadata();
        }
      }
    }
  }
}

// This is slightly different from ShHeap::reset_next_mark_bitmap:
// we need to remain able to walk pinned regions.
// Since pinned regions do not move and don't get compacted, we will get holes with
// unreachable objects in them (which may have pointers to unloaded Klasses and thus
// cannot be iterated over using oop->size()). The only way to safely iterate over those is
// with a valid marking bitmap and a valid TAMS pointer. This class only resets marking
// bitmaps for un-pinned regions, and later we only reset TAMS for unpinned regions.
class ShenandoahMCResetCompleteBitmapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahMCResetCompleteBitmapTask() :
    AbstractGangTask("Shenandoah Reset Bitmap") {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    while (region != NULL) {
      if (heap->is_bitmap_slice_committed(region) && !region->is_pinned() && region->has_live()) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahFullGC::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
  ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compact regular objects first
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular);
    ShenandoahCompactObjectsTask compact_task(worker_slices);
    heap->workers()->run_task(&compact_task);
  }

  // Compact humongous objects after regular object moves
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
    compact_humongous_objects();
  }

  // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
  // and must ensure the bitmap is in sync.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
    ShenandoahMCResetCompleteBitmapTask task;
    heap->workers()->run_task(&task);
  }

  // Bring regions into proper states after the collection, and set heap properties.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);

    ShenandoahPostCompactClosure post_compact;
    heap->heap_region_iterate(&post_compact);
    heap->set_used(post_compact.get_live());

    heap->collection_set()->clear();
    heap->free_set()->rebuild();
  }

  heap->clear_cancelled_gc();
}