GitHub Repository: PojavLauncherTeam/jdk17u
Path: blob/master/src/hotspot/share/gc/g1/g1CollectionSet.cpp

/*
 * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
26
#include "gc/g1/g1CollectedHeap.inline.hpp"
27
#include "gc/g1/g1CollectionSet.hpp"
28
#include "gc/g1/g1CollectionSetCandidates.hpp"
29
#include "gc/g1/g1CollectorState.hpp"
30
#include "gc/g1/g1HotCardCache.hpp"
31
#include "gc/g1/g1ParScanThreadState.hpp"
32
#include "gc/g1/g1Policy.hpp"
33
#include "gc/g1/heapRegion.inline.hpp"
34
#include "gc/g1/heapRegionRemSet.hpp"
35
#include "gc/g1/heapRegionSet.hpp"
36
#include "logging/logStream.hpp"
37
#include "runtime/orderAccess.hpp"
38
#include "utilities/debug.hpp"
39
#include "utilities/globalDefinitions.hpp"
40
#include "utilities/quickSort.hpp"
41
42
G1CollectorState* G1CollectionSet::collector_state() const {
  return _g1h->collector_state();
}

G1GCPhaseTimes* G1CollectionSet::phase_times() {
  return _policy->phase_times();
}

double G1CollectionSet::predict_region_non_copy_time_ms(HeapRegion* hr) const {
  return _policy->predict_region_non_copy_time_ms(hr, collector_state()->in_young_only_phase());
}

G1CollectionSet::G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy) :
  _g1h(g1h),
  _policy(policy),
  _candidates(NULL),
  _eden_region_length(0),
  _survivor_region_length(0),
  _old_region_length(0),
  _collection_set_regions(NULL),
  _collection_set_cur_length(0),
  _collection_set_max_length(0),
  _num_optional_regions(0),
  _bytes_used_before(0),
  _recorded_rs_length(0),
  _inc_build_state(Inactive),
  _inc_part_start(0),
  _inc_collection_set_stats(NULL),
  _inc_bytes_used_before(0),
  _inc_recorded_rs_length(0),
  _inc_recorded_rs_length_diff(0),
  _inc_predicted_non_copy_time_ms(0.0),
  _inc_predicted_non_copy_time_ms_diff(0.0) {
}

G1CollectionSet::~G1CollectionSet() {
  FREE_C_HEAP_ARRAY(uint, _collection_set_regions);
  FREE_C_HEAP_ARRAY(IncCollectionSetRegionStat, _inc_collection_set_stats);
  free_optional_regions();
  clear_candidates();
}

void G1CollectionSet::init_region_lengths(uint eden_cset_region_length,
                                          uint survivor_cset_region_length) {
  assert_at_safepoint_on_vm_thread();

  _eden_region_length = eden_cset_region_length;
  _survivor_region_length = survivor_cset_region_length;

  assert((size_t) young_region_length() == _collection_set_cur_length,
         "Young region length %u should match collection set length " SIZE_FORMAT, young_region_length(), _collection_set_cur_length);

  _old_region_length = 0;
  free_optional_regions();
}

void G1CollectionSet::initialize(uint max_region_length) {
  guarantee(_collection_set_regions == NULL, "Must only initialize once.");
  _collection_set_max_length = max_region_length;
  _collection_set_regions = NEW_C_HEAP_ARRAY(uint, max_region_length, mtGC);
  _inc_collection_set_stats = NEW_C_HEAP_ARRAY(IncCollectionSetRegionStat, max_region_length, mtGC);
}

void G1CollectionSet::free_optional_regions() {
  _num_optional_regions = 0;
}

void G1CollectionSet::clear_candidates() {
  delete _candidates;
  _candidates = NULL;
}

bool G1CollectionSet::has_candidates() {
  return _candidates != NULL && !_candidates->is_empty();
}

void G1CollectionSet::set_recorded_rs_length(size_t rs_length) {
  _recorded_rs_length = rs_length;
}

// Add the heap region at the head of the non-incremental collection set
void G1CollectionSet::add_old_region(HeapRegion* hr) {
  assert_at_safepoint_on_vm_thread();

  assert(_inc_build_state == Active,
         "Precondition, actively building cset or adding optional later on");
  assert(hr->is_old(), "the region should be old");

  assert(!hr->in_collection_set(), "should not already be in the collection set");
  _g1h->register_old_region_with_region_attr(hr);

  _collection_set_regions[_collection_set_cur_length++] = hr->hrm_index();
  assert(_collection_set_cur_length <= _collection_set_max_length, "Collection set now larger than maximum size.");

  _bytes_used_before += hr->used();
  _recorded_rs_length += hr->rem_set()->occupied();
  _old_region_length++;

  _g1h->old_set_remove(hr);
}

void G1CollectionSet::add_optional_region(HeapRegion* hr) {
  assert(hr->is_old(), "the region should be old");
  assert(!hr->in_collection_set(), "should not already be in the CSet");

  _g1h->register_optional_region_with_region_attr(hr);

  hr->set_index_in_opt_cset(_num_optional_regions++);
}

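// Prepare for building the next incremental collection set: reset the cached
// per-region stats (debug builds only) and zero the incremental accumulators
// before young regions start being added again.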
void G1CollectionSet::start_incremental_building() {
  assert(_collection_set_cur_length == 0, "Collection set must be empty before starting a new collection set.");
  assert(_inc_build_state == Inactive, "Precondition");
#ifdef ASSERT
  for (size_t i = 0; i < _collection_set_max_length; i++) {
    _inc_collection_set_stats[i].reset();
  }
#endif

  _inc_bytes_used_before = 0;

  _inc_recorded_rs_length = 0;
  _inc_recorded_rs_length_diff = 0;
  _inc_predicted_non_copy_time_ms = 0.0;
  _inc_predicted_non_copy_time_ms_diff = 0.0;

  update_incremental_marker();
}

void G1CollectionSet::finalize_incremental_building() {
  assert(_inc_build_state == Active, "Precondition");
  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");

  // The two "main" fields, _inc_recorded_rs_length and
  // _inc_predicted_non_copy_time_ms, are updated by the thread
  // that adds a new region to the CSet. Further updates by the
  // concurrent refinement thread that samples the young RSet lengths
  // are accumulated in the *_diff fields. Here we add the diffs to
  // the "main" fields.

  _inc_recorded_rs_length += _inc_recorded_rs_length_diff;
  _inc_predicted_non_copy_time_ms += _inc_predicted_non_copy_time_ms_diff;

  _inc_recorded_rs_length_diff = 0;
  _inc_predicted_non_copy_time_ms_diff = 0.0;
}

void G1CollectionSet::clear() {
  assert_at_safepoint_on_vm_thread();
  _collection_set_cur_length = 0;
}

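// Iterate over the current collection set. The length is read once and a
// load-load barrier is issued before the region indices are read; this pairs
// with the store-store barrier in add_young_region_common() so a concurrent
// reader never sees a length larger than the published array contents.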
void G1CollectionSet::iterate(HeapRegionClosure* cl) const {
  size_t len = _collection_set_cur_length;
  OrderAccess::loadload();

  for (uint i = 0; i < len; i++) {
    HeapRegion* r = _g1h->region_at(_collection_set_regions[i]);
    bool result = cl->do_heap_region(r);
    if (result) {
      cl->set_incomplete();
      return;
    }
  }
}

void G1CollectionSet::par_iterate(HeapRegionClosure* cl,
                                  HeapRegionClaimer* hr_claimer,
                                  uint worker_id,
                                  uint total_workers) const {
  iterate_part_from(cl, hr_claimer, 0, cur_length(), worker_id, total_workers);
}

void G1CollectionSet::iterate_optional(HeapRegionClosure* cl) const {
  assert_at_safepoint();

  for (uint i = 0; i < _num_optional_regions; i++) {
    HeapRegion* r = _candidates->at(i);
    bool result = cl->do_heap_region(r);
    guarantee(!result, "Must not cancel iteration");
  }
}

void G1CollectionSet::iterate_incremental_part_from(HeapRegionClosure* cl,
                                                    HeapRegionClaimer* hr_claimer,
                                                    uint worker_id,
                                                    uint total_workers) const {
  iterate_part_from(cl, hr_claimer, _inc_part_start, increment_length(), worker_id, total_workers);
}

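// Iterate over the [offset, offset + length) slice of the collection set with
// multiple workers. Each worker starts at its own position in the slice
// ((worker_id * length) / total_workers), wraps around at the end, and stops
// once it is back at its start position; the optional HeapRegionClaimer lets
// each region be claimed by at most one worker.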
void G1CollectionSet::iterate_part_from(HeapRegionClosure* cl,
                                        HeapRegionClaimer* hr_claimer,
                                        size_t offset,
                                        size_t length,
                                        uint worker_id,
                                        uint total_workers) const {
  assert_at_safepoint();
  if (length == 0) {
    return;
  }

  size_t start_pos = (worker_id * length) / total_workers;
  size_t cur_pos = start_pos;

  do {
    uint region_idx = _collection_set_regions[cur_pos + offset];
    if (hr_claimer == NULL || hr_claimer->claim_region(region_idx)) {
      HeapRegion* r = _g1h->region_at(region_idx);
      bool result = cl->do_heap_region(r);
      guarantee(!result, "Must not cancel iteration");
    }

    cur_pos++;
    if (cur_pos == length) {
      cur_pos = 0;
    }
  } while (cur_pos != start_pos);
}

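// Called outside a safepoint (see the assert below) when remembered set
// sampling observes a new RSet length for a young region: the cached stats for
// the region are updated and the deltas are accumulated in the *_diff fields,
// which finalize_incremental_building() later folds into the main totals.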
void G1CollectionSet::update_young_region_prediction(HeapRegion* hr,
                                                     size_t new_rs_length) {
  // Update the CSet information that is dependent on the new RS length
  assert(hr->is_young(), "Precondition");
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at a safepoint");

  IncCollectionSetRegionStat* stat = &_inc_collection_set_stats[hr->hrm_index()];

  size_t old_rs_length = stat->_rs_length;
  assert(old_rs_length <= new_rs_length,
         "Remembered set decreased (changed from " SIZE_FORMAT " to " SIZE_FORMAT " region %u type %s)",
         old_rs_length, new_rs_length, hr->hrm_index(), hr->get_short_type_str());
  size_t rs_length_diff = new_rs_length - old_rs_length;
  stat->_rs_length = new_rs_length;
  _inc_recorded_rs_length_diff += rs_length_diff;

  double old_non_copy_time = stat->_non_copy_time_ms;
  assert(old_non_copy_time >= 0.0, "Non copy time for region %u not initialized yet, is %.3f", hr->hrm_index(), old_non_copy_time);
  double new_non_copy_time = predict_region_non_copy_time_ms(hr);
  double non_copy_time_ms_diff = new_non_copy_time - old_non_copy_time;

  stat->_non_copy_time_ms = new_non_copy_time;
  _inc_predicted_non_copy_time_ms_diff += non_copy_time_ms_diff;
}

void G1CollectionSet::add_young_region_common(HeapRegion* hr) {
  assert(hr->is_young(), "invariant");
  assert(_inc_build_state == Active, "Precondition");

  // This routine is used when:
  // * adding survivor regions to the incremental cset at the end of an
  //   evacuation pause or
  // * adding the current allocation region to the incremental cset
  //   when it is retired.
  // Therefore this routine may be called at a safepoint by the
  // VM thread, or in-between safepoints by mutator threads (when
  // retiring the current allocation region)
  // We need to clear and set the cached recorded/cached collection set
  // information in the heap region here (before the region gets added
  // to the collection set). An individual heap region's cached values
  // are calculated, aggregated with the policy collection set info,
  // and cached in the heap region here (initially) and (subsequently)
  // by the Young List sampling code.
  // Ignore calls to this due to retirement during full gc.

  if (!_g1h->collector_state()->in_full_gc()) {
    size_t rs_length = hr->rem_set()->occupied();
    double non_copy_time = predict_region_non_copy_time_ms(hr);

    // Cache the values we have added to the aggregated information
    // in the heap region in case we have to remove this region from
    // the incremental collection set, or it is updated by the
    // rset sampling code

    IncCollectionSetRegionStat* stat = &_inc_collection_set_stats[hr->hrm_index()];
    stat->_rs_length = rs_length;
    stat->_non_copy_time_ms = non_copy_time;

    _inc_recorded_rs_length += rs_length;
    _inc_predicted_non_copy_time_ms += non_copy_time;
    _inc_bytes_used_before += hr->used();
  }

  assert(!hr->in_collection_set(), "invariant");
  _g1h->register_young_region_with_region_attr(hr);

  // We use UINT_MAX as "invalid" marker in verification.
  assert(_collection_set_cur_length < (UINT_MAX - 1),
         "Collection set is too large with " SIZE_FORMAT " entries", _collection_set_cur_length);
  hr->set_young_index_in_cset((uint)_collection_set_cur_length + 1);

  _collection_set_regions[_collection_set_cur_length] = hr->hrm_index();
  // Concurrent readers must observe the store of the value in the array before an
  // update to the length field.
  OrderAccess::storestore();
  _collection_set_cur_length++;
  assert(_collection_set_cur_length <= _collection_set_max_length, "Collection set larger than maximum allowed.");
}

void G1CollectionSet::add_survivor_regions(HeapRegion* hr) {
  assert(hr->is_survivor(), "Must only add survivor regions, but is %s", hr->get_type_str());
  add_young_region_common(hr);
}

void G1CollectionSet::add_eden_region(HeapRegion* hr) {
  assert(hr->is_eden(), "Must only add eden regions, but is %s", hr->get_type_str());
  add_young_region_common(hr);
}

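// Verification and printing support for the collection set, only compiled into
// non-product builds.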
#ifndef PRODUCT
class G1VerifyYoungAgesClosure : public HeapRegionClosure {
public:
  bool _valid;

  G1VerifyYoungAgesClosure() : HeapRegionClosure(), _valid(true) { }

  virtual bool do_heap_region(HeapRegion* r) {
    guarantee(r->is_young(), "Region must be young but is %s", r->get_type_str());

    if (!r->has_surv_rate_group()) {
      log_error(gc, verify)("## encountered young region without surv_rate_group");
      _valid = false;
    }

    if (!r->has_valid_age_in_surv_rate()) {
      log_error(gc, verify)("## encountered invalid age in young region");
      _valid = false;
    }

    return false;
  }

  bool valid() const { return _valid; }
};

bool G1CollectionSet::verify_young_ages() {
  assert_at_safepoint_on_vm_thread();

  G1VerifyYoungAgesClosure cl;
  iterate(&cl);

  if (!cl.valid()) {
    LogStreamHandle(Error, gc, verify) log;
    print(&log);
  }

  return cl.valid();
}

class G1PrintCollectionSetDetailClosure : public HeapRegionClosure {
  outputStream* _st;
public:
  G1PrintCollectionSetDetailClosure(outputStream* st) : HeapRegionClosure(), _st(st) { }

  virtual bool do_heap_region(HeapRegion* r) {
    assert(r->in_collection_set(), "Region %u should be in collection set", r->hrm_index());
    _st->print_cr(" " HR_FORMAT ", P: " PTR_FORMAT "N: " PTR_FORMAT ", age: %4d",
                  HR_FORMAT_PARAMS(r),
                  p2i(r->prev_top_at_mark_start()),
                  p2i(r->next_top_at_mark_start()),
                  r->has_surv_rate_group() ? r->age_in_surv_rate_group() : -1);
    return false;
  }
};

void G1CollectionSet::print(outputStream* st) {
  st->print_cr("\nCollection_set:");

  G1PrintCollectionSetDetailClosure cl(st);
  iterate(&cl);
}
#endif // !PRODUCT

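// Finalize the young (mandatory) part of the collection set for this pause:
// fold in the sampled prediction diffs, record the eden/survivor lengths,
// convert the survivors to eden, and return how much of the target pause time
// is left over for adding old regions.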
double G1CollectionSet::finalize_young_part(double target_pause_time_ms, G1SurvivorRegions* survivors) {
  Ticks start_time = Ticks::now();

  finalize_incremental_building();

  guarantee(target_pause_time_ms > 0.0,
            "target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);

  size_t pending_cards = _policy->pending_cards_at_gc_start() + _g1h->hot_card_cache()->num_entries();

  log_trace(gc, ergo, cset)("Start choosing CSet. Pending cards: " SIZE_FORMAT " target pause time: %1.2fms",
                            pending_cards, target_pause_time_ms);

  // The young list is laid out with the survivor regions from the previous
  // pause appended to the RHS of the young list, i.e.
  //   [Newly Young Regions ++ Survivors from last pause].

  uint eden_region_length = _g1h->eden_regions_count();
  uint survivor_region_length = survivors->length();
  init_region_lengths(eden_region_length, survivor_region_length);

  verify_young_cset_indices();

  // Clear the fields that point to the survivor list - they are all young now.
  survivors->convert_to_eden();

  _bytes_used_before = _inc_bytes_used_before;

  // The number of recorded young regions is the incremental
  // collection set's current size
  set_recorded_rs_length(_inc_recorded_rs_length);

  double predicted_base_time_ms = _policy->predict_base_elapsed_time_ms(pending_cards);
  double predicted_eden_time = _inc_predicted_non_copy_time_ms + _policy->predict_eden_copy_time_ms(eden_region_length);
  double remaining_time_ms = MAX2(target_pause_time_ms - (predicted_base_time_ms + predicted_eden_time), 0.0);

  log_trace(gc, ergo, cset)("Added young regions to CSet. Eden: %u regions, Survivors: %u regions, "
                            "predicted eden time: %1.2fms, predicted base time: %1.2fms, target pause time: %1.2fms, remaining time: %1.2fms",
                            eden_region_length, survivor_region_length,
                            predicted_eden_time, predicted_base_time_ms, target_pause_time_ms, remaining_time_ms);

  phase_times()->record_young_cset_choice_time_ms((Ticks::now() - start_time).seconds() * 1000.0);

  return remaining_time_ms;
}

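// Comparator used to sort the collection set array by ascending region index.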
static int compare_region_idx(const uint a, const uint b) {
  if (a > b) {
    return 1;
  } else if (a == b) {
    return 0;
  } else {
    return -1;
  }
}

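// Finalize the old part of the collection set: in the mixed phase, ask the
// policy how many candidate old regions fit into the remaining pause time,
// move that many into the collection set, and register the next batch as
// optional regions. Finally the collection set is sorted by region index.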
void G1CollectionSet::finalize_old_part(double time_remaining_ms) {
  double non_young_start_time_sec = os::elapsedTime();

  if (collector_state()->in_mixed_phase()) {
    candidates()->verify();

    uint num_initial_old_regions;
    uint num_optional_old_regions;

    _policy->calculate_old_collection_set_regions(candidates(),
                                                  time_remaining_ms,
                                                  num_initial_old_regions,
                                                  num_optional_old_regions);

    // Prepare initial old regions.
    move_candidates_to_collection_set(num_initial_old_regions);

    // Prepare optional old regions for evacuation.
    uint candidate_idx = candidates()->cur_idx();
    for (uint i = 0; i < num_optional_old_regions; i++) {
      add_optional_region(candidates()->at(candidate_idx + i));
    }

    candidates()->verify();
  }

  stop_incremental_building();

  double non_young_end_time_sec = os::elapsedTime();
  phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);

  QuickSort::sort(_collection_set_regions, _collection_set_cur_length, compare_region_idx, true);
}

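// Move the first num_old_candidate_regions regions from the candidate list
// into the collection set proper and remove them from the candidates.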
void G1CollectionSet::move_candidates_to_collection_set(uint num_old_candidate_regions) {
  if (num_old_candidate_regions == 0) {
    return;
  }
  uint candidate_idx = candidates()->cur_idx();
  for (uint i = 0; i < num_old_candidate_regions; i++) {
    HeapRegion* r = candidates()->at(candidate_idx + i);
    // This potentially optional candidate region is going to be an actual collection
    // set region. Clear cset marker.
    _g1h->clear_region_attr(r);
    add_old_region(r);
  }
  candidates()->remove(num_old_candidate_regions);

  candidates()->verify();
}

void G1CollectionSet::finalize_initial_collection_set(double target_pause_time_ms, G1SurvivorRegions* survivor) {
  double time_remaining_ms = finalize_young_part(target_pause_time_ms, survivor);
  finalize_old_part(time_remaining_ms);
}

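// During an evacuation pause, select as many of the prepared optional regions
// as fit into the remaining pause time and move them into the current
// collection set increment. Returns true if any region was selected.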
bool G1CollectionSet::finalize_optional_for_evacuation(double remaining_pause_time) {
  update_incremental_marker();

  uint num_selected_regions;
  _policy->calculate_optional_collection_set_regions(candidates(),
                                                     _num_optional_regions,
                                                     remaining_pause_time,
                                                     num_selected_regions);

  move_candidates_to_collection_set(num_selected_regions);

  _num_optional_regions -= num_selected_regions;

  stop_incremental_building();

  _g1h->verify_region_attr_remset_update();

  return num_selected_regions > 0;
}

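// Drop all optional regions that were not selected for evacuation: restore
// their region attributes so the remembered set information stays correct, and
// clear their index in the optional collection set.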
void G1CollectionSet::abandon_optional_collection_set(G1ParScanThreadStateSet* pss) {
  for (uint i = 0; i < _num_optional_regions; i++) {
    HeapRegion* r = candidates()->at(candidates()->cur_idx() + i);
    pss->record_unused_optional_region(r);
    // Clear collection set marker and make sure that the remembered set information
    // is correct as we still need it later.
    _g1h->clear_region_attr(r);
    _g1h->register_region_with_region_attr(r);
    r->clear_index_in_opt_cset();
  }
  free_optional_regions();

  _g1h->verify_region_attr_remset_update();
}

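// Debug-only verification that every young region in the collection set has a
// unique, in-range young index (young_index_in_cset() is 1-based; UINT_MAX is
// used as the "not seen yet" marker).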
#ifdef ASSERT
class G1VerifyYoungCSetIndicesClosure : public HeapRegionClosure {
private:
  size_t _young_length;
  uint* _heap_region_indices;
public:
  G1VerifyYoungCSetIndicesClosure(size_t young_length) : HeapRegionClosure(), _young_length(young_length) {
    _heap_region_indices = NEW_C_HEAP_ARRAY(uint, young_length + 1, mtGC);
    for (size_t i = 0; i < young_length + 1; i++) {
      _heap_region_indices[i] = UINT_MAX;
    }
  }
  ~G1VerifyYoungCSetIndicesClosure() {
    FREE_C_HEAP_ARRAY(int, _heap_region_indices);
  }

  virtual bool do_heap_region(HeapRegion* r) {
    const uint idx = r->young_index_in_cset();

    assert(idx > 0, "Young index must be set for all regions in the incremental collection set but is not for region %u.", r->hrm_index());
    assert(idx <= _young_length, "Young cset index %u too large for region %u", idx, r->hrm_index());

    assert(_heap_region_indices[idx] == UINT_MAX,
           "Index %d used by multiple regions, first use by region %u, second by region %u",
           idx, _heap_region_indices[idx], r->hrm_index());

    _heap_region_indices[idx] = r->hrm_index();

    return false;
  }
};

void G1CollectionSet::verify_young_cset_indices() const {
  assert_at_safepoint_on_vm_thread();

  G1VerifyYoungCSetIndicesClosure cl(_collection_set_cur_length);
  iterate(&cl);
}
#endif