GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp
/*
 * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSetCandidates.hpp"
#include "gc/g1/g1CollectionSetChooser.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/space.inline.hpp"
#include "runtime/atomic.hpp"
#include "utilities/quickSort.hpp"

// Order regions according to GC efficiency. This will cause regions with a lot
// of live objects and large remembered sets to end up at the end of the array.
// Given that we might skip collecting the last few old regions, if after a few
// mixed GCs the remaining ones have reclaimable bytes under a certain threshold,
// the hope is that the regions we skip are those with both large remembered sets
// and a lot of live objects, rather than (as ordering by reclaimable bytes per
// region would give us) those with just a lot of live objects.
static int order_regions(HeapRegion* hr1, HeapRegion* hr2) {
  // Make sure that NULL entries are moved to the end.
  if (hr1 == NULL) {
    if (hr2 == NULL) {
      return 0;
    } else {
      return 1;
    }
  } else if (hr2 == NULL) {
    return -1;
  }

  double gc_eff1 = hr1->gc_efficiency();
  double gc_eff2 = hr2->gc_efficiency();

  if (gc_eff1 > gc_eff2) {
    return -1;
  } else if (gc_eff1 < gc_eff2) {
    return 1;
  } else {
    return 0;
  }
}
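
// For example (illustrative): regions with GC efficiencies 2.0 and 0.5 plus a
// NULL slot sort as [2.0, 0.5, NULL]: most efficient first, NULL entries last.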

// Determine collection set candidates: For all regions determine whether they
// should be collection set candidates, calculate their efficiency, sort and
// return them as a G1CollectionSetCandidates instance.
// Threads calculate the GC efficiency of the regions they get to process, and
// put them into some work area unsorted. At the end the array is sorted and
// copied into the G1CollectionSetCandidates instance; the caller will be the
// new owner of this object.
class G1BuildCandidateRegionsTask : public AbstractGangTask {

  // Work area for building the set of collection set candidates. Contains
  // references to heap regions with their GC efficiencies calculated. To reduce
  // contention on claiming array elements, worker threads claim parts of this
  // array in chunks; array elements may be NULL as threads might not get enough
  // regions to fill up their chunks completely.
  // Final sorting will remove them.
  class G1BuildCandidateArray : public StackObj {

    uint const _max_size;
    uint const _chunk_size;

    HeapRegion** _data;

    uint volatile _cur_claim_idx;

    // Calculates the maximum array size that will be used.
    static uint required_array_size(uint num_regions, uint chunk_size, uint num_workers) {
      uint const max_waste = num_workers * chunk_size;
      // The array should be aligned with respect to chunk_size.
      uint const aligned_num_regions = ((num_regions + chunk_size - 1) / chunk_size) * chunk_size;

      return aligned_num_regions + max_waste;
    }
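
    // Sizing example (illustrative): num_regions = 10, chunk_size = 4 and
    // num_workers = 2 give aligned_num_regions = 12 and max_waste = 8, i.e.
    // 20 slots, enough even if every worker leaves its last chunk partly empty.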

  public:
    G1BuildCandidateArray(uint max_num_regions, uint chunk_size, uint num_workers) :
      _max_size(required_array_size(max_num_regions, chunk_size, num_workers)),
      _chunk_size(chunk_size),
      _data(NEW_C_HEAP_ARRAY(HeapRegion*, _max_size, mtGC)),
      _cur_claim_idx(0) {
      for (uint i = 0; i < _max_size; i++) {
        _data[i] = NULL;
      }
    }

    ~G1BuildCandidateArray() {
      FREE_C_HEAP_ARRAY(HeapRegion*, _data);
    }

    // Claim a new chunk, returning its bounds [from, to[.
    void claim_chunk(uint& from, uint& to) {
      uint result = Atomic::add(&_cur_claim_idx, _chunk_size);
      assert(_max_size > result - 1,
             "Array too small, is %u should be %u with chunk size %u.",
             _max_size, result, _chunk_size);
      from = result - _chunk_size;
      to = result;
    }
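
    // Note: Atomic::add returns the post-increment value, so concurrent callers
    // receive disjoint ranges without locking; _max_size already reserves room
    // for every worker's last, possibly partially filled, chunk.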

    // Set element in array.
    void set(uint idx, HeapRegion* hr) {
      assert(idx < _max_size, "Index %u out of bounds %u", idx, _max_size);
      assert(_data[idx] == NULL, "Value must not have been set.");
      _data[idx] = hr;
    }

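    // Sort all claimed slots and copy the first num_regions (i.e. every
    // non-NULL) entry into dest; the NULL padding from partially filled chunks
    // sorts to the end, as the asserts verify.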
    void sort_and_copy_into(HeapRegion** dest, uint num_regions) {
      if (_cur_claim_idx == 0) {
        return;
      }
      for (uint i = _cur_claim_idx; i < _max_size; i++) {
        assert(_data[i] == NULL, "must be");
      }
      QuickSort::sort(_data, _cur_claim_idx, order_regions, true);
      for (uint i = num_regions; i < _max_size; i++) {
        assert(_data[i] == NULL, "must be");
      }
      for (uint i = 0; i < num_regions; i++) {
        dest[i] = _data[i];
      }
    }
  };

  // Per-region closure. In addition to determining whether a region should be
  // added to the candidates and calculating those regions' gc efficiencies, it
  // also gathers additional statistics.
  class G1BuildCandidateRegionsClosure : public HeapRegionClosure {
    G1BuildCandidateArray* _array;

    uint _cur_chunk_idx;
    uint _cur_chunk_end;

    uint _regions_added;
    size_t _reclaimable_bytes_added;

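    // Add hr to the current chunk, claiming a fresh chunk from the shared work
    // array whenever the current one is exhausted.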
    void add_region(HeapRegion* hr) {
      if (_cur_chunk_idx == _cur_chunk_end) {
        _array->claim_chunk(_cur_chunk_idx, _cur_chunk_end);
      }
      assert(_cur_chunk_idx < _cur_chunk_end, "Must be");

      hr->calc_gc_efficiency();
      _array->set(_cur_chunk_idx, hr);

      _cur_chunk_idx++;

      _regions_added++;
      _reclaimable_bytes_added += hr->reclaimable_bytes();
    }

    bool should_add(HeapRegion* hr) { return G1CollectionSetChooser::should_add(hr); }

  public:
    G1BuildCandidateRegionsClosure(G1BuildCandidateArray* array) :
      _array(array),
      _cur_chunk_idx(0),
      _cur_chunk_end(0),
      _regions_added(0),
      _reclaimable_bytes_added(0) { }

    bool do_heap_region(HeapRegion* r) {
      // We will skip any region that's currently used as an old GC
      // alloc region (we should not consider those for collection
      // before we fill them up).
      if (should_add(r) && !G1CollectedHeap::heap()->is_old_gc_alloc_region(r)) {
        add_region(r);
      } else if (r->is_old()) {
        // Keep remembered sets for humongous regions, otherwise clean out remembered
        // sets for old regions.
        r->rem_set()->clear(true /* only_cardset */);
      } else {
        assert(r->is_archive() || !r->is_old() || !r->rem_set()->is_tracked(),
               "Failed to clear unused remembered set of region %u (%s) that is %s",
               r->hrm_index(), r->get_type_str(), r->rem_set()->get_state_str());
      }
      return false;
    }

    uint regions_added() const { return _regions_added; }
    size_t reclaimable_bytes_added() const { return _reclaimable_bytes_added; }
  };

  G1CollectedHeap* _g1h;
  HeapRegionClaimer _hrclaimer;

  uint volatile _num_regions_added;
  size_t volatile _reclaimable_bytes_added;

  G1BuildCandidateArray _result;

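  // Merge a worker's counts into the shared totals; multiple workers may call
  // this concurrently, hence the atomic updates.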
  void update_totals(uint num_regions, size_t reclaimable_bytes) {
    if (num_regions > 0) {
      assert(reclaimable_bytes > 0, "invariant");
      Atomic::add(&_num_regions_added, num_regions);
      Atomic::add(&_reclaimable_bytes_added, reclaimable_bytes);
    } else {
      assert(reclaimable_bytes == 0, "invariant");
    }
  }

public:
  G1BuildCandidateRegionsTask(uint max_num_regions, uint chunk_size, uint num_workers) :
    AbstractGangTask("G1 Build Candidate Regions"),
    _g1h(G1CollectedHeap::heap()),
    _hrclaimer(num_workers),
    _num_regions_added(0),
    _reclaimable_bytes_added(0),
    _result(max_num_regions, chunk_size, num_workers) { }

  void work(uint worker_id) {
    G1BuildCandidateRegionsClosure cl(&_result);
    _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
    update_totals(cl.regions_added(), cl.reclaimable_bytes_added());
  }

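  // Sort the work area and return a newly allocated G1CollectionSetCandidates;
  // the caller becomes the owner of the returned object and its regions array.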
  G1CollectionSetCandidates* get_sorted_candidates() {
    HeapRegion** regions = NEW_C_HEAP_ARRAY(HeapRegion*, _num_regions_added, mtGC);
    _result.sort_and_copy_into(regions, _num_regions_added);
    return new G1CollectionSetCandidates(regions,
                                         _num_regions_added,
                                         _reclaimable_bytes_added);
  }
};
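
// Chunk size for the parallel candidate build: the regions are divided evenly
// among the workers, e.g. 2048 regions across 8 workers gives chunks of 256;
// MAX2 guards the case of fewer regions than workers.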
uint G1CollectionSetChooser::calculate_work_chunk_size(uint num_workers, uint num_regions) {
  assert(num_workers > 0, "Active gc workers should be greater than 0");
  return MAX2(num_regions / num_workers, 1U);
}
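
// Candidates are non-young, non-pinned regions whose live occupancy is low
// enough to make evacuation worthwhile and whose remembered set is complete.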
bool G1CollectionSetChooser::should_add(HeapRegion* hr) {
  return !hr->is_young() &&
         !hr->is_pinned() &&
         region_occupancy_low_enough_for_evac(hr->live_bytes()) &&
         hr->rem_set()->is_complete();
}

// Closure implementing early pruning (removal) of regions meeting the
// G1HeapWastePercent criteria. That is, pruning stops once more than
// _max_pruned regions have been removed (keeping enough candidates for forward
// progress in evacuation) or once the waste accumulated by the removed regions
// would exceed _max_wasted.
class G1PruneRegionClosure : public HeapRegionClosure {
  uint _num_pruned;
  size_t _cur_wasted;

  uint const _max_pruned;
  size_t const _max_wasted;

public:
  G1PruneRegionClosure(uint max_pruned, size_t max_wasted) :
    _num_pruned(0), _cur_wasted(0), _max_pruned(max_pruned), _max_wasted(max_wasted) { }

  virtual bool do_heap_region(HeapRegion* r) {
    size_t const reclaimable = r->reclaimable_bytes();
    if (_num_pruned > _max_pruned ||
        _cur_wasted + reclaimable > _max_wasted) {
      return true;
    }
    r->rem_set()->clear(true /* only_cardset */);
    _cur_wasted += reclaimable;
    _num_pruned++;
    return false;
  }

  uint num_pruned() const { return _num_pruned; }
  size_t wasted() const { return _cur_wasted; }
};
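
// Prune the lowest-efficiency tail of the sorted candidate list: remove regions
// as long as at least min_old_cset_length candidates remain and the reclaimable
// bytes given up stay within the allowed waste.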
void G1CollectionSetChooser::prune(G1CollectionSetCandidates* candidates) {
  G1Policy* p = G1CollectedHeap::heap()->policy();

  uint min_old_cset_length = p->calc_min_old_cset_length(candidates);
  uint num_candidates = candidates->num_regions();

  if (min_old_cset_length < num_candidates) {
    size_t allowed_waste = p->allowed_waste_in_collection_set();

    G1PruneRegionClosure prune_cl(num_candidates - min_old_cset_length,
                                  allowed_waste);
    candidates->iterate_backwards(&prune_cl);

    log_debug(gc, ergo, cset)("Pruned %u regions out of %u, leaving " SIZE_FORMAT " bytes waste (allowed " SIZE_FORMAT ")",
                              prune_cl.num_pruned(),
                              candidates->num_regions(),
                              prune_cl.wasted(),
                              allowed_waste);

    candidates->remove_from_end(prune_cl.num_pruned(), prune_cl.wasted());
  }
}
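
// Illustrative call site (hypothetical names, not from this file): after
// concurrent marking, G1 policy code rebuilds the candidate list roughly as
//
//   G1CollectionSetCandidates* candidates =
//       G1CollectionSetChooser::build(workers, num_regions);
//
// and takes ownership of the returned object.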
G1CollectionSetCandidates* G1CollectionSetChooser::build(WorkGang* workers, uint max_num_regions) {
  uint num_workers = workers->active_workers();
  uint chunk_size = calculate_work_chunk_size(num_workers, max_num_regions);

  G1BuildCandidateRegionsTask cl(max_num_regions, chunk_size, num_workers);
  workers->run_task(&cl, num_workers);

  G1CollectionSetCandidates* result = cl.get_sorted_candidates();
  prune(result);
  result->verify();
  return result;
}