GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp
/*
 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc/g1/g1CollectedHeap.hpp"

#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "runtime/atomic.hpp"

G1GCPhaseTimes* G1CollectedHeap::phase_times() const {
  return _policy->phase_times();
}

G1EvacStats* G1CollectedHeap::alloc_buffer_stats(G1HeapRegionAttr dest) {
  switch (dest.type()) {
    case G1HeapRegionAttr::Young:
      return &_survivor_evac_stats;
    case G1HeapRegionAttr::Old:
      return &_old_evac_stats;
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

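// Returns the desired PLAB size, in HeapWords, for allocations into the given
// destination, based on that destination's evacuation statistics and capped
// below the humongous object threshold.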
size_t G1CollectedHeap::desired_plab_sz(G1HeapRegionAttr dest) {
  size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz(workers()->active_workers());
  // Prevent humongous PLAB sizes for two reasons:
  // * PLABs are allocated using similar paths as oops, but should
  //   never be in a humongous region
  // * Allowing humongous PLABs needlessly churns the region free lists
  return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
}

// Inline functions for G1CollectedHeap

// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }

// Return the region with the given index, or NULL if unmapped. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at_or_null(uint index) const { return _hrm.at_or_null(index); }

inline HeapRegion* G1CollectedHeap::next_region_in_humongous(HeapRegion* hr) const {
  return _hrm.next_region_in_humongous(hr);
}

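// Convert a heap address into the index of the HeapRegion containing it:
// the byte offset from the start of the reserved heap, shifted right by
// the log of the region size.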
inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
  assert(is_in_reserved(addr),
         "Cannot calculate region index for address " PTR_FORMAT " that is outside of the heap [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(addr), p2i(reserved().start()), p2i(reserved().end()));
  return (uint)(pointer_delta(addr, reserved().start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
}

inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
  return _hrm.reserved().start() + index * HeapRegion::GrainWords;
}

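// Return the HeapRegion containing the given address. The address must be
// non-NULL and within the reserved heap (both are asserted).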
template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(is_in_reserved((const void*) addr),
         "Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
         p2i((void*)addr), p2i(reserved().start()), p2i(reserved().end()));
  return _hrm.addr_to_region((HeapWord*)(void*) addr);
}

template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing_or_null(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(is_in_reserved((const void*) addr),
         "Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
         p2i((void*)addr), p2i(reserved().start()), p2i(reserved().end()));
  uint const region_idx = addr_to_region(addr);
  return region_at_or_null(region_idx);
}

inline void G1CollectedHeap::old_set_add(HeapRegion* hr) {
  _old_set.add(hr);
}

inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
  _old_set.remove(hr);
}

inline void G1CollectedHeap::archive_set_add(HeapRegion* hr) {
  _archive_set.add(hr);
}

// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing(start);)
  assert(word_size > 0, "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->is_humongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  card_table()->g1_mark_as_young(mr);
}

inline G1ScannerTasksQueue* G1CollectedHeap::task_queue(uint i) const {
  return _task_queues->queue(i);
}

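// Query the "next" concurrent marking bitmap for the given object.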
inline bool G1CollectedHeap::is_marked_next(oop obj) const {
  return _cm->next_mark_bitmap()->is_marked(obj);
}

inline bool G1CollectedHeap::is_in_cset(oop obj) {
  return is_in_cset(cast_from_oop<HeapWord*>(obj));
}

inline bool G1CollectedHeap::is_in_cset(HeapWord* addr) {
  return _region_attr.is_in_cset(addr);
}

bool G1CollectedHeap::is_in_cset(const HeapRegion* hr) {
  return _region_attr.is_in_cset(hr);
}

bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) {
  return _region_attr.is_in_cset_or_humongous(cast_from_oop<HeapWord*>(obj));
}

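// Accessors and updaters for the per-region attribute table (_region_attr):
// lookups by address or region index, and registration of humongous, old and
// optional regions.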
G1HeapRegionAttr G1CollectedHeap::region_attr(const void* addr) const {
  return _region_attr.at((HeapWord*)addr);
}

G1HeapRegionAttr G1CollectedHeap::region_attr(uint idx) const {
  return _region_attr.get_by_index(idx);
}

void G1CollectedHeap::register_humongous_region_with_region_attr(uint index) {
  _region_attr.set_humongous(index, region_at(index)->rem_set()->is_tracked());
}

void G1CollectedHeap::register_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_has_remset(r->hrm_index(), r->rem_set()->is_tracked());
}

void G1CollectedHeap::register_old_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_in_old(r->hrm_index(), r->rem_set()->is_tracked());
  _rem_set->exclude_region_from_scan(r->hrm_index());
}

void G1CollectedHeap::register_optional_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_optional(r->hrm_index(), r->rem_set()->is_tracked());
}

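// Evacuation failure bookkeeping: the number of regions that failed
// evacuation is read with an atomic load and bumped with a relaxed
// atomic increment.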
bool G1CollectedHeap::evacuation_failed() const {
  return num_regions_failed_evacuation() > 0;
}

uint G1CollectedHeap::num_regions_failed_evacuation() const {
  return Atomic::load(&_num_regions_failed_evacuation);
}

void G1CollectedHeap::notify_region_failed_evacuation() {
  Atomic::inc(&_num_regions_failed_evacuation, memory_order_relaxed);
}

#ifndef PRODUCT
// Support for G1EvacuationFailureALot

inline bool
G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool for_young_gc,
                                                     bool during_concurrent_start,
                                                     bool mark_or_rebuild_in_progress) {
  bool res = false;
  if (mark_or_rebuild_in_progress) {
    res |= G1EvacuationFailureALotDuringConcMark;
  }
  if (during_concurrent_start) {
    res |= G1EvacuationFailureALotDuringConcurrentStart;
  }
  if (for_young_gc) {
    res |= G1EvacuationFailureALotDuringYoungGC;
  } else {
    // GCs are mixed
    res |= G1EvacuationFailureALotDuringMixedGC;
  }
  return res;
}

inline void
G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
  if (G1EvacuationFailureALot) {
    // Note we can't assert that _evacuation_failure_alot_for_current_gc
    // is clear here. It may have been set during a previous GC but that GC
    // did not copy enough objects (i.e. G1EvacuationFailureALotCount) to
    // trigger an evacuation failure and clear the flags and counts.

    // Check if we have gone over the interval.
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number;

    _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);

    // Now check if G1EvacuationFailureALot is enabled for the current GC type.
    const bool in_young_only_phase = collector_state()->in_young_only_phase();
    const bool in_concurrent_start_gc = collector_state()->in_concurrent_start_gc();
    const bool mark_or_rebuild_in_progress = collector_state()->mark_or_rebuild_in_progress();

    _evacuation_failure_alot_for_current_gc &=
      evacuation_failure_alot_for_gc_type(in_young_only_phase,
                                          in_concurrent_start_gc,
                                          mark_or_rebuild_in_progress);
  }
}

inline bool G1CollectedHeap::evacuation_should_fail() {
  if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
    return false;
  }
  // G1EvacuationFailureALot is in effect for current GC
  // Access to _evacuation_failure_alot_count is not atomic;
  // the value does not have to be exact.
  if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
    return false;
  }
  _evacuation_failure_alot_count = 0;
  return true;
}

inline void G1CollectedHeap::reset_evacuation_should_fail() {
  if (G1EvacuationFailureALot) {
    _evacuation_failure_alot_gc_number = total_collections();
    _evacuation_failure_alot_count = 0;
    _evacuation_failure_alot_for_current_gc = false;
  }
}
#endif // #ifndef PRODUCT

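// The liveness queries below return false for a NULL oop.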
inline bool G1CollectedHeap::is_in_young(const oop obj) {
  if (obj == NULL) {
    return false;
  }
  return heap_region_containing(obj)->is_young();
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_dead(obj, heap_region_containing(obj));
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_ill(obj, heap_region_containing(obj));
}

inline bool G1CollectedHeap::is_obj_dead_full(const oop obj, const HeapRegion* hr) const {
  return !is_marked_next(obj) && !hr->is_closed_archive();
}

inline bool G1CollectedHeap::is_obj_dead_full(const oop obj) const {
  return is_obj_dead_full(obj, heap_region_containing(obj));
}

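// Humongous reclaim candidate bookkeeping. The region index must refer to the
// first region of a humongous object (asserted via is_starts_humongous()).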
inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
  assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
  _humongous_reclaim_candidates.set_candidate(region, value);
}

inline bool G1CollectedHeap::is_humongous_reclaim_candidate(uint region) {
  assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
  return _humongous_reclaim_candidates.is_candidate(region);
}

inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
  uint region = addr_to_region(cast_from_oop<HeapWord*>(obj));
  // Clear the flag in the humongous_reclaim_candidates table. Also
  // reset the entry in the region attribute table so that subsequent references
  // to the same humongous object do not go into the slow path again.
  // This is racy, as multiple threads may enter here at the same time, but this
  // is benign.
  // During collection we only ever clear the "candidate" flag, and only ever clear the
  // entry in the in_cset_fast_table.
  // We only ever evaluate the contents of these tables (in the VM thread) after
  // having synchronized the worker threads with the VM thread, or in the same
  // thread (i.e. within the VM thread).
  if (is_humongous_reclaim_candidate(region)) {
    set_humongous_reclaim_candidate(region, false);
    _region_attr.clear_humongous(region);
  }
}

#endif // SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP