GitHub Repository: PojavLauncherTeam/jdk17u
Path: blob/master/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp

/*
 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc/g1/g1CollectedHeap.hpp"

#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "runtime/atomic.hpp"

G1GCPhaseTimes* G1CollectedHeap::phase_times() const {
  return _policy->phase_times();
}

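// Return the evacuation allocation (PLAB) statistics for the given destination
// region type (survivor or old).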
G1EvacStats* G1CollectedHeap::alloc_buffer_stats(G1HeapRegionAttr dest) {
  switch (dest.type()) {
    case G1HeapRegionAttr::Young:
      return &_survivor_evac_stats;
    case G1HeapRegionAttr::Old:
      return &_old_evac_stats;
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

size_t G1CollectedHeap::desired_plab_sz(G1HeapRegionAttr dest) {
  size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz(workers()->active_workers());
  // Prevent humongous PLAB sizes for two reasons:
  // * PLABs are allocated using a similar path as oops, but should
  //   never be in a humongous region
  // * Allowing humongous PLABs needlessly churns the region free lists
  return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
}

// Inline functions for G1CollectedHeap

// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }

// Return the region with the given index, or NULL if unmapped. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at_or_null(uint index) const { return _hrm.at_or_null(index); }

inline HeapRegion* G1CollectedHeap::next_region_in_humongous(HeapRegion* hr) const {
  return _hrm.next_region_in_humongous(hr);
}

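// Return the index of the region containing the given address: the address's
// byte offset from the start of the reserved heap divided by the region size.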
inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
  assert(is_in_reserved(addr),
         "Cannot calculate region index for address " PTR_FORMAT " that is outside of the heap [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(addr), p2i(reserved().start()), p2i(reserved().end()));
  return (uint)(pointer_delta(addr, reserved().start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
}

inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
  return _hrm.reserved().start() + index * HeapRegion::GrainWords;
}

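// Return the region containing the given address; the address must lie within
// the reserved heap.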
template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(is_in_reserved((const void*) addr),
         "Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
         p2i((void*)addr), p2i(reserved().start()), p2i(reserved().end()));
  return _hrm.addr_to_region((HeapWord*)(void*) addr);
}

template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing_or_null(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(is_in_reserved((const void*) addr),
         "Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
         p2i((void*)addr), p2i(reserved().start()), p2i(reserved().end()));
  uint const region_idx = addr_to_region(addr);
  return region_at_or_null(region_idx);
}

inline void G1CollectedHeap::old_set_add(HeapRegion* hr) {
  _old_set.add(hr);
}

inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
  _old_set.remove(hr);
}

inline void G1CollectedHeap::archive_set_add(HeapRegion* hr) {
  _archive_set.add(hr);
}

// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing(start);)
  assert(word_size > 0, "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->is_humongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  card_table()->g1_mark_as_young(mr);
}

inline G1ScannerTasksQueue* G1CollectedHeap::task_queue(uint i) const {
  return _task_queues->queue(i);
}

inline bool G1CollectedHeap::is_marked_next(oop obj) const {
  return _cm->next_mark_bitmap()->is_marked(obj);
}

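// Collection set membership queries, answered from the per-region attribute table.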
inline bool G1CollectedHeap::is_in_cset(oop obj) {
  return is_in_cset(cast_from_oop<HeapWord*>(obj));
}

inline bool G1CollectedHeap::is_in_cset(HeapWord* addr) {
  return _region_attr.is_in_cset(addr);
}

bool G1CollectedHeap::is_in_cset(const HeapRegion* hr) {
  return _region_attr.is_in_cset(hr);
}

bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) {
  return _region_attr.is_in_cset_or_humongous(cast_from_oop<HeapWord*>(obj));
}

G1HeapRegionAttr G1CollectedHeap::region_attr(const void* addr) const {
  return _region_attr.at((HeapWord*)addr);
}

G1HeapRegionAttr G1CollectedHeap::region_attr(uint idx) const {
  return _region_attr.get_by_index(idx);
}

void G1CollectedHeap::register_humongous_region_with_region_attr(uint index) {
  _region_attr.set_humongous(index, region_at(index)->rem_set()->is_tracked());
}

void G1CollectedHeap::register_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_has_remset(r->hrm_index(), r->rem_set()->is_tracked());
}

void G1CollectedHeap::register_old_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_in_old(r->hrm_index(), r->rem_set()->is_tracked());
  _rem_set->exclude_region_from_scan(r->hrm_index());
}

void G1CollectedHeap::register_optional_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_optional(r->hrm_index(), r->rem_set()->is_tracked());
}

bool G1CollectedHeap::evacuation_failed() const {
  return num_regions_failed_evacuation() > 0;
}

bool G1CollectedHeap::evacuation_failed(uint region_idx) const {
  assert(region_idx < max_regions(), "Invalid region index %u", region_idx);

  return Atomic::load(&_regions_failed_evacuation[region_idx]);
}

uint G1CollectedHeap::num_regions_failed_evacuation() const {
  return Atomic::load(&_num_regions_failed_evacuation);
}

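// Atomically mark the given region as having failed evacuation. Returns true
// only for the first thread that does so for this region; that thread also
// increments the global count of failed regions.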
bool G1CollectedHeap::notify_region_failed_evacuation(uint const region_idx) {
  assert(region_idx < max_regions(), "Invalid region index %u", region_idx);

  volatile bool* region_failed_addr = &_regions_failed_evacuation[region_idx];
  bool result = !Atomic::load(region_failed_addr) && !Atomic::cmpxchg(region_failed_addr, false, true, memory_order_relaxed);
  if (result) {
    Atomic::inc(&_num_regions_failed_evacuation, memory_order_relaxed);
  }
  return result;
}

#ifndef PRODUCT
// Support for G1EvacuationFailureALot

inline bool
G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool for_young_gc,
                                                     bool during_concurrent_start,
                                                     bool mark_or_rebuild_in_progress) {
  bool res = false;
  if (mark_or_rebuild_in_progress) {
    res |= G1EvacuationFailureALotDuringConcMark;
  }
  if (during_concurrent_start) {
    res |= G1EvacuationFailureALotDuringConcurrentStart;
  }
  if (for_young_gc) {
    res |= G1EvacuationFailureALotDuringYoungGC;
  } else {
    // GCs are mixed
    res |= G1EvacuationFailureALotDuringMixedGC;
  }
  return res;
}

inline void
G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
  if (G1EvacuationFailureALot) {
    // Note we can't assert that _evacuation_failure_alot_for_current_gc
    // is clear here. It may have been set during a previous GC but that GC
    // did not copy enough objects (i.e. G1EvacuationFailureALotCount) to
    // trigger an evacuation failure and clear the flags and counts.

    // Check if we have gone over the interval.
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number;

    _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);

    // Now check if G1EvacuationFailureALot is enabled for the current GC type.
    const bool in_young_only_phase = collector_state()->in_young_only_phase();
    const bool in_concurrent_start_gc = collector_state()->in_concurrent_start_gc();
    const bool mark_or_rebuild_in_progress = collector_state()->mark_or_rebuild_in_progress();

    _evacuation_failure_alot_for_current_gc &=
      evacuation_failure_alot_for_gc_type(in_young_only_phase,
                                          in_concurrent_start_gc,
                                          mark_or_rebuild_in_progress);
  }
}

inline bool G1CollectedHeap::evacuation_should_fail() {
  if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
    return false;
  }
  // G1EvacuationFailureALot is in effect for current GC
  // Access to _evacuation_failure_alot_count is not atomic;
  // the value does not have to be exact.
  if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
    return false;
  }
  _evacuation_failure_alot_count = 0;
  return true;
}

inline void G1CollectedHeap::reset_evacuation_should_fail() {
  if (G1EvacuationFailureALot) {
    _evacuation_failure_alot_gc_number = total_collections();
    _evacuation_failure_alot_count = 0;
    _evacuation_failure_alot_for_current_gc = false;
  }
}
#endif // #ifndef PRODUCT

inline bool G1CollectedHeap::is_in_young(const oop obj) {
  if (obj == NULL) {
    return false;
  }
  return heap_region_containing(obj)->is_young();
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_dead(obj, heap_region_containing(obj));
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_ill(obj, heap_region_containing(obj));
}

inline bool G1CollectedHeap::is_obj_dead_full(const oop obj, const HeapRegion* hr) const {
  return !is_marked_next(obj) && !hr->is_closed_archive();
}

inline bool G1CollectedHeap::is_obj_dead_full(const oop obj) const {
  return is_obj_dead_full(obj, heap_region_containing(obj));
}

inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
  assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
  _humongous_reclaim_candidates.set_candidate(region, value);
}

inline bool G1CollectedHeap::is_humongous_reclaim_candidate(uint region) {
  assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
  return _humongous_reclaim_candidates.is_candidate(region);
}

inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
  uint region = addr_to_region(cast_from_oop<HeapWord*>(obj));
  // Clear the flag in the humongous_reclaim_candidates table. Also
  // reset the entry in the region attribute table so that subsequent references
  // to the same humongous object do not go into the slow path again.
  // This is racy, as multiple threads may at the same time enter here, but this
  // is benign.
  // During collection we only ever clear the "candidate" flag, and only ever clear the
  // entry in the in_cset_fast_table.
  // We only ever evaluate the contents of these tables (in the VM thread) after
  // having synchronized the worker threads with the VM thread, or in the same
  // thread (i.e. within the VM thread).
  if (is_humongous_reclaim_candidate(region)) {
    set_humongous_reclaim_candidate(region, false);
    _region_attr.clear_humongous(region);
  }
}

#endif // SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP