GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp
/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "utilities/taskqueue.hpp"

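// Returns the PLAB statistics used for the given evacuation destination:
// survivor PLAB stats for young, old PLAB stats for old.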
PLABStats* G1CollectedHeap::alloc_buffer_stats(InCSetState dest) {
  switch (dest.value()) {
    case InCSetState::Young:
      return &_survivor_plab_stats;
    case InCSetState::Old:
      return &_old_plab_stats;
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

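// Computes the desired PLAB size, in HeapWords, for the given destination.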
size_t G1CollectedHeap::desired_plab_sz(InCSetState dest) {
  size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz();
  // Prevent humongous PLAB sizes for two reasons:
  // * PLABs are allocated using similar paths as oops, but should
  //   never be in a humongous region
  // * Allowing humongous PLABs needlessly churns the region free lists
  return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
}

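// Per-worker allocation during evacuation: dispatches to the survivor or
// old GC allocation region depending on the destination state.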
HeapWord* G1CollectedHeap::par_allocate_during_gc(InCSetState dest,
                                                  size_t word_size,
                                                  AllocationContext_t context) {
  switch (dest.value()) {
    case InCSetState::Young:
      return survivor_attempt_allocation(word_size, context);
    case InCSetState::Old:
      return old_attempt_allocation(word_size, context);
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

// Inline functions for G1CollectedHeap

inline AllocationContextStats& G1CollectedHeap::allocation_context_stats() {
  return _allocation_context_stats;
}

// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }

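// Returns the index of the region containing the given address, which must
// lie within the reserved heap.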
inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
  assert(is_in_reserved(addr),
         err_msg("Cannot calculate region index for address " PTR_FORMAT " that is outside of the heap [" PTR_FORMAT ", " PTR_FORMAT ")",
                 p2i(addr), p2i(_reserved.start()), p2i(_reserved.end())));
  return (uint)(pointer_delta(addr, _reserved.start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
}

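// Returns the first HeapWord of the region with the given index.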
inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
  return _hrm.reserved().start() + index * HeapRegion::GrainWords;
}

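// Returns the region containing addr without resolving "continues
// humongous" regions to their "starts humongous" region.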
template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing_raw(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(is_in_g1_reserved((const void*) addr),
         err_msg("Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
                 p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end())));
  return _hrm.addr_to_region((HeapWord*) addr);
}

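// Like heap_region_containing_raw(), but maps an address inside a
// humongous object back to its "starts humongous" region.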
template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
  HeapRegion* hr = heap_region_containing_raw(addr);
  if (hr->continuesHumongous()) {
    return hr->humongous_start_region();
  }
  return hr;
}

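// Both time stamp updates below are followed by a full memory fence so
// that the new value is ordered with respect to subsequent accesses.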
inline void G1CollectedHeap::reset_gc_time_stamp() {
  _gc_time_stamp = 0;
  OrderAccess::fence();
  // Clear the cached CSet starting regions and time stamps.
  // Their validity is dependent on the GC timestamp.
  clear_cset_start_regions();
}

inline void G1CollectedHeap::increment_gc_time_stamp() {
  ++_gc_time_stamp;
  OrderAccess::fence();
}

inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
  _old_set.remove(hr);
}

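// Slow test for collection set membership: looks up the region containing
// obj and queries its in_collection_set() flag.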
inline bool G1CollectedHeap::obj_in_cs(oop obj) {
  HeapRegion* r = _hrm.addr_to_region((HeapWord*) obj);
  return r != NULL && r->in_collection_set();
}

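// Fast-path mutator allocation: first attempts a lock-free allocation in
// the current mutator alloc region, then falls back to
// attempt_allocation_slow().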
inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
                                                     uint* gc_count_before_ret,
                                                     uint* gclocker_retry_count_ret) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "attempt_allocation() should not "
         "be called for humongous allocation requests");

  AllocationContext_t context = AllocationContext::current();
  HeapWord* result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
                                                                                   false /* bot_updates */);
  if (result == NULL) {
    result = attempt_allocation_slow(word_size,
                                     context,
                                     gc_count_before_ret,
                                     gclocker_retry_count_ret);
  }
  assert_heap_not_locked();
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}

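// GC-time allocation into a survivor region: an unlocked fast attempt
// first, then a retry under FreeList_lock, which may install a new
// survivor region.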
inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t word_size,
                                                              AllocationContext_t context) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation(word_size,
                                                                                       false /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
                                                                                      false /* bot_updates */);
  }
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}

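// GC-time allocation into an old region; same protocol as the survivor
// path, but with block offset table (BOT) updates enabled.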
inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size,
                                                         AllocationContext_t context) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _allocator->old_gc_alloc_region(context)->attempt_allocation(word_size,
                                                                                  true /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _allocator->old_gc_alloc_region(context)->attempt_allocation_locked(word_size,
                                                                                 true /* bot_updates */);
  }
  return result;
}

// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing_raw() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
  assert(word_size > 0, "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->isHumongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  g1_barrier_set()->g1_mark_as_young(mr);
}

inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
  return _task_queues->queue(i);
}

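// Marking predicates against the previous and next marking bitmaps, i.e.
// the bitmaps of the last completed and the in-progress marking cycle.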
inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
  return _cm->prevMarkBitMap()->isMarked((HeapWord*) obj);
}

inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
  return _cm->nextMarkBitMap()->isMarked((HeapWord*) obj);
}

// This is a fast test on whether a reference points into the
// collection set or not. Assume that the reference
// points into the heap.
inline bool G1CollectedHeap::is_in_cset(oop obj) {
  bool ret = _in_cset_fast_test.is_in_cset((HeapWord*)obj);
  // let's make sure the result is consistent with what the slower
  // test returns
  assert( ret || !obj_in_cs(obj), "sanity");
  assert(!ret || obj_in_cs(obj), "sanity");
  return ret;
}

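// The following queries go through the in-cset fast test table, a
// per-region lookup table kept in sync with the collection set.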
bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) {
  return _in_cset_fast_test.is_in_cset_or_humongous((HeapWord*)obj);
}

InCSetState G1CollectedHeap::in_cset_state(const oop obj) {
  return _in_cset_fast_test.at((HeapWord*)obj);
}

void G1CollectedHeap::register_humongous_region_with_in_cset_fast_test(uint index) {
  _in_cset_fast_test.set_humongous(index);
}

#ifndef PRODUCT
// Support for G1EvacuationFailureALot

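// Returns whether an injected evacuation failure is enabled for the
// current kind of GC, as selected by the G1EvacuationFailureALotDuring*
// flags.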
inline bool
G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool gcs_are_young,
                                                     bool during_initial_mark,
                                                     bool during_marking) {
  bool res = false;
  if (during_marking) {
    res |= G1EvacuationFailureALotDuringConcMark;
  }
  if (during_initial_mark) {
    res |= G1EvacuationFailureALotDuringInitialMark;
  }
  if (gcs_are_young) {
    res |= G1EvacuationFailureALotDuringYoungGC;
  } else {
    // GCs are mixed
    res |= G1EvacuationFailureALotDuringMixedGC;
  }
  return res;
}

inline void
G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
  if (G1EvacuationFailureALot) {
    // Note we can't assert that _evacuation_failure_alot_for_current_gc
    // is clear here. It may have been set during a previous GC but that GC
    // did not copy enough objects (i.e. G1EvacuationFailureALotCount) to
    // trigger an evacuation failure and clear the flags and counts.

    // Check if we have gone over the interval.
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number;

    _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);

    // Now check if G1EvacuationFailureALot is enabled for the current GC type.
    const bool gcs_are_young = g1_policy()->gcs_are_young();
    const bool during_im = g1_policy()->during_initial_mark_pause();
    const bool during_marking = mark_in_progress();

    _evacuation_failure_alot_for_current_gc &=
      evacuation_failure_alot_for_gc_type(gcs_are_young,
                                          during_im,
                                          during_marking);
  }
}

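// Returns true every G1EvacuationFailureALotCount-th call while failure
// injection is armed for the current GC, forcing an evacuation failure.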
inline bool G1CollectedHeap::evacuation_should_fail() {
  if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
    return false;
  }
  // G1EvacuationFailureALot is in effect for current GC
  // Access to _evacuation_failure_alot_count is not atomic;
  // the value does not have to be exact.
  if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
    return false;
  }
  _evacuation_failure_alot_count = 0;
  return true;
}

inline void G1CollectedHeap::reset_evacuation_should_fail() {
  if (G1EvacuationFailureALot) {
    _evacuation_failure_alot_gc_number = total_collections();
    _evacuation_failure_alot_count = 0;
    _evacuation_failure_alot_for_current_gc = false;
  }
}
#endif // #ifndef PRODUCT

inline bool G1CollectedHeap::is_in_young(const oop obj) {
  if (obj == NULL) {
    return false;
  }
  return heap_region_containing(obj)->is_young();
}

// We don't need barriers for initializing stores to objects
// in the young gen: for the SATB pre-barrier, there is no
// pre-value that needs to be remembered; for the remembered-set
// update logging post-barrier, we don't maintain remembered set
// information for young gen objects.
inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return is_in_young(new_obj);
}

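// NULL-tolerant wrappers that look up the containing region and delegate
// to the two-argument liveness queries.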
inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_dead(obj, heap_region_containing(obj));
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_ill(obj, heap_region_containing(obj));
}

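// Accessors for the per-region table of humongous regions that are
// candidates for eager reclaim; only valid for "starts humongous" regions.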
inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
  assert(_hrm.at(region)->startsHumongous(), "Must start a humongous object");
  _humongous_reclaim_candidates.set_candidate(region, value);
}

inline bool G1CollectedHeap::is_humongous_reclaim_candidate(uint region) {
  assert(_hrm.at(region)->startsHumongous(), "Must start a humongous object");
  return _humongous_reclaim_candidates.is_candidate(region);
}

inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
  uint region = addr_to_region((HeapWord*)obj);
  // Clear the flag in the humongous_reclaim_candidates table. Also
  // reset the entry in the _in_cset_fast_test table so that subsequent
  // references to the same humongous object do not go into the slow path again.
  // This is racy, as multiple threads may enter here at the same time,
  // but this is benign.
  // During collection we only ever clear the "candidate" flag, and only ever
  // clear the entry in the _in_cset_fast_test table.
  // We only ever evaluate the contents of these tables (in the VM thread) after
  // having synchronized the worker threads with the VM thread, or in the same
  // thread (i.e. within the VM thread).
  if (is_humongous_reclaim_candidate(region)) {
    set_humongous_reclaim_candidate(region, false);
    _in_cset_fast_test.clear_humongous(region);
  }
}

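// Decides whether an entry drained from a SATB buffer still needs to be
// marked: entries at or above the region's next TAMS were allocated during
// marking and are implicitly live; older entries need marking unless
// already marked in the next bitmap.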
inline bool G1CollectedHeap::requires_marking(const void* entry) const {
  // Includes rejection of NULL pointers.
  assert(is_in_reserved(entry),
         err_msg("Non-heap pointer in SATB buffer: " PTR_FORMAT, p2i(entry)));

  HeapRegion* region = heap_region_containing(entry);
  assert(region != NULL, err_msg("No region for " PTR_FORMAT, p2i(entry)));
  if (entry >= region->next_top_at_mark_start()) {
    return false;
  }

  assert(((oop)entry)->is_oop(true /* ignore mark word */),
         err_msg("Invalid oop in SATB buffer: " PTR_FORMAT, p2i(entry)));

  return !isMarkedNext((oop) entry);
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP