PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp
/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_INLINE_HPP

#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1ConcurrentMarkObjArrayProcessor.inline.hpp"

// Utility routine to set an exclusive range of cards on the given
// card liveness bitmap
inline void ConcurrentMark::set_card_bitmap_range(BitMap* card_bm,
                                                  BitMap::idx_t start_idx,
                                                  BitMap::idx_t end_idx,
                                                  bool is_par) {

  // Set the exclusive bit range [start_idx, end_idx).
  assert((end_idx - start_idx) > 0, "at least one card");
  assert(end_idx <= card_bm->size(), "sanity");

  // Silently clip the end index
  end_idx = MIN2(end_idx, card_bm->size());

  // For small ranges use a simple loop; otherwise use set_range or
  // par_at_put_range (if parallel). The range is made up of the cards
  // spanned by an object/mem region, so a threshold of 8 cards lets
  // object sizes of up to 4K be handled by the loop.
  if ((end_idx - start_idx) <= 8) {
    for (BitMap::idx_t i = start_idx; i < end_idx; i += 1) {
      if (is_par) {
        card_bm->par_set_bit(i);
      } else {
        card_bm->set_bit(i);
      }
    }
  } else {
    // Note BitMap::par_at_put_range() and BitMap::set_range() are exclusive.
    if (is_par) {
      card_bm->par_at_put_range(start_idx, end_idx, true);
    } else {
      card_bm->set_range(start_idx, end_idx);
    }
  }
}
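
// Worked example (illustrative, assuming G1's default 512-byte cards, i.e.
// CardTableModRefBS::card_shift == 9): the 8-card threshold above covers
// 8 * 512 = 4096 bytes, so a range spanned by an object of up to 4K is set
// by the simple per-bit loop, while larger ranges use the bulk routines:
//
//   set_card_bitmap_range(bm, 40, 43, false);  // 3 cards  -> loop sets bits 40..42
//   set_card_bitmap_range(bm, 40, 60, false);  // 20 cards -> set_range([40, 60))
//
// Here "bm" stands for a hypothetical card bitmap; it is not a name used in
// this file.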

// Returns the index in the liveness accounting card bitmap
// for the given address
inline BitMap::idx_t ConcurrentMark::card_bitmap_index_for(HeapWord* addr) {
  // Below, the term "card num" means the result of shifting an address
  // by the card shift -- address 0 corresponds to card number 0. One
  // must subtract the card num of the bottom of the heap to obtain a
  // card table index.
  intptr_t card_num = intptr_t(uintptr_t(addr) >> CardTableModRefBS::card_shift);
  return card_num - heap_bottom_card_num();
}
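
// Illustrative arithmetic (hypothetical addresses, assuming a 512-byte card,
// card_shift == 9): with the heap bottom at 0x700000000, heap_bottom_card_num()
// is 0x700000000 >> 9 == 0x3800000. For addr == heap bottom + 1536 bytes the
// card num is (0x700000000 + 0x600) >> 9 == 0x3800003, so the returned bitmap
// index is 3 -- the fourth card counted from the bottom of the heap.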

// Counts the given memory region in the given task/worker
// counting data structures.
inline void ConcurrentMark::count_region(MemRegion mr, HeapRegion* hr,
                                         size_t* marked_bytes_array,
                                         BitMap* task_card_bm) {
  G1CollectedHeap* g1h = _g1h;
  CardTableModRefBS* ct_bs = g1h->g1_barrier_set();

  HeapWord* start = mr.start();
  HeapWord* end = mr.end();
  size_t region_size_bytes = mr.byte_size();
  uint index = hr->hrm_index();

  assert(!hr->continuesHumongous(), "should not be HC region");
  assert(hr == g1h->heap_region_containing(start), "sanity");
  assert(hr == g1h->heap_region_containing(mr.last()), "sanity");
  assert(marked_bytes_array != NULL, "pre-condition");
  assert(task_card_bm != NULL, "pre-condition");

  // Add to the task local marked bytes for this region.
  marked_bytes_array[index] += region_size_bytes;

  BitMap::idx_t start_idx = card_bitmap_index_for(start);
  BitMap::idx_t end_idx = card_bitmap_index_for(end);

  // Note: if we're looking at the last region in heap - end
  // could be actually just beyond the end of the heap; end_idx
  // will then correspond to a (non-existent) card that is also
  // just beyond the heap.
  if (g1h->is_in_g1_reserved(end) && !ct_bs->is_card_aligned(end)) {
    // end of region is not card aligned - increment to cover
    // all the cards spanned by the region.
    end_idx += 1;
  }
  // The card bitmap is task/worker specific => no need to use
  // the 'par' BitMap routines.
  // Set bits in the exclusive bit range [start_idx, end_idx).
  set_card_bitmap_range(task_card_bm, start_idx, end_idx, false /* is_par */);
}
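
// Example of the end-alignment handling above (hypothetical values, 512-byte
// cards): for mr == [heap bottom, heap bottom + 0x300), start_idx is 0 and the
// raw end_idx is 1 (0x300 >> 9 == 1). Since 0x300 is not card aligned, end_idx
// is bumped to 2 so that the partially covered second card is counted as well,
// i.e. bits [0, 2) of the task's card bitmap end up set.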

// Counts the given memory region in the task/worker counting
// data structures for the given worker id.
inline void ConcurrentMark::count_region(MemRegion mr,
                                         HeapRegion* hr,
                                         uint worker_id) {
  size_t* marked_bytes_array = count_marked_bytes_array_for(worker_id);
  BitMap* task_card_bm = count_card_bitmap_for(worker_id);
  count_region(mr, hr, marked_bytes_array, task_card_bm);
}

// Counts the given object in the given task/worker counting data structures.
inline void ConcurrentMark::count_object(oop obj,
                                         HeapRegion* hr,
                                         size_t* marked_bytes_array,
                                         BitMap* task_card_bm) {
  MemRegion mr((HeapWord*)obj, obj->size());
  count_region(mr, hr, marked_bytes_array, task_card_bm);
}

// Attempts to mark the given object and, if successful, counts
// the object in the given task/worker counting structures.
inline bool ConcurrentMark::par_mark_and_count(oop obj,
                                               HeapRegion* hr,
                                               size_t* marked_bytes_array,
                                               BitMap* task_card_bm) {
  HeapWord* addr = (HeapWord*)obj;
  if (_nextMarkBitMap->parMark(addr)) {
    // Update the task specific count data for the object.
    count_object(obj, hr, marked_bytes_array, task_card_bm);
    return true;
  }
  return false;
}

// Attempts to mark the given object and, if successful, counts
// the object in the task/worker counting structures for the
// given worker id.
inline bool ConcurrentMark::par_mark_and_count(oop obj,
                                               size_t word_size,
                                               HeapRegion* hr,
                                               uint worker_id) {
  HeapWord* addr = (HeapWord*)obj;
  if (_nextMarkBitMap->parMark(addr)) {
    MemRegion mr(addr, word_size);
    count_region(mr, hr, worker_id);
    return true;
  }
  return false;
}

inline bool CMBitMapRO::iterate(BitMapClosure* cl, MemRegion mr) {
  HeapWord* start_addr = MAX2(startWord(), mr.start());
  HeapWord* end_addr = MIN2(endWord(), mr.end());

  if (end_addr > start_addr) {
    // Right-open interval [start-offset, end-offset).
    BitMap::idx_t start_offset = heapWordToOffset(start_addr);
    BitMap::idx_t end_offset = heapWordToOffset(end_addr);

    start_offset = _bm.get_next_one_offset(start_offset, end_offset);
    while (start_offset < end_offset) {
      if (!cl->do_bit(start_offset)) {
        return false;
      }
      HeapWord* next_addr = MIN2(nextObject(offsetToHeapWord(start_offset)), end_addr);
      BitMap::idx_t next_offset = heapWordToOffset(next_addr);
      start_offset = _bm.get_next_one_offset(next_offset, end_offset);
    }
  }
  return true;
}
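
// A minimal sketch of how this iterator is typically driven (illustrative
// only -- CountMarkedClosure is a hypothetical closure, not part of this
// file). Returning false from do_bit() aborts the iteration early:
//
//   class CountMarkedClosure : public BitMapClosure {
//     size_t _count;
//    public:
//     CountMarkedClosure() : _count(0) { }
//     bool do_bit(BitMap::idx_t offset) { _count++; return true; }
//     size_t count() const { return _count; }
//   };
//
//   CountMarkedClosure cl;
//   bitmap->iterate(&cl, mr);  // one callback per marked bit in mr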

inline bool CMBitMapRO::iterate(BitMapClosure* cl) {
  MemRegion mr(startWord(), sizeInWords());
  return iterate(cl, mr);
}

#define check_mark(addr)                                                  \
  assert(_bmStartWord <= (addr) && (addr) < (_bmStartWord + _bmWordSize), \
         "outside underlying space?");                                    \
  assert(G1CollectedHeap::heap()->is_in_exact(addr),                      \
         err_msg("Trying to access not available bitmap " PTR_FORMAT      \
                 " corresponding to " PTR_FORMAT " (%u)",                 \
                 p2i(this), p2i(addr), G1CollectedHeap::heap()->addr_to_region(addr)));

inline void CMBitMap::mark(HeapWord* addr) {
  check_mark(addr);
  _bm.set_bit(heapWordToOffset(addr));
}

inline void CMBitMap::clear(HeapWord* addr) {
  check_mark(addr);
  _bm.clear_bit(heapWordToOffset(addr));
}

inline bool CMBitMap::parMark(HeapWord* addr) {
  check_mark(addr);
  return _bm.par_set_bit(heapWordToOffset(addr));
}

inline bool CMBitMap::parClear(HeapWord* addr) {
  check_mark(addr);
  return _bm.par_clear_bit(heapWordToOffset(addr));
}

#undef check_mark

inline void CMTask::push(oop obj) {
  HeapWord* objAddr = (HeapWord*) obj;
  assert(G1CMObjArrayProcessor::is_array_slice(obj) || _g1h->is_in_g1_reserved(objAddr), "invariant");
  assert(G1CMObjArrayProcessor::is_array_slice(obj) || !_g1h->is_on_master_free_list(
             _g1h->heap_region_containing((HeapWord*) objAddr)), "invariant");
  assert(G1CMObjArrayProcessor::is_array_slice(obj) || !_g1h->is_obj_ill(obj), "invariant");
  assert(G1CMObjArrayProcessor::is_array_slice(obj) || _nextMarkBitMap->isMarked(objAddr), "invariant");

  if (_cm->verbose_high()) {
    gclog_or_tty->print_cr("[%u] pushing " PTR_FORMAT, _worker_id, p2i((void*) obj));
  }

  if (!_task_queue->push(obj)) {
    // The local task queue looks full. We need to push some entries
    // to the global stack.

    if (_cm->verbose_medium()) {
      gclog_or_tty->print_cr("[%u] task queue overflow, "
                             "moving entries to the global stack",
                             _worker_id);
    }
    move_entries_to_global_stack();

    // this should succeed since, even if we overflow the global
    // stack, we should have definitely removed some entries from the
    // local queue. So, there must be space on it.
    bool success = _task_queue->push(obj);
    assert(success, "invariant");
  }

  statsOnly( int tmp_size = _task_queue->size();
             if (tmp_size > _local_max_size) {
               _local_max_size = tmp_size;
             }
             ++_local_pushes );
}

inline bool CMTask::is_below_finger(oop obj, HeapWord* global_finger) const {
  // If obj is above the global finger, then the mark bitmap scan
  // will find it later, and no push is needed.  Similarly, if we have
  // a current region and obj is between the local finger and the
  // end of the current region, then no push is needed.  The tradeoff
  // of checking both vs only checking the global finger is that the
  // local check will be more accurate and so result in fewer pushes,
  // but may also be a little slower.
  HeapWord* objAddr = (HeapWord*)obj;
  if (_finger != NULL) {
    // We have a current region.

    // Finger and region values are all NULL or all non-NULL. We
    // use _finger to check since we immediately use its value.
    assert(_curr_region != NULL, "invariant");
    assert(_region_limit != NULL, "invariant");
    assert(_region_limit <= global_finger, "invariant");

    // True if obj is less than the local finger, or is between
    // the region limit and the global finger.
    if (objAddr < _finger) {
      return true;
    } else if (objAddr < _region_limit) {
      return false;
    } // Else check global finger.
  }
  // Check global finger.
  return objAddr < global_finger;
}
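
// Illustrative layout for the checks above (hypothetical addresses): with
// _finger == 0x1000, _region_limit == 0x2000 and global_finger == 0x5000,
// an object at 0x0800 is below the local finger (returns true, push needed);
// an object at 0x1800 lies between the local finger and the region limit and
// will still be scanned by this task (returns false); an object at 0x3000 is
// between the region limit and the global finger (returns true); anything at
// or above 0x5000 returns false.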

inline void CMTask::make_reference_grey(oop obj, HeapRegion* hr) {
  if (_cm->par_mark_and_count(obj, hr, _marked_bytes_array, _card_bm)) {

    if (_cm->verbose_high()) {
      gclog_or_tty->print_cr("[%u] marked object " PTR_FORMAT,
                             _worker_id, p2i(obj));
    }

    // No OrderAccess::store_load() is needed. It is implicit in the
    // CAS done in CMBitMap::parMark() call in the routine above.
    HeapWord* global_finger = _cm->finger();

    // We only need to push a newly grey object on the mark
    // stack if it is in a section of memory the mark bitmap
    // scan has already examined.  Mark bitmap scanning
    // maintains progress "fingers" for determining that.
    //
    // Notice that the global finger might be moving forward
    // concurrently. This is not a problem. In the worst case, we
    // mark the object while it is above the global finger and, by
    // the time we read the global finger, it has moved forward
    // past this object. In this case, the object will probably
    // be visited when a task is scanning the region and will also
    // be pushed on the stack. So, some duplicate work, but no
    // correctness problems.
    if (is_below_finger(obj, global_finger)) {
      if (obj->is_typeArray()) {
        // Immediately process arrays of primitive types, rather
        // than pushing on the mark stack.  This keeps us from
        // adding humongous objects to the mark stack that might
        // be reclaimed before the entry is processed - see
        // selection of candidates for eager reclaim of humongous
        // objects.  The cost of the additional type test is
        // mitigated by avoiding a trip through the mark stack,
        // by only doing a bookkeeping update and avoiding the
        // actual scan of the object - a typeArray contains no
        // references, and the metadata is built-in.
        process_grey_object<false>(obj);
      } else {
        if (_cm->verbose_high()) {
          gclog_or_tty->print_cr("[%u] below a finger (local: " PTR_FORMAT
                                 ", global: " PTR_FORMAT ") pushing "
                                 PTR_FORMAT " on mark stack",
                                 _worker_id, p2i(_finger),
                                 p2i(global_finger), p2i(obj));
        }
        push(obj);
      }
    }
  }
}

inline void CMTask::deal_with_reference(oop obj) {
  if (_cm->verbose_high()) {
    gclog_or_tty->print_cr("[%u] we're dealing with reference = " PTR_FORMAT,
                           _worker_id, p2i((void*) obj));
  }

  increment_refs_reached();

  HeapWord* objAddr = (HeapWord*) obj;
  assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
  if (_g1h->is_in_g1_reserved(objAddr)) {
    assert(obj != NULL, "null check is implicit");
    if (!_nextMarkBitMap->isMarked(objAddr)) {
      // Only get the containing region if the object is not marked on the
      // bitmap (otherwise, it's a waste of time since we won't do
      // anything with it).
      HeapRegion* hr = _g1h->heap_region_containing_raw(obj);
      if (!hr->obj_allocated_since_next_marking(obj)) {
        make_reference_grey(obj, hr);
      }
    }
  }
}

inline size_t CMTask::scan_objArray(objArrayOop obj, MemRegion mr) {
  obj->oop_iterate(_cm_oop_closure, mr);
  return mr.word_size();
}

inline void ConcurrentMark::markPrev(oop p) {
  assert(!_prevMarkBitMap->isMarked((HeapWord*) p), "sanity");
  // Note we are overriding the read-only view of the prev map here, via
  // the cast.
  ((CMBitMap*)_prevMarkBitMap)->mark((HeapWord*) p);
}

inline void ConcurrentMark::grayRoot(oop obj, size_t word_size,
                                     uint worker_id, HeapRegion* hr) {
  assert(obj != NULL, "pre-condition");
  HeapWord* addr = (HeapWord*) obj;
  if (hr == NULL) {
    hr = _g1h->heap_region_containing_raw(addr);
  } else {
    assert(hr->is_in(addr), "pre-condition");
  }
  assert(hr != NULL, "sanity");
  // Given that we're looking for a region that contains an object
  // header it's impossible to get back a HC region.
  assert(!hr->continuesHumongous(), "sanity");

  // We cannot assert that word_size == obj->size() given that obj
  // might not be in a consistent state (another thread might be in
  // the process of copying it). So the best thing we can do is to
  // assert that word_size is under an upper bound which is its
  // containing region's capacity.
  assert(word_size * HeapWordSize <= hr->capacity(),
         err_msg("size: " SIZE_FORMAT " capacity: " SIZE_FORMAT " " HR_FORMAT,
                 word_size * HeapWordSize, hr->capacity(),
                 HR_FORMAT_PARAMS(hr)));

  if (addr < hr->next_top_at_mark_start()) {
    if (!_nextMarkBitMap->isMarked(addr)) {
      par_mark_and_count(obj, word_size, hr, worker_id);
    }
  }
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_INLINE_HPP