GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/gc/shared/cardTableRS.cpp
/*
 * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.hpp"
#include "gc/shared/generation.hpp"
#include "gc/shared/space.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "utilities/macros.hpp"

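// Reset a dirty card entry to clean. Returns true so that do_MemRegion()
// below treats the card as part of the current non-clean window.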
inline bool ClearNoncleanCardWrapper::clear_card(CardValue* entry) {
  assert(*entry == CardTableRS::dirty_card_val(), "Only look at dirty cards.");
  *entry = CardTableRS::clean_card_val();
  return true;
}

ClearNoncleanCardWrapper::ClearNoncleanCardWrapper(
  DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct) :
    _dirty_card_closure(dirty_card_closure), _ct(ct) {
}

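// True iff the card-table entry address is word aligned, i.e. it starts a
// BytesPerWord-sized row of entries that can be tested one word at a time.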
bool ClearNoncleanCardWrapper::is_word_aligned(CardTable::CardValue* entry) {
  return (((intptr_t)entry) & (BytesPerWord-1)) == 0;
}

// The regions are visited in *decreasing* address order.
// This order aids with imprecise card marking, where a dirty
// card may cause scanning, and summarization marking, of objects
// that extend onto subsequent cards.
void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) {
  assert(mr.word_size() > 0, "Error");
  assert(_ct->is_aligned(mr.start()), "mr.start() should be card aligned");
  // mr.end() may not necessarily be card aligned.
  CardValue* cur_entry = _ct->byte_for(mr.last());
  const CardValue* limit = _ct->byte_for(mr.start());
  HeapWord* end_of_non_clean = mr.end();
  HeapWord* start_of_non_clean = end_of_non_clean;
  while (cur_entry >= limit) {
    HeapWord* cur_hw = _ct->addr_for(cur_entry);
    if ((*cur_entry != CardTableRS::clean_card_val()) && clear_card(cur_entry)) {
      // Continue the dirty range by opening the
      // dirty window one card to the left.
      start_of_non_clean = cur_hw;
    } else {
      // We hit a "clean" card; process any non-empty
      // "dirty" range accumulated so far.
      if (start_of_non_clean < end_of_non_clean) {
        const MemRegion mrd(start_of_non_clean, end_of_non_clean);
        _dirty_card_closure->do_MemRegion(mrd);
      }

      // Fast forward through a potentially contiguous whole-word range of
      // clean cards beginning at a word boundary.
      if (is_word_aligned(cur_entry)) {
        CardValue* cur_row = cur_entry - BytesPerWord;
        while (cur_row >= limit && *((intptr_t*)cur_row) == CardTableRS::clean_card_row_val()) {
          cur_row -= BytesPerWord;
        }
        cur_entry = cur_row + BytesPerWord;
        cur_hw = _ct->addr_for(cur_entry);
      }

      // Reset the dirty window, while continuing to look
      // for the next dirty card that will start a
      // new dirty window.
      end_of_non_clean = cur_hw;
      start_of_non_clean = cur_hw;
    }
    // Note that "cur_entry" leads "start_of_non_clean" in
    // its leftward excursion after this point
    // in the loop and, when we hit the left end of "mr",
    // will point off of the left end of the card-table
    // for "mr".
    cur_entry--;
  }
  // If the first card of "mr" was dirty, we will have
  // been left with a dirty window, co-initial with "mr",
  // which we now process.
  if (start_of_non_clean < end_of_non_clean) {
    const MemRegion mrd(start_of_non_clean, end_of_non_clean);
    _dirty_card_closure->do_MemRegion(mrd);
  }
}

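// Walk the non-clean cards covering sp's used-region-at-save-marks and
// apply cl to the oops found in the objects on those cards.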
void CardTableRS::younger_refs_in_space_iterate(Space* sp,
                                                HeapWord* gen_boundary,
                                                OopIterateClosure* cl) {
  verify_used_region_at_save_marks(sp);

  const MemRegion urasm = sp->used_region_at_save_marks();
  non_clean_card_iterate(sp, gen_boundary, urasm, cl, this);
}

#ifdef ASSERT
void CardTableRS::verify_used_region_at_save_marks(Space* sp) const {
  MemRegion ur    = sp->used_region();
  MemRegion urasm = sp->used_region_at_save_marks();

  assert(ur.contains(urasm),
         "Did you forget to call save_marks()? "
         "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
         "[" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end()));
}
#endif

void CardTableRS::clear_into_younger(Generation* old_gen) {
  assert(GenCollectedHeap::heap()->is_old_gen(old_gen),
         "Should only be called for the old generation");
  // The card tables for the youngest gen need never be cleared.
  // There's a bit of subtlety in the clear() and invalidate()
  // methods that we exploit here and in invalidate_or_clear()
  // below to avoid missing cards at the fringes. If clear() or
  // invalidate() are changed in the future, this code should
  // be revisited. 20040107.ysr
  clear(old_gen->prev_used_region());
}

void CardTableRS::invalidate_or_clear(Generation* old_gen) {
  assert(GenCollectedHeap::heap()->is_old_gen(old_gen),
         "Should only be called for the old generation");
  // Invalidate the cards for the currently occupied part of
  // the old generation and clear the cards for the
  // unoccupied part of the generation (if any, making use
  // of that generation's prev_used_region to determine that
  // region). No need to do anything for the youngest
  // generation. Also see note#20040107.ysr above.
  MemRegion used_mr = old_gen->used_region();
  MemRegion to_be_cleared_mr = old_gen->prev_used_region().minus(used_mr);
  if (!to_be_cleared_mr.is_empty()) {
    clear(to_be_cleared_mr);
  }
  invalidate(used_mr);
}


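// Verification closure: checks that no oop location within [_begin, _end)
// that lies on a clean card holds a pointer below _boundary (i.e. into a
// younger generation).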
class VerifyCleanCardClosure: public BasicOopIterateClosure {
private:
  HeapWord* _boundary;
  HeapWord* _begin;
  HeapWord* _end;
protected:
  template <class T> void do_oop_work(T* p) {
    HeapWord* jp = (HeapWord*)p;
    assert(jp >= _begin && jp < _end,
           "Error: jp " PTR_FORMAT " should be within "
           "[_begin, _end) = [" PTR_FORMAT "," PTR_FORMAT ")",
           p2i(jp), p2i(_begin), p2i(_end));
    oop obj = RawAccess<>::oop_load(p);
    guarantee(obj == NULL || cast_from_oop<HeapWord*>(obj) >= _boundary,
              "pointer " PTR_FORMAT " at " PTR_FORMAT " on "
              "clean card crosses boundary " PTR_FORMAT,
              p2i(obj), p2i(jp), p2i(_boundary));
  }

public:
  VerifyCleanCardClosure(HeapWord* b, HeapWord* begin, HeapWord* end) :
    _boundary(b), _begin(begin), _end(end) {
    assert(b <= begin,
           "Error: boundary " PTR_FORMAT " should be at or below begin " PTR_FORMAT,
           p2i(b), p2i(begin));
    assert(begin <= end,
           "Error: begin " PTR_FORMAT " should be at or below end " PTR_FORMAT,
           p2i(begin), p2i(end));
  }

  virtual void do_oop(oop* p)       { VerifyCleanCardClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { VerifyCleanCardClosure::do_oop_work(p); }
};

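// Helper closure: runs CardTableRS::verify_space() on each space it visits.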
class VerifyCTSpaceClosure: public SpaceClosure {
private:
  CardTableRS* _ct;
  HeapWord* _boundary;
public:
  VerifyCTSpaceClosure(CardTableRS* ct, HeapWord* boundary) :
    _ct(ct), _boundary(boundary) {}
  virtual void do_space(Space* s) { _ct->verify_space(s, _boundary); }
};

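// Helper closure: verifies the card table for every space of each non-young
// generation, using the generation's reserved start as the boundary.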
class VerifyCTGenClosure: public GenCollectedHeap::GenClosure {
  CardTableRS* _ct;
public:
  VerifyCTGenClosure(CardTableRS* ct) : _ct(ct) {}
  void do_generation(Generation* gen) {
    // Skip the youngest generation.
    if (GenCollectedHeap::heap()->is_young_gen(gen)) {
      return;
    }
    // Normally, we're interested in pointers to younger generations.
    VerifyCTSpaceClosure blk(_ct, gen->reserved().start());
    gen->space_iterate(&blk, true);
  }
};

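// Verify that objects covered by clean cards in this space contain no
// references below gen_boundary (i.e. no unrecorded old-to-young pointers).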
void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) {
  // We don't need to do young-gen spaces.
  if (s->end() <= gen_boundary) return;
  MemRegion used = s->used_region();

  CardValue* cur_entry = byte_for(used.start());
  CardValue* limit = byte_after(used.last());
  while (cur_entry < limit) {
    if (*cur_entry == clean_card_val()) {
      CardValue* first_dirty = cur_entry+1;
      while (first_dirty < limit &&
             *first_dirty == clean_card_val()) {
        first_dirty++;
      }
      // If the first object is a regular object, and it has a
      // young-to-old field, that would mark the previous card.
      HeapWord* boundary = addr_for(cur_entry);
      HeapWord* end = (first_dirty >= limit) ? used.end() : addr_for(first_dirty);
      HeapWord* boundary_block = s->block_start(boundary);
      HeapWord* begin = boundary;             // Until proven otherwise.
      HeapWord* start_block = boundary_block; // Until proven otherwise.
      if (boundary_block < boundary) {
        if (s->block_is_obj(boundary_block) && s->obj_is_alive(boundary_block)) {
          oop boundary_obj = cast_to_oop(boundary_block);
          if (!boundary_obj->is_objArray() &&
              !boundary_obj->is_typeArray()) {
            guarantee(cur_entry > byte_for(used.start()),
                      "else boundary would be boundary_block");
            if (*byte_for(boundary_block) != clean_card_val()) {
              begin = boundary_block + s->block_size(boundary_block);
              start_block = begin;
            }
          }
        }
      }
      // Now traverse objects until end.
      if (begin < end) {
        MemRegion mr(begin, end);
        VerifyCleanCardClosure verify_blk(gen_boundary, begin, end);
        for (HeapWord* cur = start_block; cur < end; cur += s->block_size(cur)) {
          if (s->block_is_obj(cur) && s->obj_is_alive(cur)) {
            cast_to_oop(cur)->oop_iterate(&verify_blk, mr);
          }
        }
      }
      cur_entry = first_dirty;
    } else {
      // We'd normally expect that cur_youngergen_and_prev_nonclean_card
      // is a transient value, that cannot be in the card table
      // except during GC, and thus assert that:
      // guarantee(*cur_entry != cur_youngergen_and_prev_nonclean_card,
      //           "Illegal CT value");
      // That however, need not hold, as will become clear in the
      // following...

      // We'd normally expect that if we are in the parallel case,
      // we can't have left a prev value (which would be different
      // from the current value) in the card table, and so we'd like to
      // assert that:
      // guarantee(cur_youngergen_card_val() == youngergen_card
      //           || !is_prev_youngergen_card_val(*cur_entry),
      //           "Illegal CT value");
      // That, however, may not hold occasionally, because of
      // CMS or MSC in the old gen. To wit, consider the
      // following two simple illustrative scenarios:
      // (a) CMS: Consider the case where a large object L
      //     spanning several cards is allocated in the old
      //     gen, and has a young gen reference stored in it, dirtying
      //     some interior cards. A young collection scans the card,
      //     finds a young ref and installs a youngergenP_n value.
      //     L then goes dead. Now a CMS collection starts,
      //     finds L dead and sweeps it up. Assume that L is
      //     abutting _unallocated_blk, so _unallocated_blk is
      //     adjusted down to (below) L. Assume further that
      //     no young collection intervenes during this CMS cycle.
      //     The next young gen cycle will not get to look at this
      //     youngergenP_n card since it lies in the unoccupied
      //     part of the space.
      //     Some young collections later the blocks on this
      //     card can be re-allocated either due to direct allocation
      //     or due to absorbing promotions. At this time, the
      //     before-gc verification will fail the above assert.
      // (b) MSC: In this case, an object L with a young reference
      //     is on a card that (therefore) holds a youngergen_n value.
      //     Suppose also that L lies towards the end of the used
      //     space before GC. An MSC collection
      //     occurs that compacts to such an extent that this
      //     card is no longer in the occupied part of the space.
      //     Since current code in MSC does not always clear cards
      //     in the unused part of old gen, this stale youngergen_n
      //     value is left behind and can later be covered by
      //     an object when promotion or direct allocation
      //     re-allocates that part of the heap.
      //
      // Fortunately, the presence of such stale card values is
      // "only" a minor annoyance in that subsequent young collections
      // might needlessly scan such cards, but would still never corrupt
      // the heap as a result. However, it's likely not to be a significant
      // performance inhibitor in practice. For instance,
      // some recent measurements with unoccupied cards eagerly cleared
      // out to maintain this invariant, showed next to no
      // change in young collection times; of course one can construct
      // degenerate examples where the cost can be significant.
      // Note, in particular, that if the "stale" card is modified
      // after re-allocation, it would be dirty, not "stale". Thus,
      // we can never have a younger ref in such a card and it is
      // safe not to scan that card in any collection. [As we see
      // below, we do some unnecessary scanning
      // in some cases in the current parallel scanning algorithm.]
      //
      // The main point below is that the parallel card scanning code
      // deals correctly with these stale card values. There are two main
      // cases to consider where we have a stale "young gen" value and a
      // "derivative" case to consider, where we have a stale
      // "cur_younger_gen_and_prev_non_clean" value, as will become
      // apparent in the case analysis below.
      // o Case 1. If the stale value corresponds to a younger_gen_n
      //   value other than the cur_younger_gen value then the code
      //   treats this as being tantamount to a prev_younger_gen
      //   card. This means that the card may be unnecessarily scanned.
      //   There are two sub-cases to consider:
      //   o Case 1a. Let us say that the card is in the occupied part
      //     of the generation at the time the collection begins. In
      //     that case the card will be either cleared when it is scanned
      //     for young pointers, or will be set to cur_younger_gen as a
      //     result of promotion. (We have elided the normal case where
      //     the scanning thread and the promoting thread interleave
      //     possibly resulting in a transient
      //     cur_younger_gen_and_prev_non_clean value before settling
      //     to cur_younger_gen.) [End Case 1a.]
      //   o Case 1b. Consider now the case when the card is in the unoccupied
      //     part of the space which becomes occupied because of promotions
      //     into it during the current young GC. In this case the card
      //     will never be scanned for young references. The current
      //     code will set the card value to either
      //     cur_younger_gen_and_prev_non_clean or leave
      //     it with its stale value -- because the promotions didn't
      //     result in any younger refs on that card. Of these two
      //     cases, the latter will be covered in Case 1a during
      //     a subsequent scan. To deal with the former case, we need
      //     to further consider how we deal with a stale value of
      //     cur_younger_gen_and_prev_non_clean in our case analysis
      //     below. This we do in Case 3 below. [End Case 1b]
      //   [End Case 1]
      // o Case 2. If the stale value corresponds to cur_younger_gen being
      //   a value not necessarily written by a current promotion, the
      //   card will not be scanned by the younger refs scanning code.
      //   (This is OK since as we argued above such cards cannot contain
      //   any younger refs.) The result is that this value will be
      //   treated as a prev_younger_gen value in a subsequent collection,
      //   which is addressed in Case 1 above. [End Case 2]
      // o Case 3. We here consider the "derivative" case from Case 1b. above
      //   because of which we may find a stale
      //   cur_younger_gen_and_prev_non_clean card value in the table.
      //   Once again, as in Case 1, we consider two subcases, depending
      //   on whether the card lies in the occupied or unoccupied part
      //   of the space at the start of the young collection.
      //   o Case 3a. Let us say the card is in the occupied part of
      //     the old gen at the start of the young collection. In that
      //     case, the card will be scanned by the younger refs scanning
      //     code which will set it to cur_younger_gen. In a subsequent
      //     scan, the card will be considered again and get its final
      //     correct value. [End Case 3a]
      //   o Case 3b. Now consider the case where the card is in the
      //     unoccupied part of the old gen, and is occupied as a result
      //     of promotions during this young gc. In that case,
      //     the card will not be scanned for younger refs. The presence
      //     of newly promoted objects on the card will then result in
      //     its keeping the cur_younger_gen_and_prev_non_clean
      //     value, which we have dealt with in Case 3 here. [End Case 3b]
      //   [End Case 3]
      //
      // (Please refer to the code in the helper class
      // ClearNoncleanCardWrapper and in CardTable for details.)
      //
      // The informal arguments above can be tightened into a formal
      // correctness proof and it behooves us to write up such a proof,
      // or to use model checking to prove that there are no lingering
      // concerns.
      //
      // Clearly because of Case 3b one cannot bound the time for
      // which a card will retain what we have called a "stale" value.
      // However, one can obtain a loose upper bound on the redundant
      // work as a result of such stale values. Note first that any
      // time a stale card lies in the occupied part of the space at
      // the start of the collection, it is scanned by younger refs
      // code and we can define a rank function on card values that
      // declines when this is so. Note also that when a card does not
      // lie in the occupied part of the space at the beginning of a
      // young collection, its rank can either decline or stay unchanged.
      // In this case, no extra work is done in terms of redundant
      // younger refs scanning of that card.
      // Then, the case analysis above reveals that, in the worst case,
      // any such stale card will be scanned unnecessarily at most twice.
      //
      // It is nonetheless advisable to try and get rid of some of this
      // redundant work in a subsequent (low priority) re-design of
      // the card-scanning code, if only to simplify the underlying
      // state machine analysis/proof. ysr 1/28/2002. XXX
      cur_entry++;
    }
  }
}

void CardTableRS::verify() {
  // At present, we only know how to verify the card table RS for
  // generational heaps.
  VerifyCTGenClosure blk(this);
  GenCollectedHeap::heap()->generation_iterate(&blk, false);
  CardTable::verify();
}

CardTableRS::CardTableRS(MemRegion whole_heap) :
  CardTable(whole_heap) { }

void CardTableRS::initialize() {
  CardTable::initialize();
}

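// Process the non-clean cards of mr: wrap cl in a DirtyCardToOopClosure and
// let ClearNoncleanCardWrapper find, clear and scan the dirty card ranges.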
void CardTableRS::non_clean_card_iterate(Space* sp,
                                         HeapWord* gen_boundary,
                                         MemRegion mr,
                                         OopIterateClosure* cl,
                                         CardTableRS* ct)
{
  if (mr.is_empty()) {
    return;
  }
  // clear_cl finds contiguous dirty ranges of cards to process and clear.

  DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), gen_boundary);
  ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);

  clear_cl.do_MemRegion(mr);
}

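// Delegate the young-generation membership test to the generational heap.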
bool CardTableRS::is_in_young(oop obj) const {
  return GenCollectedHeap::heap()->is_in_young(obj);
}