GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/memory/cardTableModRefBS.hpp
/*
 * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_CARDTABLEMODREFBS_HPP
#define SHARE_VM_MEMORY_CARDTABLEMODREFBS_HPP

#include "memory/modRefBarrierSet.hpp"
#include "oops/oop.hpp"
#include "oops/oop.inline2.hpp"

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration).

// As it currently stands, this barrier is *imprecise*: when a ref field in
// an object "o" is modified, the card table entry for the card containing
// the head of "o" is dirtied, not necessarily the card containing the
// modified field itself. For object arrays, however, the barrier *is*
// precise; only the card containing the modified element is dirtied.
// Any MemRegionClosures used to scan dirty cards should take these
// considerations into account.

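// [Editor's illustration, not in the original header] A worked example of
// the imprecision described above, assuming the default card_shift of 9
// (512-byte cards): if an object "o" starts at 0x1000 and a ref field of
// "o" at 0x1300 is updated, the barrier dirties the card for the object
// head, index 0x1000 >> 9 = 8, even though the field lies on card
// 0x1300 >> 9 = 9. A precise (object-array) barrier would dirty card 9.
// Scanning closures therefore cannot assume the modified word lies on the
// dirty card itself.
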
class Generation;
class OopsInGenClosure;
class DirtyCardToOopClosure;
class ClearNoncleanCardWrapper;

class CardTableModRefBS: public ModRefBarrierSet {
  // Some classes get to look at some private stuff.
  friend class BytecodeInterpreter;
  friend class VMStructs;
  friend class CardTableRS;
  friend class CheckForUnmarkedOops; // Needs access to raw card bytes.
  friend class SharkBuilder;
#ifndef PRODUCT
  // For debugging.
  friend class GuaranteeNotModClosure;
#endif
 protected:

  enum CardValues {
    clean_card             = -1,
    // The mask contains zeros in places for all other values.
    clean_card_mask        = clean_card - 31,

    dirty_card             =  0,
    precleaned_card        =  1,
    claimed_card           =  2,
    deferred_card          =  4,
    last_card              =  8,
    CT_MR_BS_last_reserved = 16
  };

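  // [Editor's illustration, not in the original header] Interpreted as
  // signed bytes, clean_card is -1 (0xff) and clean_card_mask is -32 (0xe0).
  // All the other values above fit in the low five bits, so a test such as
  //
  //   bool is_clean = (cv & clean_card_mask) == clean_card_mask;
  //
  // matches only a clean card among the values of this enum.
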
  // a word's worth (row) of clean card values
  static const intptr_t clean_card_row = (intptr_t)(-1);

  // dirty and precleaned are equivalent wrt younger_refs_iter.
  static bool card_is_dirty_wrt_gen_iter(jbyte cv) {
    return cv == dirty_card || cv == precleaned_card;
  }

  // Returns "true" iff the value "cv" will cause the card containing it
  // to be scanned in the current traversal. May be overridden by
  // subtypes.
  virtual bool card_will_be_scanned(jbyte cv) {
    return CardTableModRefBS::card_is_dirty_wrt_gen_iter(cv);
  }

  // Returns "true" iff the value "cv" may have represented a dirty card at
  // some point.
  virtual bool card_may_have_been_dirty(jbyte cv) {
    return card_is_dirty_wrt_gen_iter(cv);
  }

  // The declaration order of these const fields is important; see the
  // constructor before changing.
  const MemRegion _whole_heap;       // the region covered by the card table
  size_t          _guard_index;      // index of very last element in the card
                                     // table; it is set to a guard value
                                     // (last_card) and should never be modified
  size_t          _last_valid_index; // index of the last valid element
  const size_t    _page_size;        // page size used when mapping _byte_map
  size_t          _byte_map_size;    // in bytes
  jbyte*          _byte_map;         // the card marking array

  int _cur_covered_regions;
  // The covered regions should be in address order.
  MemRegion* _covered;
  // The committed regions correspond one-to-one to the covered regions.
  // They represent the card-table memory that has been committed to service
  // the corresponding covered region. It may be that the committed region
  // for one covered region corresponds to a larger region because of
  // page-size roundings. Thus, a committed region for one covered region
  // may actually extend onto the card-table space for the next covered
  // region.
  MemRegion* _committed;

  // The last card is a guard card, and we commit the page for it so
  // we can use the card for verification purposes. We make sure we never
  // uncommit the MemRegion for that page.
  MemRegion _guard_region;

 protected:
  // Initialization utilities; covered_words is the size of the covered region
  // in, um, words.
  inline size_t cards_required(size_t covered_words) {
    // Add one for a guard card, used to detect errors.
    const size_t words = align_size_up(covered_words, card_size_in_words);
    return words / card_size_in_words + 1;
  }

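  // [Editor's illustration, not in the original header] With 8-byte
  // HeapWords and 512-byte cards, card_size_in_words is 64; a 1 MB covered
  // region (131072 words) thus needs 131072 / 64 + 1 = 2049 card bytes,
  // the "+ 1" being the guard card.
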
  inline size_t compute_byte_map_size();

  // Finds and returns the index of the region, if any, to which the given
  // region would be contiguous. If none exists, assigns a new region and
  // returns its index. Requires that no more than the maximum number of
  // covered regions defined in the constructor are ever in use.
  int find_covering_region_by_base(HeapWord* base);

  // Same as above, but finds the region containing the given address
  // instead of starting at a given base address.
  int find_covering_region_containing(HeapWord* addr);

  // Resize one of the regions covered by the remembered set.
  virtual void resize_covered_region(MemRegion new_region);

  // Returns the leftmost end of a committed region corresponding to a
  // covered region before covered region "ind", or else "NULL" if "ind" is
  // the first covered region.
  HeapWord* largest_prev_committed_end(int ind) const;

  // Returns the part of the region mr that doesn't intersect with
  // any committed region other than self. Used to prevent uncommitting
  // regions that are also committed by other regions. Also protects
  // against uncommitting the guard region.
  MemRegion committed_unique_to_self(int self, MemRegion mr) const;

  // Mapping from address to card marking array entry
  jbyte* byte_for(const void* p) const {
    assert(_whole_heap.contains(p),
           err_msg("Attempt to access p = " PTR_FORMAT " out of bounds of "
                   " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
                   p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end())));
    jbyte* result = &byte_map_base[uintptr_t(p) >> card_shift];
    assert(result >= _byte_map && result < _byte_map + _byte_map_size,
           "out of bounds accessor for card marking array");
    return result;
  }

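  // [Editor's illustration, not in the original header] byte_map_base is
  // pre-biased by the heap's low boundary so the full address can be used
  // as an index. With a hypothetical heap base of 0x700000000,
  // byte_map_base == _byte_map - (0x700000000 >> card_shift), so
  // byte_for((void*)0x700000200) == _byte_map + 1, the second card.
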
  // The card table byte one after the card marking array
  // entry for the argument address. Typically used as the upper
  // bound for loops iterating through the card table.
  jbyte* byte_after(const void* p) const {
    return byte_for(p) + 1;
  }

  // Iterate over the portion of the card-table which covers the given
  // region mr in the given space and apply cl to any dirty sub-regions
  // of mr. Dirty cards are _not_ cleared by the iterator method itself,
  // but closures may arrange to do so on their own should they so wish.
  void non_clean_card_iterate_serial(MemRegion mr, MemRegionClosure* cl);

  // A variant of the above that will operate in a parallel mode if
  // worker threads are available, and clear the dirty cards as it
  // processes them.
  // XXX ??? MemRegionClosure above vs OopsInGenClosure below XXX
  // XXX some new_dcto_cl's take OopClosure's, plus as above there are
  // some MemRegionClosures. Clean this up everywhere. XXX
  void non_clean_card_iterate_possibly_parallel(Space* sp, MemRegion mr,
                                                OopsInGenClosure* cl, CardTableRS* ct);

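  // [Editor's note, not in the original header] In the parallel case the
  // card table is divided into ParGCCardsPerStrideChunk-sized chunks that
  // are dealt out round-robin: the worker processing stride s visits the
  // chunks whose index modulo n_strides equals s (see process_stride below).
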
 private:
  // Work method used to implement non_clean_card_iterate_possibly_parallel()
  // above in the parallel case.
  void non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
                                            OopsInGenClosure* cl, CardTableRS* ct,
                                            int n_threads);

 protected:
  // Dirty the bytes corresponding to "mr" (not all of which must be
  // covered.)
  void dirty_MemRegion(MemRegion mr);

  // Clear (to clean_card) the bytes entirely contained within "mr" (not
  // all of which must be covered.)
  void clear_MemRegion(MemRegion mr);

  // *** Support for parallel card scanning.

  // This is an array, one element per covered region of the card table.
  // Each entry is itself an array, with one element per chunk in the
  // covered region. Each entry of these arrays is the lowest non-clean
  // card of the corresponding chunk containing part of an object from the
  // previous chunk, or else NULL.
  typedef jbyte* CardPtr;
  typedef CardPtr* CardArr;
  CardArr* _lowest_non_clean;
  size_t*  _lowest_non_clean_chunk_size;
  uintptr_t* _lowest_non_clean_base_chunk_index;
  volatile int* _last_LNC_resizing_collection;

  // Initializes "lowest_non_clean" to point to the array for the region
  // covering "sp", and "lowest_non_clean_base_chunk_index" to the chunk
  // index corresponding to the first element of that array.
  // Ensures that these arrays are of sufficient size, allocating if necessary.
  // May be called by several threads concurrently.
  void get_LNC_array_for_space(Space* sp,
                               jbyte**& lowest_non_clean,
                               uintptr_t& lowest_non_clean_base_chunk_index,
                               size_t& lowest_non_clean_chunk_size);

  // Returns the number of chunks necessary to cover "mr".
  size_t chunks_to_cover(MemRegion mr) {
    return (size_t)(addr_to_chunk_index(mr.last()) -
                    addr_to_chunk_index(mr.start()) + 1);
  }

  // Returns the index of the chunk in a stride which
  // covers the given address.
  uintptr_t addr_to_chunk_index(const void* addr) {
    uintptr_t card = (uintptr_t) byte_for(addr);
    return card / ParGCCardsPerStrideChunk;
  }

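  // [Editor's illustration, not in the original header] With the default
  // ParGCCardsPerStrideChunk of 256 and 512-byte cards, one chunk spans
  // 256 * 512 = 128 KB of heap. Note the index is derived from the card's
  // *address* in the card table, so it is only meaningful when compared
  // with other indices from the same table.
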
  // Apply cl, which must either itself apply dcto_cl or be dcto_cl,
  // to the cards in the stride (of n_strides) within the given space.
  void process_stride(Space* sp,
                      MemRegion used,
                      jint stride, int n_strides,
                      OopsInGenClosure* cl,
                      CardTableRS* ct,
                      jbyte** lowest_non_clean,
                      uintptr_t lowest_non_clean_base_chunk_index,
                      size_t lowest_non_clean_chunk_size);

  // Makes sure that chunk boundaries are handled appropriately, by
  // adjusting the min_done of dcto_cl, and by using a special card-table
  // value to indicate how min_done should be set.
  void process_chunk_boundaries(Space* sp,
                                DirtyCardToOopClosure* dcto_cl,
                                MemRegion chunk_mr,
                                MemRegion used,
                                jbyte** lowest_non_clean,
                                uintptr_t lowest_non_clean_base_chunk_index,
                                size_t lowest_non_clean_chunk_size);

 public:
  // Constants
  enum SomePublicConstants {
    card_shift         = 9,
    card_size          = 1 << card_shift,
    card_size_in_words = card_size / sizeof(HeapWord)
  };

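  // [Editor's illustration, not in the original header] card_shift = 9
  // gives 512-byte cards; on a 64-bit VM (8-byte HeapWords) that is
  // card_size_in_words = 512 / 8 = 64. A 4 GB heap therefore needs an
  // 8 MB card table (one byte per 512 bytes of heap).
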
  static int clean_card_val()      { return clean_card; }
  static int clean_card_mask_val() { return clean_card_mask; }
  static int dirty_card_val()      { return dirty_card; }
  static int claimed_card_val()    { return claimed_card; }
  static int precleaned_card_val() { return precleaned_card; }
  static int deferred_card_val()   { return deferred_card; }

  // For RTTI simulation.
  bool is_a(BarrierSet::Name bsn) {
    return bsn == BarrierSet::CardTableModRef || ModRefBarrierSet::is_a(bsn);
  }

  CardTableModRefBS(MemRegion whole_heap, int max_covered_regions);
  ~CardTableModRefBS();

  virtual void initialize();

  // *** Barrier set functions.

  bool has_write_ref_pre_barrier() { return false; }

  // Record a reference update. Note that these versions are precise!
  // The scanning code has to handle the fact that the write barrier may be
  // either precise or imprecise. We make non-virtual inline variants of
  // these functions here for performance.
 protected:
  void write_ref_field_work(oop obj, size_t offset, oop newVal);
  virtual void write_ref_field_work(void* field, oop newVal, bool release = false);
 public:

  bool has_write_ref_array_opt() { return true; }
  bool has_write_region_opt() { return true; }

  inline void inline_write_region(MemRegion mr) {
    dirty_MemRegion(mr);
  }
 protected:
  void write_region_work(MemRegion mr) {
    inline_write_region(mr);
  }
 public:

  inline void inline_write_ref_array(MemRegion mr) {
    dirty_MemRegion(mr);
  }
 protected:
  void write_ref_array_work(MemRegion mr) {
    inline_write_ref_array(mr);
  }
 public:

  bool is_aligned(HeapWord* addr) {
    return is_card_aligned(addr);
  }

  // *** Card-table-barrier-specific things.

  template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal) {}

  template <class T> inline void inline_write_ref_field(T* field, oop newVal, bool release) {
    jbyte* byte = byte_for((void*)field);
    if (release) {
      // Perform a releasing store if requested.
      OrderAccess::release_store((volatile jbyte*) byte, dirty_card);
    } else {
      *byte = dirty_card;
    }
  }

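  // [Editor's illustration, not in the original header] This is the classic
  // card-table post-barrier; conceptually each reference store expands to
  //
  //   *field = newVal;                                      // the store
  //   byte_map_base[(uintptr_t)field >> card_shift] = dirty_card;
  //
  // The "release" variant orders the field store before the card store, for
  // collectors that examine cards while mutators are still running.
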
  // These are used by G1, when it uses the card table as a temporary data
  // structure for card claiming.
  bool is_card_dirty(size_t card_index) {
    return _byte_map[card_index] == dirty_card_val();
  }

  void mark_card_dirty(size_t card_index) {
    _byte_map[card_index] = dirty_card_val();
  }

  bool is_card_clean(size_t card_index) {
    return _byte_map[card_index] == clean_card_val();
  }

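  // [Editor's illustration, not in the original header] These helpers take a
  // card index rather than a heap address; a typical (hypothetical) pairing
  // with index_for below:
  //
  //   size_t idx = index_for(p);      // card index for heap word p
  //   if (is_card_clean(idx)) mark_card_dirty(idx);
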
  // Card marking array base (adjusted for heap low boundary)
  // This would be the 0th element of _byte_map, if the heap started at 0x0.
  // But since the heap starts at some higher address, this points to somewhere
  // before the beginning of the actual _byte_map.
  jbyte* byte_map_base;

  // Return true if "p" is at the start of a card.
  bool is_card_aligned(HeapWord* p) {
    jbyte* pcard = byte_for(p);
    return (addr_for(pcard) == p);
  }

  HeapWord* align_to_card_boundary(HeapWord* p) {
    jbyte* pcard = byte_for(p + card_size_in_words - 1);
    return addr_for(pcard);
  }

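  // [Editor's illustration, not in the original header]
  // align_to_card_boundary rounds *up*: with 64-word cards, a "p" at word
  // offset 70 goes through byte_for(p + 63), i.e. word 133, whose card
  // starts at word 128 -- the first boundary at or above p. A "p" already
  // on a card boundary maps to itself.
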
  // The kinds of precision a CardTableModRefBS may offer.
  enum PrecisionStyle {
    Precise,
    ObjHeadPreciseArray
  };

  // Tells what style of precision this card table offers.
  PrecisionStyle precision() {
    return ObjHeadPreciseArray; // Only one supported for now.
  }

  // ModRefBS functions.
  virtual void invalidate(MemRegion mr, bool whole_heap = false);
  void clear(MemRegion mr);
  void dirty(MemRegion mr);

  // *** Card-table-RemSet-specific things.

  // Invoke "cl.do_MemRegion" on a set of MemRegions that collectively
  // includes all the modified cards (expressing each card as a
  // MemRegion). Thus, several modified cards may be lumped into one
  // region. The regions are non-overlapping, and are visited in
  // *decreasing* address order. (This order aids with imprecise card
  // marking, where a dirty card may cause scanning, and summarization
  // marking, of objects that extend onto subsequent cards.)
  void mod_card_iterate(MemRegionClosure* cl) {
    non_clean_card_iterate_serial(_whole_heap, cl);
  }

  // Like the "mod_card_iterate" above, except only invokes the closure
  // for cards within the MemRegion "mr" (which is required to be
  // card-aligned and sized.)
  void mod_card_iterate(MemRegion mr, MemRegionClosure* cl) {
    non_clean_card_iterate_serial(mr, cl);
  }

  static uintx ct_max_alignment_constraint();

  // Apply closure "cl" to the dirty cards containing some part of
  // MemRegion "mr".
  void dirty_card_iterate(MemRegion mr, MemRegionClosure* cl);

  // Return the MemRegion corresponding to the first maximal run
  // of dirty cards lying completely within MemRegion mr.
  // If reset is "true", then sets those card table entries to the given
  // value.
  MemRegion dirty_card_range_after_reset(MemRegion mr, bool reset,
                                         int reset_val);

  // Provide read-only access to the card table array.
  const jbyte* byte_for_const(const void* p) const {
    return byte_for(p);
  }
  const jbyte* byte_after_const(const void* p) const {
    return byte_after(p);
  }

  // Mapping from card marking array entry to address of first word
  HeapWord* addr_for(const jbyte* p) const {
    assert(p >= _byte_map && p < _byte_map + _byte_map_size,
           "out of bounds access to card marking array");
    size_t delta = pointer_delta(p, byte_map_base, sizeof(jbyte));
    HeapWord* result = (HeapWord*) (delta << card_shift);
    assert(_whole_heap.contains(result),
           err_msg("Returning result = " PTR_FORMAT " out of bounds of "
                   " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
                   p2i(result), p2i(_whole_heap.start()), p2i(_whole_heap.end())));
    return result;
  }

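  // [Editor's illustration, not in the original header] addr_for inverts
  // byte_for for card-aligned addresses. Continuing the hypothetical heap
  // base of 0x700000000: for p == _byte_map + 1, pointer_delta(p,
  // byte_map_base) is (0x700000000 >> card_shift) + 1, and shifting left by
  // card_shift yields 0x700000200, the first word covered by that card.
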
  // Mapping from address to card marking array index.
  size_t index_for(void* p) {
    assert(_whole_heap.contains(p),
           err_msg("Attempt to access p = " PTR_FORMAT " out of bounds of "
                   " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
                   p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end())));
    return byte_for(p) - _byte_map;
  }

  const jbyte* byte_for_index(const size_t card_index) const {
    return _byte_map + card_index;
  }

  // Print a description of the memory for the barrier set
  virtual void print_on(outputStream* st) const;

  void verify();
  void verify_guard();

  // val_equals -> it will check that all cards covered by mr equal val
  // !val_equals -> it will check that all cards covered by mr do not equal val
  void verify_region(MemRegion mr, jbyte val, bool val_equals) PRODUCT_RETURN;
  void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN;
  void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;

  static size_t par_chunk_heapword_alignment() {
    return ParGCCardsPerStrideChunk * card_size_in_words;
  }

};

class CardTableRS;

// A specialization for the CardTableRS gen rem set.
class CardTableModRefBSForCTRS: public CardTableModRefBS {
  CardTableRS* _rs;
 protected:
  bool card_will_be_scanned(jbyte cv);
  bool card_may_have_been_dirty(jbyte cv);
 public:
  CardTableModRefBSForCTRS(MemRegion whole_heap,
                           int max_covered_regions) :
    CardTableModRefBS(whole_heap, max_covered_regions) {}

  void set_CTRS(CardTableRS* rs) { _rs = rs; }
};


#endif // SHARE_VM_MEMORY_CARDTABLEMODREFBS_HPP