PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
/*
 * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_HPP

#include "gc_implementation/parallelScavenge/objectStartArray.hpp"
#include "gc_implementation/parallelScavenge/parMarkBitMap.hpp"
#include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "gc_implementation/shared/mutableSpace.hpp"
#include "memory/sharedHeap.hpp"
#include "oops/oop.hpp"

class ParallelScavengeHeap;
class PSAdaptiveSizePolicy;
class PSYoungGen;
class PSOldGen;
class ParCompactionManager;
class ParallelTaskTerminator;
class PSParallelCompact;
class GCTaskManager;
class GCTaskQueue;
class PreGCValues;
class MoveAndUpdateClosure;
class RefProcTaskExecutor;
class ParallelOldTracer;
class STWGCTimer;

// The SplitInfo class holds the information needed to 'split' a source region
// so that the live data can be copied to two destination *spaces*. Normally,
// all the live data in a region is copied to a single destination space (e.g.,
// everything live in a region in eden is copied entirely into the old gen).
// However, when the heap is nearly full, all the live data in eden may not fit
// into the old gen. Copying only some of the regions from eden to old gen
// requires finding a region that does not contain a partial object (i.e., no
// live object crosses the region boundary) somewhere near the last object that
// does fit into the old gen. Since it's not always possible to find such a
// region, splitting is necessary for predictable behavior.
//
// A region is always split at the end of the partial object. This avoids
// additional tests when calculating the new location of a pointer, which is a
// very hot code path. The partial object and everything to its left will be
// copied to another space (call it dest_space_1). The live data to the right
// of the partial object will be copied either within the space itself, or to a
// different destination space (distinct from dest_space_1).
//
// Split points are identified during the summary phase, when region
// destinations are computed: data about the split, including the
// partial_object_size, is recorded in a SplitInfo record and the
// partial_object_size field in the summary data is set to zero. The zeroing is
// possible (and necessary) since the partial object will move to a different
// destination space than anything to its right, thus the partial object should
// not affect the locations of any objects to its right.
//
// The recorded data is used during the compaction phase, but only rarely: when
// the partial object on the split region will be copied across a destination
// region boundary. This test is made once each time a region is filled, and is
// a simple address comparison, so the overhead is negligible (see
// PSParallelCompact::first_src_addr()).
//
// Notes:
//
// Only regions with partial objects are split; a region without a partial
// object does not need any extra bookkeeping.
//
// At most one region is split per space, so the amount of data required is
// constant.
//
// A region is split only when the destination space would overflow. Once that
// happens, the destination space is abandoned and no other data (even from
// other source spaces) is targeted to that destination space. Abandoning the
// destination space may leave a somewhat large unused area at the end, if a
// large object caused the overflow.
//
// Future work:
//
// More bookkeeping would be required to continue to use the destination space.
// The most general solution would allow data from regions in two different
// source spaces to be "joined" in a single destination region. At the very
// least, additional code would be required in next_src_region() to detect the
// join and skip to an out-of-order source region. If the join region was also
// the last destination region to which a split region was copied (the most
// likely case), then additional work would be needed to get fill_region() to
// stop iteration and switch to a new source region at the right point. Basic
// idea would be to use a fake value for the top of the source space. It is
// doable, if a bit tricky.
//
// A simpler (but less general) solution would fill the remainder of the
// destination region with a dummy object and continue filling the next
// destination region.

class SplitInfo
116
{
117
public:
118
// Return true if this split info is valid (i.e., if a split has been
119
// recorded). The very first region cannot have a partial object and thus is
120
// never split, so 0 is the 'invalid' value.
121
bool is_valid() const { return _src_region_idx > 0; }
122
123
// Return true if this split holds data for the specified source region.
124
inline bool is_split(size_t source_region) const;
125
126
// The index of the split region, the size of the partial object on that
127
// region and the destination of the partial object.
128
size_t src_region_idx() const { return _src_region_idx; }
129
size_t partial_obj_size() const { return _partial_obj_size; }
130
HeapWord* destination() const { return _destination; }
131
132
// The destination count of the partial object referenced by this split
133
// (either 1 or 2). This must be added to the destination count of the
134
// remainder of the source region.
135
unsigned int destination_count() const { return _destination_count; }
136
137
// If a word within the partial object will be written to the first word of a
138
// destination region, this is the address of the destination region;
139
// otherwise this is NULL.
140
HeapWord* dest_region_addr() const { return _dest_region_addr; }
141
142
// If a word within the partial object will be written to the first word of a
143
// destination region, this is the address of that word within the partial
144
// object; otherwise this is NULL.
145
HeapWord* first_src_addr() const { return _first_src_addr; }
146
147
// Record the data necessary to split the region src_region_idx.
148
void record(size_t src_region_idx, size_t partial_obj_size,
149
HeapWord* destination);
150
151
void clear();
152
153
DEBUG_ONLY(void verify_clear();)
154
155
private:
156
size_t _src_region_idx;
157
size_t _partial_obj_size;
158
HeapWord* _destination;
159
unsigned int _destination_count;
160
HeapWord* _dest_region_addr;
161
HeapWord* _first_src_addr;
162
};
163
164
inline bool SplitInfo::is_split(size_t region_idx) const
165
{
166
return _src_region_idx == region_idx && is_valid();
167
}
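// Illustrative sketch (not part of the original source): how summary-phase
// code might record a split and later test a source region against it. The
// region index, size, and the variables dest / src_region_idx below are
// hypothetical values chosen only for the example.
//
//   SplitInfo split;
//   split.clear();
//   split.record(17, 12, dest);               // region 17, 12-word partial obj
//   if (split.is_split(src_region_idx)) {
//     HeapWord* partial_dest  = split.destination();
//     size_t    partial_words = split.partial_obj_size();
//     // ... use partial_dest / partial_words when filling the split region ...
//   }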
168
169
class SpaceInfo
170
{
171
public:
172
MutableSpace* space() const { return _space; }
173
174
// Where the free space will start after the collection. Valid only after the
175
// summary phase completes.
176
HeapWord* new_top() const { return _new_top; }
177
178
// Allows new_top to be set.
179
HeapWord** new_top_addr() { return &_new_top; }
180
181
// Where the smallest allowable dense prefix ends (used only for perm gen).
182
HeapWord* min_dense_prefix() const { return _min_dense_prefix; }
183
184
// Where the dense prefix ends, or the compacted region begins.
185
HeapWord* dense_prefix() const { return _dense_prefix; }
186
187
// The start array for the (generation containing the) space, or NULL if there
188
// is no start array.
189
ObjectStartArray* start_array() const { return _start_array; }
190
191
SplitInfo& split_info() { return _split_info; }
192
193
void set_space(MutableSpace* s) { _space = s; }
194
void set_new_top(HeapWord* addr) { _new_top = addr; }
195
void set_min_dense_prefix(HeapWord* addr) { _min_dense_prefix = addr; }
196
void set_dense_prefix(HeapWord* addr) { _dense_prefix = addr; }
197
void set_start_array(ObjectStartArray* s) { _start_array = s; }
198
199
void publish_new_top() const { _space->set_top(_new_top); }
200
201
private:
202
MutableSpace* _space;
203
HeapWord* _new_top;
204
HeapWord* _min_dense_prefix;
205
HeapWord* _dense_prefix;
206
ObjectStartArray* _start_array;
207
SplitInfo _split_info;
208
};
209
210
class ParallelCompactData
211
{
212
public:
213
// Sizes are in HeapWords, unless indicated otherwise.
214
static const size_t Log2RegionSize;
215
static const size_t RegionSize;
216
static const size_t RegionSizeBytes;
217
218
// Mask for the bits in a size_t to get an offset within a region.
219
static const size_t RegionSizeOffsetMask;
220
// Mask for the bits in a pointer to get an offset within a region.
221
static const size_t RegionAddrOffsetMask;
222
// Mask for the bits in a pointer to get the address of the start of a region.
223
static const size_t RegionAddrMask;
224
225
static const size_t Log2BlockSize;
226
static const size_t BlockSize;
227
static const size_t BlockSizeBytes;
228
229
static const size_t BlockSizeOffsetMask;
230
static const size_t BlockAddrOffsetMask;
231
static const size_t BlockAddrMask;
232
233
static const size_t BlocksPerRegion;
234
static const size_t Log2BlocksPerRegion;
235
236
class RegionData
237
{
238
public:
239
// Destination address of the region.
240
HeapWord* destination() const { return _destination; }
241
242
// The first region containing data destined for this region.
243
size_t source_region() const { return _source_region; }
244
245
// The object (if any) starting in this region and ending in a different
246
// region that could not be updated during the main (parallel) compaction
247
// phase. This is different from _partial_obj_addr, which is an object that
248
// extends onto a source region. However, the two uses do not overlap in
249
// time, so the same field is used to save space.
250
HeapWord* deferred_obj_addr() const { return _partial_obj_addr; }
251
252
// The starting address of the partial object extending onto the region.
253
HeapWord* partial_obj_addr() const { return _partial_obj_addr; }
254
255
// Size of the partial object extending onto the region (words).
256
size_t partial_obj_size() const { return _partial_obj_size; }
257
258
// Size of live data that lies within this region due to objects that start
259
// in this region (words). This does not include the partial object
260
// extending onto the region (if any), or the part of an object that extends
261
// onto the next region (if any).
262
size_t live_obj_size() const { return _dc_and_los & los_mask; }
263
264
// Total live data that lies within the region (words).
265
size_t data_size() const { return partial_obj_size() + live_obj_size(); }
266
267
// The destination_count is the number of other regions to which data from
268
// this region will be copied. At the end of the summary phase, the valid
269
// values of destination_count are
270
//
271
// 0 - data from the region will be compacted completely into itself, or the
272
// region is empty. The region can be claimed and then filled.
273
// 1 - data from the region will be compacted into 1 other region; some
274
// data from the region may also be compacted into the region itself.
275
// 2 - data from the region will be copied to 2 other regions.
276
//
277
// During compaction as regions are emptied, the destination_count is
278
// decremented (atomically) and when it reaches 0, it can be claimed and
279
// then filled.
280
//
281
// A region is claimed for processing by atomically changing the
282
// destination_count to the claimed value (dc_claimed). After a region has
283
// been filled, the destination_count should be set to the completed value
284
// (dc_completed).
285
inline uint destination_count() const;
286
inline uint destination_count_raw() const;
287
288
// Whether the block table for this region has been filled.
289
inline bool blocks_filled() const;
290
291
// Number of times the block table was filled.
292
DEBUG_ONLY(inline size_t blocks_filled_count() const;)
293
294
// The location of the java heap data that corresponds to this region.
295
inline HeapWord* data_location() const;
296
297
// The highest address referenced by objects in this region.
298
inline HeapWord* highest_ref() const;
299
300
// Whether this region is available to be claimed, has been claimed, or has
301
// been completed.
302
//
303
// Minor subtlety: claimed() returns true if the region is marked
304
// completed(), which is desirable since a region must be claimed before it
305
// can be completed.
306
bool available() const { return _dc_and_los < dc_one; }
307
bool claimed() const { return _dc_and_los >= dc_claimed; }
308
bool completed() const { return _dc_and_los >= dc_completed; }
309
310
// These are not atomic.
311
void set_destination(HeapWord* addr) { _destination = addr; }
312
void set_source_region(size_t region) { _source_region = region; }
313
void set_deferred_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; }
314
void set_partial_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; }
315
void set_partial_obj_size(size_t words) {
316
_partial_obj_size = (region_sz_t) words;
317
}
318
inline void set_blocks_filled();
319
320
inline void set_destination_count(uint count);
321
inline void set_live_obj_size(size_t words);
322
inline void set_data_location(HeapWord* addr);
323
inline void set_completed();
324
inline bool claim_unsafe();
325
326
// These are atomic.
327
inline void add_live_obj(size_t words);
328
inline void set_highest_ref(HeapWord* addr);
329
inline void decrement_destination_count();
330
inline bool claim();
331
332
private:
333
// The type used to represent object sizes within a region.
334
typedef uint region_sz_t;
335
336
// Constants for manipulating the _dc_and_los field, which holds both the
337
// destination count and live obj size. The live obj size lives at the
338
// least significant end so no masking is necessary when adding.
339
static const region_sz_t dc_shift; // Shift amount.
340
static const region_sz_t dc_mask; // Mask for destination count.
341
static const region_sz_t dc_one; // 1, shifted appropriately.
342
static const region_sz_t dc_claimed; // Region has been claimed.
343
static const region_sz_t dc_completed; // Region has been completed.
344
static const region_sz_t los_mask; // Mask for live obj size.
345
346
HeapWord* _destination;
347
size_t _source_region;
348
HeapWord* _partial_obj_addr;
349
region_sz_t _partial_obj_size;
350
region_sz_t volatile _dc_and_los;
351
bool volatile _blocks_filled;
352
353
#ifdef ASSERT
354
size_t _blocks_filled_count; // Number of block table fills.
355
356
// These enable optimizations that are only partially implemented. Use
357
// debug builds to prevent the code fragments from breaking.
358
HeapWord* _data_location;
359
HeapWord* _highest_ref;
360
#endif // #ifdef ASSERT
361
362
#ifdef ASSERT
363
public:
364
uint _pushed; // 0 until region is pushed onto a stack
365
private:
366
#endif
367
};
368
369
// "Blocks" allow shorter sections of the bitmap to be searched. Each Block
370
// holds an offset, which is the amount of live data in the Region to the left
371
// of the first live object that starts in the Block.
372
class BlockData
373
{
374
public:
375
typedef unsigned short int blk_ofs_t;
376
377
blk_ofs_t offset() const { return _offset; }
378
void set_offset(size_t val) { _offset = (blk_ofs_t)val; }
379
380
private:
381
blk_ofs_t _offset;
382
};
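// Illustrative sketch (not part of the original source): how the block offset
// is intended to combine with the mark bitmap when computing a new address
// (see calc_new_pointer() below). The steps are a simplification, not the
// exact implementation:
//
//   // For a live word q in region R:
//   //   new_addr(q) = region(R)->destination()
//   //                 + offset of q's block (live data in R to the left of the
//   //                   first live object starting in the block)
//   //                 + live words between that object and q (bitmap scan)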
383
384
public:
385
ParallelCompactData();
386
bool initialize(MemRegion covered_region);
387
388
size_t region_count() const { return _region_count; }
389
size_t reserved_byte_size() const { return _reserved_byte_size; }
390
391
// Convert region indices to/from RegionData pointers.
392
inline RegionData* region(size_t region_idx) const;
393
inline size_t region(const RegionData* const region_ptr) const;
394
395
size_t block_count() const { return _block_count; }
396
inline BlockData* block(size_t block_idx) const;
397
inline size_t block(const BlockData* block_ptr) const;
398
399
void add_obj(HeapWord* addr, size_t len);
400
void add_obj(oop p, size_t len) { add_obj((HeapWord*)p, len); }
401
402
// Fill in the regions covering [beg, end) so that no data moves; i.e., the
403
// destination of region n is simply the start of region n. The argument beg
404
// must be region-aligned; end need not be.
405
void summarize_dense_prefix(HeapWord* beg, HeapWord* end);
406
407
HeapWord* summarize_split_space(size_t src_region, SplitInfo& split_info,
408
HeapWord* destination, HeapWord* target_end,
409
HeapWord** target_next);
410
bool summarize(SplitInfo& split_info,
411
HeapWord* source_beg, HeapWord* source_end,
412
HeapWord** source_next,
413
HeapWord* target_beg, HeapWord* target_end,
414
HeapWord** target_next);
415
416
void clear();
417
void clear_range(size_t beg_region, size_t end_region);
418
void clear_range(HeapWord* beg, HeapWord* end) {
419
clear_range(addr_to_region_idx(beg), addr_to_region_idx(end));
420
}
421
422
// Return the number of words between addr and the start of the region
423
// containing addr.
424
inline size_t region_offset(const HeapWord* addr) const;
425
426
// Convert addresses to/from a region index or region pointer.
427
inline size_t addr_to_region_idx(const HeapWord* addr) const;
428
inline RegionData* addr_to_region_ptr(const HeapWord* addr) const;
429
inline HeapWord* region_to_addr(size_t region) const;
430
inline HeapWord* region_to_addr(size_t region, size_t offset) const;
431
inline HeapWord* region_to_addr(const RegionData* region) const;
432
433
inline HeapWord* region_align_down(HeapWord* addr) const;
434
inline HeapWord* region_align_up(HeapWord* addr) const;
435
inline bool is_region_aligned(HeapWord* addr) const;
436
437
// Analogous to region_offset() for blocks.
438
size_t block_offset(const HeapWord* addr) const;
439
size_t addr_to_block_idx(const HeapWord* addr) const;
440
size_t addr_to_block_idx(const oop obj) const {
441
return addr_to_block_idx((HeapWord*) obj);
442
}
443
inline BlockData* addr_to_block_ptr(const HeapWord* addr) const;
444
inline HeapWord* block_to_addr(size_t block) const;
445
inline size_t region_to_block_idx(size_t region) const;
446
447
inline HeapWord* block_align_down(HeapWord* addr) const;
448
inline HeapWord* block_align_up(HeapWord* addr) const;
449
inline bool is_block_aligned(HeapWord* addr) const;
450
451
// Return the address one past the end of the partial object.
452
HeapWord* partial_obj_end(size_t region_idx) const;
453
454
// Return the location of the object after compaction.
455
HeapWord* calc_new_pointer(HeapWord* addr);
456
457
HeapWord* calc_new_pointer(oop p) {
458
return calc_new_pointer((HeapWord*) p);
459
}
460
461
#ifdef ASSERT
462
void verify_clear(const PSVirtualSpace* vspace);
463
void verify_clear();
464
#endif // #ifdef ASSERT
465
466
private:
467
bool initialize_block_data();
468
bool initialize_region_data(size_t region_size);
469
PSVirtualSpace* create_vspace(size_t count, size_t element_size);
470
471
private:
472
HeapWord* _region_start;
473
#ifdef ASSERT
474
HeapWord* _region_end;
475
#endif // #ifdef ASSERT
476
477
PSVirtualSpace* _region_vspace;
478
size_t _reserved_byte_size;
479
RegionData* _region_data;
480
size_t _region_count;
481
482
PSVirtualSpace* _block_vspace;
483
BlockData* _block_data;
484
size_t _block_count;
485
};
486
487
inline uint
488
ParallelCompactData::RegionData::destination_count_raw() const
489
{
490
return _dc_and_los & dc_mask;
491
}
492
493
inline uint
494
ParallelCompactData::RegionData::destination_count() const
495
{
496
return destination_count_raw() >> dc_shift;
497
}
498
499
inline bool
500
ParallelCompactData::RegionData::blocks_filled() const
501
{
502
bool result = _blocks_filled;
503
OrderAccess::acquire();
504
return result;
505
}
506
507
#ifdef ASSERT
508
inline size_t
509
ParallelCompactData::RegionData::blocks_filled_count() const
510
{
511
return _blocks_filled_count;
512
}
513
#endif // #ifdef ASSERT
514
515
inline void
516
ParallelCompactData::RegionData::set_blocks_filled()
517
{
518
OrderAccess::release();
519
_blocks_filled = true;
520
// Debug builds count the number of times the table was filled.
521
DEBUG_ONLY(Atomic::inc_ptr(&_blocks_filled_count));
522
}
523
524
inline void
525
ParallelCompactData::RegionData::set_destination_count(uint count)
526
{
527
assert(count <= (dc_completed >> dc_shift), "count too large");
528
const region_sz_t live_sz = (region_sz_t) live_obj_size();
529
_dc_and_los = (count << dc_shift) | live_sz;
530
}
531
532
inline void ParallelCompactData::RegionData::set_live_obj_size(size_t words)
533
{
534
assert(words <= los_mask, "would overflow");
535
_dc_and_los = destination_count_raw() | (region_sz_t)words;
536
}
537
538
inline void ParallelCompactData::RegionData::decrement_destination_count()
539
{
540
assert(_dc_and_los < dc_claimed, "already claimed");
541
assert(_dc_and_los >= dc_one, "count would go negative");
542
Atomic::add((int)dc_mask, (volatile int*)&_dc_and_los);
543
}
544
545
inline HeapWord* ParallelCompactData::RegionData::data_location() const
546
{
547
DEBUG_ONLY(return _data_location;)
548
NOT_DEBUG(return NULL;)
549
}
550
551
inline HeapWord* ParallelCompactData::RegionData::highest_ref() const
552
{
553
DEBUG_ONLY(return _highest_ref;)
554
NOT_DEBUG(return NULL;)
555
}
556
557
inline void ParallelCompactData::RegionData::set_data_location(HeapWord* addr)
558
{
559
DEBUG_ONLY(_data_location = addr;)
560
}
561
562
inline void ParallelCompactData::RegionData::set_completed()
563
{
564
assert(claimed(), "must be claimed first");
565
_dc_and_los = dc_completed | (region_sz_t) live_obj_size();
566
}
567
568
// MT-unsafe claiming of a region. Should only be used during single threaded
569
// execution.
570
inline bool ParallelCompactData::RegionData::claim_unsafe()
571
{
572
if (available()) {
573
_dc_and_los |= dc_claimed;
574
return true;
575
}
576
return false;
577
}
578
579
inline void ParallelCompactData::RegionData::add_live_obj(size_t words)
580
{
581
assert(words <= (size_t)los_mask - live_obj_size(), "overflow");
582
Atomic::add((int) words, (volatile int*) &_dc_and_los);
583
}
584
585
inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr)
586
{
587
#ifdef ASSERT
588
HeapWord* tmp = _highest_ref;
589
while (addr > tmp) {
590
tmp = (HeapWord*)Atomic::cmpxchg_ptr(addr, &_highest_ref, tmp);
591
}
592
#endif // #ifdef ASSERT
593
}
594
595
inline bool ParallelCompactData::RegionData::claim()
596
{
597
const int los = (int) live_obj_size();
598
const int old = Atomic::cmpxchg(dc_claimed | los,
599
(volatile int*) &_dc_and_los, los);
600
return old == los;
601
}
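// Illustrative sketch (not part of the original source): the _dc_and_los
// packing used above, with a hypothetical shift value chosen only for the
// example (the real dc_* and los_mask constants are defined in
// psParallelCompact.cpp).
//
//   const uint k_shift    = 27;                      // hypothetical dc_shift
//   const uint k_los_mask = (1U << k_shift) - 1;     // hypothetical los_mask
//   uint packed = (2U << k_shift) | 100U;            // dc == 2, los == 100 words
//   uint dc  = (packed & ~k_los_mask) >> k_shift;    // == 2
//   uint los =  packed &  k_los_mask;                // == 100
//
// claim() succeeds for exactly one thread because the compare-and-swap only
// installs dc_claimed when the field still holds the bare live obj size,
// i.e., the destination count has reached zero and no other thread has
// claimed the region yet.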
602
603
inline ParallelCompactData::RegionData*
604
ParallelCompactData::region(size_t region_idx) const
605
{
606
assert(region_idx <= region_count(), "bad arg");
607
return _region_data + region_idx;
608
}
609
610
inline size_t
611
ParallelCompactData::region(const RegionData* const region_ptr) const
612
{
613
assert(region_ptr >= _region_data, "bad arg");
614
assert(region_ptr <= _region_data + region_count(), "bad arg");
615
return pointer_delta(region_ptr, _region_data, sizeof(RegionData));
616
}
617
618
inline ParallelCompactData::BlockData*
619
ParallelCompactData::block(size_t n) const {
620
assert(n < block_count(), "bad arg");
621
return _block_data + n;
622
}
623
624
inline size_t
625
ParallelCompactData::region_offset(const HeapWord* addr) const
626
{
627
assert(addr >= _region_start, "bad addr");
628
assert(addr <= _region_end, "bad addr");
629
return (size_t(addr) & RegionAddrOffsetMask) >> LogHeapWordSize;
630
}
631
632
inline size_t
633
ParallelCompactData::addr_to_region_idx(const HeapWord* addr) const
634
{
635
assert(addr >= _region_start, "bad addr");
636
assert(addr <= _region_end, "bad addr");
637
return pointer_delta(addr, _region_start) >> Log2RegionSize;
638
}
639
640
inline ParallelCompactData::RegionData*
641
ParallelCompactData::addr_to_region_ptr(const HeapWord* addr) const
642
{
643
return region(addr_to_region_idx(addr));
644
}
645
646
inline HeapWord*
647
ParallelCompactData::region_to_addr(size_t region) const
648
{
649
assert(region <= _region_count, "region out of range");
650
return _region_start + (region << Log2RegionSize);
651
}
652
653
inline HeapWord*
654
ParallelCompactData::region_to_addr(const RegionData* region) const
655
{
656
return region_to_addr(pointer_delta(region, _region_data,
657
sizeof(RegionData)));
658
}
659
660
inline HeapWord*
661
ParallelCompactData::region_to_addr(size_t region, size_t offset) const
662
{
663
assert(region <= _region_count, "region out of range");
664
assert(offset < RegionSize, "offset too big"); // This may be too strict.
665
return region_to_addr(region) + offset;
666
}
667
668
inline HeapWord*
669
ParallelCompactData::region_align_down(HeapWord* addr) const
670
{
671
assert(addr >= _region_start, "bad addr");
672
assert(addr < _region_end + RegionSize, "bad addr");
673
return (HeapWord*)(size_t(addr) & RegionAddrMask);
674
}
675
676
inline HeapWord*
677
ParallelCompactData::region_align_up(HeapWord* addr) const
678
{
679
assert(addr >= _region_start, "bad addr");
680
assert(addr <= _region_end, "bad addr");
681
return region_align_down(addr + RegionSizeOffsetMask);
682
}
683
684
inline bool
685
ParallelCompactData::is_region_aligned(HeapWord* addr) const
686
{
687
return region_offset(addr) == 0;
688
}
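// Illustrative sketch (not part of the original source): the region
// arithmetic above, assuming for the example a hypothetical RegionSize of
// 1024 words (the real constants are defined in psParallelCompact.cpp).
//
//   HeapWord* addr   = _region_start + 2600;      // some heap word
//   size_t    idx    = addr_to_region_idx(addr);  // 2600 / 1024 == 2
//   size_t    offset = region_offset(addr);       // 2600 % 1024 == 552
//   HeapWord* base   = region_to_addr(idx);       // _region_start + 2048
//   assert(region_to_addr(idx, offset) == addr, "round trip");
//   assert(region_align_down(addr) == base, "alignment");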
689
690
inline size_t
691
ParallelCompactData::block_offset(const HeapWord* addr) const
692
{
693
assert(addr >= _region_start, "bad addr");
694
assert(addr <= _region_end, "bad addr");
695
return (size_t(addr) & BlockAddrOffsetMask) >> LogHeapWordSize;
696
}
697
698
inline size_t
699
ParallelCompactData::addr_to_block_idx(const HeapWord* addr) const
700
{
701
assert(addr >= _region_start, "bad addr");
702
assert(addr <= _region_end, "bad addr");
703
return pointer_delta(addr, _region_start) >> Log2BlockSize;
704
}
705
706
inline ParallelCompactData::BlockData*
707
ParallelCompactData::addr_to_block_ptr(const HeapWord* addr) const
708
{
709
return block(addr_to_block_idx(addr));
710
}
711
712
inline HeapWord*
713
ParallelCompactData::block_to_addr(size_t block) const
714
{
715
assert(block < _block_count, "block out of range");
716
return _region_start + (block << Log2BlockSize);
717
}
718
719
inline size_t
720
ParallelCompactData::region_to_block_idx(size_t region) const
721
{
722
return region << Log2BlocksPerRegion;
723
}
724
725
inline HeapWord*
726
ParallelCompactData::block_align_down(HeapWord* addr) const
727
{
728
assert(addr >= _region_start, "bad addr");
729
assert(addr < _region_end + RegionSize, "bad addr");
730
return (HeapWord*)(size_t(addr) & BlockAddrMask);
731
}
732
733
inline HeapWord*
734
ParallelCompactData::block_align_up(HeapWord* addr) const
735
{
736
assert(addr >= _region_start, "bad addr");
737
assert(addr <= _region_end, "bad addr");
738
return block_align_down(addr + BlockSizeOffsetMask);
739
}
740
741
inline bool
742
ParallelCompactData::is_block_aligned(HeapWord* addr) const
743
{
744
return block_offset(addr) == 0;
745
}
746
747
// Abstract closure for use with ParMarkBitMap::iterate(), which will invoke the
748
// do_addr() method.
749
//
750
// The closure is initialized with the number of heap words to process
751
// (words_remaining()), and becomes 'full' when it reaches 0. The do_addr()
752
// methods in subclasses should update the total as words are processed. Since
753
// only one subclass actually uses this mechanism to terminate iteration, the
754
// default initial value is > 0. The implementation is here and not in the
755
// single subclass that uses it to avoid making is_full() virtual, and thus
756
// adding a virtual call per live object.
757
758
class ParMarkBitMapClosure: public StackObj {
759
public:
760
typedef ParMarkBitMap::idx_t idx_t;
761
typedef ParMarkBitMap::IterationStatus IterationStatus;
762
763
public:
764
inline ParMarkBitMapClosure(ParMarkBitMap* mbm, ParCompactionManager* cm,
765
size_t words = max_uintx);
766
767
inline ParCompactionManager* compaction_manager() const;
768
inline ParMarkBitMap* bitmap() const;
769
inline size_t words_remaining() const;
770
inline bool is_full() const;
771
inline HeapWord* source() const;
772
773
inline void set_source(HeapWord* addr);
774
775
virtual IterationStatus do_addr(HeapWord* addr, size_t words) = 0;
776
777
protected:
778
inline void decrement_words_remaining(size_t words);
779
780
private:
781
ParMarkBitMap* const _bitmap;
782
ParCompactionManager* const _compaction_manager;
783
DEBUG_ONLY(const size_t _initial_words_remaining;) // Useful in debugger.
784
size_t _words_remaining; // Words left to copy.
785
786
protected:
787
HeapWord* _source; // Next addr that would be read.
788
};
789
790
inline
791
ParMarkBitMapClosure::ParMarkBitMapClosure(ParMarkBitMap* bitmap,
792
ParCompactionManager* cm,
793
size_t words):
794
_bitmap(bitmap), _compaction_manager(cm)
795
#ifdef ASSERT
796
, _initial_words_remaining(words)
797
#endif
798
{
799
_words_remaining = words;
800
_source = NULL;
801
}
802
803
inline ParCompactionManager* ParMarkBitMapClosure::compaction_manager() const {
804
return _compaction_manager;
805
}
806
807
inline ParMarkBitMap* ParMarkBitMapClosure::bitmap() const {
808
return _bitmap;
809
}
810
811
inline size_t ParMarkBitMapClosure::words_remaining() const {
812
return _words_remaining;
813
}
814
815
inline bool ParMarkBitMapClosure::is_full() const {
816
return words_remaining() == 0;
817
}
818
819
inline HeapWord* ParMarkBitMapClosure::source() const {
820
return _source;
821
}
822
823
inline void ParMarkBitMapClosure::set_source(HeapWord* addr) {
824
_source = addr;
825
}
826
827
inline void ParMarkBitMapClosure::decrement_words_remaining(size_t words) {
828
assert(_words_remaining >= words, "processed too many words");
829
_words_remaining -= words;
830
}
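// Illustrative sketch (not part of the original source): a minimal
// ParMarkBitMapClosure subclass showing how do_addr() cooperates with the
// words_remaining() budget described above. The closure below only counts
// words and is hypothetical.
//
//   class CountLiveWordsClosure: public ParMarkBitMapClosure {
//    public:
//     CountLiveWordsClosure(ParMarkBitMap* bm, ParCompactionManager* cm)
//       : ParMarkBitMapClosure(bm, cm), _counted(0) { }
//     virtual IterationStatus do_addr(HeapWord* addr, size_t words) {
//       _counted += words;
//       decrement_words_remaining(words);
//       return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
//     }
//     size_t counted() const { return _counted; }
//    private:
//     size_t _counted;
//   };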

// The UseParallelOldGC collector is a stop-the-world garbage collector that
// does parts of the collection using parallel threads. The collection includes
// the tenured generation and the young generation. The permanent generation is
// collected at the same time as the other two generations but the permanent
// generation is collected by a single GC thread. The permanent generation is
// collected serially because of the requirement that during the processing of a
// klass AAA, any objects referenced by AAA must already have been processed.
// This requirement is enforced by a left (lower address) to right (higher
// address) sliding compaction.
//
// There are four phases of the collection.
//
// - marking phase
// - summary phase
// - compacting phase
// - clean up phase
//
// Roughly speaking these phases correspond, respectively, to
// - mark all the live objects
// - calculate the destination of each object at the end of the collection
// - move the objects to their destination
// - update some references and reinitialize some variables
//
// These phases are invoked in PSParallelCompact::invoke_no_policy(). The
// marking phase is implemented in PSParallelCompact::marking_phase() and does a
// complete marking of the heap. The summary phase is implemented in
// PSParallelCompact::summary_phase(). The move and update phase is implemented
// in PSParallelCompact::compact().
//
// A space that is being collected is divided into regions and with each region
// is associated an object of type ParallelCompactData. Each region is of a
// fixed size and typically will contain more than 1 object and may have parts
// of objects at the front and back of the region.
//
// region -----+---------------------+----------
// objects covered [ AAA )[ BBB )[ CCC )[ DDD )
//
// The marking phase does a complete marking of all live objects in the heap.
// The marking also compiles the size of the data for all live objects covered
// by the region. This size includes the part of any live object spanning onto
// the region (part of AAA if it is live) from the front, all live objects
// contained in the region (BBB and/or CCC if they are live), and the part of
// any live objects covered by the region that extends off the region (part of
// DDD if it is live). The marking phase uses multiple GC threads and marking
// is done in a bit array of type ParMarkBitMap. The marking of the bit map is
// done atomically as is the accumulation of the size of the live objects
// covered by a region.
//
// The summary phase calculates the total live data to the left of each region
// XXX. Based on that total and the bottom of the space, it can calculate the
// starting location of the live data in XXX. The summary phase calculates for
// each region XXX quantities such as
//
// - the amount of live data at the beginning of a region from an object
// entering the region.
// - the location of the first live data on the region
// - a count of the number of regions receiving live data from XXX.
//
// See ParallelCompactData for precise details. The summary phase also
// calculates the dense prefix for the compaction. The dense prefix is a
// portion at the beginning of the space that is not moved. The objects in the
// dense prefix do need to have their object references updated. See method
// summarize_dense_prefix().
//
// The summary phase is done using 1 GC thread.
//
// The compaction phase moves objects to their new location and updates all
// references in the object.
//
// A current exception is that objects that cross a region boundary are moved
// but do not have their references updated. References are not updated because
// it cannot easily be determined if the klass pointer KKK for the object AAA
// has been updated. KKK likely resides in a region to the left of the region
// containing AAA. These AAA's have their references updated at the end in a
// clean up phase. See the method PSParallelCompact::update_deferred_objects().
// An alternate strategy is being investigated for this deferral of updating.
//
// Compaction is done on a region basis. A region that is ready to be filled is
// put on a ready list and GC threads take regions off the list and fill them. A
// region is ready to be filled if it is empty of live objects. Such a region may
// have been initially empty (only contained dead objects) or may have had all
// its live objects copied out already. A region that compacts into itself is
// also ready for filling. The ready list is initially filled with empty
// regions and regions compacting into themselves. There is always at least 1
// region that can be put on the ready list. The regions are atomically added
// and removed from the ready list.
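// Illustrative sketch (not part of the original source): the phase ordering
// described above, roughly as PSParallelCompact::invoke_no_policy() drives it.
// Arguments are elided and the control flow is simplified.
//
//   marking_phase(cm, maximum_heap_compaction, &_gc_tracer); // mark live objects
//   summary_phase(cm, maximum_heap_compaction);              // choose dense prefixes,
//                                                            //   compute destinations
//   compact();                                               // move objects, update refs
//   post_compact();                                          // clean up and reset
//                                                            //   per-space state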
918
919
class PSParallelCompact : AllStatic {
920
public:
921
// Convenient access to type names.
922
typedef ParMarkBitMap::idx_t idx_t;
923
typedef ParallelCompactData::RegionData RegionData;
924
typedef ParallelCompactData::BlockData BlockData;
925
926
typedef enum {
927
old_space_id, eden_space_id,
928
from_space_id, to_space_id, last_space_id
929
} SpaceId;
930
931
public:
932
// Inline closure decls
933
//
934
class IsAliveClosure: public BoolObjectClosure {
935
public:
936
virtual bool do_object_b(oop p);
937
};
938
939
class KeepAliveClosure: public OopClosure {
940
private:
941
ParCompactionManager* _compaction_manager;
942
protected:
943
template <class T> inline void do_oop_work(T* p);
944
public:
945
KeepAliveClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
946
virtual void do_oop(oop* p);
947
virtual void do_oop(narrowOop* p);
948
};
949
950
class FollowStackClosure: public VoidClosure {
951
private:
952
ParCompactionManager* _compaction_manager;
953
public:
954
FollowStackClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
955
virtual void do_void();
956
};
957
958
class AdjustPointerClosure: public OopClosure {
959
public:
960
virtual void do_oop(oop* p);
961
virtual void do_oop(narrowOop* p);
962
// Do not walk from thread stacks to the code cache during this phase.
963
virtual void do_code_blob(CodeBlob* cb) const { }
964
};
965
966
class AdjustKlassClosure : public KlassClosure {
967
public:
968
void do_klass(Klass* klass);
969
};
970
971
friend class KeepAliveClosure;
972
friend class FollowStackClosure;
973
friend class AdjustPointerClosure;
974
friend class AdjustKlassClosure;
975
friend class FollowKlassClosure;
976
friend class InstanceClassLoaderKlass;
977
friend class RefProcTaskProxy;
978
979
private:
980
static STWGCTimer _gc_timer;
981
static ParallelOldTracer _gc_tracer;
982
static elapsedTimer _accumulated_time;
983
static unsigned int _total_invocations;
984
static unsigned int _maximum_compaction_gc_num;
985
static jlong _time_of_last_gc; // ms
986
static CollectorCounters* _counters;
987
static ParMarkBitMap _mark_bitmap;
988
static ParallelCompactData _summary_data;
989
static IsAliveClosure _is_alive_closure;
990
static SpaceInfo _space_info[last_space_id];
991
static bool _print_phases;
992
static AdjustPointerClosure _adjust_pointer_closure;
993
static AdjustKlassClosure _adjust_klass_closure;
994
995
// Reference processing (used in ...follow_contents)
996
static ReferenceProcessor* _ref_processor;
997
998
// Updated location of intArrayKlassObj.
999
static Klass* _updated_int_array_klass_obj;
1000
1001
// Values computed at initialization and used by dead_wood_limiter().
1002
static double _dwl_mean;
1003
static double _dwl_std_dev;
1004
static double _dwl_first_term;
1005
static double _dwl_adjustment;
1006
#ifdef ASSERT
1007
static bool _dwl_initialized;
1008
#endif // #ifdef ASSERT
1009
1010
1011
public:
1012
static ParallelOldTracer* gc_tracer() { return &_gc_tracer; }
1013
1014
private:
1015
1016
static void initialize_space_info();
1017
1018
// Return true if details about individual phases should be printed.
1019
static inline bool print_phases();
1020
1021
// Clear the marking bitmap and summary data that cover the specified space.
1022
static void clear_data_covering_space(SpaceId id);
1023
1024
static void pre_compact(PreGCValues* pre_gc_values);
1025
static void post_compact();
1026
1027
// Mark live objects
1028
static void marking_phase(ParCompactionManager* cm,
1029
bool maximum_heap_compaction,
1030
ParallelOldTracer *gc_tracer);
1031
1032
template <class T>
1033
static inline void follow_root(ParCompactionManager* cm, T* p);
1034
1035
// Compute the dense prefix for the designated space. This is an experimental
1036
// implementation currently not used in production.
1037
static HeapWord* compute_dense_prefix_via_density(const SpaceId id,
1038
bool maximum_compaction);
1039
1040
// Methods used to compute the dense prefix.
1041
1042
// Compute the value of the normal distribution at x = density. The mean and
1043
// standard deviation are values saved by initialize_dead_wood_limiter().
1044
static inline double normal_distribution(double density);
1045
1046
// Initialize the static vars used by dead_wood_limiter().
1047
static void initialize_dead_wood_limiter();
1048
1049
// Return the percentage of space that can be treated as "dead wood" (i.e.,
1050
// not reclaimed).
1051
static double dead_wood_limiter(double density, size_t min_percent);
1052
1053
// Find the first (left-most) region in the range [beg, end) that has at least
1054
// dead_words of dead space to the left. The argument beg must be the first
1055
// region in the space that is not completely live.
1056
static RegionData* dead_wood_limit_region(const RegionData* beg,
1057
const RegionData* end,
1058
size_t dead_words);
1059
1060
// Return a pointer to the first region in the range [beg, end) that is not
1061
// completely full.
1062
static RegionData* first_dead_space_region(const RegionData* beg,
1063
const RegionData* end);
1064
1065
// Return a value indicating the benefit or 'yield' if the compacted region
1066
// were to start (or equivalently if the dense prefix were to end) at the
1067
// candidate region. Higher values are better.
1068
//
1069
// The value is based on the amount of space reclaimed vs. the costs of (a)
1070
// updating references in the dense prefix plus (b) copying objects and
1071
// updating references in the compacted region.
1072
static inline double reclaimed_ratio(const RegionData* const candidate,
1073
HeapWord* const bottom,
1074
HeapWord* const top,
1075
HeapWord* const new_top);
1076
1077
// Compute the dense prefix for the designated space.
1078
static HeapWord* compute_dense_prefix(const SpaceId id,
1079
bool maximum_compaction);
1080
1081
// Return true if dead space crosses onto the specified Region; bit must be
1082
// the bit index corresponding to the first word of the Region.
1083
static inline bool dead_space_crosses_boundary(const RegionData* region,
1084
idx_t bit);
1085
1086
// Summary phase utility routine to fill dead space (if any) at the dense
1087
// prefix boundary. Should only be called if the dense prefix is
1088
// non-empty.
1089
static void fill_dense_prefix_end(SpaceId id);
1090
1091
// Clear the summary data source_region field for the specified addresses.
1092
static void clear_source_region(HeapWord* beg_addr, HeapWord* end_addr);
1093
1094
#ifndef PRODUCT
1095
// Routines to provoke splitting a young gen space (ParallelOldGCSplitALot).
1096
1097
// Fill the region [start, start + words) with live object(s). Only usable
1098
// for the old and permanent generations.
1099
static void fill_with_live_objects(SpaceId id, HeapWord* const start,
1100
size_t words);
1101
// Include the new objects in the summary data.
1102
static void summarize_new_objects(SpaceId id, HeapWord* start);
1103
1104
// Add live objects to a survivor space since it's rare that both survivors
1105
// are non-empty.
1106
static void provoke_split_fill_survivor(SpaceId id);
1107
1108
// Add live objects and/or choose the dense prefix to provoke splitting.
1109
static void provoke_split(bool & maximum_compaction);
1110
#endif
1111
1112
static void summarize_spaces_quick();
1113
static void summarize_space(SpaceId id, bool maximum_compaction);
1114
static void summary_phase(ParCompactionManager* cm, bool maximum_compaction);
1115
1116
// Adjust addresses in roots. Does not adjust addresses in heap.
1117
static void adjust_roots();
1118
1119
DEBUG_ONLY(static void write_block_fill_histogram(outputStream* const out);)
1120
1121
// Move objects to new locations.
1122
static void compact_perm(ParCompactionManager* cm);
1123
static void compact();
1124
1125
// Add available regions to the stack and draining tasks to the task queue.
1126
static void enqueue_region_draining_tasks(GCTaskQueue* q,
1127
uint parallel_gc_threads);
1128
1129
// Add dense prefix update tasks to the task queue.
1130
static void enqueue_dense_prefix_tasks(GCTaskQueue* q,
1131
uint parallel_gc_threads);
1132
1133
// Add region stealing tasks to the task queue.
1134
static void enqueue_region_stealing_tasks(
1135
GCTaskQueue* q,
1136
ParallelTaskTerminator* terminator_ptr,
1137
uint parallel_gc_threads);
1138
1139
// If objects are left in eden after a collection, try to move the boundary
1140
// and absorb them into the old gen. Returns true if eden was emptied.
1141
static bool absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
1142
PSYoungGen* young_gen,
1143
PSOldGen* old_gen);
1144
1145
// Reset time since last full gc
1146
static void reset_millis_since_last_gc();
1147
1148
public:
1149
class MarkAndPushClosure: public OopClosure {
1150
private:
1151
ParCompactionManager* _compaction_manager;
1152
public:
1153
MarkAndPushClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
1154
virtual void do_oop(oop* p);
1155
virtual void do_oop(narrowOop* p);
1156
};
1157
1158
// The one and only place to start following the classes.
1159
// Should only be applied to the ClassLoaderData klasses list.
1160
class FollowKlassClosure : public KlassClosure {
1161
private:
1162
MarkAndPushClosure* _mark_and_push_closure;
1163
public:
1164
FollowKlassClosure(MarkAndPushClosure* mark_and_push_closure) :
1165
_mark_and_push_closure(mark_and_push_closure) { }
1166
void do_klass(Klass* klass);
1167
};
1168
1169
PSParallelCompact();
1170
1171
// Convenient accessor for Universe::heap().
1172
static ParallelScavengeHeap* gc_heap() {
1173
return (ParallelScavengeHeap*)Universe::heap();
1174
}
1175
1176
static void invoke(bool maximum_heap_compaction);
1177
static bool invoke_no_policy(bool maximum_heap_compaction);
1178
1179
static void post_initialize();
1180
// Perform initialization for PSParallelCompact that requires
1181
// allocations. This should be called during the VM initialization
1182
// at a point where it would be appropriate to return a JNI_ENOMEM
1183
// in the event of a failure.
1184
static bool initialize();
1185
1186
// Closure accessors
1187
static OopClosure* adjust_pointer_closure() { return (OopClosure*)&_adjust_pointer_closure; }
1188
static KlassClosure* adjust_klass_closure() { return (KlassClosure*)&_adjust_klass_closure; }
1189
static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; }
1190
1191
// Public accessors
1192
static elapsedTimer* accumulated_time() { return &_accumulated_time; }
1193
static unsigned int total_invocations() { return _total_invocations; }
1194
static CollectorCounters* counters() { return _counters; }
1195
1196
// Used to add tasks
1197
static GCTaskManager* const gc_task_manager();
1198
static Klass* updated_int_array_klass_obj() {
1199
return _updated_int_array_klass_obj;
1200
}
1201
1202
// Marking support
1203
static inline bool mark_obj(oop obj);
1204
static inline bool is_marked(oop obj);
1205
// Check mark and maybe push on marking stack
1206
template <class T> static inline void mark_and_push(ParCompactionManager* cm,
1207
T* p);
1208
template <class T> static inline void adjust_pointer(T* p);
1209
1210
static inline void follow_klass(ParCompactionManager* cm, Klass* klass);
1211
1212
static void follow_class_loader(ParCompactionManager* cm,
1213
ClassLoaderData* klass);
1214
1215
// Compaction support.
1216
// Return true if p is in the range [beg_addr, end_addr).
1217
static inline bool is_in(HeapWord* p, HeapWord* beg_addr, HeapWord* end_addr);
1218
static inline bool is_in(oop* p, HeapWord* beg_addr, HeapWord* end_addr);
1219
1220
// Convenience wrappers for per-space data kept in _space_info.
1221
static inline MutableSpace* space(SpaceId space_id);
1222
static inline HeapWord* new_top(SpaceId space_id);
1223
static inline HeapWord* dense_prefix(SpaceId space_id);
1224
static inline ObjectStartArray* start_array(SpaceId space_id);
1225
1226
// Move and update the live objects in the specified space.
1227
static void move_and_update(ParCompactionManager* cm, SpaceId space_id);
1228
1229
// Process the end of the given region range in the dense prefix.
1230
// This includes saving any object not updated.
1231
static void dense_prefix_regions_epilogue(ParCompactionManager* cm,
1232
size_t region_start_index,
1233
size_t region_end_index,
1234
idx_t exiting_object_offset,
1235
idx_t region_offset_start,
1236
idx_t region_offset_end);
1237
1238
// Update a region in the dense prefix. For each live object
1239
// in the region, update it's interior references. For each
1240
// dead object, fill it with deadwood. Dead space at the end
1241
// of a region range will be filled to the start of the next
1242
// live object regardless of the region_index_end. None of the
1243
// objects in the dense prefix move and dead space is dead
1244
// (holds only dead objects that don't need any processing), so
1245
// dead space can be filled in any order.
1246
static void update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
1247
SpaceId space_id,
1248
size_t region_index_start,
1249
size_t region_index_end);
1250
1251
// Return the address of the count + 1st live word in the range [beg, end).
1252
static HeapWord* skip_live_words(HeapWord* beg, HeapWord* end, size_t count);
1253
1254
// Return the address of the word to be copied to dest_addr, which must be
1255
// aligned to a region boundary.
1256
static HeapWord* first_src_addr(HeapWord* const dest_addr,
1257
SpaceId src_space_id,
1258
size_t src_region_idx);
1259
1260
// Determine the next source region, set closure.source() to the start of the
1261
// new region, and return the region index. Parameter end_addr is the address one
1262
// beyond the end of source range just processed. If necessary, switch to a
1263
// new source space and set src_space_id (in-out parameter) and src_space_top
1264
// (out parameter) accordingly.
1265
static size_t next_src_region(MoveAndUpdateClosure& closure,
1266
SpaceId& src_space_id,
1267
HeapWord*& src_space_top,
1268
HeapWord* end_addr);
1269
1270
// Decrement the destination count for each non-empty source region in the
1271
// range [beg_region, region(region_align_up(end_addr))). If the destination
1272
// count for a region goes to 0 and it needs to be filled, enqueue it.
1273
static void decrement_destination_counts(ParCompactionManager* cm,
1274
SpaceId src_space_id,
1275
size_t beg_region,
1276
HeapWord* end_addr);
1277
1278
// Fill a region, copying objects from one or more source regions.
1279
static void fill_region(ParCompactionManager* cm, size_t region_idx);
1280
static void fill_and_update_region(ParCompactionManager* cm, size_t region) {
1281
fill_region(cm, region);
1282
}
1283
1284
// Fill in the block table for the specified region.
1285
static void fill_blocks(size_t region_idx);
1286
1287
// Update the deferred objects in the space.
1288
static void update_deferred_objects(ParCompactionManager* cm, SpaceId id);
1289
1290
static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; }
1291
static ParallelCompactData& summary_data() { return _summary_data; }
1292
1293
// Reference Processing
1294
static ReferenceProcessor* const ref_processor() { return _ref_processor; }
1295
1296
static STWGCTimer* gc_timer() { return &_gc_timer; }
1297
1298
// Return the SpaceId for the given address.
1299
static SpaceId space_id(HeapWord* addr);
1300
1301
// Time since last full gc (in milliseconds).
1302
static jlong millis_since_last_gc();
1303
1304
static void print_on_error(outputStream* st);
1305
1306
#ifndef PRODUCT
1307
// Debugging support.
1308
static const char* space_names[last_space_id];
1309
static void print_region_ranges();
1310
static void print_dense_prefix_stats(const char* const algorithm,
1311
const SpaceId id,
1312
const bool maximum_compaction,
1313
HeapWord* const addr);
1314
static void summary_phase_msg(SpaceId dst_space_id,
1315
HeapWord* dst_beg, HeapWord* dst_end,
1316
SpaceId src_space_id,
1317
HeapWord* src_beg, HeapWord* src_end);
1318
#endif // #ifndef PRODUCT
1319
1320
#ifdef ASSERT
1321
// Sanity check the new location of a word in the heap.
1322
static inline void check_new_location(HeapWord* old_addr, HeapWord* new_addr);
1323
// Verify that all the regions have been emptied.
1324
static void verify_complete(SpaceId space_id);
1325
#endif // #ifdef ASSERT
1326
};
1327
1328
inline bool PSParallelCompact::mark_obj(oop obj) {
1329
const int obj_size = obj->size();
1330
if (mark_bitmap()->mark_obj(obj, obj_size)) {
1331
_summary_data.add_obj(obj, obj_size);
1332
return true;
1333
} else {
1334
return false;
1335
}
1336
}
1337
1338
inline bool PSParallelCompact::is_marked(oop obj) {
1339
return mark_bitmap()->is_marked(obj);
1340
}
1341
1342
template <class T>
1343
inline void PSParallelCompact::follow_root(ParCompactionManager* cm, T* p) {
1344
assert(!Universe::heap()->is_in_reserved(p),
1345
"roots shouldn't be things within the heap");
1346
1347
T heap_oop = oopDesc::load_heap_oop(p);
1348
if (!oopDesc::is_null(heap_oop)) {
1349
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
1350
if (mark_bitmap()->is_unmarked(obj)) {
1351
if (mark_obj(obj)) {
1352
obj->follow_contents(cm);
1353
}
1354
}
1355
}
1356
cm->follow_marking_stacks();
1357
}
1358
1359
template <class T>
1360
inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) {
1361
T heap_oop = oopDesc::load_heap_oop(p);
1362
if (!oopDesc::is_null(heap_oop)) {
1363
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
1364
if (mark_bitmap()->is_unmarked(obj) && mark_obj(obj)) {
1365
cm->push(obj);
1366
}
1367
}
1368
}
1369
1370
template <class T>
1371
inline void PSParallelCompact::adjust_pointer(T* p) {
1372
T heap_oop = oopDesc::load_heap_oop(p);
1373
if (!oopDesc::is_null(heap_oop)) {
1374
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
1375
oop new_obj = (oop)summary_data().calc_new_pointer(obj);
1376
assert(new_obj != NULL, // is forwarding ptr?
1377
"should be forwarded");
1378
// Just always do the update unconditionally?
1379
if (new_obj != NULL) {
1380
assert(Universe::heap()->is_in_reserved(new_obj),
1381
"should be in object space");
1382
oopDesc::encode_store_heap_oop_not_null(p, new_obj);
1383
}
1384
}
1385
}
1386
1387
inline void PSParallelCompact::follow_klass(ParCompactionManager* cm, Klass* klass) {
1388
oop holder = klass->klass_holder();
1389
PSParallelCompact::mark_and_push(cm, &holder);
1390
}
1391
1392
template <class T>
1393
inline void PSParallelCompact::KeepAliveClosure::do_oop_work(T* p) {
1394
mark_and_push(_compaction_manager, p);
1395
}
1396
1397
inline bool PSParallelCompact::print_phases() {
1398
return _print_phases;
1399
}
1400
1401
inline double PSParallelCompact::normal_distribution(double density) {
1402
assert(_dwl_initialized, "uninitialized");
1403
const double squared_term = (density - _dwl_mean) / _dwl_std_dev;
1404
return _dwl_first_term * exp(-0.5 * squared_term * squared_term);
1405
}
1406
1407
inline bool
1408
PSParallelCompact::dead_space_crosses_boundary(const RegionData* region,
1409
idx_t bit)
1410
{
1411
assert(bit > 0, "cannot call this for the first bit/region");
1412
assert(_summary_data.region_to_addr(region) == _mark_bitmap.bit_to_addr(bit),
1413
"sanity check");
1414
1415
// Dead space crosses the boundary if (1) a partial object does not extend
1416
// onto the region, (2) an object does not start at the beginning of the
1417
// region, and (3) an object does not end at the end of the prior region.
1418
return region->partial_obj_size() == 0 &&
1419
!_mark_bitmap.is_obj_beg(bit) &&
1420
!_mark_bitmap.is_obj_end(bit - 1);
1421
}
1422
1423
inline bool
1424
PSParallelCompact::is_in(HeapWord* p, HeapWord* beg_addr, HeapWord* end_addr) {
1425
return p >= beg_addr && p < end_addr;
1426
}
1427
1428
inline bool
1429
PSParallelCompact::is_in(oop* p, HeapWord* beg_addr, HeapWord* end_addr) {
1430
return is_in((HeapWord*)p, beg_addr, end_addr);
1431
}
1432
1433
inline MutableSpace* PSParallelCompact::space(SpaceId id) {
1434
assert(id < last_space_id, "id out of range");
1435
return _space_info[id].space();
1436
}
1437
1438
inline HeapWord* PSParallelCompact::new_top(SpaceId id) {
1439
assert(id < last_space_id, "id out of range");
1440
return _space_info[id].new_top();
1441
}
1442
1443
inline HeapWord* PSParallelCompact::dense_prefix(SpaceId id) {
1444
assert(id < last_space_id, "id out of range");
1445
return _space_info[id].dense_prefix();
1446
}
1447
1448
inline ObjectStartArray* PSParallelCompact::start_array(SpaceId id) {
1449
assert(id < last_space_id, "id out of range");
1450
return _space_info[id].start_array();
1451
}
1452
1453
#ifdef ASSERT
1454
inline void
1455
PSParallelCompact::check_new_location(HeapWord* old_addr, HeapWord* new_addr)
1456
{
1457
assert(old_addr >= new_addr || space_id(old_addr) != space_id(new_addr),
1458
"must move left or to a different space");
1459
assert(is_object_aligned((intptr_t)old_addr) && is_object_aligned((intptr_t)new_addr),
1460
"checking alignment");
1461
}
1462
#endif // ASSERT
1463
1464
class MoveAndUpdateClosure: public ParMarkBitMapClosure {
1465
public:
1466
inline MoveAndUpdateClosure(ParMarkBitMap* bitmap, ParCompactionManager* cm,
1467
ObjectStartArray* start_array,
1468
HeapWord* destination, size_t words);
1469
1470
// Accessors.
1471
HeapWord* destination() const { return _destination; }
1472
1473
// If the object will fit (size <= words_remaining()), copy it to the current
1474
// destination, update the interior oops and the start array and return either
1475
// full (if the closure is full) or incomplete. If the object will not fit,
1476
// return would_overflow.
1477
virtual IterationStatus do_addr(HeapWord* addr, size_t size);
1478
1479
// Copy enough words to fill this closure, starting at source(). Interior
1480
// oops and the start array are not updated. Return full.
1481
IterationStatus copy_until_full();
1482
1483
// Copy enough words to fill this closure or to the end of an object,
1484
// whichever is smaller, starting at source(). Interior oops and the start
1485
// array are not updated.
1486
void copy_partial_obj();
1487
1488
protected:
1489
// Update variables to indicate that word_count words were processed.
1490
inline void update_state(size_t word_count);
1491
1492
protected:
1493
ObjectStartArray* const _start_array;
1494
HeapWord* _destination; // Next addr to be written.
1495
};
1496
1497
inline
1498
MoveAndUpdateClosure::MoveAndUpdateClosure(ParMarkBitMap* bitmap,
1499
ParCompactionManager* cm,
1500
ObjectStartArray* start_array,
1501
HeapWord* destination,
1502
size_t words) :
1503
ParMarkBitMapClosure(bitmap, cm, words), _start_array(start_array)
1504
{
1505
_destination = destination;
1506
}
1507
1508
inline void MoveAndUpdateClosure::update_state(size_t words)
1509
{
1510
decrement_words_remaining(words);
1511
_source += words;
1512
_destination += words;
1513
}
1514
1515
class UpdateOnlyClosure: public ParMarkBitMapClosure {
1516
private:
1517
const PSParallelCompact::SpaceId _space_id;
1518
ObjectStartArray* const _start_array;
1519
1520
public:
1521
UpdateOnlyClosure(ParMarkBitMap* mbm,
1522
ParCompactionManager* cm,
1523
PSParallelCompact::SpaceId space_id);
1524
1525
// Update the object.
1526
virtual IterationStatus do_addr(HeapWord* addr, size_t words);
1527
1528
inline void do_addr(HeapWord* addr);
1529
};
1530
1531
inline void UpdateOnlyClosure::do_addr(HeapWord* addr)
1532
{
1533
_start_array->allocate_block(addr);
1534
oop(addr)->update_contents(compaction_manager());
1535
}
1536
1537
class FillClosure: public ParMarkBitMapClosure
1538
{
1539
public:
1540
FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id) :
1541
ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm),
1542
_start_array(PSParallelCompact::start_array(space_id))
1543
{
1544
assert(space_id == PSParallelCompact::old_space_id,
1545
"cannot use FillClosure in the young gen");
1546
}
1547
1548
virtual IterationStatus do_addr(HeapWord* addr, size_t size) {
1549
CollectedHeap::fill_with_objects(addr, size);
1550
HeapWord* const end = addr + size;
1551
do {
1552
_start_array->allocate_block(addr);
1553
addr += oop(addr)->size();
1554
} while (addr < end);
1555
return ParMarkBitMap::incomplete;
1556
}
1557
1558
private:
1559
ObjectStartArray* const _start_array;
1560
};
1561
1562
#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_HPP
1563
1564