GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/memory/generation.cpp
/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genMarkSweep.hpp"
#include "memory/genOopClosures.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.hpp"
#include "memory/generation.inline.hpp"
#include "memory/space.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

Generation::Generation(ReservedSpace rs, size_t initial_size, int level) :
  _level(level),
  _ref_processor(NULL) {
  if (!_virtual_space.initialize(rs, initial_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }
  // Mangle all of the initial generation.
  if (ZapUnusedHeapArea) {
    MemRegion mangle_region((HeapWord*)_virtual_space.low(),
                            (HeapWord*)_virtual_space.high());
    SpaceMangler::mangle_region(mangle_region);
  }
  _reserved = MemRegion((HeapWord*)_virtual_space.low_boundary(),
                        (HeapWord*)_virtual_space.high_boundary());
}

GenerationSpec* Generation::spec() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(0 <= level() && level() < gch->_n_gens, "Bad gen level");
  return gch->_gen_specs[level()];
}

// This is for CMS. It returns stable monotonic used space size.
// Remove this when CMS is removed.
size_t Generation::used_stable() const {
  return used();
}

size_t Generation::max_capacity() const {
  return reserved().byte_size();
}

void Generation::print_heap_change(size_t prev_used) const {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" " SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "(" SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" " SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "(" SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

// By default we get a single threaded default reference processor;
// generations needing multi-threaded refs processing or discovery override this method.
void Generation::ref_processor_init() {
  assert(_ref_processor == NULL, "a reference processor already exists");
  assert(!_reserved.is_empty(), "empty generation?");
  _ref_processor = new ReferenceProcessor(_reserved); // a vanilla reference processor
  if (_ref_processor == NULL) {
    vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
  }
}
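
// For example, ParNewGeneration and ConcurrentMarkSweepGeneration provide
// their own ref_processor_init() that builds a ReferenceProcessor configured
// for multi-threaded reference discovery and processing.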

void Generation::print() const { print_on(tty); }

void Generation::print_on(outputStream* st) const {
  st->print(" %-20s", name());
  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
            capacity()/K, used()/K);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               _virtual_space.low_boundary(),
               _virtual_space.high(),
               _virtual_space.high_boundary());
}

void Generation::print_summary_info() { print_summary_info_on(tty); }

void Generation::print_summary_info_on(outputStream* st) {
  StatRecord* sr = stat_record();
  double time = sr->accumulated_time.seconds();
  st->print_cr("[Accumulated GC generation %d time %3.7f secs, "
               "%d GC's, avg GC time %3.7f]",
               level(), time, sr->invocations,
               sr->invocations > 0 ? time / sr->invocations : 0.0);
}

// Utility iterator classes

class GenerationIsInReservedClosure : public SpaceClosure {
 public:
  const void* _p;
  Space* sp;
  virtual void do_space(Space* s) {
    if (sp == NULL) {
      if (s->is_in_reserved(_p)) sp = s;
    }
  }
  GenerationIsInReservedClosure(const void* p) : _p(p), sp(NULL) {}
};

class GenerationIsInClosure : public SpaceClosure {
 public:
  const void* _p;
  Space* sp;
  virtual void do_space(Space* s) {
    if (sp == NULL) {
      if (s->is_in(_p)) sp = s;
    }
  }
  GenerationIsInClosure(const void* p) : _p(p), sp(NULL) {}
};

bool Generation::is_in(const void* p) const {
  GenerationIsInClosure blk(p);
  ((Generation*)this)->space_iterate(&blk);
  return blk.sp != NULL;
}

DefNewGeneration* Generation::as_DefNewGeneration() {
  assert((kind() == Generation::DefNew) ||
         (kind() == Generation::ParNew) ||
         (kind() == Generation::ASParNew),
         "Wrong youngest generation type");
  return (DefNewGeneration*) this;
}

Generation* Generation::next_gen() const {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  int next = level() + 1;
  if (next < gch->_n_gens) {
    return gch->_gens[next];
  } else {
    return NULL;
  }
}

size_t Generation::max_contiguous_available() const {
  // The largest number of contiguous free words in this or any higher generation.
  size_t max = 0;
  for (const Generation* gen = this; gen != NULL; gen = gen->next_gen()) {
    size_t avail = gen->contiguous_available();
    if (avail > max) {
      max = avail;
    }
  }
  return max;
}

bool Generation::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_contiguous_available();
  bool res = (available >= max_promotion_in_bytes);
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr(
      "Generation: promo attempt is%s safe: available(" SIZE_FORMAT ") %s max_promo(" SIZE_FORMAT ")",
      res? "":" not", available, res? ">=":"<",
      max_promotion_in_bytes);
  }
  return res;
}

// Ignores "ref" and calls allocate().
oop Generation::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");

#ifndef PRODUCT
  if (Universe::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif // #ifndef PRODUCT

  HeapWord* result = allocate(obj_size, false);
  if (result != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
    return oop(result);
  } else {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    return gch->handle_failed_promotion(this, obj, obj_size);
  }
}

oop Generation::par_promote(int thread_num,
                            oop obj, markOop m, size_t word_sz) {
  // Could do a bad general impl here that gets a lock. But no.
  ShouldNotCallThis();
  return NULL;
}

void Generation::par_promote_alloc_undo(int thread_num,
                                        HeapWord* obj, size_t word_sz) {
  // Could do a bad general impl here that gets a lock. But no.
  guarantee(false, "No good general implementation.");
}

Space* Generation::space_containing(const void* p) const {
  GenerationIsInReservedClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  return blk.sp;
}

// Some of these are mediocre general implementations. Should be
// overridden to get better performance.

class GenerationBlockStartClosure : public SpaceClosure {
 public:
  const void* _p;
  HeapWord* _start;
  virtual void do_space(Space* s) {
    if (_start == NULL && s->is_in_reserved(_p)) {
      _start = s->block_start(_p);
    }
  }
  GenerationBlockStartClosure(const void* p) { _p = p; _start = NULL; }
};

HeapWord* Generation::block_start(const void* p) const {
  GenerationBlockStartClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  return blk._start;
}

class GenerationBlockSizeClosure : public SpaceClosure {
 public:
  const HeapWord* _p;
  size_t size;
  virtual void do_space(Space* s) {
    if (size == 0 && s->is_in_reserved(_p)) {
      size = s->block_size(_p);
    }
  }
  GenerationBlockSizeClosure(const HeapWord* p) { _p = p; size = 0; }
};

size_t Generation::block_size(const HeapWord* p) const {
  GenerationBlockSizeClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  assert(blk.size > 0, "seems reasonable");
  return blk.size;
}

class GenerationBlockIsObjClosure : public SpaceClosure {
 public:
  const HeapWord* _p;
  bool is_obj;
  virtual void do_space(Space* s) {
    if (!is_obj && s->is_in_reserved(_p)) {
      is_obj |= s->block_is_obj(_p);
    }
  }
  GenerationBlockIsObjClosure(const HeapWord* p) { _p = p; is_obj = false; }
};

bool Generation::block_is_obj(const HeapWord* p) const {
  GenerationBlockIsObjClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  return blk.is_obj;
}

class GenerationOopIterateClosure : public SpaceClosure {
 public:
  ExtendedOopClosure* _cl;
  virtual void do_space(Space* s) {
    s->oop_iterate(_cl);
  }
  GenerationOopIterateClosure(ExtendedOopClosure* cl) :
    _cl(cl) {}
};

void Generation::oop_iterate(ExtendedOopClosure* cl) {
  GenerationOopIterateClosure blk(cl);
  space_iterate(&blk);
}

void Generation::younger_refs_in_space_iterate(Space* sp,
                                               OopsInGenClosure* cl) {
  GenRemSet* rs = SharedHeap::heap()->rem_set();
  rs->younger_refs_in_space_iterate(sp, cl);
}

class GenerationObjIterateClosure : public SpaceClosure {
 private:
  ObjectClosure* _cl;
 public:
  virtual void do_space(Space* s) {
    s->object_iterate(_cl);
  }
  GenerationObjIterateClosure(ObjectClosure* cl) : _cl(cl) {}
};

void Generation::object_iterate(ObjectClosure* cl) {
  GenerationObjIterateClosure blk(cl);
  space_iterate(&blk);
}

class GenerationSafeObjIterateClosure : public SpaceClosure {
 private:
  ObjectClosure* _cl;
 public:
  virtual void do_space(Space* s) {
    s->safe_object_iterate(_cl);
  }
  GenerationSafeObjIterateClosure(ObjectClosure* cl) : _cl(cl) {}
};

void Generation::safe_object_iterate(ObjectClosure* cl) {
  GenerationSafeObjIterateClosure blk(cl);
  space_iterate(&blk);
}

void Generation::prepare_for_compaction(CompactPoint* cp) {
  // Generic implementation, can be specialized
  CompactibleSpace* space = first_compaction_space();
  while (space != NULL) {
    space->prepare_for_compaction(cp);
    space = space->next_compaction_space();
  }
}

class AdjustPointersClosure: public SpaceClosure {
 public:
  void do_space(Space* sp) {
    sp->adjust_pointers();
  }
};

void Generation::adjust_pointers() {
  // Note that this is done over all spaces, not just the compactible
  // ones.
  AdjustPointersClosure blk;
  space_iterate(&blk, true);
}

void Generation::compact() {
  CompactibleSpace* sp = first_compaction_space();
  while (sp != NULL) {
    sp->compact();
    sp = sp->next_compaction_space();
  }
}

CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
                               int level,
                               GenRemSet* remset) :
  Generation(rs, initial_byte_size, level), _rs(remset),
  _shrink_factor(0), _min_heap_delta_bytes(), _capacity_at_prologue(),
  _used_at_prologue()
{
  HeapWord* start = (HeapWord*)rs.base();
  size_t reserved_byte_size = rs.size();
  assert((uintptr_t(start) & 3) == 0, "bad alignment");
  assert((reserved_byte_size & 3) == 0, "bad alignment");
  MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
  _bts = new BlockOffsetSharedArray(reserved_mr,
                                    heap_word_size(initial_byte_size));
  MemRegion committed_mr(start, heap_word_size(initial_byte_size));
  _rs->resize_covered_region(committed_mr);
  if (_bts == NULL)
    vm_exit_during_initialization("Could not allocate a BlockOffsetArray");

  // Verify that the start and end of this generation is the start of a card.
  // If this wasn't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
  if (reserved_mr.end() != Universe::heap()->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
  }
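  // Concretely (with the typical 512-byte card size), a generation boundary
  // falling in the middle of a card would leave that one card covering the
  // tail of this generation and the head of its neighbor, so clearing or
  // dirtying cards on behalf of one generation would also touch remembered-set
  // state belonging to the other.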
  _min_heap_delta_bytes = MinHeapDeltaBytes;
  _capacity_at_prologue = initial_byte_size;
  _used_at_prologue = 0;
}

bool CardGeneration::expand(size_t bytes, size_t expand_bytes) {
  assert_locked_or_safepoint(Heap_lock);
  if (bytes == 0) {
    return true; // That's what grow_by(0) would return
  }
  size_t aligned_bytes = ReservedSpace::page_align_size_up(bytes);
  if (aligned_bytes == 0) {
    // The alignment caused the number of bytes to wrap. An expand_by(0) will
    // return true with the implication that an expansion was done when it
    // was not. A call to expand implies a best effort to expand by "bytes"
    // but not a guarantee. Align down to give a best effort. This is likely
    // the most that the generation can expand since it has some capacity to
    // start with.
    aligned_bytes = ReservedSpace::page_align_size_down(bytes);
  }
  size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = grow_by(aligned_expand_bytes);
  }
  if (!success) {
    success = grow_by(aligned_bytes);
  }
  if (!success) {
    success = grow_to_reserved();
  }
  if (PrintGC && Verbose) {
    if (success && GC_locker::is_active_and_needs_gc()) {
      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
    }
  }

  return success;
}


// No young generation references, clear this generation's cards.
void CardGeneration::clear_remembered_set() {
  _rs->clear(reserved());
}


// Objects in this generation may have moved, invalidate this
// generation's cards.
void CardGeneration::invalidate_remembered_set() {
  _rs->invalidate(used_region());
}


void CardGeneration::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  size_t current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // We don't have floating point command-line arguments
  // Note: argument processing ensures that MinHeapFreeRatio < 100.
  const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  // Compute some numbers about the state of the heap.
  const size_t used_after_gc = used();
  const size_t capacity_after_gc = capacity();

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  spec()->init_size());
  assert(used_after_gc <= minimum_desired_capacity, "sanity check");
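  // Illustrative arithmetic (numbers chosen only for this comment): with
  // MinHeapFreeRatio = 40, maximum_used_percentage is 0.60, so a generation
  // with used_after_gc = 300M wants minimum_desired_capacity of about
  // 300M / 0.60 = 500M, i.e. enough capacity that at least 40% of it is free.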

  if (PrintGC && Verbose) {
    const size_t free_after_gc = free();
    const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
    gclog_or_tty->print_cr("TenuredGeneration::compute_new_size: ");
    gclog_or_tty->print_cr(" "
      " minimum_free_percentage: %6.2f"
      " maximum_used_percentage: %6.2f",
      minimum_free_percentage,
      maximum_used_percentage);
    gclog_or_tty->print_cr(" "
      " free_after_gc : %6.1fK"
      " used_after_gc : %6.1fK"
      " capacity_after_gc : %6.1fK",
      free_after_gc / (double) K,
      used_after_gc / (double) K,
      capacity_after_gc / (double) K);
    gclog_or_tty->print_cr(" "
      " free_percentage: %6.2f",
      free_percentage);
  }

  if (capacity_after_gc < minimum_desired_capacity) {
    // If we have less free space than we want then expand
    size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
    // Don't expand unless it's significant
    if (expand_bytes >= _min_heap_delta_bytes) {
      expand(expand_bytes, 0); // safe if expansion fails
    }
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr(" expanding:"
        " minimum_desired_capacity: %6.1fK"
        " expand_bytes: %6.1fK"
        " _min_heap_delta_bytes: %6.1fK",
        minimum_desired_capacity / (double) K,
        expand_bytes / (double) K,
        _min_heap_delta_bytes / (double) K);
    }
    return;
  }

  // No expansion, now see if we want to shrink
  size_t shrink_bytes = 0;
  // We would never want to shrink more than this
  size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity;

  if (MaxHeapFreeRatio < 100) {
    const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    spec()->init_size());
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr(" "
        " maximum_free_percentage: %6.2f"
        " minimum_used_percentage: %6.2f",
        maximum_free_percentage,
        minimum_used_percentage);
      gclog_or_tty->print_cr(" "
        " _capacity_at_prologue: %6.1fK"
        " minimum_desired_capacity: %6.1fK"
        " maximum_desired_capacity: %6.1fK",
        _capacity_at_prologue / (double) K,
        minimum_desired_capacity / (double) K,
        maximum_desired_capacity / (double) K);
    }
    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_after_gc > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_after_gc - maximum_desired_capacity;
      // We don't want to shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase. So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call. But if we recompute
      // size without shrinking, it goes back to 0%.
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
      assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100);
      }
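      // Illustrative schedule (numbers invented for this comment): if four
      // consecutive System.gc() calls each produce a raw shrink candidate of
      // 1000K, the damping above shrinks by 0K, 100K, 400K and finally the
      // full 1000K as _shrink_factor steps 0 -> 10 -> 40 -> 100 (capped at
      // 100 by the MIN2 above). A pass that never reaches this branch leaves
      // _shrink_factor at the 0 it was reset to at the top of this method.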
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr(" "
          " shrinking:"
          " initSize: %.1fK"
          " maximum_desired_capacity: %.1fK",
          spec()->init_size() / (double) K,
          maximum_desired_capacity / (double) K);
        gclog_or_tty->print_cr(" "
          " shrink_bytes: %.1fK"
          " current_shrink_factor: %d"
          " new shrink factor: %d"
          " _min_heap_delta_bytes: %.1fK",
          shrink_bytes / (double) K,
          current_shrink_factor,
          _shrink_factor,
          _min_heap_delta_bytes / (double) K);
      }
    }
  }

  if (capacity_after_gc > _capacity_at_prologue) {
    // We might have expanded for promotions, in which case we might want to
    // take back that expansion if there's room after GC. That keeps us from
    // stretching the heap with promotions when there's plenty of room.
    size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue;
    expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes);
    // We have two shrinking computations, take the largest
    shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion);
    assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr(" "
        " aggressive shrinking:"
        " _capacity_at_prologue: %.1fK"
        " capacity_after_gc: %.1fK"
        " expansion_for_promotion: %.1fK"
        " shrink_bytes: %.1fK",
        capacity_after_gc / (double) K,
        _capacity_at_prologue / (double) K,
        expansion_for_promotion / (double) K,
        shrink_bytes / (double) K);
    }
  }
  // Don't shrink unless it's significant
  if (shrink_bytes >= _min_heap_delta_bytes) {
    shrink(shrink_bytes);
  }
}

// Currently nothing to do.
void CardGeneration::prepare_for_verify() {}


void OneContigSpaceCardGeneration::collect(bool full,
                                           bool clear_all_soft_refs,
                                           size_t size,
                                           bool is_tlab) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  SpecializationStats::clear();
  // Temporarily expand the span of our ref processor, so
  // refs discovery is over the entire heap, not just this generation
  ReferenceProcessorSpanMutator
    x(ref_processor(), gch->reserved_region());

  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
  gc_timer->register_gc_start();

  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());

  GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs);

  gc_timer->register_gc_end();

  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());

  SpecializationStats::print();
}

HeapWord*
OneContigSpaceCardGeneration::expand_and_allocate(size_t word_size,
                                                  bool is_tlab,
                                                  bool parallel) {
  assert(!is_tlab, "OneContigSpaceCardGeneration does not support TLAB allocation");
  if (parallel) {
    MutexLocker x(ParGCRareEvent_lock);
    HeapWord* result = NULL;
    size_t byte_size = word_size * HeapWordSize;
    while (true) {
      expand(byte_size, _min_heap_delta_bytes);
      if (GCExpandToAllocateDelayMillis > 0) {
        os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
      }
      result = _the_space->par_allocate(word_size);
      if (result != NULL) {
        return result;
      } else {
        // If there's not enough expansion space available, give up.
        if (_virtual_space.uncommitted_size() < byte_size) {
          return NULL;
        }
        // else try again
      }
    }
  } else {
    expand(word_size*HeapWordSize, _min_heap_delta_bytes);
    return _the_space->allocate(word_size);
  }
}

bool OneContigSpaceCardGeneration::expand(size_t bytes, size_t expand_bytes) {
  GCMutexLocker x(ExpandHeap_lock);
  return CardGeneration::expand(bytes, expand_bytes);
}


void OneContigSpaceCardGeneration::shrink(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  size_t size = ReservedSpace::page_align_size_down(bytes);
  if (size > 0) {
    shrink_by(size);
  }
}


size_t OneContigSpaceCardGeneration::capacity() const {
  return _the_space->capacity();
}


size_t OneContigSpaceCardGeneration::used() const {
  return _the_space->used();
}


size_t OneContigSpaceCardGeneration::free() const {
  return _the_space->free();
}

MemRegion OneContigSpaceCardGeneration::used_region() const {
  return the_space()->used_region();
}

size_t OneContigSpaceCardGeneration::unsafe_max_alloc_nogc() const {
  return _the_space->free();
}

size_t OneContigSpaceCardGeneration::contiguous_available() const {
  return _the_space->free() + _virtual_space.uncommitted_size();
}

bool OneContigSpaceCardGeneration::grow_by(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  bool result = _virtual_space.expand_by(bytes);
  if (result) {
    size_t new_word_size =
      heap_word_size(_virtual_space.committed_size());
    MemRegion mr(_the_space->bottom(), new_word_size);
    // Expand card table
    Universe::heap()->barrier_set()->resize_covered_region(mr);
    // Expand shared block offset array
    _bts->resize(new_word_size);

    // Fix for bug #4668531
    if (ZapUnusedHeapArea) {
      MemRegion mangle_region(_the_space->end(),
                              (HeapWord*)_virtual_space.high());
      SpaceMangler::mangle_region(mangle_region);
    }

    // Expand space -- also expands space's BOT
    // (which uses (part of) shared array above)
    _the_space->set_end((HeapWord*)_virtual_space.high());

    // update the space and generation capacity counters
    update_counters();

    if (Verbose && PrintGC) {
      size_t new_mem_size = _virtual_space.committed_size();
      size_t old_mem_size = new_mem_size - bytes;
      gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
                             SIZE_FORMAT "K to " SIZE_FORMAT "K",
                             name(), old_mem_size/K, bytes/K, new_mem_size/K);
    }
  }
  return result;
}


bool OneContigSpaceCardGeneration::grow_to_reserved() {
  assert_locked_or_safepoint(ExpandHeap_lock);
  bool success = true;
  const size_t remaining_bytes = _virtual_space.uncommitted_size();
  if (remaining_bytes > 0) {
    success = grow_by(remaining_bytes);
    DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
  }
  return success;
}

void OneContigSpaceCardGeneration::shrink_by(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  // Shrink committed space
  _virtual_space.shrink_by(bytes);
  // Shrink space; this also shrinks the space's BOT
  _the_space->set_end((HeapWord*) _virtual_space.high());
  size_t new_word_size = heap_word_size(_the_space->capacity());
  // Shrink the shared block offset array
  _bts->resize(new_word_size);
  MemRegion mr(_the_space->bottom(), new_word_size);
  // Shrink the card table
  Universe::heap()->barrier_set()->resize_covered_region(mr);

  if (Verbose && PrintGC) {
    size_t new_mem_size = _virtual_space.committed_size();
    size_t old_mem_size = new_mem_size + bytes;
    gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                           name(), old_mem_size/K, new_mem_size/K);
  }
}

// Currently nothing to do.
void OneContigSpaceCardGeneration::prepare_for_verify() {}


// Override for a card-table generation with one contiguous
// space. NOTE: For reasons that are lost in the fog of history,
// this code is used when you iterate over perm gen objects,
// even when one uses CDS, where the perm gen has a couple of
// other spaces; this is because CompactingPermGenGen derives
// from OneContigSpaceCardGeneration. This should be cleaned up,
// see CR 6897789.
void OneContigSpaceCardGeneration::object_iterate(ObjectClosure* blk) {
  _the_space->object_iterate(blk);
}

void OneContigSpaceCardGeneration::space_iterate(SpaceClosure* blk,
                                                 bool usedOnly) {
  blk->do_space(_the_space);
}

void OneContigSpaceCardGeneration::younger_refs_iterate(OopsInGenClosure* blk) {
  blk->set_generation(this);
  younger_refs_in_space_iterate(_the_space, blk);
  blk->reset_generation();
}

void OneContigSpaceCardGeneration::save_marks() {
  _the_space->set_saved_mark();
}


void OneContigSpaceCardGeneration::reset_saved_marks() {
  _the_space->reset_saved_mark();
}


bool OneContigSpaceCardGeneration::no_allocs_since_save_marks() {
  return _the_space->saved_mark_at_top();
}

#define OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix) \
                                                                            \
void OneContigSpaceCardGeneration::                                        \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {             \
  blk->set_generation(this);                                               \
  _the_space->oop_since_save_marks_iterate##nv_suffix(blk);                \
  blk->reset_generation();                                                 \
  save_marks();                                                            \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN
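
// For illustration only: the OopClosureType/nv_suffix pairs are supplied by
// ALL_SINCE_SAVE_MARKS_CLOSURES; assuming ScanClosure/_nv is one such pair,
// a single expansion of the macro above reads roughly as
//
//   void OneContigSpaceCardGeneration::
//   oop_since_save_marks_iterate_nv(ScanClosure* blk) {
//     blk->set_generation(this);
//     _the_space->oop_since_save_marks_iterate_nv(blk);
//     blk->reset_generation();
//     save_marks();
//   }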


void OneContigSpaceCardGeneration::gc_epilogue(bool full) {
  _last_gc = WaterMark(the_space(), the_space()->top());

  // update the generation and space performance counters
  update_counters();
  if (ZapUnusedHeapArea) {
    the_space()->check_mangled_unused_area_complete();
  }
}

void OneContigSpaceCardGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  the_space()->set_top_for_allocations();
}

void OneContigSpaceCardGeneration::verify() {
  the_space()->verify();
}

void OneContigSpaceCardGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print(" the");
  the_space()->print_on(st);
}