GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp

/*
 * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/gcLocker.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

inline const char* PSOldGen::select_name() {
  return UseParallelOldGC ? "ParOldGen" : "PSOldGen";
}
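
// Construction happens in one of two ways: the first constructor reserves
// and initializes the generation in one step; the second only records the
// size bounds and relies on the caller to invoke initialize() later.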
PSOldGen::PSOldGen(ReservedSpace rs, size_t alignment,
                   size_t initial_size, size_t min_size, size_t max_size,
                   const char* perf_data_name, int level):
  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
  _max_gen_size(max_size)
{
  initialize(rs, alignment, perf_data_name, level);
}

PSOldGen::PSOldGen(size_t initial_size,
                   size_t min_size, size_t max_size,
                   const char* perf_data_name, int level):
  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
  _max_gen_size(max_size)
{}

void PSOldGen::initialize(ReservedSpace rs, size_t alignment,
                          const char* perf_data_name, int level) {
  initialize_virtual_space(rs, alignment);
  initialize_work(perf_data_name, level);

  // The old gen can grow to gen_size_limit().  _reserve reflects only
  // the current maximum that can be committed.
  assert(_reserved.byte_size() <= gen_size_limit(), "Consistency check");

  initialize_performance_counters(perf_data_name, level);
}
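
// Commit the initial portion (_init_gen_size) of the reservation up front.
// If even that cannot be committed, VM initialization fails.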
void PSOldGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {

  _virtual_space = new PSVirtualSpace(rs, alignment);
  if (!_virtual_space->expand_by(_init_gen_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }
}
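
// Set up the structures that cover this generation's address range: the
// object start array over the full reserved range, and the card table and
// object space over the currently committed range (cmr).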
void PSOldGen::initialize_work(const char* perf_data_name, int level) {
  //
  // Basic memory initialization
  //

  MemRegion limit_reserved((HeapWord*)virtual_space()->low_boundary(),
                           heap_word_size(_max_gen_size));
  assert(limit_reserved.byte_size() == _max_gen_size,
         "word vs bytes confusion");
  //
  // Object start stuff
  //

  start_array()->initialize(limit_reserved);

  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());

  //
  // Card table stuff
  //

  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  if (ZapUnusedHeapArea) {
    // Mangle newly committed space immediately rather than
    // waiting for the initialization of the space even though
    // mangling is related to spaces.  Doing it here eliminates
    // the need to carry along information that a complete mangling
    // (bottom to end) needs to be done.
    SpaceMangler::mangle_region(cmr);
  }

  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  CardTableModRefBS* _ct = (CardTableModRefBS*)Universe::heap()->barrier_set();
  assert (_ct->kind() == BarrierSet::CardTableModRef, "Sanity");

  // Verify that the start and end of this generation is the start of a card.
  // If this wasn't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(_ct->is_card_aligned(_reserved.start()), "generation must be card aligned");
  if (_reserved.end() != Universe::heap()->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(_ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
  }

  //
  // ObjectSpace stuff
  //

  _object_space = new MutableSpace(virtual_space()->alignment());

  if (_object_space == NULL)
    vm_exit_during_initialization("Could not allocate an old gen space");

  object_space()->initialize(cmr,
                             SpaceDecorator::Clear,
                             SpaceDecorator::Mangle);

  _object_mark_sweep = new PSMarkSweepDecorator(_object_space, start_array(), MarkSweepDeadRatio);

  if (_object_mark_sweep == NULL)
    vm_exit_during_initialization("Could not complete allocation of old generation");

  // Update the start_array
  start_array()->set_covered_region(cmr);
}

void PSOldGen::initialize_performance_counters(const char* perf_data_name, int level) {
  // Generation Counters, generation 'level', 1 subspace
  _gen_counters = new PSGenerationCounters(perf_data_name, level, 1,
                                           virtual_space());
  _space_counters = new SpaceCounters(perf_data_name, 0,
                                      virtual_space()->reserved_size(),
                                      _object_space, _gen_counters);
}

// Assume that the generation has been allocated if its
// reserved size is not 0.
bool PSOldGen::is_allocated() {
  return virtual_space()->reserved_size() != 0;
}
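
// Hooks driven by the full (mark-compact) collection: precompact computes
// new object locations, adjust_pointers updates references to them, and
// compact actually moves the objects.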
void PSOldGen::precompact() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Reset start array first.
  start_array()->reset();

  object_mark_sweep()->precompact();

  // Now compact the young gen
  heap->young_gen()->precompact();
}

void PSOldGen::adjust_pointers() {
  object_mark_sweep()->adjust_pointers();
}

void PSOldGen::compact() {
  object_mark_sweep()->compact(ZapUnusedHeapArea);
}
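
// The largest allocation this generation could satisfy after expanding to
// its reservation limit: free bytes already committed plus bytes still
// uncommitted.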
size_t PSOldGen::contiguous_available() const {
  return object_space()->free_in_bytes() + virtual_space()->uncommitted_size();
}

// Allocation. We report all successful allocations to the size policy
// Note that the perm gen does not use this method, and should not!
HeapWord* PSOldGen::allocate(size_t word_size) {
  assert_locked_or_safepoint(Heap_lock);
  HeapWord* res = allocate_noexpand(word_size);

  if (res == NULL) {
    res = expand_and_allocate(word_size);
  }

  // Allocations in the old generation need to be reported
  if (res != NULL) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    heap->size_policy()->tenured_allocation(word_size);
  }

  return res;
}
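
// Both expand-and-allocate paths grow committed memory and then retry the
// no-expand allocation.  GCExpandToAllocateDelayMillis (0 by default)
// optionally inserts a sleep between the expansion and the retry.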
HeapWord* PSOldGen::expand_and_allocate(size_t word_size) {
  expand(word_size*HeapWordSize);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return allocate_noexpand(word_size);
}

HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {
  expand(word_size*HeapWordSize);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return cas_allocate_noexpand(word_size);
}
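
// expand() takes ExpandHeap_lock and makes up to three attempts: the
// minimum growth step (MinHeapDeltaBytes, raised further under NUMA) when
// it exceeds the request, then the aligned request itself, and finally
// whatever remains of the reservation.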
void PSOldGen::expand(size_t bytes) {
  if (bytes == 0) {
    return;
  }
  MutexLocker x(ExpandHeap_lock);
  const size_t alignment = virtual_space()->alignment();
  size_t aligned_bytes  = align_size_up(bytes, alignment);
  size_t aligned_expand_bytes = align_size_up(MinHeapDeltaBytes, alignment);

  if (UseNUMA) {
    // With NUMA we use round-robin page allocation for the old gen. Expand by at least
    // providing a page per lgroup. Alignment is larger or equal to the page size.
    aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());
  }
  if (aligned_bytes == 0) {
    // The alignment caused the number of bytes to wrap.  An expand_by(0) will
    // return true with the implication that an expansion was done when it
    // was not.  A call to expand implies a best effort to expand by "bytes"
    // but not a guarantee.  Align down to give a best effort.  This is likely
    // the most that the generation can expand since it has some capacity to
    // start with.
    aligned_bytes = align_size_down(bytes, alignment);
  }

  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = expand_by(aligned_expand_bytes);
  }
  if (!success) {
    success = expand_by(aligned_bytes);
  }
  if (!success) {
    success = expand_to_reserved();
  }

  if (PrintGC && Verbose) {
    if (success && GC_locker::is_active_and_needs_gc()) {
      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
    }
  }
}
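
// Commits "bytes" of the underlying virtual space.  The newly committed
// region is mangled (when ZapUnusedHeapArea is set) before post_resize()
// publishes it for allocation.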
bool PSOldGen::expand_by(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);
  if (bytes == 0) {
    return true;  // That's what virtual_space()->expand_by(0) would return
  }
  bool result = virtual_space()->expand_by(bytes);
  if (result) {
    if (ZapUnusedHeapArea) {
      // We need to mangle the newly expanded area. The memregion spans
      // end -> new_end, we assume that top -> end is already mangled.
      // Do the mangling before post_resize() is called because
      // the space is available for allocation after post_resize().
      HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
      assert(object_space()->end() < virtual_space_high,
             "Should be true before post_resize()");
      MemRegion mangle_region(object_space()->end(), virtual_space_high);
      // Note that the object space has not yet been updated to
      // coincide with the new underlying virtual space.
      SpaceMangler::mangle_region(mangle_region);
    }
    post_resize();
    if (UsePerfData) {
      _space_counters->update_capacity();
      _gen_counters->update_all();
    }
  }

  if (result && Verbose && PrintGC) {
    size_t new_mem_size = virtual_space()->committed_size();
    size_t old_mem_size = new_mem_size - bytes;
    gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
                           SIZE_FORMAT "K to "
                           SIZE_FORMAT "K",
                           name(), old_mem_size/K, bytes/K, new_mem_size/K);
  }

  return result;
}
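
// Last-resort expansion: commit everything that remains of the reserved
// space in one step.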
bool PSOldGen::expand_to_reserved() {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  bool result = true;
  const size_t remaining_bytes = virtual_space()->uncommitted_size();
  if (remaining_bytes > 0) {
    result = expand_by(remaining_bytes);
    DEBUG_ONLY(if (!result) warning("grow to reserve failed"));
  }
  return result;
}
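
// Unlike expand(), shrink() expects the caller to already hold
// ExpandHeap_lock (resize() below takes it before calling).  The request is
// aligned down, so a sub-alignment shrink is a no-op.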
void PSOldGen::shrink(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  size_t size = align_size_down(bytes, virtual_space()->alignment());
  if (size > 0) {
    assert_lock_strong(ExpandHeap_lock);
    // Uncommit the aligned amount; "bytes" itself may not be aligned.
    virtual_space()->shrink_by(size);
    post_resize();

    if (Verbose && PrintGC) {
      size_t new_mem_size = virtual_space()->committed_size();
      size_t old_mem_size = new_mem_size + size;
      gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by "
                             SIZE_FORMAT "K to "
                             SIZE_FORMAT "K",
                             name(), old_mem_size/K, size/K, new_mem_size/K);
    }
  }
}
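
// Driven by the adaptive size policy: the target committed size is
// used_in_bytes() + desired_free_space, clamped to
// [min_gen_size(), gen_size_limit()] and aligned up, then realized by an
// expand or a shrink.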
void PSOldGen::resize(size_t desired_free_space) {
  const size_t alignment = virtual_space()->alignment();
  const size_t size_before = virtual_space()->committed_size();
  size_t new_size = used_in_bytes() + desired_free_space;
  if (new_size < used_in_bytes()) {
    // Overflowed the addition.
    new_size = gen_size_limit();
  }
  // Adjust according to our min and max
  new_size = MAX2(MIN2(new_size, gen_size_limit()), min_gen_size());

  assert(gen_size_limit() >= reserved().byte_size(), "max new size problem?");
  new_size = align_size_up(new_size, alignment);

  const size_t current_size = capacity_in_bytes();

  if (PrintAdaptiveSizePolicy && Verbose) {
    gclog_or_tty->print_cr("AdaptiveSizePolicy::old generation size: "
      "desired free: " SIZE_FORMAT " used: " SIZE_FORMAT
      " new size: " SIZE_FORMAT " current size " SIZE_FORMAT
      " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
      desired_free_space, used_in_bytes(), new_size, current_size,
      gen_size_limit(), min_gen_size());
  }

  if (new_size == current_size) {
    // No change requested
    return;
  }
  if (new_size > current_size) {
    size_t change_bytes = new_size - current_size;
    expand(change_bytes);
  } else {
    size_t change_bytes = current_size - new_size;
    // shrink doesn't grab this lock, expand does. Is that right?
    MutexLocker x(ExpandHeap_lock);
    shrink(change_bytes);
  }

  if (PrintAdaptiveSizePolicy) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    gclog_or_tty->print_cr("AdaptiveSizePolicy::old generation size: "
                           "collection: %d "
                           "(" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
                           heap->total_collections(),
                           size_before, virtual_space()->committed_size());
  }
}

// NOTE! We need to be careful about resizing. During a GC, multiple
// allocators may be active during heap expansion. If we allow the
// heap resizing to become visible before we have correctly resized
// all heap related data structures, we may cause program failures.
void PSOldGen::post_resize() {
  // First construct a memregion representing the new size
  MemRegion new_memregion((HeapWord*)virtual_space()->low(),
                          (HeapWord*)virtual_space()->high());
  size_t new_word_size = new_memregion.word_size();

  start_array()->set_covered_region(new_memregion);
  Universe::heap()->barrier_set()->resize_covered_region(new_memregion);

  // The update of the space's end is done by this call.  As that
  // makes the new space available for concurrent allocation, this
  // must be the last step when expanding.
  object_space()->initialize(new_memregion,
                             SpaceDecorator::DontClear,
                             SpaceDecorator::DontMangle);

  assert(new_word_size == heap_word_size(object_space()->capacity_in_bytes()),
         "Sanity");
}

size_t PSOldGen::gen_size_limit() {
  return _max_gen_size;
}
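
// The remaining size-policy overrides are meaningful only for the
// adaptive-size variant of this generation (ASPSOldGen); a plain PSOldGen
// must never reach them, hence ShouldNotReachHere().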
void PSOldGen::reset_after_change() {
  ShouldNotReachHere();
  return;
}

size_t PSOldGen::available_for_expansion() {
  ShouldNotReachHere();
  return 0;
}

size_t PSOldGen::available_for_contraction() {
  ShouldNotReachHere();
  return 0;
}

void PSOldGen::print() const { print_on(tty);}
void PSOldGen::print_on(outputStream* st) const {
  st->print(" %-15s", name());
  if (PrintGCDetails && Verbose) {
    st->print(" total " SIZE_FORMAT ", used " SIZE_FORMAT,
              capacity_in_bytes(), used_in_bytes());
  } else {
    st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
              capacity_in_bytes()/K, used_in_bytes()/K);
  }
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               virtual_space()->low_boundary(),
               virtual_space()->high(),
               virtual_space()->high_boundary());

  st->print(" object"); object_space()->print_on(st);
}

void PSOldGen::print_used_change(size_t prev_used) const {
  gclog_or_tty->print(" [%s:", name());
  gclog_or_tty->print(" "  SIZE_FORMAT "K"
                      "->" SIZE_FORMAT "K"
                      "("  SIZE_FORMAT "K)",
                      prev_used / K, used_in_bytes() / K,
                      capacity_in_bytes() / K);
  gclog_or_tty->print("]");
}

void PSOldGen::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

#ifndef PRODUCT

void PSOldGen::space_invariants() {
  assert(object_space()->end() == (HeapWord*) virtual_space()->high(),
         "Space invariant");
  assert(object_space()->bottom() == (HeapWord*) virtual_space()->low(),
         "Space invariant");
  assert(virtual_space()->low_boundary() <= virtual_space()->low(),
         "Space invariant");
  assert(virtual_space()->high_boundary() >= virtual_space()->high(),
         "Space invariant");
  assert(virtual_space()->low_boundary() == (char*) _reserved.start(),
         "Space invariant");
  assert(virtual_space()->high_boundary() == (char*) _reserved.end(),
         "Space invariant");
  assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
         "Space invariant");
}
#endif
488
489
void PSOldGen::verify() {
490
object_space()->verify();
491
}
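
// For every object in the space, check that the start array can map an
// interior address back to the object's start and that the object's block
// is marked allocated.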
class VerifyObjectStartArrayClosure : public ObjectClosure {
  PSOldGen* _gen;
  ObjectStartArray* _start_array;

 public:
  VerifyObjectStartArrayClosure(PSOldGen* gen, ObjectStartArray* start_array) :
    _gen(gen), _start_array(start_array) { }

  virtual void do_object(oop obj) {
    HeapWord* test_addr = (HeapWord*)obj + 1;
    guarantee(_start_array->object_start(test_addr) == (HeapWord*)obj, "ObjectStartArray cannot find start of object");
    guarantee(_start_array->is_block_allocated((HeapWord*)obj), "ObjectStartArray missing block allocation");
  }
};

void PSOldGen::verify_object_start_array() {
  VerifyObjectStartArrayClosure check( this, &_start_array );
  object_iterate(&check);
}

#ifndef PRODUCT
void PSOldGen::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  object_space()->set_top_for_allocations();
}
#endif