Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/memory/collectorPolicy.cpp
32285 views
1
/*
2
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
3
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
*
5
* This code is free software; you can redistribute it and/or modify it
6
* under the terms of the GNU General Public License version 2 only, as
7
* published by the Free Software Foundation.
8
*
9
* This code is distributed in the hope that it will be useful, but WITHOUT
10
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12
* version 2 for more details (a copy is included in the LICENSE file that
13
* accompanied this code).
14
*
15
* You should have received a copy of the GNU General Public License version
16
* 2 along with this work; if not, write to the Free Software Foundation,
17
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
*
19
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
* or visit www.oracle.com if you need additional information or have any
21
* questions.
22
*
23
*/
24
25
#include "precompiled.hpp"
26
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
27
#include "gc_implementation/shared/gcPolicyCounters.hpp"
28
#include "gc_implementation/shared/vmGCOperations.hpp"
29
#include "memory/cardTableRS.hpp"
30
#include "memory/collectorPolicy.hpp"
31
#include "memory/gcLocker.inline.hpp"
32
#include "memory/genCollectedHeap.hpp"
33
#include "memory/generationSpec.hpp"
34
#include "memory/space.hpp"
35
#include "memory/universe.hpp"
36
#include "runtime/arguments.hpp"
37
#include "runtime/globals_extension.hpp"
38
#include "runtime/handles.inline.hpp"
39
#include "runtime/java.hpp"
40
#include "runtime/thread.inline.hpp"
41
#include "runtime/vmThread.hpp"
42
#include "utilities/macros.hpp"
43
#if INCLUDE_ALL_GCS
44
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
45
#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
46
#endif // INCLUDE_ALL_GCS
47
48
// CollectorPolicy methods.
49
50
// CollectorPolicy constructor.
// Seeds the heap-size bookkeeping from the global sizing flags
// (InitialHeapSize, MaxHeapSize) and the -Xms value recorded during
// argument parsing. The alignments are left at zero here; a subclass
// must fill them in before initialize_flags() runs (it asserts that
// they are non-zero).
CollectorPolicy::CollectorPolicy() :
    _space_alignment(0),
    _heap_alignment(0),
    _initial_heap_byte_size(InitialHeapSize),
    _max_heap_byte_size(MaxHeapSize),
    _min_heap_byte_size(Arguments::min_heap_size()),
    _max_heap_size_cmdline(false),      // set in initialize_flags() if -Xmx was given
    _size_policy(NULL),                 // installed later via initialize_size_policy()
    _should_clear_all_soft_refs(false),
    _all_soft_refs_clear(false)
{}
61
62
#ifdef ASSERT
63
// Debug-only sanity checks on the global heap sizing flags after
// ergonomics has run: initial <= max, and both aligned to _heap_alignment.
void CollectorPolicy::assert_flags() {
  assert(InitialHeapSize <= MaxHeapSize, "Ergonomics decided on incompatible initial and maximum heap sizes");
  assert(InitialHeapSize % _heap_alignment == 0, "InitialHeapSize alignment");
  assert(MaxHeapSize % _heap_alignment == 0, "MaxHeapSize alignment");
}
68
69
// Debug-only sanity checks on the cached size fields after
// initialize_size_info(): the local copies must agree with the flags,
// min <= initial <= max must hold, and all three must be heap-aligned.
void CollectorPolicy::assert_size_info() {
  assert(InitialHeapSize == _initial_heap_byte_size, "Discrepancy between InitialHeapSize flag and local storage");
  assert(MaxHeapSize == _max_heap_byte_size, "Discrepancy between MaxHeapSize flag and local storage");
  assert(_max_heap_byte_size >= _min_heap_byte_size, "Ergonomics decided on incompatible minimum and maximum heap sizes");
  assert(_initial_heap_byte_size >= _min_heap_byte_size, "Ergonomics decided on incompatible initial and minimum heap sizes");
  assert(_max_heap_byte_size >= _initial_heap_byte_size, "Ergonomics decided on incompatible initial and maximum heap sizes");
  assert(_min_heap_byte_size % _heap_alignment == 0, "min_heap_byte_size alignment");
  assert(_initial_heap_byte_size % _heap_alignment == 0, "initial_heap_byte_size alignment");
  assert(_max_heap_byte_size % _heap_alignment == 0, "max_heap_byte_size alignment");
}
79
#endif // ASSERT
80
81
// Reconcile the user-specified heap sizing flags (-Xms/-Xmx, i.e.
// InitialHeapSize/MaxHeapSize, plus the recorded minimum heap size) with
// each other and with the alignments chosen by the subclass. Exits the VM
// on truly contradictory command-line input; otherwise aligns the values,
// writes adjusted values back to the flags as ergonomic decisions, and
// caches them in the local _initial/_max_heap_byte_size fields.
void CollectorPolicy::initialize_flags() {
  assert(_space_alignment != 0, "Space alignment not set up properly");
  assert(_heap_alignment != 0, "Heap alignment not set up properly");
  assert(_heap_alignment >= _space_alignment,
         err_msg("heap_alignment: " SIZE_FORMAT " less than space_alignment: " SIZE_FORMAT,
                 _heap_alignment, _space_alignment));
  assert(_heap_alignment % _space_alignment == 0,
         err_msg("heap_alignment: " SIZE_FORMAT " not aligned by space_alignment: " SIZE_FORMAT,
                 _heap_alignment, _space_alignment));

  // Explicit -Xmx: reject command lines where it contradicts -Xms or the
  // minimum heap size, and remember that the maximum was user-chosen so
  // later code knows not to grow past it.
  if (FLAG_IS_CMDLINE(MaxHeapSize)) {
    if (FLAG_IS_CMDLINE(InitialHeapSize) && InitialHeapSize > MaxHeapSize) {
      vm_exit_during_initialization("Initial heap size set to a larger value than the maximum heap size");
    }
    if (_min_heap_byte_size != 0 && MaxHeapSize < _min_heap_byte_size) {
      vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified");
    }
    _max_heap_size_cmdline = true;
  }

  // Check heap parameter properties
  if (InitialHeapSize < M) {
    vm_exit_during_initialization("Too small initial heap");
  }
  if (_min_heap_byte_size < M) {
    vm_exit_during_initialization("Too small minimum heap");
  }

  // User inputs from -Xmx and -Xms must be aligned
  _min_heap_byte_size = align_size_up(_min_heap_byte_size, _heap_alignment);
  uintx aligned_initial_heap_size = align_size_up(InitialHeapSize, _heap_alignment);
  uintx aligned_max_heap_size = align_size_up(MaxHeapSize, _heap_alignment);

  // Write back to flags if the values changed
  if (aligned_initial_heap_size != InitialHeapSize) {
    FLAG_SET_ERGO(uintx, InitialHeapSize, aligned_initial_heap_size);
  }
  if (aligned_max_heap_size != MaxHeapSize) {
    FLAG_SET_ERGO(uintx, MaxHeapSize, aligned_max_heap_size);
  }

  if (FLAG_IS_CMDLINE(InitialHeapSize) && _min_heap_byte_size != 0 &&
      InitialHeapSize < _min_heap_byte_size) {
    vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
  }
  // Resolve initial > max: a non-default InitialHeapSize pulls MaxHeapSize
  // up; otherwise a non-default MaxHeapSize pulls InitialHeapSize (and, if
  // necessary, the minimum) down.
  if (!FLAG_IS_DEFAULT(InitialHeapSize) && InitialHeapSize > MaxHeapSize) {
    FLAG_SET_ERGO(uintx, MaxHeapSize, InitialHeapSize);
  } else if (!FLAG_IS_DEFAULT(MaxHeapSize) && InitialHeapSize > MaxHeapSize) {
    FLAG_SET_ERGO(uintx, InitialHeapSize, MaxHeapSize);
    if (InitialHeapSize < _min_heap_byte_size) {
      _min_heap_byte_size = InitialHeapSize;
    }
  }

  // Cache the (now consistent) flag values locally.
  _initial_heap_byte_size = InitialHeapSize;
  _max_heap_byte_size = MaxHeapSize;

  FLAG_SET_ERGO(uintx, MinHeapDeltaBytes, align_size_up(MinHeapDeltaBytes, _space_alignment));

  DEBUG_ONLY(CollectorPolicy::assert_flags();)
}
142
143
void CollectorPolicy::initialize_size_info() {
144
if (PrintGCDetails && Verbose) {
145
gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT " Initial heap "
146
SIZE_FORMAT " Maximum heap " SIZE_FORMAT,
147
_min_heap_byte_size, _initial_heap_byte_size, _max_heap_byte_size);
148
}
149
150
DEBUG_ONLY(CollectorPolicy::assert_size_info();)
151
}
152
153
bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) {
154
bool result = _should_clear_all_soft_refs;
155
set_should_clear_all_soft_refs(false);
156
return result;
157
}
158
159
// Build the remembered set covering the whole heap. The base policy always
// uses a card-table remembered set.
GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
                                           int max_covered_regions) {
  CardTableRS* const rem_set = new CardTableRS(whole_heap, max_covered_regions);
  return rem_set;
}
163
164
// Record that a collection has cleared all SoftReferences, and decide
// whether the next collection should keep clearing them.
void CollectorPolicy::cleared_all_soft_refs() {
  // If near gc overhead limit, continue to clear SoftRefs. SoftRefs may
  // have been cleared in the last collection but if the gc overhead
  // limit continues to be near, SoftRefs should still be cleared.
  if (size_policy() != NULL) {
    _should_clear_all_soft_refs = size_policy()->gc_overhead_limit_near();
  }
  _all_soft_refs_clear = true;
}
173
174
// Compute the alignment the heap size must satisfy.
//
// The card marking array and the offset arrays for old generations are
// committed in os pages too, and must cover whole pages to avoid
// partial-page problems: e.g. with one byte of card table per 512 heap
// bytes and a 4096-byte os page, the heap must be 512*4096 = 2MB aligned.
//
// Only the GenRemSet card-table remembered set exists in HotSpot today;
// any future remembered-set implementation must add its constraint here.
size_t CollectorPolicy::compute_heap_alignment() {
  size_t result = GenRemSet::max_alignment_constraint(GenRemSet::CardTable);

  if (UseLargePages) {
    // With large pages the heap alignment must also be a multiple of the
    // large page size, so take the least common multiple of the two.
    result = lcm(os::large_page_size(), result);
  }

  return result;
}
194
195
// GenCollectorPolicy methods.
196
197
// GenCollectorPolicy constructor.
// All young-generation (gen0) sizes and the generation alignment start at
// zero; they are computed later by initialize_flags() and
// initialize_size_info(). _generations is filled in later as well.
GenCollectorPolicy::GenCollectorPolicy() :
    _min_gen0_size(0),
    _initial_gen0_size(0),
    _max_gen0_size(0),
    _gen_alignment(0),
    _generations(NULL)
{}
204
205
// Return the young-generation share of base_size implied by NewRatio
// (young : old = 1 : NewRatio, so young gets 1/(NewRatio+1) of the total),
// rounded down to the generation alignment but kept at least one
// alignment unit large.
size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
  const size_t young_share = base_size / (NewRatio + 1);
  return align_size_down_bounded(young_share, _gen_alignment);
}
208
209
// Clamp desired_size so that at least one generation-alignment unit of
// maximum_size is left over (room for the other generation).
size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
                                                 size_t maximum_size) {
  const size_t upper_bound = maximum_size - _gen_alignment;
  return MIN2(desired_size, upper_bound);
}
214
215
216
// Create the adaptive size policy from the initial space sizes and the
// pause/throughput goals (MaxGCPauseMillis, GCTimeRatio).
void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
                                                size_t init_promo_size,
                                                size_t init_survivor_size) {
  // The pause goal flag is in milliseconds; the policy wants seconds.
  const double pause_goal_sec = MaxGCPauseMillis / 1000.0;
  _size_policy = new AdaptiveSizePolicy(init_eden_size, init_promo_size,
                                        init_survivor_size, pause_goal_sec,
                                        GCTimeRatio);
}
226
227
// Smallest legal young generation: eden plus two survivor spaces (three
// space-alignment units), rounded up to the generation alignment.
size_t GenCollectorPolicy::young_gen_size_lower_bound() {
  const size_t three_spaces = 3 * _space_alignment;
  return align_size_up(three_spaces, _gen_alignment);
}
231
232
#ifdef ASSERT
233
// Debug-only checks on the young-generation flags in addition to the base
// heap-flag checks: NewSize within [min, MaxNewSize], MaxNewSize leaves
// room in the heap, and both are generation-aligned.
void GenCollectorPolicy::assert_flags() {
  CollectorPolicy::assert_flags();
  assert(NewSize >= _min_gen0_size, "Ergonomics decided on a too small young gen size");
  assert(NewSize <= MaxNewSize, "Ergonomics decided on incompatible initial and maximum young gen sizes");
  assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young gen and heap sizes");
  assert(NewSize % _gen_alignment == 0, "NewSize alignment");
  assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize % _gen_alignment == 0, "MaxNewSize alignment");
}
241
242
// Debug-only checks adding the old-generation constraints: young + old fit
// in the heap, and OldSize is generation-aligned.
void TwoGenerationCollectorPolicy::assert_flags() {
  GenCollectorPolicy::assert_flags();
  assert(OldSize + NewSize <= MaxHeapSize, "Ergonomics decided on incompatible generation and heap sizes");
  assert(OldSize % _gen_alignment == 0, "OldSize alignment");
}
247
248
// Debug-only checks that the cached gen0 sizes agree with the flags and
// satisfy min <= initial <= max with generation alignment throughout.
void GenCollectorPolicy::assert_size_info() {
  CollectorPolicy::assert_size_info();
  // GenCollectorPolicy::initialize_size_info may update the MaxNewSize
  assert(MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young and heap sizes");
  assert(NewSize == _initial_gen0_size, "Discrepancy between NewSize flag and local storage");
  assert(MaxNewSize == _max_gen0_size, "Discrepancy between MaxNewSize flag and local storage");
  assert(_min_gen0_size <= _initial_gen0_size, "Ergonomics decided on incompatible minimum and initial young gen sizes");
  assert(_initial_gen0_size <= _max_gen0_size, "Ergonomics decided on incompatible initial and maximum young gen sizes");
  assert(_min_gen0_size % _gen_alignment == 0, "_min_gen0_size alignment");
  assert(_initial_gen0_size % _gen_alignment == 0, "_initial_gen0_size alignment");
  assert(_max_gen0_size % _gen_alignment == 0, "_max_gen0_size alignment");
}
260
261
// Debug-only checks adding the gen1 (old generation) size invariants, and
// that the generation maxima together cover the maximum heap.
void TwoGenerationCollectorPolicy::assert_size_info() {
  GenCollectorPolicy::assert_size_info();
  assert(OldSize == _initial_gen1_size, "Discrepancy between OldSize flag and local storage");
  assert(_min_gen1_size <= _initial_gen1_size, "Ergonomics decided on incompatible minimum and initial old gen sizes");
  assert(_initial_gen1_size <= _max_gen1_size, "Ergonomics decided on incompatible initial and maximum old gen sizes");
  assert(_max_gen1_size % _gen_alignment == 0, "_max_gen1_size alignment");
  assert(_initial_gen1_size % _gen_alignment == 0, "_initial_gen1_size alignment");
  assert(_max_heap_byte_size <= (_max_gen0_size + _max_gen1_size), "Total maximum heap sizes must be sum of generation maximum sizes");
}
270
#endif // ASSERT
271
272
// Reconcile the young-generation flags (NewSize, MaxNewSize, ratios) with
// the heap flags established by CollectorPolicy::initialize_flags().
// Ensures the heap can hold a minimal young gen plus at least one space of
// old gen, silently raises/aligns NewSize, trims MaxNewSize to leave room
// for an old generation, and exits the VM on invalid ratio flags.
void GenCollectorPolicy::initialize_flags() {
  CollectorPolicy::initialize_flags();

  assert(_gen_alignment != 0, "Generation alignment not set up properly");
  assert(_heap_alignment >= _gen_alignment,
         err_msg("heap_alignment: " SIZE_FORMAT " less than gen_alignment: " SIZE_FORMAT,
                 _heap_alignment, _gen_alignment));
  assert(_gen_alignment % _space_alignment == 0,
         err_msg("gen_alignment: " SIZE_FORMAT " not aligned by space_alignment: " SIZE_FORMAT,
                 _gen_alignment, _space_alignment));
  assert(_heap_alignment % _gen_alignment == 0,
         err_msg("heap_alignment: " SIZE_FORMAT " not aligned by gen_alignment: " SIZE_FORMAT,
                 _heap_alignment, _gen_alignment));

  // All generational heaps have a youngest gen; handle those flags here

  // Make sure the heap is large enough for two generations
  uintx smallest_new_size = young_gen_size_lower_bound();
  uintx smallest_heap_size = align_size_up(smallest_new_size + align_size_up(_space_alignment, _gen_alignment),
                                           _heap_alignment);
  if (MaxHeapSize < smallest_heap_size) {
    FLAG_SET_ERGO(uintx, MaxHeapSize, smallest_heap_size);
    _max_heap_byte_size = MaxHeapSize;
  }
  // If needed, synchronize _min_heap_byte size and _initial_heap_byte_size
  if (_min_heap_byte_size < smallest_heap_size) {
    _min_heap_byte_size = smallest_heap_size;
    if (InitialHeapSize < _min_heap_byte_size) {
      FLAG_SET_ERGO(uintx, InitialHeapSize, smallest_heap_size);
      _initial_heap_byte_size = smallest_heap_size;
    }
  }

  // Now take the actual NewSize into account. We will silently increase NewSize
  // if the user specified a smaller or unaligned value.
  smallest_new_size = MAX2(smallest_new_size, (uintx)align_size_down(NewSize, _gen_alignment));
  if (smallest_new_size != NewSize) {
    // Do not use FLAG_SET_ERGO to update NewSize here, since this will override
    // if NewSize was set on the command line or not. This information is needed
    // later when setting the initial and minimum young generation size.
    NewSize = smallest_new_size;
  }
  _initial_gen0_size = NewSize;

  if (!FLAG_IS_DEFAULT(MaxNewSize)) {
    uintx min_new_size = MAX2(_gen_alignment, _min_gen0_size);

    if (MaxNewSize >= MaxHeapSize) {
      // Make sure there is room for an old generation
      uintx smaller_max_new_size = MaxHeapSize - _gen_alignment;
      // Only warn the user if they explicitly asked for the too-large value.
      if (FLAG_IS_CMDLINE(MaxNewSize)) {
        warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or greater than the entire "
                "heap (" SIZE_FORMAT "k). A new max generation size of " SIZE_FORMAT "k will be used.",
                MaxNewSize/K, MaxHeapSize/K, smaller_max_new_size/K);
      }
      FLAG_SET_ERGO(uintx, MaxNewSize, smaller_max_new_size);
      if (NewSize > MaxNewSize) {
        FLAG_SET_ERGO(uintx, NewSize, MaxNewSize);
        _initial_gen0_size = NewSize;
      }
    } else if (MaxNewSize < min_new_size) {
      FLAG_SET_ERGO(uintx, MaxNewSize, min_new_size);
    } else if (!is_size_aligned(MaxNewSize, _gen_alignment)) {
      FLAG_SET_ERGO(uintx, MaxNewSize, align_size_down(MaxNewSize, _gen_alignment));
    }
    _max_gen0_size = MaxNewSize;
  }

  if (NewSize > MaxNewSize) {
    // At this point this should only happen if the user specifies a large NewSize and/or
    // a small (but not too small) MaxNewSize.
    if (FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). "
              "A new max generation size of " SIZE_FORMAT "k will be used.",
              NewSize/K, MaxNewSize/K, NewSize/K);
    }
    FLAG_SET_ERGO(uintx, MaxNewSize, NewSize);
    _max_gen0_size = MaxNewSize;
  }

  if (SurvivorRatio < 1 || NewRatio < 1) {
    vm_exit_during_initialization("Invalid young gen ratio specified");
  }

  DEBUG_ONLY(GenCollectorPolicy::assert_flags();)
}
358
359
// Reconcile OldSize with the heap and young-generation flags. Aligns
// OldSize, grows the heap to match an explicit OldSize when no -Xmx was
// given, and when NewSize+OldSize overflow the heap either shrinks the
// generations proportionally (user-fixed -Xmx) or grows the heap.
void TwoGenerationCollectorPolicy::initialize_flags() {
  GenCollectorPolicy::initialize_flags();

  if (!is_size_aligned(OldSize, _gen_alignment)) {
    FLAG_SET_ERGO(uintx, OldSize, align_size_down(OldSize, _gen_alignment));
  }

  if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(MaxHeapSize)) {
    // NewRatio will be used later to set the young generation size so we use
    // it to calculate how big the heap should be based on the requested OldSize
    // and NewRatio.
    assert(NewRatio > 0, "NewRatio should have been set up earlier");
    size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1);

    calculated_heapsize = align_size_up(calculated_heapsize, _heap_alignment);
    FLAG_SET_ERGO(uintx, MaxHeapSize, calculated_heapsize);
    _max_heap_byte_size = MaxHeapSize;
    FLAG_SET_ERGO(uintx, InitialHeapSize, calculated_heapsize);
    _initial_heap_byte_size = InitialHeapSize;
  }

  // adjust max heap size if necessary
  if (NewSize + OldSize > MaxHeapSize) {
    if (_max_heap_size_cmdline) {
      // somebody set a maximum heap size with the intention that we should not
      // exceed it. Adjust New/OldSize as necessary.
      uintx calculated_size = NewSize + OldSize;
      double shrink_factor = (double) MaxHeapSize / calculated_size;
      uintx smaller_new_size = align_size_down((uintx)(NewSize * shrink_factor), _gen_alignment);
      // Never shrink the young gen below its legal minimum.
      FLAG_SET_ERGO(uintx, NewSize, MAX2(young_gen_size_lower_bound(), smaller_new_size));
      _initial_gen0_size = NewSize;

      // OldSize is already aligned because above we aligned MaxHeapSize to
      // _heap_alignment, and we just made sure that NewSize is aligned to
      // _gen_alignment. In initialize_flags() we verified that _heap_alignment
      // is a multiple of _gen_alignment.
      FLAG_SET_ERGO(uintx, OldSize, MaxHeapSize - NewSize);
    } else {
      FLAG_SET_ERGO(uintx, MaxHeapSize, align_size_up(NewSize + OldSize, _heap_alignment));
      _max_heap_byte_size = MaxHeapSize;
    }
  }

  always_do_update_barrier = UseConcMarkSweepGC;

  DEBUG_ONLY(TwoGenerationCollectorPolicy::assert_flags();)
}
406
407
// Values set on the command line win over any ergonomically
408
// set command line parameters.
409
// Ergonomic choice of parameters are done before this
410
// method is called. Values for command line parameters such as NewSize
411
// and MaxNewSize feed those ergonomic choices into this method.
412
// This method makes the final generation sizings consistent with
413
// themselves and with overall heap sizings.
414
// In the absence of explicitly set command line flags, policies
415
// such as the use of NewRatio are used to size the generation.
416
// Compute the final minimum, initial and maximum young-generation (gen0)
// sizes from the flag values settled by initialize_flags(), and write the
// results back to NewSize/MaxNewSize. See the block comment above for how
// command-line values interact with the NewRatio-based ergonomics.
void GenCollectorPolicy::initialize_size_info() {
  CollectorPolicy::initialize_size_info();

  // _space_alignment is used for alignment within a generation.
  // There is additional alignment done down stream for some
  // collectors that sometimes causes unwanted rounding up of
  // generations sizes.

  // Determine maximum size of gen0

  size_t max_new_size = 0;
  if (!FLAG_IS_DEFAULT(MaxNewSize)) {
    max_new_size = MaxNewSize;
  } else {
    max_new_size = scale_by_NewRatio_aligned(_max_heap_byte_size);
    // Bound the maximum size by NewSize below (since it historically
    // would have been NewSize and because the NewRatio calculation could
    // yield a size that is too small) and bound it by MaxNewSize above.
    // Ergonomics plays here by previously calculating the desired
    // NewSize and MaxNewSize.
    max_new_size = MIN2(MAX2(max_new_size, NewSize), MaxNewSize);
  }
  assert(max_new_size > 0, "All paths should set max_new_size");

  // Given the maximum gen0 size, determine the initial and
  // minimum gen0 sizes.

  if (_max_heap_byte_size == _min_heap_byte_size) {
    // The maximum and minimum heap sizes are the same so
    // the generations minimum and initial must be the
    // same as its maximum.
    _min_gen0_size = max_new_size;
    _initial_gen0_size = max_new_size;
    _max_gen0_size = max_new_size;
  } else {
    size_t desired_new_size = 0;
    if (FLAG_IS_CMDLINE(NewSize)) {
      // If NewSize is set on the command line, we must use it as
      // the initial size and it also makes sense to use it as the
      // lower limit.
      _min_gen0_size = NewSize;
      desired_new_size = NewSize;
      max_new_size = MAX2(max_new_size, NewSize);
    } else if (FLAG_IS_ERGO(NewSize)) {
      // If NewSize is set ergonomically, we should use it as a lower
      // limit, but use NewRatio to calculate the initial size.
      _min_gen0_size = NewSize;
      desired_new_size =
        MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize);
      max_new_size = MAX2(max_new_size, NewSize);
    } else {
      // For the case where NewSize is the default, use NewRatio
      // to size the minimum and initial generation sizes.
      // Use the default NewSize as the floor for these values. If
      // NewRatio is overly large, the resulting sizes can be too
      // small.
      _min_gen0_size = MAX2(scale_by_NewRatio_aligned(_min_heap_byte_size), NewSize);
      desired_new_size =
        MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize);
    }

    assert(_min_gen0_size > 0, "Sanity check");
    _initial_gen0_size = desired_new_size;
    _max_gen0_size = max_new_size;

    // At this point the desirable initial and minimum sizes have been
    // determined without regard to the maximum sizes.

    // Bound the sizes by the corresponding overall heap sizes.
    _min_gen0_size = bound_minus_alignment(_min_gen0_size, _min_heap_byte_size);
    _initial_gen0_size = bound_minus_alignment(_initial_gen0_size, _initial_heap_byte_size);
    _max_gen0_size = bound_minus_alignment(_max_gen0_size, _max_heap_byte_size);

    // At this point all three sizes have been checked against the
    // maximum sizes but have not been checked for consistency
    // among the three.

    // Final check min <= initial <= max
    _min_gen0_size = MIN2(_min_gen0_size, _max_gen0_size);
    _initial_gen0_size = MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size);
    _min_gen0_size = MIN2(_min_gen0_size, _initial_gen0_size);
  }

  // Write back to flags if necessary
  if (NewSize != _initial_gen0_size) {
    FLAG_SET_ERGO(uintx, NewSize, _initial_gen0_size);
  }

  if (MaxNewSize != _max_gen0_size) {
    FLAG_SET_ERGO(uintx, MaxNewSize, _max_gen0_size);
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
      SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
      _min_gen0_size, _initial_gen0_size, _max_gen0_size);
  }

  DEBUG_ONLY(GenCollectorPolicy::assert_size_info();)
}
516
517
// Call this method during the sizing of the gen1 to make
518
// adjustments to gen0 because of gen1 sizing policy. gen0 initially has
519
// the most freedom in sizing because it is done before the
520
// policy for gen1 is applied. Once gen1 policies have been applied,
521
// there may be conflicts in the shape of the heap and this method
522
// is used to make the needed adjustments. The application of the
523
// policies could be more sophisticated (iterative for example) but
524
// keeping it simple also seems a worthwhile goal.
525
// Resolve gen0+gen1 overflowing heap_size (see the comment above this
// method). Returns true iff *gen0_size_ptr was reduced; when gen0 cannot
// be reduced without violating _min_gen1_size, gen1 absorbs the cut
// instead and false is returned.
bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr,
                                                     size_t* gen1_size_ptr,
                                                     const size_t heap_size) {
  bool result = false;

  if ((*gen0_size_ptr + *gen1_size_ptr) > heap_size) {
    uintx smallest_new_size = young_gen_size_lower_bound();
    // Shrink gen0 only if gen1 would otherwise drop below its minimum AND
    // the heap can still hold that minimum next to a minimal young gen.
    if ((heap_size < (*gen0_size_ptr + _min_gen1_size)) &&
        (heap_size >= _min_gen1_size + smallest_new_size)) {
      // Adjust gen0 down to accommodate _min_gen1_size
      *gen0_size_ptr = align_size_down_bounded(heap_size - _min_gen1_size, _gen_alignment);
      result = true;
    } else {
      // Otherwise give gen1 whatever is left after gen0.
      *gen1_size_ptr = align_size_down_bounded(heap_size - *gen0_size_ptr, _gen_alignment);
    }
  }
  return result;
}
543
544
// Minimum sizes of the generations may be different from
// the initial sizes. An inconsistency is permitted here
// in the total size that can be specified explicitly by
// command line specification of OldSize and NewSize and
// also a command line specification of -Xms. Issue a warning
// but allow the values to pass.
550
551
// Compute the minimum, initial and maximum old-generation (gen1) sizes
// from the heap and gen0 sizes established by the superclass, adjusting
// gen0 when an explicitly specified OldSize conflicts, and write the
// final values back to the NewSize/MaxNewSize/OldSize flags.
void TwoGenerationCollectorPolicy::initialize_size_info() {
  GenCollectorPolicy::initialize_size_info();

  // At this point the minimum, initial and maximum sizes
  // of the overall heap and of gen0 have been determined.
  // The maximum gen1 size can be determined from the maximum gen0
  // and maximum heap size since no explicit flags exist
  // for setting the gen1 maximum.
  _max_gen1_size = MAX2(_max_heap_byte_size - _max_gen0_size, _gen_alignment);

  // If no explicit command line flag has been set for the
  // gen1 size, use what is left for gen1.
  if (!FLAG_IS_CMDLINE(OldSize)) {
    // The user has not specified any value but the ergonomics
    // may have chosen a value (which may or may not be consistent
    // with the overall heap size). In either case make
    // the minimum, maximum and initial sizes consistent
    // with the gen0 sizes and the overall heap sizes.
    _min_gen1_size = MAX2(_min_heap_byte_size - _min_gen0_size, _gen_alignment);
    _initial_gen1_size = MAX2(_initial_heap_byte_size - _initial_gen0_size, _gen_alignment);
    // _max_gen1_size has already been made consistent above
    FLAG_SET_ERGO(uintx, OldSize, _initial_gen1_size);
  } else {
    // It's been explicitly set on the command line. Use the
    // OldSize and then determine the consequences.
    _min_gen1_size = MIN2(OldSize, _min_heap_byte_size - _min_gen0_size);
    _initial_gen1_size = OldSize;

    // If the user has explicitly set an OldSize that is inconsistent
    // with other command line flags, issue a warning.
    // The generation minimums and the overall heap minimum should
    // be within one generation alignment.
    if ((_min_gen1_size + _min_gen0_size + _gen_alignment) < _min_heap_byte_size) {
      warning("Inconsistency between minimum heap size and minimum "
              "generation sizes: using minimum heap = " SIZE_FORMAT,
              _min_heap_byte_size);
    }
    if (OldSize > _max_gen1_size) {
      warning("Inconsistency between maximum heap size and maximum "
              "generation sizes: using maximum heap = " SIZE_FORMAT
              " -XX:OldSize flag is being ignored",
              _max_heap_byte_size);
    }
    // If there is an inconsistency between the OldSize and the minimum and/or
    // initial size of gen0, since OldSize was explicitly set, OldSize wins.
    if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size, _min_heap_byte_size)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
          SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
          _min_gen0_size, _initial_gen0_size, _max_gen0_size);
      }
    }
    // Initial size
    if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
                          _initial_heap_byte_size)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
          SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
          _min_gen0_size, _initial_gen0_size, _max_gen0_size);
      }
    }
  }
  // Enforce the maximum gen1 size.
  _min_gen1_size = MIN2(_min_gen1_size, _max_gen1_size);

  // Check that min gen1 <= initial gen1 <= max gen1
  _initial_gen1_size = MAX2(_initial_gen1_size, _min_gen1_size);
  _initial_gen1_size = MIN2(_initial_gen1_size, _max_gen1_size);

  // Write back to flags if necessary
  if (NewSize != _initial_gen0_size) {
    FLAG_SET_ERGO(uintx, NewSize, _initial_gen0_size);
  }

  if (MaxNewSize != _max_gen0_size) {
    FLAG_SET_ERGO(uintx, MaxNewSize, _max_gen0_size);
  }

  if (OldSize != _initial_gen1_size) {
    FLAG_SET_ERGO(uintx, OldSize, _initial_gen1_size);
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT " Initial gen1 "
      SIZE_FORMAT " Maximum gen1 " SIZE_FORMAT,
      _min_gen1_size, _initial_gen1_size, _max_gen1_size);
  }

  DEBUG_ONLY(TwoGenerationCollectorPolicy::assert_size_info();)
}
641
642
// Slow-path allocation of `size` words (optionally for a TLAB).
// Retries in a loop: lock-free allocation in gen0, then locked allocation
// across generations, then (if the GC locker is idle) a VM-thread
// collection. Returns NULL on failure; sets *gc_overhead_limit_was_exceeded
// when failure is due to the GC overhead limit, so the caller can throw OOM.
// May stall the calling thread while a JNI critical section blocks GC.
HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
                                                bool is_tlab,
                                                bool* gc_overhead_limit_was_exceeded) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();

  debug_only(gch->check_for_valid_allocation_state());
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");

  // In general gc_overhead_limit_was_exceeded should be false so
  // set it so here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = NULL;

  // Loop until the allocation is satisfied,
  // or unsatisfied after GC.
  for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
    HandleMark hm; // discard any handles allocated in each iteration

    // First allocation attempt is lock-free.
    Generation *gen0 = gch->get_gen(0);
    assert(gen0->supports_inline_contig_alloc(),
      "Otherwise, must do alloc within heap lock");
    if (gen0->should_allocate(size, is_tlab)) {
      result = gen0->par_allocate(size, is_tlab);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    uint gc_count_before; // read inside the Heap_lock locked region
    {
      MutexLocker ml(Heap_lock);
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::mem_allocate_work:"
                      " attempting locked slow path allocation");
      }
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = ! should_try_older_generation_allocation(size);

      result = gch->attempt_allocation(size, is_tlab, first_only);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        // A JNI critical section is blocking GC; we cannot collect now.
        if (is_tlab) {
          return NULL; // Caller will retry allocating individual object
        }
        if (!gch->is_maximal_no_gc()) {
          // Try and expand heap to satisfy request
          result = expand_heap_and_allocate(size, is_tlab);
          // result could be null if we are out of space
          if (result != NULL) {
            return result;
          }
        }

        if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
          return NULL; // we didn't get to do a GC and we didn't get any memory
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GC_locker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = Universe::heap()->total_collections();
    }

    // Hand the collection+allocation request to the VM thread; the gc count
    // lets the operation detect a collection that raced ahead of us.
    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
        assert(result == NULL, "must be NULL if gc_locked() is true");
        continue; // retry and/or stall as necessary
      }

      // Allocation has failed and a collection
      // has been done. If the gc time limit was exceeded
      // this time, return NULL so that an out-of-memory
      // will be thrown. Clear gc_overhead_limit_exceeded
      // so that the overhead exceeded does not persist.

      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      const bool softrefs_clear = all_soft_refs_clear();

      if (limit_exceeded && softrefs_clear) {
        *gc_overhead_limit_was_exceeded = true;
        size_policy()->set_gc_overhead_limit_exceeded(false);
        // Discard any allocation that did succeed: fill it with a dummy
        // object so the heap stays parseable, then report failure.
        if (op.result() != NULL) {
          CollectedHeap::fill_with_object(op.result(), size);
        }
        return NULL;
      }
      assert(result == NULL || gch->is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("TwoGenerationCollectorPolicy::mem_allocate_work retries %d times \n\t"
              " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}
// Expand the heap and retry the allocation, oldest generation first.
// Walks from the last (oldest) generation down to the youngest, asking each
// generation that is willing to hold an object of this size to expand and
// allocate. Returns NULL if no generation could satisfy the request.
HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size,
                                                       bool is_tlab) {
  GenCollectedHeap* heap = GenCollectedHeap::heap();
  HeapWord* obj = NULL;
  for (int level = number_of_generations() - 1; level >= 0; level--) {
    Generation* gen = heap->get_gen(level);
    if (gen->should_allocate(size, is_tlab)) {
      obj = gen->expand_and_allocate(size, is_tlab);
      if (obj != NULL) {
        break;
      }
    }
  }
  assert(obj == NULL || heap->is_in_reserved(obj), "result not in heap");
  return obj;
}
// Last-ditch allocation path, entered after a normal allocation attempt has
// failed. Runs increasingly aggressive collections (and heap expansion)
// until the request can be satisfied. Returns NULL if even a fully
// compacting, soft-reference-clearing collection cannot free enough space;
// the caller is then expected to raise an OutOfMemoryError.
HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
                                                        bool is_tlab) {
  GenCollectedHeap* heap = GenCollectedHeap::heap();
  GCCauseSetter cause_setter(heap, GCCause::_allocation_failure);
  assert(size != 0, "Precondition violated");

  if (GC_locker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    HeapWord* obj = NULL;
    if (!heap->is_maximal_no_gc()) {
      obj = expand_heap_and_allocate(size, is_tlab);
    }
    return obj; // could be null if we are out of space
  }

  if (!heap->incremental_collection_will_fail(false /* don't consult_young */)) {
    // Do an incremental collection.
    heap->do_collection(false /* full */,
                        false /* clear_all_soft_refs */,
                        size /* size */,
                        is_tlab /* is_tlab */,
                        number_of_generations() - 1 /* max_level */);
  } else {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Trying full because partial may fail :: ");
    }
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    heap->do_collection(true /* full */,
                        false /* clear_all_soft_refs */,
                        size /* size */,
                        is_tlab /* is_tlab */,
                        number_of_generations() - 1 /* max_level */);
  }

  HeapWord* obj = heap->attempt_allocation(size, is_tlab, false /* first_only */);
  if (obj != NULL) {
    assert(heap->is_in_reserved(obj), "result not in heap");
    return obj;
  }

  // OK, collection failed, try expansion.
  obj = expand_heap_and_allocate(size, is_tlab);
  if (obj != NULL) {
    return obj;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted
    heap->do_collection(true /* full */,
                        true /* clear_all_soft_refs */,
                        size /* size */,
                        is_tlab /* is_tlab */,
                        number_of_generations() - 1 /* max_level */);
  }

  obj = heap->attempt_allocation(size, is_tlab, false /* first_only */);
  if (obj != NULL) {
    assert(heap->is_in_reserved(obj), "result not in heap");
    return obj;
  }

  assert(!should_clear_all_soft_refs(),
         "Flag should have been handled and cleared prior to this point");

  // What else? We might try synchronous finalization later. If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}
// Satisfy a metaspace allocation that failed on the fast path. While the
// GC locker is held we can only expand metaspace (and possibly stall until
// the critical section clears); otherwise we schedule a VM operation that
// collects for a metadata allocation. Loops until a GC has actually been
// done or the allocation succeeds; returns NULL on deadlock risk.
MetaWord* CollectorPolicy::satisfy_failed_metadata_allocation(
    ClassLoaderData* loader_data,
    size_t word_size,
    Metaspace::MetadataType mdtype) {
  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");

  for (;;) { // Until a GC is done
    if (GC_locker::is_active_and_needs_gc()) {
      // If the GC_locker is active, just expand and allocate.
      // If that does not succeed, wait if this thread is not
      // in a critical section itself.
      MetaWord* result =
        loader_data->metaspace_non_null()->expand_and_allocate(word_size,
                                                               mdtype);
      if (result != NULL) {
        return result;
      }
      JavaThread* jthr = JavaThread::current();
      if (jthr->in_critical()) {
        // Stalling here would deadlock: we are the thread holding the
        // critical section that the GC is waiting for.
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return NULL;
      }
      // Wait for JNI critical section to be exited
      GC_locker::stall_until_clear();
      // The GC invoked by the last thread leaving the critical
      // section will be a young collection and a full collection
      // is (currently) needed for unloading classes so continue
      // to the next iteration to get a full GC.
      continue;
    }

    { // Need lock to get self consistent gc_count's
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }

    // Generate a VM operation
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       mdtype,
                                       gc_count,
                                       full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);

    // If GC was locked out, try again. Check
    // before checking success because the prologue
    // could have succeeded and the GC still have
    // been locked out.
    if (op.gc_locked()) {
      continue;
    }

    if (op.prologue_succeeded()) {
      return op.result();
    }

    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("satisfy_failed_metadata_allocation() retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, word_size);
    }
  }
}
// Return true if any of the following is true:
943
// . the allocation won't fit into the current young gen heap
944
// . gc locker is occupied (jni critical section)
945
// . heap memory is tight -- the most recent previous collection
946
// was a full collection because a partial collection (would
947
// have) failed and is likely to fail again
948
bool GenCollectorPolicy::should_try_older_generation_allocation(
949
size_t word_size) const {
950
GenCollectedHeap* gch = GenCollectedHeap::heap();
951
size_t gen0_capacity = gch->get_gen(0)->capacity_before_gc();
952
return (word_size > heap_word_size(gen0_capacity))
953
|| GC_locker::is_active_and_needs_gc()
954
|| gch->incremental_collection_failed();
955
}

//
// MarkSweepPolicy methods
//
void MarkSweepPolicy::initialize_alignments() {
963
_space_alignment = _gen_alignment = (uintx)Generation::GenGrain;
964
_heap_alignment = compute_heap_alignment();
965
}
966
967
void MarkSweepPolicy::initialize_generations() {
968
_generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, CURRENT_PC,
969
AllocFailStrategy::RETURN_NULL);
970
if (_generations == NULL) {
971
vm_exit_during_initialization("Unable to allocate gen spec");
972
}
973
974
if (UseParNewGC) {
975
_generations[0] = new GenerationSpec(Generation::ParNew, _initial_gen0_size, _max_gen0_size);
976
} else {
977
_generations[0] = new GenerationSpec(Generation::DefNew, _initial_gen0_size, _max_gen0_size);
978
}
979
_generations[1] = new GenerationSpec(Generation::MarkSweepCompact, _initial_gen1_size, _max_gen1_size);
980
981
if (_generations[0] == NULL || _generations[1] == NULL) {
982
vm_exit_during_initialization("Unable to allocate gen spec");
983
}
984
}
void MarkSweepPolicy::initialize_gc_policy_counters() {
987
// initialize the policy counters - 2 collectors, 3 generations
988
if (UseParNewGC) {
989
_gc_policy_counters = new GCPolicyCounters("ParNew:MSC", 2, 3);
990
} else {
991
_gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3);
992
}
993
}
/////////////// Unit tests ///////////////

#ifndef PRODUCT
// Testing that the NewSize flag is handled correctly is hard because it
// depends on so many other configurable variables. This test only tries to
// verify that there are some basic rules for NewSize honored by the policies.
class TestGenCollectorPolicy {
1002
public:
1003
static void test() {
1004
size_t flag_value;
1005
1006
save_flags();
1007
1008
// Set some limits that makes the math simple.
1009
FLAG_SET_ERGO(uintx, MaxHeapSize, 180 * M);
1010
FLAG_SET_ERGO(uintx, InitialHeapSize, 120 * M);
1011
Arguments::set_min_heap_size(40 * M);
1012
1013
// If NewSize is set on the command line, it should be used
1014
// for both min and initial young size if less than min heap.
1015
flag_value = 20 * M;
1016
FLAG_SET_CMDLINE(uintx, NewSize, flag_value);
1017
verify_min(flag_value);
1018
verify_initial(flag_value);
1019
1020
// If NewSize is set on command line, but is larger than the min
1021
// heap size, it should only be used for initial young size.
1022
flag_value = 80 * M;
1023
FLAG_SET_CMDLINE(uintx, NewSize, flag_value);
1024
verify_initial(flag_value);
1025
1026
// If NewSize has been ergonomically set, the collector policy
1027
// should use it for min but calculate the initial young size
1028
// using NewRatio.
1029
flag_value = 20 * M;
1030
FLAG_SET_ERGO(uintx, NewSize, flag_value);
1031
verify_min(flag_value);
1032
verify_scaled_initial(InitialHeapSize);
1033
1034
restore_flags();
1035
1036
}
1037
1038
static void verify_min(size_t expected) {
1039
MarkSweepPolicy msp;
1040
msp.initialize_all();
1041
1042
assert(msp.min_gen0_size() <= expected, err_msg("%zu > %zu", msp.min_gen0_size(), expected));
1043
}
1044
1045
static void verify_initial(size_t expected) {
1046
MarkSweepPolicy msp;
1047
msp.initialize_all();
1048
1049
assert(msp.initial_gen0_size() == expected, err_msg("%zu != %zu", msp.initial_gen0_size(), expected));
1050
}
1051
1052
static void verify_scaled_initial(size_t initial_heap_size) {
1053
MarkSweepPolicy msp;
1054
msp.initialize_all();
1055
1056
size_t expected = msp.scale_by_NewRatio_aligned(initial_heap_size);
1057
assert(msp.initial_gen0_size() == expected, err_msg("%zu != %zu", msp.initial_gen0_size(), expected));
1058
assert(FLAG_IS_ERGO(NewSize) && NewSize == expected,
1059
err_msg("NewSize should have been set ergonomically to %zu, but was %zu", expected, NewSize));
1060
}
1061
1062
private:
1063
static size_t original_InitialHeapSize;
1064
static size_t original_MaxHeapSize;
1065
static size_t original_MaxNewSize;
1066
static size_t original_MinHeapDeltaBytes;
1067
static size_t original_NewSize;
1068
static size_t original_OldSize;
1069
1070
static void save_flags() {
1071
original_InitialHeapSize = InitialHeapSize;
1072
original_MaxHeapSize = MaxHeapSize;
1073
original_MaxNewSize = MaxNewSize;
1074
original_MinHeapDeltaBytes = MinHeapDeltaBytes;
1075
original_NewSize = NewSize;
1076
original_OldSize = OldSize;
1077
}
1078
1079
static void restore_flags() {
1080
InitialHeapSize = original_InitialHeapSize;
1081
MaxHeapSize = original_MaxHeapSize;
1082
MaxNewSize = original_MaxNewSize;
1083
MinHeapDeltaBytes = original_MinHeapDeltaBytes;
1084
NewSize = original_NewSize;
1085
OldSize = original_OldSize;
1086
}
1087
};
// Out-of-line definitions for the flag snapshots used by
// TestGenCollectorPolicy::save_flags()/restore_flags().
size_t TestGenCollectorPolicy::original_InitialHeapSize = 0;
size_t TestGenCollectorPolicy::original_MaxHeapSize = 0;
size_t TestGenCollectorPolicy::original_MaxNewSize = 0;
size_t TestGenCollectorPolicy::original_MinHeapDeltaBytes = 0;
size_t TestGenCollectorPolicy::original_NewSize = 0;
size_t TestGenCollectorPolicy::original_OldSize = 0;
// Entry point for the NewSize unit tests; presumably invoked by the VM's
// internal test runner — TODO confirm against the caller.
void TestNewSize_test() {
  TestGenCollectorPolicy::test();
}
#endif