GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegion.cpp
/*
 * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "memory/allocation.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp"
#include "gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/space.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
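
// These region-wide constants start out zero and are set up exactly once by
// ShenandoahHeapRegion::setup_sizes() during heap initialization (see below).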
size_t ShenandoahHeapRegion::RegionCount = 0;
size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
size_t ShenandoahHeapRegion::RegionSizeWords = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0;
size_t ShenandoahHeapRegion::HumongousThresholdBytes = 0;
size_t ShenandoahHeapRegion::HumongousThresholdWords = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeWords = 0;

ShenandoahHeapRegion::ShenandoahHeapRegion(HeapWord* start, size_t index, bool committed) :
  _index(index),
  _bottom(start),
  _end(start + RegionSizeWords),
  _new_top(NULL),
  _empty_time(os::elapsedTime()),
  _state(committed ? _empty_committed : _empty_uncommitted),
  _top(start),
  _tlab_allocs(0),
  _gclab_allocs(0),
  _live_data(0),
  _critical_pins(0),
  _update_watermark(start) {

  assert(Universe::on_page_boundary(_bottom) && Universe::on_page_boundary(_end),
         "invalid space boundaries");
  if (ZapUnusedHeapArea && committed) {
    SpaceMangler::mangle_region(MemRegion(_bottom, _end));
  }
}

void ShenandoahHeapRegion::report_illegal_transition(const char *method) {
  ResourceMark rm;
  stringStream ss;
  ss.print("Illegal region state transition from \"%s\", at %s\n ", region_state_to_string(_state), method);
  print_on(&ss);
  fatal(ss.as_string());
}
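
// The make_*() methods below implement the region state machine under the heap lock.
// Several of the switch statements rely on intentional case fall-through: for example,
// an empty uncommitted region is committed first and then handled as empty committed.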
void ShenandoahHeapRegion::make_regular_allocation() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
      set_state(_regular);
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular allocation");
  }
}

void ShenandoahHeapRegion::make_regular_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress() || ShenandoahHeap::heap()->is_degenerated_gc_in_progress(),
          "only for full or degen GC");

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
    case _cset:
    case _humongous_start:
    case _humongous_cont:
      set_state(_regular);
      return;
    case _pinned_cset:
      set_state(_pinned);
      return;
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular bypass");
  }
}

void ShenandoahHeapRegion::make_humongous_start() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start allocation");
  }
}

void ShenandoahHeapRegion::make_humongous_start_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start bypass");
  }
}

void ShenandoahHeapRegion::make_humongous_cont() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation allocation");
  }
}

void ShenandoahHeapRegion::make_humongous_cont_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation bypass");
  }
}
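
// Pinned regions hold objects that must not move, typically while the objects are
// exposed to JNI critical code. make_pinned() requires a non-zero pin count and
// remembers whether the region was regular, in the collection set, or a humongous
// start, so that make_unpinned() can restore the original state.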
void ShenandoahHeapRegion::make_pinned() {
  shenandoah_assert_heaplocked();
  assert(pin_count() > 0, err_msg("Should have pins: " SIZE_FORMAT, pin_count()));

  switch (_state) {
    case _regular:
      set_state(_pinned);
    case _pinned_cset:
    case _pinned:
      return;
    case _humongous_start:
      set_state(_pinned_humongous_start);
    case _pinned_humongous_start:
      return;
    case _cset:
      set_state(_pinned_cset);
      return;
    default:
      report_illegal_transition("pinning");
  }
}

void ShenandoahHeapRegion::make_unpinned() {
  shenandoah_assert_heaplocked();
  assert(pin_count() == 0, err_msg("Should not have pins: " SIZE_FORMAT, pin_count()));

  switch (_state) {
    case _pinned:
      set_state(_regular);
      return;
    case _regular:
    case _humongous_start:
      return;
    case _pinned_cset:
      set_state(_cset);
      return;
    case _pinned_humongous_start:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("unpinning");
  }
}

void ShenandoahHeapRegion::make_cset() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _regular:
      set_state(_cset);
    case _cset:
      return;
    default:
      report_illegal_transition("cset");
  }
}

void ShenandoahHeapRegion::make_trash() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _cset:
      // Reclaiming cset regions
    case _humongous_start:
    case _humongous_cont:
      // Reclaiming humongous regions
    case _regular:
      // Immediate region reclaim
      set_state(_trash);
      return;
    default:
      report_illegal_transition("trashing");
  }
}

void ShenandoahHeapRegion::make_trash_immediate() {
  make_trash();

  // On this path, we know there are no marked objects in the region,
  // so tell the marking context about it to bypass bitmap resets.
  ShenandoahHeap::heap()->complete_marking_context()->reset_top_bitmap(this);
}

void ShenandoahHeapRegion::make_empty() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _trash:
      set_state(_empty_committed);
      _empty_time = os::elapsedTime();
      return;
    default:
      report_illegal_transition("emptying");
  }
}

void ShenandoahHeapRegion::make_uncommitted() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _empty_committed:
      do_uncommit();
      set_state(_empty_uncommitted);
      return;
    default:
      report_illegal_transition("uncommitting");
  }
}

void ShenandoahHeapRegion::make_committed_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      set_state(_empty_committed);
      return;
    default:
      report_illegal_transition("commit bypass");
  }
}

void ShenandoahHeapRegion::reset_alloc_metadata() {
  _tlab_allocs = 0;
  _gclab_allocs = 0;
}
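
// Allocation accounting: _tlab_allocs and _gclab_allocs are kept in heap words; the
// getters below convert to bytes. Shared (non-LAB) allocations are derived as the
// total used space minus the TLAB and GCLAB portions.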
size_t ShenandoahHeapRegion::get_shared_allocs() const {
  return used() - (_tlab_allocs + _gclab_allocs) * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_tlab_allocs() const {
  return _tlab_allocs * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_gclab_allocs() const {
  return _gclab_allocs * HeapWordSize;
}

void ShenandoahHeapRegion::set_live_data(size_t s) {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  size_t v = s >> LogHeapWordSize;
  assert(v < (size_t)max_jint, "sanity");
  _live_data = (jint)v;
}
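
// One-line region dump. State codes: EU = empty uncommitted, EC = empty committed,
// R = regular, H = humongous start, HP = pinned humongous start, HC = humongous
// continuation, CS = collection set, T = trash, P = pinned, CSP = pinned cset.
// Columns: BTE = bottom/top/end, TAMS = top-at-mark-start, UWM = update watermark,
// U = used, T = TLAB allocs, G = GCLAB allocs, S = shared allocs, L = live data,
// CP = critical pin count.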
void ShenandoahHeapRegion::print_on(outputStream* st) const {
  st->print("|");
  st->print(SIZE_FORMAT_W(5), this->_index);

  switch (_state) {
    case _empty_uncommitted:
      st->print("|EU ");
      break;
    case _empty_committed:
      st->print("|EC ");
      break;
    case _regular:
      st->print("|R ");
      break;
    case _humongous_start:
      st->print("|H ");
      break;
    case _pinned_humongous_start:
      st->print("|HP ");
      break;
    case _humongous_cont:
      st->print("|HC ");
      break;
    case _cset:
      st->print("|CS ");
      break;
    case _trash:
      st->print("|T ");
      break;
    case _pinned:
      st->print("|P ");
      break;
    case _pinned_cset:
      st->print("|CSP");
      break;
    default:
      ShouldNotReachHere();
  }
  st->print("|BTE " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12),
            p2i(bottom()), p2i(top()), p2i(end()));
  st->print("|TAMS " INTPTR_FORMAT_W(12),
            p2i(ShenandoahHeap::heap()->marking_context()->top_at_mark_start(const_cast<ShenandoahHeapRegion*>(this))));
  st->print("|UWM " INTPTR_FORMAT_W(12),
            p2i(_update_watermark));
  st->print("|U " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used()));
  st->print("|T " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_tlab_allocs()), proper_unit_for_byte_size(get_tlab_allocs()));
  st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()), proper_unit_for_byte_size(get_gclab_allocs()));
  st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()), proper_unit_for_byte_size(get_shared_allocs()));
  st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes()));
  st->print("|CP " SIZE_FORMAT_W(3), pin_count());
  st->cr();
}
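
// For a region that is part of a humongous allocation, walk backwards over the
// humongous continuations until the humongous start region is found.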
ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(is_humongous(), "Must be a part of the humongous region");
  size_t i = index();
  ShenandoahHeapRegion* r = const_cast<ShenandoahHeapRegion*>(this);
  while (!r->is_humongous_start()) {
    assert(i > 0, "Sanity");
    i--;
    r = heap->get_region(i);
    assert(r->is_humongous(), "Must be a part of the humongous region");
  }
  assert(r->is_humongous_start(), "Must be");
  return r;
}

void ShenandoahHeapRegion::recycle() {
  set_top(bottom());
  clear_live_data();
  reset_alloc_metadata();

  ShenandoahHeap::heap()->marking_context()->reset_top_at_mark_start(this);
  set_update_watermark(bottom());

  make_empty();

  if (ZapUnusedHeapArea) {
    SpaceMangler::mangle_region(MemRegion(bottom(), end()));
  }
}
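
// Block-addressing support: block_start() returns the start of the object covering p
// (found by a linear walk from bottom()), or top() if p lies above top(); block_size()
// returns that object's size, or the size of the unallocated tail when p == top().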
HeapWord* ShenandoahHeapRegion::block_start(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
                 p2i(p), p2i(bottom()), p2i(end())));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size();
    }
    shenandoah_assert_correct(NULL, oop(last));
    return last;
  }
}

size_t ShenandoahHeapRegion::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
                 p2i(p), p2i(bottom()), p2i(end())));
  if (p < top()) {
    return oop(p)->size();
  } else {
    assert(p == top(), "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}
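
// Compute the global region geometry once, before the heap is created: pick a region
// size (either from -XX:ShenandoahRegionSize, or the max heap size divided by the
// target region count, clamped to the min/max region sizes), round it down to a power
// of two, then derive the word/byte shifts and masks, the region count, the humongous
// threshold, and the maximum TLAB size from it.
//
// For example, assuming the default target of 2048 regions: a 4G max heap gives
// 4G / 2048 = 2M per region (already a power of two), while a 6G heap gives 3M, which
// the power-of-two step below rounds down to 2M.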
void ShenandoahHeapRegion::setup_sizes(size_t max_heap_size) {
  // Absolute minimums we should not ever break:
  static const size_t MIN_REGION_SIZE = 256*K;

  size_t region_size;
  if (FLAG_IS_DEFAULT(ShenandoahRegionSize)) {
    if (ShenandoahMinRegionSize > max_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
                      MIN_NUM_REGIONS,
                      byte_size_in_proper_unit<size_t>(ShenandoahMinRegionSize),
                      proper_unit_for_byte_size(ShenandoahMinRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than minimum region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit<size_t>(ShenandoahMinRegionSize),
                      proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(MIN_REGION_SIZE), proper_unit_for_byte_size(MIN_REGION_SIZE));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MinTLABSize) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than TLAB size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit<size_t>(ShenandoahMinRegionSize),
                      proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit<size_t>(MinTLABSize), proper_unit_for_byte_size(MinTLABSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than min region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit<size_t>(ShenandoahMaxRegionSize),
                      proper_unit_for_byte_size(ShenandoahMaxRegionSize),
                      byte_size_in_proper_unit(MIN_REGION_SIZE), proper_unit_for_byte_size(MIN_REGION_SIZE));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message);
    }
    if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Minimum (" SIZE_FORMAT "%s) should be larger than maximum (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit<size_t>(ShenandoahMinRegionSize),
                      proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit<size_t>(ShenandoahMaxRegionSize),
                      proper_unit_for_byte_size(ShenandoahMaxRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message);
    }

    // We rapidly expand to max_heap_size in most scenarios, so that is the measure
    // for usual heap sizes. Do not depend on initial_heap_size here.
    region_size = max_heap_size / ShenandoahTargetNumRegions;

    // Now make sure that we don't go over or under our limits.
    region_size = MAX2<size_t>(ShenandoahMinRegionSize, region_size);
    region_size = MIN2<size_t>(ShenandoahMaxRegionSize, region_size);

  } else {
    if (ShenandoahRegionSize > max_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
                      MIN_NUM_REGIONS,
                      byte_size_in_proper_unit<size_t>(ShenandoahRegionSize),
                      proper_unit_for_byte_size(ShenandoahRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    if (ShenandoahRegionSize < ShenandoahMinRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "%s) should be larger than min region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit<size_t>(ShenandoahRegionSize),
                      proper_unit_for_byte_size(ShenandoahRegionSize),
                      byte_size_in_proper_unit<size_t>(ShenandoahMinRegionSize),
                      proper_unit_for_byte_size(ShenandoahMinRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    if (ShenandoahRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "%s) should be lower than max region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit<size_t>(ShenandoahRegionSize),
                      proper_unit_for_byte_size(ShenandoahRegionSize),
                      byte_size_in_proper_unit<size_t>(ShenandoahMaxRegionSize),
                      proper_unit_for_byte_size(ShenandoahMaxRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    region_size = ShenandoahRegionSize;
  }

  if (1 > ShenandoahHumongousThreshold || ShenandoahHumongousThreshold > 100) {
    vm_exit_during_initialization("Invalid -XX:ShenandoahHumongousThreshold option, should be within [1..100]");
  }

  // Make sure region size is at least one large page, if enabled.
  // Otherwise, uncommitting one region may falsely uncommit the adjacent
  // regions too.
  // Also see shenandoahArguments.cpp, where it handles UseLargePages.
  if (UseLargePages && ShenandoahUncommit) {
    region_size = MAX2(region_size, os::large_page_size());
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = size_t(1) << region_size_log;

  // Now, set up the globals.
  guarantee(RegionSizeBytesShift == 0, "we should only set it once");
  RegionSizeBytesShift = (size_t)region_size_log;

  guarantee(RegionSizeWordsShift == 0, "we should only set it once");
  RegionSizeWordsShift = RegionSizeBytesShift - LogHeapWordSize;

  guarantee(RegionSizeBytes == 0, "we should only set it once");
  RegionSizeBytes = region_size;
  RegionSizeWords = RegionSizeBytes >> LogHeapWordSize;
  assert (RegionSizeWords*HeapWordSize == RegionSizeBytes, "sanity");

  guarantee(RegionSizeWordsMask == 0, "we should only set it once");
  RegionSizeWordsMask = RegionSizeWords - 1;

  guarantee(RegionSizeBytesMask == 0, "we should only set it once");
  RegionSizeBytesMask = RegionSizeBytes - 1;

  guarantee(RegionCount == 0, "we should only set it once");
  RegionCount = max_heap_size / RegionSizeBytes;

  guarantee(HumongousThresholdWords == 0, "we should only set it once");
  HumongousThresholdWords = RegionSizeWords * ShenandoahHumongousThreshold / 100;
  HumongousThresholdWords = (size_t)align_size_down(HumongousThresholdWords, MinObjAlignment);
  assert (HumongousThresholdWords <= RegionSizeWords, "sanity");

  guarantee(HumongousThresholdBytes == 0, "we should only set it once");
  HumongousThresholdBytes = HumongousThresholdWords * HeapWordSize;
  assert (HumongousThresholdBytes <= RegionSizeBytes, "sanity");

  // The rationale for trimming the TLAB sizes has to do with the raciness in the
  // TLAB allocation machinery. It may happen that the TLAB sizing policy polls Shenandoah
  // about the next free size, gets the answer for region #N, goes away for a while, then
  // tries to allocate in region #N, and fails because some other thread has claimed part
  // of region #N; the freeset allocation code then has to retire region #N before moving
  // the allocation to region #N+1.
  //
  // The worst case is realized when the "answer" is "region size", which means the race
  // could prematurely retire an entire region. Having smaller TLABs does not fix that
  // completely, but reduces the probability of overly wasteful region retirement.
  // With the current divisor, we will waste no more than 1/8 of the region size in the
  // worst case. This also has a secondary effect on collection set selection: even under
  // the race, the regions would be at least 7/8 used, which allows relying on
  // "used" - "live" for cset selection. Otherwise, we can get a fragmented region
  // below the garbage threshold that would never be considered for collection.
  //
  // The whole thing would be mitigated if Elastic TLABs were enabled, but there
  // is no support in this JDK.
  //
  guarantee(MaxTLABSizeWords == 0, "we should only set it once");
  MaxTLABSizeWords = MIN2(RegionSizeWords / 8, HumongousThresholdWords);
  MaxTLABSizeWords = (size_t)align_size_down(MaxTLABSizeWords, MinObjAlignment);

  guarantee(MaxTLABSizeBytes == 0, "we should only set it once");
  MaxTLABSizeBytes = MaxTLABSizeWords * HeapWordSize;
  assert (MaxTLABSizeBytes > MinTLABSize, "should be larger");

  log_info(gc, init)("Regions: " SIZE_FORMAT " x " SIZE_FORMAT "%s",
                     RegionCount, byte_size_in_proper_unit(RegionSizeBytes), proper_unit_for_byte_size(RegionSizeBytes));
  log_info(gc, init)("Humongous object threshold: " SIZE_FORMAT "%s",
                     byte_size_in_proper_unit(HumongousThresholdBytes), proper_unit_for_byte_size(HumongousThresholdBytes));
  log_info(gc, init)("Max TLAB size: " SIZE_FORMAT "%s",
                     byte_size_in_proper_unit(MaxTLABSizeBytes), proper_unit_for_byte_size(MaxTLABSizeBytes));
}
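
// Commit/uncommit the backing memory for this region and its slice of the marking
// bitmap, keeping the heap's committed-size accounting in sync. Failures are reported
// as Java out-of-memory conditions.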
void ShenandoahHeapRegion::do_commit() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->is_heap_region_special() && !os::commit_memory((char *) bottom(), RegionSizeBytes, false)) {
    report_java_out_of_memory("Unable to commit region");
  }
  if (!heap->commit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to commit bitmaps for region");
  }
  if (AlwaysPreTouch) {
    os::pretouch_memory((char*)bottom(), (char*)end());
  }
  heap->increase_committed(ShenandoahHeapRegion::region_size_bytes());
}

void ShenandoahHeapRegion::do_uncommit() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->is_heap_region_special() && !os::uncommit_memory((char *) bottom(), RegionSizeBytes)) {
    report_java_out_of_memory("Unable to uncommit region");
  }
  if (!heap->uncommit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to uncommit bitmaps for region");
  }
  heap->decrease_committed(ShenandoahHeapRegion::region_size_bytes());
}
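
// Pin bookkeeping: the counter is updated with atomic adds and read with load-acquire.
// Unlike the state transitions above, these accessors do not assert that the heap lock
// is held.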
void ShenandoahHeapRegion::record_pin() {
  Atomic::add(1, &_critical_pins);
}

void ShenandoahHeapRegion::record_unpin() {
  assert(pin_count() > 0, err_msg("Region " SIZE_FORMAT " should have non-zero pins", index()));
  Atomic::add(-1, &_critical_pins);
}

size_t ShenandoahHeapRegion::pin_count() const {
  jint v = OrderAccess::load_acquire((volatile jint*)&_critical_pins);
  assert(v >= 0, "sanity");
  return (size_t)v;
}
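
// All region state changes go through set_state(), which emits a JFR
// ShenandoahHeapRegionStateChange event for the transition when JFR is recording it.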
void ShenandoahHeapRegion::set_state(RegionState to) {
  EventShenandoahHeapRegionStateChange evt;
  if (evt.should_commit()) {
    evt.set_index((unsigned)index());
    evt.set_start((uintptr_t)bottom());
    evt.set_used(used());
    evt.set_from(_state);
    evt.set_to(to);
    evt.commit();
  }
  _state = to;
}