GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeap.cpp
/*
 * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"

#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shenandoah/shenandoahGCTraceTime.hpp"

#include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp"
#include "gc_implementation/shenandoah/shenandoahClosures.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahCollectionSet.hpp"
#include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahControlThread.hpp"
#include "gc_implementation/shenandoah/shenandoahFreeSet.hpp"
#include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc_implementation/shenandoah/shenandoahMarkCompact.hpp"
#include "gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc_implementation/shenandoah/shenandoahMetrics.hpp"
#include "gc_implementation/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahPacer.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahPadding.hpp"
#include "gc_implementation/shenandoah/shenandoahParallelCleaning.hpp"
#include "gc_implementation/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahTaskqueue.hpp"
#include "gc_implementation/shenandoah/shenandoahUtils.hpp"
#include "gc_implementation/shenandoah/shenandoahVerifier.hpp"
#include "gc_implementation/shenandoah/shenandoahCodeRoots.hpp"
#include "gc_implementation/shenandoah/shenandoahVMOperations.hpp"
#include "gc_implementation/shenandoah/shenandoahWorkGroup.hpp"
#include "gc_implementation/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc_implementation/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc_implementation/shenandoah/mode/shenandoahIUMode.hpp"
#include "gc_implementation/shenandoah/mode/shenandoahPassiveMode.hpp"
#include "gc_implementation/shenandoah/mode/shenandoahSATBMode.hpp"
#if INCLUDE_JFR
#include "gc_implementation/shenandoah/shenandoahJfrSupport.hpp"
#endif

#include "memory/metaspace.hpp"
#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"

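// Backdoor singleton pointer to the heap instance, set in the constructor;
// ShenandoahHeap::heap() uses it for fast access from GC and barrier code.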
ShenandoahHeap* ShenandoahHeap::_heap = NULL;

class ShenandoahPretouchHeapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;
  const size_t _page_size;
public:
  ShenandoahPretouchHeapTask(size_t page_size) :
    AbstractGangTask("Shenandoah Pretouch Heap"),
    _page_size(page_size) {}

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      if (r->is_committed()) {
        os::pretouch_memory((char *) r->bottom(), (char *) r->end());
      }
      r = _regions.next();
    }
  }
};

class ShenandoahPretouchBitmapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;
  char* _bitmap_base;
  const size_t _bitmap_size;
  const size_t _page_size;
public:
  ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
    AbstractGangTask("Shenandoah Pretouch Bitmap"),
    _bitmap_base(bitmap_base),
    _bitmap_size(bitmap_size),
    _page_size(page_size) {}

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      size_t start = r->index() * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      size_t end = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      assert (end <= _bitmap_size, err_msg("end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size));

      if (r->is_committed()) {
        os::pretouch_memory(_bitmap_base + start, _bitmap_base + end);
      }

      r = _regions.next();
    }
  }
};

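// Heap initialization: figures out sizing, reserves and commits the heap, marking
// bitmap, auxiliary bitmap, region storage and collection set, builds the region
// array and free set, optionally pre-touches memory, and starts GC subsystems.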
jint ShenandoahHeap::initialize() {
  CollectedHeap::pre_initialize();

  //
  // Figure out heap sizing
  //

  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  size_t min_byte_size = collector_policy()->min_heap_byte_size();
  size_t max_byte_size = collector_policy()->max_heap_byte_size();
  size_t heap_alignment = collector_policy()->heap_alignment();

  size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  Universe::check_alignment(max_byte_size, reg_size_bytes, "Shenandoah heap");
  Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");

  _num_regions = ShenandoahHeapRegion::region_count();

  // Now we know the number of regions, initialize the heuristics.
  initialize_heuristics();

  size_t num_committed_regions = init_byte_size / reg_size_bytes;
  num_committed_regions = MIN2(num_committed_regions, _num_regions);
  assert(num_committed_regions <= _num_regions, "sanity");
  _initial_size = num_committed_regions * reg_size_bytes;

  size_t num_min_regions = min_byte_size / reg_size_bytes;
  num_min_regions = MIN2(num_min_regions, _num_regions);
  assert(num_min_regions <= _num_regions, "sanity");
  _minimum_size = num_min_regions * reg_size_bytes;

  // Default to max heap size.
  _soft_max_size = _num_regions * reg_size_bytes;

  _committed = _initial_size;

  size_t heap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
  size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
  size_t region_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();

  //
  // Reserve and commit memory for heap
  //

  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)heap_rs.base());
  _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
  _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
  _heap_region_special = heap_rs.special();

  assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
         err_msg("Misaligned heap: " PTR_FORMAT, p2i(base())));

#if SHENANDOAH_OPTIMIZED_MARKTASK
  // The optimized ObjArrayChunkedTask takes some bits away from the full object bits.
  // Fail if we ever attempt to address more than we can.
  if ((uintptr_t)(heap_rs.base() + heap_rs.size()) >= ShenandoahMarkTask::max_addressable()) {
    FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
                          "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
                          "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
                          p2i(heap_rs.base()), p2i(heap_rs.base() + heap_rs.size()), ShenandoahMarkTask::max_addressable());
    vm_exit_during_initialization("Fatal Error", buf);
  }
#endif

  ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
  if (!_heap_region_special) {
    os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
                              "Cannot commit heap memory");
  }

  //
  // Reserve and commit memory for bitmap(s)
  //

  _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
  _bitmap_size = align_size_up(_bitmap_size, bitmap_page_size);

  size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor();

  guarantee(bitmap_bytes_per_region != 0,
            err_msg("Bitmap bytes per region should not be zero"));
  guarantee(is_power_of_2(bitmap_bytes_per_region),
            err_msg("Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region));

  if (bitmap_page_size > bitmap_bytes_per_region) {
    _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
    _bitmap_bytes_per_slice = bitmap_page_size;
  } else {
    _bitmap_regions_per_slice = 1;
    _bitmap_bytes_per_slice = bitmap_bytes_per_region;
  }

  guarantee(_bitmap_regions_per_slice >= 1,
            err_msg("Should have at least one region per slice: " SIZE_FORMAT,
                    _bitmap_regions_per_slice));

  guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
            err_msg("Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
                    _bitmap_bytes_per_slice, bitmap_page_size));

  ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
  _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
  _bitmap_region_special = bitmap.special();

  size_t bitmap_init_commit = _bitmap_bytes_per_slice *
                              align_size_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
  bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
  if (!_bitmap_region_special) {
    os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
                              "Cannot commit bitmap memory");
  }

  _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);

  if (ShenandoahVerify) {
    ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
    if (!verify_bitmap.special()) {
      os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
                                "Cannot commit verification bitmap memory");
    }
    MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
    MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
    _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
    _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
  }

  // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
  ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
  _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
  _aux_bitmap_region_special = aux_bitmap.special();
  _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);

  //
  // Create regions and region sets
  //
  size_t region_align = align_size_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
  size_t region_storage_size = align_size_up(region_align * _num_regions, region_page_size);
  region_storage_size = align_size_up(region_storage_size, os::vm_allocation_granularity());

  ReservedSpace region_storage(region_storage_size, region_page_size);
  MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);
  if (!region_storage.special()) {
    os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
                              "Cannot commit region memory");
  }

  // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
  // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
  // If not successful, bite a bullet and allocate at whatever address.
  {
    size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
    size_t cset_size = align_size_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);

    uintptr_t min = ShenandoahUtils::round_up_power_of_2(cset_align);
    uintptr_t max = (1u << 30u);

    for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
      char* req_addr = (char*)addr;
      assert(is_ptr_aligned(req_addr, cset_align), "Should be aligned");
      ReservedSpace cset_rs(cset_size, cset_align, false, req_addr);
      if (cset_rs.is_reserved()) {
        assert(cset_rs.base() == req_addr, err_msg("Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr));
        _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
        break;
      }
    }

    if (_collection_set == NULL) {
      ReservedSpace cset_rs(cset_size, cset_align, false);
      _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
    }
  }

  _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
  _free_set = new ShenandoahFreeSet(this, _num_regions);

  {
    ShenandoahHeapLocker locker(lock());

    for (size_t i = 0; i < _num_regions; i++) {
      HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
      bool is_committed = i < num_committed_regions;
      void* loc = region_storage.base() + i * region_align;

      ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
      assert(is_ptr_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");

      _marking_context->initialize_top_at_mark_start(r);
      _regions[i] = r;
      assert(!collection_set()->is_in(i), "New region should not be in collection set");
    }

    // Initialize to complete
    _marking_context->mark_complete();

    _free_set->rebuild();
  }

  if (AlwaysPreTouch) {
    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.
    ShenandoahPushWorkerScope scope(workers(), _max_workers, false);

    _pretouch_heap_page_size = heap_page_size;
    _pretouch_bitmap_page_size = bitmap_page_size;

#ifdef LINUX
    // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
    // pages. But, the kernel needs to know that every small page is used, in order to coalesce
    // them into huge one. Therefore, we need to pretouch with smaller pages.
    if (UseTransparentHugePages) {
      _pretouch_heap_page_size = (size_t)os::vm_page_size();
      _pretouch_bitmap_page_size = (size_t)os::vm_page_size();
    }
#endif

    // OS memory managers may want to coalesce back-to-back pages. Make their jobs
    // simpler by pre-touching continuous spaces (heap and bitmap) separately.

    ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
    _workers->run_task(&bcl);

    ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
    _workers->run_task(&hcl);
  }

  //
  // Initialize the rest of GC subsystems
  //

  set_barrier_set(new ShenandoahBarrierSet(this));

  _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
  for (uint worker = 0; worker < _max_workers; worker++) {
    _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
    Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
  }

  // The call below uses stuff (the SATB* things) that are in G1, but probably
  // belong into a shared location.
  JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                               SATB_Q_FL_lock,
                                               20 /*G1SATBProcessCompletedThreshold */,
                                               Shared_SATB_Q_lock);

  _monitoring_support = new ShenandoahMonitoringSupport(this);
  _phase_timings = new ShenandoahPhaseTimings(max_workers());
  ShenandoahStringDedup::initialize();
  ShenandoahCodeRoots::initialize();

  if (ShenandoahPacing) {
    _pacer = new ShenandoahPacer(this);
    _pacer->setup_for_idle();
  } else {
    _pacer = NULL;
  }

  _control_thread = new ShenandoahControlThread();

  log_info(gc, init)("Initialize Shenandoah heap: " SIZE_FORMAT "%s initial, " SIZE_FORMAT "%s min, " SIZE_FORMAT "%s max",
                     byte_size_in_proper_unit(_initial_size), proper_unit_for_byte_size(_initial_size),
                     byte_size_in_proper_unit(_minimum_size), proper_unit_for_byte_size(_minimum_size),
                     byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity())
  );

  return JNI_OK;
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif

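// Picks the GC mode (satb, iu, passive) from -XX:ShenandoahGCMode and instantiates
// its heuristics, honoring the diagnostic/experimental VM option unlock flags.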
void ShenandoahHeap::initialize_heuristics() {
  if (ShenandoahGCMode != NULL) {
    if (strcmp(ShenandoahGCMode, "satb") == 0) {
      _gc_mode = new ShenandoahSATBMode();
    } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
      _gc_mode = new ShenandoahIUMode();
    } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
      _gc_mode = new ShenandoahPassiveMode();
    } else {
      vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
    }
  } else {
    ShouldNotReachHere();
  }
  _gc_mode->initialize_flags();
  if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _gc_mode->name()));
  }
  if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _gc_mode->name()));
  }
  log_info(gc, init)("Shenandoah GC mode: %s",
                     _gc_mode->name());

  _heuristics = _gc_mode->initialize_heuristics();

  if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _heuristics->name()));
  }
  if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _heuristics->name()));
  }
  log_info(gc, init)("Shenandoah heuristics: %s",
                     _heuristics->name());
}

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  SharedHeap(policy),
  _shenandoah_policy(policy),
  _heap_region_special(false),
  _regions(NULL),
  _free_set(NULL),
  _collection_set(NULL),
  _update_refs_iterator(this),
  _bytes_allocated_since_gc_start(0),
  _max_workers((uint)MAX2(ConcGCThreads, ParallelGCThreads)),
  _ref_processor(NULL),
  _marking_context(NULL),
  _bitmap_size(0),
  _bitmap_regions_per_slice(0),
  _bitmap_bytes_per_slice(0),
  _bitmap_region_special(false),
  _aux_bitmap_region_special(false),
  _liveness_cache(NULL),
  _aux_bit_map(),
  _verifier(NULL),
  _pacer(NULL),
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _phase_timings(NULL)
{
  _heap = this;

  log_info(gc, init)("GC threads: " UINTX_FORMAT " parallel, " UINTX_FORMAT " concurrent", ParallelGCThreads, ConcGCThreads);

  _scm = new ShenandoahConcurrentMark();

  _full_gc = new ShenandoahMarkCompact();
  _used = 0;

  _max_workers = MAX2(_max_workers, 1U);

  // SharedHeap did not initialize this for us, and we want our own workgang anyway.
  assert(SharedHeap::_workers == NULL && _workers == NULL, "Should not be initialized yet");
  _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
                                    /* are_GC_task_threads */true,
                                    /* are_ConcurrentGC_threads */false);
  if (_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }
  assert(SharedHeap::_workers == _workers, "Sanity: initialized the correct field");
}

#ifdef _MSC_VER
#pragma warning( pop )
#endif

class ShenandoahResetBitmapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahResetBitmapTask() :
    AbstractGangTask("Parallel Reset Bitmap Task") {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->marking_context();
    while (region != NULL) {
      if (heap->is_bitmap_slice_committed(region)) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahHeap::reset_mark_bitmap() {
  assert_gc_workers(_workers->active_workers());
  mark_incomplete_marking_context();

  ShenandoahResetBitmapTask task;
  _workers->run_task(&task);
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print_cr("Shenandoah Heap");
  st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
               byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
               byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
               byte_size_in_proper_unit(committed()), proper_unit_for_byte_size(committed()),
               byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used()));
  st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
               num_regions(),
               byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
               proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));

  st->print("Status: ");
  if (has_forwarded_objects()) st->print("has forwarded objects, ");
  if (is_concurrent_mark_in_progress()) st->print("marking, ");
  if (is_evacuation_in_progress()) st->print("evacuating, ");
  if (is_update_refs_in_progress()) st->print("updating refs, ");
  if (is_degenerated_gc_in_progress()) st->print("degenerated gc, ");
  if (is_full_gc_in_progress()) st->print("full gc, ");
  if (is_full_gc_move_in_progress()) st->print("full gc move, ");

  if (cancelled_gc()) {
    st->print("cancelled");
  } else {
    st->print("not cancelled");
  }
  st->cr();

  st->print_cr("Reserved region:");
  st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
               p2i(reserved_region().start()),
               p2i(reserved_region().end()));

  ShenandoahCollectionSet* cset = collection_set();
  st->print_cr("Collection set:");
  if (cset != NULL) {
    st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
    st->print_cr(" - map (biased): " PTR_FORMAT, p2i(cset->biased_map_address()));
  } else {
    st->print_cr(" (NULL)");
  }

  st->cr();
  MetaspaceAux::print_on(st);

  if (Verbose) {
    print_heap_regions_on(st);
  }
}

class ShenandoahInitGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    assert(thread == NULL || !thread->is_Java_thread(), "Don't expect JavaThread this early");
    if (thread != NULL && thread->is_Worker_thread()) {
      thread->gclab().initialize(true);
    }
  }
};

void ShenandoahHeap::post_initialize() {
  if (UseTLAB) {
    MutexLocker ml(Threads_lock);

    ShenandoahInitGCLABClosure init_gclabs;
    Threads::threads_do(&init_gclabs);
  }

  _scm->initialize(_max_workers);
  _full_gc->initialize(_gc_timer);

  ref_processing_init();

  _heuristics->initialize();

  JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
}

size_t ShenandoahHeap::used() const {
  OrderAccess::acquire();
  return (size_t) _used;
}

size_t ShenandoahHeap::committed() const {
  OrderAccess::acquire();
  return _committed;
}

void ShenandoahHeap::increase_committed(size_t bytes) {
  shenandoah_assert_heaplocked_or_safepoint();
  _committed += bytes;
}

void ShenandoahHeap::decrease_committed(size_t bytes) {
  shenandoah_assert_heaplocked_or_safepoint();
  _committed -= bytes;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  Atomic::add(bytes, &_used);
}

void ShenandoahHeap::set_used(size_t bytes) {
  OrderAccess::release_store_fence(&_used, bytes);
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert(used() >= bytes, "never decrease heap size by more than we've left");
  Atomic::add(-(jlong)bytes, &_used);
}

void ShenandoahHeap::increase_allocated(size_t bytes) {
  Atomic::add(bytes, &_bytes_allocated_since_gc_start);
}

void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
  size_t bytes = words * HeapWordSize;
  if (!waste) {
    increase_used(bytes);
  }
  increase_allocated(bytes);
  if (ShenandoahPacing) {
    control_thread()->pacing_notify_alloc(words);
    if (waste) {
      pacer()->claim_for_alloc(words, true);
    }
  }
}

size_t ShenandoahHeap::capacity() const {
  return committed();
}

size_t ShenandoahHeap::max_capacity() const {
  return _num_regions * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahHeap::soft_max_capacity() const {
  size_t v = OrderAccess::load_acquire((volatile size_t*)&_soft_max_size);
  assert(min_capacity() <= v && v <= max_capacity(),
         err_msg("Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
                 min_capacity(), v, max_capacity()));
  return v;
}

void ShenandoahHeap::set_soft_max_capacity(size_t v) {
  assert(min_capacity() <= v && v <= max_capacity(),
         err_msg("Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
                 min_capacity(), v, max_capacity()));
  OrderAccess::release_store_fence(&_soft_max_size, v);
}

size_t ShenandoahHeap::min_capacity() const {
  return _minimum_size;
}

size_t ShenandoahHeap::initial_capacity() const {
  return _initial_size;
}

bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* heap_base = (HeapWord*) base();
  HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
  return p >= heap_base && p < last_region_end;
}

void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
  assert (ShenandoahUncommit, "should be enabled");

  // Application allocates from the beginning of the heap, and GC allocates at
  // the end of it. It is more efficient to uncommit from the end, so that applications
  // could enjoy the near committed regions. GC allocations are much less frequent,
  // and therefore can accept the committing costs.

  size_t count = 0;
  for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
    ShenandoahHeapRegion* r = get_region(i - 1);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      ShenandoahHeapLocker locker(lock());
      if (r->is_empty_committed()) {
        if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
          break;
        }

        r->make_uncommitted();
        count++;
      }
    }
    SpinPause(); // allow allocators to take the lock
  }

  if (count > 0) {
    _control_thread->notify_heap_changed();
  }
}

HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->gclab().free() > thread->gclab().refill_waste_limit()) {
    thread->gclab().record_slow_allocation(size);
    return NULL;
  }

  // Discard gclab and allocate a new one.
  // To minimize fragmentation, the last GCLAB may be smaller than the rest.
  size_t new_gclab_size = thread->gclab().compute_size(size);

  thread->gclab().clear_before_allocation();

  if (new_gclab_size == 0) {
    return NULL;
  }

  // Allocated object should fit in new GCLAB, and new_gclab_size should be larger than min
  size_t min_size = MAX2(size + ThreadLocalAllocBuffer::alignment_reserve(), ThreadLocalAllocBuffer::min_size());
  new_gclab_size = MAX2(new_gclab_size, min_size);

  // Allocate a new GCLAB...
  size_t actual_size = 0;
  HeapWord* obj = allocate_new_gclab(min_size, new_gclab_size, &actual_size);

  if (obj == NULL) {
    return NULL;
  }

  assert (size <= actual_size, "allocation should fit");

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(obj, actual_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, actual_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->gclab().fill(obj, obj + size, actual_size);
  return obj;
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(word_size);
  return allocate_memory(req);
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
                                             size_t word_size,
                                             size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
  HeapWord* res = allocate_memory(req);
  if (res != NULL) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

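// Central allocation path for mutator and GC allocations: applies pacing for
// mutator allocations, retries after allocation failures while GC makes progress,
// and updates heap usage accounting on success.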
HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
  intptr_t pacer_epoch = 0;
  bool in_new_region = false;
  HeapWord* result = NULL;

  if (req.is_mutator_alloc()) {
    if (ShenandoahPacing) {
      pacer()->pace_for_alloc(req.size());
      pacer_epoch = pacer()->epoch();
    }

    if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
      result = allocate_memory_under_lock(req, in_new_region);
    }

    // Allocation failed, block until control thread reacted, then retry allocation.
    //
    // It might happen that one of the threads requesting allocation would unblock
    // way later after GC happened, only to fail the second allocation, because
    // other threads have already depleted the free storage. In this case, a better
    // strategy is to try again, as long as GC makes progress.
    //
    // Then, we need to make sure the allocation was retried after at least one
    // Full GC, which means we want to try more than ShenandoahFullGCThreshold times.

    size_t tries = 0;

    while (result == NULL && _progress_last_gc.is_set()) {
      tries++;
      control_thread()->handle_alloc_failure(req);
      result = allocate_memory_under_lock(req, in_new_region);
    }

    while (result == NULL && tries <= ShenandoahFullGCThreshold) {
      tries++;
      control_thread()->handle_alloc_failure(req);
      result = allocate_memory_under_lock(req, in_new_region);
    }

  } else {
    assert(req.is_gc_alloc(), "Can only accept GC allocs here");
    result = allocate_memory_under_lock(req, in_new_region);
    // Do not call handle_alloc_failure() here, because we cannot block.
    // The allocation failure would be handled by the WB slowpath with handle_alloc_failure_evac().
  }

  if (in_new_region) {
    control_thread()->notify_heap_changed();
  }

  if (result != NULL) {
    size_t requested = req.size();
    size_t actual = req.actual_size();

    assert (req.is_lab_alloc() || (requested == actual),
            err_msg("Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
                    ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual));

    if (req.is_mutator_alloc()) {
      notify_mutator_alloc_words(actual, false);

      // If we requested more than we were granted, give the rest back to pacer.
      // This only matters if we are in the same pacing epoch: do not try to unpace
      // over the budget for the other phase.
      if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
        pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
      }
    } else {
      increase_used(actual*HeapWordSize);
    }
  }

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
  ShenandoahHeapLocker locker(lock());
  return _free_set->allocate(req, in_new_region);
}

HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool* gc_overhead_limit_was_exceeded) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
  return allocate_memory(req);
}

class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  Thread* const _thread;
public:
  ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {}

  void do_object(oop p) {
    shenandoah_assert_marked(NULL, p);
    if (!p->is_forwarded()) {
      _heap->evacuate_object(p, _thread);
    }
  }
};

class ShenandoahEvacuationTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _sh;
  ShenandoahCollectionSet* const _cs;
  bool _concurrent;
public:
  ShenandoahEvacuationTask(ShenandoahHeap* sh,
                           ShenandoahCollectionSet* cs,
                           bool concurrent) :
    AbstractGangTask("Parallel Evacuation Task"),
    _sh(sh),
    _cs(cs),
    _concurrent(concurrent)
  {}

  void work(uint worker_id) {
    ShenandoahEvacOOMScope oom_evac_scope;
    if (_concurrent) {
      ShenandoahConcurrentWorkerSession worker_session(worker_id);
      do_work();
    } else {
      ShenandoahParallelWorkerSession worker_session(worker_id);
      do_work();
    }
  }

private:
  void do_work() {
    ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
    ShenandoahHeapRegion* r;
    while ((r =_cs->claim_next()) != NULL) {
      assert(r->has_live(), err_msg("Region " SIZE_FORMAT " should have been reclaimed early", r->index()));
      _sh->marked_object_iterate(r, &cl);

      if (ShenandoahPacing) {
        _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
      }

      if (_sh->cancelled_gc()) {
        break;
      }
    }
  }
};

void ShenandoahHeap::trash_cset_regions() {
  ShenandoahHeapLocker locker(lock());

  ShenandoahCollectionSet* set = collection_set();
  ShenandoahHeapRegion* r;
  set->clear_current_index();
  while ((r = set->next()) != NULL) {
    r->make_trash();
  }
  collection_set()->clear();
}

void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
  st->print_cr("Heap Regions:");
  st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
  st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");
  st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start, UWM=update watermark");
  st->print_cr("SN=alloc sequence number");

  for (size_t i = 0; i < num_regions(); i++) {
    get_region(i)->print_on(st);
  }
}

void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
  assert(start->is_humongous_start(), "reclaim regions starting with the first one");

  oop humongous_obj = oop(start->bottom());
  size_t size = humongous_obj->size();
  size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
  size_t index = start->index() + required_regions - 1;

  assert(!start->has_live(), "liveness must be zero");

  for(size_t i = 0; i < required_regions; i++) {
    // Reclaim from tail. Otherwise, assertion fails when printing region to trace log,
    // as it expects that every region belongs to a humongous region starting with a humongous start region.
    ShenandoahHeapRegion* region = get_region(index --);

    assert(region->is_humongous(), "expect correct humongous start or continuation");
    assert(!region->is_cset(), "Humongous region should not be in collection set");

    region->make_trash_immediate();
  }
}

class ShenandoahRetireGCLABClosure : public ThreadClosure {
private:
  bool _retire;
public:
  ShenandoahRetireGCLABClosure(bool retire) : _retire(retire) {};

  void do_thread(Thread* thread) {
    assert(thread->gclab().is_initialized(), err_msg("GCLAB should be initialized for %s", thread->name()));
    thread->gclab().make_parsable(_retire);
  }
};

void ShenandoahHeap::make_parsable(bool retire_tlabs) {
  if (UseTLAB) {
    CollectedHeap::ensure_parsability(retire_tlabs);
    ShenandoahRetireGCLABClosure cl(retire_tlabs);
    Threads::java_threads_do(&cl);
    _workers->threads_do(&cl);
  }
}

class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
private:
  ShenandoahRootEvacuator* _rp;

public:
  ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah evacuate and update roots"),
    _rp(rp) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahEvacuateUpdateRootsClosure cl;

    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
    _rp->roots_do(worker_id, &cl);
  }
};

void ShenandoahHeap::evacuate_and_update_roots() {
  COMPILER2_PRESENT(DerivedPointerTable::clear());

  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");

  {
    ShenandoahRootEvacuator rp(ShenandoahPhaseTimings::init_evac);
    ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
    workers()->run_task(&roots_task);
  }

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
}

size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
  // Returns size in bytes
  return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
}

size_t ShenandoahHeap::max_tlab_size() const {
  // Returns size in words
  return ShenandoahHeapRegion::max_tlab_size_words();
}

class ShenandoahResizeGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    assert(thread->gclab().is_initialized(), err_msg("GCLAB should be initialized for %s", thread->name()));
    thread->gclab().resize();
  }
};

void ShenandoahHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();

  ShenandoahResizeGCLABClosure cl;
  Threads::java_threads_do(&cl);
  _workers->threads_do(&cl);
}

class ShenandoahAccumulateStatisticsGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    assert(thread->gclab().is_initialized(), err_msg("GCLAB should be initialized for %s", thread->name()));
    thread->gclab().accumulate_statistics();
    thread->gclab().initialize_statistics();
  }
};

void ShenandoahHeap::accumulate_statistics_all_gclabs() {
  ShenandoahAccumulateStatisticsGCLABClosure cl;
  Threads::java_threads_do(&cl);
  _workers->threads_do(&cl);
}

void ShenandoahHeap::collect(GCCause::Cause cause) {
  _control_thread->request_gc(cause);
}

void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
  //assert(false, "Shouldn't need to do full collections");
}

CollectorPolicy* ShenandoahHeap::collector_policy() const {
  return _shenandoah_policy;
}

void ShenandoahHeap::resize_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

void ShenandoahHeap::accumulate_statistics_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

HeapWord* ShenandoahHeap::block_start(const void* addr) const {
  ShenandoahHeapRegion* r = heap_region_containing(addr);
  if (r != NULL) {
    return r->block_start(addr);
  }
  return NULL;
}

size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
  ShenandoahHeapRegion* r = heap_region_containing(addr);
  return r->block_size(addr);
}

bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
  ShenandoahHeapRegion* r = heap_region_containing(addr);
  return r->block_is_obj(addr);
}

jlong ShenandoahHeap::millis_since_last_gc() {
  double v = heuristics()->time_since_last_gc() * 1000;
  assert(0 <= v && v <= max_jlong, err_msg("value should fit: %f", v));
  return (jlong)v;
}

void ShenandoahHeap::prepare_for_verify() {
  if (SafepointSynchronize::is_at_safepoint()) {
    make_parsable(false);
  }
}

void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
  workers()->print_worker_threads_on(st);
  if (ShenandoahStringDedup::is_enabled()) {
    ShenandoahStringDedup::print_worker_threads_on(st);
  }
}

void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
  workers()->threads_do(tcl);
  if (ShenandoahStringDedup::is_enabled()) {
    ShenandoahStringDedup::threads_do(tcl);
  }
}

void ShenandoahHeap::print_tracing_info() const {
  if (PrintGC || TraceGen0Time || TraceGen1Time) {
    ResourceMark rm;
    outputStream* out = gclog_or_tty;
    phase_timings()->print_global_on(out);

    out->cr();
    out->cr();

    shenandoah_policy()->print_gc_stats(out);

    out->cr();
    out->cr();
  }
}

void ShenandoahHeap::verify(bool silent, VerifyOption vo) {
  if (ShenandoahSafepoint::is_at_shenandoah_safepoint() || ! UseTLAB) {
    if (ShenandoahVerify) {
      verifier()->verify_generic(vo);
    } else {
      // TODO: Consider allocating verification bitmaps on demand,
      // and turn this on unconditionally.
    }
  }
}
size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
  return _free_set->capacity();
}

class ObjectIterateScanRootClosure : public ExtendedOopClosure {
private:
  MarkBitMap* _bitmap;
  Stack<oop,mtGC>* _oop_stack;

  template <class T>
  void do_oop_work(T* p) {
    T o = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      obj = (oop) ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      assert(obj->is_oop(), "must be a valid oop");
      if (!_bitmap->isMarked((HeapWord*) obj)) {
        _bitmap->mark((HeapWord*) obj);
        _oop_stack->push(obj);
      }
    }
  }
public:
  ObjectIterateScanRootClosure(MarkBitMap* bitmap, Stack<oop,mtGC>* oop_stack) :
    _bitmap(bitmap), _oop_stack(oop_stack) {}
  void do_oop(oop* p) { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

/*
 * This is public API, used in preparation of object_iterate().
 * Since we don't do linear scan of heap in object_iterate() (see comment below), we don't
 * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
 * control, we call SH::make_parsable().
 */
void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
  // No-op.
}

/*
 * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
 *
 * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
 * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
 * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
 * scanning therefore depends on having a valid marking bitmap to support it. However, we only
 * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
 * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
 * wiped the bitmap in preparation for next marking).
 *
 * For all those reasons, we implement object iteration as a single marking traversal, reporting
 * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
 * is allowed to report dead objects, but is not required to do so.
 */
void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
  assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
  if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
    log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
    return;
  }

  // Reset bitmap
  _aux_bit_map.clear();

  Stack<oop,mtGC> oop_stack;

  ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);

  {
    // First, we process GC roots according to current GC cycle.
    // This populates the work stack with initial objects.
    // It is important to relinquish the associated locks before diving
    // into heap dumper.
    ShenandoahHeapIterationRootScanner rp;
    rp.roots_do(&oops);
  }

  // Work through the oop stack to traverse heap.
  while (! oop_stack.is_empty()) {
    oop obj = oop_stack.pop();
    assert(obj->is_oop(), "must be a valid oop");
    cl->do_object(obj);
    obj->oop_iterate(&oops);
  }

  assert(oop_stack.is_empty(), "should be empty");

  if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
    log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
  }
}

void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
  assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
  object_iterate(cl);
}

void ShenandoahHeap::oop_iterate(ExtendedOopClosure* cl) {
  ObjectToOopClosure cl2(cl);
  object_iterate(&cl2);
}

void ShenandoahHeap::gc_prologue(bool b) {
  Unimplemented();
}

void ShenandoahHeap::gc_epilogue(bool b) {
  Unimplemented();
}

void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* current = get_region(i);
    blk->heap_region_do(current);
  }
}

class ShenandoahParallelHeapRegionTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionClosure* const _blk;

  shenandoah_padding(0);
  volatile jint _index;
  shenandoah_padding(1);

public:
  ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk) :
    AbstractGangTask("Parallel Region Task"),
    _heap(ShenandoahHeap::heap()), _blk(blk), _index(0) {}

  void work(uint worker_id) {
    jint stride = (jint)ShenandoahParallelRegionStride;

    jint max = (jint)_heap->num_regions();
    while (_index < max) {
      jint cur = Atomic::add(stride, &_index) - stride;
      jint start = cur;
      jint end = MIN2(cur + stride, max);
      if (start >= max) break;

      for (jint i = cur; i < end; i++) {
        ShenandoahHeapRegion* current = _heap->get_region((size_t)i);
        _blk->heap_region_do(current);
      }
    }
  }
};

void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
  assert(blk->is_thread_safe(), "Only thread-safe closures here");
  if (num_regions() > ShenandoahParallelRegionStride) {
    ShenandoahParallelHeapRegionTask task(blk);
    workers()->run_task(&task);
  } else {
    heap_region_iterate(blk);
  }
}

class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert(!r->has_live(),
           err_msg("Region " SIZE_FORMAT " should have no live data", r->index()));
    if (r->is_active()) {
      // Check if region needs updating its TAMS. We have updated it already during concurrent
      // reset, so it is very likely we don't need to do another write here.
      if (_ctx->top_at_mark_start(r) != r->top()) {
        _ctx->capture_top_at_mark_start(r);
      }
    } else {
      assert(_ctx->top_at_mark_start(r) == r->top(),
             err_msg("Region " SIZE_FORMAT " should already have correct TAMS", r->index()));
    }
  }

  bool is_thread_safe() { return true; }
};

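// Init-mark safepoint operation: verifies preconditions, retires TLABs, captures
// top-at-mark-start for active regions, and scans GC roots before concurrent marking.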
void ShenandoahHeap::op_init_mark() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");

  assert(marking_context()->is_bitmap_clear(), "need clear marking bitmap");
  assert(!marking_context()->is_complete(), "should not be complete");
  assert(!has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    verifier()->verify_before_concmark();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::accumulate_stats);
    accumulate_statistics_tlabs();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  set_concurrent_mark_in_progress(true);
  // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable);
    make_parsable(true);
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    parallel_heap_region_iterate(&cl);
  }

  // Make above changes visible to worker threads
  OrderAccess::fence();

  concurrent_mark()->mark_roots(ShenandoahPhaseTimings::scan_roots);

  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs);
    resize_tlabs();
  }

  if (ShenandoahPacing) {
    pacer()->setup_for_mark();
  }
}

void ShenandoahHeap::op_mark() {
  concurrent_mark()->mark_from_roots();
}

class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
  ShenandoahHeapLock* const _lock;

public:
  ShenandoahFinalMarkUpdateRegionStateClosure() :
    _ctx(ShenandoahHeap::heap()->complete_marking_context()), _lock(ShenandoahHeap::heap()->lock()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_active()) {
      // All allocations past TAMS are implicitly live, adjust the region data.
      // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.
      HeapWord *tams = _ctx->top_at_mark_start(r);
      HeapWord *top = r->top();
      if (top > tams) {
        r->increase_live_data_alloc_words(pointer_delta(top, tams));
      }

      // We are about to select the collection set, make sure it knows about
      // current pinning status. Also, this allows trashing more regions that
      // now have their pinning status dropped.
      if (r->is_pinned()) {
        if (r->pin_count() == 0) {
          ShenandoahHeapLocker locker(_lock);
          r->make_unpinned();
        }
      } else {
        if (r->pin_count() > 0) {
          ShenandoahHeapLocker locker(_lock);
          r->make_pinned();
        }
      }

      // Remember limit for updating refs. It's guaranteed that we get no
      // from-space-refs written from here on.
      r->set_update_watermark_at_safepoint(r->top());
    } else {
      assert(!r->has_live(),
             err_msg("Region " SIZE_FORMAT " should have no live data", r->index()));
      assert(_ctx->top_at_mark_start(r) == r->top(),
             err_msg("Region " SIZE_FORMAT " should have correct TAMS", r->index()));
    }
  }

  bool is_thread_safe() { return true; }
};

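// Final-mark safepoint operation: finishes marking, updates region states, chooses
// the collection set, rebuilds the free set, and starts evacuation when needed.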
1449
void ShenandoahHeap::op_final_mark() {
1450
assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1451
assert(!has_forwarded_objects(), "No forwarded objects on this path");
1452
1453
// It is critical that we
1454
// evacuate roots right after finishing marking, so that we don't
1455
// get unmarked objects in the roots.
1456
1457
if (!cancelled_gc()) {
1458
concurrent_mark()->finish_mark_from_roots(/* full_gc = */ false);
1459
1460
TASKQUEUE_STATS_ONLY(concurrent_mark()->task_queues()->reset_taskqueue_stats());
1461
1462
if (ShenandoahVerify) {
1463
verifier()->verify_roots_no_forwarded();
1464
}
1465
1466
TASKQUEUE_STATS_ONLY(concurrent_mark()->task_queues()->print_taskqueue_stats());
1467
1468
{
1469
ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_region_states);
1470
ShenandoahFinalMarkUpdateRegionStateClosure cl;
1471
parallel_heap_region_iterate(&cl);
1472
1473
assert_pinned_region_status();
1474
}
1475
1476
// Force the threads to reacquire their TLABs outside the collection set.
1477
{
1478
ShenandoahGCPhase phase(ShenandoahPhaseTimings::retire_tlabs);
1479
make_parsable(true);
1480
}
1481
1482
{
1483
ShenandoahGCPhase phase(ShenandoahPhaseTimings::choose_cset);
1484
ShenandoahHeapLocker locker(lock());
1485
_collection_set->clear();
1486
heuristics()->choose_collection_set(_collection_set);
1487
}
1488
1489
{
1490
ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_rebuild_freeset);
1491
ShenandoahHeapLocker locker(lock());
1492
_free_set->rebuild();
1493
}
1494
1495
// If collection set has candidates, start evacuation.
1496
// Otherwise, bypass the rest of the cycle.
1497
if (!collection_set()->is_empty()) {
1498
ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac);
1499
1500
if (ShenandoahVerify) {
1501
verifier()->verify_before_evacuation();
1502
}
1503
1504
set_evacuation_in_progress(true);
1505
// From here on, we need to update references.
1506
set_has_forwarded_objects(true);
1507
1508
if (!is_degenerated_gc_in_progress()) {
1509
evacuate_and_update_roots();
1510
}
1511
1512
if (ShenandoahPacing) {
1513
pacer()->setup_for_evac();
1514
}
1515
1516
if (ShenandoahVerify) {
1517
verifier()->verify_roots_no_forwarded();
1518
verifier()->verify_during_evacuation();
1519
}
1520
} else {
1521
if (ShenandoahVerify) {
1522
verifier()->verify_after_concmark();
1523
}
1524
1525
if (VerifyAfterGC) {
1526
Universe::verify();
1527
}
1528
}
1529
1530
} else {
1531
concurrent_mark()->cancel();
1532
complete_marking();
1533
1534
if (process_references()) {
1535
// Abandon reference processing right away: pre-cleaning must have failed.
1536
ReferenceProcessor *rp = ref_processor();
1537
rp->disable_discovery();
1538
rp->abandon_partial_discovery();
1539
rp->verify_no_references_recorded();
1540
}
1541
}
1542
}
1543
1544
void ShenandoahHeap::op_conc_evac() {
1545
ShenandoahEvacuationTask task(this, _collection_set, true);
1546
workers()->run_task(&task);
1547
}
1548
1549
void ShenandoahHeap::op_stw_evac() {
1550
ShenandoahEvacuationTask task(this, _collection_set, false);
1551
workers()->run_task(&task);
1552
}
1553
1554
void ShenandoahHeap::op_updaterefs() {
1555
update_heap_references(true);
1556
}
1557
1558
void ShenandoahHeap::op_cleanup_early() {
1559
free_set()->recycle_trash();
1560
}
1561
1562
void ShenandoahHeap::op_cleanup_complete() {
1563
free_set()->recycle_trash();
1564
}
1565
1566
class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1567
private:
1568
ShenandoahMarkingContext* const _ctx;
1569
public:
1570
ShenandoahResetUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
1571
1572
void heap_region_do(ShenandoahHeapRegion* r) {
1573
if (r->is_active()) {
1574
// Reset live data and set TAMS optimistically. We would recheck these under the pause
1575
// anyway to capture any updates that happen in the meantime.
1576
r->clear_live_data();
1577
_ctx->capture_top_at_mark_start(r);
1578
}
1579
}
1580
1581
bool is_thread_safe() { return true; }
1582
};
1583
1584
void ShenandoahHeap::op_reset() {
1585
if (ShenandoahPacing) {
1586
pacer()->setup_for_reset();
1587
}
1588
reset_mark_bitmap();
1589
1590
ShenandoahResetUpdateRegionStateClosure cl;
1591
parallel_heap_region_iterate(&cl);
1592
}
1593
1594
void ShenandoahHeap::op_preclean() {
1595
if (ShenandoahPacing) {
1596
pacer()->setup_for_preclean();
1597
}
1598
concurrent_mark()->preclean_weak_refs();
1599
}
1600
1601
void ShenandoahHeap::op_full(GCCause::Cause cause) {
1602
ShenandoahMetricsSnapshot metrics;
1603
metrics.snap_before();
1604
1605
full_gc()->do_it(cause);
1606
1607
metrics.snap_after();
1608
1609
if (metrics.is_good_progress()) {
1610
_progress_last_gc.set();
1611
} else {
1612
// Nothing to do. Tell the allocation path that we have failed to make
1613
// progress, and it can finally fail.
1614
_progress_last_gc.unset();
1615
}
1616
}
1617
1618
void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
1619
// Degenerated GC is STW, but it can also fail. The current mechanics communicate
1620
// GC failure via the cancelled_gc() flag. So, if we detect a failure after
1621
// some phase, we have to upgrade the Degenerated GC to a Full GC.
1622
1623
clear_cancelled_gc();
1624
1625
ShenandoahMetricsSnapshot metrics;
1626
metrics.snap_before();
1627
1628
switch (point) {
1629
// The cases below form the Duff's-like device: it describes the actual GC cycle,
1630
// but enters it at different points, depending on which concurrent phase had
1631
// degenerated.
1632
1633
case _degenerated_outside_cycle:
1634
// We have degenerated from outside the cycle, which means something is bad with
1635
// the heap, most probably heavy humongous fragmentation, or we are very low on free
1636
// space. It makes little sense to wait for Full GC to reclaim as much as it can, when
1637
// we can do the most aggressive degen cycle, which includes processing references and
1638
// class unloading, unless those features are explicitly disabled.
1639
//
1640
// Note that we can only do this for "outside-cycle" degens, otherwise we would risk
1641
// changing the cycle parameters mid-cycle during concurrent -> degenerated handover.
1642
set_process_references(heuristics()->can_process_references());
1643
set_unload_classes(heuristics()->can_unload_classes());
1644
1645
op_reset();
1646
1647
op_init_mark();
1648
if (cancelled_gc()) {
1649
op_degenerated_fail();
1650
return;
1651
}
1652
1653
case _degenerated_mark:
1654
op_final_mark();
1655
if (cancelled_gc()) {
1656
op_degenerated_fail();
1657
return;
1658
}
1659
1660
op_cleanup_early();
1661
1662
case _degenerated_evac:
1663
// If the heuristics decided we should do the cycle, this flag is set,
1664
// and we can do evacuation. Otherwise, this is the shortcut cycle.
1665
if (is_evacuation_in_progress()) {
1666
1667
// Degeneration under oom-evac protocol might have left some objects in
1668
// collection set un-evacuated. Restart evacuation from the beginning to
1669
// capture all objects. For all the objects that are already evacuated,
1670
// it would be a simple check, which is supposed to be fast. This is also
1671
// safe to do even without degeneration, as CSet iterator is at beginning
1672
// in preparation for evacuation anyway.
1673
//
1674
// Before doing that, we need to make sure we never had any cset-pinned
1675
// regions. This may happen if allocation failure happened when evacuating
1676
// the about-to-be-pinned object, oom-evac protocol left the object in
1677
// the collection set, and then the pin reached the cset region. If we continue
1678
// the cycle here, we would trash the cset and alive objects in it. To avoid
1679
// it, we fail degeneration right away and slide into Full GC to recover.
1680
1681
{
1682
sync_pinned_region_status();
1683
collection_set()->clear_current_index();
1684
1685
ShenandoahHeapRegion* r;
1686
while ((r = collection_set()->next()) != NULL) {
1687
if (r->is_pinned()) {
1688
cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
1689
op_degenerated_fail();
1690
return;
1691
}
1692
}
1693
1694
collection_set()->clear_current_index();
1695
}
1696
1697
op_stw_evac();
1698
if (cancelled_gc()) {
1699
op_degenerated_fail();
1700
return;
1701
}
1702
}
1703
1704
// If the heuristics decided we should do the cycle, this flag is set,
1705
// and we need to do update-refs. Otherwise, this is the shortcut cycle.
1706
if (has_forwarded_objects()) {
1707
op_init_updaterefs();
1708
if (cancelled_gc()) {
1709
op_degenerated_fail();
1710
return;
1711
}
1712
}
1713
1714
case _degenerated_updaterefs:
1715
if (has_forwarded_objects()) {
1716
op_final_updaterefs();
1717
if (cancelled_gc()) {
1718
op_degenerated_fail();
1719
return;
1720
}
1721
}
1722
1723
op_cleanup_complete();
1724
break;
1725
1726
default:
1727
ShouldNotReachHere();
1728
}
1729
1730
if (ShenandoahVerify) {
1731
verifier()->verify_after_degenerated();
1732
}
1733
1734
if (VerifyAfterGC) {
1735
Universe::verify();
1736
}
1737
1738
metrics.snap_after();
1739
1740
// Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles,
1741
// because that probably means the heap is overloaded and/or fragmented.
1742
if (!metrics.is_good_progress()) {
1743
_progress_last_gc.unset();
1744
cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
1745
op_degenerated_futile();
1746
} else {
1747
_progress_last_gc.set();
1748
}
1749
}
1750
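// A minimal, self-contained C++ sketch (not part of the HotSpot sources) of the
// "Duff's-like device" used by op_degenerated() above: a single switch describes
// the whole GC cycle, and the degeneration point only chooses where to enter it,
// with each case falling through into the remaining phases. The names below
// (DemoDegenPoint, demo_degen_cycle) are hypothetical.
enum DemoDegenPoint { DEMO_OUTSIDE_CYCLE, DEMO_MARK, DEMO_EVAC, DEMO_UPDATEREFS };

static void demo_degen_cycle(DemoDegenPoint point) {
  switch (point) {
    case DEMO_OUTSIDE_CYCLE:
      // reset, then init-mark
      // fall through
    case DEMO_MARK:
      // final-mark, then early cleanup
      // fall through
    case DEMO_EVAC:
      // evacuation, then init-update-refs
      // fall through
    case DEMO_UPDATEREFS:
      // final-update-refs, then complete cleanup
      break;
  }
}
// Entering at DEMO_MARK runs marking, evacuation and update-refs; entering at
// DEMO_UPDATEREFS runs only the tail of the cycle, mirroring the cases above.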
1751
void ShenandoahHeap::op_degenerated_fail() {
1752
log_info(gc)("Cannot finish degeneration, upgrading to Full GC");
1753
shenandoah_policy()->record_degenerated_upgrade_to_full();
1754
op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1755
}
1756
1757
void ShenandoahHeap::op_degenerated_futile() {
1758
shenandoah_policy()->record_degenerated_upgrade_to_full();
1759
op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1760
}
1761
1762
void ShenandoahHeap::complete_marking() {
1763
if (is_concurrent_mark_in_progress()) {
1764
set_concurrent_mark_in_progress(false);
1765
}
1766
1767
if (!cancelled_gc()) {
1768
// If we needed to update refs, and concurrent marking has been cancelled,
1769
// we need to finish updating references.
1770
set_has_forwarded_objects(false);
1771
mark_complete_marking_context();
1772
}
1773
}
1774
1775
void ShenandoahHeap::force_satb_flush_all_threads() {
1776
if (!is_concurrent_mark_in_progress()) {
1777
// No need to flush SATBs
1778
return;
1779
}
1780
1781
// Do not block if Threads lock is busy. This avoids the potential deadlock
1782
// when this code is called from the periodic task, and something else is
1783
// expecting the periodic task to complete without blocking. On the off-chance
1784
// Threads lock is busy momentarily, try to acquire several times.
1785
for (int t = 0; t < 10; t++) {
1786
if (Threads_lock->try_lock()) {
1787
JavaThread::set_force_satb_flush_all_threads(true);
1788
Threads_lock->unlock();
1789
1790
// The threads are not "acquiring" their thread-local data, but it does not
1791
// hurt to "release" the updates here anyway.
1792
OrderAccess::fence();
1793
break;
1794
}
1795
os::naked_short_sleep(1);
1796
}
1797
}
1798
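// A minimal, self-contained C++ sketch (standard library, not HotSpot code) of
// the bounded "try-lock, back off, retry" pattern used above to avoid blocking
// on a contended lock. The names demo_try_with_retries and 'm' are hypothetical.
#include <mutex>
#include <thread>
#include <chrono>

static bool demo_try_with_retries(std::mutex& m, int attempts) {
  for (int t = 0; t < attempts; t++) {
    if (m.try_lock()) {
      // ... perform the brief update while holding the lock ...
      m.unlock();
      return true;                 // succeeded without ever blocking
    }
    // Lock was busy; sleep briefly and retry instead of blocking on it.
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
  }
  return false;                    // give up rather than risk a deadlock
}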
1799
void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
1800
assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
1801
_gc_state.set_cond(mask, value);
1802
JavaThread::set_gc_state_all_threads(_gc_state.raw_value());
1803
}
1804
1805
void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1806
if (has_forwarded_objects()) {
1807
set_gc_state_mask(MARKING | UPDATEREFS, in_progress);
1808
} else {
1809
set_gc_state_mask(MARKING, in_progress);
1810
}
1811
JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1812
}
1813
1814
void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1815
assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1816
set_gc_state_mask(EVACUATION, in_progress);
1817
}
1818
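// A minimal, self-contained C++ sketch (not HotSpot code) of the one-byte GC
// state with per-phase bit flags that set_gc_state_mask() above maintains and
// publishes to the Java threads. DemoGCState and its members are hypothetical.
struct DemoGCState {
  enum Flag { MARKING = 1 << 0, EVACUATION = 1 << 1, UPDATEREFS = 1 << 2 };

  unsigned char raw;

  DemoGCState() : raw(0) {}

  // Set the bits in 'mask' when 'value' is true, clear them otherwise,
  // analogous to _gc_state.set_cond(mask, value) in the methods above.
  void set_cond(unsigned char mask, bool value) {
    raw = (unsigned char)(value ? (raw | mask) : (raw & ~mask));
  }
};
// For example, starting concurrent mark with forwarded objects present would
// correspond to set_cond(MARKING | UPDATEREFS, true) on this sketch.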
1819
void ShenandoahHeap::ref_processing_init() {
1820
MemRegion mr = reserved_region();
1821
1822
assert(_max_workers > 0, "Sanity");
1823
1824
bool mt_processing = ParallelRefProcEnabled && (ParallelGCThreads > 1);
1825
bool mt_discovery = _max_workers > 1;
1826
1827
_ref_processor =
1828
new ReferenceProcessor(mr, // span
1829
mt_processing, // MT processing
1830
_max_workers, // Degree of MT processing
1831
mt_discovery, // MT discovery
1832
_max_workers, // Degree of MT discovery
1833
false, // Reference discovery is not atomic
1834
NULL); // No closure, should be installed before use
1835
1836
log_info(gc, init)("Reference processing: %s discovery, %s processing",
1837
mt_discovery ? "parallel" : "serial",
1838
mt_processing ? "parallel" : "serial");
1839
1840
shenandoah_assert_rp_isalive_not_installed();
1841
}
1842
1843
void ShenandoahHeap::acquire_pending_refs_lock() {
1844
_control_thread->slt()->manipulatePLL(SurrogateLockerThread::acquirePLL);
1845
}
1846
1847
void ShenandoahHeap::release_pending_refs_lock() {
1848
_control_thread->slt()->manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL);
1849
}
1850
1851
GCTracer* ShenandoahHeap::tracer() {
1852
return shenandoah_policy()->tracer();
1853
}
1854
1855
size_t ShenandoahHeap::tlab_used(Thread* thread) const {
1856
return _free_set->used();
1857
}
1858
1859
void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
1860
if (try_cancel_gc()) {
1861
FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
1862
log_info(gc)("%s", msg.buffer());
1863
Events::log(Thread::current(), "%s", msg.buffer());
1864
}
1865
}
1866
1867
uint ShenandoahHeap::max_workers() {
1868
return _max_workers;
1869
}
1870
1871
void ShenandoahHeap::stop() {
1872
// The shutdown sequence should be able to terminate when GC is running.
1873
1874
// Step 0. Notify policy to disable event recording.
1875
_shenandoah_policy->record_shutdown();
1876
1877
// Step 1. Notify control thread that we are in shutdown.
1878
// Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
1879
// Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
1880
_control_thread->prepare_for_graceful_shutdown();
1881
1882
// Step 2. Notify GC workers that we are cancelling GC.
1883
cancel_gc(GCCause::_shenandoah_stop_vm);
1884
1885
// Step 3. Wait until GC worker exits normally.
1886
_control_thread->stop();
1887
1888
// Step 4. Stop String Dedup thread if it is active
1889
if (ShenandoahStringDedup::is_enabled()) {
1890
ShenandoahStringDedup::stop();
1891
}
1892
}
1893
1894
void ShenandoahHeap::unload_classes_and_cleanup_tables(bool full_gc) {
1895
assert(heuristics()->can_unload_classes(), "Class unloading should be enabled");
1896
1897
ShenandoahGCPhase root_phase(full_gc ?
1898
ShenandoahPhaseTimings::full_gc_purge :
1899
ShenandoahPhaseTimings::purge);
1900
1901
ShenandoahIsAliveSelector alive;
1902
BoolObjectClosure* is_alive = alive.is_alive_closure();
1903
1904
// Cleaning of klasses depends on correct information from MetadataMarkOnStack. The CodeCache::mark_on_stack
1905
// part is too slow to be done serially, so it is handled during the ShenandoahParallelCleaning phase.
1906
// Defer the cleaning until we have complete on_stack data.
1907
MetadataOnStackMark md_on_stack(false /* Don't visit the code cache at this point */);
1908
1909
bool purged_class;
1910
1911
// Unload classes and purge SystemDictionary.
1912
{
1913
ShenandoahGCPhase phase(full_gc ?
1914
ShenandoahPhaseTimings::full_gc_purge_class_unload :
1915
ShenandoahPhaseTimings::purge_class_unload);
1916
purged_class = SystemDictionary::do_unloading(is_alive,
1917
false /* Defer klass cleaning */);
1918
}
1919
{
1920
ShenandoahGCPhase phase(full_gc ?
1921
ShenandoahPhaseTimings::full_gc_purge_par :
1922
ShenandoahPhaseTimings::purge_par);
1923
uint active = _workers->active_workers();
1924
ShenandoahParallelCleaningTask unlink_task(is_alive, true, true, active, purged_class);
1925
_workers->run_task(&unlink_task);
1926
}
1927
1928
{
1929
ShenandoahGCPhase phase(full_gc ?
1930
ShenandoahPhaseTimings::full_gc_purge_metadata :
1931
ShenandoahPhaseTimings::purge_metadata);
1932
ClassLoaderDataGraph::free_deallocate_lists();
1933
}
1934
1935
if (ShenandoahStringDedup::is_enabled()) {
1936
ShenandoahGCPhase phase(full_gc ?
1937
ShenandoahPhaseTimings::full_gc_purge_string_dedup :
1938
ShenandoahPhaseTimings::purge_string_dedup);
1939
ShenandoahStringDedup::parallel_cleanup();
1940
}
1941
1942
{
1943
ShenandoahGCPhase phase(full_gc ?
1944
ShenandoahPhaseTimings::full_gc_purge_cldg :
1945
ShenandoahPhaseTimings::purge_cldg);
1946
ClassLoaderDataGraph::purge();
1947
}
1948
}
1949
1950
void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
1951
set_gc_state_mask(HAS_FORWARDED, cond);
1952
}
1953
1954
void ShenandoahHeap::set_process_references(bool pr) {
1955
_process_references.set_cond(pr);
1956
}
1957
1958
void ShenandoahHeap::set_unload_classes(bool uc) {
1959
_unload_classes.set_cond(uc);
1960
}
1961
1962
bool ShenandoahHeap::process_references() const {
1963
return _process_references.is_set();
1964
}
1965
1966
bool ShenandoahHeap::unload_classes() const {
1967
return _unload_classes.is_set();
1968
}
1969
1970
address ShenandoahHeap::in_cset_fast_test_addr() {
1971
ShenandoahHeap* heap = ShenandoahHeap::heap();
1972
assert(heap->collection_set() != NULL, "Sanity");
1973
return (address) heap->collection_set()->biased_map_address();
1974
}
1975
1976
address ShenandoahHeap::cancelled_gc_addr() {
1977
return (address) ShenandoahHeap::heap()->_cancelled_gc.addr_of();
1978
}
1979
1980
address ShenandoahHeap::gc_state_addr() {
1981
return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
1982
}
1983
1984
size_t ShenandoahHeap::conservative_max_heap_alignment() {
1985
size_t align = ShenandoahMaxRegionSize;
1986
if (UseLargePages) {
1987
align = MAX2(align, os::large_page_size());
1988
}
1989
return align;
1990
}
1991
1992
size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
1993
return OrderAccess::load_acquire(&_bytes_allocated_since_gc_start);
1994
}
1995
1996
void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
1997
OrderAccess::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0);
1998
}
1999
2000
void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2001
_degenerated_gc_in_progress.set_cond(in_progress);
2002
}
2003
2004
void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2005
_full_gc_in_progress.set_cond(in_progress);
2006
}
2007
2008
void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2009
assert (is_full_gc_in_progress(), "should be");
2010
_full_gc_move_in_progress.set_cond(in_progress);
2011
}
2012
2013
void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2014
set_gc_state_mask(UPDATEREFS, in_progress);
2015
}
2016
2017
void ShenandoahHeap::register_nmethod(nmethod* nm) {
2018
ShenandoahCodeRoots::add_nmethod(nm);
2019
}
2020
2021
void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2022
ShenandoahCodeRoots::remove_nmethod(nm);
2023
}
2024
2025
oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
2026
heap_region_containing(o)->record_pin();
2027
return o;
2028
}
2029
2030
void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2031
heap_region_containing(o)->record_unpin();
2032
}
2033
2034
void ShenandoahHeap::sync_pinned_region_status() {
2035
ShenandoahHeapLocker locker(lock());
2036
2037
for (size_t i = 0; i < num_regions(); i++) {
2038
ShenandoahHeapRegion *r = get_region(i);
2039
if (r->is_active()) {
2040
if (r->is_pinned()) {
2041
if (r->pin_count() == 0) {
2042
r->make_unpinned();
2043
}
2044
} else {
2045
if (r->pin_count() > 0) {
2046
r->make_pinned();
2047
}
2048
}
2049
}
2050
}
2051
2052
assert_pinned_region_status();
2053
}
2054
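// A minimal, self-contained C++ sketch (not HotSpot code) of the reconciliation
// the loop above performs under the heap lock: a region should be flagged as
// pinned exactly when it has outstanding pins. DemoRegion is hypothetical.
#include <cstddef>

struct DemoRegion {
  size_t pin_count;
  bool   pinned;

  void sync_pinned_state() {
    if (pinned && pin_count == 0) {
      pinned = false;   // last pin was released since the flag was set
    } else if (!pinned && pin_count > 0) {
      pinned = true;    // a pin arrived since the flag was last synced
    }
  }
};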
2055
#ifdef ASSERT
2056
void ShenandoahHeap::assert_pinned_region_status() {
2057
for (size_t i = 0; i < num_regions(); i++) {
2058
ShenandoahHeapRegion* r = get_region(i);
2059
assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
2060
err_msg("Region " SIZE_FORMAT " pinning status is inconsistent", i));
2061
}
2062
}
2063
#endif
2064
2065
GCTimer* ShenandoahHeap::gc_timer() const {
2066
return _gc_timer;
2067
}
2068
2069
#ifdef ASSERT
2070
void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2071
assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2072
2073
if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2074
if (UseDynamicNumberOfGCThreads ||
2075
(FLAG_IS_DEFAULT(ParallelGCThreads) && ForceDynamicNumberOfGCThreads)) {
2076
assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
2077
} else {
2078
// Use ParallelGCThreads inside safepoints
2079
assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
2080
}
2081
} else {
2082
if (UseDynamicNumberOfGCThreads ||
2083
(FLAG_IS_DEFAULT(ConcGCThreads) && ForceDynamicNumberOfGCThreads)) {
2084
assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
2085
} else {
2086
// Use ConcGCThreads outside safepoints
2087
assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2088
}
2089
}
2090
}
2091
#endif
2092
2093
ShenandoahVerifier* ShenandoahHeap::verifier() {
2094
guarantee(ShenandoahVerify, "Should be enabled");
2095
assert (_verifier != NULL, "sanity");
2096
return _verifier;
2097
}
2098
2099
ShenandoahUpdateHeapRefsClosure::ShenandoahUpdateHeapRefsClosure() :
2100
_heap(ShenandoahHeap::heap()) {}
2101
2102
class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
2103
private:
2104
ShenandoahHeap* _heap;
2105
ShenandoahRegionIterator* _regions;
2106
bool _concurrent;
2107
2108
public:
2109
ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions, bool concurrent) :
2110
AbstractGangTask("Concurrent Update References Task"),
2111
_heap(ShenandoahHeap::heap()),
2112
_regions(regions),
2113
_concurrent(concurrent) {
2114
}
2115
2116
void work(uint worker_id) {
2117
ShenandoahConcurrentWorkerSession worker_session(worker_id);
2118
ShenandoahUpdateHeapRefsClosure cl;
2119
ShenandoahHeapRegion* r = _regions->next();
2120
ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
2121
while (r != NULL) {
2122
HeapWord* update_watermark = r->get_update_watermark();
2123
assert (update_watermark >= r->bottom(), "sanity");
2124
if (r->is_active() && !r->is_cset()) {
2125
_heap->marked_object_oop_iterate(r, &cl, update_watermark);
2126
}
2127
if (ShenandoahPacing) {
2128
_heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2129
}
2130
if (_heap->cancelled_gc()) {
2131
return;
2132
}
2133
r = _regions->next();
2134
}
2135
}
2136
};
2137
2138
void ShenandoahHeap::update_heap_references(bool concurrent) {
2139
ShenandoahUpdateHeapRefsTask task(&_update_refs_iterator, concurrent);
2140
workers()->run_task(&task);
2141
}
2142
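// A minimal, self-contained C++ sketch (standard library, not HotSpot code) of
// the "shared iterator, many workers" pattern the update-refs task above uses:
// workers claim region indices from a single atomic counter until the regions
// run out or the GC is cancelled. DemoRegionIterator is hypothetical.
#include <atomic>
#include <cstddef>

struct DemoRegionIterator {
  std::atomic<size_t> next_index;
  size_t              num_regions;

  explicit DemoRegionIterator(size_t n) : next_index(0), num_regions(n) {}

  // Each index is handed out to exactly one worker; returns num_regions
  // once the supply is exhausted.
  size_t claim() {
    size_t i = next_index.fetch_add(1);
    return (i < num_regions) ? i : num_regions;
  }
};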
2143
void ShenandoahHeap::op_init_updaterefs() {
2144
assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2145
2146
set_evacuation_in_progress(false);
2147
2148
if (ShenandoahVerify) {
2149
if (!is_degenerated_gc_in_progress()) {
2150
verifier()->verify_roots_no_forwarded_except(ShenandoahRootVerifier::ThreadRoots);
2151
}
2152
verifier()->verify_before_updaterefs();
2153
}
2154
2155
set_update_refs_in_progress(true);
2156
2157
{
2158
ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_prepare);
2159
2160
make_parsable(true);
2161
2162
// Reset iterator.
2163
_update_refs_iterator.reset();
2164
}
2165
2166
if (ShenandoahPacing) {
2167
pacer()->setup_for_updaterefs();
2168
}
2169
}
2170
2171
class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
2172
private:
2173
ShenandoahHeapLock* const _lock;
2174
2175
public:
2176
ShenandoahFinalUpdateRefsUpdateRegionStateClosure() : _lock(ShenandoahHeap::heap()->lock()) {}
2177
2178
void heap_region_do(ShenandoahHeapRegion* r) {
2179
// Drop the unnecessary "pinned" state from regions that do not have CP marks
2180
// anymore, as this would allow trashing them.
2181
2182
if (r->is_active()) {
2183
if (r->is_pinned()) {
2184
if (r->pin_count() == 0) {
2185
ShenandoahHeapLocker locker(_lock);
2186
r->make_unpinned();
2187
}
2188
} else {
2189
if (r->pin_count() > 0) {
2190
ShenandoahHeapLocker locker(_lock);
2191
r->make_pinned();
2192
}
2193
}
2194
}
2195
}
2196
2197
bool is_thread_safe() { return true; }
2198
};
2199
2200
void ShenandoahHeap::op_final_updaterefs() {
2201
assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2202
2203
// Check if there is left-over work, and finish it
2204
if (_update_refs_iterator.has_next()) {
2205
ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_finish_work);
2206
2207
// Finish updating references where we left off.
2208
clear_cancelled_gc();
2209
update_heap_references(false);
2210
}
2211
2212
// Clear cancelled GC, if set. On the cancellation path, the block above handles
2213
// everything. On degenerated paths, cancelled GC would not be set anyway.
2214
if (cancelled_gc()) {
2215
clear_cancelled_gc();
2216
}
2217
assert(!cancelled_gc(), "Should have been done right before");
2218
2219
if (ShenandoahVerify && !is_degenerated_gc_in_progress()) {
2220
verifier()->verify_roots_no_forwarded_except(ShenandoahRootVerifier::ThreadRoots);
2221
}
2222
2223
if (is_degenerated_gc_in_progress()) {
2224
concurrent_mark()->update_roots(ShenandoahPhaseTimings::degen_gc_update_roots);
2225
} else {
2226
concurrent_mark()->update_thread_roots(ShenandoahPhaseTimings::final_update_refs_roots);
2227
}
2228
2229
// Has to be done before cset is clear
2230
if (ShenandoahVerify) {
2231
verifier()->verify_roots_in_to_space();
2232
}
2233
2234
{
2235
ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_trash_cset);
2236
trash_cset_regions();
2237
}
2238
2239
set_has_forwarded_objects(false);
2240
set_update_refs_in_progress(false);
2241
2242
if (ShenandoahVerify) {
2243
verifier()->verify_after_updaterefs();
2244
}
2245
2246
if (VerifyAfterGC) {
2247
Universe::verify();
2248
}
2249
2250
{
2251
ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_update_region_states);
2252
ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl;
2253
parallel_heap_region_iterate(&cl);
2254
2255
assert_pinned_region_status();
2256
}
2257
2258
{
2259
ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_rebuild_freeset);
2260
ShenandoahHeapLocker locker(lock());
2261
_free_set->rebuild();
2262
}
2263
}
2264
2265
void ShenandoahHeap::print_extended_on(outputStream *st) const {
2266
print_on(st);
2267
print_heap_regions_on(st);
2268
}
2269
2270
bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2271
size_t slice = r->index() / _bitmap_regions_per_slice;
2272
2273
size_t regions_from = _bitmap_regions_per_slice * slice;
2274
size_t regions_to = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2275
for (size_t g = regions_from; g < regions_to; g++) {
2276
assert (g / _bitmap_regions_per_slice == slice, "same slice");
2277
if (skip_self && g == r->index()) continue;
2278
if (get_region(g)->is_committed()) {
2279
return true;
2280
}
2281
}
2282
return false;
2283
}
2284
2285
bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2286
shenandoah_assert_heaplocked();
2287
2288
// Bitmaps in special regions do not need commits
2289
if (_bitmap_region_special) {
2290
return true;
2291
}
2292
2293
if (is_bitmap_slice_committed(r, true)) {
2294
// Some other region from the group is already committed, meaning the bitmap
2295
// slice is already committed; exit right away.
2296
return true;
2297
}
2298
2299
// Commit the bitmap slice:
2300
size_t slice = r->index() / _bitmap_regions_per_slice;
2301
size_t off = _bitmap_bytes_per_slice * slice;
2302
size_t len = _bitmap_bytes_per_slice;
2303
char* start = (char*) _bitmap_region.start() + off;
2304
2305
if (!os::commit_memory(start, len, false)) {
2306
return false;
2307
}
2308
2309
if (AlwaysPreTouch) {
2310
os::pretouch_memory(start, start + len);
2311
}
2312
2313
return true;
2314
}
2315
2316
bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2317
shenandoah_assert_heaplocked();
2318
2319
// Bitmaps in special regions do not need uncommits
2320
if (_bitmap_region_special) {
2321
return true;
2322
}
2323
2324
if (is_bitmap_slice_committed(r, true)) {
2325
// Some other region from the group is still committed, meaning the bitmap
2326
// slice should stay committed; exit right away.
2327
return true;
2328
}
2329
2330
// Uncommit the bitmap slice:
2331
size_t slice = r->index() / _bitmap_regions_per_slice;
2332
size_t off = _bitmap_bytes_per_slice * slice;
2333
size_t len = _bitmap_bytes_per_slice;
2334
if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2335
return false;
2336
}
2337
return true;
2338
}
2339
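// A minimal, self-contained C++ sketch (not HotSpot code) of the slice
// arithmetic used by the bitmap commit/uncommit helpers above: several regions
// share one bitmap slice, a region maps to its slice by integer division, and
// the slice maps to a byte range within the bitmap reservation.
// DemoBitmapLayout is hypothetical.
#include <cstddef>

struct DemoBitmapLayout {
  size_t regions_per_slice;   // how many regions share one bitmap slice
  size_t bytes_per_slice;     // commit/uncommit granule for one slice

  size_t slice_for_region(size_t region_index) const {
    return region_index / regions_per_slice;
  }
  size_t slice_byte_offset(size_t region_index) const {
    // Byte offset of the region's slice within the bitmap reservation.
    return slice_for_region(region_index) * bytes_per_slice;
  }
};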
2340
void ShenandoahHeap::vmop_entry_init_mark() {
2341
TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2342
ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark_gross);
2343
2344
try_inject_alloc_failure();
2345
VM_ShenandoahInitMark op;
2346
VMThread::execute(&op); // jump to entry_init_mark() under safepoint
2347
}
2348
2349
void ShenandoahHeap::vmop_entry_final_mark() {
2350
TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2351
ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark_gross);
2352
2353
try_inject_alloc_failure();
2354
VM_ShenandoahFinalMarkStartEvac op;
2355
VMThread::execute(&op); // jump to entry_final_mark under safepoint
2356
}
2357
2358
void ShenandoahHeap::vmop_entry_init_updaterefs() {
2359
TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2360
ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_gross);
2361
2362
try_inject_alloc_failure();
2363
VM_ShenandoahInitUpdateRefs op;
2364
VMThread::execute(&op);
2365
}
2366
2367
void ShenandoahHeap::vmop_entry_final_updaterefs() {
2368
TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2369
ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_gross);
2370
2371
try_inject_alloc_failure();
2372
VM_ShenandoahFinalUpdateRefs op;
2373
VMThread::execute(&op);
2374
}
2375
2376
void ShenandoahHeap::vmop_entry_full(GCCause::Cause cause) {
2377
TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2378
ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_gross);
2379
2380
try_inject_alloc_failure();
2381
VM_ShenandoahFullGC op(cause);
2382
VMThread::execute(&op);
2383
}
2384
2385
void ShenandoahHeap::vmop_degenerated(ShenandoahDegenPoint point) {
2386
TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2387
ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_gross);
2388
2389
VM_ShenandoahDegeneratedGC degenerated_gc((int)point);
2390
VMThread::execute(&degenerated_gc);
2391
}
2392
2393
void ShenandoahHeap::entry_init_mark() {
2394
ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark);
2395
2396
const char* msg = init_mark_event_message();
2397
GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id());
2398
EventMark em("%s", msg);
2399
2400
ShenandoahWorkerScope scope(workers(),
2401
ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
2402
"init marking");
2403
2404
op_init_mark();
2405
}
2406
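// A minimal, self-contained C++ sketch (standard library, not HotSpot code) of
// the RAII scoping the entry_* wrappers here rely on: a scope object opens a
// timed phase in its constructor and closes it in its destructor, so the phase
// is always ended even on early returns. DemoPhaseScope is hypothetical.
#include <chrono>
#include <cstdio>

class DemoPhaseScope {
  const char* _name;
  std::chrono::steady_clock::time_point _start;
public:
  explicit DemoPhaseScope(const char* name)
    : _name(name), _start(std::chrono::steady_clock::now()) {}
  ~DemoPhaseScope() {
    long long us = std::chrono::duration_cast<std::chrono::microseconds>(
                       std::chrono::steady_clock::now() - _start).count();
    std::printf("%s: %lld us\n", _name, us);
  }
};
// Usage: { DemoPhaseScope scope("final mark"); /* do the phase work */ }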
2407
void ShenandoahHeap::entry_final_mark() {
2408
ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark);
2409
2410
const char* msg = final_mark_event_message();
2411
GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id());
2412
EventMark em("%s", msg);
2413
2414
ShenandoahWorkerScope scope(workers(),
2415
ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
2416
"final marking");
2417
2418
op_final_mark();
2419
}
2420
2421
void ShenandoahHeap::entry_init_updaterefs() {
2422
ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs);
2423
2424
static const char* msg = "Pause Init Update Refs";
2425
GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id());
2426
EventMark em("%s", msg);
2427
2428
// No workers used in this phase, no setup required
2429
2430
op_init_updaterefs();
2431
}
2432
2433
void ShenandoahHeap::entry_final_updaterefs() {
2434
ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs);
2435
2436
static const char* msg = "Pause Final Update Refs";
2437
GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id());
2438
EventMark em("%s", msg);
2439
2440
ShenandoahWorkerScope scope(workers(),
2441
ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
2442
"final reference update");
2443
2444
op_final_updaterefs();
2445
}
2446
2447
void ShenandoahHeap::entry_full(GCCause::Cause cause) {
2448
ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc);
2449
2450
static const char* msg = "Pause Full";
2451
GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id(), true);
2452
EventMark em("%s", msg);
2453
2454
ShenandoahWorkerScope scope(workers(),
2455
ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
2456
"full gc");
2457
2458
op_full(cause);
2459
}
2460
2461
void ShenandoahHeap::entry_degenerated(int point) {
2462
ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc);
2463
2464
ShenandoahDegenPoint dpoint = (ShenandoahDegenPoint)point;
2465
const char* msg = degen_event_message(dpoint);
2466
GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id(), true);
2467
EventMark em("%s", msg);
2468
2469
ShenandoahWorkerScope scope(workers(),
2470
ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),
2471
"stw degenerated gc");
2472
2473
set_degenerated_gc_in_progress(true);
2474
op_degenerated(dpoint);
2475
set_degenerated_gc_in_progress(false);
2476
}
2477
2478
void ShenandoahHeap::entry_mark() {
2479
TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2480
2481
const char* msg = conc_mark_event_message();
2482
GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id());
2483
EventMark em("%s", msg);
2484
2485
ShenandoahWorkerScope scope(workers(),
2486
ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
2487
"concurrent marking");
2488
2489
try_inject_alloc_failure();
2490
op_mark();
2491
}
2492
2493
void ShenandoahHeap::entry_evac() {
2494
ShenandoahGCPhase conc_evac_phase(ShenandoahPhaseTimings::conc_evac);
2495
TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2496
2497
static const char *msg = "Concurrent evacuation";
2498
GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id());
2499
EventMark em("%s", msg);
2500
2501
ShenandoahWorkerScope scope(workers(),
2502
ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
2503
"concurrent evacuation");
2504
2505
try_inject_alloc_failure();
2506
op_conc_evac();
2507
}
2508
2509
void ShenandoahHeap::entry_updaterefs() {
2510
ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_update_refs);
2511
2512
static const char* msg = "Concurrent update references";
2513
GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id());
2514
EventMark em("%s", msg);
2515
2516
ShenandoahWorkerScope scope(workers(),
2517
ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
2518
"concurrent reference update");
2519
2520
try_inject_alloc_failure();
2521
op_updaterefs();
2522
}
2523
2524
void ShenandoahHeap::entry_cleanup_early() {
2525
ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup_early);
2526
2527
static const char* msg = "Concurrent cleanup";
2528
GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id(), true);
2529
EventMark em("%s", msg);
2530
2531
// This phase does not use workers, no need for setup
2532
2533
try_inject_alloc_failure();
2534
op_cleanup_early();
2535
}
2536
2537
void ShenandoahHeap::entry_cleanup_complete() {
2538
ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup_complete);
2539
2540
static const char* msg = "Concurrent cleanup";
2541
GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id(), true);
2542
EventMark em("%s", msg);
2543
2544
// This phase does not use workers, no need for setup
2545
2546
try_inject_alloc_failure();
2547
op_cleanup_complete();
2548
}
2549
2550
void ShenandoahHeap::entry_reset() {
2551
ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_reset);
2552
2553
static const char* msg = "Concurrent reset";
2554
GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id());
2555
EventMark em("%s", msg);
2556
2557
ShenandoahWorkerScope scope(workers(),
2558
ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
2559
"concurrent reset");
2560
2561
try_inject_alloc_failure();
2562
op_reset();
2563
}
2564
2565
void ShenandoahHeap::entry_preclean() {
2566
if (ShenandoahPreclean && process_references()) {
2567
ShenandoahGCPhase conc_preclean(ShenandoahPhaseTimings::conc_preclean);
2568
2569
static const char* msg = "Concurrent precleaning";
2570
GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id());
2571
EventMark em("%s", msg);
2572
2573
ShenandoahWorkerScope scope(workers(),
2574
ShenandoahWorkerPolicy::calc_workers_for_conc_preclean(),
2575
"concurrent preclean",
2576
/* check_workers = */ false);
2577
2578
try_inject_alloc_failure();
2579
op_preclean();
2580
}
2581
}
2582
2583
void ShenandoahHeap::entry_uncommit(double shrink_before, size_t shrink_until) {
2584
static const char *msg = "Concurrent uncommit";
2585
GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id(), true);
2586
EventMark em("%s", msg);
2587
2588
ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_uncommit);
2589
2590
op_uncommit(shrink_before, shrink_until);
2591
}
2592
2593
void ShenandoahHeap::try_inject_alloc_failure() {
2594
if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2595
_inject_alloc_failure.set();
2596
os::naked_short_sleep(1);
2597
if (cancelled_gc()) {
2598
log_info(gc)("Allocation failure was successfully injected");
2599
}
2600
}
2601
}
2602
2603
bool ShenandoahHeap::should_inject_alloc_failure() {
2604
return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2605
}
2606
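// A minimal, self-contained C++ sketch (standard library, not HotSpot code) of
// the probabilistic gate used by try_inject_alloc_failure() above, where a draw
// of (random % 1000) > 950 trips the failure path on roughly 5% of calls.
// demo_should_inject_failure is hypothetical.
#include <cstdlib>

static bool demo_should_inject_failure() {
  // Values 951..999 pass the test: about 49 of every 1000 draws, i.e. ~5%.
  return (std::rand() % 1000) > 950;
}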
2607
void ShenandoahHeap::enter_evacuation() {
2608
_oom_evac_handler.enter_evacuation();
2609
}
2610
2611
void ShenandoahHeap::leave_evacuation() {
2612
_oom_evac_handler.leave_evacuation();
2613
}
2614
2615
ShenandoahRegionIterator::ShenandoahRegionIterator() :
2616
_heap(ShenandoahHeap::heap()),
2617
_index(0) {}
2618
2619
ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2620
_heap(heap),
2621
_index(0) {}
2622
2623
void ShenandoahRegionIterator::reset() {
2624
_index = 0;
2625
}
2626
2627
bool ShenandoahRegionIterator::has_next() const {
2628
return _index < (jint)_heap->num_regions();
2629
}
2630
2631
char ShenandoahHeap::gc_state() {
2632
return _gc_state.raw_value();
2633
}
2634
2635
const char* ShenandoahHeap::init_mark_event_message() const {
2636
assert(!has_forwarded_objects(), "Should not have forwarded objects here");
2637
2638
bool proc_refs = process_references();
2639
bool unload_cls = unload_classes();
2640
2641
if (proc_refs && unload_cls) {
2642
return "Pause Init Mark (process weakrefs) (unload classes)";
2643
} else if (proc_refs) {
2644
return "Pause Init Mark (process weakrefs)";
2645
} else if (unload_cls) {
2646
return "Pause Init Mark (unload classes)";
2647
} else {
2648
return "Pause Init Mark";
2649
}
2650
}
2651
2652
const char* ShenandoahHeap::final_mark_event_message() const {
2653
assert(!has_forwarded_objects(), "Should not have forwarded objects here");
2654
2655
bool proc_refs = process_references();
2656
bool unload_cls = unload_classes();
2657
2658
if (proc_refs && unload_cls) {
2659
return "Pause Final Mark (process weakrefs) (unload classes)";
2660
} else if (proc_refs) {
2661
return "Pause Final Mark (process weakrefs)";
2662
} else if (unload_cls) {
2663
return "Pause Final Mark (unload classes)";
2664
} else {
2665
return "Pause Final Mark";
2666
}
2667
}
2668
2669
const char* ShenandoahHeap::conc_mark_event_message() const {
2670
assert(!has_forwarded_objects(), "Should not have forwarded objects here");
2671
2672
bool proc_refs = process_references();
2673
bool unload_cls = unload_classes();
2674
2675
if (proc_refs && unload_cls) {
2676
return "Concurrent marking (process weakrefs) (unload classes)";
2677
} else if (proc_refs) {
2678
return "Concurrent marking (process weakrefs)";
2679
} else if (unload_cls) {
2680
return "Concurrent marking (unload classes)";
2681
} else {
2682
return "Concurrent marking";
2683
}
2684
}
2685
2686
const char* ShenandoahHeap::degen_event_message(ShenandoahDegenPoint point) const {
2687
switch (point) {
2688
case _degenerated_unset:
2689
return "Pause Degenerated GC (<UNSET>)";
2690
case _degenerated_outside_cycle:
2691
return "Pause Degenerated GC (Outside of Cycle)";
2692
case _degenerated_mark:
2693
return "Pause Degenerated GC (Mark)";
2694
case _degenerated_evac:
2695
return "Pause Degenerated GC (Evacuation)";
2696
case _degenerated_updaterefs:
2697
return "Pause Degenerated GC (Update Refs)";
2698
default:
2699
ShouldNotReachHere();
2700
return "ERROR";
2701
}
2702
}
2703
2704
ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2705
#ifdef ASSERT
2706
assert(_liveness_cache != NULL, "sanity");
2707
assert(worker_id < _max_workers, "sanity");
2708
for (uint i = 0; i < num_regions(); i++) {
2709
assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2710
}
2711
#endif
2712
return _liveness_cache[worker_id];
2713
}
2714
2715
void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2716
assert(worker_id < _max_workers, "sanity");
2717
assert(_liveness_cache != NULL, "sanity");
2718
ShenandoahLiveData* ld = _liveness_cache[worker_id];
2719
for (uint i = 0; i < num_regions(); i++) {
2720
ShenandoahLiveData live = ld[i];
2721
if (live > 0) {
2722
ShenandoahHeapRegion* r = get_region(i);
2723
r->increase_live_data_gc_words(live);
2724
ld[i] = 0;
2725
}
2726
}
2727
}
2728
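// A minimal, self-contained C++ sketch (standard library, not HotSpot code) of
// the per-worker liveness cache used above: each worker accumulates live data
// per region locally and later flushes the non-zero entries into the shared
// region counters, avoiding contended updates during marking.
// DemoLivenessCache is hypothetical.
#include <vector>
#include <cstddef>
#include <cstdint>

struct DemoLivenessCache {
  std::vector<uint32_t> per_region_live;   // one counter per heap region

  explicit DemoLivenessCache(size_t num_regions) : per_region_live(num_regions, 0) {}

  void record(size_t region_index, uint32_t live_words) {
    per_region_live[region_index] += live_words;  // thread-local, no atomics needed
  }

  template <typename FlushFn>
  void flush(FlushFn flush_to_region) {
    for (size_t i = 0; i < per_region_live.size(); i++) {
      if (per_region_live[i] > 0) {
        flush_to_region(i, per_region_live[i]);   // one shared update per touched region
        per_region_live[i] = 0;                   // leave the cache empty, as asserted above
      }
    }
  }
};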
2729