GitHub Repository: PojavLauncherTeam/jdk17u
Path: blob/master/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp
/*
 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/metaspaceStats.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"

ShenandoahControlThread::ShenandoahControlThread() :
  ConcurrentGCThread(),
  _alloc_failure_waiters_lock(Mutex::leaf, "ShenandoahAllocFailureGC_lock", true, Monitor::_safepoint_check_always),
  _gc_waiters_lock(Mutex::leaf, "ShenandoahRequestedGC_lock", true, Monitor::_safepoint_check_always),
  _periodic_task(this),
  _requested_gc_cause(GCCause::_no_cause_specified),
  _degen_point(ShenandoahGC::_degenerated_outside_cycle),
  _allocs_seen(0) {

  reset_gc_id();
  create_and_start();
  _periodic_task.enroll();
  if (ShenandoahPacing) {
    _periodic_pacer_notify_task.enroll();
  }
}

ShenandoahControlThread::~ShenandoahControlThread() {
  // This is here so that super is called.
}

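// Periodic task: performs any pending monitoring counter updates, whether
// requested by the allocation path or forced for the duration of a GC cycle.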
void ShenandoahPeriodicTask::task() {
  _thread->handle_force_counters_update();
  _thread->handle_counters_update();
}

void ShenandoahPeriodicPacerNotify::task() {
  assert(ShenandoahPacing, "Should not be here otherwise");
  ShenandoahHeap::heap()->pacer()->notify_waiters();
}

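// Main loop of the control thread: pick the GC mode for this iteration
// (concurrent, degenerated or full), run the cycle, notify waiters, and
// then sleep, backing off exponentially while the heap is idle.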
void ShenandoahControlThread::run_service() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  GCMode default_mode = concurrent_normal;
  GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;
  int sleep = ShenandoahControlIntervalMin;

  double last_shrink_time = os::elapsedTime();
  double last_sleep_adjust_time = os::elapsedTime();

  // Shrink period avoids constantly polling regions for shrinking.
  // Having a period 10x shorter than the delay means we detect shrinking
  // opportunities with a lag of less than 1/10th of the true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
  double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;

  ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();
  ShenandoahHeuristics* heuristics = heap->heuristics();
  while (!in_graceful_shutdown() && !should_terminate()) {
    // Figure out if we have pending requests.
    bool alloc_failure_pending = _alloc_failure_gc.is_set();
    bool is_gc_requested = _gc_requested.is_set();
    GCCause::Cause requested_gc_cause = _requested_gc_cause;
    bool explicit_gc_requested = is_gc_requested && is_explicit_gc(requested_gc_cause);
    bool implicit_gc_requested = is_gc_requested && !is_explicit_gc(requested_gc_cause);

    // This control loop iteration has seen this many allocations.
    size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0, memory_order_relaxed);

    // Check if we have seen a new target for soft max heap size.
    bool soft_max_changed = check_soft_max_changed();

    // Choose which GC mode to run in. The block below should select a single mode.
    GCMode mode = none;
    GCCause::Cause cause = GCCause::_last_gc_cause;
    ShenandoahGC::ShenandoahDegenPoint degen_point = ShenandoahGC::_degenerated_unset;

    if (alloc_failure_pending) {
      // Allocation failure takes precedence: we have to deal with it first thing
      log_info(gc)("Trigger: Handle Allocation Failure");

      cause = GCCause::_allocation_failure;

      // Consume the degen point, and seed it with default value
      degen_point = _degen_point;
      _degen_point = ShenandoahGC::_degenerated_outside_cycle;

      if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_degenerated(degen_point);
        mode = stw_degenerated;
      } else {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_full();
        mode = stw_full;
      }

    } else if (explicit_gc_requested) {
      cause = requested_gc_cause;
      log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ExplicitGCInvokesConcurrent) {
        policy->record_explicit_to_concurrent();
        mode = default_mode;
        // Unload and clean up everything
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_explicit_to_full();
        mode = stw_full;
      }
    } else if (implicit_gc_requested) {
      cause = requested_gc_cause;
      log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ShenandoahImplicitGCInvokesConcurrent) {
        policy->record_implicit_to_concurrent();
        mode = default_mode;

        // Unload and clean up everything
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_implicit_to_full();
        mode = stw_full;
      }
    } else {
      // Potential normal cycle: ask heuristics if it wants to act
      if (heuristics->should_start_gc()) {
        mode = default_mode;
        cause = default_cause;
      }

      // Ask policy if this cycle wants to process references or unload classes
      heap->set_unload_classes(heuristics->should_unload_classes());
    }

    // Clear all soft references on this cycle if we are handling an allocation
    // failure, an implicit or explicit GC request, or if we are asked to do so
    // unconditionally.
    if (alloc_failure_pending || implicit_gc_requested || explicit_gc_requested || ShenandoahAlwaysClearSoftRefs) {
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
    }

    bool gc_requested = (mode != none);
    assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");

    if (gc_requested) {
      // GC is starting, bump the internal ID
      update_gc_id();

      heap->reset_bytes_allocated_since_gc_start();

      MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();

      // If GC was requested, we are sampling the counters even without actual triggers
      // from allocation machinery. This captures GC phases more accurately.
      set_forced_counters_update(true);

      // If GC was requested, we better dump freeset data for performance debugging
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();
      }

      switch (mode) {
        case concurrent_normal:
          service_concurrent_normal_cycle(cause);
          break;
        case stw_degenerated:
          service_stw_degenerated_cycle(cause, degen_point);
          break;
        case stw_full:
          service_stw_full_cycle(cause);
          break;
        default:
          ShouldNotReachHere();
      }

      // If this was the requested GC cycle, notify waiters about it
      if (explicit_gc_requested || implicit_gc_requested) {
        notify_gc_waiters();
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }

      // Report current free set state at the end of cycle, whether
      // it is a normal completion, or the abort.
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();

        // Notify Universe about new heap usage. This has implications for
        // global soft refs policy, and we better report it every time heap
        // usage goes down.
        Universe::heap()->update_capacity_and_used_at_gc();

        // Signal that we have completed a visit to all live objects.
        Universe::heap()->record_whole_heap_examined_timestamp();
      }

      // Disable forced counters update, and update counters one more time
      // to capture the state at the end of GC session.
      handle_force_counters_update();
      set_forced_counters_update(false);

      // Retract forceful part of soft refs policy
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);

      // Clear metaspace oom flag, if current cycle unloaded classes
      if (heap->unload_classes()) {
        heuristics->clear_metaspace_oom();
      }

      // Commit worker statistics to cycle data
      heap->phase_timings()->flush_par_workers_to_cycle();
      if (ShenandoahPacing) {
        heap->pacer()->flush_stats_to_cycle();
      }

      // Print GC stats for current cycle
      {
        LogTarget(Info, gc, stats) lt;
        if (lt.is_enabled()) {
          ResourceMark rm;
          LogStream ls(lt);
          heap->phase_timings()->print_cycle_on(&ls);
          if (ShenandoahPacing) {
            heap->pacer()->print_cycle_on(&ls);
          }
        }
      }

      // Commit statistics to globals
      heap->phase_timings()->flush_cycle_to_global();

      // Print Metaspace change following GC (if logging is enabled).
      MetaspaceUtils::print_metaspace_change(meta_sizes);

      // GC is over, we are at idle now
      if (ShenandoahPacing) {
        heap->pacer()->setup_for_idle();
      }
    } else {
      // Report to the pacer how much allocation we have seen during this interval.
      if (ShenandoahPacing && (allocs_seen > 0)) {
        heap->pacer()->report_alloc(allocs_seen);
      }
    }

    double current = os::elapsedTime();

    if (ShenandoahUncommit && (explicit_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
      // Explicit GC tries to uncommit everything down to min capacity.
      // Soft max change tries to uncommit everything down to target capacity.
      // Periodic uncommit tries to uncommit suitable regions down to min capacity.

      double shrink_before = (explicit_gc_requested || soft_max_changed) ?
                             current :
                             current - (ShenandoahUncommitDelay / 1000.0);

      size_t shrink_until = soft_max_changed ?
                             heap->soft_max_capacity() :
                             heap->min_capacity();

      service_uncommit(shrink_before, shrink_until);
      heap->phase_timings()->flush_cycle_to_global();
      last_shrink_time = current;
    }

    // Wait before performing the next action. If allocation happened during this wait,
    // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle,
    // back off exponentially.
    if (_heap_changed.try_unset()) {
      sleep = ShenandoahControlIntervalMin;
    } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod) {
      sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
      last_sleep_adjust_time = current;
    }
    os::naked_short_sleep(sleep);
  }

  // Wait for the actual stop(), can't leave run_service() earlier.
  while (!should_terminate()) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
}

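// Check whether the manageable SoftMaxHeapSize flag has changed, clamp the new
// value into [min_capacity, max_capacity], and install it. Returns true if the
// soft max capacity was actually updated.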
bool ShenandoahControlThread::check_soft_max_changed() const {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
  size_t old_soft_max = heap->soft_max_capacity();
  if (new_soft_max != old_soft_max) {
    new_soft_max = MAX2(heap->min_capacity(), new_soft_max);
    new_soft_max = MIN2(heap->max_capacity(), new_soft_max);
    if (new_soft_max != old_soft_max) {
      log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
                   byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
                   byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
      );
      heap->set_soft_max_capacity(new_soft_max);
      return true;
    }
  }
  return false;
}

void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
  // Normal cycle goes via all concurrent phases. If allocation failure (af) happens during
  // any of the concurrent phases, it first degrades to Degenerated GC and completes GC there.
  // If a second allocation failure happens during the Degenerated GC cycle (for example, when
  // GC tries to evac something and no memory is available), the cycle degrades to Full GC.
  //
  // There is also a shortcut through the normal cycle: the immediate garbage shortcut, when
  // heuristics say there are no regions to compact, and all the collection comes from immediately
  // reclaimable regions.
  //
  // ................................................................................................
  //
  //                                    (immediate garbage shortcut)                Concurrent GC
  //                             /-------------------------------------------\
  //                             |                                           |
  //                             |                                           |
  //                             |                                           |
  //                             |                                           v
  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    |                 |              |      Degenerated GC
  //                   v                    v                 v              |
  //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    v                 |              |      Full GC
  //                   \------------------->o<----------------/              |
  //                                        |                                |
  //                                        v                                |
  //                                      Full GC  --------------------------/
  //
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) return;

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  ShenandoahConcurrentGC gc;
  if (gc.collect(cause)) {
    // Cycle is complete
    heap->heuristics()->record_success_concurrent();
    heap->shenandoah_policy()->record_success_concurrent();
  } else {
    assert(heap->cancelled_gc(), "Must have been cancelled");
    check_cancellation_or_degen(gc.degen_point());
  }
}

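// Check whether the running cycle has been cancelled. If it was cancelled for an
// allocation failure (rather than graceful shutdown), remember the supplied
// degeneration point so the next cycle can continue from there as a Degenerated GC.
// Returns true if the caller should abandon the current cycle.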
bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->cancelled_gc()) {
    assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting");
    if (!in_graceful_shutdown()) {
      assert (_degen_point == ShenandoahGC::_degenerated_outside_cycle,
              "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point));
      _degen_point = point;
    }
    return true;
  }
  return false;
}

void ShenandoahControlThread::stop_service() {
  // Nothing to do here.
}

void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahFullGC gc;
  gc.collect(cause);

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->heuristics()->record_success_full();
  heap->shenandoah_policy()->record_success_full();
}

void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahGC::ShenandoahDegenPoint point) {
  assert (point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set");

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahDegenGC gc(point);
  gc.collect(cause);

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->heuristics()->record_success_degenerated();
  heap->shenandoah_policy()->record_success_degenerated();
}

void ShenandoahControlThread::service_uncommit(double shrink_before, size_t shrink_until) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Determine if there is work to do. This avoids taking heap lock if there is
  // no work available, avoids spamming logs with superfluous logging messages,
  // and minimises the amount of work while locks are taken.

  if (heap->committed() <= shrink_until) return;

  bool has_work = false;
  for (size_t i = 0; i < heap->num_regions(); i++) {
    ShenandoahHeapRegion *r = heap->get_region(i);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      has_work = true;
      break;
    }
  }

  if (has_work) {
    heap->entry_uncommit(shrink_before, shrink_until);
  }
}

bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
  return GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause);
}

void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
  assert(GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause) ||
         cause == GCCause::_metadata_GC_clear_soft_refs ||
         cause == GCCause::_full_gc_alot ||
         cause == GCCause::_wb_full_gc ||
         cause == GCCause::_wb_breakpoint ||
         cause == GCCause::_scavenge_alot,
         "only requested GCs here");

  if (is_explicit_gc(cause)) {
    if (!DisableExplicitGC) {
      handle_requested_gc(cause);
    }
  } else {
    handle_requested_gc(cause);
  }
}

void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
  // Make sure we have at least one complete GC cycle before unblocking
  // from the explicit GC request.
  //
  // This is especially important for weak references cleanup and/or native
  // resources (e.g. DirectByteBuffers) machinery: when explicit GC request
  // comes very late in the already running cycle, it would miss lots of new
  // opportunities for cleanup that were made available before the caller
  // requested the GC.

  MonitorLocker ml(&_gc_waiters_lock);
  size_t current_gc_id = get_gc_id();
  size_t required_gc_id = current_gc_id + 1;
  while (current_gc_id < required_gc_id) {
    // Although setting the gc request is done under _gc_waiters_lock, the read side
    // (run_service()) does not take the lock. We need to enforce the following order,
    // so that the read side sees the latest requested gc cause when the flag is set.
    _requested_gc_cause = cause;
    _gc_requested.set();

    if (cause != GCCause::_wb_breakpoint) {
      ml.wait();
    }
    current_gc_id = get_gc_id();
  }
}

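// Called by a Java thread whose allocation failed: schedule an allocation-failure GC
// (the first failing thread also cancels the running concurrent cycle), then block
// until the allocation-failure GC has completed.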
void ShenandoahControlThread::handle_alloc_failure(ShenandoahAllocRequest& req) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  assert(current()->is_Java_thread(), "expect Java thread here");

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate %s, " SIZE_FORMAT "%s",
                 req.type_string(),
                 byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize));

    // Now that alloc failure GC is scheduled, we can abort everything else
    heap->cancel_gc(GCCause::_allocation_failure);
  }

  MonitorLocker ml(&_alloc_failure_waiters_lock);
  while (is_alloc_failure_gc()) {
    ml.wait();
  }
}

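// Allocation for evacuation failed: record the allocation failure and cancel the
// concurrent cycle. Unlike handle_alloc_failure(), this does not block the caller.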
void ShenandoahControlThread::handle_alloc_failure_evac(size_t words) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s for evacuation",
                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));
  }

  // Forcefully report allocation failure
  heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac);
}

void ShenandoahControlThread::notify_alloc_failure_waiters() {
  _alloc_failure_gc.unset();
  MonitorLocker ml(&_alloc_failure_waiters_lock);
  ml.notify_all();
}

bool ShenandoahControlThread::try_set_alloc_failure_gc() {
  return _alloc_failure_gc.try_set();
}

bool ShenandoahControlThread::is_alloc_failure_gc() {
  return _alloc_failure_gc.is_set();
}

void ShenandoahControlThread::notify_gc_waiters() {
  _gc_requested.unset();
  MonitorLocker ml(&_gc_waiters_lock);
  ml.notify_all();
}

void ShenandoahControlThread::handle_counters_update() {
  if (_do_counters_update.is_set()) {
    _do_counters_update.unset();
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::handle_force_counters_update() {
  if (_force_counters_update.is_set()) {
    _do_counters_update.unset(); // reset these too, we do update now!
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::notify_heap_changed() {
  // This is called from the allocation path, and thus should be fast.

  // Update monitoring counters when we took a new region. This amortizes the
  // update costs on slow path.
  if (_do_counters_update.is_unset()) {
    _do_counters_update.set();
  }
  // Notify that something has changed.
  if (_heap_changed.is_unset()) {
    _heap_changed.set();
  }
}

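// Called from the allocation path when pacing is enabled: accumulate the number of
// allocated words; run_service() consumes and resets this counter on each iteration.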
void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
  assert(ShenandoahPacing, "should only call when pacing is enabled");
  Atomic::add(&_allocs_seen, words, memory_order_relaxed);
}

void ShenandoahControlThread::set_forced_counters_update(bool value) {
  _force_counters_update.set_cond(value);
}

void ShenandoahControlThread::reset_gc_id() {
  Atomic::store(&_gc_id, (size_t)0);
}

void ShenandoahControlThread::update_gc_id() {
  Atomic::inc(&_gc_id);
}

size_t ShenandoahControlThread::get_gc_id() {
  return Atomic::load(&_gc_id);
}

void ShenandoahControlThread::print() const {
  print_on(tty);
}

void ShenandoahControlThread::print_on(outputStream* st) const {
  st->print("Shenandoah Concurrent Thread");
  Thread::print_on(st);
  st->cr();
}

void ShenandoahControlThread::start() {
  create_and_start();
}

void ShenandoahControlThread::prepare_for_graceful_shutdown() {
  _graceful_shutdown.set();
}

bool ShenandoahControlThread::in_graceful_shutdown() {
  return _graceful_shutdown.is_set();
}