Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahControlThread.cpp
/*
 * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shenandoah/shenandoahGCTraceTime.hpp"
#include "gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahControlThread.hpp"
#include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc_implementation/shenandoah/shenandoahFreeSet.hpp"
#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc_implementation/shenandoah/shenandoahUtils.hpp"
#include "gc_implementation/shenandoah/shenandoahVMOperations.hpp"
#include "gc_implementation/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc_implementation/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"

#ifdef _WINDOWS
#pragma warning(disable : 4355)
#endif

SurrogateLockerThread* ShenandoahControlThread::_slt = NULL;

ShenandoahControlThread::ShenandoahControlThread() :
  ConcurrentGCThread(),
  _alloc_failure_waiters_lock(Mutex::leaf, "ShenandoahAllocFailureGC_lock", true),
  _gc_waiters_lock(Mutex::leaf, "ShenandoahRequestedGC_lock", true),
  _periodic_task(this),
  _requested_gc_cause(GCCause::_no_cause_specified),
  _degen_point(ShenandoahHeap::_degenerated_outside_cycle),
  _allocs_seen(0) {

  reset_gc_id();
  if (os::create_thread(this, os::cgc_thread)) {
    os::set_native_priority(this, os::java_to_os_priority[NearMaxPriority]);
    if (!_should_terminate && !DisableStartThread) {
      os::start_thread(this);
    }
  }

  _periodic_task.enroll();
  _periodic_satb_flush_task.enroll();
  if (ShenandoahPacing) {
    _periodic_pacer_notify_task.enroll();
  }
}

ShenandoahControlThread::~ShenandoahControlThread() {
  // This is here so that super is called.
}

void ShenandoahPeriodicTask::task() {
  _thread->handle_force_counters_update();
  _thread->handle_counters_update();
}

void ShenandoahPeriodicSATBFlushTask::task() {
  ShenandoahHeap::heap()->force_satb_flush_all_threads();
}

void ShenandoahPeriodicPacerNotify::task() {
  assert(ShenandoahPacing, "Should not be here otherwise");
  ShenandoahHeap::heap()->pacer()->notify_waiters();
}

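// Main loop of the Shenandoah control thread: it waits for the surrogate locker thread,
// then repeatedly decides which GC mode to run (concurrent normal, degenerated, full, or
// none) based on pending allocation failures, explicit/implicit GC requests and heuristics,
// and periodically uncommits empty regions.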
void ShenandoahControlThread::run() {
  initialize_in_thread();

  wait_for_universe_init();

  // Wait until we have the surrogate locker thread in place.
  {
    MutexLockerEx x(CGC_lock, true);
    while (_slt == NULL && !_should_terminate) {
      CGC_lock->wait(true, 200);
    }
  }

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  GCMode default_mode = concurrent_normal;
  GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;
  int sleep = ShenandoahControlIntervalMin;

  double last_shrink_time = os::elapsedTime();
  double last_sleep_adjust_time = os::elapsedTime();

  // Shrink period avoids constantly polling regions for shrinking.
  // Having a period 10x lower than the delay would mean we hit the
  // shrinking with lag of less than 1/10-th of true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
  double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;

  ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();

  ShenandoahHeuristics* heuristics = heap->heuristics();
  while (!in_graceful_shutdown() && !_should_terminate) {
    // Figure out if we have pending requests.
    bool alloc_failure_pending = _alloc_failure_gc.is_set();
    bool explicit_gc_requested = _gc_requested.is_set() && is_explicit_gc(_requested_gc_cause);
    bool implicit_gc_requested = _gc_requested.is_set() && !is_explicit_gc(_requested_gc_cause);

    // This control loop iteration has seen this many allocations.
    intptr_t allocs_seen = (intptr_t)(Atomic::xchg_ptr(0, &_allocs_seen));

    // Check if we have seen a new target for soft max heap size.
    bool soft_max_changed = check_soft_max_changed();

    // Choose which GC mode to run in. The block below should select a single mode.
    GCMode mode = none;
    GCCause::Cause cause = GCCause::_last_gc_cause;
    ShenandoahHeap::ShenandoahDegenPoint degen_point = ShenandoahHeap::_degenerated_unset;

    if (alloc_failure_pending) {
      // Allocation failure takes precedence: we have to deal with it first thing
      log_info(gc)("Trigger: Handle Allocation Failure");

      cause = GCCause::_allocation_failure;

      // Consume the degen point, and seed it with default value
      degen_point = _degen_point;
      _degen_point = ShenandoahHeap::_degenerated_outside_cycle;

      if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_degenerated(degen_point);
        mode = stw_degenerated;
      } else {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_full();
        mode = stw_full;
      }

    } else if (explicit_gc_requested) {
      cause = _requested_gc_cause;
      log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ExplicitGCInvokesConcurrent) {
        policy->record_explicit_to_concurrent();
        mode = default_mode;
        // Unload and clean up everything
        heap->set_process_references(heuristics->can_process_references());
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_explicit_to_full();
        mode = stw_full;
      }
    } else if (implicit_gc_requested) {
      cause = _requested_gc_cause;
      log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ShenandoahImplicitGCInvokesConcurrent) {
        policy->record_implicit_to_concurrent();
        mode = default_mode;

        // Unload and clean up everything
        heap->set_process_references(heuristics->can_process_references());
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_implicit_to_full();
        mode = stw_full;
      }
    } else {
      // Potential normal cycle: ask heuristics if it wants to act
      if (heuristics->should_start_gc()) {
        mode = default_mode;
        cause = default_cause;
      }

      // Ask policy if this cycle wants to process references or unload classes
      heap->set_process_references(heuristics->should_process_references());
      heap->set_unload_classes(heuristics->should_unload_classes());
    }

    // Clear all soft references on this cycle if we are handling an allocation failure,
    // an implicit or explicit GC request, or if we are asked to do so unconditionally.
    if (alloc_failure_pending || implicit_gc_requested || explicit_gc_requested || ShenandoahAlwaysClearSoftRefs) {
      heap->collector_policy()->set_should_clear_all_soft_refs(true);
    }

    bool gc_requested = (mode != none);
    assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");

    if (gc_requested) {
      // GC is starting, bump the internal ID
      update_gc_id();

      heap->reset_bytes_allocated_since_gc_start();

      // Capture metaspace usage before GC.
      const size_t metadata_prev_used = MetaspaceAux::used_bytes();

      // If GC was requested, we are sampling the counters even without actual triggers
      // from allocation machinery. This captures GC phases more accurately.
      set_forced_counters_update(true);

      // If GC was requested, we better dump freeset data for performance debugging
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();
      }

      switch (mode) {
        case none:
          break;
        case concurrent_normal:
          service_concurrent_normal_cycle(cause);
          break;
        case stw_degenerated:
          service_stw_degenerated_cycle(cause, degen_point);
          break;
        case stw_full:
          service_stw_full_cycle(cause);
          break;
        default:
          ShouldNotReachHere();
      }

      // If this was the requested GC cycle, notify waiters about it
      if (explicit_gc_requested || implicit_gc_requested) {
        notify_gc_waiters();
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }

      // Report current free set state at the end of the cycle, whether
      // it is a normal completion or an abort.
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();

        // Notify Universe about new heap usage. This has implications for
        // global soft refs policy, and we better report it every time heap
        // usage goes down.
        Universe::update_heap_info_at_gc();
      }

      // Disable forced counters update, and update counters one more time
      // to capture the state at the end of GC session.
      handle_force_counters_update();
      set_forced_counters_update(false);

      // Retract forceful part of soft refs policy
      heap->collector_policy()->set_should_clear_all_soft_refs(false);

      // Clear metaspace oom flag, if current cycle unloaded classes
      if (heap->unload_classes()) {
        heuristics->clear_metaspace_oom();
      }

      // Commit worker statistics to cycle data
      heap->phase_timings()->flush_par_workers_to_cycle();
      if (ShenandoahPacing) {
        heap->pacer()->flush_stats_to_cycle();
      }

      // Print GC stats for current cycle
      if (PrintGCDetails) {
        ResourceMark rm;
        heap->phase_timings()->print_cycle_on(gclog_or_tty);
        if (ShenandoahPacing) {
          heap->pacer()->print_cycle_on(gclog_or_tty);
        }
      }

      // Commit statistics to globals
      heap->phase_timings()->flush_cycle_to_global();

      // Print Metaspace change following GC (if logging is enabled).
      if (PrintGCDetails) {
        MetaspaceAux::print_metaspace_change(metadata_prev_used);
      }

      // GC is over, we are at idle now
      if (ShenandoahPacing) {
        heap->pacer()->setup_for_idle();
      }
    } else {
      // Allow allocators to know we have seen this many regions
      if (ShenandoahPacing && (allocs_seen > 0)) {
        heap->pacer()->report_alloc(allocs_seen);
      }
    }

    double current = os::elapsedTime();

    if (ShenandoahUncommit && (explicit_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
      // Explicit GC tries to uncommit everything down to min capacity.
      // Soft max change tries to uncommit everything down to target capacity.
      // Periodic uncommit tries to uncommit suitable regions down to min capacity.

      double shrink_before = (explicit_gc_requested || soft_max_changed) ?
                             current :
                             current - (ShenandoahUncommitDelay / 1000.0);

      size_t shrink_until = soft_max_changed ?
                             heap->soft_max_capacity() :
                             heap->min_capacity();

      service_uncommit(shrink_before, shrink_until);
      heap->phase_timings()->flush_cycle_to_global();
      last_shrink_time = current;
    }

    // Wait before performing the next action. If allocation happened during this wait,
    // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle,
    // back off exponentially.
    if (_heap_changed.try_unset()) {
      sleep = ShenandoahControlIntervalMin;
    } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod) {
      sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
      last_sleep_adjust_time = current;
    }
    os::naked_short_sleep(sleep);
  }

  // Wait for the actual stop(), can't leave run() earlier.
  while (!_should_terminate) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
  terminate();
}

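// Detects changes to ShenandoahSoftMaxHeapSize, clamps the new value into
// [min_capacity, max_capacity], installs it as the new soft max capacity,
// and returns true if the soft max actually changed.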
bool ShenandoahControlThread::check_soft_max_changed() const {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  size_t new_soft_max = OrderAccess::load_acquire(&ShenandoahSoftMaxHeapSize);
  size_t old_soft_max = heap->soft_max_capacity();
  if (new_soft_max != old_soft_max) {
    new_soft_max = MAX2(heap->min_capacity(), new_soft_max);
    new_soft_max = MIN2(heap->max_capacity(), new_soft_max);
    if (new_soft_max != old_soft_max) {
      log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
                   byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
                   byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
      );
      heap->set_soft_max_capacity(new_soft_max);
      return true;
    }
  }
  return false;
}

void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
  // Normal cycle goes via all concurrent phases. If allocation failure (af) happens during
  // any of the concurrent phases, it first degrades to Degenerated GC and completes GC there.
  // If a second allocation failure happens during Degenerated GC cycle (for example, when GC
  // tries to evac something and no memory is available), cycle degrades to Full GC.
  //
  // There is also a shortcut through the normal cycle: immediate garbage shortcut, when
  // heuristics says there are no regions to compact, and all the collection comes from immediately
  // reclaimable regions.
  //
  // ................................................................................................
  //
  //                                    (immediate garbage shortcut)               Concurrent GC
  //                             /-------------------------------------------\
  //                             |                                           |
  //                             |                                           |
  //                             |                                           |
  //                             |                                           v
  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    |                 |              |      Degenerated GC
  //                   v                    v                 v              |
  //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    v                 |              |      Full GC
  //                   \------------------->o<----------------/              |
  //                                        |                               |
  //                                        v                               |
  //                                    Full GC   --------------------------/
  //

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_outside_cycle)) return;

  ShenandoahGCSession session(cause);

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  // Reset for upcoming marking
  heap->entry_reset();

  // Start initial mark under STW
  heap->vmop_entry_init_mark();

  // Continue concurrent mark
  heap->entry_mark();
  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_mark)) return;

  // If not cancelled, can try to concurrently pre-clean
  heap->entry_preclean();

  // Complete marking under STW, and start evacuation
  heap->vmop_entry_final_mark();

  // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
  // the space. This would be the last action if there is nothing to evacuate.
  heap->entry_cleanup_early();

  {
    ShenandoahHeapLocker locker(heap->lock());
    heap->free_set()->log_status();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    heap->entry_evac();
    if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_evac)) return;

    // Perform update-refs phase.
    heap->vmop_entry_init_updaterefs();
    heap->entry_updaterefs();
    if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_updaterefs)) return;

    heap->vmop_entry_final_updaterefs();

    // Update-refs has freed up the collection set, kick the cleanup to reclaim the space.
    heap->entry_cleanup_complete();
  }

  // Cycle is complete
  heap->heuristics()->record_success_concurrent();
  heap->shenandoah_policy()->record_success_concurrent();
}

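// If GC has been cancelled, remember the point from which a Degenerated GC should
// continue (unless we are in graceful shutdown), and return true so the caller can
// bail out of the concurrent cycle.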
bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahHeap::ShenandoahDegenPoint point) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->cancelled_gc()) {
    assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting");
    if (!in_graceful_shutdown()) {
      assert (_degen_point == ShenandoahHeap::_degenerated_outside_cycle,
              err_msg("Should not be set yet: %s", ShenandoahHeap::degen_point_to_string(_degen_point)));
      _degen_point = point;
    }
    return true;
  }
  return false;
}

void ShenandoahControlThread::stop() {
  {
    MutexLockerEx ml(Terminator_lock);
    _should_terminate = true;
  }

  {
    MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag);
    CGC_lock->notify_all();
  }

  {
    MutexLockerEx ml(Terminator_lock);
    while (!_has_terminated) {
      Terminator_lock->wait();
    }
  }
}

void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahGCSession session(cause);

  heap->vmop_entry_full(cause);

  heap->heuristics()->record_success_full();
  heap->shenandoah_policy()->record_success_full();
}

void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahHeap::ShenandoahDegenPoint point) {
  assert (point != ShenandoahHeap::_degenerated_unset, "Degenerated point should be set");
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahGCSession session(cause);

  heap->vmop_degenerated(point);

  heap->heuristics()->record_success_degenerated();
  heap->shenandoah_policy()->record_success_degenerated();
}

void ShenandoahControlThread::service_uncommit(double shrink_before, size_t shrink_until) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Determine if there is work to do. This avoids taking heap lock if there is
  // no work available, avoids spamming logs with superfluous logging messages,
  // and minimises the amount of work while locks are taken.

  if (heap->committed() <= shrink_until) return;

  bool has_work = false;
  for (size_t i = 0; i < heap->num_regions(); i++) {
    ShenandoahHeapRegion *r = heap->get_region(i);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      has_work = true;
      break;
    }
  }

  if (has_work) {
    heap->entry_uncommit(shrink_before, shrink_until);
  }
}

bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
  return GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause);
}

void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
  assert(GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause) ||
         cause == GCCause::_shenandoah_metadata_gc_clear_softrefs ||
         cause == GCCause::_full_gc_alot ||
         cause == GCCause::_scavenge_alot,
         "only requested GCs here");

  if (is_explicit_gc(cause)) {
    if (!DisableExplicitGC) {
      handle_requested_gc(cause);
    }
  } else {
    handle_requested_gc(cause);
  }
}

void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
  // Make sure we have at least one complete GC cycle before unblocking
  // from the explicit GC request.
  //
  // This is especially important for weak references cleanup and/or native
  // resources (e.g. DirectByteBuffers) machinery: when an explicit GC request
  // comes very late in an already running cycle, it would miss lots of new
  // opportunities for cleanup that were made available before the caller
  // requested the GC.

  MonitorLockerEx ml(&_gc_waiters_lock);
  size_t current_gc_id = get_gc_id();
  size_t required_gc_id = current_gc_id + 1;
  while (current_gc_id < required_gc_id) {
    _gc_requested.set();
    _requested_gc_cause = cause;
    ml.wait();
    current_gc_id = get_gc_id();
  }
}

void ShenandoahControlThread::handle_alloc_failure(ShenandoahAllocRequest& req) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  assert(current()->is_Java_thread(), "expect Java thread here");

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate %s, " SIZE_FORMAT "%s",
                 req.type_string(),
                 byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize));

    // Now that alloc failure GC is scheduled, we can abort everything else
    heap->cancel_gc(GCCause::_allocation_failure);
  }

  MonitorLockerEx ml(&_alloc_failure_waiters_lock);
  while (is_alloc_failure_gc()) {
    ml.wait();
  }
}

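// Called when a thread fails to allocate memory for evacuation. Unlike handle_alloc_failure(),
// this does not block the caller: it only schedules the allocation failure GC and cancels the
// current cycle.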
void ShenandoahControlThread::handle_alloc_failure_evac(size_t words) {
  Thread* t = Thread::current();

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s for evacuation",
                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));
  }

  heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac);
}

void ShenandoahControlThread::notify_alloc_failure_waiters() {
  _alloc_failure_gc.unset();
  MonitorLockerEx ml(&_alloc_failure_waiters_lock);
  ml.notify_all();
}

bool ShenandoahControlThread::try_set_alloc_failure_gc() {
  return _alloc_failure_gc.try_set();
}

bool ShenandoahControlThread::is_alloc_failure_gc() {
  return _alloc_failure_gc.is_set();
}

void ShenandoahControlThread::notify_gc_waiters() {
  _gc_requested.unset();
  MonitorLockerEx ml(&_gc_waiters_lock);
  ml.notify_all();
}

void ShenandoahControlThread::handle_counters_update() {
  if (_do_counters_update.is_set()) {
    _do_counters_update.unset();
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::handle_force_counters_update() {
  if (_force_counters_update.is_set()) {
    _do_counters_update.unset(); // reset these too, we do update now!
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::notify_heap_changed() {
  // This is called from the allocation path, and thus should be fast.

  // Update monitoring counters when we took a new region. This amortizes the
  // update costs on the slow path.
  if (_do_counters_update.is_unset()) {
    _do_counters_update.set();
  }
  // Notify that something had changed.
  if (_heap_changed.is_unset()) {
    _heap_changed.set();
  }
}

void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
  assert(ShenandoahPacing, "should only call when pacing is enabled");
  Atomic::add(words, &_allocs_seen);
}

void ShenandoahControlThread::set_forced_counters_update(bool value) {
  _force_counters_update.set_cond(value);
}

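// GC id bookkeeping: the control loop bumps this counter when a GC cycle starts;
// handle_requested_gc() uses it to block the requester until at least one complete
// cycle has run after the request.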
void ShenandoahControlThread::reset_gc_id() {
  OrderAccess::release_store_ptr_fence(&_gc_id, 0);
}

void ShenandoahControlThread::update_gc_id() {
  Atomic::add(1, &_gc_id);
}

size_t ShenandoahControlThread::get_gc_id() {
  return OrderAccess::load_acquire(&_gc_id);
}

void ShenandoahControlThread::print() const {
  print_on(tty);
}

void ShenandoahControlThread::print_on(outputStream* st) const {
  st->print("Shenandoah Concurrent Thread");
  Thread::print_on(st);
  st->cr();
}

void ShenandoahControlThread::start() {
  create_and_start();
}

void ShenandoahControlThread::makeSurrogateLockerThread(TRAPS) {
  assert(UseShenandoahGC, "SLT thread needed only for concurrent GC");
  assert(THREAD->is_Java_thread(), "must be a Java thread");
  assert(_slt == NULL, "SLT already created");
  _slt = SurrogateLockerThread::make(THREAD);
}

void ShenandoahControlThread::prepare_for_graceful_shutdown() {
  _graceful_shutdown.set();
}

bool ShenandoahControlThread::in_graceful_shutdown() {
  return _graceful_shutdown.is_set();
}