Path: blob/master/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp
/*
 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/metaspaceStats.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"

ShenandoahControlThread::ShenandoahControlThread() :
  ConcurrentGCThread(),
  _alloc_failure_waiters_lock(Mutex::leaf, "ShenandoahAllocFailureGC_lock", true, Monitor::_safepoint_check_always),
  _gc_waiters_lock(Mutex::leaf, "ShenandoahRequestedGC_lock", true, Monitor::_safepoint_check_always),
  _periodic_task(this),
  _requested_gc_cause(GCCause::_no_cause_specified),
  _degen_point(ShenandoahGC::_degenerated_outside_cycle),
  _allocs_seen(0) {

  reset_gc_id();
  create_and_start();
  _periodic_task.enroll();
  if (ShenandoahPacing) {
    _periodic_pacer_notify_task.enroll();
  }
}

ShenandoahControlThread::~ShenandoahControlThread() {
  // This is here so that super is called.
}

void ShenandoahPeriodicTask::task() {
  _thread->handle_force_counters_update();
  _thread->handle_counters_update();
}

void ShenandoahPeriodicPacerNotify::task() {
  assert(ShenandoahPacing, "Should not be here otherwise");
  ShenandoahHeap::heap()->pacer()->notify_waiters();
}

void ShenandoahControlThread::run_service() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  GCMode default_mode = concurrent_normal;
  GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;
  int sleep = ShenandoahControlIntervalMin;

  double last_shrink_time = os::elapsedTime();
  double last_sleep_adjust_time = os::elapsedTime();

  // Shrink period avoids constantly polling regions for shrinking.
  // Having a period 10x lower than the delay would mean we hit the
  // shrinking with lag of less than 1/10-th of true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
  double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;
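  // For instance, an uncommit delay of 1000 ms yields a shrink period of
  // 1000 / 1000 / 10 = 0.1 s, keeping the polling lag within a tenth of the delay.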

  ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();
  ShenandoahHeuristics* heuristics = heap->heuristics();
  while (!in_graceful_shutdown() && !should_terminate()) {
    // Figure out if we have pending requests.
    bool alloc_failure_pending = _alloc_failure_gc.is_set();
    bool explicit_gc_requested = _gc_requested.is_set() &&  is_explicit_gc(_requested_gc_cause);
    bool implicit_gc_requested = _gc_requested.is_set() && !is_explicit_gc(_requested_gc_cause);

    // This control loop iteration has seen this many allocations.
    size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0, memory_order_relaxed);

    // Check if we have seen a new target for soft max heap size.
    bool soft_max_changed = check_soft_max_changed();

    // Choose which GC mode to run in. The block below should select a single mode.
    GCMode mode = none;
    GCCause::Cause cause = GCCause::_last_gc_cause;
    ShenandoahGC::ShenandoahDegenPoint degen_point = ShenandoahGC::_degenerated_unset;

    if (alloc_failure_pending) {
      // Allocation failure takes precedence: we have to deal with it first thing
      log_info(gc)("Trigger: Handle Allocation Failure");

      cause = GCCause::_allocation_failure;

      // Consume the degen point, and seed it with default value
      degen_point = _degen_point;
      _degen_point = ShenandoahGC::_degenerated_outside_cycle;

      if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_degenerated(degen_point);
        mode = stw_degenerated;
      } else {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_full();
        mode = stw_full;
      }

    } else if (explicit_gc_requested) {
      cause = _requested_gc_cause;
      log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ExplicitGCInvokesConcurrent) {
        policy->record_explicit_to_concurrent();
        mode = default_mode;
        // Unload and clean up everything
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_explicit_to_full();
        mode = stw_full;
      }
    } else if (implicit_gc_requested) {
      cause = _requested_gc_cause;
      log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ShenandoahImplicitGCInvokesConcurrent) {
        policy->record_implicit_to_concurrent();
        mode = default_mode;

        // Unload and clean up everything
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_implicit_to_full();
        mode = stw_full;
      }
    } else {
      // Potential normal cycle: ask heuristics if it wants to act
      if (heuristics->should_start_gc()) {
        mode = default_mode;
        cause = default_cause;
      }

      // Ask policy if this cycle wants to process references or unload classes
      heap->set_unload_classes(heuristics->should_unload_classes());
    }

    // Blow all soft references on this cycle, if handling allocation failure,
    // either implicit or explicit GC request, or we are requested to do so unconditionally.
    if (alloc_failure_pending || implicit_gc_requested || explicit_gc_requested || ShenandoahAlwaysClearSoftRefs) {
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
    }

    bool gc_requested = (mode != none);
    assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");

    if (gc_requested) {
      // GC is starting, bump the internal ID
      update_gc_id();

      heap->reset_bytes_allocated_since_gc_start();

      MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();

      // If GC was requested, we are sampling the counters even without actual triggers
      // from allocation machinery. This captures GC phases more accurately.
      set_forced_counters_update(true);

      // If GC was requested, we better dump freeset data for performance debugging
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();
      }

      switch (mode) {
        case concurrent_normal:
          service_concurrent_normal_cycle(cause);
          break;
        case stw_degenerated:
          service_stw_degenerated_cycle(cause, degen_point);
          break;
        case stw_full:
          service_stw_full_cycle(cause);
          break;
        default:
          ShouldNotReachHere();
      }

      // If this was the requested GC cycle, notify waiters about it
      if (explicit_gc_requested || implicit_gc_requested) {
        notify_gc_waiters();
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }

      // Report current free set state at the end of cycle, whether
      // it is a normal completion, or the abort.
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();

        // Notify Universe about new heap usage. This has implications for
        // global soft refs policy, and we better report it every time heap
        // usage goes down.
        Universe::heap()->update_capacity_and_used_at_gc();

        // Signal that we have completed a visit to all live objects.
        Universe::heap()->record_whole_heap_examined_timestamp();
      }

      // Disable forced counters update, and update counters one more time
      // to capture the state at the end of GC session.
      handle_force_counters_update();
      set_forced_counters_update(false);

      // Retract forceful part of soft refs policy
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);

      // Clear metaspace oom flag, if current cycle unloaded classes
      if (heap->unload_classes()) {
        heuristics->clear_metaspace_oom();
      }

      // Commit worker statistics to cycle data
      heap->phase_timings()->flush_par_workers_to_cycle();
      if (ShenandoahPacing) {
        heap->pacer()->flush_stats_to_cycle();
      }

      // Print GC stats for current cycle
      {
        LogTarget(Info, gc, stats) lt;
        if (lt.is_enabled()) {
          ResourceMark rm;
          LogStream ls(lt);
          heap->phase_timings()->print_cycle_on(&ls);
          if (ShenandoahPacing) {
            heap->pacer()->print_cycle_on(&ls);
          }
        }
      }

      // Commit statistics to globals
      heap->phase_timings()->flush_cycle_to_global();

      // Print Metaspace change following GC (if logging is enabled).
      MetaspaceUtils::print_metaspace_change(meta_sizes);

      // GC is over, we are at idle now
      if (ShenandoahPacing) {
        heap->pacer()->setup_for_idle();
      }
    } else {
      // Allow allocators to know we have seen this many allocations
      if (ShenandoahPacing && (allocs_seen > 0)) {
        heap->pacer()->report_alloc(allocs_seen);
      }
    }

    double current = os::elapsedTime();

    if (ShenandoahUncommit && (explicit_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
      // Explicit GC tries to uncommit everything down to min capacity.
      // Soft max change tries to uncommit everything down to target capacity.
      // Periodic uncommit tries to uncommit suitable regions down to min capacity.

      double shrink_before = (explicit_gc_requested || soft_max_changed) ?
                             current :
                             current - (ShenandoahUncommitDelay / 1000.0);

      size_t shrink_until = soft_max_changed ?
                             heap->soft_max_capacity() :
                             heap->min_capacity();

      service_uncommit(shrink_before, shrink_until);
      heap->phase_timings()->flush_cycle_to_global();
      last_shrink_time = current;
    }

    // Wait before performing the next action. If allocation happened during this wait,
    // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle,
    // back off exponentially.
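    // For instance, with a 1 ms minimum and a 10 ms maximum interval, the idle
    // sleep roughly doubles through 1, 2, 4, 8 ms and then saturates at 10 ms.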
    if (_heap_changed.try_unset()) {
      sleep = ShenandoahControlIntervalMin;
    } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod) {
      sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
      last_sleep_adjust_time = current;
    }
    os::naked_short_sleep(sleep);
  }

  // Wait for the actual stop(), can't leave run_service() earlier.
  while (!should_terminate()) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
}

bool ShenandoahControlThread::check_soft_max_changed() const {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
  size_t old_soft_max = heap->soft_max_capacity();
  if (new_soft_max != old_soft_max) {
    new_soft_max = MAX2(heap->min_capacity(), new_soft_max);
    new_soft_max = MIN2(heap->max_capacity(), new_soft_max);
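    // For example, a SoftMaxHeapSize request above max_capacity() is clamped down
    // to it, and a request below min_capacity() is raised up to it.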
    if (new_soft_max != old_soft_max) {
      log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
                   byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
                   byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
      );
      heap->set_soft_max_capacity(new_soft_max);
      return true;
    }
  }
  return false;
}

void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
  // Normal cycle goes via all concurrent phases. If allocation failure (af) happens during
  // any of the concurrent phases, it first degrades to Degenerated GC and completes GC there.
  // If a second allocation failure happens during the Degenerated GC cycle (for example, when GC
  // tries to evac something and no memory is available), the cycle degrades to Full GC.
  //
  // There is also a shortcut through the normal cycle: the immediate garbage shortcut, taken when
  // heuristics say there are no regions to compact, and all the collection comes from immediately
  // reclaimable regions.
  //
  // ................................................................................................
  //
  //                                    (immediate garbage shortcut)                Concurrent GC
  //                             /-------------------------------------------\
  //                             |                                           |
  //                             |                                           |
  //                             |                                           |
  //                             |                                           v
  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    |                 |              |      Degenerated GC
  //                   v                    v                 v              |
  //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    v                 |              |      Full GC
  //                   \------------------->o<----------------/              |
  //                                        |                                |
  //                                        v                                |
  //                                      Full GC  --------------------------/
  //
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) return;

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  ShenandoahConcurrentGC gc;
  if (gc.collect(cause)) {
    // Cycle is complete
    heap->heuristics()->record_success_concurrent();
    heap->shenandoah_policy()->record_success_concurrent();
  } else {
    assert(heap->cancelled_gc(), "Must have been cancelled");
    check_cancellation_or_degen(gc.degen_point());
  }
}

bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->cancelled_gc()) {
    assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting");
    if (!in_graceful_shutdown()) {
      assert (_degen_point == ShenandoahGC::_degenerated_outside_cycle,
              "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point));
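      // Remember where the cycle was cancelled; run_service() consumes _degen_point
      // on its next iteration to pick the matching degenerated cycle.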
      _degen_point = point;
    }
    return true;
  }
  return false;
}

void ShenandoahControlThread::stop_service() {
  // Nothing to do here.
}

void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahFullGC gc;
  gc.collect(cause);

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->heuristics()->record_success_full();
  heap->shenandoah_policy()->record_success_full();
}

void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahGC::ShenandoahDegenPoint point) {
  assert (point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set");

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahDegenGC gc(point);
  gc.collect(cause);

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->heuristics()->record_success_degenerated();
  heap->shenandoah_policy()->record_success_degenerated();
}

void ShenandoahControlThread::service_uncommit(double shrink_before, size_t shrink_until) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Determine if there is work to do. This avoids taking heap lock if there is
  // no work available, avoids spamming logs with superfluous logging messages,
  // and minimises the amount of work while locks are taken.

  if (heap->committed() <= shrink_until) return;

  bool has_work = false;
  for (size_t i = 0; i < heap->num_regions(); i++) {
    ShenandoahHeapRegion *r = heap->get_region(i);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      has_work = true;
      break;
    }
  }

  if (has_work) {
    heap->entry_uncommit(shrink_before, shrink_until);
  }
}

bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
  return GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause);
}

void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
  assert(GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause) ||
         cause == GCCause::_metadata_GC_clear_soft_refs ||
         cause == GCCause::_full_gc_alot ||
         cause == GCCause::_wb_full_gc ||
         cause == GCCause::_wb_breakpoint ||
         cause == GCCause::_scavenge_alot,
         "only requested GCs here");

  if (is_explicit_gc(cause)) {
    if (!DisableExplicitGC) {
      handle_requested_gc(cause);
    }
  } else {
    handle_requested_gc(cause);
  }
}

void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
  // Make sure we have at least one complete GC cycle before unblocking
  // from the explicit GC request.
  //
  // This is especially important for weak references cleanup and/or native
  // resources (e.g. DirectByteBuffers) machinery: when explicit GC request
  // comes very late in the already running cycle, it would miss lots of new
  // opportunities for cleanup that were made available before the caller
  // requested the GC.

  MonitorLocker ml(&_gc_waiters_lock);
  size_t current_gc_id = get_gc_id();
  size_t required_gc_id = current_gc_id + 1;
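  // The GC id is bumped when a cycle starts and waiters are notified only after a
  // cycle completes, so reaching required_gc_id means a full cycle that began
  // after this request has finished.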
  while (current_gc_id < required_gc_id) {
    _gc_requested.set();
    _requested_gc_cause = cause;

    if (cause != GCCause::_wb_breakpoint) {
      ml.wait();
    }
    current_gc_id = get_gc_id();
  }
}

void ShenandoahControlThread::handle_alloc_failure(ShenandoahAllocRequest& req) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  assert(current()->is_Java_thread(), "expect Java thread here");

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate %s, " SIZE_FORMAT "%s",
                 req.type_string(),
                 byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize));

    // Now that alloc failure GC is scheduled, we can abort everything else
    heap->cancel_gc(GCCause::_allocation_failure);
  }

  MonitorLocker ml(&_alloc_failure_waiters_lock);
  while (is_alloc_failure_gc()) {
    ml.wait();
  }
}

void ShenandoahControlThread::handle_alloc_failure_evac(size_t words) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s for evacuation",
                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));
  }

  // Forcefully report allocation failure
  heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac);
}

void ShenandoahControlThread::notify_alloc_failure_waiters() {
  _alloc_failure_gc.unset();
  MonitorLocker ml(&_alloc_failure_waiters_lock);
  ml.notify_all();
}

bool ShenandoahControlThread::try_set_alloc_failure_gc() {
  return _alloc_failure_gc.try_set();
}

bool ShenandoahControlThread::is_alloc_failure_gc() {
  return _alloc_failure_gc.is_set();
}

void ShenandoahControlThread::notify_gc_waiters() {
  _gc_requested.unset();
  MonitorLocker ml(&_gc_waiters_lock);
  ml.notify_all();
}

void ShenandoahControlThread::handle_counters_update() {
  if (_do_counters_update.is_set()) {
    _do_counters_update.unset();
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::handle_force_counters_update() {
  if (_force_counters_update.is_set()) {
    _do_counters_update.unset(); // reset these too, we do update now!
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::notify_heap_changed() {
  // This is called from allocation path, and thus should be fast.

  // Update monitoring counters when we took a new region. This amortizes the
  // update costs on slow path.
  if (_do_counters_update.is_unset()) {
    _do_counters_update.set();
  }
  // Notify that something had changed.
  if (_heap_changed.is_unset()) {
    _heap_changed.set();
  }
}

void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
  assert(ShenandoahPacing, "should only call when pacing is enabled");
  Atomic::add(&_allocs_seen, words, memory_order_relaxed);
}

void ShenandoahControlThread::set_forced_counters_update(bool value) {
  _force_counters_update.set_cond(value);
}

void ShenandoahControlThread::reset_gc_id() {
  Atomic::store(&_gc_id, (size_t)0);
}

void ShenandoahControlThread::update_gc_id() {
  Atomic::inc(&_gc_id);
}

size_t ShenandoahControlThread::get_gc_id() {
  return Atomic::load(&_gc_id);
}

void ShenandoahControlThread::print() const {
  print_on(tty);
}

void ShenandoahControlThread::print_on(outputStream* st) const {
  st->print("Shenandoah Concurrent Thread");
  Thread::print_on(st);
  st->cr();
}

void ShenandoahControlThread::start() {
  create_and_start();
}

void ShenandoahControlThread::prepare_for_graceful_shutdown() {
  _graceful_shutdown.set();
}

bool ShenandoahControlThread::in_graceful_shutdown() {
  return _graceful_shutdown.is_set();
}