Path: src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp
/*
 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/metaspaceStats.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"

ShenandoahControlThread::ShenandoahControlThread() :
  ConcurrentGCThread(),
  _alloc_failure_waiters_lock(Mutex::leaf, "ShenandoahAllocFailureGC_lock", true, Monitor::_safepoint_check_always),
  _gc_waiters_lock(Mutex::leaf, "ShenandoahRequestedGC_lock", true, Monitor::_safepoint_check_always),
  _periodic_task(this),
  _requested_gc_cause(GCCause::_no_cause_specified),
  _degen_point(ShenandoahGC::_degenerated_outside_cycle),
  _allocs_seen(0) {

  reset_gc_id();
  create_and_start();
  _periodic_task.enroll();
  if (ShenandoahPacing) {
    _periodic_pacer_notify_task.enroll();
  }
}

ShenandoahControlThread::~ShenandoahControlThread() {
  // This is here so that super is called.
}

void ShenandoahPeriodicTask::task() {
  _thread->handle_force_counters_update();
  _thread->handle_counters_update();
}

void ShenandoahPeriodicPacerNotify::task() {
  assert(ShenandoahPacing, "Should not be here otherwise");
  ShenandoahHeap::heap()->pacer()->notify_waiters();
}

void ShenandoahControlThread::run_service() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  GCMode default_mode = concurrent_normal;
  GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;
  int sleep = ShenandoahControlIntervalMin;

  double last_shrink_time = os::elapsedTime();
  double last_sleep_adjust_time = os::elapsedTime();

  // Shrink period avoids constantly polling regions for shrinking.
  // Having a period 10x lower than the delay would mean we hit the
  // shrinking with a lag of less than 1/10th of the true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
  double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;
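  // For example (illustrative value, not necessarily the default): with
  // ShenandoahUncommitDelay=300000 (five minutes, in msecs), shrink_period
  // works out to 300000 / 1000 / 10 = 30 seconds, i.e. the loop polls for
  // uncommittable regions roughly ten times per delay window.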

  ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();
  ShenandoahHeuristics* heuristics = heap->heuristics();
  while (!in_graceful_shutdown() && !should_terminate()) {
    // Figure out if we have pending requests.
    bool alloc_failure_pending = _alloc_failure_gc.is_set();
    bool is_gc_requested = _gc_requested.is_set();
    GCCause::Cause requested_gc_cause = _requested_gc_cause;
    bool explicit_gc_requested = is_gc_requested && is_explicit_gc(requested_gc_cause);
    bool implicit_gc_requested = is_gc_requested && !is_explicit_gc(requested_gc_cause);

    // This control loop iteration has seen this many allocations.
    size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0, memory_order_relaxed);

    // Check if we have seen a new target for soft max heap size.
    bool soft_max_changed = check_soft_max_changed();

    // Choose which GC mode to run in. The block below should select a single mode.
    GCMode mode = none;
    GCCause::Cause cause = GCCause::_last_gc_cause;
    ShenandoahGC::ShenandoahDegenPoint degen_point = ShenandoahGC::_degenerated_unset;

    if (alloc_failure_pending) {
      // Allocation failure takes precedence: we have to deal with it first thing
      log_info(gc)("Trigger: Handle Allocation Failure");

      cause = GCCause::_allocation_failure;

      // Consume the degen point, and seed it with default value
      degen_point = _degen_point;
      _degen_point = ShenandoahGC::_degenerated_outside_cycle;

      if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_degenerated(degen_point);
        mode = stw_degenerated;
      } else {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_full();
        mode = stw_full;
      }

    } else if (explicit_gc_requested) {
      cause = requested_gc_cause;
      log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ExplicitGCInvokesConcurrent) {
        policy->record_explicit_to_concurrent();
        mode = default_mode;
        // Unload and clean up everything
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_explicit_to_full();
        mode = stw_full;
      }
    } else if (implicit_gc_requested) {
      cause = requested_gc_cause;
      log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ShenandoahImplicitGCInvokesConcurrent) {
        policy->record_implicit_to_concurrent();
        mode = default_mode;

        // Unload and clean up everything
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_implicit_to_full();
        mode = stw_full;
      }
    } else {
      // Potential normal cycle: ask heuristics if it wants to act
      if (heuristics->should_start_gc()) {
        mode = default_mode;
        cause = default_cause;
      }

      // Ask policy if this cycle wants to process references or unload classes
      heap->set_unload_classes(heuristics->should_unload_classes());
    }
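    // To summarize the selection above: allocation failure takes precedence
    // over an explicit request, an explicit request over an implicit one,
    // and heuristics may start a cycle only when nothing else is pending.
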
    // Blow all soft references on this cycle, if handling allocation failure,
    // either implicit or explicit GC request, or we are requested to do so unconditionally.
    if (alloc_failure_pending || implicit_gc_requested || explicit_gc_requested || ShenandoahAlwaysClearSoftRefs) {
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
    }

    bool gc_requested = (mode != none);
    assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");

    if (gc_requested) {
      // GC is starting, bump the internal ID
      update_gc_id();

      heap->reset_bytes_allocated_since_gc_start();

      MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();

      // If GC was requested, we are sampling the counters even without actual triggers
      // from the allocation machinery. This captures GC phases more accurately.
      set_forced_counters_update(true);

      // If GC was requested, we better dump freeset data for performance debugging
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();
      }

      switch (mode) {
        case concurrent_normal:
          service_concurrent_normal_cycle(cause);
          break;
        case stw_degenerated:
          service_stw_degenerated_cycle(cause, degen_point);
          break;
        case stw_full:
          service_stw_full_cycle(cause);
          break;
        default:
          ShouldNotReachHere();
      }

      // If this was the requested GC cycle, notify waiters about it
      if (explicit_gc_requested || implicit_gc_requested) {
        notify_gc_waiters();
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }

      // Report current free set state at the end of cycle, whether
      // it is a normal completion, or the abort.
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();

        // Notify Universe about new heap usage. This has implications for
        // global soft refs policy, and we better report it every time heap
        // usage goes down.
        Universe::heap()->update_capacity_and_used_at_gc();

        // Signal that we have completed a visit to all live objects.
        Universe::heap()->record_whole_heap_examined_timestamp();
      }

      // Disable forced counters update, and update counters one more time
      // to capture the state at the end of GC session.
      handle_force_counters_update();
      set_forced_counters_update(false);

      // Retract forceful part of soft refs policy
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);

      // Clear metaspace oom flag, if current cycle unloaded classes
      if (heap->unload_classes()) {
        heuristics->clear_metaspace_oom();
      }

      // Commit worker statistics to cycle data
      heap->phase_timings()->flush_par_workers_to_cycle();
      if (ShenandoahPacing) {
        heap->pacer()->flush_stats_to_cycle();
      }

      // Print GC stats for current cycle
      {
        LogTarget(Info, gc, stats) lt;
        if (lt.is_enabled()) {
          ResourceMark rm;
          LogStream ls(lt);
          heap->phase_timings()->print_cycle_on(&ls);
          if (ShenandoahPacing) {
            heap->pacer()->print_cycle_on(&ls);
          }
        }
      }

      // Commit statistics to globals
      heap->phase_timings()->flush_cycle_to_global();

      // Print Metaspace change following GC (if logging is enabled).
      MetaspaceUtils::print_metaspace_change(meta_sizes);

      // GC is over, we are at idle now
      if (ShenandoahPacing) {
        heap->pacer()->setup_for_idle();
      }
    } else {
      // Allow allocators to know we have seen this many allocations
      if (ShenandoahPacing && (allocs_seen > 0)) {
        heap->pacer()->report_alloc(allocs_seen);
      }
    }

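    // Note on allocs_seen (summary, not new behavior): allocating threads
    // accumulate into _allocs_seen via pacing_notify_alloc(), and the
    // Atomic::xchg at the top of this loop drains it once per iteration,
    // so each allocation is reported to the pacer at most once.
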
    double current = os::elapsedTime();

    if (ShenandoahUncommit && (explicit_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
      // Explicit GC tries to uncommit everything down to min capacity.
      // Soft max change tries to uncommit everything down to target capacity.
      // Periodic uncommit tries to uncommit suitable regions down to min capacity.

      double shrink_before = (explicit_gc_requested || soft_max_changed) ?
                             current :
                             current - (ShenandoahUncommitDelay / 1000.0);

      size_t shrink_until = soft_max_changed ?
                             heap->soft_max_capacity() :
                             heap->min_capacity();

      service_uncommit(shrink_before, shrink_until);
      heap->phase_timings()->flush_cycle_to_global();
      last_shrink_time = current;
    }

    // Wait before performing the next action. If allocation happened during this wait,
    // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle,
    // back off exponentially.
    if (_heap_changed.try_unset()) {
      sleep = ShenandoahControlIntervalMin;
    } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod) {
      sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
      last_sleep_adjust_time = current;
    }
    os::naked_short_sleep(sleep);
  }

  // Wait for the actual stop(), can't leave run_service() earlier.
  while (!should_terminate()) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
}

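// check_soft_max_changed() below re-reads the manageable SoftMaxHeapSize flag.
// For example (illustrative): if the flag is set at runtime to a value below
// min_capacity() or above max_capacity(), the clamping snaps it back into
// that range, and the function returns true only when the effective soft max
// actually changed.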
bool ShenandoahControlThread::check_soft_max_changed() const {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
  size_t old_soft_max = heap->soft_max_capacity();
  if (new_soft_max != old_soft_max) {
    new_soft_max = MAX2(heap->min_capacity(), new_soft_max);
    new_soft_max = MIN2(heap->max_capacity(), new_soft_max);
    if (new_soft_max != old_soft_max) {
      log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
                   byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
                   byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
      );
      heap->set_soft_max_capacity(new_soft_max);
      return true;
    }
  }
  return false;
}

void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
  // Normal cycle goes via all concurrent phases. If allocation failure (af) happens during
  // any of the concurrent phases, it first degrades to Degenerated GC and completes GC there.
  // If a second allocation failure happens during Degenerated GC cycle (for example, when GC
  // tries to evac something and no memory is available), cycle degrades to Full GC.
  //
  // There is also a shortcut through the normal cycle: the immediate garbage shortcut, when
  // heuristics say there are no regions to compact, and all the collection comes from
  // immediately reclaimable regions.
  //
  // ................................................................................................
  //
  //                                    (immediate garbage shortcut)                Concurrent GC
  //                             /-------------------------------------------\
  //                             |                                           |
  //                             |                                           |
  //                             |                                           |
  //                             |                                           v
  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    |                 |              |      Degenerated GC
  //                   v                    v                 v              |
  //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    |                 |              |      Full GC
  //                   \------------------->o<----------------/             |
  //                                        |                               |
  //                                        v                               |
  //                                      Full GC  ------------------------/
  //
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) return;

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  ShenandoahConcurrentGC gc;
  if (gc.collect(cause)) {
    // Cycle is complete
    heap->heuristics()->record_success_concurrent();
    heap->shenandoah_policy()->record_success_concurrent();
  } else {
    assert(heap->cancelled_gc(), "Must have been cancelled");
    check_cancellation_or_degen(gc.degen_point());
  }
}

bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->cancelled_gc()) {
    assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting");
    if (!in_graceful_shutdown()) {
      assert (_degen_point == ShenandoahGC::_degenerated_outside_cycle,
              "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point));
      _degen_point = point;
    }
    return true;
  }
  return false;
}

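// Note on the handshake in check_cancellation_or_degen() above (summary, not
// new behavior): it records the point at which the concurrent cycle was
// cancelled into _degen_point; the main loop in run_service() consumes that
// value on its next iteration, reseeds it to _degenerated_outside_cycle, and
// starts the Degenerated GC from the recorded point.
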
void ShenandoahControlThread::stop_service() {
  // Nothing to do here.
}

void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahFullGC gc;
  gc.collect(cause);

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->heuristics()->record_success_full();
  heap->shenandoah_policy()->record_success_full();
}

void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahGC::ShenandoahDegenPoint point) {
  assert (point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set");

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahDegenGC gc(point);
  gc.collect(cause);

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->heuristics()->record_success_degenerated();
  heap->shenandoah_policy()->record_success_degenerated();
}

void ShenandoahControlThread::service_uncommit(double shrink_before, size_t shrink_until) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Determine if there is work to do. This avoids taking heap lock if there is
  // no work available, avoids spamming logs with superfluous logging messages,
  // and minimises the amount of work while locks are taken.

  if (heap->committed() <= shrink_until) return;

  bool has_work = false;
  for (size_t i = 0; i < heap->num_regions(); i++) {
    ShenandoahHeapRegion* r = heap->get_region(i);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      has_work = true;
      break;
    }
  }

  if (has_work) {
    heap->entry_uncommit(shrink_before, shrink_until);
  }
}

bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
  return GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause);
}

void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
  assert(GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause) ||
         cause == GCCause::_metadata_GC_clear_soft_refs ||
         cause == GCCause::_full_gc_alot ||
         cause == GCCause::_wb_full_gc ||
         cause == GCCause::_wb_breakpoint ||
         cause == GCCause::_scavenge_alot,
         "only requested GCs here");

  if (is_explicit_gc(cause)) {
    if (!DisableExplicitGC) {
      handle_requested_gc(cause);
    }
  } else {
    handle_requested_gc(cause);
  }
}

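// Illustrative walk-through of handle_requested_gc() below (no new APIs, only
// the fields used in this file): a requester thread reading _gc_id == N stores
// the cause, sets _gc_requested, and waits on _gc_waiters_lock. run_service()
// observes the flag, bumps _gc_id to N+1 via update_gc_id() when the cycle
// starts, and notify_gc_waiters() at the end of the cycle wakes the requester,
// which rechecks get_gc_id() and unblocks only once a full cycle has run
// against its request. _wb_breakpoint requests skip the wait() and simply
// keep the request armed.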
void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
  // Make sure we have at least one complete GC cycle before unblocking
  // from the explicit GC request.
  //
  // This is especially important for weak references cleanup and/or native
  // resources (e.g. DirectByteBuffers) machinery: when explicit GC request
  // comes very late in the already running cycle, it would miss lots of new
  // opportunities for cleanup that were made available before the caller
  // requested the GC.

  MonitorLocker ml(&_gc_waiters_lock);
  size_t current_gc_id = get_gc_id();
  size_t required_gc_id = current_gc_id + 1;
  while (current_gc_id < required_gc_id) {
    // Although setting the GC request happens under _gc_waiters_lock, the read
    // side (run_service()) does not take the lock. We need to enforce the
    // following order, so that the read side sees the latest requested GC
    // cause when the flag is set.
    _requested_gc_cause = cause;
    _gc_requested.set();

    if (cause != GCCause::_wb_breakpoint) {
      ml.wait();
    }
    current_gc_id = get_gc_id();
  }
}

void ShenandoahControlThread::handle_alloc_failure(ShenandoahAllocRequest& req) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  assert(current()->is_Java_thread(), "expect Java thread here");

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate %s, " SIZE_FORMAT "%s",
                 req.type_string(),
                 byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize));

    // Now that alloc failure GC is scheduled, we can abort everything else
    heap->cancel_gc(GCCause::_allocation_failure);
  }

  MonitorLocker ml(&_alloc_failure_waiters_lock);
  while (is_alloc_failure_gc()) {
    ml.wait();
  }
}

void ShenandoahControlThread::handle_alloc_failure_evac(size_t words) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s for evacuation",
                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));
  }

  // Forcefully report allocation failure
  heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac);
}

void ShenandoahControlThread::notify_alloc_failure_waiters() {
  _alloc_failure_gc.unset();
  MonitorLocker ml(&_alloc_failure_waiters_lock);
  ml.notify_all();
}

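// Note on the two allocation-failure paths above (summary; the "must not
// block" rationale is an inference, not stated in this file):
// handle_alloc_failure() runs on a Java thread that can afford to block, so
// it waits on _alloc_failure_waiters_lock until notify_alloc_failure_waiters()
// releases it after the rescue cycle. handle_alloc_failure_evac() is called
// from evacuation paths and does not block: it only raises the flag and
// cancels the current GC.
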
bool ShenandoahControlThread::try_set_alloc_failure_gc() {
  return _alloc_failure_gc.try_set();
}

bool ShenandoahControlThread::is_alloc_failure_gc() {
  return _alloc_failure_gc.is_set();
}

void ShenandoahControlThread::notify_gc_waiters() {
  _gc_requested.unset();
  MonitorLocker ml(&_gc_waiters_lock);
  ml.notify_all();
}

void ShenandoahControlThread::handle_counters_update() {
  if (_do_counters_update.is_set()) {
    _do_counters_update.unset();
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::handle_force_counters_update() {
  if (_force_counters_update.is_set()) {
    _do_counters_update.unset(); // reset these too, we do update now!
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::notify_heap_changed() {
  // This is called from allocation path, and thus should be fast.

  // Update monitoring counters when we took a new region. This amortizes the
  // update costs on the slow path.
  if (_do_counters_update.is_unset()) {
    _do_counters_update.set();
  }
  // Notify that something has changed.
  if (_heap_changed.is_unset()) {
    _heap_changed.set();
  }
}

void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
  assert(ShenandoahPacing, "should only call when pacing is enabled");
  Atomic::add(&_allocs_seen, words, memory_order_relaxed);
}

void ShenandoahControlThread::set_forced_counters_update(bool value) {
  _force_counters_update.set_cond(value);
}

void ShenandoahControlThread::reset_gc_id() {
  Atomic::store(&_gc_id, (size_t)0);
}

void ShenandoahControlThread::update_gc_id() {
  Atomic::inc(&_gc_id);
}

size_t ShenandoahControlThread::get_gc_id() {
  return Atomic::load(&_gc_id);
}

void ShenandoahControlThread::print() const {
  print_on(tty);
}

void ShenandoahControlThread::print_on(outputStream* st) const {
  st->print("Shenandoah Concurrent Thread");
  Thread::print_on(st);
  st->cr();
}

void ShenandoahControlThread::start() {
  create_and_start();
}

void ShenandoahControlThread::prepare_for_graceful_shutdown() {
  _graceful_shutdown.set();
}

bool ShenandoahControlThread::in_graceful_shutdown() {
  return _graceful_shutdown.is_set();
}