// Path: blob/jdk8u272-b10-aarch32-20201026/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
// (scraper metadata: 48789 views)
/*1* Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.2* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.3*4* This code is free software; you can redistribute it and/or modify it5* under the terms of the GNU General Public License version 2 only, as6* published by the Free Software Foundation.7*8* This code is distributed in the hope that it will be useful, but WITHOUT9* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or10* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License11* version 2 for more details (a copy is included in the LICENSE file that12* accompanied this code).13*14* You should have received a copy of the GNU General Public License version15* 2 along with this work; if not, write to the Free Software Foundation,16* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.17*18* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA19* or visit www.oracle.com if you need additional information or have any20* questions.21*22*/2324#ifndef __clang_major__25#define ATTRIBUTE_PRINTF(x,y) // FIXME, formats are a mess.26#endif2728#include "precompiled.hpp"29#include "gc_implementation/g1/concurrentG1Refine.hpp"30#include "gc_implementation/g1/concurrentMark.hpp"31#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"32#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"33#include "gc_implementation/g1/g1CollectorPolicy.hpp"34#include "gc_implementation/g1/g1ErgoVerbose.hpp"35#include "gc_implementation/g1/g1GCPhaseTimes.hpp"36#include "gc_implementation/g1/g1Log.hpp"37#include "gc_implementation/g1/heapRegionRemSet.hpp"38#include "gc_implementation/shared/gcPolicyCounters.hpp"39#include "runtime/arguments.hpp"40#include "runtime/java.hpp"41#include "runtime/mutexLocker.hpp"42#include "utilities/debug.hpp"4344// Different defaults for different number of GC threads45// They were chosen by running GCOld and SPECjbb on debris with different46// numbers 
of GC threads and choosing them based on the results4748// all the same49static double rs_length_diff_defaults[] = {500.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.051};5253static double cost_per_card_ms_defaults[] = {540.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.001555};5657// all the same58static double young_cards_per_entry_ratio_defaults[] = {591.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.060};6162static double cost_per_entry_ms_defaults[] = {630.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.00564};6566static double cost_per_byte_ms_defaults[] = {670.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.00000968};6970// these should be pretty consistent71static double constant_other_time_ms_defaults[] = {725.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.073};747576static double young_other_cost_per_region_ms_defaults[] = {770.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.178};7980static double non_young_other_cost_per_region_ms_defaults[] = {811.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.3082};8384G1CollectorPolicy::G1CollectorPolicy() :85_parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()86? 
ParallelGCThreads : 1),8788_recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),89_stop_world_start(0.0),9091_concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),92_concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),9394_alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),95_prev_collection_pause_end_ms(0.0),96_rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),97_cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),98_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),99_mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),100_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),101_mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),102_cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),103_cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),104_constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),105_young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),106_non_young_other_cost_per_region_ms_seq(107new TruncatedSeq(TruncatedSeqLength)),108109_pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),110_rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),111112_pause_time_target_ms((double) MaxGCPauseMillis),113114_gcs_are_young(true),115116_during_marking(false),117_in_marking_window(false),118_in_marking_window_im(false),119120_recent_prev_end_times_for_all_gcs_sec(121new 
TruncatedSeq(NumPrevPausesForHeuristics)),122123_recent_avg_pause_time_ratio(0.0),124125_initiate_conc_mark_if_possible(false),126_during_initial_mark_pause(false),127_last_young_gc(false),128_last_gc_was_young(false),129130_eden_used_bytes_before_gc(0),131_survivor_used_bytes_before_gc(0),132_heap_used_bytes_before_gc(0),133_metaspace_used_bytes_before_gc(0),134_eden_capacity_bytes_before_gc(0),135_heap_capacity_bytes_before_gc(0),136137_eden_cset_region_length(0),138_survivor_cset_region_length(0),139_old_cset_region_length(0),140141_collection_set(NULL),142_collection_set_bytes_used_before(0),143144// Incremental CSet attributes145_inc_cset_build_state(Inactive),146_inc_cset_head(NULL),147_inc_cset_tail(NULL),148_inc_cset_bytes_used_before(0),149_inc_cset_max_finger(NULL),150_inc_cset_recorded_rs_lengths(0),151_inc_cset_recorded_rs_lengths_diffs(0),152_inc_cset_predicted_elapsed_time_ms(0.0),153_inc_cset_predicted_elapsed_time_ms_diffs(0.0),154155#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away156#pragma warning( disable:4355 ) // 'this' : used in base member initializer list157#endif // _MSC_VER158159_short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",160G1YoungSurvRateNumRegionsSummary)),161_survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",162G1YoungSurvRateNumRegionsSummary)),163// add here any more surv rate groups164_recorded_survivor_regions(0),165_recorded_survivor_head(NULL),166_recorded_survivor_tail(NULL),167_survivors_age_table(true),168169_gc_overhead_perc(0.0) {170171// Set up the region size and associated fields. Given that the172// policy is created before the heap, we have to set this up here,173// so it's done as soon as possible.174175// It would have been natural to pass initial_heap_byte_size() and176// max_heap_byte_size() to setup_heap_region_size() but those have177// not been set up at this point since they should be aligned with178// the region size. 
So, there is a circular dependency here. We base179// the region size on the heap size, but the heap size should be180// aligned with the region size. To get around this we use the181// unaligned values for the heap.182HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);183HeapRegionRemSet::setup_remset_size();184185G1ErgoVerbose::initialize();186if (PrintAdaptiveSizePolicy) {187// Currently, we only use a single switch for all the heuristics.188G1ErgoVerbose::set_enabled(true);189// Given that we don't currently have a verboseness level190// parameter, we'll hardcode this to high. This can be easily191// changed in the future.192G1ErgoVerbose::set_level(ErgoHigh);193} else {194G1ErgoVerbose::set_enabled(false);195}196197// Verify PLAB sizes198const size_t region_size = HeapRegion::GrainWords;199if (YoungPLABSize > region_size || OldPLABSize > region_size) {200char buffer[128];201jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most " SIZE_FORMAT,202OldPLABSize > region_size ? 
"Old" : "Young", region_size);203vm_exit_during_initialization(buffer);204}205206_recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());207_prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;208209_phase_times = new G1GCPhaseTimes(_parallel_gc_threads);210211int index = MIN2(_parallel_gc_threads - 1, 7);212213_rs_length_diff_seq->add(rs_length_diff_defaults[index]);214_cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);215_young_cards_per_entry_ratio_seq->add(216young_cards_per_entry_ratio_defaults[index]);217_cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);218_cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);219_constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);220_young_other_cost_per_region_ms_seq->add(221young_other_cost_per_region_ms_defaults[index]);222_non_young_other_cost_per_region_ms_seq->add(223non_young_other_cost_per_region_ms_defaults[index]);224225// Below, we might need to calculate the pause time target based on226// the pause interval. When we do so we are going to give G1 maximum227// flexibility and allow it to do pauses when it needs to. So, we'll228// arrange that the pause interval to be pause time target + 1 to229// ensure that a) the pause time target is maximized with respect to230// the pause interval and b) we maintain the invariant that pause231// time target < pause interval. 
If the user does not want this232// maximum flexibility, they will have to set the pause interval233// explicitly.234235// First make sure that, if either parameter is set, its value is236// reasonable.237if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {238if (MaxGCPauseMillis < 1) {239vm_exit_during_initialization("MaxGCPauseMillis should be "240"greater than 0");241}242}243if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {244if (GCPauseIntervalMillis < 1) {245vm_exit_during_initialization("GCPauseIntervalMillis should be "246"greater than 0");247}248}249250// Then, if the pause time target parameter was not set, set it to251// the default value.252if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {253if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {254// The default pause time target in G1 is 200ms255FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);256} else {257// We do not allow the pause interval to be set without the258// pause time target259vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "260"without setting MaxGCPauseMillis");261}262}263264// Then, if the interval parameter was not set, set it according to265// the pause time target (this will also deal with the case when the266// pause time target is the default value).267if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {268FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);269}270271// Finally, make sure that the two parameters are consistent.272if (MaxGCPauseMillis >= GCPauseIntervalMillis) {273char buffer[256];274jio_snprintf(buffer, 256,275"MaxGCPauseMillis (%u) should be less than "276"GCPauseIntervalMillis (%u)",277MaxGCPauseMillis, GCPauseIntervalMillis);278vm_exit_during_initialization(buffer);279}280281double max_gc_time = (double) MaxGCPauseMillis / 1000.0;282double time_slice = (double) GCPauseIntervalMillis / 1000.0;283_mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);284285uintx confidence_perc = G1ConfidencePercent;286// Put an artificial ceiling on this so that it's not set to a silly 
value.287if (confidence_perc > 100) {288confidence_perc = 100;289warning("G1ConfidencePercent is set to a value that is too large, "290"it's been updated to %u", confidence_perc);291}292_sigma = (double) confidence_perc / 100.0;293294// start conservatively (around 50ms is about right)295_concurrent_mark_remark_times_ms->add(0.05);296_concurrent_mark_cleanup_times_ms->add(0.20);297_tenuring_threshold = MaxTenuringThreshold;298// _max_survivor_regions will be calculated by299// update_young_list_target_length() during initialization.300_max_survivor_regions = 0;301302assert(GCTimeRatio > 0,303"we should have set it to a default value set_g1_gc_flags() "304"if a user set it to 0");305_gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));306307uintx reserve_perc = G1ReservePercent;308// Put an artificial ceiling on this so that it's not set to a silly value.309if (reserve_perc > 50) {310reserve_perc = 50;311warning("G1ReservePercent is set to a value that is too large, "312"it's been updated to %u", reserve_perc);313}314_reserve_factor = (double) reserve_perc / 100.0;315// This will be set when the heap is expanded316// for the first time during initialization.317_reserve_regions = 0;318319_collectionSetChooser = new CollectionSetChooser();320}321322void G1CollectorPolicy::initialize_alignments() {323_space_alignment = HeapRegion::GrainBytes;324size_t card_table_alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable);325size_t page_size = UseLargePages ? 
os::large_page_size() : os::vm_page_size();326_heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size);327}328329void G1CollectorPolicy::initialize_flags() {330if (G1HeapRegionSize != HeapRegion::GrainBytes) {331FLAG_SET_ERGO(uintx, G1HeapRegionSize, HeapRegion::GrainBytes);332}333334if (SurvivorRatio < 1) {335vm_exit_during_initialization("Invalid survivor ratio specified");336}337CollectorPolicy::initialize_flags();338_young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags339}340341void G1CollectorPolicy::post_heap_initialize() {342uintx max_regions = G1CollectedHeap::heap()->max_regions();343size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes;344if (max_young_size != MaxNewSize) {345FLAG_SET_ERGO(uintx, MaxNewSize, max_young_size);346}347}348349G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true),350_min_desired_young_length(0), _max_desired_young_length(0) {351if (FLAG_IS_CMDLINE(NewRatio)) {352if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {353warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");354} else {355_sizer_kind = SizerNewRatio;356_adaptive_size = false;357return;358}359}360361if (NewSize > MaxNewSize) {362if (FLAG_IS_CMDLINE(MaxNewSize)) {363warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). 
"364"A new max generation size of " SIZE_FORMAT "k will be used.",365NewSize/K, MaxNewSize/K, NewSize/K);366}367MaxNewSize = NewSize;368}369370if (FLAG_IS_CMDLINE(NewSize)) {371_min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes),3721U);373if (FLAG_IS_CMDLINE(MaxNewSize)) {374_max_desired_young_length =375MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),3761U);377_sizer_kind = SizerMaxAndNewSize;378_adaptive_size = _min_desired_young_length != _max_desired_young_length;379} else {380_sizer_kind = SizerNewSizeOnly;381}382} else if (FLAG_IS_CMDLINE(MaxNewSize)) {383_max_desired_young_length =384MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),3851U);386_sizer_kind = SizerMaxNewSizeOnly;387}388}389390uint G1YoungGenSizer::calculate_default_min_length(uint new_number_of_heap_regions) {391uint default_value = (new_number_of_heap_regions * G1NewSizePercent) / 100;392return MAX2(1U, default_value);393}394395uint G1YoungGenSizer::calculate_default_max_length(uint new_number_of_heap_regions) {396uint default_value = (new_number_of_heap_regions * G1MaxNewSizePercent) / 100;397return MAX2(1U, default_value);398}399400void G1YoungGenSizer::recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length) {401assert(number_of_heap_regions > 0, "Heap must be initialized");402403switch (_sizer_kind) {404case SizerDefaults:405*min_young_length = calculate_default_min_length(number_of_heap_regions);406*max_young_length = calculate_default_max_length(number_of_heap_regions);407break;408case SizerNewSizeOnly:409*max_young_length = calculate_default_max_length(number_of_heap_regions);410*max_young_length = MAX2(*min_young_length, *max_young_length);411break;412case SizerMaxNewSizeOnly:413*min_young_length = calculate_default_min_length(number_of_heap_regions);414*min_young_length = MIN2(*min_young_length, *max_young_length);415break;416case SizerMaxAndNewSize:417// Do nothing. 
Values set on the command line, don't update them at runtime.418break;419case SizerNewRatio:420*min_young_length = number_of_heap_regions / (NewRatio + 1);421*max_young_length = *min_young_length;422break;423default:424ShouldNotReachHere();425}426427assert(*min_young_length <= *max_young_length, "Invalid min/max young gen size values");428}429430uint G1YoungGenSizer::max_young_length(uint number_of_heap_regions) {431// We need to pass the desired values because recalculation may not update these432// values in some cases.433uint temp = _min_desired_young_length;434uint result = _max_desired_young_length;435recalculate_min_max_young_length(number_of_heap_regions, &temp, &result);436return result;437}438439void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {440recalculate_min_max_young_length(new_number_of_heap_regions, &_min_desired_young_length,441&_max_desired_young_length);442}443444void G1CollectorPolicy::init() {445// Set aside an initial future to_space.446_g1 = G1CollectedHeap::heap();447448assert(Heap_lock->owned_by_self(), "Locking discipline.");449450initialize_gc_policy_counters();451452if (adaptive_young_list_length()) {453_young_list_fixed_length = 0;454} else {455_young_list_fixed_length = _young_gen_sizer->min_desired_young_length();456}457_free_regions_at_end_of_collection = _g1->num_free_regions();458update_young_list_target_length();459460// We may immediately start allocating regions and placing them on the461// collection set list. 
Initialize the per-collection set info462start_incremental_cset_building();463}464465// Create the jstat counters for the policy.466void G1CollectorPolicy::initialize_gc_policy_counters() {467_gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);468}469470bool G1CollectorPolicy::predict_will_fit(uint young_length,471double base_time_ms,472uint base_free_regions,473double target_pause_time_ms) {474if (young_length >= base_free_regions) {475// end condition 1: not enough space for the young regions476return false;477}478479double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);480size_t bytes_to_copy =481(size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);482double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);483double young_other_time_ms = predict_young_other_time_ms(young_length);484double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;485if (pause_time_ms > target_pause_time_ms) {486// end condition 2: prediction is over the target pause time487return false;488}489490size_t free_bytes =491(base_free_regions - young_length) * HeapRegion::GrainBytes;492if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {493// end condition 3: out-of-space (conservatively!)494return false;495}496497// success!498return true;499}500501void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {502// re-calculate the necessary reserve503double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;504// We use ceiling so that if reserve_regions_d is > 0.0 (but505// smaller than 1.0) we'll get 1.506_reserve_regions = (uint) ceil(reserve_regions_d);507508_young_gen_sizer->heap_size_changed(new_number_of_regions);509}510511uint G1CollectorPolicy::calculate_young_list_desired_min_length(512uint base_min_length) {513uint desired_min_length = 0;514if (adaptive_young_list_length()) {515if (_alloc_rate_ms_seq->num() > 3) {516double now_sec = os::elapsedTime();517double when_ms = 
_mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;518double alloc_rate_ms = predict_alloc_rate_ms();519desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);520} else {521// otherwise we don't have enough info to make the prediction522}523}524desired_min_length += base_min_length;525// make sure we don't go below any user-defined minimum bound526return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);527}528529uint G1CollectorPolicy::calculate_young_list_desired_max_length() {530// Here, we might want to also take into account any additional531// constraints (i.e., user-defined minimum bound). Currently, we532// effectively don't set this bound.533return _young_gen_sizer->max_desired_young_length();534}535536void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {537if (rs_lengths == (size_t) -1) {538// if it's set to the default value (-1), we should predict it;539// otherwise, use the given value.540rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);541}542543// Calculate the absolute and desired min bounds.544545// This is how many young regions we already have (currently: the survivors).546uint base_min_length = recorded_survivor_regions();547// This is the absolute minimum young length, which ensures that we548// can allocate one eden region in the worst-case.549uint absolute_min_length = base_min_length + 1;550uint desired_min_length =551calculate_young_list_desired_min_length(base_min_length);552if (desired_min_length < absolute_min_length) {553desired_min_length = absolute_min_length;554}555556// Calculate the absolute and desired max bounds.557558// We will try our best not to "eat" into the reserve.559uint absolute_max_length = 0;560if (_free_regions_at_end_of_collection > _reserve_regions) {561absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;562}563uint desired_max_length = calculate_young_list_desired_max_length();564if (desired_max_length > absolute_max_length) 
{565desired_max_length = absolute_max_length;566}567568uint young_list_target_length = 0;569if (adaptive_young_list_length()) {570if (gcs_are_young()) {571young_list_target_length =572calculate_young_list_target_length(rs_lengths,573base_min_length,574desired_min_length,575desired_max_length);576_rs_lengths_prediction = rs_lengths;577} else {578// Don't calculate anything and let the code below bound it to579// the desired_min_length, i.e., do the next GC as soon as580// possible to maximize how many old regions we can add to it.581}582} else {583// The user asked for a fixed young gen so we'll fix the young gen584// whether the next GC is young or mixed.585young_list_target_length = _young_list_fixed_length;586}587588// Make sure we don't go over the desired max length, nor under the589// desired min length. In case they clash, desired_min_length wins590// which is why that test is second.591if (young_list_target_length > desired_max_length) {592young_list_target_length = desired_max_length;593}594if (young_list_target_length < desired_min_length) {595young_list_target_length = desired_min_length;596}597598assert(young_list_target_length > recorded_survivor_regions(),599"we should be able to allocate at least one eden region");600assert(young_list_target_length >= absolute_min_length, "post-condition");601_young_list_target_length = young_list_target_length;602603update_max_gc_locker_expansion();604}605606uint607G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,608uint base_min_length,609uint desired_min_length,610uint desired_max_length) {611assert(adaptive_young_list_length(), "pre-condition");612assert(gcs_are_young(), "only call this for young GCs");613614// In case some edge-condition makes the desired max length too small...615if (desired_max_length <= desired_min_length) {616return desired_min_length;617}618619// We'll adjust min_young_length and max_young_length not to include620// the already allocated young regions (i.e., so they 
reflect the621// min and max eden regions we'll allocate). The base_min_length622// will be reflected in the predictions by the623// survivor_regions_evac_time prediction.624assert(desired_min_length > base_min_length, "invariant");625uint min_young_length = desired_min_length - base_min_length;626assert(desired_max_length > base_min_length, "invariant");627uint max_young_length = desired_max_length - base_min_length;628629double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;630double survivor_regions_evac_time = predict_survivor_regions_evac_time();631size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);632size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();633size_t scanned_cards = predict_young_card_num(adj_rs_lengths);634double base_time_ms =635predict_base_elapsed_time_ms(pending_cards, scanned_cards) +636survivor_regions_evac_time;637uint available_free_regions = _free_regions_at_end_of_collection;638uint base_free_regions = 0;639if (available_free_regions > _reserve_regions) {640base_free_regions = available_free_regions - _reserve_regions;641}642643// Here, we will make sure that the shortest young length that644// makes sense fits within the target pause time.645646if (predict_will_fit(min_young_length, base_time_ms,647base_free_regions, target_pause_time_ms)) {648// The shortest young length will fit into the target pause time;649// we'll now check whether the absolute maximum number of young650// regions will fit in the target pause time. 
If not, we'll do651// a binary search between min_young_length and max_young_length.652if (predict_will_fit(max_young_length, base_time_ms,653base_free_regions, target_pause_time_ms)) {654// The maximum young length will fit into the target pause time.655// We are done so set min young length to the maximum length (as656// the result is assumed to be returned in min_young_length).657min_young_length = max_young_length;658} else {659// The maximum possible number of young regions will not fit within660// the target pause time so we'll search for the optimal661// length. The loop invariants are:662//663// min_young_length < max_young_length664// min_young_length is known to fit into the target pause time665// max_young_length is known not to fit into the target pause time666//667// Going into the loop we know the above hold as we've just668// checked them. Every time around the loop we check whether669// the middle value between min_young_length and670// max_young_length fits into the target pause time. If it671// does, it becomes the new min. If it doesn't, it becomes672// the new max. 
This way we maintain the loop invariants.673674assert(min_young_length < max_young_length, "invariant");675uint diff = (max_young_length - min_young_length) / 2;676while (diff > 0) {677uint young_length = min_young_length + diff;678if (predict_will_fit(young_length, base_time_ms,679base_free_regions, target_pause_time_ms)) {680min_young_length = young_length;681} else {682max_young_length = young_length;683}684assert(min_young_length < max_young_length, "invariant");685diff = (max_young_length - min_young_length) / 2;686}687// The results is min_young_length which, according to the688// loop invariants, should fit within the target pause time.689690// These are the post-conditions of the binary search above:691assert(min_young_length < max_young_length,692"otherwise we should have discovered that max_young_length "693"fits into the pause target and not done the binary search");694assert(predict_will_fit(min_young_length, base_time_ms,695base_free_regions, target_pause_time_ms),696"min_young_length, the result of the binary search, should "697"fit into the pause target");698assert(!predict_will_fit(min_young_length + 1, base_time_ms,699base_free_regions, target_pause_time_ms),700"min_young_length, the result of the binary search, should be "701"optimal, so no larger length should fit into the pause target");702}703} else {704// Even the minimum length doesn't fit into the pause time705// target, return it as the result nevertheless.706}707return base_min_length + min_young_length;708}709710double G1CollectorPolicy::predict_survivor_regions_evac_time() {711double survivor_regions_evac_time = 0.0;712for (HeapRegion * r = _recorded_survivor_head;713r != NULL && r != _recorded_survivor_tail->get_next_young_region();714r = r->get_next_young_region()) {715survivor_regions_evac_time += predict_region_elapsed_time_ms(r, gcs_are_young());716}717return survivor_regions_evac_time;718}719720void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {721guarantee( 
adaptive_young_list_length(), "should not call this otherwise" );722723size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();724if (rs_lengths > _rs_lengths_prediction) {725// add 10% to avoid having to recalculate often726size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;727update_young_list_target_length(rs_lengths_prediction);728}729}730731732733HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,734bool is_tlab,735bool* gc_overhead_limit_was_exceeded) {736guarantee(false, "Not using this policy feature yet.");737return NULL;738}739740// This method controls how a collector handles one or more741// of its generations being fully allocated.742HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,743bool is_tlab) {744guarantee(false, "Not using this policy feature yet.");745return NULL;746}747748749#ifndef PRODUCT750bool G1CollectorPolicy::verify_young_ages() {751HeapRegion* head = _g1->young_list()->first_region();752return753verify_young_ages(head, _short_lived_surv_rate_group);754// also call verify_young_ages on any additional surv rate groups755}756757bool758G1CollectorPolicy::verify_young_ages(HeapRegion* head,759SurvRateGroup *surv_rate_group) {760guarantee( surv_rate_group != NULL, "pre-condition" );761762const char* name = surv_rate_group->name();763bool ret = true;764int prev_age = -1;765766for (HeapRegion* curr = head;767curr != NULL;768curr = curr->get_next_young_region()) {769SurvRateGroup* group = curr->surv_rate_group();770if (group == NULL && !curr->is_survivor()) {771gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);772ret = false;773}774775if (surv_rate_group == group) {776int age = curr->age_in_surv_rate_group();777778if (age < 0) {779gclog_or_tty->print_cr("## %s: encountered negative age", name);780ret = false;781}782783if (age <= prev_age) {784gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "785"(%d, %d)", name, age, prev_age);786ret = false;787}788prev_age = 
age;789}790}791792return ret;793}794#endif // PRODUCT795796void G1CollectorPolicy::record_full_collection_start() {797_full_collection_start_sec = os::elapsedTime();798record_heap_size_info_at_start(true /* full */);799// Release the future to-space so that it is available for compaction into.800_g1->set_full_collection();801}802803void G1CollectorPolicy::record_full_collection_end() {804// Consider this like a collection pause for the purposes of allocation805// since last pause.806double end_sec = os::elapsedTime();807double full_gc_time_sec = end_sec - _full_collection_start_sec;808double full_gc_time_ms = full_gc_time_sec * 1000.0;809810_trace_gen1_time_data.record_full_collection(full_gc_time_ms);811812update_recent_gc_times(end_sec, full_gc_time_ms);813814_g1->clear_full_collection();815816// "Nuke" the heuristics that control the young/mixed GC817// transitions and make sure we start with young GCs after the Full GC.818set_gcs_are_young(true);819_last_young_gc = false;820clear_initiate_conc_mark_if_possible();821clear_during_initial_mark_pause();822_in_marking_window = false;823_in_marking_window_im = false;824825_short_lived_surv_rate_group->start_adding_regions();826// also call this on any additional surv rate groups827828record_survivor_regions(0, NULL, NULL);829830_free_regions_at_end_of_collection = _g1->num_free_regions();831// Reset survivors SurvRateGroup.832_survivor_surv_rate_group->reset();833update_young_list_target_length();834_collectionSetChooser->clear();835}836837void G1CollectorPolicy::record_stop_world_start() {838_stop_world_start = os::elapsedTime();839}840841void G1CollectorPolicy::record_collection_pause_start(double start_time_sec, GCTracer &tracer) {842// We only need to do this here as the policy will only be applied843// to the GC we're about to start. 
so, no point is calculating this844// every time we calculate / recalculate the target young length.845update_survivors_policy(tracer);846847assert(_g1->used() == _g1->recalculate_used(),848err_msg("sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT,849_g1->used(), _g1->recalculate_used()));850851double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;852_trace_gen0_time_data.record_start_collection(s_w_t_ms);853_stop_world_start = 0.0;854855record_heap_size_info_at_start(false /* full */);856857phase_times()->record_cur_collection_start_sec(start_time_sec);858_pending_cards = _g1->pending_card_num();859860_collection_set_bytes_used_before = 0;861_bytes_copied_during_gc = 0;862863_last_gc_was_young = false;864865// do that for any other surv rate groups866_short_lived_surv_rate_group->stop_adding_regions();867_survivors_age_table.clear();868869assert( verify_young_ages(), "region age verification" );870}871872void G1CollectorPolicy::record_concurrent_mark_init_end(double873mark_init_elapsed_time_ms) {874_during_marking = true;875assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");876clear_during_initial_mark_pause();877_cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;878}879880void G1CollectorPolicy::record_concurrent_mark_remark_start() {881_mark_remark_start_sec = os::elapsedTime();882_during_marking = false;883}884885void G1CollectorPolicy::record_concurrent_mark_remark_end() {886double end_time_sec = os::elapsedTime();887double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;888_concurrent_mark_remark_times_ms->add(elapsed_time_ms);889_cur_mark_stop_world_time_ms += elapsed_time_ms;890_prev_collection_pause_end_ms += elapsed_time_ms;891892_mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);893}894895void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {896_mark_cleanup_start_sec = os::elapsedTime();897}898899void 
G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {900_last_young_gc = true;901_in_marking_window = false;902}903904void G1CollectorPolicy::record_concurrent_pause() {905if (_stop_world_start > 0.0) {906double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;907_trace_gen0_time_data.record_yield_time(yield_ms);908}909}910911bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {912if (_g1->concurrent_mark()->cmThread()->during_cycle()) {913return false;914}915916size_t marking_initiating_used_threshold =917(_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;918size_t cur_used_bytes = _g1->non_young_capacity_bytes();919size_t alloc_byte_size = alloc_word_size * HeapWordSize;920921if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {922if (gcs_are_young() && !_last_young_gc) {923ergo_verbose5(ErgoConcCycles,924"request concurrent cycle initiation",925ergo_format_reason("occupancy higher than threshold")926ergo_format_byte("occupancy")927ergo_format_byte("allocation request")928ergo_format_byte_perc("threshold")929ergo_format_str("source"),930cur_used_bytes,931alloc_byte_size,932marking_initiating_used_threshold,933(double) InitiatingHeapOccupancyPercent,934source);935return true;936} else {937ergo_verbose5(ErgoConcCycles,938"do not request concurrent cycle initiation",939ergo_format_reason("still doing mixed collections")940ergo_format_byte("occupancy")941ergo_format_byte("allocation request")942ergo_format_byte_perc("threshold")943ergo_format_str("source"),944cur_used_bytes,945alloc_byte_size,946marking_initiating_used_threshold,947(double) InitiatingHeapOccupancyPercent,948source);949}950}951952return false;953}954955// Anything below that is considered to be zero956#define MIN_TIMER_GRANULARITY 0.0000001957958void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info) {959double end_time_sec = 
os::elapsedTime();
  assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
         "otherwise, the subtraction below does not make sense");
  size_t rs_size =
    _cur_collection_pause_used_regions_at_start - cset_region_length();
  size_t cur_used_bytes = _g1->used();
  assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
  bool last_pause_included_initial_mark = false;
  // Do not feed the predictors from a pause that experienced an
  // evacuation failure; its timings are not representative.
  bool update_stats = !_g1->evacuation_failed();

#ifndef PRODUCT
  if (G1YoungSurvRateVerbose) {
    gclog_or_tty->cr();
    _short_lived_surv_rate_group->print();
    // do that for any other surv rate groups too
  }
#endif // PRODUCT

  last_pause_included_initial_mark = during_initial_mark_pause();
  if (last_pause_included_initial_mark) {
    record_concurrent_mark_init_end(0.0);
  } else if (need_to_start_conc_mark("end of GC")) {
    // Note: this might have already been set, if during the last
    // pause we decided to start a cycle but at the beginning of
    // this pause we decided to postpone it. That's OK.
    set_initiate_conc_mark_if_possible();
  }

  _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
                          end_time_sec, false);

  evacuation_info.set_collectionset_used_before(_collection_set_bytes_used_before);
  evacuation_info.set_bytes_copied(_bytes_copied_during_gc);

  if (update_stats) {
    _trace_gen0_time_data.record_end_collection(pause_time_ms, phase_times());
    // this is where we update the allocation rate of the application
    double app_time_ms =
      (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
    if (app_time_ms < MIN_TIMER_GRANULARITY) {
      // This usually happens due to the timer not having the required
      // granularity. Some Linuxes are the usual culprits.
      // We'll just set it to something (arbitrarily) small.
      app_time_ms = 1.0;
    }
    // We maintain the invariant that all objects allocated by mutator
    // threads will be allocated out of eden regions. So, we can use
    // the eden region number allocated since the previous GC to
    // calculate the application's allocate rate. The only exception
    // to that is humongous objects that are allocated separately. But
    // given that humongous object allocations do not really affect
    // either the pause's duration nor when the next pause will take
    // place we can safely ignore them here.
    uint regions_allocated = eden_cset_region_length();
    double alloc_rate_ms = (double) regions_allocated / app_time_ms;
    _alloc_rate_ms_seq->add(alloc_rate_ms);

    double interval_ms =
      (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
    update_recent_gc_times(end_time_sec, pause_time_ms);
    _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
    if (recent_avg_pause_time_ratio() < 0.0 ||
        (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
#ifndef PRODUCT
      // Dump info to allow post-facto debugging
      gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
      gclog_or_tty->print_cr("-------------------------------------------");
      gclog_or_tty->print_cr("Recent GC Times (ms):");
      _recent_gc_times_ms->dump();
      gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
      _recent_prev_end_times_for_all_gcs_sec->dump();
      gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
                             _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
      // In debug mode, terminate the JVM if the user wants to debug at this point.
      assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
#endif // !PRODUCT
      // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
      // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
      if (_recent_avg_pause_time_ratio < 0.0) {
        _recent_avg_pause_time_ratio = 0.0;
      } else {
        assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
        _recent_avg_pause_time_ratio = 1.0;
      }
    }
  }

  bool new_in_marking_window = _in_marking_window;
  bool new_in_marking_window_im = false;
  if (last_pause_included_initial_mark) {
    new_in_marking_window = true;
    new_in_marking_window_im = true;
  }

  if (_last_young_gc) {
    // This is supposed to be the "last young GC" before we start
    // doing mixed GCs. Here we decide whether to start mixed GCs or not.

    if (!last_pause_included_initial_mark) {
      if (next_gc_should_be_mixed("start mixed GCs",
                                  "do not start mixed GCs")) {
        set_gcs_are_young(false);
      }
    } else {
      ergo_verbose0(ErgoMixedGCs,
                    "do not start mixed GCs",
                    ergo_format_reason("concurrent cycle is about to start"));
    }
    _last_young_gc = false;
  }

  if (!_last_gc_was_young) {
    // This is a mixed GC. Here we decide whether to continue doing
    // mixed GCs or not.

    if (!next_gc_should_be_mixed("continue mixed GCs",
                                 "do not continue mixed GCs")) {
      set_gcs_are_young(true);
    }
  }

  _short_lived_surv_rate_group->start_adding_regions();
  // do that for any other surv rate groups

  if (update_stats) {
    // Feed this pause's measurements into the per-phase cost predictors.
    double cost_per_card_ms = 0.0;
    if (_pending_cards > 0) {
      cost_per_card_ms = phase_times()->average_time_ms(G1GCPhaseTimes::UpdateRS) / (double) _pending_cards;
      _cost_per_card_ms_seq->add(cost_per_card_ms);
    }

    size_t cards_scanned = _g1->cards_scanned();

    double cost_per_entry_ms = 0.0;
    if (cards_scanned > 10) {
      cost_per_entry_ms = phase_times()->average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
      if (_last_gc_was_young) {
        _cost_per_entry_ms_seq->add(cost_per_entry_ms);
      } else {
        _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
      }
    }

    if (_max_rs_lengths > 0) {
      double cards_per_entry_ratio =
        (double) cards_scanned / (double) _max_rs_lengths;
      if (_last_gc_was_young) {
        _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
      } else {
        _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
      }
    }

    // This is defensive. For a while _max_rs_lengths could get
    // smaller than _recorded_rs_lengths which was causing
    // rs_length_diff to get very large and mess up the RSet length
    // predictions. The reason was unsafe concurrent updates to the
    // _inc_cset_recorded_rs_lengths field which the code below guards
    // against (see CR 7118202). This bug has now been fixed (see CR
    // 7119027). However, I'm still worried that
    // _inc_cset_recorded_rs_lengths might still end up somewhat
    // inaccurate. The concurrent refinement thread calculates an
    // RSet's length concurrently with other CR threads updating it
    // which might cause it to calculate the length incorrectly (if,
    // say, it's in mid-coarsening). So I'll leave in the defensive
    // conditional below just in case.
    size_t rs_length_diff = 0;
    if (_max_rs_lengths > _recorded_rs_lengths) {
      rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
    }
    _rs_length_diff_seq->add((double) rs_length_diff);

    size_t freed_bytes = _heap_used_bytes_before_gc - cur_used_bytes;

    if (_collection_set_bytes_used_before > freed_bytes) {
      size_t copied_bytes = _collection_set_bytes_used_before - freed_bytes;
      double average_copy_time = phase_times()->average_time_ms(G1GCPhaseTimes::ObjCopy);
      double cost_per_byte_ms = average_copy_time / (double) copied_bytes;
      if (_in_marking_window) {
        _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
      } else {
        _cost_per_byte_ms_seq->add(cost_per_byte_ms);
      }
    }

    double all_other_time_ms = pause_time_ms -
      (phase_times()->average_time_ms(G1GCPhaseTimes::UpdateRS) + phase_times()->average_time_ms(G1GCPhaseTimes::ScanRS) +
       phase_times()->average_time_ms(G1GCPhaseTimes::ObjCopy) + phase_times()->average_time_ms(G1GCPhaseTimes::Termination));

    double young_other_time_ms = 0.0;
    if (young_cset_region_length() > 0) {
      young_other_time_ms =
        phase_times()->young_cset_choice_time_ms() +
        phase_times()->young_free_cset_time_ms();
      _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
                                               (double) young_cset_region_length());
    }
    double non_young_other_time_ms = 0.0;
    if (old_cset_region_length() > 0) {
      non_young_other_time_ms =
        phase_times()->non_young_cset_choice_time_ms() +
        phase_times()->non_young_free_cset_time_ms();

      _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
                                                   (double) old_cset_region_length());
    }

    double constant_other_time_ms = all_other_time_ms -
      (young_other_time_ms + non_young_other_time_ms);
    _constant_other_time_ms_seq->add(constant_other_time_ms);

    double survival_ratio = 0.0;
    if (_collection_set_bytes_used_before > 0) {
      survival_ratio = (double) _bytes_copied_during_gc /
                       (double) _collection_set_bytes_used_before;
    }

    _pending_cards_seq->add((double) _pending_cards);
    _rs_lengths_seq->add((double) _max_rs_lengths);
  }

  _in_marking_window = new_in_marking_window;
  _in_marking_window_im = new_in_marking_window_im;
  _free_regions_at_end_of_collection = _g1->num_free_regions();
  update_young_list_target_length();

  // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
  double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
  adjust_concurrent_refinement(phase_times()->average_time_ms(G1GCPhaseTimes::UpdateRS),
                               phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS), update_rs_time_goal_ms);

  _collectionSetChooser->verify();
}

// Human-readable byte sizes for the heap transition log lines.
#define EXT_SIZE_FORMAT "%.1f%s"
#define EXT_SIZE_PARAMS(bytes)                  \
  byte_size_in_proper_unit((double)(bytes)),    \
  proper_unit_for_byte_size((bytes))

// Snapshot "before GC" heap sizes used by the heap-transition printing
// and by end-of-pause accounting.
void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
  YoungList* young_list = _g1->young_list();
  _eden_used_bytes_before_gc = young_list->eden_used_bytes();
  _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
  _heap_capacity_bytes_before_gc = _g1->capacity();
  _heap_used_bytes_before_gc = _g1->used();
  _cur_collection_pause_used_regions_at_start = _g1->num_used_regions();

  _eden_capacity_bytes_before_gc =
    (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;

  if (full) {
    _metaspace_used_bytes_before_gc = MetaspaceAux::used_bytes();
  }
}

void G1CollectorPolicy::print_heap_transition() {
  _g1->print_size_transition(gclog_or_tty,
                             _heap_used_bytes_before_gc,
                             _g1->used(),
                             _g1->capacity());
}

// Print the Eden/Survivors/Heap before->after transition (and, for a
// Full GC, the metaspace change).
void G1CollectorPolicy::print_detailed_heap_transition(bool full) {
  YoungList* young_list = _g1->young_list();

  size_t eden_used_bytes_after_gc =
young_list->eden_used_bytes();
  size_t survivor_used_bytes_after_gc = young_list->survivor_used_bytes();
  size_t heap_used_bytes_after_gc = _g1->used();

  size_t heap_capacity_bytes_after_gc = _g1->capacity();
  size_t eden_capacity_bytes_after_gc =
    (_young_list_target_length * HeapRegion::GrainBytes) - survivor_used_bytes_after_gc;

  gclog_or_tty->print(
    " [Eden: " EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ")->" EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ") "
    "Survivors: " EXT_SIZE_FORMAT "->" EXT_SIZE_FORMAT " "
    "Heap: " EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ")->"
    EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ")]",
    EXT_SIZE_PARAMS(_eden_used_bytes_before_gc),
    EXT_SIZE_PARAMS(_eden_capacity_bytes_before_gc),
    EXT_SIZE_PARAMS(eden_used_bytes_after_gc),
    EXT_SIZE_PARAMS(eden_capacity_bytes_after_gc),
    EXT_SIZE_PARAMS(_survivor_used_bytes_before_gc),
    EXT_SIZE_PARAMS(survivor_used_bytes_after_gc),
    EXT_SIZE_PARAMS(_heap_used_bytes_before_gc),
    EXT_SIZE_PARAMS(_heap_capacity_bytes_before_gc),
    EXT_SIZE_PARAMS(heap_used_bytes_after_gc),
    EXT_SIZE_PARAMS(heap_capacity_bytes_after_gc));

  if (full) {
    MetaspaceAux::print_metaspace_change(_metaspace_used_bytes_before_gc);
  }

  gclog_or_tty->cr();
}

// Retune the concurrent refinement zones (green/yellow/red) and the
// dirty-card queue thresholds so that RSet updating stays within
// goal_ms of the pause budget.
void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
                                                     double update_rs_processed_buffers,
                                                     double goal_ms) {
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();

  if (G1UseAdaptiveConcRefinement) {
    // Yellow/red zone multipliers and the grow/shrink factors for the
    // green zone.
    const int k_gy = 3, k_gr = 6;
    const double inc_k = 1.1, dec_k = 0.9;

    int g = cg1r->green_zone();
    if (update_rs_time > goal_ms) {
      g = (int)(g * dec_k);  // Can become 0, that's OK. That would mean a mutator-only processing.
    } else {
      if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
        g = (int)MAX2(g * inc_k, g + 1.0);
      }
    }
    // Change the refinement threads params
    cg1r->set_green_zone(g);
    cg1r->set_yellow_zone(g * k_gy);
    cg1r->set_red_zone(g * k_gr);
    cg1r->reinitialize_threads();

    int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
    int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
                                    cg1r->yellow_zone());
    // Change the barrier params
    dcqs.set_process_completed_threshold(processing_threshold);
    dcqs.set_max_completed_queue(cg1r->red_zone());
  }

  int curr_queue_size = dcqs.completed_buffers_num();
  if (curr_queue_size >= cg1r->yellow_zone()) {
    dcqs.set_completed_queue_padding(curr_queue_size);
  } else {
    dcqs.set_completed_queue_padding(0);
  }
  dcqs.notify_if_necessary();
}

// Predicted pause time (ms) common to every GC: RSet update + RSet scan
// + the "constant other" overhead.
double
G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
                                                size_t scanned_cards) {
  return
    predict_rs_update_time_ms(pending_cards) +
    predict_rs_scan_time_ms(scanned_cards) +
    predict_constant_other_time_ms();
}

// Overload that first predicts the number of cards to scan from the
// predicted RSet length difference, using the young or mixed model
// depending on the current GC mode.
double
G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
  size_t rs_length = predict_rs_length_diff();
  size_t card_num;
  if (gcs_are_young()) {
    card_num = predict_young_card_num(rs_length);
  } else {
    card_num = predict_non_young_card_num(rs_length);
  }
  return predict_base_elapsed_time_ms(pending_cards, card_num);
}

// Estimate the number of live bytes that evacuating this region would
// copy: marked regions use their max-live figure, young regions use the
// age-based survival-rate prediction.
size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
  size_t bytes_to_copy;
  if (hr->is_marked())
    bytes_to_copy = hr->max_live_bytes();
  else {
    assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
    int age = hr->age_in_surv_rate_group();
    double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
    bytes_to_copy = (size_t) ((double) hr->used() *
yg_surv_rate);
  }
  return bytes_to_copy;
}

// Predict how long evacuating the given region would take, combining
// RSet scan, object copy, and per-region "other" time.
double
G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
                                                  bool for_young_gc) {
  size_t rs_length = hr->rem_set()->occupied();
  size_t card_num;

  // Predicting the number of cards is based on which type of GC
  // we're predicting for.
  if (for_young_gc) {
    card_num = predict_young_card_num(rs_length);
  } else {
    card_num = predict_non_young_card_num(rs_length);
  }
  size_t bytes_to_copy = predict_bytes_to_copy(hr);

  double region_elapsed_time_ms =
    predict_rs_scan_time_ms(card_num) +
    predict_object_copy_time_ms(bytes_to_copy);

  // The prediction of the "other" time for this region is based
  // upon the region type and NOT the GC type.
  if (hr->is_young()) {
    region_elapsed_time_ms += predict_young_other_time_ms(1);
  } else {
    region_elapsed_time_ms += predict_non_young_other_time_ms(1);
  }
  return region_elapsed_time_ms;
}

// Reset the collection-set region counters for a new pause; the old
// count starts at zero and is bumped as old regions are added.
void
G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length,
                                            uint survivor_cset_region_length) {
  _eden_cset_region_length = eden_cset_region_length;
  _survivor_cset_region_length = survivor_cset_region_length;
  _old_cset_region_length = 0;
}

void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
  _recorded_rs_lengths = rs_lengths;
}

// Record a completed pause in the recent-GC history used for the pause
// time ratio and allocation-rate calculations.
void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
                                               double elapsed_ms) {
  _recent_gc_times_ms->add(elapsed_ms);
  _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
  _prev_collection_pause_end_ms = end_time_sec * 1000.0;
}

// How many bytes to expand the heap by, or 0 if no expansion is
// warranted. Expansion is attempted when the recent GC overhead
// exceeds the configured threshold.
size_t G1CollectorPolicy::expansion_amount() {
  double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
  double threshold = _gc_overhead_perc;
  if (recent_gc_overhead > threshold) {
    // We will double the existing space, or take
    // G1ExpandByPercentOfAvailable % of the available expansion
    // space, whichever is smaller, bounded below by a minimum
    // expansion (unless that's all that's left.)
    const size_t min_expand_bytes = 1*M;
    size_t reserved_bytes = _g1->max_capacity();
    size_t committed_bytes = _g1->capacity();
    size_t uncommitted_bytes = reserved_bytes - committed_bytes;
    size_t expand_bytes;
    size_t expand_bytes_via_pct =
      uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
    expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
    expand_bytes = MAX2(expand_bytes, min_expand_bytes);
    expand_bytes = MIN2(expand_bytes, uncommitted_bytes);

    ergo_verbose5(ErgoHeapSizing,
                  "attempt heap expansion",
                  ergo_format_reason("recent GC overhead higher than "
                                     "threshold after GC")
                  ergo_format_perc("recent GC overhead")
                  ergo_format_perc("threshold")
                  ergo_format_byte("uncommitted")
                  ergo_format_byte_perc("calculated expansion amount"),
                  recent_gc_overhead, threshold,
                  uncommitted_bytes,
                  expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);

    return expand_bytes;
  } else {
    return 0;
  }
}

void G1CollectorPolicy::print_tracing_info() const {
  _trace_gen0_time_data.print();
  _trace_gen1_time_data.print();
}

void G1CollectorPolicy::print_yg_surv_rate_info() const {
#ifndef PRODUCT
  _short_lived_surv_rate_group->print_surv_rate_summary();
  // add this call for any other surv rate groups
#endif // PRODUCT
}

// True when the young list has reached its target length.
bool G1CollectorPolicy::is_young_list_full() {
  uint young_list_length = _g1->young_list()->length();
  uint young_list_target_length = _young_list_target_length;
  return young_list_length >= young_list_target_length;
}

// True while the young list is still below the (GC-locker-extended)
// maximum length.
bool G1CollectorPolicy::can_expand_young_list() {
  uint young_list_length = _g1->young_list()->length();
  uint young_list_max_length = _young_list_max_length;
  return young_list_length < young_list_max_length;
}

// Recompute the maximum young list length, allowing extra eden regions
// while the GC locker is active (GCLockerEdenExpansionPercent).
void G1CollectorPolicy::update_max_gc_locker_expansion() {
  uint expansion_region_num = 0;
  if
(GCLockerEdenExpansionPercent > 0) {
    double perc = (double) GCLockerEdenExpansionPercent / 100.0;
    double expansion_region_num_d = perc * (double) _young_list_target_length;
    // We use ceiling so that if expansion_region_num_d is > 0.0 (but
    // less than 1.0) we'll get 1.
    expansion_region_num = (uint) ceil(expansion_region_num_d);
  } else {
    assert(expansion_region_num == 0, "sanity");
  }
  _young_list_max_length = _young_list_target_length + expansion_region_num;
  assert(_young_list_target_length <= _young_list_max_length, "post-condition");
}

// Calculates survivor space parameters: the maximum number of survivor
// regions and the resulting tenuring threshold.
void G1CollectorPolicy::update_survivors_policy(GCTracer &tracer) {
  double max_survivor_regions_d =
    (double) _young_list_target_length / (double) SurvivorRatio;
  // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _max_survivor_regions = (uint) ceil(max_survivor_regions_d);

  _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
        HeapRegion::GrainWords * _max_survivor_regions, tracer);
}

// Request an initial-mark pause for an explicit GC cause, unless a
// concurrent cycle is already running. Returns whether the request
// was accepted.
bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
                                                      GCCause::Cause gc_cause) {
  bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  if (!during_cycle) {
    ergo_verbose1(ErgoConcCycles,
                  "request concurrent cycle initiation",
                  ergo_format_reason("requested by GC cause")
                  ergo_format_str("GC cause"),
                  GCCause::to_string(gc_cause));
    set_initiate_conc_mark_if_possible();
    return true;
  } else {
    ergo_verbose1(ErgoConcCycles,
                  "do not request concurrent cycle initiation",
                  ergo_format_reason("concurrent cycle already in progress")
                  ergo_format_str("GC cause"),
                  GCCause::to_string(gc_cause));
    return false;
  }
}

void
G1CollectorPolicy::decide_on_conc_mark_initiation() {
  // We are about to decide on whether this pause will be an
  // initial-mark pause.

  // First, during_initial_mark_pause() should not be already set. We
  // will set it here if we have to. However, it should be cleared by
  // the end of the pause (it's only set for the duration of an
  // initial-mark pause).
  assert(!during_initial_mark_pause(), "pre-condition");

  if (initiate_conc_mark_if_possible()) {
    // We had noticed on a previous pause that the heap occupancy has
    // gone over the initiating threshold and we should start a
    // concurrent marking cycle. So we might initiate one.

    bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
    if (!during_cycle) {
      // The concurrent marking thread is not "during a cycle", i.e.,
      // it has completed the last one. So we can go ahead and
      // initiate a new cycle.

      set_during_initial_mark_pause();
      // We do not allow mixed GCs during marking.
      if (!gcs_are_young()) {
        set_gcs_are_young(true);
        ergo_verbose0(ErgoMixedGCs,
                      "end mixed GCs",
                      ergo_format_reason("concurrent cycle is about to start"));
      }

      // And we can now clear initiate_conc_mark_if_possible() as
      // we've already acted on it.
      clear_initiate_conc_mark_if_possible();

      ergo_verbose0(ErgoConcCycles,
                    "initiate concurrent cycle",
                    ergo_format_reason("concurrent cycle initiation requested"));
    } else {
      // The concurrent marking thread is still finishing up the
      // previous cycle. If we start one right now the two cycles
      // overlap. In particular, the concurrent marking thread might
      // be in the process of clearing the next marking bitmap (which
      // we will use for the next cycle if we start one). Starting a
      // cycle now will be bad given that parts of the marking
      // information might get cleared by the marking thread. And we
      // cannot wait for the marking thread to finish the cycle as it
      // periodically yields while clearing the next marking bitmap
      // and, if it's in a yield point, it's waiting for us to
      // finish. So, at this point we will not start a cycle and we'll
      // let the concurrent marking thread complete the last one.
      ergo_verbose0(ErgoConcCycles,
                    "do not initiate concurrent cycle",
                    ergo_format_reason("concurrent cycle already in progress"));
    }
  }
}

// Serial closure that feeds marked regions into the CollectionSetChooser.
class KnownGarbageClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  CollectionSetChooser* _hrSorted;

public:
  KnownGarbageClosure(CollectionSetChooser* hrSorted) :
    _g1h(G1CollectedHeap::heap()), _hrSorted(hrSorted) { }

  bool doHeapRegion(HeapRegion* r) {
    // We only include humongous regions in collection
    // sets when concurrent mark shows that their contained object is
    // unreachable.

    // Do we have any marking information for this region?
    if (r->is_marked()) {
      // We will skip any region that's currently used as an old GC
      // alloc region (we should not consider those for collection
      // before we fill them up).
      if (_hrSorted->should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
        _hrSorted->add_region(r);
      }
    }
    return false;
  }
};

// Parallel counterpart of KnownGarbageClosure; adds regions via the
// chunked CSetChooserParUpdater.
class ParKnownGarbageHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  CSetChooserParUpdater _cset_updater;

public:
  ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
                           uint chunk_size) :
    _g1h(G1CollectedHeap::heap()),
    _cset_updater(hrSorted, true /* parallel */, chunk_size) { }

  bool doHeapRegion(HeapRegion* r) {
    // Do we have any marking information for this region?
    if (r->is_marked()) {
      // We will skip any region that's currently used as an old GC
      // alloc region (we should not consider those for collection
      // before we fill them up).
      if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
        _cset_updater.add_region(r);
      }
    }
    return false;
  }
};

// Gang task that runs ParKnownGarbageHRClosure over region chunks.
class ParKnownGarbageTask: public AbstractGangTask {
  CollectionSetChooser* _hrSorted;
  uint _chunk_size;
  G1CollectedHeap*
_g1;
public:
  ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size) :
    AbstractGangTask("ParKnownGarbageTask"),
    _hrSorted(hrSorted), _chunk_size(chunk_size),
    _g1(G1CollectedHeap::heap()) { }

  void work(uint worker_id) {
    ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);

    // Back to zero for the claim value.
    _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id,
                                         _g1->workers()->active_workers(),
                                         HeapRegion::InitialClaimValue);
  }
};

// After concurrent-mark cleanup: rebuild and sort the collection set
// chooser (in parallel when possible) and record the cleanup timing.
void
G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
  _collectionSetChooser->clear();

  uint region_num = _g1->num_regions();
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    const uint OverpartitionFactor = 4;
    uint WorkUnit;
    // The use of MinChunkSize = 8 in the original code
    // causes some assertion failures when the total number of
    // region is less than 8. The code here tries to fix that.
    // Should the original code also be fixed?
    if (no_of_gc_threads > 0) {
      const uint MinWorkUnit = MAX2(region_num / no_of_gc_threads, 1U);
      WorkUnit = MAX2(region_num / (no_of_gc_threads * OverpartitionFactor),
                      MinWorkUnit);
    } else {
      assert(no_of_gc_threads > 0,
             "The active gc workers should be greater than 0");
      // In a product build do something reasonable to avoid a crash.
      const uint MinWorkUnit = MAX2(region_num / (uint) ParallelGCThreads, 1U);
      WorkUnit =
        MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
             MinWorkUnit);
    }
    _collectionSetChooser->prepare_for_par_region_addition(_g1->num_regions(),
                                                           WorkUnit);
    ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
                                            (int) WorkUnit);
    _g1->workers()->run_task(&parKnownGarbageTask);

    assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
           "sanity check");
  } else {
    KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
    _g1->heap_region_iterate(&knownGarbagecl);
  }

  _collectionSetChooser->sort_regions();

  double end_sec = os::elapsedTime();
  double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
  _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  _prev_collection_pause_end_ms += elapsed_time_ms;
  _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, true);
}

// Add the heap region at the head of the non-incremental collection set
void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
  assert(_inc_cset_build_state == Active, "Precondition");
  assert(hr->is_old(), "the region should be old");

  assert(!hr->in_collection_set(), "should not already be in the CSet");
  hr->set_in_collection_set(true);
  hr->set_next_in_collection_set(_collection_set);
  _collection_set = hr;
  _collection_set_bytes_used_before += hr->used();
  _g1->register_old_region_with_in_cset_fast_test(hr);
  size_t rs_length = hr->rem_set()->occupied();
  _recorded_rs_lengths += rs_length;
  _old_cset_region_length += 1;
}

// Initialize the per-collection-set information
void G1CollectorPolicy::start_incremental_cset_building() {
  assert(_inc_cset_build_state == Inactive, "Precondition");

  _inc_cset_head = NULL;
  _inc_cset_tail = NULL;
  _inc_cset_bytes_used_before = 0;

  _inc_cset_max_finger = 0;
  _inc_cset_recorded_rs_lengths = 0;
  _inc_cset_recorded_rs_lengths_diffs = 0;
  _inc_cset_predicted_elapsed_time_ms = 0.0;
  _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
  _inc_cset_build_state = Active;
}

// Fold the concurrently-accumulated *_diffs corrections into the main
// incremental-CSet fields; must run at a safepoint.
void G1CollectorPolicy::finalize_incremental_cset_building() {
  assert(_inc_cset_build_state == Active, "Precondition");
  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");

  // The two "main" fields, _inc_cset_recorded_rs_lengths and
// _inc_cset_predicted_elapsed_time_ms, are updated by the thread
  // that adds a new region to the CSet. Further updates by the
  // concurrent refinement thread that samples the young RSet lengths
  // are accumulated in the *_diffs fields. Here we add the diffs to
  // the "main" fields.

  if (_inc_cset_recorded_rs_lengths_diffs >= 0) {
    _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs;
  } else {
    // This is defensive. The diff should in theory be always positive
    // as RSets can only grow between GCs. However, given that we
    // sample their size concurrently with other threads updating them
    // it's possible that we might get the wrong size back, which
    // could make the calculations somewhat inaccurate.
    size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs);
    if (_inc_cset_recorded_rs_lengths >= diffs) {
      _inc_cset_recorded_rs_lengths -= diffs;
    } else {
      _inc_cset_recorded_rs_lengths = 0;
    }
  }
  _inc_cset_predicted_elapsed_time_ms +=
    _inc_cset_predicted_elapsed_time_ms_diffs;

  _inc_cset_recorded_rs_lengths_diffs = 0;
  _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
}

// Account a region's RSet length and predicted evacuation time in the
// incremental CSet totals, and cache both values on the region itself.
void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
  // This routine is used when:
  // * adding survivor regions to the incremental cset at the end of an
  //   evacuation pause,
  // * adding the current allocation region to the incremental cset
  //   when it is retired, and
  // * updating existing policy information for a region in the
  //   incremental cset via young list RSet sampling.
  // Therefore this routine may be called at a safepoint by the
  // VM thread, or in-between safepoints by mutator threads (when
  // retiring the current allocation region) or a concurrent
  // refine thread (RSet sampling).

  double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
  size_t used_bytes = hr->used();
  _inc_cset_recorded_rs_lengths += rs_length;
  _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
  _inc_cset_bytes_used_before += used_bytes;

  // Cache the values we have added to the aggregated information
  // in the heap region in case we have to remove this region from
  // the incremental collection set, or it is updated by the
  // rset sampling code
  hr->set_recorded_rs_length(rs_length);
  hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
}

// RSet-sampling update for a young region already in the incremental
// CSet: record the delta against the cached values in the *_diffs
// fields (folded in later at a safepoint).
void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
                                                     size_t new_rs_length) {
  // Update the CSet information that is dependent on the new RS length
  assert(hr->is_young(), "Precondition");
  assert(!SafepointSynchronize::is_at_safepoint(),
         "should not be at a safepoint");

  // We could have updated _inc_cset_recorded_rs_lengths and
  // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
  // that atomically, as this code is executed by a concurrent
  // refinement thread, potentially concurrently with a mutator thread
  // allocating a new region and also updating the same fields. To
  // avoid the atomic operations we accumulate these updates on two
  // separate fields (*_diffs) and we'll just add them to the "main"
  // fields at the start of a GC.

  ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
  ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
  _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;

  double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
  double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
  double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
  _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;

  hr->set_recorded_rs_length(new_rs_length);
  hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
}

void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
  assert(hr->is_young(), "invariant");
  assert(hr->young_index_in_cset() > -1, "should have already been set");
  assert(_inc_cset_build_state == Active, "Precondition");

  // We need to clear and set the cached recorded/cached collection set
  // information in the heap region here (before the region gets added
  // to the collection set).
An individual heap region's cached values1792// are calculated, aggregated with the policy collection set info,1793// and cached in the heap region here (initially) and (subsequently)1794// by the Young List sampling code.17951796size_t rs_length = hr->rem_set()->occupied();1797add_to_incremental_cset_info(hr, rs_length);17981799HeapWord* hr_end = hr->end();1800_inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);18011802assert(!hr->in_collection_set(), "invariant");1803hr->set_in_collection_set(true);1804assert( hr->next_in_collection_set() == NULL, "invariant");18051806_g1->register_young_region_with_in_cset_fast_test(hr);1807}18081809// Add the region at the RHS of the incremental cset1810void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {1811// We should only ever be appending survivors at the end of a pause1812assert(hr->is_survivor(), "Logic");18131814// Do the 'common' stuff1815add_region_to_incremental_cset_common(hr);18161817// Now add the region at the right hand side1818if (_inc_cset_tail == NULL) {1819assert(_inc_cset_head == NULL, "invariant");1820_inc_cset_head = hr;1821} else {1822_inc_cset_tail->set_next_in_collection_set(hr);1823}1824_inc_cset_tail = hr;1825}18261827// Add the region to the LHS of the incremental cset1828void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {1829// Survivors should be added to the RHS at the end of a pause1830assert(hr->is_eden(), "Logic");18311832// Do the 'common' stuff1833add_region_to_incremental_cset_common(hr);18341835// Add the region at the left hand side1836hr->set_next_in_collection_set(_inc_cset_head);1837if (_inc_cset_head == NULL) {1838assert(_inc_cset_tail == NULL, "Invariant");1839_inc_cset_tail = hr;1840}1841_inc_cset_head = hr;1842}18431844#ifndef PRODUCT1845void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {1846assert(list_head == inc_cset_head() || list_head == collection_set(), "must 
be");18471848st->print_cr("\nCollection_set:");1849HeapRegion* csr = list_head;1850while (csr != NULL) {1851HeapRegion* next = csr->next_in_collection_set();1852assert(csr->in_collection_set(), "bad CS");1853st->print_cr(" " HR_FORMAT ", P: " PTR_FORMAT "N: " PTR_FORMAT ", age: %4d",1854HR_FORMAT_PARAMS(csr),1855csr->prev_top_at_mark_start(), csr->next_top_at_mark_start(),1856csr->age_in_surv_rate_group_cond());1857csr = next;1858}1859}1860#endif // !PRODUCT18611862double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) {1863// Returns the given amount of reclaimable bytes (that represents1864// the amount of reclaimable space still to be collected) as a1865// percentage of the current heap capacity.1866size_t capacity_bytes = _g1->capacity();1867return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;1868}18691870bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,1871const char* false_action_str) {1872CollectionSetChooser* cset_chooser = _collectionSetChooser;1873if (cset_chooser->is_empty()) {1874ergo_verbose0(ErgoMixedGCs,1875false_action_str,1876ergo_format_reason("candidate old regions not available"));1877return false;1878}18791880// Is the amount of uncollected reclaimable space above G1HeapWastePercent?1881size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();1882double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);1883double threshold = (double) G1HeapWastePercent;1884if (reclaimable_perc <= threshold) {1885ergo_verbose4(ErgoMixedGCs,1886false_action_str,1887ergo_format_reason("reclaimable percentage not over threshold")1888ergo_format_region("candidate old regions")1889ergo_format_byte_perc("reclaimable")1890ergo_format_perc("threshold"),1891cset_chooser->remaining_regions(),1892reclaimable_bytes,1893reclaimable_perc, threshold);1894return false;1895}18961897ergo_verbose4(ErgoMixedGCs,1898true_action_str,1899ergo_format_reason("candidate old regions 
available")1900ergo_format_region("candidate old regions")1901ergo_format_byte_perc("reclaimable")1902ergo_format_perc("threshold"),1903cset_chooser->remaining_regions(),1904reclaimable_bytes,1905reclaimable_perc, threshold);1906return true;1907}19081909uint G1CollectorPolicy::calc_min_old_cset_length() {1910// The min old CSet region bound is based on the maximum desired1911// number of mixed GCs after a cycle. I.e., even if some old regions1912// look expensive, we should add them to the CSet anyway to make1913// sure we go through the available old regions in no more than the1914// maximum desired number of mixed GCs.1915//1916// The calculation is based on the number of marked regions we added1917// to the CSet chooser in the first place, not how many remain, so1918// that the result is the same during all mixed GCs that follow a cycle.19191920const size_t region_num = (size_t) _collectionSetChooser->length();1921const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);1922size_t result = region_num / gc_num;1923// emulate ceiling1924if (result * gc_num < region_num) {1925result += 1;1926}1927return (uint) result;1928}19291930uint G1CollectorPolicy::calc_max_old_cset_length() {1931// The max old CSet region bound is based on the threshold expressed1932// as a percentage of the heap size. 
I.e., it should bound the1933// number of old regions added to the CSet irrespective of how many1934// of them are available.19351936G1CollectedHeap* g1h = G1CollectedHeap::heap();1937const size_t region_num = g1h->num_regions();1938const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;1939size_t result = region_num * perc / 100;1940// emulate ceiling1941if (100 * result < region_num * perc) {1942result += 1;1943}1944return (uint) result;1945}194619471948void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info) {1949double young_start_time_sec = os::elapsedTime();19501951YoungList* young_list = _g1->young_list();1952finalize_incremental_cset_building();19531954guarantee(target_pause_time_ms > 0.0,1955err_msg("target_pause_time_ms = %1.6lf should be positive",1956target_pause_time_ms));1957guarantee(_collection_set == NULL, "Precondition");19581959double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);1960double predicted_pause_time_ms = base_time_ms;1961double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);19621963ergo_verbose4(ErgoCSetConstruction | ErgoHigh,1964"start choosing CSet",1965ergo_format_size("_pending_cards")1966ergo_format_ms("predicted base time")1967ergo_format_ms("remaining time")1968ergo_format_ms("target pause time"),1969_pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);19701971_last_gc_was_young = gcs_are_young() ? 
true : false;19721973if (_last_gc_was_young) {1974_trace_gen0_time_data.increment_young_collection_count();1975} else {1976_trace_gen0_time_data.increment_mixed_collection_count();1977}19781979// The young list is laid with the survivor regions from the previous1980// pause are appended to the RHS of the young list, i.e.1981// [Newly Young Regions ++ Survivors from last pause].19821983uint survivor_region_length = young_list->survivor_length();1984uint eden_region_length = young_list->length() - survivor_region_length;1985init_cset_region_lengths(eden_region_length, survivor_region_length);19861987HeapRegion* hr = young_list->first_survivor_region();1988while (hr != NULL) {1989assert(hr->is_survivor(), "badly formed young list");1990// There is a convention that all the young regions in the CSet1991// are tagged as "eden", so we do this for the survivors here. We1992// use the special set_eden_pre_gc() as it doesn't check that the1993// region is free (which is not the case here).1994hr->set_eden_pre_gc();1995hr = hr->get_next_young_region();1996}19971998// Clear the fields that point to the survivor list - they are all young now.1999young_list->clear_survivors();20002001_collection_set = _inc_cset_head;2002_collection_set_bytes_used_before = _inc_cset_bytes_used_before;2003time_remaining_ms = MAX2(time_remaining_ms - _inc_cset_predicted_elapsed_time_ms, 0.0);2004predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;20052006ergo_verbose3(ErgoCSetConstruction | ErgoHigh,2007"add young regions to CSet",2008ergo_format_region("eden")2009ergo_format_region("survivors")2010ergo_format_ms("predicted young region time"),2011eden_region_length, survivor_region_length,2012_inc_cset_predicted_elapsed_time_ms);20132014// The number of recorded young regions is the incremental2015// collection set's current size2016set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);20172018double young_end_time_sec = 
os::elapsedTime();2019phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);20202021// Set the start of the non-young choice time.2022double non_young_start_time_sec = young_end_time_sec;20232024if (!gcs_are_young()) {2025CollectionSetChooser* cset_chooser = _collectionSetChooser;2026cset_chooser->verify();2027const uint min_old_cset_length = calc_min_old_cset_length();2028const uint max_old_cset_length = calc_max_old_cset_length();20292030uint expensive_region_num = 0;2031bool check_time_remaining = adaptive_young_list_length();20322033HeapRegion* hr = cset_chooser->peek();2034while (hr != NULL) {2035if (old_cset_region_length() >= max_old_cset_length) {2036// Added maximum number of old regions to the CSet.2037ergo_verbose2(ErgoCSetConstruction,2038"finish adding old regions to CSet",2039ergo_format_reason("old CSet region num reached max")2040ergo_format_region("old")2041ergo_format_region("max"),2042old_cset_region_length(), max_old_cset_length);2043break;2044}204520462047// Stop adding regions if the remaining reclaimable space is2048// not above G1HeapWastePercent.2049size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();2050double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);2051double threshold = (double) G1HeapWastePercent;2052if (reclaimable_perc <= threshold) {2053// We've added enough old regions that the amount of uncollected2054// reclaimable space is at or below the waste threshold. 
Stop2055// adding old regions to the CSet.2056ergo_verbose5(ErgoCSetConstruction,2057"finish adding old regions to CSet",2058ergo_format_reason("reclaimable percentage not over threshold")2059ergo_format_region("old")2060ergo_format_region("max")2061ergo_format_byte_perc("reclaimable")2062ergo_format_perc("threshold"),2063old_cset_region_length(),2064max_old_cset_length,2065reclaimable_bytes,2066reclaimable_perc, threshold);2067break;2068}20692070double predicted_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());2071if (check_time_remaining) {2072if (predicted_time_ms > time_remaining_ms) {2073// Too expensive for the current CSet.20742075if (old_cset_region_length() >= min_old_cset_length) {2076// We have added the minimum number of old regions to the CSet,2077// we are done with this CSet.2078ergo_verbose4(ErgoCSetConstruction,2079"finish adding old regions to CSet",2080ergo_format_reason("predicted time is too high")2081ergo_format_ms("predicted time")2082ergo_format_ms("remaining time")2083ergo_format_region("old")2084ergo_format_region("min"),2085predicted_time_ms, time_remaining_ms,2086old_cset_region_length(), min_old_cset_length);2087break;2088}20892090// We'll add it anyway given that we haven't reached the2091// minimum number of old regions.2092expensive_region_num += 1;2093}2094} else {2095if (old_cset_region_length() >= min_old_cset_length) {2096// In the non-auto-tuning case, we'll finish adding regions2097// to the CSet if we reach the minimum.2098ergo_verbose2(ErgoCSetConstruction,2099"finish adding old regions to CSet",2100ergo_format_reason("old CSet region num reached min")2101ergo_format_region("old")2102ergo_format_region("min"),2103old_cset_region_length(), min_old_cset_length);2104break;2105}2106}21072108// We will add this region to the CSet.2109time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);2110predicted_pause_time_ms += 
predicted_time_ms;2111cset_chooser->remove_and_move_to_next(hr);2112_g1->old_set_remove(hr);2113add_old_region_to_cset(hr);21142115hr = cset_chooser->peek();2116}2117if (hr == NULL) {2118ergo_verbose0(ErgoCSetConstruction,2119"finish adding old regions to CSet",2120ergo_format_reason("candidate old regions not available"));2121}21222123if (expensive_region_num > 0) {2124// We print the information once here at the end, predicated on2125// whether we added any apparently expensive regions or not, to2126// avoid generating output per region.2127ergo_verbose4(ErgoCSetConstruction,2128"added expensive regions to CSet",2129ergo_format_reason("old CSet region num not reached min")2130ergo_format_region("old")2131ergo_format_region("expensive")2132ergo_format_region("min")2133ergo_format_ms("remaining time"),2134old_cset_region_length(),2135expensive_region_num,2136min_old_cset_length,2137time_remaining_ms);2138}21392140cset_chooser->verify();2141}21422143stop_incremental_cset_building();21442145ergo_verbose5(ErgoCSetConstruction,2146"finish choosing CSet",2147ergo_format_region("eden")2148ergo_format_region("survivors")2149ergo_format_region("old")2150ergo_format_ms("predicted pause time")2151ergo_format_ms("target pause time"),2152eden_region_length, survivor_region_length,2153old_cset_region_length(),2154predicted_pause_time_ms, target_pause_time_ms);21552156double non_young_end_time_sec = os::elapsedTime();2157phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);2158evacuation_info.set_collectionset_regions(cset_region_length());2159}21602161void TraceGen0TimeData::record_start_collection(double time_to_stop_the_world_ms) {2162if(TraceGen0Time) {2163_all_stop_world_times_ms.add(time_to_stop_the_world_ms);2164}2165}21662167void TraceGen0TimeData::record_yield_time(double yield_time_ms) {2168if(TraceGen0Time) {2169_all_yield_times_ms.add(yield_time_ms);2170}2171}21722173void 
TraceGen0TimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) {2174if(TraceGen0Time) {2175_total.add(pause_time_ms);2176_other.add(pause_time_ms - phase_times->accounted_time_ms());2177_root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms());2178_parallel.add(phase_times->cur_collection_par_time_ms());2179_ext_root_scan.add(phase_times->average_time_ms(G1GCPhaseTimes::ExtRootScan));2180_satb_filtering.add(phase_times->average_time_ms(G1GCPhaseTimes::SATBFiltering));2181_update_rs.add(phase_times->average_time_ms(G1GCPhaseTimes::UpdateRS));2182_scan_rs.add(phase_times->average_time_ms(G1GCPhaseTimes::ScanRS));2183_obj_copy.add(phase_times->average_time_ms(G1GCPhaseTimes::ObjCopy));2184_termination.add(phase_times->average_time_ms(G1GCPhaseTimes::Termination));21852186double parallel_known_time = phase_times->average_time_ms(G1GCPhaseTimes::ExtRootScan) +2187phase_times->average_time_ms(G1GCPhaseTimes::SATBFiltering) +2188phase_times->average_time_ms(G1GCPhaseTimes::UpdateRS) +2189phase_times->average_time_ms(G1GCPhaseTimes::ScanRS) +2190phase_times->average_time_ms(G1GCPhaseTimes::ObjCopy) +2191phase_times->average_time_ms(G1GCPhaseTimes::Termination);21922193double parallel_other_time = phase_times->cur_collection_par_time_ms() - parallel_known_time;2194_parallel_other.add(parallel_other_time);2195_clear_ct.add(phase_times->cur_clear_ct_time_ms());2196}2197}21982199void TraceGen0TimeData::increment_young_collection_count() {2200if(TraceGen0Time) {2201++_young_pause_num;2202}2203}22042205void TraceGen0TimeData::increment_mixed_collection_count() {2206if(TraceGen0Time) {2207++_mixed_pause_num;2208}2209}22102211void TraceGen0TimeData::print_summary(const char* str,2212const NumberSeq* seq) const {2213double sum = seq->sum();2214gclog_or_tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)",2215str, sum / 1000.0, seq->avg());2216}22172218void TraceGen0TimeData::print_summary_sd(const char* str,2219const NumberSeq* seq) const 
{2220print_summary(str, seq);2221gclog_or_tty->print_cr("%+45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",2222"(num", seq->num(), seq->sd(), seq->maximum());2223}22242225void TraceGen0TimeData::print() const {2226if (!TraceGen0Time) {2227return;2228}22292230gclog_or_tty->print_cr("ALL PAUSES");2231print_summary_sd(" Total", &_total);2232gclog_or_tty->cr();2233gclog_or_tty->cr();2234gclog_or_tty->print_cr(" Young GC Pauses: %8d", _young_pause_num);2235gclog_or_tty->print_cr(" Mixed GC Pauses: %8d", _mixed_pause_num);2236gclog_or_tty->cr();22372238gclog_or_tty->print_cr("EVACUATION PAUSES");22392240if (_young_pause_num == 0 && _mixed_pause_num == 0) {2241gclog_or_tty->print_cr("none");2242} else {2243print_summary_sd(" Evacuation Pauses", &_total);2244print_summary(" Root Region Scan Wait", &_root_region_scan_wait);2245print_summary(" Parallel Time", &_parallel);2246print_summary(" Ext Root Scanning", &_ext_root_scan);2247print_summary(" SATB Filtering", &_satb_filtering);2248print_summary(" Update RS", &_update_rs);2249print_summary(" Scan RS", &_scan_rs);2250print_summary(" Object Copy", &_obj_copy);2251print_summary(" Termination", &_termination);2252print_summary(" Parallel Other", &_parallel_other);2253print_summary(" Clear CT", &_clear_ct);2254print_summary(" Other", &_other);2255}2256gclog_or_tty->cr();22572258gclog_or_tty->print_cr("MISC");2259print_summary_sd(" Stop World", &_all_stop_world_times_ms);2260print_summary_sd(" Yields", &_all_yield_times_ms);2261}22622263void TraceGen1TimeData::record_full_collection(double full_gc_time_ms) {2264if (TraceGen1Time) {2265_all_full_gc_times.add(full_gc_time_ms);2266}2267}22682269void TraceGen1TimeData::print() const {2270if (!TraceGen1Time) {2271return;2272}22732274if (_all_full_gc_times.num() > 0) {2275gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",2276_all_full_gc_times.num(),2277_all_full_gc_times.sum() / 1000.0);2278gclog_or_tty->print_cr(" (avg = %8.2fms).", 
_all_full_gc_times.avg());2279gclog_or_tty->print_cr(" [std. dev = %8.2f ms, max = %8.2f ms]",2280_all_full_gc_times.sd(),2281_all_full_gc_times.maximum());2282}2283}228422852286