Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp
/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP

#include "gc_implementation/g1/collectionSetChooser.hpp"
#include "gc_implementation/g1/g1Allocator.hpp"
#include "gc_implementation/g1/g1MMUTracker.hpp"
#include "memory/collectorPolicy.hpp"

// A G1CollectorPolicy makes policy decisions that determine the
// characteristics of the collector. Examples include:
//   * choice of collection set.
//   * when to collect.

class HeapRegion;
class CollectionSetChooser;
class G1GCPhaseTimes;

// TraceGen0Time collects data on _both_ young and mixed evacuation pauses
// (the latter may contain non-young regions - i.e. regions that are
// technically in Gen1) while TraceGen1Time collects data about full GCs.
class TraceGen0TimeData : public CHeapObj<mtGC> {
private:
  unsigned  _young_pause_num;
  unsigned  _mixed_pause_num;

  NumberSeq _all_stop_world_times_ms;
  NumberSeq _all_yield_times_ms;

  NumberSeq _total;
  NumberSeq _other;
  NumberSeq _root_region_scan_wait;
  NumberSeq _parallel;
  NumberSeq _ext_root_scan;
  NumberSeq _satb_filtering;
  NumberSeq _update_rs;
  NumberSeq _scan_rs;
  NumberSeq _obj_copy;
  NumberSeq _termination;
  NumberSeq _parallel_other;
  NumberSeq _clear_ct;

  void print_summary(const char* str, const NumberSeq* seq) const;
  void print_summary_sd(const char* str, const NumberSeq* seq) const;

public:
  TraceGen0TimeData() : _young_pause_num(0), _mixed_pause_num(0) {};
  void record_start_collection(double time_to_stop_the_world_ms);
  void record_yield_time(double yield_time_ms);
  void record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times);
  void increment_young_collection_count();
  void increment_mixed_collection_count();
  void print() const;
};

class TraceGen1TimeData : public CHeapObj<mtGC> {
private:
  NumberSeq _all_full_gc_times;

public:
  void record_full_collection(double full_gc_time_ms);
  void print() const;
};

// There are three command line options related to the young gen size:
// NewSize, MaxNewSize and NewRatio (There is also -Xmn, but that is
// just a short form for NewSize==MaxNewSize). G1 will use its internal
// heuristics to calculate the actual young gen size, so these options
// basically only limit the range within which G1 can pick a young gen
// size. Also, these are general options taking byte sizes. G1 will
// internally work with a number of regions instead. So, some rounding
// will occur.
//
// If nothing related to the young gen size is set on the command
// line we should allow the young gen to be between G1NewSizePercent
// and G1MaxNewSizePercent of the heap size. This means that every time
// the heap size changes, the limits for the young gen size will be
// recalculated.
//
// If only -XX:NewSize is set we should use the specified value as the
// minimum size for young gen. Still using G1MaxNewSizePercent of the
// heap as maximum.
//
// If only -XX:MaxNewSize is set we should use the specified value as the
// maximum size for young gen. Still using G1NewSizePercent of the heap
// as minimum.
//
// If -XX:NewSize and -XX:MaxNewSize are both specified we use these values.
// No updates when the heap size changes. There is a special case when
// NewSize==MaxNewSize. This is interpreted as "fixed" and will use a
// different heuristic for calculating the collection set when we do mixed
// collection.
//
// If only -XX:NewRatio is set we should use the specified ratio of the heap
// as both min and max. This will be interpreted as "fixed" just like the
// NewSize==MaxNewSize case above. But we will update the min and max
// every time the heap size changes.
//
// NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is
// combined with either NewSize or MaxNewSize. (A warning message is printed.)
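//
// For illustration only (the example sizes are arbitrary, other flags are
// assumed at their defaults), the combinations above map to sizing behavior
// roughly as follows:
//
//   (no flags)                        young gen floats between G1NewSizePercent
//                                     and G1MaxNewSizePercent of the heap,
//                                     recalculated whenever the heap resizes.
//   -XX:NewSize=512m                  minimum pinned at 512m, maximum still
//                                     G1MaxNewSizePercent of the heap.
//   -XX:MaxNewSize=2g                 maximum pinned at 2g, minimum still
//                                     G1NewSizePercent of the heap.
//   -XX:NewSize=1g -XX:MaxNewSize=1g  young gen fixed at 1g, never recalculated.
//   -XX:NewRatio=3                    young gen fixed at 1/(3+1) = 1/4 of the
//                                     heap, recalculated on every heap resize.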
class G1YoungGenSizer : public CHeapObj<mtGC> {
private:
  enum SizerKind {
    SizerDefaults,
    SizerNewSizeOnly,
    SizerMaxNewSizeOnly,
    SizerMaxAndNewSize,
    SizerNewRatio
  };
  SizerKind _sizer_kind;
  uint _min_desired_young_length;
  uint _max_desired_young_length;

  // False when using a fixed young generation size due to command-line options,
  // true otherwise.
  bool _adaptive_size;

  uint calculate_default_min_length(uint new_number_of_heap_regions);
  uint calculate_default_max_length(uint new_number_of_heap_regions);

  // Update the given values for minimum and maximum young gen length in regions
  // given the number of heap regions depending on the kind of sizing algorithm.
  void recalculate_min_max_young_length(uint number_of_heap_regions,
                                        uint* min_young_length,
                                        uint* max_young_length);

public:
  G1YoungGenSizer();
  // Calculate the maximum length of the young gen given the number of regions
  // depending on the sizing algorithm.
  uint max_young_length(uint number_of_heap_regions);

  void heap_size_changed(uint new_number_of_heap_regions);
  uint min_desired_young_length() {
    return _min_desired_young_length;
  }
  uint max_desired_young_length() {
    return _max_desired_young_length;
  }
  bool adaptive_young_list_length() {
    return _adaptive_size;
  }
};

class G1CollectorPolicy: public CollectorPolicy {
private:
  // either equal to the number of parallel threads, if ParallelGCThreads
  // has been set, or 1 otherwise
  int _parallel_gc_threads;

  // The number of GC threads currently active.
  uintx _no_of_gc_threads;

  enum SomePrivateConstants {
    NumPrevPausesForHeuristics = 10
  };

  G1MMUTracker* _mmu_tracker;

  void initialize_alignments();
  void initialize_flags();

  CollectionSetChooser* _collectionSetChooser;

  double _full_collection_start_sec;
  uint   _cur_collection_pause_used_regions_at_start;

  // These exclude marking times.
  TruncatedSeq* _recent_gc_times_ms;

  TruncatedSeq* _concurrent_mark_remark_times_ms;
  TruncatedSeq* _concurrent_mark_cleanup_times_ms;

  TraceGen0TimeData _trace_gen0_time_data;
  TraceGen1TimeData _trace_gen1_time_data;

  double _stop_world_start;

  // indicates whether we are in young or mixed GC mode
  bool _gcs_are_young;

  uint _young_list_target_length;
  uint _young_list_fixed_length;

  // The max number of regions we can extend the eden by while the GC
  // locker is active. This should be >= _young_list_target_length;
  uint _young_list_max_length;

  bool _last_gc_was_young;

  bool _during_marking;
  bool _in_marking_window;
  bool _in_marking_window_im;

  SurvRateGroup* _short_lived_surv_rate_group;
  SurvRateGroup* _survivor_surv_rate_group;
  // add here any more surv rate groups

  double _gc_overhead_perc;

  double _reserve_factor;
  uint   _reserve_regions;

  bool during_marking() {
    return _during_marking;
  }

  enum PredictionConstants {
    TruncatedSeqLength = 10
  };

  TruncatedSeq* _alloc_rate_ms_seq;
  double        _prev_collection_pause_end_ms;

  TruncatedSeq* _rs_length_diff_seq;
  TruncatedSeq* _cost_per_card_ms_seq;
  TruncatedSeq* _young_cards_per_entry_ratio_seq;
  TruncatedSeq* _mixed_cards_per_entry_ratio_seq;
  TruncatedSeq* _cost_per_entry_ms_seq;
  TruncatedSeq* _mixed_cost_per_entry_ms_seq;
  TruncatedSeq* _cost_per_byte_ms_seq;
  TruncatedSeq* _constant_other_time_ms_seq;
  TruncatedSeq* _young_other_cost_per_region_ms_seq;
  TruncatedSeq* _non_young_other_cost_per_region_ms_seq;

  TruncatedSeq* _pending_cards_seq;
  TruncatedSeq* _rs_lengths_seq;

  TruncatedSeq* _cost_per_byte_ms_during_cm_seq;

  G1YoungGenSizer* _young_gen_sizer;

  uint _eden_cset_region_length;
  uint _survivor_cset_region_length;
  uint _old_cset_region_length;

  void init_cset_region_lengths(uint eden_cset_region_length,
                                uint survivor_cset_region_length);

  uint eden_cset_region_length()     { return _eden_cset_region_length;     }
  uint survivor_cset_region_length() { return _survivor_cset_region_length; }
  uint old_cset_region_length()      { return _old_cset_region_length;      }

  uint _free_regions_at_end_of_collection;

  size_t _recorded_rs_lengths;
  size_t _max_rs_lengths;
  double _sigma;

  size_t _rs_lengths_prediction;

  double sigma() { return _sigma; }

  // A function that prevents us putting too much stock in small sample
  // sets. Returns a number between 2.0 and 1.0, depending on the number
  // of samples. 5 or more samples yields one; fewer scales linearly from
  // 2.0 at 1 sample to 1.0 at 5.
  double confidence_factor(int samples) {
    if (samples > 4) return 1.0;
    else return  1.0 + sigma() * ((double)(5 - samples))/2.0;
  }
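
  // For example (illustrative, assuming the default -XX:G1ConfidencePercent=50,
  // i.e. sigma() == 0.5): 1 sample gives 1.0 + 0.5 * (4/2.0) = 2.0, 3 samples
  // give 1.0 + 0.5 * (2/2.0) = 1.5, and 5 or more samples give 1.0, so
  // predictions based on very few samples are padded the most.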

  double get_new_neg_prediction(TruncatedSeq* seq) {
    return seq->davg() - sigma() * seq->dsd();
  }

#ifndef PRODUCT
  bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
#endif // PRODUCT

  void adjust_concurrent_refinement(double update_rs_time,
                                    double update_rs_processed_buffers,
                                    double goal_ms);

  uintx no_of_gc_threads() { return _no_of_gc_threads; }
  void set_no_of_gc_threads(uintx v) { _no_of_gc_threads = v; }

  double _pause_time_target_ms;

  size_t _pending_cards;

public:
  // Accessors

  void set_region_eden(HeapRegion* hr, int young_index_in_cset) {
    hr->set_eden();
    hr->install_surv_rate_group(_short_lived_surv_rate_group);
    hr->set_young_index_in_cset(young_index_in_cset);
  }

  void set_region_survivor(HeapRegion* hr, int young_index_in_cset) {
    assert(hr->is_survivor(), "pre-condition");
    hr->install_surv_rate_group(_survivor_surv_rate_group);
    hr->set_young_index_in_cset(young_index_in_cset);
  }

#ifndef PRODUCT
  bool verify_young_ages();
#endif // PRODUCT

  double get_new_prediction(TruncatedSeq* seq) {
    return MAX2(seq->davg() + sigma() * seq->dsd(),
                seq->davg() * confidence_factor(seq->num()));
  }
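
  // Worked example (illustrative, again assuming sigma() == 0.5): with ten
  // samples whose decaying average davg() is 10.0 ms and decaying standard
  // deviation dsd() is 2.0 ms, the prediction is MAX2(10.0 + 0.5 * 2.0,
  // 10.0 * 1.0) = 11.0 ms. With a single 10.0 ms sample (dsd() == 0.0) it is
  // MAX2(10.0, 10.0 * 2.0) = 20.0 ms, i.e. the confidence factor dominates
  // until enough samples have been collected.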

  void record_max_rs_lengths(size_t rs_lengths) {
    _max_rs_lengths = rs_lengths;
  }

  size_t predict_rs_length_diff() {
    return (size_t) get_new_prediction(_rs_length_diff_seq);
  }

  double predict_alloc_rate_ms() {
    return get_new_prediction(_alloc_rate_ms_seq);
  }

  double predict_cost_per_card_ms() {
    return get_new_prediction(_cost_per_card_ms_seq);
  }

  double predict_rs_update_time_ms(size_t pending_cards) {
    return (double) pending_cards * predict_cost_per_card_ms();
  }

  double predict_young_cards_per_entry_ratio() {
    return get_new_prediction(_young_cards_per_entry_ratio_seq);
  }

  double predict_mixed_cards_per_entry_ratio() {
    if (_mixed_cards_per_entry_ratio_seq->num() < 2) {
      return predict_young_cards_per_entry_ratio();
    } else {
      return get_new_prediction(_mixed_cards_per_entry_ratio_seq);
    }
  }

  size_t predict_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_young_cards_per_entry_ratio());
  }

  size_t predict_non_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_mixed_cards_per_entry_ratio());
  }

  double predict_rs_scan_time_ms(size_t card_num) {
    if (gcs_are_young()) {
      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
    } else {
      return predict_mixed_rs_scan_time_ms(card_num);
    }
  }

  double predict_mixed_rs_scan_time_ms(size_t card_num) {
    if (_mixed_cost_per_entry_ms_seq->num() < 3) {
      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
    } else {
      return (double) (card_num *
                       get_new_prediction(_mixed_cost_per_entry_ms_seq));
    }
  }

  double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
    if (_cost_per_byte_ms_during_cm_seq->num() < 3) {
      return (1.1 * (double) bytes_to_copy) *
              get_new_prediction(_cost_per_byte_ms_seq);
    } else {
      return (double) bytes_to_copy *
              get_new_prediction(_cost_per_byte_ms_during_cm_seq);
    }
  }

  double predict_object_copy_time_ms(size_t bytes_to_copy) {
    if (_in_marking_window && !_in_marking_window_im) {
      return predict_object_copy_time_ms_during_cm(bytes_to_copy);
    } else {
      return (double) bytes_to_copy *
              get_new_prediction(_cost_per_byte_ms_seq);
    }
  }

  double predict_constant_other_time_ms() {
    return get_new_prediction(_constant_other_time_ms_seq);
  }

  double predict_young_other_time_ms(size_t young_num) {
    return (double) young_num *
           get_new_prediction(_young_other_cost_per_region_ms_seq);
  }

  double predict_non_young_other_time_ms(size_t non_young_num) {
    return (double) non_young_num *
           get_new_prediction(_non_young_other_cost_per_region_ms_seq);
  }

  double predict_base_elapsed_time_ms(size_t pending_cards);
  double predict_base_elapsed_time_ms(size_t pending_cards,
                                      size_t scanned_cards);
  size_t predict_bytes_to_copy(HeapRegion* hr);
  double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc);

  void set_recorded_rs_lengths(size_t rs_lengths);

  uint cset_region_length()       { return young_cset_region_length() +
                                           old_cset_region_length(); }
  uint young_cset_region_length() { return eden_cset_region_length() +
                                           survivor_cset_region_length(); }

  double predict_survivor_regions_evac_time();

  void cset_regions_freed() {
    bool propagate = _last_gc_was_young && !_in_marking_window;
    _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
    _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
    // also call it on any more surv rate groups
  }

  G1MMUTracker* mmu_tracker() {
    return _mmu_tracker;
  }

  double max_pause_time_ms() {
    return _mmu_tracker->max_gc_time() * 1000.0;
  }

  double predict_remark_time_ms() {
    return get_new_prediction(_concurrent_mark_remark_times_ms);
  }

  double predict_cleanup_time_ms() {
    return get_new_prediction(_concurrent_mark_cleanup_times_ms);
  }

  // Returns an estimate of the survival rate of the region at yg-age
  // "yg_age".
  double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
    TruncatedSeq* seq = surv_rate_group->get_seq(age);
    if (seq->num() == 0)
      gclog_or_tty->print("BARF! age is %d", age);
    guarantee( seq->num() > 0, "invariant" );
    double pred = get_new_prediction(seq);
    if (pred > 1.0)
      pred = 1.0;
    return pred;
  }

  double predict_yg_surv_rate(int age) {
    return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
  }

  double accum_yg_surv_rate_pred(int age) {
    return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
  }
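
  // Taken together, the predictors above feed G1's pause-time model: roughly,
  // predicted pause time = RS update cost for the pending cards
  //                      + RS scan cost for the predicted number of cards
  //                      + object copy cost for the predicted live bytes
  //                      + constant and per-region "other" costs.
  // This is only a sketch; see predict_base_elapsed_time_ms() and
  // predict_region_elapsed_time_ms() in the corresponding .cpp file for the
  // exact composition.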

private:
  // Statistics kept per GC stoppage, pause or full.
  TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;

  // Add a new GC of the given duration and end time to the record.
  void update_recent_gc_times(double end_time_sec, double elapsed_ms);

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set. Set from the incrementally built collection
  // set at the start of the pause.
  HeapRegion* _collection_set;

  // The number of bytes in the collection set before the pause. Set from
  // the incrementally built collection set at the start of an evacuation
  // pause, and incremented in finalize_cset() when adding old regions
  // (if any) to the collection set.
  size_t _collection_set_bytes_used_before;

  // The number of bytes copied during the GC.
  size_t _bytes_copied_during_gc;

  // The associated information that is maintained while the incremental
  // collection set is being built with young regions. Used to populate
  // the recorded info for the evacuation pause.

  enum CSetBuildType {
    Active,             // We are actively building the collection set
    Inactive            // We are not actively building the collection set
  };

  CSetBuildType _inc_cset_build_state;

  // The head of the incrementally built collection set.
  HeapRegion* _inc_cset_head;

  // The tail of the incrementally built collection set.
  HeapRegion* _inc_cset_tail;

  // The number of bytes in the incrementally built collection set.
  // Used to set _collection_set_bytes_used_before at the start of
  // an evacuation pause.
  size_t _inc_cset_bytes_used_before;

  // Used to record the highest end of heap region in collection set
  HeapWord* _inc_cset_max_finger;

  // The RSet lengths recorded for regions in the CSet. It is updated
  // by the thread that adds a new region to the CSet. We assume that
  // only one thread can be allocating a new CSet region (currently,
  // it does so after taking the Heap_lock) hence no need to
  // synchronize updates to this field.
  size_t _inc_cset_recorded_rs_lengths;

  // A concurrent refinement thread periodically samples the young
  // region RSets and needs to update _inc_cset_recorded_rs_lengths as
  // the RSets grow. Instead of having to synchronize updates to that
  // field we accumulate the deltas in this field and fold them into
  // _inc_cset_recorded_rs_lengths at the start of a GC.
  ssize_t _inc_cset_recorded_rs_lengths_diffs;

  // The predicted elapsed time it will take to collect the regions in
  // the CSet. This is updated by the thread that adds a new region to
  // the CSet. See the comment for _inc_cset_recorded_rs_lengths about
  // MT-safety assumptions.
  double _inc_cset_predicted_elapsed_time_ms;

  // See the comment for _inc_cset_recorded_rs_lengths_diffs.
  double _inc_cset_predicted_elapsed_time_ms_diffs;

  // Stash a pointer to the g1 heap.
  G1CollectedHeap* _g1;

  G1GCPhaseTimes* _phase_times;

  // The ratio of gc time to elapsed time, computed over recent pauses.
  double _recent_avg_pause_time_ratio;

  double recent_avg_pause_time_ratio() {
    return _recent_avg_pause_time_ratio;
  }

  // At the end of a pause we check the heap occupancy and we decide
  // whether we will start a marking cycle during the next pause. If
  // we decide that we want to do that, we will set this parameter to
  // true. So, this parameter will stay true between the end of a
  // pause and the beginning of a subsequent pause (not necessarily
  // the next one, see the comments on the next field) when we decide
  // that we will indeed start a marking cycle and do the initial-mark
  // work.
  volatile bool _initiate_conc_mark_if_possible;

  // If initiate_conc_mark_if_possible() is set at the beginning of a
  // pause, it is a suggestion that the pause should start a marking
  // cycle by doing the initial-mark work. However, it is possible
  // that the concurrent marking thread is still finishing up the
  // previous marking cycle (e.g., clearing the next marking
  // bitmap). If that is the case we cannot start a new cycle and
  // we'll have to wait for the concurrent marking thread to finish
  // what it is doing. In this case we will postpone the marking cycle
  // initiation decision for the next pause. When we eventually decide
  // to start a cycle, we will set _during_initial_mark_pause which
  // will stay true until the end of the initial-mark pause and it's
  // the condition that indicates that a pause is doing the
  // initial-mark work.
  volatile bool _during_initial_mark_pause;
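
  // In short (an illustrative summary of the two comments above): the
  // occupancy check at the end of a pause may set
  // _initiate_conc_mark_if_possible; at the start of a later pause,
  // decide_on_conc_mark_initiation() promotes that request to
  // _during_initial_mark_pause only if the previous marking cycle has
  // completely finished, otherwise the request is carried over to a
  // subsequent pause.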

  bool _last_young_gc;

  // This set of variables tracks the collector efficiency, in order to
  // determine whether we should initiate a new marking.
  double _cur_mark_stop_world_time_ms;
  double _mark_remark_start_sec;
  double _mark_cleanup_start_sec;

  // Update the young list target length either by setting it to the
  // desired fixed value or by calculating it using G1's pause
  // prediction model. If no rs_lengths parameter is passed, predict
  // the RS lengths using the prediction model, otherwise use the
  // given rs_lengths as the prediction.
  void update_young_list_target_length(size_t rs_lengths = (size_t) -1);

  // Calculate and return the minimum desired young list target
  // length. This is the minimum desired young list length according
  // to the user's inputs.
  uint calculate_young_list_desired_min_length(uint base_min_length);

  // Calculate and return the maximum desired young list target
  // length. This is the maximum desired young list length according
  // to the user's inputs.
  uint calculate_young_list_desired_max_length();

  // Calculate and return the maximum young list target length that
  // can fit into the pause time goal. The parameters are: rs_lengths
  // represents the prediction of how large the young RSet lengths will
  // be, base_min_length is the already existing number of regions in
  // the young list, min_length and max_length are the desired min and
  // max young list length according to the user's inputs.
  uint calculate_young_list_target_length(size_t rs_lengths,
                                          uint base_min_length,
                                          uint desired_min_length,
                                          uint desired_max_length);

  // Check whether a given young length (young_length) fits into the
  // given target pause time and whether the prediction for the amount
  // of objects to be copied for the given length will fit into the
  // given free space (expressed by base_free_regions). It is used by
  // calculate_young_list_target_length().
  bool predict_will_fit(uint young_length, double base_time_ms,
                        uint base_free_regions, double target_pause_time_ms);

  // Calculate the minimum number of old regions we'll add to the CSet
  // during a mixed GC.
  uint calc_min_old_cset_length();

  // Calculate the maximum number of old regions we'll add to the CSet
  // during a mixed GC.
  uint calc_max_old_cset_length();
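
  // Illustrative note (see the .cpp file for the exact computation): the
  // minimum is roughly the number of candidate old regions divided by
  // G1MixedGCCountTarget, so that the candidates identified by marking are
  // consumed over about that many mixed pauses, while the maximum is roughly
  // G1OldCSetRegionThresholdPercent of the heap's regions, capping how many
  // old regions a single pause may take.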

  // Returns the given amount of uncollected reclaimable space
  // as a percentage of the current heap capacity.
  double reclaimable_bytes_perc(size_t reclaimable_bytes);

public:

  G1CollectorPolicy();

  virtual G1CollectorPolicy* as_g1_policy() { return this; }

  virtual CollectorPolicy::Name kind() {
    return CollectorPolicy::G1CollectorPolicyKind;
  }

  G1GCPhaseTimes* phase_times() const { return _phase_times; }

  // Check the current value of the young list RSet lengths and
  // compare it against the last prediction. If the current value is
  // higher, recalculate the young list target length prediction.
  void revise_young_list_target_length_if_necessary();

  // This should be called after the heap is resized.
  void record_new_heap_size(uint new_number_of_regions);

  void init();

  // Create jstat counters for the policy.
  virtual void initialize_gc_policy_counters();

  virtual HeapWord* mem_allocate_work(size_t size,
                                      bool is_tlab,
                                      bool* gc_overhead_limit_was_exceeded);

  // This method controls how a collector handles one or more
  // of its generations being fully allocated.
  virtual HeapWord* satisfy_failed_allocation(size_t size,
                                              bool is_tlab);

  BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }

  bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);

  // Record the start and end of an evacuation pause.
  void record_collection_pause_start(double start_time_sec, GCTracer &tracer);
  void record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info);

  // Record the start and end of a full collection.
  void record_full_collection_start();
  void record_full_collection_end();

  // Must currently be called while the world is stopped.
  void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);

  // Record start and end of remark.
  void record_concurrent_mark_remark_start();
  void record_concurrent_mark_remark_end();

  // Record start, end, and completion of cleanup.
  void record_concurrent_mark_cleanup_start();
  void record_concurrent_mark_cleanup_end(int no_of_gc_threads);
  void record_concurrent_mark_cleanup_completed();

  // Records the information about the heap size for reporting in
  // print_detailed_heap_transition
  void record_heap_size_info_at_start(bool full);

  // Print heap sizing transition (with less and more detail).
  void print_heap_transition();
  void print_detailed_heap_transition(bool full = false);

  void record_stop_world_start();
  void record_concurrent_pause();

  // Record how much space we copied during a GC. This is typically
  // called when a GC alloc region is being retired.
  void record_bytes_copied_during_gc(size_t bytes) {
    _bytes_copied_during_gc += bytes;
  }

  // The amount of space we copied during a GC.
  size_t bytes_copied_during_gc() {
    return _bytes_copied_during_gc;
  }

  // Determine whether there are candidate regions so that the
  // next GC should be mixed. The two action strings are used
  // in the ergo output when the method returns true or false.
  bool next_gc_should_be_mixed(const char* true_action_str,
                               const char* false_action_str);

  // Choose a new collection set. Marks the chosen regions as being
  // "in_collection_set", and links them together. The head and number of
  // the collection set are available via access methods.
  void finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info);

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set.
  HeapRegion* collection_set() { return _collection_set; }

  void clear_collection_set() { _collection_set = NULL; }

  // Add old region "hr" to the CSet.
  void add_old_region_to_cset(HeapRegion* hr);

  // Incremental CSet Support

  // The head of the incrementally built collection set.
  HeapRegion* inc_cset_head() { return _inc_cset_head; }

  // The tail of the incrementally built collection set.
  HeapRegion* inc_set_tail() { return _inc_cset_tail; }

  // Initialize incremental collection set info.
  void start_incremental_cset_building();

  // Perform any final calculations on the incremental CSet fields
  // before we can use them.
  void finalize_incremental_cset_building();

  void clear_incremental_cset() {
    _inc_cset_head = NULL;
    _inc_cset_tail = NULL;
  }

  // Stop adding regions to the incremental collection set
  void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }

  // Add information about hr to the aggregated information for the
  // incrementally built collection set.
  void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);

  // Update information about hr in the aggregated information for
  // the incrementally built collection set.
  void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);

private:
  // Update the incremental cset information when adding a region
  // (should not be called directly).
  void add_region_to_incremental_cset_common(HeapRegion* hr);

public:
  // Add hr to the LHS of the incremental collection set.
  void add_region_to_incremental_cset_lhs(HeapRegion* hr);

  // Add hr to the RHS of the incremental collection set.
  void add_region_to_incremental_cset_rhs(HeapRegion* hr);

#ifndef PRODUCT
  void print_collection_set(HeapRegion* list_head, outputStream* st);
#endif // !PRODUCT

  bool initiate_conc_mark_if_possible()       { return _initiate_conc_mark_if_possible; }
  void set_initiate_conc_mark_if_possible()   { _initiate_conc_mark_if_possible = true; }
  void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }

  bool during_initial_mark_pause()       { return _during_initial_mark_pause; }
  void set_during_initial_mark_pause()   { _during_initial_mark_pause = true; }
  void clear_during_initial_mark_pause() { _during_initial_mark_pause = false; }

  // This sets the initiate_conc_mark_if_possible() flag to start a
  // new cycle, as long as we are not already in one. It's best if it
  // is called during a safepoint when the test whether a cycle is in
  // progress or not is stable.
  bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);

  // This is called at the very beginning of an evacuation pause (it
  // has to be the first thing that the pause does). If
  // initiate_conc_mark_if_possible() is true, and the concurrent
  // marking thread has completed its work during the previous cycle,
  // it will set during_initial_mark_pause() so that the pause does
  // the initial-mark work and starts a marking cycle.
  void decide_on_conc_mark_initiation();

  // If an expansion would be appropriate, because recent GC overhead had
  // exceeded the desired limit, return an amount to expand by.
  virtual size_t expansion_amount();

  // Print tracing information.
  void print_tracing_info() const;

  // Print stats on young survival ratio
  void print_yg_surv_rate_info() const;

  void finished_recalculating_age_indexes(bool is_survivors) {
    if (is_survivors) {
      _survivor_surv_rate_group->finished_recalculating_age_indexes();
    } else {
      _short_lived_surv_rate_group->finished_recalculating_age_indexes();
    }
    // do that for any other surv rate groups
  }

  size_t young_list_target_length() const { return _young_list_target_length; }

  bool is_young_list_full();

  bool can_expand_young_list();

  uint young_list_max_length() {
    return _young_list_max_length;
  }

  bool gcs_are_young() {
    return _gcs_are_young;
  }
  void set_gcs_are_young(bool gcs_are_young) {
    _gcs_are_young = gcs_are_young;
  }

  bool adaptive_young_list_length() {
    return _young_gen_sizer->adaptive_young_list_length();
  }

private:
  //
  // Survivor regions policy.
  //

  // Current tenuring threshold, set to 0 if the collector reaches the
  // maximum number of survivor regions.
  uint _tenuring_threshold;

  // The limit on the number of regions allocated for survivors.
  uint _max_survivor_regions;

  // For reporting purposes.
  // The value of _heap_bytes_before_gc is also used to calculate
  // the cost of copying.

  size_t _eden_used_bytes_before_gc;         // Eden occupancy before GC
  size_t _survivor_used_bytes_before_gc;     // Survivor occupancy before GC
  size_t _heap_used_bytes_before_gc;         // Heap occupancy before GC
  size_t _metaspace_used_bytes_before_gc;    // Metaspace occupancy before GC

  size_t _eden_capacity_bytes_before_gc;     // Eden capacity before GC
  size_t _heap_capacity_bytes_before_gc;     // Heap capacity before GC

  // The number of survivor regions after a collection.
  uint _recorded_survivor_regions;
  // List of survivor regions.
  HeapRegion* _recorded_survivor_head;
  HeapRegion* _recorded_survivor_tail;

  ageTable _survivors_age_table;

public:
  uint tenuring_threshold() const { return _tenuring_threshold; }

  static const uint REGIONS_UNLIMITED = (uint) -1;

  uint max_regions(InCSetState dest) {
    switch (dest.value()) {
      case InCSetState::Young:
        return _max_survivor_regions;
      case InCSetState::Old:
        return REGIONS_UNLIMITED;
      default:
        assert(false, err_msg("Unknown dest state: " CSETSTATE_FORMAT, dest.value()));
        break;
    }
    // keep some compilers happy
    return 0;
  }

  void note_start_adding_survivor_regions() {
    _survivor_surv_rate_group->start_adding_regions();
  }

  void note_stop_adding_survivor_regions() {
    _survivor_surv_rate_group->stop_adding_regions();
  }

  void record_survivor_regions(uint regions,
                               HeapRegion* head,
                               HeapRegion* tail) {
    _recorded_survivor_regions = regions;
    _recorded_survivor_head    = head;
    _recorded_survivor_tail    = tail;
  }

  uint recorded_survivor_regions() {
    return _recorded_survivor_regions;
  }

  void record_thread_age_table(ageTable* age_table) {
    _survivors_age_table.merge_par(age_table);
  }

  void update_max_gc_locker_expansion();

  // Calculates survivor space parameters.
  void update_survivors_policy(GCTracer &tracer);

  virtual void post_heap_initialize();
};

// This should move to some place more general...

// If we have "n" measurements, and we've kept track of their "sum" and the
// "sum_of_squares" of the measurements, this returns the variance of the
// sequence.
inline double variance(int n, double sum_of_squares, double sum) {
  double n_d = (double)n;
  double avg = sum/n_d;
  return (sum_of_squares - 2.0 * avg * sum + n_d * avg * avg) / n_d;
}
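
// Worked example (illustrative): the three measurements 1.0, 2.0, 3.0 give
// n = 3, sum = 6.0 and sum_of_squares = 14.0, so
// variance(3, 14.0, 6.0) = (14.0 - 2.0 * 2.0 * 6.0 + 3.0 * 4.0) / 3.0 = 2/3,
// which matches the usual E[x^2] - (E[x])^2 = 14/3 - 4 for the population
// variance of the sequence.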

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP