// Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
// (38920 views)
/*1* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.2* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.3*4* This code is free software; you can redistribute it and/or modify it5* under the terms of the GNU General Public License version 2 only, as6* published by the Free Software Foundation.7*8* This code is distributed in the hope that it will be useful, but WITHOUT9* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or10* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License11* version 2 for more details (a copy is included in the LICENSE file that12* accompanied this code).13*14* You should have received a copy of the GNU General Public License version15* 2 along with this work; if not, write to the Free Software Foundation,16* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.17*18* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA19* or visit www.oracle.com if you need additional information or have any20* questions.21*22*/2324#include "precompiled.hpp"25#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"26#include "gc_implementation/g1/g1GCPhaseTimes.hpp"27#include "gc_implementation/g1/g1Log.hpp"28#include "gc_implementation/g1/g1StringDedup.hpp"29#include "memory/allocation.hpp"30#include "runtime/os.hpp"3132// Helper class for avoiding interleaved logging33class LineBuffer: public StackObj {3435private:36static const int BUFFER_LEN = 1024;37static const int INDENT_CHARS = 3;38char _buffer[BUFFER_LEN];39int _indent_level;40int _cur;4142void vappend(const char* format, va_list ap) ATTRIBUTE_PRINTF(2, 0) {43int res = os::vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);44if (res > BUFFER_LEN) {45DEBUG_ONLY(warning("buffer too small in LineBuffer");)46_buffer[BUFFER_LEN -1] = 0;47_cur = BUFFER_LEN; // vsnprintf above should not add to _buffer if we are called again48} else if (res != -1) {49_cur += res;50}51}5253public:54explicit 
LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) {55for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) {56_buffer[_cur] = ' ';57}58}5960#ifndef PRODUCT61~LineBuffer() {62assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?");63}64#endif6566void append(const char* format, ...) ATTRIBUTE_PRINTF(2, 3) {67va_list ap;68va_start(ap, format);69vappend(format, ap);70va_end(ap);71}7273void print_cr() {74gclog_or_tty->print_cr("%s", _buffer);75_cur = _indent_level * INDENT_CHARS;76}7778void append_and_print_cr(const char* format, ...) ATTRIBUTE_PRINTF(2, 3) {79va_list ap;80va_start(ap, format);81vappend(format, ap);82va_end(ap);83print_cr();84}85};8687template <class T>88class WorkerDataArray : public CHeapObj<mtGC> {89friend class G1GCParPhasePrinter;90T* _data;91uint _length;92const char* _title;93bool _print_sum;94int _log_level;95uint _indent_level;96bool _enabled;9798WorkerDataArray<size_t>* _thread_work_items;99100NOT_PRODUCT(T uninitialized();)101102// We are caching the sum and average to only have to calculate them once.103// This is not done in an MT-safe way. 
It is intended to allow single104// threaded code to call sum() and average() multiple times in any order105// without having to worry about the cost.106bool _has_new_data;107T _sum;108T _min;109T _max;110double _average;111112public:113WorkerDataArray(uint length, const char* title, bool print_sum, int log_level, uint indent_level) :114_title(title), _length(0), _print_sum(print_sum), _log_level(log_level), _indent_level(indent_level),115_has_new_data(true), _thread_work_items(NULL), _enabled(true) {116assert(length > 0, "Must have some workers to store data for");117_length = length;118_data = NEW_C_HEAP_ARRAY(T, _length, mtGC);119}120121~WorkerDataArray() {122FREE_C_HEAP_ARRAY(T, _data, mtGC);123}124125void link_thread_work_items(WorkerDataArray<size_t>* thread_work_items) {126_thread_work_items = thread_work_items;127}128129WorkerDataArray<size_t>* thread_work_items() { return _thread_work_items; }130131void set(uint worker_i, T value) {132assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));133assert(_data[worker_i] == WorkerDataArray<T>::uninitialized(), err_msg("Overwriting data for worker %d in %s", worker_i, _title));134_data[worker_i] = value;135_has_new_data = true;136}137138void set_thread_work_item(uint worker_i, size_t value) {139assert(_thread_work_items != NULL, "No sub count");140_thread_work_items->set(worker_i, value);141}142143T get(uint worker_i) {144assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));145assert(_data[worker_i] != WorkerDataArray<T>::uninitialized(), err_msg("No data added for worker %d", worker_i));146return _data[worker_i];147}148149void add(uint worker_i, T value) {150assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));151assert(_data[worker_i] != WorkerDataArray<T>::uninitialized(), err_msg("No data to add to for worker %d", worker_i));152_data[worker_i] += value;153_has_new_data = true;154}155156double 
average(uint active_threads){157calculate_totals(active_threads);158return _average;159}160161T sum(uint active_threads) {162calculate_totals(active_threads);163return _sum;164}165166T minimum(uint active_threads) {167calculate_totals(active_threads);168return _min;169}170171T maximum(uint active_threads) {172calculate_totals(active_threads);173return _max;174}175176void reset() PRODUCT_RETURN;177void verify(uint active_threads) PRODUCT_RETURN;178179void set_enabled(bool enabled) { _enabled = enabled; }180181int log_level() { return _log_level; }182183private:184185void calculate_totals(uint active_threads){186if (!_has_new_data) {187return;188}189190_sum = (T)0;191_min = _data[0];192_max = _min;193assert(active_threads <= _length, "Wrong number of active threads");194for (uint i = 0; i < active_threads; ++i) {195T val = _data[i];196_sum += val;197_min = MIN2(_min, val);198_max = MAX2(_max, val);199}200_average = (double)_sum / (double)active_threads;201_has_new_data = false;202}203};204205206#ifndef PRODUCT207208template <>209size_t WorkerDataArray<size_t>::uninitialized() {210return (size_t)-1;211}212213template <>214double WorkerDataArray<double>::uninitialized() {215return -1.0;216}217218template <class T>219void WorkerDataArray<T>::reset() {220for (uint i = 0; i < _length; i++) {221_data[i] = WorkerDataArray<T>::uninitialized();222}223if (_thread_work_items != NULL) {224_thread_work_items->reset();225}226}227228template <class T>229void WorkerDataArray<T>::verify(uint active_threads) {230if (!_enabled) {231return;232}233234assert(active_threads <= _length, "Wrong number of active threads");235for (uint i = 0; i < active_threads; i++) {236assert(_data[i] != WorkerDataArray<T>::uninitialized(),237err_msg("Invalid data for worker %u in '%s'", i, _title));238}239if (_thread_work_items != NULL) {240_thread_work_items->verify(active_threads);241}242}243244#endif245246G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) 
:247_max_gc_threads(max_gc_threads)248{249assert(max_gc_threads > 0, "Must have some GC threads");250251_gc_par_phases[GCWorkerStart] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Start (ms)", false, G1Log::LevelFiner, 2);252_gc_par_phases[ExtRootScan] = new WorkerDataArray<double>(max_gc_threads, "Ext Root Scanning (ms)", true, G1Log::LevelFiner, 2);253254// Root scanning phases255_gc_par_phases[ThreadRoots] = new WorkerDataArray<double>(max_gc_threads, "Thread Roots (ms)", true, G1Log::LevelFinest, 3);256_gc_par_phases[StringTableRoots] = new WorkerDataArray<double>(max_gc_threads, "StringTable Roots (ms)", true, G1Log::LevelFinest, 3);257_gc_par_phases[UniverseRoots] = new WorkerDataArray<double>(max_gc_threads, "Universe Roots (ms)", true, G1Log::LevelFinest, 3);258_gc_par_phases[JNIRoots] = new WorkerDataArray<double>(max_gc_threads, "JNI Handles Roots (ms)", true, G1Log::LevelFinest, 3);259_gc_par_phases[ObjectSynchronizerRoots] = new WorkerDataArray<double>(max_gc_threads, "ObjectSynchronizer Roots (ms)", true, G1Log::LevelFinest, 3);260_gc_par_phases[FlatProfilerRoots] = new WorkerDataArray<double>(max_gc_threads, "FlatProfiler Roots (ms)", true, G1Log::LevelFinest, 3);261_gc_par_phases[ManagementRoots] = new WorkerDataArray<double>(max_gc_threads, "Management Roots (ms)", true, G1Log::LevelFinest, 3);262_gc_par_phases[SystemDictionaryRoots] = new WorkerDataArray<double>(max_gc_threads, "SystemDictionary Roots (ms)", true, G1Log::LevelFinest, 3);263_gc_par_phases[CLDGRoots] = new WorkerDataArray<double>(max_gc_threads, "CLDG Roots (ms)", true, G1Log::LevelFinest, 3);264_gc_par_phases[JVMTIRoots] = new WorkerDataArray<double>(max_gc_threads, "JVMTI Roots (ms)", true, G1Log::LevelFinest, 3);265_gc_par_phases[CodeCacheRoots] = new WorkerDataArray<double>(max_gc_threads, "CodeCache Roots (ms)", true, G1Log::LevelFinest, 3);266_gc_par_phases[CMRefRoots] = new WorkerDataArray<double>(max_gc_threads, "CM RefProcessor Roots (ms)", true, 
G1Log::LevelFinest, 3);267_gc_par_phases[WaitForStrongCLD] = new WorkerDataArray<double>(max_gc_threads, "Wait For Strong CLD (ms)", true, G1Log::LevelFinest, 3);268_gc_par_phases[WeakCLDRoots] = new WorkerDataArray<double>(max_gc_threads, "Weak CLD Roots (ms)", true, G1Log::LevelFinest, 3);269_gc_par_phases[SATBFiltering] = new WorkerDataArray<double>(max_gc_threads, "SATB Filtering (ms)", true, G1Log::LevelFinest, 3);270271_gc_par_phases[UpdateRS] = new WorkerDataArray<double>(max_gc_threads, "Update RS (ms)", true, G1Log::LevelFiner, 2);272_gc_par_phases[ScanRS] = new WorkerDataArray<double>(max_gc_threads, "Scan RS (ms)", true, G1Log::LevelFiner, 2);273_gc_par_phases[CodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Code Root Scanning (ms)", true, G1Log::LevelFiner, 2);274_gc_par_phases[ObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Object Copy (ms)", true, G1Log::LevelFiner, 2);275_gc_par_phases[Termination] = new WorkerDataArray<double>(max_gc_threads, "Termination (ms)", true, G1Log::LevelFiner, 2);276_gc_par_phases[GCWorkerTotal] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Total (ms)", true, G1Log::LevelFiner, 2);277_gc_par_phases[GCWorkerEnd] = new WorkerDataArray<double>(max_gc_threads, "GC Worker End (ms)", false, G1Log::LevelFiner, 2);278_gc_par_phases[Other] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Other (ms)", true, G1Log::LevelFiner, 2);279280_update_rs_processed_buffers = new WorkerDataArray<size_t>(max_gc_threads, "Processed Buffers", true, G1Log::LevelFiner, 3);281_gc_par_phases[UpdateRS]->link_thread_work_items(_update_rs_processed_buffers);282283_termination_attempts = new WorkerDataArray<size_t>(max_gc_threads, "Termination Attempts", true, G1Log::LevelFinest, 3);284_gc_par_phases[Termination]->link_thread_work_items(_termination_attempts);285286_gc_par_phases[StringDedupQueueFixup] = new WorkerDataArray<double>(max_gc_threads, "Queue Fixup (ms)", true, G1Log::LevelFiner, 
2);287_gc_par_phases[StringDedupTableFixup] = new WorkerDataArray<double>(max_gc_threads, "Table Fixup (ms)", true, G1Log::LevelFiner, 2);288289_gc_par_phases[RedirtyCards] = new WorkerDataArray<double>(max_gc_threads, "Parallel Redirty", true, G1Log::LevelFinest, 3);290_redirtied_cards = new WorkerDataArray<size_t>(max_gc_threads, "Redirtied Cards", true, G1Log::LevelFinest, 3);291_gc_par_phases[RedirtyCards]->link_thread_work_items(_redirtied_cards);292}293294void G1GCPhaseTimes::note_gc_start(uint active_gc_threads, bool mark_in_progress) {295assert(active_gc_threads > 0, "The number of threads must be > 0");296assert(active_gc_threads <= _max_gc_threads, "The number of active threads must be <= the max number of threads");297_active_gc_threads = active_gc_threads;298299for (int i = 0; i < GCParPhasesSentinel; i++) {300_gc_par_phases[i]->reset();301}302303_gc_par_phases[StringDedupQueueFixup]->set_enabled(G1StringDedup::is_enabled());304_gc_par_phases[StringDedupTableFixup]->set_enabled(G1StringDedup::is_enabled());305}306307void G1GCPhaseTimes::note_gc_end() {308for (uint i = 0; i < _active_gc_threads; i++) {309double worker_time = _gc_par_phases[GCWorkerEnd]->get(i) - _gc_par_phases[GCWorkerStart]->get(i);310record_time_secs(GCWorkerTotal, i , worker_time);311312double worker_known_time =313_gc_par_phases[ExtRootScan]->get(i) +314_gc_par_phases[SATBFiltering]->get(i) +315_gc_par_phases[UpdateRS]->get(i) +316_gc_par_phases[ScanRS]->get(i) +317_gc_par_phases[CodeRoots]->get(i) +318_gc_par_phases[ObjCopy]->get(i) +319_gc_par_phases[Termination]->get(i);320321record_time_secs(Other, i, worker_time - worker_known_time);322}323324for (int i = 0; i < GCParPhasesSentinel; i++) {325_gc_par_phases[i]->verify(_active_gc_threads);326}327}328329void G1GCPhaseTimes::print_stats(int level, const char* str, double value) {330LineBuffer(level).append_and_print_cr("[%s: %.1lf ms]", str, value);331}332333void G1GCPhaseTimes::print_stats(int level, const char* str, size_t value) 
{334LineBuffer(level).append_and_print_cr("[%s: " SIZE_FORMAT "]", str, value);335}336337void G1GCPhaseTimes::print_stats(int level, const char* str, double value, uint workers) {338LineBuffer(level).append_and_print_cr("[%s: %.1lf ms, GC Workers: %u]", str, value, workers);339}340341double G1GCPhaseTimes::accounted_time_ms() {342// Subtract the root region scanning wait time. It's initialized to343// zero at the start of the pause.344double misc_time_ms = _root_region_scan_wait_time_ms;345346misc_time_ms += _cur_collection_par_time_ms;347348// Now subtract the time taken to fix up roots in generated code349misc_time_ms += _cur_collection_code_root_fixup_time_ms;350351// Strong code root purge time352misc_time_ms += _cur_strong_code_root_purge_time_ms;353354if (G1StringDedup::is_enabled()) {355// String dedup fixup time356misc_time_ms += _cur_string_dedup_fixup_time_ms;357}358359// Subtract the time taken to clean the card table from the360// current value of "other time"361misc_time_ms += _cur_clear_ct_time_ms;362363return misc_time_ms;364}365366// record the time a phase took in seconds367void G1GCPhaseTimes::record_time_secs(GCParPhases phase, uint worker_i, double secs) {368_gc_par_phases[phase]->set(worker_i, secs);369}370371// add a number of seconds to a phase372void G1GCPhaseTimes::add_time_secs(GCParPhases phase, uint worker_i, double secs) {373_gc_par_phases[phase]->add(worker_i, secs);374}375376void G1GCPhaseTimes::record_thread_work_item(GCParPhases phase, uint worker_i, size_t count) {377_gc_par_phases[phase]->set_thread_work_item(worker_i, count);378}379380// return the average time for a phase in milliseconds381double G1GCPhaseTimes::average_time_ms(GCParPhases phase) {382return _gc_par_phases[phase]->average(_active_gc_threads) * 1000.0;383}384385double G1GCPhaseTimes::get_time_ms(GCParPhases phase, uint worker_i) {386return _gc_par_phases[phase]->get(worker_i) * 1000.0;387}388389double G1GCPhaseTimes::sum_time_ms(GCParPhases phase) {390return 
_gc_par_phases[phase]->sum(_active_gc_threads) * 1000.0;391}392393double G1GCPhaseTimes::min_time_ms(GCParPhases phase) {394return _gc_par_phases[phase]->minimum(_active_gc_threads) * 1000.0;395}396397double G1GCPhaseTimes::max_time_ms(GCParPhases phase) {398return _gc_par_phases[phase]->maximum(_active_gc_threads) * 1000.0;399}400401size_t G1GCPhaseTimes::get_thread_work_item(GCParPhases phase, uint worker_i) {402assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");403return _gc_par_phases[phase]->thread_work_items()->get(worker_i);404}405406size_t G1GCPhaseTimes::sum_thread_work_items(GCParPhases phase) {407assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");408return _gc_par_phases[phase]->thread_work_items()->sum(_active_gc_threads);409}410411double G1GCPhaseTimes::average_thread_work_items(GCParPhases phase) {412assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");413return _gc_par_phases[phase]->thread_work_items()->average(_active_gc_threads);414}415416size_t G1GCPhaseTimes::min_thread_work_items(GCParPhases phase) {417assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");418return _gc_par_phases[phase]->thread_work_items()->minimum(_active_gc_threads);419}420421size_t G1GCPhaseTimes::max_thread_work_items(GCParPhases phase) {422assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");423return _gc_par_phases[phase]->thread_work_items()->maximum(_active_gc_threads);424}425426class G1GCParPhasePrinter : public StackObj {427G1GCPhaseTimes* _phase_times;428public:429G1GCParPhasePrinter(G1GCPhaseTimes* phase_times) : _phase_times(phase_times) {}430431void print(G1GCPhaseTimes::GCParPhases phase_id) {432WorkerDataArray<double>* phase = _phase_times->_gc_par_phases[phase_id];433434if (phase->_log_level > G1Log::level() || !phase->_enabled) {435return;436}437438if (phase->_length == 1) {439print_single_length(phase_id, phase);440} else 
{441print_multi_length(phase_id, phase);442}443}444445private:446447void print_single_length(G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<double>* phase) {448// No need for min, max, average and sum for only one worker449LineBuffer buf(phase->_indent_level);450buf.append_and_print_cr("[%s: %.1lf]", phase->_title, _phase_times->get_time_ms(phase_id, 0));451452if (phase->_thread_work_items != NULL) {453LineBuffer buf2(phase->_thread_work_items->_indent_level);454buf2.append_and_print_cr("[%s: " SIZE_FORMAT "]", phase->_thread_work_items->_title, _phase_times->sum_thread_work_items(phase_id));455}456}457458void print_time_values(LineBuffer& buf, G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<double>* phase) {459uint active_length = _phase_times->_active_gc_threads;460for (uint i = 0; i < active_length; ++i) {461buf.append(" %.1lf", _phase_times->get_time_ms(phase_id, i));462}463buf.print_cr();464}465466void print_count_values(LineBuffer& buf, G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<size_t>* thread_work_items) {467uint active_length = _phase_times->_active_gc_threads;468for (uint i = 0; i < active_length; ++i) {469buf.append(" " SIZE_FORMAT, _phase_times->get_thread_work_item(phase_id, i));470}471buf.print_cr();472}473474void print_thread_work_items(G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<size_t>* thread_work_items) {475LineBuffer buf(thread_work_items->_indent_level);476buf.append("[%s:", thread_work_items->_title);477478if (G1Log::finest()) {479print_count_values(buf, phase_id, thread_work_items);480}481482assert(thread_work_items->_print_sum, err_msg("%s does not have print sum true even though it is a count", thread_work_items->_title));483484buf.append_and_print_cr(" Min: " SIZE_FORMAT ", Avg: %.1lf, Max: " SIZE_FORMAT ", Diff: " SIZE_FORMAT ", Sum: " SIZE_FORMAT "]",485_phase_times->min_thread_work_items(phase_id), _phase_times->average_thread_work_items(phase_id), 
_phase_times->max_thread_work_items(phase_id),486_phase_times->max_thread_work_items(phase_id) - _phase_times->min_thread_work_items(phase_id), _phase_times->sum_thread_work_items(phase_id));487}488489void print_multi_length(G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<double>* phase) {490LineBuffer buf(phase->_indent_level);491buf.append("[%s:", phase->_title);492493if (G1Log::finest()) {494print_time_values(buf, phase_id, phase);495}496497buf.append(" Min: %.1lf, Avg: %.1lf, Max: %.1lf, Diff: %.1lf",498_phase_times->min_time_ms(phase_id), _phase_times->average_time_ms(phase_id), _phase_times->max_time_ms(phase_id),499_phase_times->max_time_ms(phase_id) - _phase_times->min_time_ms(phase_id));500501if (phase->_print_sum) {502// for things like the start and end times the sum is not503// that relevant504buf.append(", Sum: %.1lf", _phase_times->sum_time_ms(phase_id));505}506507buf.append_and_print_cr("]");508509if (phase->_thread_work_items != NULL) {510print_thread_work_items(phase_id, phase->_thread_work_items);511}512}513};514515void G1GCPhaseTimes::print(double pause_time_sec) {516G1GCParPhasePrinter par_phase_printer(this);517518if (_root_region_scan_wait_time_ms > 0.0) {519print_stats(1, "Root Region Scan Waiting", _root_region_scan_wait_time_ms);520}521522print_stats(1, "Parallel Time", _cur_collection_par_time_ms, _active_gc_threads);523for (int i = 0; i <= GCMainParPhasesLast; i++) {524par_phase_printer.print((GCParPhases) i);525}526527print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);528print_stats(1, "Code Root Purge", _cur_strong_code_root_purge_time_ms);529if (G1StringDedup::is_enabled()) {530print_stats(1, "String Dedup Fixup", _cur_string_dedup_fixup_time_ms, _active_gc_threads);531for (int i = StringDedupPhasesFirst; i <= StringDedupPhasesLast; i++) {532par_phase_printer.print((GCParPhases) i);533}534}535print_stats(1, "Clear CT", _cur_clear_ct_time_ms);536double misc_time_ms = pause_time_sec * MILLIUNITS - 
accounted_time_ms();537print_stats(1, "Other", misc_time_ms);538if (_cur_verify_before_time_ms > 0.0) {539print_stats(2, "Verify Before", _cur_verify_before_time_ms);540}541if (G1CollectedHeap::heap()->evacuation_failed()) {542double evac_fail_handling = _cur_evac_fail_recalc_used + _cur_evac_fail_remove_self_forwards +543_cur_evac_fail_restore_remsets;544print_stats(2, "Evacuation Failure", evac_fail_handling);545if (G1Log::finest()) {546print_stats(3, "Recalculate Used", _cur_evac_fail_recalc_used);547print_stats(3, "Remove Self Forwards", _cur_evac_fail_remove_self_forwards);548print_stats(3, "Restore RemSet", _cur_evac_fail_restore_remsets);549}550}551print_stats(2, "Choose CSet",552(_recorded_young_cset_choice_time_ms +553_recorded_non_young_cset_choice_time_ms));554print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);555print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);556print_stats(2, "Redirty Cards", _recorded_redirty_logged_cards_time_ms);557par_phase_printer.print(RedirtyCards);558559if (G1EagerReclaimHumongousObjects) {560print_stats(2, "Humongous Register", _cur_fast_reclaim_humongous_register_time_ms);561if (G1Log::finest()) {562print_stats(3, "Humongous Total", _cur_fast_reclaim_humongous_total);563print_stats(3, "Humongous Candidate", _cur_fast_reclaim_humongous_candidates);564}565print_stats(2, "Humongous Reclaim", _cur_fast_reclaim_humongous_time_ms);566if (G1Log::finest()) {567print_stats(3, "Humongous Reclaimed", _cur_fast_reclaim_humongous_reclaimed);568}569}570print_stats(2, "Free CSet",571(_recorded_young_free_cset_time_ms +572_recorded_non_young_free_cset_time_ms));573if (G1Log::finest()) {574print_stats(3, "Young Free CSet", _recorded_young_free_cset_time_ms);575print_stats(3, "Non-Young Free CSet", _recorded_non_young_free_cset_time_ms);576}577if (_cur_verify_after_time_ms > 0.0) {578print_stats(2, "Verify After", _cur_verify_after_time_ms);579}580}581582G1GCParPhaseTimesTracker::G1GCParPhaseTimesTracker(G1GCPhaseTimes* phase_times, 
G1GCPhaseTimes::GCParPhases phase, uint worker_id) :583_phase_times(phase_times), _phase(phase), _worker_id(worker_id) {584if (_phase_times != NULL) {585_start_time = Ticks::now();586}587}588589G1GCParPhaseTimesTracker::~G1GCParPhaseTimesTracker() {590if (_phase_times != NULL) {591_phase_times->record_time_secs(_phase, _worker_id, (Ticks::now() - _start_time).seconds());592}593}594595596