Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"
#if INCLUDE_JFR
#include "jfr/jfr.hpp"
#endif // INCLUDE_JFR

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

elapsedTimer       PSMarkSweep::_accumulated_time;
jlong              PSMarkSweep::_time_of_last_gc = 0;
CollectorCounters* PSMarkSweep::_counters = NULL;

void PSMarkSweep::initialize() {
  MemRegion mr = Universe::heap()->reserved_region();
  _ref_processor = new ReferenceProcessor(mr);     // a vanilla ref proc
  _counters = new CollectorCounters("PSMarkSweep", 1);
}

// This method contains all heap specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. If we need to bail out for policy
// reasons, scavenge before full gc, or any other specialized behavior, it
// needs to be added here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
//
// Note that the all_soft_refs_clear flag in the collector policy
// may be true because this method can be called without intervening
// activity.  For example, when the heap space is tight and full measures
// are being taken to free space.
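//
// Background note: MarkSweepAlwaysCompactCount makes every Nth full GC
// compact the heap maximally (skipping the dead-space shortcuts in the
// mark-sweep decorators). The UIntFlagSetting in invoke() temporarily
// forces the count to 1 when maximum_heap_compaction is requested, so
// that collection is guaranteed to compact fully.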

void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  if (ScavengeBeforeFullGC) {
    PSScavenge::invoke_no_policy();
  }

  const bool clear_all_soft_refs =
    heap->collector_policy()->should_clear_all_soft_refs();

  uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
  UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  GCCause::Cause gc_cause = heap->gc_cause();

  _gc_timer->register_gc_start();
  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());

  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(_gc_tracer);

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
  }

  heap->pre_full_gc_dump(_gc_timer);

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;

    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer->gc_id());
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);

    if (TraceGen1Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    CodeCache::gc_prologue();
    Threads::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture heap size before collection for printing.
    size_t prev_used = heap->used();

    // Capture metadata size before collection for sizing.
    size_t metadata_prev_used = MetaspaceAux::used_bytes();

    // For PrintGCDetails
    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
    ref_processor()->setup_policy(clear_all_softrefs);
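
    // The four phases below implement a classic LISP2-style sliding compaction:
    //   phase 1 marks all live objects from the roots,
    //   phase 2 computes each survivor's destination (forwarding) address,
    //   phase 3 rewrites every reference to point at those destinations,
    //   phase 4 slides the objects to their new locations.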
    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

    // Don't add any more derived pointers during phase3
    COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
    COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

    mark_sweep_phase3();

    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;
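
    // If the young gen is completely empty, no old->young pointers can exist,
    // so the card table covering the old gen can simply be cleared; otherwise
    // every card is dirtied (invalidated) so the next scavenge rescans the
    // old gen for references into the young gen.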
    BarrierSet* bs = heap->barrier_set();
    if (bs->is_a(BarrierSet::ModRef)) {
      ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
      MemRegion old_mr = heap->old_gen()->reserved();
      if (young_gen_empty) {
        modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
      } else {
        modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
      }
    }

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge();
    MetaspaceAux::verify_metrics();

    BiasedLocking::restore_marks();
    Threads::gc_epilogue();
    CodeCache::gc_epilogue();
    JvmtiExport::gc_epilogue();

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    ref_processor()->enqueue_discovered_references(NULL);

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {

      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print("AdaptiveSizeStart: ");
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" collection: %d ",
                               heap->total_collections());
        if (Verbose) {
          gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d",
            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
        }
      }

      // Don't check if the size_policy is ready here.  Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          ((gc_cause != GCCause::_java_lang_system_gc) ||
            UseAdaptiveSizePolicyWithSystemGC)) {
        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
          young_gen->from_space()->capacity_in_bytes() +
          young_gen->to_space()->capacity_in_bytes(),
          "Sizes of space in young gen are out-of-bounds");

        size_t young_live = young_gen->used_in_bytes();
        size_t eden_live = young_gen->eden_space()->used_in_bytes();
        size_t old_live = old_gen->used_in_bytes();
        size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
        size_t max_old_gen_size = old_gen->max_gen_size();
        size_t max_eden_size = young_gen->max_size() -
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();
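
        // Worked example (illustrative numbers only): with a 512M young gen
        // maximum and two 32M survivor spaces, max_eden_size = 512M - 64M =
        // 448M; eden can only ever grow into what the survivors leave free.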

        // Used for diagnostics
        size_policy->clear_generation_free_space_flags();

        size_policy->compute_generations_free_space(young_live,
                                                    eden_live,
                                                    old_live,
                                                    cur_eden,
                                                    max_old_gen_size,
                                                    max_eden_size,
                                                    true /* full gc*/);

        size_policy->check_gc_overhead_limit(young_live,
                                             eden_live,
                                             max_old_gen_size,
                                             max_eden_size,
                                             true /* full gc*/,
                                             gc_cause,
                                             heap->collector_policy());

        size_policy->decay_supplemental_growth(true /* full gc*/);

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        // Don't resize the young generation at a major collection.  A
        // desired young generation size may have been calculated but
        // resizing the young generation complicates the code because the
        // resizing of the old generation may have moved the boundary
        // between the young generation and the old generation.  Let the
        // young generation resizing happen at the minor collections.
      }
      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                               heap->total_collections());
      }
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the heap, recalculate the metaspace capacity
    MetaspaceGC::compute_new_size();

    if (TraceGen1Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_prev_used);
        old_gen->print_used_change(old_gen_prev_used);
      }
      heap->print_heap_change(prev_used);
      if (PrintGCDetails) {
        MetaspaceAux::print_metaspace_change(metadata_prev_used);
      }
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(_gc_tracer);

  heap->post_full_gc_dump(_gc_timer);

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  _gc_timer->register_gc_end();

  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());

  return true;
}

bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden.  Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();
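
  // Worked example (illustrative numbers only): with eden_used = 40M, a padded
  // average promotion of 8M and a 64K generation alignment, absorb_size =
  // align_size_up(48M, 64K) = 48M, which the checks below require to be
  // strictly less than eden's capacity.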

  if (absorb_size >= eden_capacity) {
    return false; // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false; // Respect young gen minimum size.
  }

  if (TraceAdaptiveGCBoundary && Verbose) {
    gclog_or_tty->print(" absorbing " SIZE_FORMAT "K: "
                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                        absorb_size / K,
                        eden_capacity / K, (eden_capacity - absorb_size) / K,
                        young_gen->from_space()->used_in_bytes() / K,
                        young_gen->to_space()->used_in_bytes() / K,
                        young_gen->capacity_in_bytes() / K, new_young_size / K);
  }

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);

  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false;  // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }

  // Take the live data from eden and set both top and end in the old gen to
  // eden top.  (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear.  Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}

void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);
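  // Illustrative arithmetic (assuming a 64-bit VM where a PreservedMark holds
  // an oop plus a markOop, 16 bytes total): 1M of free to_space gives room for
  // 65536 preserved marks before the overflow stacks take over.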
}


void PSMarkSweep::deallocate_stacks() {
  _preserved_mark_stack.clear(true);
  _preserved_oop_stack.clear(true);
  _marking_stack.clear();
  _objarray_stack.clear(true);
}

void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
  trace(" 1");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
    CLDToOopClosure mark_and_push_from_cld(mark_and_push_closure());
    MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
    Threads::oops_do(mark_and_push_closure(), &mark_and_push_from_cld, &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    FlatProfiler::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure());
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    const ReferenceProcessorStats& stats =
      ref_processor()->process_discovered_references(
        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer, _gc_tracer->gc_id());
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  // Unload classes and purge the SystemDictionary.
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Unload nmethods.
  CodeCache::do_unloading(is_alive_closure(), purged_class);

  // Prune dead klasses from subklass/sibling/implementor lists.
  Klass::clean_weak_klass_links(is_alive_closure());

  // Delete entries for dead interned strings.
  StringTable::unlink(is_alive_closure());

  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();

  _gc_tracer->report_object_count_after_gc(is_alive_closure());
}
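
// Note: phase 2 moves nothing yet. The decorators walk the marked objects and
// record each survivor's destination address in its header (headers holding
// real data are preserved via the stacks set up in allocate_stacks()), which
// lets phase 3 rewrite references before phase 4 actually slides the objects.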

void PSMarkSweep::mark_sweep_phase2() {
  GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
  trace("2");

  // Now all live objects are marked, compute the new object addresses.

  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();
}

void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
  trace("3");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  Universe::oops_do(adjust_pointer_closure());
  JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
  CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
  Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
  ObjectSynchronizer::oops_do(adjust_pointer_closure());
  FlatProfiler::oops_do(adjust_pointer_closure());
  Management::oops_do(adjust_pointer_closure());
  JvmtiExport::oops_do(adjust_pointer_closure());
  SystemDictionary::oops_do(adjust_pointer_closure());
  ClassLoaderDataGraph::cld_do(adjust_cld_closure());

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(adjust_pointer_closure());
  JFR_ONLY(Jfr::weak_oops_do(adjust_pointer_closure()));

  CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
  CodeCache::blobs_do(&adjust_from_blobs);
  StringTable::oops_do(adjust_pointer_closure());
  ref_processor()->weak_oops_do(adjust_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());

  adjust_marks();

  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
}
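
// By this point every reference, strong or weak, has been rewritten to the
// destination addresses computed in phase 2, so phase 4 can move each object
// without any further fixup of incoming pointers.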

void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
  trace("4");

  // All pointers are now adjusted, move objects accordingly

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  old_gen->compact();
  young_gen->compact();
}

jlong PSMarkSweep::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong ret_val = now - _time_of_last_gc;
  // XXX See note in genCollectedHeap::millis_since_last_gc().
  if (ret_val < 0) {
    NOT_PRODUCT(warning("time warp: " INT64_FORMAT, ret_val);)
    return 0;
  }
  return ret_val;
}

void PSMarkSweep::reset_millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
}