Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/memory/defNewGeneration.cpp
/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/defNewGeneration.inline.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/genRemSet.hpp"
#include "memory/generationSpec.hpp"
#include "memory/iterator.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/space.inline.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/stack.inline.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

//
// DefNewGeneration functions.

// Methods of protected closure types.

DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
  assert(g->level() == 0, "Optimized for youngest gen.");
}
bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
}

DefNewGeneration::KeepAliveClosure::
KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
  assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind.");
  _rs = (CardTableRS*)rs;
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }


DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {
  _boundary = g->reserved().end();
}

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }

DefNewGeneration::EvacuateFollowersClosure::
EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                         ScanClosure* cur, ScanClosure* older) :
  _gch(gch),
  _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}

DefNewGeneration::FastEvacuateFollowersClosure::
FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                             DefNewGeneration* gen,
                             FastScanClosure* cur, FastScanClosure* older) :
  _gch(gch), _level(level), _gen(gen),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
  guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
}

ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }

FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }

void KlassScanClosure::do_klass(Klass* klass) {
#ifndef PRODUCT
  if (TraceScavenge) {
    ResourceMark rm;
    gclog_or_tty->print_cr("KlassScanClosure::do_klass %p, %s, dirty: %s",
                           klass,
                           klass->external_name(),
                           klass->has_modified_oops() ? "true" : "false");
  }
#endif

  // If the klass has not been dirtied we know that there's
  // no references into the young gen and we can skip it.
  if (klass->has_modified_oops()) {
    if (_accumulate_modified_oops) {
      klass->accumulate_modified_oops();
    }

    // Clear this state since we're going to scavenge all the metadata.
    klass->clear_modified_oops();

    // Tell the closure which Klass is being scanned so that it can be dirtied
    // if oops are left pointing into the young gen.
    _scavenge_closure->set_scanned_klass(klass);

    klass->oops_do(_scavenge_closure);

    _scavenge_closure->set_scanned_klass(NULL);
  }
}

ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
  _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }

void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }

KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
                                   KlassRemSet* klass_rem_set)
    : _scavenge_closure(scavenge_closure),
      _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}


DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   int level,
                                   const char* policy)
  : Generation(rs, initial_size, level),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
    _eden_space = new ConcEdenSpace(this);
  } else {
    _eden_space = new EdenSpace(this);
  }
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
    vm_exit_during_initialization("Could not allocate a new gen space");

  // Compute the maximum eden and survivor space sizes.
  // These sizes are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _next_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;

  _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
}

void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  uintx alignment =
    GenCollectedHeap::heap()->collector_policy()->space_alignment();

  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces
  // so check.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
         "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding, if so adjust eden size back up
    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_size_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;

  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.
  if (!clear_space) {
    // Must check mangling before the spaces are reshaped. Otherwise,
    // the bottom or end of one space may have moved into another;
    // a failure of the check may then not correctly indicate which space
    // is not properly mangled.
    if (ZapUnusedHeapArea) {
      HeapWord* limit = (HeapWord*) _virtual_space.high();
      eden()->check_mangled_unused_area(limit);
      from()->check_mangled_unused_area(limit);
      to()->check_mangled_unused_area(limit);
    }
  }

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top. This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);

  // Set next compaction spaces.
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered. The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space = to();
  _to_space   = s;
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered. The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

bool DefNewGeneration::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  // Do not attempt an expand to the reserve size. The
  // request should properly observe the maximum size of
  // the generation so an expand-to-reserve should be
  // unnecessary. Also, a second call to expand to the reserve
  // value can potentially cause an undue expansion.
  // For example, if the first expand fails for unknown reasons,
  // but the second succeeds and expands the heap to its maximum
  // value.
  if (GC_locker::is_active()) {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Garbage collection disabled, "
        "expanded heap instead");
    }
  }

  return success;
}


void DefNewGeneration::compute_new_size() {
  // This is called after a gc that includes the following generation
  // (which is required to exist.)
  // So from-space will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If not, we bail out (otherwise we would have to relocate the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  int next_level = level() + 1;
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(next_level < gch->_n_gens,
         "DefNewGeneration cannot be an oldest gen");

  Generation* next_gen = gch->_gens[next_level];
  size_t old_size = next_gen->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = spec()->init_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  // Compute desired new generation size based on NewRatio and
  // NewSizeThreadIncrease
  size_t desired_new_size = old_size/NewRatio;
  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
  desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);

  // Adjust new generation size
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
      changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false. If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // bail out of shrinking if objects in eden
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    Universe::heap()->barrier_set()->resize_covered_region(cmr);
    if (Verbose && PrintGC) {
      size_t new_size_after = _virtual_space.committed_size();
      size_t eden_size_after = eden()->capacity();
      size_t survivor_size_after = from()->capacity();
      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
                          SIZE_FORMAT "K [eden="
                          SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
                          new_size_before/K, new_size_after/K,
                          eden_size_after/K, survivor_size_after/K);
      if (WizardMode) {
        gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
                            thread_increase_size/K, threads_count);
      }
      gclog_or_tty->cr();
    }
  }
}

void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
  assert(false, "NYI -- are you sure you want to call this?");
}


size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}


size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}


size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
  const size_t alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}


HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }

void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}


void DefNewGeneration::space_iterate(SpaceClosure* blk,
                                     bool usedOnly) {
  blk->do_space(eden());
  blk->do_space(from());
  blk->do_space(to());
}

// The last collection bailed out, we are running out of heap space,
// so we try to allocate from the from-space, too.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  HeapWord* result = NULL;
  if (Verbose && PrintGCDetails) {
    gclog_or_tty->print("DefNewGeneration::allocate_from_space(%u):"
                        " will_fail: %s"
                        " heap_lock: %s"
                        " free: " SIZE_FORMAT,
                        size,
                        GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
                          "true" : "false",
                        Heap_lock->is_locked() ? "locked" : "unlocked",
                        from()->free());
  }
  if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
    if (Heap_lock->owned_by_self() ||
        (SafepointSynchronize::is_at_safepoint() &&
         Thread::current()->is_VM_thread())) {
      // If the Heap_lock is not locked by this thread, this will be called
      // again later with the Heap_lock held.
      result = from()->allocate(size);
    } else if (PrintGC && Verbose) {
      gclog_or_tty->print_cr(" Heap_lock is not owned by self");
    }
  } else if (PrintGC && Verbose) {
    gclog_or_tty->print_cr(" should_allocate_from_space: NOT");
  }
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr(" returns %s", result == NULL ? "NULL" : "object");
  }
  return result;
}

HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool is_tlab,
                                                bool parallel) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}

void DefNewGeneration::adjust_desired_tenuring_threshold(GCTracer &tracer) {
  // Set the desired survivor size to half the real survivor space
  _tenuring_threshold =
    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize, tracer);
}

void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();
  DefNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());

  _next_gen = gch->next_gen(this);

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Collection attempt not safe :: ");
    }
    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id());
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  gch->trace_heap_before_gc(&gc_tracer);

  SpecializationStats::clear();

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
                                      gch->rem_set()->klass_rem_set());
  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                           &fsc_with_no_gc_barrier,
                                           false);

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  gch->gen_process_roots(_level,
                         true,  // Process younger gens, if any,
                                // as strong roots.
                         true,  // activate StrongRootsScope
                         GenCollectedHeap::SO_ScavengeCodeCache,
                         GenCollectedHeap::StrongAndWeakRoots,
                         &fsc_with_no_gc_barrier,
                         &fsc_with_gc_barrier,
                         &cld_scan_closure);

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  const ReferenceProcessorStats& stats =
    rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                      NULL, _gc_timer, gc_tracer.gc_id());
  gc_tracer.report_gc_reference_stats(stats);

  if (!_promotion_failed) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s). When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation. Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold(gc_tracer);

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    if (PrintGC && !PrintGCDetails) {
      gch->print_heap_change(gch_prev_used);
    }
    assert(!gch->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed) ");
    }
    // Add to-space to the list of space to compact
    // when a promotion failure has occurred. In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_failed();

    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();
    gc_tracer.report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());
  SpecializationStats::print();

  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings, and os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  gch->trace_heap_after_gc(&gc_tracer);
  gc_tracer.report_tenuring_threshold(tenuring_threshold());

  _gc_timer->register_gc_end();

  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

class RemoveForwardPointerClosure: public ObjectClosure {
public:
  void do_object(oop obj) {
    obj->init_mark();
  }
};

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  _promotion_failed_info.reset();
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);

  // Now restore saved marks, if any.
  assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(),
         "should be the same");
  while (!_objs_with_preserved_marks.is_empty()) {
    oop obj   = _objs_with_preserved_marks.pop();
    markOop m = _preserved_marks_of_objs.pop();
    obj->set_mark(m);
  }
  _objs_with_preserved_marks.clear(true);
  _preserved_marks_of_objs.clear(true);
}

void DefNewGeneration::preserve_mark(oop obj, markOop m) {
  assert(_promotion_failed && m->must_be_preserved_for_promotion_failure(obj),
         "Oversaving!");
  _objs_with_preserved_marks.push(obj);
  _preserved_marks_of_objs.push(m);
}

void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    preserve_mark(obj, m);
  }
}

void DefNewGeneration::handle_promotion_failure(oop old) {
  if (PrintPromotionFailure && !_promotion_failed) {
    gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ",
                        old->size());
  }
  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());
  preserve_mark_if_necessary(old, old->mark());
  // forward to self
  old->forward_to(old);

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}

oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate_aligned(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s);
    if (obj == NULL) {
      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  while (!_promo_failure_scan_stack.is_empty()) {
    oop obj = _promo_failure_scan_stack.pop();
    obj->oop_iterate(_promo_failure_scan_stack_closure);
  }
}

void DefNewGeneration::save_marks() {
  eden()->set_saved_mark();
  to()->set_saved_mark();
  from()->set_saved_mark();
}


void DefNewGeneration::reset_saved_marks() {
  eden()->reset_saved_mark();
  to()->reset_saved_mark();
  from()->reset_saved_mark();
}


bool DefNewGeneration::no_allocs_since_save_marks() {
  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
  assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
  return to()->saved_mark_at_top();
}

#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
                                                                \
void DefNewGeneration::                                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
  cl->set_generation(this);                                     \
  eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
  from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  cl->reset_generation();                                       \
  save_marks();                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)

#undef DefNew_SINCE_SAVE_MARKS_DEFN

void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                          size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) return;
  assert(requestor->level() > level(), "DefNewGeneration must be youngest");

  /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */

  ContiguousSpace* to_space = to();
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea. This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area_complete();
  }
}

bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: to is not empty :: ");
    }
    return false;
  }
  if (_next_gen == NULL) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _next_gen = gch->next_gen(this);
  }
  return _next_gen->promotion_attempt_is_safe(used());
}

void DefNewGeneration::gc_epilogue(bool full) {
  DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)

  assert(!GC_locker::is_active(), "We should not be executing here");
  // Check if the heap is approaching full after a collection has
  // been done. Generally the young generation is empty at
  // a minimum at the end of a collection. If it is not, then
  // the heap is approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (full) {
    DEBUG_ONLY(seen_incremental_collection_failed = false;)
    if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      }
      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
      set_should_allocate_from_space();         // we seem to be running out of space
    } else {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      }
      gch->clear_incremental_collection_failed(); // We just did a full collection
      clear_should_allocate_from_space();         // if set
    }
  } else {
#ifdef ASSERT
    // It is possible that incremental_collection_failed() == true
    // here, because an attempted scavenge did not succeed.
The policy938// is normally expected to cause a full collection which should939// clear that condition, so we should not be here twice in a row940// with incremental_collection_failed() == true without having done941// a full collection in between.942if (!seen_incremental_collection_failed &&943gch->incremental_collection_failed()) {944if (Verbose && PrintGCDetails) {945gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",946GCCause::to_string(gch->gc_cause()));947}948seen_incremental_collection_failed = true;949} else if (seen_incremental_collection_failed) {950if (Verbose && PrintGCDetails) {951gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",952GCCause::to_string(gch->gc_cause()));953}954assert(gch->gc_cause() == GCCause::_scavenge_alot ||955(gch->gc_cause() == GCCause::_java_lang_system_gc && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||956!gch->incremental_collection_failed(),957"Twice in a row");958seen_incremental_collection_failed = false;959}960#endif // ASSERT961}962963if (ZapUnusedHeapArea) {964eden()->check_mangled_unused_area_complete();965from()->check_mangled_unused_area_complete();966to()->check_mangled_unused_area_complete();967}968969if (!CleanChunkPoolAsync) {970Chunk::clean_chunk_pool();971}972973// update the generation and space performance counters974update_counters();975gch->collector_policy()->counters()->update_counters();976}977978void DefNewGeneration::record_spaces_top() {979assert(ZapUnusedHeapArea, "Not mangling unused space");980eden()->set_top_for_allocations();981to()->set_top_for_allocations();982from()->set_top_for_allocations();983}984985void DefNewGeneration::ref_processor_init() {986Generation::ref_processor_init();987}988989990void DefNewGeneration::update_counters() {991if (UsePerfData) {992_eden_counters->update_all();993_from_counters->update_all();994_to_counters->update_all();995_gen_counters->update_all();996}997}998999void DefNewGeneration::verify() {1000eden()->verify();1001from()->verify();1002to()->verify();1003}10041005void DefNewGeneration::print_on(outputStream* st) const {1006Generation::print_on(st);1007st->print(" eden");1008eden()->print_on(st);1009st->print(" from");1010from()->print_on(st);1011st->print(" to ");1012to()->print_on(st);1013}101410151016const char* DefNewGeneration::name() const {1017return "def new generation";1018}10191020// Moved from inline file as they are not called inline1021CompactibleSpace* DefNewGeneration::first_compaction_space() const {1022return eden();1023}10241025HeapWord* DefNewGeneration::allocate(size_t word_size,1026bool is_tlab) {1027// This is the slow-path allocation for the DefNewGeneration.1028// Most allocations are fast-path in compiled code.1029// We try to allocate from the eden. 
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result != NULL) {
    if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
      _next_gen->sample_eden_chunk();
    }
    return result;
  }
  do {
    HeapWord* old_limit = eden()->soft_end();
    if (old_limit < eden()->end()) {
      // Tell the next generation we reached a limit.
      HeapWord* new_limit =
        next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
      if (new_limit != NULL) {
        Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
      } else {
        assert(eden()->soft_end() == eden()->end(),
               "invalid state after allocation_limit_reached returned null");
      }
    } else {
      // The allocation failed and the soft limit is equal to the hard limit,
      // so there is no reason to attempt another allocation.
      assert(old_limit == eden()->end(), "sanity check");
      break;
    }
    // Try to allocate until it succeeds or the soft limit can't be adjusted.
    result = eden()->par_allocate(word_size);
  } while (result == NULL);

  // If the eden is full and the last collection bailed out, we are running
  // out of heap space, and we try to allocate from the from-space, too.
  // allocate_from_space can't be inlined because that would introduce a
  // circular dependency at compile time.
  if (result == NULL) {
    result = allocate_from_space(word_size);
  } else if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
    _next_gen->sample_eden_chunk();
  }
  return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  HeapWord* res = eden()->par_allocate(word_size);
  if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
    _next_gen->sample_eden_chunk();
  }
  return res;
}

void DefNewGeneration::gc_prologue(bool full) {
  // Ensure that _end and _soft_end are the same in eden space.
  eden()->set_soft_end(eden()->end());
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::tlab_used() const {
  return eden()->used();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}