Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp
38920 views
/*1* Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.2* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.3*4* This code is free software; you can redistribute it and/or modify it5* under the terms of the GNU General Public License version 2 only, as6* published by the Free Software Foundation.7*8* This code is distributed in the hope that it will be useful, but WITHOUT9* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or10* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License11* version 2 for more details (a copy is included in the LICENSE file that12* accompanied this code).13*14* You should have received a copy of the GNU General Public License version15* 2 along with this work; if not, write to the Free Software Foundation,16* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.17*18* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA19* or visit www.oracle.com if you need additional information or have any20* questions.21*22*/2324#include "precompiled.hpp"25#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"26#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"27#include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"28#include "gc_implementation/parallelScavenge/psOldGen.hpp"29#include "gc_implementation/shared/spaceDecorator.hpp"30#include "memory/cardTableModRefBS.hpp"31#include "memory/gcLocker.inline.hpp"32#include "oops/oop.inline.hpp"33#include "runtime/java.hpp"3435PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC3637inline const char* PSOldGen::select_name() {38return UseParallelOldGC ? "ParOldGen" : "PSOldGen";39}4041PSOldGen::PSOldGen(ReservedSpace rs, size_t alignment,42size_t initial_size, size_t min_size, size_t max_size,43const char* perf_data_name, int level):44_name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),45_max_gen_size(max_size)46{47initialize(rs, alignment, perf_data_name, level);48}4950PSOldGen::PSOldGen(size_t initial_size,51size_t min_size, size_t max_size,52const char* perf_data_name, int level):53_name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),54_max_gen_size(max_size)55{}5657void PSOldGen::initialize(ReservedSpace rs, size_t alignment,58const char* perf_data_name, int level) {59initialize_virtual_space(rs, alignment);60initialize_work(perf_data_name, level);6162// The old gen can grow to gen_size_limit(). _reserve reflects only63// the current maximum that can be committed.64assert(_reserved.byte_size() <= gen_size_limit(), "Consistency check");6566initialize_performance_counters(perf_data_name, level);67}6869void PSOldGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {7071_virtual_space = new PSVirtualSpace(rs, alignment);72if (!_virtual_space->expand_by(_init_gen_size)) {73vm_exit_during_initialization("Could not reserve enough space for "74"object heap");75}76}7778void PSOldGen::initialize_work(const char* perf_data_name, int level) {79//80// Basic memory initialization81//8283MemRegion limit_reserved((HeapWord*)virtual_space()->low_boundary(),84heap_word_size(_max_gen_size));85assert(limit_reserved.byte_size() == _max_gen_size,86"word vs bytes confusion");87//88// Object start stuff89//9091start_array()->initialize(limit_reserved);9293_reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),94(HeapWord*)virtual_space()->high_boundary());9596//97// Card table stuff98//99100MemRegion cmr((HeapWord*)virtual_space()->low(),101(HeapWord*)virtual_space()->high());102if (ZapUnusedHeapArea) {103// Mangle newly committed space immediately rather than104// waiting for the initialization of the space even though105// mangling is related to spaces. Doing it here eliminates106// the need to carry along information that a complete mangling107// (bottom to end) needs to be done.108SpaceMangler::mangle_region(cmr);109}110111Universe::heap()->barrier_set()->resize_covered_region(cmr);112113CardTableModRefBS* _ct = (CardTableModRefBS*)Universe::heap()->barrier_set();114assert (_ct->kind() == BarrierSet::CardTableModRef, "Sanity");115116// Verify that the start and end of this generation is the start of a card.117// If this wasn't true, a single card could span more than one generation,118// which would cause problems when we commit/uncommit memory, and when we119// clear and dirty cards.120guarantee(_ct->is_card_aligned(_reserved.start()), "generation must be card aligned");121if (_reserved.end() != Universe::heap()->reserved_region().end()) {122// Don't check at the very end of the heap as we'll assert that we're probing off123// the end if we try.124guarantee(_ct->is_card_aligned(_reserved.end()), "generation must be card aligned");125}126127//128// ObjectSpace stuff129//130131_object_space = new MutableSpace(virtual_space()->alignment());132133if (_object_space == NULL)134vm_exit_during_initialization("Could not allocate an old gen space");135136object_space()->initialize(cmr,137SpaceDecorator::Clear,138SpaceDecorator::Mangle);139140_object_mark_sweep = new PSMarkSweepDecorator(_object_space, start_array(), MarkSweepDeadRatio);141142if (_object_mark_sweep == NULL)143vm_exit_during_initialization("Could not complete allocation of old generation");144145// Update the start_array146start_array()->set_covered_region(cmr);147}148149void PSOldGen::initialize_performance_counters(const char* perf_data_name, int level) {150// Generation Counters, generation 'level', 1 subspace151_gen_counters = new PSGenerationCounters(perf_data_name, level, 1,152virtual_space());153_space_counters = new SpaceCounters(perf_data_name, 0,154virtual_space()->reserved_size(),155_object_space, _gen_counters);156}157158// Assume that the generation has been allocated if its159// reserved size is not 0.160bool PSOldGen::is_allocated() {161return virtual_space()->reserved_size() != 0;162}163164void PSOldGen::precompact() {165ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();166assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");167168// Reset start array first.169start_array()->reset();170171object_mark_sweep()->precompact();172173// Now compact the young gen174heap->young_gen()->precompact();175}176177void PSOldGen::adjust_pointers() {178object_mark_sweep()->adjust_pointers();179}180181void PSOldGen::compact() {182object_mark_sweep()->compact(ZapUnusedHeapArea);183}184185size_t PSOldGen::contiguous_available() const {186return object_space()->free_in_bytes() + virtual_space()->uncommitted_size();187}188189// Allocation. We report all successful allocations to the size policy190// Note that the perm gen does not use this method, and should not!191HeapWord* PSOldGen::allocate(size_t word_size) {192assert_locked_or_safepoint(Heap_lock);193HeapWord* res = allocate_noexpand(word_size);194195if (res == NULL) {196res = expand_and_allocate(word_size);197}198199// Allocations in the old generation need to be reported200if (res != NULL) {201ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();202heap->size_policy()->tenured_allocation(word_size);203}204205return res;206}207208HeapWord* PSOldGen::expand_and_allocate(size_t word_size) {209expand(word_size*HeapWordSize);210if (GCExpandToAllocateDelayMillis > 0) {211os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);212}213return allocate_noexpand(word_size);214}215216HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {217expand(word_size*HeapWordSize);218if (GCExpandToAllocateDelayMillis > 0) {219os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);220}221return cas_allocate_noexpand(word_size);222}223224void PSOldGen::expand(size_t bytes) {225if (bytes == 0) {226return;227}228MutexLocker x(ExpandHeap_lock);229const size_t alignment = virtual_space()->alignment();230size_t aligned_bytes = align_size_up(bytes, alignment);231size_t aligned_expand_bytes = align_size_up(MinHeapDeltaBytes, alignment);232233if (UseNUMA) {234// With NUMA we use round-robin page allocation for the old gen. Expand by at least235// providing a page per lgroup. Alignment is larger or equal to the page size.236aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());237}238if (aligned_bytes == 0){239// The alignment caused the number of bytes to wrap. An expand_by(0) will240// return true with the implication that and expansion was done when it241// was not. A call to expand implies a best effort to expand by "bytes"242// but not a guarantee. Align down to give a best effort. This is likely243// the most that the generation can expand since it has some capacity to244// start with.245aligned_bytes = align_size_down(bytes, alignment);246}247248bool success = false;249if (aligned_expand_bytes > aligned_bytes) {250success = expand_by(aligned_expand_bytes);251}252if (!success) {253success = expand_by(aligned_bytes);254}255if (!success) {256success = expand_to_reserved();257}258259if (PrintGC && Verbose) {260if (success && GC_locker::is_active_and_needs_gc()) {261gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");262}263}264}265266bool PSOldGen::expand_by(size_t bytes) {267assert_lock_strong(ExpandHeap_lock);268assert_locked_or_safepoint(Heap_lock);269if (bytes == 0) {270return true; // That's what virtual_space()->expand_by(0) would return271}272bool result = virtual_space()->expand_by(bytes);273if (result) {274if (ZapUnusedHeapArea) {275// We need to mangle the newly expanded area. The memregion spans276// end -> new_end, we assume that top -> end is already mangled.277// Do the mangling before post_resize() is called because278// the space is available for allocation after post_resize();279HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();280assert(object_space()->end() < virtual_space_high,281"Should be true before post_resize()");282MemRegion mangle_region(object_space()->end(), virtual_space_high);283// Note that the object space has not yet been updated to284// coincede with the new underlying virtual space.285SpaceMangler::mangle_region(mangle_region);286}287post_resize();288if (UsePerfData) {289_space_counters->update_capacity();290_gen_counters->update_all();291}292}293294if (result && Verbose && PrintGC) {295size_t new_mem_size = virtual_space()->committed_size();296size_t old_mem_size = new_mem_size - bytes;297gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "298SIZE_FORMAT "K to "299SIZE_FORMAT "K",300name(), old_mem_size/K, bytes/K, new_mem_size/K);301}302303return result;304}305306bool PSOldGen::expand_to_reserved() {307assert_lock_strong(ExpandHeap_lock);308assert_locked_or_safepoint(Heap_lock);309310bool result = true;311const size_t remaining_bytes = virtual_space()->uncommitted_size();312if (remaining_bytes > 0) {313result = expand_by(remaining_bytes);314DEBUG_ONLY(if (!result) warning("grow to reserve failed"));315}316return result;317}318319void PSOldGen::shrink(size_t bytes) {320assert_lock_strong(ExpandHeap_lock);321assert_locked_or_safepoint(Heap_lock);322323size_t size = align_size_down(bytes, virtual_space()->alignment());324if (size > 0) {325assert_lock_strong(ExpandHeap_lock);326virtual_space()->shrink_by(bytes);327post_resize();328329if (Verbose && PrintGC) {330size_t new_mem_size = virtual_space()->committed_size();331size_t old_mem_size = new_mem_size + bytes;332gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by "333SIZE_FORMAT "K to "334SIZE_FORMAT "K",335name(), old_mem_size/K, bytes/K, new_mem_size/K);336}337}338}339340void PSOldGen::resize(size_t desired_free_space) {341const size_t alignment = virtual_space()->alignment();342const size_t size_before = virtual_space()->committed_size();343size_t new_size = used_in_bytes() + desired_free_space;344if (new_size < used_in_bytes()) {345// Overflowed the addition.346new_size = gen_size_limit();347}348// Adjust according to our min and max349new_size = MAX2(MIN2(new_size, gen_size_limit()), min_gen_size());350351assert(gen_size_limit() >= reserved().byte_size(), "max new size problem?");352new_size = align_size_up(new_size, alignment);353354const size_t current_size = capacity_in_bytes();355356if (PrintAdaptiveSizePolicy && Verbose) {357gclog_or_tty->print_cr("AdaptiveSizePolicy::old generation size: "358"desired free: " SIZE_FORMAT " used: " SIZE_FORMAT359" new size: " SIZE_FORMAT " current size " SIZE_FORMAT360" gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,361desired_free_space, used_in_bytes(), new_size, current_size,362gen_size_limit(), min_gen_size());363}364365if (new_size == current_size) {366// No change requested367return;368}369if (new_size > current_size) {370size_t change_bytes = new_size - current_size;371expand(change_bytes);372} else {373size_t change_bytes = current_size - new_size;374// shrink doesn't grab this lock, expand does. Is that right?375MutexLocker x(ExpandHeap_lock);376shrink(change_bytes);377}378379if (PrintAdaptiveSizePolicy) {380ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();381assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");382gclog_or_tty->print_cr("AdaptiveSizePolicy::old generation size: "383"collection: %d "384"(" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",385heap->total_collections(),386size_before, virtual_space()->committed_size());387}388}389390// NOTE! We need to be careful about resizing. During a GC, multiple391// allocators may be active during heap expansion. If we allow the392// heap resizing to become visible before we have correctly resized393// all heap related data structures, we may cause program failures.394void PSOldGen::post_resize() {395// First construct a memregion representing the new size396MemRegion new_memregion((HeapWord*)virtual_space()->low(),397(HeapWord*)virtual_space()->high());398size_t new_word_size = new_memregion.word_size();399400start_array()->set_covered_region(new_memregion);401Universe::heap()->barrier_set()->resize_covered_region(new_memregion);402403// The update of the space's end is done by this call. As that404// makes the new space available for concurrent allocation, this405// must be the last step when expanding.406object_space()->initialize(new_memregion,407SpaceDecorator::DontClear,408SpaceDecorator::DontMangle);409410assert(new_word_size == heap_word_size(object_space()->capacity_in_bytes()),411"Sanity");412}413414size_t PSOldGen::gen_size_limit() {415return _max_gen_size;416}417418void PSOldGen::reset_after_change() {419ShouldNotReachHere();420return;421}422423size_t PSOldGen::available_for_expansion() {424ShouldNotReachHere();425return 0;426}427428size_t PSOldGen::available_for_contraction() {429ShouldNotReachHere();430return 0;431}432433void PSOldGen::print() const { print_on(tty);}434void PSOldGen::print_on(outputStream* st) const {435st->print(" %-15s", name());436if (PrintGCDetails && Verbose) {437st->print(" total " SIZE_FORMAT ", used " SIZE_FORMAT,438capacity_in_bytes(), used_in_bytes());439} else {440st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",441capacity_in_bytes()/K, used_in_bytes()/K);442}443st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",444virtual_space()->low_boundary(),445virtual_space()->high(),446virtual_space()->high_boundary());447448st->print(" object"); object_space()->print_on(st);449}450451void PSOldGen::print_used_change(size_t prev_used) const {452gclog_or_tty->print(" [%s:", name());453gclog_or_tty->print(" " SIZE_FORMAT "K"454"->" SIZE_FORMAT "K"455"(" SIZE_FORMAT "K)",456prev_used / K, used_in_bytes() / K,457capacity_in_bytes() / K);458gclog_or_tty->print("]");459}460461void PSOldGen::update_counters() {462if (UsePerfData) {463_space_counters->update_all();464_gen_counters->update_all();465}466}467468#ifndef PRODUCT469470void PSOldGen::space_invariants() {471assert(object_space()->end() == (HeapWord*) virtual_space()->high(),472"Space invariant");473assert(object_space()->bottom() == (HeapWord*) virtual_space()->low(),474"Space invariant");475assert(virtual_space()->low_boundary() <= virtual_space()->low(),476"Space invariant");477assert(virtual_space()->high_boundary() >= virtual_space()->high(),478"Space invariant");479assert(virtual_space()->low_boundary() == (char*) _reserved.start(),480"Space invariant");481assert(virtual_space()->high_boundary() == (char*) _reserved.end(),482"Space invariant");483assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),484"Space invariant");485}486#endif487488void PSOldGen::verify() {489object_space()->verify();490}491class VerifyObjectStartArrayClosure : public ObjectClosure {492PSOldGen* _gen;493ObjectStartArray* _start_array;494495public:496VerifyObjectStartArrayClosure(PSOldGen* gen, ObjectStartArray* start_array) :497_gen(gen), _start_array(start_array) { }498499virtual void do_object(oop obj) {500HeapWord* test_addr = (HeapWord*)obj + 1;501guarantee(_start_array->object_start(test_addr) == (HeapWord*)obj, "ObjectStartArray cannot find start of object");502guarantee(_start_array->is_block_allocated((HeapWord*)obj), "ObjectStartArray missing block allocation");503}504};505506void PSOldGen::verify_object_start_array() {507VerifyObjectStartArrayClosure check( this, &_start_array );508object_iterate(&check);509}510511#ifndef PRODUCT512void PSOldGen::record_spaces_top() {513assert(ZapUnusedHeapArea, "Not mangling unused space");514object_space()->set_top_for_allocations();515}516#endif517518519