// src/hotspot/share/gc/shared/cardGeneration.cpp
/*
 * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/blockOffsetTable.inline.hpp"
#include "gc/shared/cardGeneration.inline.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/space.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
#include "logging/log.hpp"
#include "runtime/java.hpp"

// Construct a generation whose remembered set is a card table. Sets up the
// shared block offset array over the whole reserved region, tells the
// remembered set to cover the initially committed region, and checks that the
// generation boundaries are card-aligned.
CardGeneration::CardGeneration(ReservedSpace rs,
                               size_t initial_byte_size,
                               CardTableRS* remset) :
  Generation(rs, initial_byte_size), _rs(remset),
  _shrink_factor(0), _min_heap_delta_bytes(), _capacity_at_prologue(),
  _used_at_prologue()
{
  HeapWord* start = (HeapWord*)rs.base();
  size_t reserved_byte_size = rs.size();
  // Both the base address and size of the reservation must be word aligned.
  assert((uintptr_t(start) & 3) == 0, "bad alignment");
  assert((reserved_byte_size & 3) == 0, "bad alignment");
  MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
  // The BOT spans the whole reserved region but only the initial committed
  // part is initialized now; it is resized in grow_by()/shrink().
  _bts = new BlockOffsetSharedArray(reserved_mr,
                                    heap_word_size(initial_byte_size));
  MemRegion committed_mr(start, heap_word_size(initial_byte_size));
  _rs->resize_covered_region(committed_mr);

  // Verify that the start and end of this generation is the start of a card.
  // If this wasn't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
  if (reserved_mr.end() != GenCollectedHeap::heap()->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
  }
  _min_heap_delta_bytes = MinHeapDeltaBytes;
  _capacity_at_prologue = initial_byte_size;
  _used_at_prologue = 0;
}

// Commit "bytes" more of the reserved space. On success, grows the card
// table, the shared block offset array, and the space's end to cover the new
// committed region, mangles the newly committed memory if ZapUnusedHeapArea
// is set, and updates the capacity counters. Returns false if the commit
// failed. Locking is checked via assert_correct_size_change_locking().
bool CardGeneration::grow_by(size_t bytes) {
  assert_correct_size_change_locking();
  bool result = _virtual_space.expand_by(bytes);
  if (result) {
    size_t new_word_size =
      heap_word_size(_virtual_space.committed_size());
    MemRegion mr(space()->bottom(), new_word_size);
    // Expand card table
    GenCollectedHeap::heap()->rem_set()->resize_covered_region(mr);
    // Expand shared block offset array
    _bts->resize(new_word_size);

    // Fix for bug #4668531
    if (ZapUnusedHeapArea) {
      // Mangle only the newly committed region: [old end, new high).
      MemRegion mangle_region(space()->end(),
                              (HeapWord*)_virtual_space.high());
      SpaceMangler::mangle_region(mangle_region);
    }

    // Expand space -- also expands space's BOT
    // (which uses (part of) shared array above)
    space()->set_end((HeapWord*)_virtual_space.high());

    // update the space and generation capacity counters
    update_counters();

    size_t new_mem_size = _virtual_space.committed_size();
    size_t old_mem_size = new_mem_size - bytes;
    log_trace(gc, heap)("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                        name(), old_mem_size/K, bytes/K, new_mem_size/K);
  }
  return result;
}

// Best-effort expansion: try the larger of the page-aligned "bytes" and
// "expand_bytes"; if that fails fall back to the smaller request, and finally
// to growing to the full reserved size. Returns true if any growth succeeded
// (or if bytes == 0, which is trivially successful).
bool CardGeneration::expand(size_t bytes, size_t expand_bytes) {
  assert_locked_or_safepoint(Heap_lock);
  if (bytes == 0) {
    return true;  // That's what grow_by(0) would return
  }
  size_t aligned_bytes = ReservedSpace::page_align_size_up(bytes);
  if (aligned_bytes == 0) {
    // The alignment caused the number of bytes to wrap. An expand_by(0) will
    // return true with the implication that an expansion was done when it
    // was not. A call to expand implies a best effort to expand by "bytes"
    // but not a guarantee. Align down to give a best effort. This is likely
    // the most that the generation can expand since it has some capacity to
    // start with.
    aligned_bytes = ReservedSpace::page_align_size_down(bytes);
  }
  size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = grow_by(aligned_expand_bytes);
  }
  if (!success) {
    success = grow_by(aligned_bytes);
  }
  if (!success) {
    success = grow_to_reserved();
  }
  if (success && GCLocker::is_active_and_needs_gc()) {
    log_trace(gc, heap)("Garbage collection disabled, expanded heap instead");
  }

  return success;
}

// Commit whatever remains of the reserved space. Returns true if nothing was
// left to commit or the commit succeeded.
bool CardGeneration::grow_to_reserved() {
  assert_correct_size_change_locking();
  bool success = true;
  const size_t remaining_bytes = _virtual_space.uncommitted_size();
  if (remaining_bytes > 0) {
    success = grow_by(remaining_bytes);
    DEBUG_ONLY(if (!success) log_warning(gc)("grow to reserved failed");)
  }
  return success;
}

// Uncommit (up to) "bytes" of committed space, shrinking the space, the
// shared block offset array and the card table to match. The request is
// page-aligned down; a request that rounds to zero is a no-op.
void CardGeneration::shrink(size_t bytes) {
  assert_correct_size_change_locking();

  size_t size = ReservedSpace::page_align_size_down(bytes);
  if (size == 0) {
    return;
  }

  // Shrink committed space
  _virtual_space.shrink_by(size);
  // Shrink space; this also shrinks the space's BOT
  space()->set_end((HeapWord*) _virtual_space.high());
  size_t new_word_size = heap_word_size(space()->capacity());
  // Shrink the shared block offset array
  _bts->resize(new_word_size);
  MemRegion mr(space()->bottom(), new_word_size);
  // Shrink the card table
  GenCollectedHeap::heap()->rem_set()->resize_covered_region(mr);

  size_t new_mem_size = _virtual_space.committed_size();
  size_t old_mem_size = new_mem_size + size;
  log_trace(gc, heap)("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                      name(), old_mem_size/K, new_mem_size/K);
}

// No young generation references, clear this generation's cards.
void CardGeneration::clear_remembered_set() {
  _rs->clear(reserved());
}

// Objects in this generation may have moved, invalidate this
// generation's cards.
void CardGeneration::invalidate_remembered_set() {
  _rs->invalidate(used_region());
}

// Decide, based on MinHeapFreeRatio/MaxHeapFreeRatio and the capacity at the
// collection prologue, whether to expand or shrink the generation after a GC,
// and perform the resize. Shrinking is damped over successive calls via
// _shrink_factor when ShrinkHeapInSteps is enabled.
void CardGeneration::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  size_t current_shrink_factor = _shrink_factor;
  // Reset; re-armed below only if we actually decide to shrink in steps.
  _shrink_factor = 0;

  // We don't have floating point command-line arguments
  // Note: argument processing ensures that MinHeapFreeRatio < 100.
  const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  // Compute some numbers about the state of the heap.
  const size_t used_after_gc = used();
  const size_t capacity_after_gc = capacity();

  // Capacity needed so that used_after_gc is at most maximum_used_percentage
  // of capacity; clamped to max_uintx to avoid overflow on conversion.
  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity, initial_size());
  assert(used_after_gc <= minimum_desired_capacity, "sanity check");

  const size_t free_after_gc = free();
  const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
  log_trace(gc, heap)("CardGeneration::compute_new_size:");
  log_trace(gc, heap)(" minimum_free_percentage: %6.2f maximum_used_percentage: %6.2f",
                      minimum_free_percentage,
                      maximum_used_percentage);
  log_trace(gc, heap)(" free_after_gc : %6.1fK used_after_gc : %6.1fK capacity_after_gc : %6.1fK",
                      free_after_gc / (double) K,
                      used_after_gc / (double) K,
                      capacity_after_gc / (double) K);
  log_trace(gc, heap)(" free_percentage: %6.2f", free_percentage);

  if (capacity_after_gc < minimum_desired_capacity) {
    // If we have less free space than we want then expand
    size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
    // Don't expand unless it's significant
    if (expand_bytes >= _min_heap_delta_bytes) {
      expand(expand_bytes, 0); // safe if expansion fails
    }
    log_trace(gc, heap)(" expanding: minimum_desired_capacity: %6.1fK expand_bytes: %6.1fK _min_heap_delta_bytes: %6.1fK",
                        minimum_desired_capacity / (double) K,
                        expand_bytes / (double) K,
                        _min_heap_delta_bytes / (double) K);
    return;
  }

  // No expansion, now see if we want to shrink
  size_t shrink_bytes = 0;
  // We would never want to shrink more than this
  size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity;

  if (MaxHeapFreeRatio < 100) {
    const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    // Largest capacity at which used_after_gc still fills at least
    // minimum_used_percentage of the generation; clamped as above.
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
    maximum_desired_capacity = MAX2(maximum_desired_capacity, initial_size());
    log_trace(gc, heap)(" maximum_free_percentage: %6.2f minimum_used_percentage: %6.2f",
                        maximum_free_percentage, minimum_used_percentage);
    log_trace(gc, heap)(" _capacity_at_prologue: %6.1fK minimum_desired_capacity: %6.1fK maximum_desired_capacity: %6.1fK",
                        _capacity_at_prologue / (double) K,
                        minimum_desired_capacity / (double) K,
                        maximum_desired_capacity / (double) K);
    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_after_gc > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_after_gc - maximum_desired_capacity;
      if (ShrinkHeapInSteps) {
        // If ShrinkHeapInSteps is true (the default),
        // we don't want to shrink all the way back to initSize if people call
        // System.gc(), because some programs do that between "phases" and then
        // we'd just have to grow the heap up again for the next phase. So we
        // damp the shrinking: 0% on the first call, 10% on the second call, 40%
        // on the third call, and 100% by the fourth call. But if we recompute
        // size without shrinking, it goes back to 0%.
        shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
        if (current_shrink_factor == 0) {
          _shrink_factor = 10;
        } else {
          _shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100);
        }
      }
      assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
      log_trace(gc, heap)(" shrinking: initSize: %.1fK maximum_desired_capacity: %.1fK",
                          initial_size() / (double) K, maximum_desired_capacity / (double) K);
      log_trace(gc, heap)(" shrink_bytes: %.1fK current_shrink_factor: " SIZE_FORMAT " new shrink factor: " SIZE_FORMAT " _min_heap_delta_bytes: %.1fK",
                          shrink_bytes / (double) K,
                          current_shrink_factor,
                          _shrink_factor,
                          _min_heap_delta_bytes / (double) K);
    }
  }

  if (capacity_after_gc > _capacity_at_prologue) {
    // We might have expanded for promotions, in which case we might want to
    // take back that expansion if there's room after GC. That keeps us from
    // stretching the heap with promotions when there's plenty of room.
    size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue;
    expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes);
    // We have two shrinking computations, take the largest
    shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion);
    assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
    log_trace(gc, heap)(" aggressive shrinking: _capacity_at_prologue: %.1fK capacity_after_gc: %.1fK expansion_for_promotion: %.1fK shrink_bytes: %.1fK",
                        capacity_after_gc / (double) K,
                        _capacity_at_prologue / (double) K,
                        expansion_for_promotion / (double) K,
                        shrink_bytes / (double) K);
  }
  // Don't shrink unless it's significant
  if (shrink_bytes >= _min_heap_delta_bytes) {
    shrink(shrink_bytes);
  }
}

// Currently nothing to do.
void CardGeneration::prepare_for_verify() {}

// Apply "blk" to this generation's space. Note that "usedOnly" is not
// consulted: the single space() is visited unconditionally.
void CardGeneration::space_iterate(SpaceClosure* blk,
                                   bool usedOnly) {
  blk->do_space(space());
}

void CardGeneration::younger_refs_iterate(OopIterateClosure* blk) {
  // Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in
  // "sp" that point into the young generation.
  // The iteration is only over objects allocated at the start of the
  // iterations; objects allocated as a result of applying the closure are
  // not included.

  HeapWord* gen_boundary = reserved().start();
  _rs->younger_refs_in_space_iterate(space(), gen_boundary, blk);
}