Path: src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp
/*
 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc/g1/g1CollectedHeap.hpp"

#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "runtime/atomic.hpp"

G1GCPhaseTimes* G1CollectedHeap::phase_times() const {
  return _policy->phase_times();
}

G1EvacStats* G1CollectedHeap::alloc_buffer_stats(G1HeapRegionAttr dest) {
  switch (dest.type()) {
    case G1HeapRegionAttr::Young:
      return &_survivor_evac_stats;
    case G1HeapRegionAttr::Old:
      return &_old_evac_stats;
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

size_t G1CollectedHeap::desired_plab_sz(G1HeapRegionAttr dest) {
  size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz(workers()->active_workers());
  // Prevent humongous PLAB sizes for two reasons:
  // * PLABs are allocated using similar paths as oops, but should
  //   never be in a humongous region
  // * Allowing humongous PLABs needlessly churns the region free lists
  return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
}
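// A worked example of the clamping above, with illustrative (assumed) numbers:
// with 1 MB regions and 8-byte heap words, HeapRegion::GrainWords is 131072,
// and _humongous_object_threshold_in_words (half a region) is 65536 words,
// i.e. 512 KB. A PLAB is therefore never sized at or above the humongous
// threshold, regardless of what the per-destination allocation stats suggest.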
// Inline functions for G1CollectedHeap

// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }

// Return the region with the given index, or NULL if unmapped. It assumes
// the index is valid.
inline HeapRegion* G1CollectedHeap::region_at_or_null(uint index) const { return _hrm.at_or_null(index); }

inline HeapRegion* G1CollectedHeap::next_region_in_humongous(HeapRegion* hr) const {
  return _hrm.next_region_in_humongous(hr);
}

inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
  assert(is_in_reserved(addr),
         "Cannot calculate region index for address " PTR_FORMAT " that is outside of the heap [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(addr), p2i(reserved().start()), p2i(reserved().end()));
  return (uint)(pointer_delta(addr, reserved().start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
}

inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
  return _hrm.reserved().start() + index * HeapRegion::GrainWords;
}

template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(is_in_reserved((const void*) addr),
         "Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
         p2i((void*)addr), p2i(reserved().start()), p2i(reserved().end()));
  return _hrm.addr_to_region((HeapWord*)(void*) addr);
}

template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing_or_null(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(is_in_reserved((const void*) addr),
         "Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
         p2i((void*)addr), p2i(reserved().start()), p2i(reserved().end()));
  uint const region_idx = addr_to_region(addr);
  return region_at_or_null(region_idx);
}
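// A worked example of the address-to-region mapping used above, with
// illustrative (assumed) numbers: with 1 MB regions,
// HeapRegion::LogOfHRGrainBytes is 20, so an address 0x500000 bytes past
// reserved().start() maps to region index (0x500000 >> 20) == 5, and
// bottom_addr_for_region(5) returns that region's first word. The actual
// grain size is chosen ergonomically at startup.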
inline void G1CollectedHeap::old_set_add(HeapRegion* hr) {
  _old_set.add(hr);
}

inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
  _old_set.remove(hr);
}

inline void G1CollectedHeap::archive_set_add(HeapRegion* hr) {
  _archive_set.add(hr);
}

// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing(start);)
  assert(word_size > 0, "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->is_humongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  card_table()->g1_mark_as_young(mr);
}

inline G1ScannerTasksQueue* G1CollectedHeap::task_queue(uint i) const {
  return _task_queues->queue(i);
}

inline bool G1CollectedHeap::is_marked_next(oop obj) const {
  return _cm->next_mark_bitmap()->is_marked(obj);
}

inline bool G1CollectedHeap::is_in_cset(oop obj) {
  return is_in_cset(cast_from_oop<HeapWord*>(obj));
}

inline bool G1CollectedHeap::is_in_cset(HeapWord* addr) {
  return _region_attr.is_in_cset(addr);
}

bool G1CollectedHeap::is_in_cset(const HeapRegion* hr) {
  return _region_attr.is_in_cset(hr);
}

bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) {
  return _region_attr.is_in_cset_or_humongous(cast_from_oop<HeapWord*>(obj));
}

G1HeapRegionAttr G1CollectedHeap::region_attr(const void* addr) const {
  return _region_attr.at((HeapWord*)addr);
}

G1HeapRegionAttr G1CollectedHeap::region_attr(uint idx) const {
  return _region_attr.get_by_index(idx);
}

void G1CollectedHeap::register_humongous_region_with_region_attr(uint index) {
  _region_attr.set_humongous(index, region_at(index)->rem_set()->is_tracked());
}

void G1CollectedHeap::register_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_has_remset(r->hrm_index(), r->rem_set()->is_tracked());
}

void G1CollectedHeap::register_old_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_in_old(r->hrm_index(), r->rem_set()->is_tracked());
  _rem_set->exclude_region_from_scan(r->hrm_index());
}

void G1CollectedHeap::register_optional_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_optional(r->hrm_index(), r->rem_set()->is_tracked());
}

bool G1CollectedHeap::evacuation_failed() const {
  return num_regions_failed_evacuation() > 0;
}

uint G1CollectedHeap::num_regions_failed_evacuation() const {
  return Atomic::load(&_num_regions_failed_evacuation);
}

void G1CollectedHeap::notify_region_failed_evacuation() {
  Atomic::inc(&_num_regions_failed_evacuation, memory_order_relaxed);
}

#ifndef PRODUCT
// Support for G1EvacuationFailureALot

inline bool
G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool for_young_gc,
                                                     bool during_concurrent_start,
                                                     bool mark_or_rebuild_in_progress) {
  bool res = false;
  if (mark_or_rebuild_in_progress) {
    res |= G1EvacuationFailureALotDuringConcMark;
  }
  if (during_concurrent_start) {
    res |= G1EvacuationFailureALotDuringConcurrentStart;
  }
  if (for_young_gc) {
    res |= G1EvacuationFailureALotDuringYoungGC;
  } else {
    // GCs are mixed
    res |= G1EvacuationFailureALotDuringMixedGC;
  }
  return res;
}
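// For example (an illustrative reading of the function above, not additional
// behavior): during a concurrent start pause with only
// G1EvacuationFailureALotDuringConcurrentStart enabled, res becomes true
// regardless of whether the pause is young or mixed; with all four develop
// flags disabled, injection is never enabled for any GC type.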
inline void
G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
  if (G1EvacuationFailureALot) {
    // Note we can't assert that _evacuation_failure_alot_for_current_gc
    // is clear here. It may have been set during a previous GC but that GC
    // did not copy enough objects (i.e. G1EvacuationFailureALotCount) to
    // trigger an evacuation failure and clear the flags and counts.

    // Check if we have gone over the interval.
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number;

    _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);

    // Now check if G1EvacuationFailureALot is enabled for the current GC type.
    const bool in_young_only_phase = collector_state()->in_young_only_phase();
    const bool in_concurrent_start_gc = collector_state()->in_concurrent_start_gc();
    const bool mark_or_rebuild_in_progress = collector_state()->mark_or_rebuild_in_progress();

    _evacuation_failure_alot_for_current_gc &=
      evacuation_failure_alot_for_gc_type(in_young_only_phase,
                                          in_concurrent_start_gc,
                                          mark_or_rebuild_in_progress);
  }
}

inline bool G1CollectedHeap::evacuation_should_fail() {
  if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
    return false;
  }
  // G1EvacuationFailureALot is in effect for current GC
  // Access to _evacuation_failure_alot_count is not atomic;
  // the value does not have to be exact.
  if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
    return false;
  }
  _evacuation_failure_alot_count = 0;
  return true;
}

inline void G1CollectedHeap::reset_evacuation_should_fail() {
  if (G1EvacuationFailureALot) {
    _evacuation_failure_alot_gc_number = total_collections();
    _evacuation_failure_alot_count = 0;
    _evacuation_failure_alot_for_current_gc = false;
  }
}
#endif // #ifndef PRODUCT
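// A usage sketch for the machinery above, with hypothetical flag values in a
// non-product build:
//   -XX:+G1EvacuationFailureALot -XX:G1EvacuationFailureALotInterval=5 \
//   -XX:G1EvacuationFailureALotCount=1000
// would arm injection once at least 5 GCs have passed since the last reset,
// and an armed, type-matching GC then has evacuation_should_fail() report a
// simulated failure on roughly every 1000th attempted object copy.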
inline bool G1CollectedHeap::is_in_young(const oop obj) {
  if (obj == NULL) {
    return false;
  }
  return heap_region_containing(obj)->is_young();
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_dead(obj, heap_region_containing(obj));
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_ill(obj, heap_region_containing(obj));
}

inline bool G1CollectedHeap::is_obj_dead_full(const oop obj, const HeapRegion* hr) const {
  return !is_marked_next(obj) && !hr->is_closed_archive();
}

inline bool G1CollectedHeap::is_obj_dead_full(const oop obj) const {
  return is_obj_dead_full(obj, heap_region_containing(obj));
}

inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
  assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
  _humongous_reclaim_candidates.set_candidate(region, value);
}

inline bool G1CollectedHeap::is_humongous_reclaim_candidate(uint region) {
  assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
  return _humongous_reclaim_candidates.is_candidate(region);
}

inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
  uint region = addr_to_region(cast_from_oop<HeapWord*>(obj));
  // Clear the flag in the humongous_reclaim_candidates table. Also
  // reset the entry in the region attribute table so that subsequent references
  // to the same humongous object do not go into the slow path again.
  // This is racy, as multiple threads may at the same time enter here, but this
  // is benign.
  // During collection we only ever clear the "candidate" flag, and only ever clear the
  // entry in the in_cset_fast_table.
  // We only ever evaluate the contents of these tables (in the VM thread) after
  // having synchronized the worker threads with the VM thread, or in the same
  // thread (i.e. within the VM thread).
  if (is_humongous_reclaim_candidate(region)) {
    set_humongous_reclaim_candidate(region, false);
    _region_attr.clear_humongous(region);
  }
}

#endif // SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP