Path: src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp
/*
 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc/g1/g1CollectedHeap.hpp"

#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "runtime/atomic.hpp"

G1GCPhaseTimes* G1CollectedHeap::phase_times() const {
  return _policy->phase_times();
}

G1EvacStats* G1CollectedHeap::alloc_buffer_stats(G1HeapRegionAttr dest) {
  switch (dest.type()) {
    case G1HeapRegionAttr::Young:
      return &_survivor_evac_stats;
    case G1HeapRegionAttr::Old:
      return &_old_evac_stats;
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

size_t G1CollectedHeap::desired_plab_sz(G1HeapRegionAttr dest) {
  size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz(workers()->active_workers());
  // Prevent humongous PLAB sizes for two reasons:
  // * PLABs are allocated using a path similar to oop allocation, but should
  //   never be in a humongous region
  // * Allowing humongous PLABs needlessly churns the region free lists
  return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
}

// Inline functions for G1CollectedHeap

// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }

// Return the region with the given index, or NULL if unmapped. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at_or_null(uint index) const { return _hrm.at_or_null(index); }

inline HeapRegion* G1CollectedHeap::next_region_in_humongous(HeapRegion* hr) const {
  return _hrm.next_region_in_humongous(hr);
}
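
// Returns the index of the region containing addr: the byte offset of addr
// from the start of the reserved heap, shifted right by the log of the
// region size (each region covers 2^LogOfHRGrainBytes bytes).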
inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
  assert(is_in_reserved(addr),
         "Cannot calculate region index for address " PTR_FORMAT " that is outside of the heap [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(addr), p2i(reserved().start()), p2i(reserved().end()));
  return (uint)(pointer_delta(addr, reserved().start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
}

inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
  return _hrm.reserved().start() + index * HeapRegion::GrainWords;
}

template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(is_in_reserved((const void*) addr),
         "Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
         p2i((void*)addr), p2i(reserved().start()), p2i(reserved().end()));
  return _hrm.addr_to_region((HeapWord*)(void*) addr);
}

template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing_or_null(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(is_in_reserved((const void*) addr),
         "Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
         p2i((void*)addr), p2i(reserved().start()), p2i(reserved().end()));
  uint const region_idx = addr_to_region(addr);
  return region_at_or_null(region_idx);
}

inline void G1CollectedHeap::old_set_add(HeapRegion* hr) {
  _old_set.add(hr);
}

inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
  _old_set.remove(hr);
}

inline void G1CollectedHeap::archive_set_add(HeapRegion* hr) {
  _archive_set.add(hr);
}

// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing(start);)
  assert(word_size > 0, "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->is_humongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  card_table()->g1_mark_as_young(mr);
}

inline G1ScannerTasksQueue* G1CollectedHeap::task_queue(uint i) const {
  return _task_queues->queue(i);
}

inline bool G1CollectedHeap::is_marked_next(oop obj) const {
  return _cm->next_mark_bitmap()->is_marked(obj);
}

inline bool G1CollectedHeap::is_in_cset(oop obj) {
  return is_in_cset(cast_from_oop<HeapWord*>(obj));
}

inline bool G1CollectedHeap::is_in_cset(HeapWord* addr) {
  return _region_attr.is_in_cset(addr);
}

bool G1CollectedHeap::is_in_cset(const HeapRegion* hr) {
  return _region_attr.is_in_cset(hr);
}

bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) {
  return _region_attr.is_in_cset_or_humongous(cast_from_oop<HeapWord*>(obj));
}

G1HeapRegionAttr G1CollectedHeap::region_attr(const void* addr) const {
  return _region_attr.at((HeapWord*)addr);
}

G1HeapRegionAttr G1CollectedHeap::region_attr(uint idx) const {
  return _region_attr.get_by_index(idx);
}

void G1CollectedHeap::register_humongous_region_with_region_attr(uint index) {
  _region_attr.set_humongous(index, region_at(index)->rem_set()->is_tracked());
}

void G1CollectedHeap::register_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_has_remset(r->hrm_index(), r->rem_set()->is_tracked());
}

void G1CollectedHeap::register_old_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_in_old(r->hrm_index(), r->rem_set()->is_tracked());
  _rem_set->exclude_region_from_scan(r->hrm_index());
}

void G1CollectedHeap::register_optional_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_optional(r->hrm_index(), r->rem_set()->is_tracked());
}

bool G1CollectedHeap::evacuation_failed() const {
  return num_regions_failed_evacuation() > 0;
}

bool G1CollectedHeap::evacuation_failed(uint region_idx) const {
  assert(region_idx < max_regions(), "Invalid region index %u", region_idx);

  return Atomic::load(&_regions_failed_evacuation[region_idx]);
}

uint G1CollectedHeap::num_regions_failed_evacuation() const {
  return Atomic::load(&_num_regions_failed_evacuation);
}
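
// Claims the per-region "evacuation failed" flag: only the first thread that
// flips the flag from false to true gets a true result, and that thread also
// increments the global count of regions that failed evacuation.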
bool G1CollectedHeap::notify_region_failed_evacuation(uint const region_idx) {
  assert(region_idx < max_regions(), "Invalid region index %u", region_idx);

  volatile bool* region_failed_addr = &_regions_failed_evacuation[region_idx];
  bool result = !Atomic::load(region_failed_addr) && !Atomic::cmpxchg(region_failed_addr, false, true, memory_order_relaxed);
  if (result) {
    Atomic::inc(&_num_regions_failed_evacuation, memory_order_relaxed);
  }
  return result;
}

#ifndef PRODUCT
// Support for G1EvacuationFailureALot
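
// The helpers below inject evacuation failures for testing: injection is armed
// once at least G1EvacuationFailureALotInterval collections have passed since
// the last reset, applies only to the GC types selected by the
// G1EvacuationFailureALotDuring* flags, and then makes every
// G1EvacuationFailureALotCount-th call to evacuation_should_fail() return true.
// This support exists only in non-product builds.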
inline bool
G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool for_young_gc,
                                                      bool during_concurrent_start,
                                                      bool mark_or_rebuild_in_progress) {
  bool res = false;
  if (mark_or_rebuild_in_progress) {
    res |= G1EvacuationFailureALotDuringConcMark;
  }
  if (during_concurrent_start) {
    res |= G1EvacuationFailureALotDuringConcurrentStart;
  }
  if (for_young_gc) {
    res |= G1EvacuationFailureALotDuringYoungGC;
  } else {
    // GCs are mixed
    res |= G1EvacuationFailureALotDuringMixedGC;
  }
  return res;
}

inline void
G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
  if (G1EvacuationFailureALot) {
    // Note we can't assert that _evacuation_failure_alot_for_current_gc
    // is clear here. It may have been set during a previous GC but that GC
    // did not copy enough objects (i.e. G1EvacuationFailureALotCount) to
    // trigger an evacuation failure and clear the flags and counts.

    // Check if we have gone over the interval.
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number;

    _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);

    // Now check if G1EvacuationFailureALot is enabled for the current GC type.
    const bool in_young_only_phase = collector_state()->in_young_only_phase();
    const bool in_concurrent_start_gc = collector_state()->in_concurrent_start_gc();
    const bool mark_or_rebuild_in_progress = collector_state()->mark_or_rebuild_in_progress();

    _evacuation_failure_alot_for_current_gc &=
      evacuation_failure_alot_for_gc_type(in_young_only_phase,
                                          in_concurrent_start_gc,
                                          mark_or_rebuild_in_progress);
  }
}

inline bool G1CollectedHeap::evacuation_should_fail() {
  if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
    return false;
  }
  // G1EvacuationFailureALot is in effect for current GC
  // Access to _evacuation_failure_alot_count is not atomic;
  // the value does not have to be exact.
  if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
    return false;
  }
  _evacuation_failure_alot_count = 0;
  return true;
}

inline void G1CollectedHeap::reset_evacuation_should_fail() {
  if (G1EvacuationFailureALot) {
    _evacuation_failure_alot_gc_number = total_collections();
    _evacuation_failure_alot_count = 0;
    _evacuation_failure_alot_for_current_gc = false;
  }
}
#endif // #ifndef PRODUCT

inline bool G1CollectedHeap::is_in_young(const oop obj) {
  if (obj == NULL) {
    return false;
  }
  return heap_region_containing(obj)->is_young();
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_dead(obj, heap_region_containing(obj));
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_ill(obj, heap_region_containing(obj));
}

inline bool G1CollectedHeap::is_obj_dead_full(const oop obj, const HeapRegion* hr) const {
  return !is_marked_next(obj) && !hr->is_closed_archive();
}

inline bool G1CollectedHeap::is_obj_dead_full(const oop obj) const {
  return is_obj_dead_full(obj, heap_region_containing(obj));
}

inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
  assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
  _humongous_reclaim_candidates.set_candidate(region, value);
}

inline bool G1CollectedHeap::is_humongous_reclaim_candidate(uint region) {
  assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
  return _humongous_reclaim_candidates.is_candidate(region);
}

inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
  uint region = addr_to_region(cast_from_oop<HeapWord*>(obj));
  // Clear the flag in the humongous_reclaim_candidates table. Also
  // reset the entry in the region attribute table so that subsequent references
  // to the same humongous object do not go into the slow path again.
  // This is racy, as multiple threads may at the same time enter here, but this
  // is benign.
  // During collection we only ever clear the "candidate" flag, and only ever clear the
  // entry in the in_cset_fast_table.
  // We only ever evaluate the contents of these tables (in the VM thread) after
  // having synchronized the worker threads with the VM thread, or in the same
  // thread (i.e. within the VM thread).
  if (is_humongous_reclaim_candidate(region)) {
    set_humongous_reclaim_candidate(region, false);
    _region_attr.clear_humongous(region);
  }
}

#endif // SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP