Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP

#include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/shared/gcUtil.hpp"
#include "memory/defNewGeneration.hpp"

inline void CMSBitMap::clear_all() {
  assert_locked();
  // CMS bitmaps usually cover large memory regions
  _bm.clear_large();
  return;
}

inline size_t CMSBitMap::heapWordToOffset(HeapWord* addr) const {
  return (pointer_delta(addr, _bmStartWord)) >> _shifter;
}

inline HeapWord* CMSBitMap::offsetToHeapWord(size_t offset) const {
  return _bmStartWord + (offset << _shifter);
}

inline size_t CMSBitMap::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return diff >> _shifter;
}
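
// Illustrative note (not in the upstream file): heapWordToOffset() and
// offsetToHeapWord() are inverses of each other for addresses aligned at
// (1 << _shifter) heap words. For example, assuming _shifter == 0 (one bit
// per heap word), 8-byte HeapWords and _bmStartWord == 0x1000:
//
//   heapWordToOffset((HeapWord*)0x1040)
//       == pointer_delta(0x1040, 0x1000) >> 0 == 8    // bit 8 of the map
//   offsetToHeapWord(8)
//       == _bmStartWord + (8 << 0)                    // HeapWord* arithmetic
//       == (HeapWord*)0x1040
//
// heapWordDiffToOffsetDiff() checks exactly this alignment before converting
// a word-size difference into a bit-offset difference.
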
inline void CMSBitMap::mark(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  _bm.set_bit(heapWordToOffset(addr));
}

inline bool CMSBitMap::par_mark(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.par_at_put(heapWordToOffset(addr), true);
}

inline void CMSBitMap::par_clear(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  _bm.par_at_put(heapWordToOffset(addr), false);
}

inline void CMSBitMap::mark_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                BitMap::small_range);
}

inline void CMSBitMap::clear_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                  BitMap::small_range);
}

inline void CMSBitMap::par_mark_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                    BitMap::small_range);
}

inline void CMSBitMap::par_clear_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                      BitMap::small_range);
}

inline void CMSBitMap::mark_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                BitMap::large_range);
}

inline void CMSBitMap::clear_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                  BitMap::large_range);
}

inline void CMSBitMap::par_mark_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                    BitMap::large_range);
}

inline void CMSBitMap::par_clear_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                      BitMap::large_range);
}

// Starting at "addr" (inclusive) return a memory region
// corresponding to the first maximally contiguous marked ("1") region.
inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* addr) {
  return getAndClearMarkedRegion(addr, endWord());
}

// Starting at "start_addr" (inclusive) return a memory region
// corresponding to the first maximally contiguous marked ("1") region
// strictly less than end_addr.
inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* start_addr,
                                                    HeapWord* end_addr) {
  HeapWord *start, *end;
  assert_locked();
  start = getNextMarkedWordAddress  (start_addr, end_addr);
  end   = getNextUnmarkedWordAddress(start,      end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clear_range(mr);
  }
  return mr;
}

inline bool CMSBitMap::isMarked(HeapWord* addr) const {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.at(heapWordToOffset(addr));
}

// The same as isMarked() but without a lock check.
inline bool CMSBitMap::par_isMarked(HeapWord* addr) const {
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.at(heapWordToOffset(addr));
}


inline bool CMSBitMap::isUnmarked(HeapWord* addr) const {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return !_bm.at(heapWordToOffset(addr));
}

// Return the HeapWord address corresponding to the next "1" bit
// (inclusive).
inline HeapWord* CMSBitMap::getNextMarkedWordAddress(HeapWord* addr) const {
  return getNextMarkedWordAddress(addr, endWord());
}

// Return the least HeapWord address corresponding to the next "1" bit
// starting at start_addr (inclusive) but strictly less than end_addr.
inline HeapWord* CMSBitMap::getNextMarkedWordAddress(
  HeapWord* start_addr, HeapWord* end_addr) const {
  assert_locked();
  size_t nextOffset = _bm.get_next_one_offset(
                        heapWordToOffset(start_addr),
                        heapWordToOffset(end_addr));
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= start_addr &&
         nextAddr <= end_addr, "get_next_one postcondition");
  assert((nextAddr == end_addr) ||
         isMarked(nextAddr), "get_next_one postcondition");
  return nextAddr;
}
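
// Illustrative sketch (not in the upstream file): the primitives above
// compose into a destructive traversal of all marked regions in
// [start, end). getAndClearMarkedRegion() pairs getNextMarkedWordAddress()
// with getNextUnmarkedWordAddress() and clears each region before returning
// it, so a caller holding the bitmap lock might loop:
//
//   for (MemRegion mr = bm->getAndClearMarkedRegion(start, end);
//        !mr.is_empty();
//        mr = bm->getAndClearMarkedRegion(mr.end(), end)) {
//     process(mr);  // hypothetical per-region callback
//   }
//
// An empty result signals that no marked bits remain below 'end'.
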
// Return the HeapWord address corresponding to the next "0" bit
// (inclusive).
inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(HeapWord* addr) const {
  return getNextUnmarkedWordAddress(addr, endWord());
}

// Return the least HeapWord address corresponding to the next "0" bit
// starting at start_addr (inclusive) but strictly less than end_addr.
inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(
  HeapWord* start_addr, HeapWord* end_addr) const {
  assert_locked();
  size_t nextOffset = _bm.get_next_zero_offset(
                        heapWordToOffset(start_addr),
                        heapWordToOffset(end_addr));
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= start_addr &&
         nextAddr <= end_addr, "get_next_zero postcondition");
  assert((nextAddr == end_addr) ||
         isUnmarked(nextAddr), "get_next_zero postcondition");
  return nextAddr;
}

inline bool CMSBitMap::isAllClear() const {
  assert_locked();
  return getNextMarkedWordAddress(startWord()) >= endWord();
}

inline void CMSBitMap::iterate(BitMapClosure* cl, HeapWord* left,
                               HeapWord* right) {
  assert_locked();
  left  = MAX2(_bmStartWord, left);
  right = MIN2(_bmStartWord + _bmWordSize, right);
  if (right > left) {
    _bm.iterate(cl, heapWordToOffset(left), heapWordToOffset(right));
  }
}

inline void CMSCollector::start_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::start_icms();
  }
}

inline void CMSCollector::stop_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::stop_icms();
  }
}

inline void CMSCollector::disable_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::disable_icms();
  }
}

inline void CMSCollector::enable_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::enable_icms();
  }
}

inline void CMSCollector::icms_wait() {
  if (CMSIncrementalMode) {
    cmsThread()->icms_wait();
  }
}

inline void CMSCollector::save_sweep_limits() {
  _cmsGen->save_sweep_limit();
}

inline bool CMSCollector::is_dead_obj(oop obj) const {
  HeapWord* addr = (HeapWord*)obj;
  assert((_cmsGen->cmsSpace()->is_in_reserved(addr)
          && _cmsGen->cmsSpace()->block_is_obj(addr)),
         "must be object");
  return should_unload_classes() &&
         _collectorState == Sweeping &&
         !_markBitMap.isMarked(addr);
}

inline bool CMSCollector::should_abort_preclean() const {
  // We are in the midst of an "abortable preclean" and either
  // scavenge is done or foreground GC wants to take over collection.
  return _collectorState == AbortablePreclean &&
         (_abort_preclean || _foregroundGCIsActive ||
          GenCollectedHeap::heap()->incremental_collection_will_fail(true /* consult_young */));
}

inline size_t CMSCollector::get_eden_used() const {
  return _young_gen->as_DefNewGeneration()->eden()->used();
}

inline size_t CMSCollector::get_eden_capacity() const {
  return _young_gen->as_DefNewGeneration()->eden()->capacity();
}

inline bool CMSStats::valid() const {
  return _valid_bits == _ALL_VALID;
}

inline void CMSStats::record_gc0_begin() {
  if (_gc0_begin_time.is_updated()) {
    float last_gc0_period = _gc0_begin_time.seconds();
    _gc0_period = AdaptiveWeightedAverage::exp_avg(_gc0_period,
                                                   last_gc0_period, _gc0_alpha);
    _gc0_alpha = _saved_alpha;
    _valid_bits |= _GC0_VALID;
  }
  _cms_used_at_gc0_begin = _cms_gen->cmsSpace()->used();

  _gc0_begin_time.update();
}
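
// Worked example (not in the upstream file): AdaptiveWeightedAverage::exp_avg
// (from gc_implementation/shared/gcUtil.hpp, included above) blends a new
// sample into the running average with a percentage weight:
//
//   new_avg = ((100 - weight) * old_avg + weight * sample) / 100
//
// E.g., if the running _gc0_period is 2.0s, the newly measured young-gen
// period is 4.0s and _gc0_alpha is 25, record_gc0_begin() above yields
//
//   _gc0_period = (75 * 2.0 + 25 * 4.0) / 100 = 2.5s
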
inline void CMSStats::record_gc0_end(size_t cms_gen_bytes_used) {
  float last_gc0_duration = _gc0_begin_time.seconds();
  _gc0_duration = AdaptiveWeightedAverage::exp_avg(_gc0_duration,
                                                   last_gc0_duration, _gc0_alpha);

  // Amount promoted.
  _cms_used_at_gc0_end = cms_gen_bytes_used;

  size_t promoted_bytes = 0;
  if (_cms_used_at_gc0_end >= _cms_used_at_gc0_begin) {
    promoted_bytes = _cms_used_at_gc0_end - _cms_used_at_gc0_begin;
  }

  // If the younger gen collections were skipped, then the
  // number of promoted bytes will be 0 and adding it to the
  // average will incorrectly lessen the average. It is, however,
  // also possible that no promotion was needed.
  //
  // _gc0_promoted used to be calculated as
  // _gc0_promoted = AdaptiveWeightedAverage::exp_avg(_gc0_promoted,
  //     promoted_bytes, _gc0_alpha);
  _cms_gen->gc_stats()->avg_promoted()->sample(promoted_bytes);
  _gc0_promoted = (size_t) _cms_gen->gc_stats()->avg_promoted()->average();

  // Amount directly allocated.
  size_t allocated_bytes = _cms_gen->direct_allocated_words() * HeapWordSize;
  _cms_gen->reset_direct_allocated_words();
  _cms_allocated = AdaptiveWeightedAverage::exp_avg(_cms_allocated,
                                                    allocated_bytes, _gc0_alpha);
}

inline void CMSStats::record_cms_begin() {
  _cms_timer.stop();

  // This is just an approximate value, but is good enough.
  _cms_used_at_cms_begin = _cms_used_at_gc0_end;

  _cms_period = AdaptiveWeightedAverage::exp_avg((float)_cms_period,
                                                 (float) _cms_timer.seconds(), _cms_alpha);
  _cms_begin_time.update();

  _cms_timer.reset();
  _cms_timer.start();
}

inline void CMSStats::record_cms_end() {
  _cms_timer.stop();

  float cur_duration = _cms_timer.seconds();
  _cms_duration = AdaptiveWeightedAverage::exp_avg(_cms_duration,
                                                   cur_duration, _cms_alpha);

  // Avoid division by 0.
  const size_t cms_used_mb = MAX2(_cms_used_at_cms_begin / M, (size_t)1);
  _cms_duration_per_mb = AdaptiveWeightedAverage::exp_avg(_cms_duration_per_mb,
                                                          cur_duration / cms_used_mb,
                                                          _cms_alpha);

  _cms_end_time.update();
  _cms_alpha = _saved_alpha;
  _allow_duty_cycle_reduction = true;
  _valid_bits |= _CMS_VALID;

  _cms_timer.start();
}

inline double CMSStats::cms_time_since_begin() const {
  return _cms_begin_time.seconds();
}

inline double CMSStats::cms_time_since_end() const {
  return _cms_end_time.seconds();
}

inline double CMSStats::promotion_rate() const {
  assert(valid(), "statistics not valid yet");
  return gc0_promoted() / gc0_period();
}

inline double CMSStats::cms_allocation_rate() const {
  assert(valid(), "statistics not valid yet");
  return cms_allocated() / gc0_period();
}

inline double CMSStats::cms_consumption_rate() const {
  assert(valid(), "statistics not valid yet");
  return (gc0_promoted() + cms_allocated()) / gc0_period();
}
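
// Illustrative note (not in the upstream file): the three rates above share
// the young-gen ("gc0") period as their time base, so by construction
//
//   cms_consumption_rate() == promotion_rate() + cms_allocation_rate()
//
// i.e. the CMS generation fills at the rate at which objects are promoted
// into it plus the rate at which they are allocated in it directly.
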
inline unsigned int CMSStats::icms_update_duty_cycle() {
  // Update the duty cycle only if pacing is enabled and the stats are valid
  // (after at least one young gen gc and one cms cycle have completed).
  if (CMSIncrementalPacing && valid()) {
    return icms_update_duty_cycle_impl();
  }
  return _icms_duty_cycle;
}

inline void ConcurrentMarkSweepGeneration::save_sweep_limit() {
  cmsSpace()->save_sweep_limit();
}

inline size_t ConcurrentMarkSweepGeneration::capacity() const {
  return _cmsSpace->capacity();
}

inline size_t ConcurrentMarkSweepGeneration::used() const {
  return _cmsSpace->used();
}

inline size_t ConcurrentMarkSweepGeneration::free() const {
  return _cmsSpace->free();
}

inline MemRegion ConcurrentMarkSweepGeneration::used_region() const {
  return _cmsSpace->used_region();
}

inline MemRegion ConcurrentMarkSweepGeneration::used_region_at_save_marks() const {
  return _cmsSpace->used_region_at_save_marks();
}

inline void MarkFromRootsClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work();
  }
}

inline void Par_MarkFromRootsClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work();
  }
}

inline void PushOrMarkClosure::do_yield_check() {
  _parent->do_yield_check();
}

inline void Par_PushOrMarkClosure::do_yield_check() {
  _parent->do_yield_check();
}

// Return value of "true" indicates that the on-going preclean
// should be aborted.
inline bool ScanMarkedObjectsAgainCarefullyClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    // Sample young gen size before and after yield
    _collector->sample_eden();
    do_yield_work();
    _collector->sample_eden();
    return _collector->should_abort_preclean();
  }
  return false;
}

inline void SurvivorSpacePrecleanClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    // Sample young gen size before and after yield
    _collector->sample_eden();
    do_yield_work();
    _collector->sample_eden();
  }
}

inline void SweepClosure::do_yield_check(HeapWord* addr) {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work(addr);
  }
}

inline void MarkRefsIntoAndScanClosure::do_yield_check() {
  // The conditions are ordered for the remarking phase
  // when _yield is false.
  if (_yield &&
      !_collector->foregroundGCIsActive() &&
      ConcurrentMarkSweepThread::should_yield()) {
    do_yield_work();
  }
}


inline void ModUnionClosure::do_MemRegion(MemRegion mr) {
  // Align the end of mr so it's at a card boundary.
  // This is superfluous except at the end of the space;
  // we should do better than this XXX
  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
                 CardTableModRefBS::card_size /* bytes */));
  _t->mark_range(mr2);
}

inline void ModUnionClosurePar::do_MemRegion(MemRegion mr) {
  // Align the end of mr so it's at a card boundary.
  // This is superfluous except at the end of the space;
  // we should do better than this XXX
  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
                 CardTableModRefBS::card_size /* bytes */));
  _t->par_mark_range(mr2);
}

#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
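
// Illustrative note (not in the upstream file): with the default 512-byte
// cards (CardTableModRefBS::card_size), the do_MemRegion() closures above
// round mr.end() up to the next card boundary, e.g.
//
//   round_to(0x12345, 512) == 0x12400
//
// so the marked mod-union range always ends on a card boundary.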