Path: blob/master/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp
/*
 * Copyright (c) 2015, 2020, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP

#include "gc/shenandoah/shenandoahHeap.hpp"

#include "classfile/javaClasses.inline.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
#include "gc/shenandoah/shenandoahForwarding.inline.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"

inline ShenandoahHeap* ShenandoahHeap::heap() {
  return named_heap<ShenandoahHeap>(CollectedHeap::Shenandoah);
}

inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
  size_t new_index = Atomic::add(&_index, (size_t) 1, memory_order_relaxed);
  // get_region() provides the bounds-check and returns NULL on OOB.
  return _heap->get_region(new_index - 1);
}

inline bool ShenandoahHeap::has_forwarded_objects() const {
  return _gc_state.is_set(HAS_FORWARDED);
}

inline WorkGang* ShenandoahHeap::workers() const {
  return _workers;
}

inline WorkGang* ShenandoahHeap::safepoint_workers() {
  return _safepoint_workers;
}

inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const {
  uintptr_t region_start = ((uintptr_t) addr);
  uintptr_t index = (region_start - (uintptr_t) base()) >> ShenandoahHeapRegion::region_size_bytes_shift();
  assert(index < num_regions(), "Region index is in bounds: " PTR_FORMAT, p2i(addr));
  return index;
}

inline ShenandoahHeapRegion* const ShenandoahHeap::heap_region_containing(const void* addr) const {
  size_t index = heap_region_index_containing(addr);
  ShenandoahHeapRegion* const result = get_region(index);
  assert(addr >= result->bottom() && addr < result->end(), "Heap region contains the address: " PTR_FORMAT, p2i(addr));
  return result;
}
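
// Brackets a thread's evacuation work with the OOM-during-evacuation protocol
// managed by _oom_evac_handler; evacuate_object() below asserts that callers
// are inside such a scope.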
inline void ShenandoahHeap::enter_evacuation(Thread* t) {
  _oom_evac_handler.enter_evacuation(t);
}

inline void ShenandoahHeap::leave_evacuation(Thread* t) {
  _oom_evac_handler.leave_evacuation(t);
}

template <class T>
inline void ShenandoahHeap::update_with_forwarded(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    if (in_collection_set(obj)) {
      // Corner case: when evacuation fails, there are objects in collection
      // set that are not really forwarded. We can still go and try to update them
      // (uselessly) to simplify the common path.
      shenandoah_assert_forwarded_except(p, obj, cancelled_gc());
      oop fwd = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      shenandoah_assert_not_in_cset_except(p, fwd, cancelled_gc());

      // Unconditionally store the update: no concurrent updates expected.
      RawAccess<IS_NOT_NULL>::oop_store(p, fwd);
    }
  }
}

template <class T>
inline void ShenandoahHeap::conc_update_with_forwarded(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    if (in_collection_set(obj)) {
      // Corner case: when evacuation fails, there are objects in collection
      // set that are not really forwarded. We can still go and try to CAS-update them
      // (uselessly) to simplify the common path.
      shenandoah_assert_forwarded_except(p, obj, cancelled_gc());
      oop fwd = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      shenandoah_assert_not_in_cset_except(p, fwd, cancelled_gc());

      // Sanity check: we should not be updating the cset regions themselves,
      // unless we are recovering from the evacuation failure.
      shenandoah_assert_not_in_cset_loc_except(p, !is_in(p) || cancelled_gc());

      // Either we succeed in updating the reference, or something else gets in our way.
      // We don't care if that is another concurrent GC update, or another mutator update.
      // We only check that a non-NULL store still ends up with a non-forwarded reference.
      oop witness = cas_oop(fwd, p, obj);
      shenandoah_assert_not_forwarded_except(p, witness, (witness == NULL) || (witness == obj));
    }
  }
}

inline oop ShenandoahHeap::cas_oop(oop n, oop* addr, oop c) {
  assert(is_aligned(addr, HeapWordSize), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  return (oop) Atomic::cmpxchg(addr, c, n);
}

inline oop ShenandoahHeap::cas_oop(oop n, narrowOop* addr, narrowOop c) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  narrowOop val = CompressedOops::encode(n);
  return CompressedOops::decode(Atomic::cmpxchg(addr, c, val));
}

inline oop ShenandoahHeap::cas_oop(oop n, narrowOop* addr, oop c) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  narrowOop cmp = CompressedOops::encode(c);
  narrowOop val = CompressedOops::encode(n);
  return CompressedOops::decode(Atomic::cmpxchg(addr, cmp, val));
}

inline bool ShenandoahHeap::cancelled_gc() const {
  return _cancelled_gc.get() == CANCELLED;
}
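
// Checks for cancellation and, when running with suspendible workers, yields to a
// pending safepoint first. The flag is temporarily flipped from CANCELLABLE to
// NOT_CANCELLED around the yield, and the thread that flipped it restores
// CANCELLABLE afterwards; if the flag is already CANCELLED, the cmpxchg fails
// and the cancellation is reported to the caller.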
inline bool ShenandoahHeap::check_cancelled_gc_and_yield(bool sts_active) {
  if (!(sts_active && ShenandoahSuspendibleWorkers)) {
    return cancelled_gc();
  }

  jbyte prev = _cancelled_gc.cmpxchg(NOT_CANCELLED, CANCELLABLE);
  if (prev == CANCELLABLE || prev == NOT_CANCELLED) {
    if (SuspendibleThreadSet::should_yield()) {
      SuspendibleThreadSet::yield();
    }

    // Back to CANCELLABLE. The thread that poked NOT_CANCELLED first gets
    // to restore to CANCELLABLE.
    if (prev == CANCELLABLE) {
      _cancelled_gc.set(CANCELLABLE);
    }
    return false;
  } else {
    return true;
  }
}

inline void ShenandoahHeap::clear_cancelled_gc() {
  _cancelled_gc.set(CANCELLABLE);
  _oom_evac_handler.clear();
}

inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
  assert(UseTLAB, "TLABs should be enabled");

  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
  if (gclab == NULL) {
    assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
           "Performance: thread should have GCLAB: %s", thread->name());
    // No GCLABs in this thread, fallback to shared allocation
    return NULL;
  }
  HeapWord* obj = gclab->allocate(size);
  if (obj != NULL) {
    return obj;
  }
  // Otherwise...
  return allocate_from_gclab_slow(thread, size);
}
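
// Evacuation: copy the object into a GCLAB or shared allocation, then race to
// publish it by installing the forwarding pointer. Exactly one thread wins the
// race; losing threads roll back (GCLAB) or fill (shared) their private copy
// and return the winner's copy instead.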
inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
  if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
    // This thread went through the OOM-during-evac protocol and it is safe to return
    // the forward pointer. It must not attempt to evacuate any more.
    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");

  size_t size = p->size();

  assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");

  bool alloc_from_gclab = true;
  HeapWord* copy = NULL;

#ifdef ASSERT
  if (ShenandoahOOMDuringEvacALot &&
      (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
    copy = NULL;
  } else {
#endif
    if (UseTLAB) {
      copy = allocate_from_gclab(thread, size);
    }
    if (copy == NULL) {
      ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
      copy = allocate_memory(req);
      alloc_from_gclab = false;
    }
#ifdef ASSERT
  }
#endif

  if (copy == NULL) {
    control_thread()->handle_alloc_failure_evac(size);

    _oom_evac_handler.handle_out_of_memory_during_evacuation();

    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  // Copy the object:
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);

  // Try to install the new forwarding pointer.
  oop copy_val = cast_to_oop(copy);
  oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
  if (result == copy_val) {
    // Successfully evacuated. Our copy is now the public one!
    shenandoah_assert_correct(NULL, copy_val);
    return copy_val;
  } else {
    // Failed to evacuate. We need to deal with the object that is left behind. Since this
    // new allocation is certainly after TAMS, it will be considered live in the next cycle.
    // But if it happens to contain references to evacuated regions, those references would
    // not get updated for this stale copy during this cycle, and we will crash while scanning
    // it in the next cycle.
    //
    // For GCLAB allocations, it is enough to roll back the allocation ptr. Either the next
    // object will overwrite this stale copy, or the filler object on LAB retirement will
    // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
    // have to explicitly overwrite the copy with the filler object. With that overwrite,
    // we have to keep the fwdptr initialized and pointing to our (stale) copy.
    if (alloc_from_gclab) {
      ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
    } else {
      fill_with_object(copy, size);
      shenandoah_assert_correct(NULL, copy_val);
    }
    shenandoah_assert_correct(NULL, result);
    return result;
  }
}

inline bool ShenandoahHeap::requires_marking(const void* entry) const {
  oop obj = cast_to_oop(entry);
  return !_marking_context->is_marked_strong(obj);
}

inline bool ShenandoahHeap::in_collection_set(oop p) const {
  assert(collection_set() != NULL, "Sanity");
  return collection_set()->is_in(p);
}

inline bool ShenandoahHeap::in_collection_set_loc(void* p) const {
  assert(collection_set() != NULL, "Sanity");
  return collection_set()->is_in_loc(p);
}

inline bool ShenandoahHeap::is_stable() const {
  return _gc_state.is_clear();
}

inline bool ShenandoahHeap::is_idle() const {
  return _gc_state.is_unset(MARKING | EVACUATION | UPDATEREFS);
}

inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const {
  return _gc_state.is_set(MARKING);
}

inline bool ShenandoahHeap::is_evacuation_in_progress() const {
  return _gc_state.is_set(EVACUATION);
}

inline bool ShenandoahHeap::is_gc_in_progress_mask(uint mask) const {
  return _gc_state.is_set(mask);
}

inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
  return _degenerated_gc_in_progress.is_set();
}

inline bool ShenandoahHeap::is_full_gc_in_progress() const {
  return _full_gc_in_progress.is_set();
}

inline bool ShenandoahHeap::is_full_gc_move_in_progress() const {
  return _full_gc_move_in_progress.is_set();
}

inline bool ShenandoahHeap::is_update_refs_in_progress() const {
  return _gc_state.is_set(UPDATEREFS);
}

inline bool ShenandoahHeap::is_stw_gc_in_progress() const {
  return is_full_gc_in_progress() || is_degenerated_gc_in_progress();
}

inline bool ShenandoahHeap::is_concurrent_strong_root_in_progress() const {
  return _concurrent_strong_root_in_progress.is_set();
}

inline bool ShenandoahHeap::is_concurrent_weak_root_in_progress() const {
  return _gc_state.is_set(WEAK_ROOTS);
}
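
// Walks all marked objects in the region, up to the limit. Objects below TAMS
// are found via the mark bitmap (Step 1); objects at or above TAMS were
// allocated after mark start, hence are implicitly live, and are walked by
// object size (Step 2).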
template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_iterate(region, cl, region->top());
}

template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
  assert(!region->is_humongous_continuation(), "no humongous continuation regions here");

  ShenandoahMarkingContext* const ctx = complete_marking_context();
  assert(ctx->is_complete(), "sanity");

  HeapWord* tams = ctx->top_at_mark_start(region);

  size_t skip_bitmap_delta = 1;
  HeapWord* start = region->bottom();
  HeapWord* end = MIN2(tams, region->end());

  // Step 1. Scan below the TAMS based on bitmap data.
  HeapWord* limit_bitmap = MIN2(limit, tams);

  // Try to scan the initial candidate. If the candidate is above the TAMS, it would
  // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2.
  HeapWord* cb = ctx->get_next_marked_addr(start, end);

  intx dist = ShenandoahMarkScanPrefetch;
  if (dist > 0) {
    // Batched scan that prefetches the oop data, anticipating the access to
    // either header, oop field, or forwarding pointer. Note that we cannot
    // touch anything in the oop while it is still being prefetched, to give
    // the prefetch enough time to work. This is why we try to scan the bitmap
    // linearly, disregarding the object size. However, since we know the
    // forwarding pointer precedes the object, we can skip over it. Once we
    // cannot trust the bitmap, there is no point in prefetching the oop
    // contents, as oop->size() will touch it prematurely.

    // No variable-length arrays in standard C++; have enough slots to fit
    // the prefetch distance.
    static const int SLOT_COUNT = 256;
    guarantee(dist <= SLOT_COUNT, "adjust slot count");
    HeapWord* slots[SLOT_COUNT];

    int avail;
    do {
      avail = 0;
      for (int c = 0; (c < dist) && (cb < limit_bitmap); c++) {
        Prefetch::read(cb, oopDesc::mark_offset_in_bytes());
        slots[avail++] = cb;
        cb += skip_bitmap_delta;
        if (cb < limit_bitmap) {
          cb = ctx->get_next_marked_addr(cb, limit_bitmap);
        }
      }

      for (int c = 0; c < avail; c++) {
        assert(slots[c] < tams, "only objects below TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(tams));
        assert(slots[c] < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(limit));
        oop obj = cast_to_oop(slots[c]);
        assert(oopDesc::is_oop(obj), "sanity");
        assert(ctx->is_marked(obj), "object expected to be marked");
        cl->do_object(obj);
      }
    } while (avail > 0);
  } else {
    while (cb < limit_bitmap) {
      assert(cb < tams, "only objects below TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(tams));
      assert(cb < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(limit));
      oop obj = cast_to_oop(cb);
      assert(oopDesc::is_oop(obj), "sanity");
      assert(ctx->is_marked(obj), "object expected to be marked");
      cl->do_object(obj);
      cb += skip_bitmap_delta;
      if (cb < limit_bitmap) {
        cb = ctx->get_next_marked_addr(cb, limit_bitmap);
      }
    }
  }

  // Step 2. Accurate size-based traversal, happens past the TAMS.
  // This restarts the scan at TAMS, which makes sure we traverse all objects,
  // regardless of what happened at Step 1.
  HeapWord* cs = tams;
  while (cs < limit) {
    assert(cs >= tams, "only objects past TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(tams));
    assert(cs < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(limit));
    oop obj = cast_to_oop(cs);
    assert(oopDesc::is_oop(obj), "sanity");
    assert(ctx->is_marked(obj), "object expected to be marked");
    int size = obj->size();
    cl->do_object(obj);
    cs += size;
  }
}
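
// Adapters from ObjectClosure to oop iteration: apply the given oop closure to
// the fields of every visited object. The bounded variant clips iteration to a
// memory range, which marked_object_oop_iterate() below uses for humongous regions.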
template <class T>
class ShenandoahObjectToOopClosure : public ObjectClosure {
  T* _cl;
public:
  ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl);
  }
};

template <class T>
class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
  T* _cl;
  MemRegion _bounds;
public:
  ShenandoahObjectToOopBoundedClosure(T* cl, HeapWord* bottom, HeapWord* top) :
    _cl(cl), _bounds(bottom, top) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl, _bounds);
  }
};

template<class T>
inline void ShenandoahHeap::marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* top) {
  if (region->is_humongous()) {
    HeapWord* bottom = region->bottom();
    if (top > bottom) {
      region = region->humongous_start_region();
      ShenandoahObjectToOopBoundedClosure<T> objs(cl, bottom, top);
      marked_object_iterate(region, &objs);
    }
  } else {
    ShenandoahObjectToOopClosure<T> objs(cl);
    marked_object_iterate(region, &objs, top);
  }
}

inline ShenandoahHeapRegion* const ShenandoahHeap::get_region(size_t region_idx) const {
  if (region_idx < _num_regions) {
    return _regions[region_idx];
  } else {
    return NULL;
  }
}

inline void ShenandoahHeap::mark_complete_marking_context() {
  _marking_context->mark_complete();
}

inline void ShenandoahHeap::mark_incomplete_marking_context() {
  _marking_context->mark_incomplete();
}

inline ShenandoahMarkingContext* ShenandoahHeap::complete_marking_context() const {
  assert(_marking_context->is_complete(), "sanity");
  return _marking_context;
}

inline ShenandoahMarkingContext* ShenandoahHeap::marking_context() const {
  return _marking_context;
}

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP