Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentMark.cpp
/*
 * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"

#include "gc_implementation/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahClosures.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahParallelCleaning.hpp"
#include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc_implementation/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahWorkGroup.hpp"
#include "gc_implementation/shenandoah/shenandoahUtils.hpp"
#include "gc_implementation/shenandoah/shenandoah_specialized_oop_closures.hpp"

#include "memory/referenceProcessor.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"

template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahStrDedupQueue* _dedup_queue;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, NO_DEDUP>(p, _heap, _queue, _mark_context, _dedup_queue);
  }

public:
  ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q, ShenandoahStrDedupQueue* dq) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _dedup_queue(dq),
    _mark_context(_heap->marking_context()) {};

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};

ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
  MetadataAwareOopClosure(rp),
  _queue(q),
  _dedup_queue(NULL),
  _heap(ShenandoahHeap::heap()),
  _mark_context(_heap->marking_context())
{ }

ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahStrDedupQueue* dq, ReferenceProcessor* rp) :
  MetadataAwareOopClosure(rp),
  _queue(q),
  _dedup_queue(dq),
  _heap(ShenandoahHeap::heap()),
  _mark_context(_heap->marking_context())
{ }
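
// A minimal usage sketch (mirroring what mark_roots() below does at the
// init-mark safepoint): every root the scanner finds is funneled through
// ShenandoahInitMarkRootsClosure into the per-worker mark queue.
//
//   ShenandoahAllRootScanner root_proc(root_phase);
//   task_queues()->reserve(nworkers);
//   ShenandoahInitMarkRootsTask<RESOLVE> mark_roots(&root_proc); // RESOLVE iff heap has forwarded objects
//   workers->run_task(&mark_roots);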
template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsTask : public AbstractGangTask {
private:
  ShenandoahAllRootScanner* _rp;
public:
  ShenandoahInitMarkRootsTask(ShenandoahAllRootScanner* rp) :
    AbstractGangTask("Shenandoah init mark roots task"),
    _rp(rp) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahObjToScanQueueSet* queues = heap->concurrent_mark()->task_queues();
    assert(queues->get_reserved() > worker_id, err_msg("Queue has not been reserved for worker id: %d", worker_id));

    ShenandoahObjToScanQueue* q = queues->queue(worker_id);
    ShenandoahInitMarkRootsClosure<UPDATE_REFS> mark_cl(q, NULL);
    do_work(heap, &mark_cl, worker_id);
  }

private:
  void do_work(ShenandoahHeap* heap, OopClosure* oops, uint worker_id) {
    // The rationale for selecting the roots to scan is as follows:
    //   a. With unload_classes = true, we only want to scan the actual strong roots from the
    //      code cache. This will allow us to identify the dead classes, unload them, *and*
    //      invalidate the relevant code cache blobs. This can be done only together with
    //      class unloading.
    //   b. With unload_classes = false, we have to nominally retain all the references from the
    //      code cache, because there could be embedded classes/oops in the generated code,
    //      which we will never visit during mark. Without code cache invalidation, as in (a),
    //      we risk executing that code cache blob, and crashing.
    //   c. With ShenandoahConcurrentScanCodeRoots, we avoid scanning the entire code cache here,
    //      and instead do that in the concurrent phase under the relevant lock. This saves init mark
    //      pause time.
    ResourceMark m;
    if (heap->unload_classes()) {
      _rp->strong_roots_do(worker_id, oops);
    } else {
      _rp->roots_do(worker_id, oops);
    }
  }
};

class ShenandoahUpdateRootsTask : public AbstractGangTask {
private:
  ShenandoahRootUpdater* _root_updater;
public:
  ShenandoahUpdateRootsTask(ShenandoahRootUpdater* root_updater) :
    AbstractGangTask("Shenandoah update roots task"),
    _root_updater(root_updater) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahUpdateRefsClosure cl;
    ShenandoahIsAliveSelector is_alive;
    _root_updater->roots_do(worker_id, is_alive.is_alive_closure(), &cl);
  }
};

class ShenandoahConcurrentMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ShenandoahTaskTerminator* _terminator;

public:
  ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator) :
    AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = heap->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    _cm->concurrent_scan_code_roots(worker_id, rp);
    _cm->mark_loop(worker_id, _terminator, rp,
                   true,  // cancellable
                   ShenandoahStringDedup::is_enabled()); // perform string dedup
  }
};
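
// Remark helper: drains a thread's SATB buffer and, depending on configuration,
// also walks the thread's oops and/or its nmethods. All workers run
// Threads::threads_do() with this closure, but the strong-roots parity token in
// claim_oops_do() guarantees each thread is processed by exactly one worker.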
class ShenandoahSATBAndRemarkCodeRootsThreadsClosure : public ThreadClosure {
private:
  ShenandoahSATBBufferClosure* _satb_cl;
  OopClosure* const            _cl;
  MarkingCodeBlobClosure*      _code_cl;
  int _thread_parity;

public:
  ShenandoahSATBAndRemarkCodeRootsThreadsClosure(ShenandoahSATBBufferClosure* satb_cl, OopClosure* cl, MarkingCodeBlobClosure* code_cl) :
    _satb_cl(satb_cl), _cl(cl), _code_cl(code_cl),
    _thread_parity(SharedHeap::heap()->strong_roots_parity()) {}

  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;
        jt->satb_mark_queue().apply_closure_and_empty(_satb_cl);
        if (_cl != NULL) {
          ResourceMark rm;
          jt->oops_do(_cl, NULL, _code_cl);
        } else if (_code_cl != NULL) {
          // In theory it should not be necessary to explicitly walk the nmethods to find roots
          // for concurrent marking; however, the oops reachable from nmethods have very complex
          // lifecycles:
          //  * Alive if on the stack of an executing method
          //  * Weakly reachable otherwise
          // Some objects reachable from nmethods, such as the class loader (or klass_holder) of
          // the receiver, should be live by the SATB invariant, but other oops recorded in
          // nmethods may behave differently.
          jt->nmethods_do(_code_cl);
        }
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
      }
    }
  }
};

class ShenandoahFinalMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ShenandoahTaskTerminator* _terminator;
  bool _dedup_string;
  ShenandoahSharedFlag _claimed_syncroots;

public:
  ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator, bool dedup_string) :
    AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _dedup_string(dedup_string) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = heap->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    // First drain remaining SATB buffers.
    // Notice that this is not strictly necessary for mark-compact. But since
    // it requires a StrongRootsScope around the task, we need to claim the
    // threads, and performance-wise it doesn't really matter. Adds about 1ms to
    // full-gc.
    {
      ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
      ShenandoahStrDedupQueue* dq = NULL;
      if (ShenandoahStringDedup::is_enabled()) {
        dq = ShenandoahStringDedup::queue(worker_id);
      }
      ShenandoahSATBBufferClosure cl(q, dq);
      SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&cl));
      bool do_nmethods = heap->unload_classes();
      if (heap->has_forwarded_objects()) {
        ShenandoahMarkResolveRefsClosure resolve_mark_cl(q, rp);
        MarkingCodeBlobClosure blobsCl(&resolve_mark_cl, !CodeBlobToOopClosure::FixRelocations);
        ShenandoahSATBAndRemarkCodeRootsThreadsClosure tc(&cl,
                                                          ShenandoahStoreValEnqueueBarrier ? &resolve_mark_cl : NULL,
                                                          do_nmethods ? &blobsCl : NULL);
        Threads::threads_do(&tc);
        if (ShenandoahStoreValEnqueueBarrier && _claimed_syncroots.try_set()) {
          ObjectSynchronizer::oops_do(&resolve_mark_cl);
        }
      } else {
        ShenandoahMarkRefsClosure mark_cl(q, rp);
        MarkingCodeBlobClosure blobsCl(&mark_cl, !CodeBlobToOopClosure::FixRelocations);
        ShenandoahSATBAndRemarkCodeRootsThreadsClosure tc(&cl,
                                                          ShenandoahStoreValEnqueueBarrier ? &mark_cl : NULL,
                                                          do_nmethods ? &blobsCl : NULL);
        Threads::threads_do(&tc);
        if (ShenandoahStoreValEnqueueBarrier && _claimed_syncroots.try_set()) {
          ObjectSynchronizer::oops_do(&mark_cl);
        }
      }
    }

    if (heap->is_degenerated_gc_in_progress() || heap->is_full_gc_in_progress()) {
      // Full GC does not execute concurrent cycle.
      // Degenerated cycle may bypass concurrent cycle.
      // So code roots might not be scanned, let's scan here.
      _cm->concurrent_scan_code_roots(worker_id, rp);
    }

    _cm->mark_loop(worker_id, _terminator, rp,
                   false, // not cancellable
                   _dedup_string);

    assert(_cm->task_queues()->is_empty(), "Should be empty");
  }
};
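
// Entry points driven by the collector. mark_roots() and
// finish_mark_from_roots() execute inside safepoints (init/final mark, or the
// corresponding degenerated/full GC phases); mark_from_roots() executes
// concurrently with the mutator.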
void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahGCPhase phase(root_phase);

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  assert(nworkers <= task_queues()->size(), "Just check");

  ShenandoahAllRootScanner root_proc(root_phase);
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
  task_queues()->reserve(nworkers);

  if (_heap->has_forwarded_objects()) {
    ShenandoahInitMarkRootsTask<RESOLVE> mark_roots(&root_proc);
    workers->run_task(&mark_roots);
  } else {
    // No need to update references, which means the heap is stable.
    // Can save time not walking through forwarding pointers.
    ShenandoahInitMarkRootsTask<NONE> mark_roots(&root_proc);
    workers->run_task(&mark_roots);
  }

  clear_claim_codecache();
}

void ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(root_phase == ShenandoahPhaseTimings::full_gc_update_roots ||
         root_phase == ShenandoahPhaseTimings::degen_gc_update_roots,
         "Only for these phases");

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahGCPhase phase(root_phase);

  COMPILER2_PRESENT(DerivedPointerTable::clear());

  uint nworkers = heap->workers()->active_workers();

  ShenandoahRootUpdater root_updater(root_phase);
  ShenandoahUpdateRootsTask update_roots(&root_updater);
  _heap->workers()->run_task(&update_roots);

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
}

class ShenandoahUpdateThreadRootsTask : public AbstractGangTask {
private:
  SharedHeap::StrongRootsScope _srs;
  ShenandoahPhaseTimings::Phase _phase;
  ShenandoahGCWorkerPhase _worker_phase;
public:
  ShenandoahUpdateThreadRootsTask(bool is_par, ShenandoahPhaseTimings::Phase phase) :
    AbstractGangTask("Shenandoah Update Thread Roots"),
    _srs(ShenandoahHeap::heap(), true),
    _phase(phase),
    _worker_phase(phase) {}

  void work(uint worker_id) {
    ShenandoahUpdateRefsClosure cl;
    ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::ThreadRoots, worker_id);
    ResourceMark rm;
    Threads::possibly_parallel_oops_do(&cl, NULL, NULL);
  }
};
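
// Note: with COMPILER2, derived pointers recorded while updating roots must be
// re-adjusted afterwards, hence the DerivedPointerTable::clear() /
// update_pointers() bracket around the root-updating tasks here and above.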
void ShenandoahConcurrentMark::update_thread_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahGCPhase phase(root_phase);

  COMPILER2_PRESENT(DerivedPointerTable::clear());

  WorkGang* workers = _heap->workers();
  bool is_par = workers->active_workers() > 1;

  ShenandoahUpdateThreadRootsTask task(is_par, root_phase);
  workers->run_task(&task);

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
}

void ShenandoahConcurrentMark::initialize(uint workers) {
  _heap = ShenandoahHeap::heap();

  uint num_queues = MAX2(workers, 1U);

  _task_queues = new ShenandoahObjToScanQueueSet((int) num_queues);

  for (uint i = 0; i < num_queues; ++i) {
    ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);
  }

  JavaThread::satb_mark_queue_set().set_buffer_size(ShenandoahSATBBufferSize);
}

void ShenandoahConcurrentMark::concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp) {
  if (claim_codecache()) {
    ShenandoahObjToScanQueue* q = task_queues()->queue(worker_id);
    if (!_heap->unload_classes()) {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      // TODO: We cannot honor StringDeduplication here, due to lock ranking
      // inversion. So, we may miss some deduplication candidates.
      if (_heap->has_forwarded_objects()) {
        ShenandoahMarkResolveRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      }
    }
  }
}

void ShenandoahConcurrentMark::mark_from_roots() {
  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  ShenandoahGCPhase conc_mark_phase(ShenandoahPhaseTimings::conc_mark);

  if (_heap->process_references()) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->set_active_mt_degree(nworkers);

    // enable ("weak") refs discovery
    rp->enable_discovery(true /*verify_no_refs*/, true);
    rp->setup_policy(_heap->collector_policy()->should_clear_all_soft_refs());
  }

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

  task_queues()->reserve(nworkers);

  {
    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahConcurrentMarkingTask task(this, &terminator);
    workers->run_task(&task);
  }

  assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty when not cancelled");
  if (!_heap->cancelled_gc()) {
    TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
  }

  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
}
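
// A sketch of how the phases above and below are sequenced by the GC driver
// (the driver lives outside this file; the ordering below is illustrative,
// and the phase name is an assumption, not taken from this file):
//
//   mark_roots(ShenandoahPhaseTimings::scan_roots); // init-mark pause
//   mark_from_roots();                              // concurrent phase
//   // optionally preclean_weak_refs(), when process_references() is on
//   finish_mark_from_roots(false /* full_gc */);    // final-mark pause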
void ShenandoahConcurrentMark::finish_mark_from_roots(bool full_gc) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  uint nworkers = _heap->workers()->active_workers();

  // Finally mark everything else we've got in our queues during the previous steps.
  // It does two different things for concurrent vs. mark-compact GC:
  // - For concurrent GC, it starts with empty task queues, drains the remaining
  //   SATB buffers, and then completes the marking closure.
  // - For mark-compact GC, it starts out with the task queues seeded by initial
  //   root scan, and completes the closure, thus marking through all live objects.
  // The implementation is the same, so it's shared here.
  {
    ShenandoahGCPhase phase(full_gc ?
                            ShenandoahPhaseTimings::full_gc_mark_finish_queues :
                            ShenandoahPhaseTimings::finish_queues);
    task_queues()->reserve(nworkers);

    shenandoah_assert_rp_isalive_not_installed();
    ShenandoahIsAliveSelector is_alive;
    ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

    SharedHeap::StrongRootsScope scope(_heap, true);
    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled());
    _heap->workers()->run_task(&task);
  }

  assert(task_queues()->is_empty(), "Should be empty");

  // Marking is completed, deactivate SATB barrier if it is active
  _heap->complete_marking();

  // When we're done marking everything, we process weak references.
  // It is not obvious, but reference processing actually calls
  // JNIHandles::weak_oops_do() to clean up JNI and JVMTI weak oops.
  if (_heap->process_references()) {
    weak_refs_work(full_gc);
  } else {
    weak_roots_work(full_gc);
  }

  // And finally finish class unloading
  if (_heap->unload_classes()) {
    _heap->unload_classes_and_cleanup_tables(full_gc);
  } else if (ShenandoahStringDedup::is_enabled()) {
    ShenandoahStringDedup::parallel_cleanup();
  }
  assert(task_queues()->is_empty(), "Should be empty");
  TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());

  // Resize Metaspace
  MetaspaceGC::compute_new_size();
}

// Weak Reference Closures
class ShenandoahCMDrainMarkingStackClosure : public VoidClosure {
  uint _worker_id;
  ShenandoahTaskTerminator* _terminator;
  bool _reset_terminator;

public:
  ShenandoahCMDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
    _worker_id(worker_id),
    _terminator(t),
    _reset_terminator(reset_terminator) {
  }

  void do_void() {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
    assert(sh->process_references(), "why else would we be here?");
    ReferenceProcessor* rp = sh->ref_processor();

    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(_worker_id, _terminator, rp,
                   false, // not cancellable
                   false); // do not do strdedup

    if (_reset_terminator) {
      _terminator->reset_for_reuse();
    }
  }
};

class ShenandoahCMKeepAliveClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, NONE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};
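
// Same shape as ShenandoahCMKeepAliveClosure above, but instantiated with the
// SIMPLE update mode: besides marking, it also updates the slot if it still
// points at a forwarded object. Reference processing chooses between the two
// based on has_forwarded_objects() (see ShenandoahRefProcTaskProxy::work and
// weak_refs_work_doit below).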
class ShenandoahCMKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};

class ShenandoahRefProcTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
  ShenandoahTaskTerminator* _terminator;

public:
  ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
                             ShenandoahTaskTerminator* t) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task),
    _terminator(t) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id, _terminator);
    if (heap->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahCMKeepAliveUpdateClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    }
  }
};

class ShenandoahRefEnqueueTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::EnqueueTask& _enqueue_task;

public:
  ShenandoahRefEnqueueTaskProxy(AbstractRefProcTaskExecutor::EnqueueTask& enqueue_task) :
    AbstractGangTask("Enqueue reference objects in parallel"),
    _enqueue_task(enqueue_task) {
  }

  void work(uint worker_id) {
    _enqueue_task.work(worker_id);
  }
};

class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  WorkGang* _workers;

public:
  ShenandoahRefProcTaskExecutor(WorkGang* workers) :
    _workers(workers) {
  }

  // Executes a task using worker threads.
  void execute(ProcessTask& task) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    // Shortcut execution if task is empty.
    // This should be replaced with the generic ReferenceProcessor shortcut,
    // see JDK-8181214, JDK-8043575, JDK-6938732.
    if (task.is_empty()) {
      return;
    }

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* cm = heap->concurrent_mark();
    uint nworkers = _workers->active_workers();
    cm->task_queues()->reserve(nworkers);

    ShenandoahTaskTerminator terminator(nworkers, cm->task_queues());
    ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
    _workers->run_task(&proc_task_proxy);
  }

  void execute(EnqueueTask& task) {
    ShenandoahRefEnqueueTaskProxy enqueue_task_proxy(task);
    _workers->run_task(&enqueue_task_proxy);
  }
};
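
// Weak reference processing proper. The executor above parallelizes the
// ReferenceProcessor's ProcessTask/EnqueueTask over the worker gang, while the
// closures defined earlier provide the is-alive, keep-alive, and
// drain-marking-stack pieces for both the MT and single-threaded RP paths.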
void ShenandoahConcurrentMark::weak_refs_work(bool full_gc) {
  assert(_heap->process_references(), "sanity");

  ShenandoahPhaseTimings::Phase phase_root =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs :
          ShenandoahPhaseTimings::weakrefs;

  ShenandoahGCPhase phase(phase_root);

  ReferenceProcessor* rp = _heap->ref_processor();
  weak_refs_work_doit(full_gc);

  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "Post condition");
}

void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) {
  ReferenceProcessor* rp = _heap->ref_processor();

  ShenandoahPhaseTimings::Phase phase_process =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs_process :
          ShenandoahPhaseTimings::weakrefs_process;

  ShenandoahPhaseTimings::Phase phase_enqueue =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs_enqueue :
          ShenandoahPhaseTimings::weakrefs_enqueue;

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  rp->setup_policy(_heap->collector_policy()->should_clear_all_soft_refs());
  rp->set_active_mt_degree(nworkers);

  assert(task_queues()->is_empty(), "Should be empty");

  // complete_gc and keep_alive closures instantiated here are only needed for
  // single-threaded path in RP. They share the queue 0 for tracking work, which
  // simplifies implementation. Since RP may decide to call complete_gc several
  // times, we need to be able to reuse the terminator.
  uint serial_worker_id = 0;
  ShenandoahTaskTerminator terminator(1, task_queues());
  ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);

  ShenandoahRefProcTaskExecutor executor(workers);

  {
    ShenandoahGCPhase phase(phase_process);

    if (_heap->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
      rp->process_discovered_references(&is_alive, &keep_alive,
                                        &complete_gc, &executor,
                                        NULL, _heap->shenandoah_policy()->tracer()->gc_id());
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
      rp->process_discovered_references(&is_alive, &keep_alive,
                                        &complete_gc, &executor,
                                        NULL, _heap->shenandoah_policy()->tracer()->gc_id());
    }

    assert(task_queues()->is_empty(), "Should be empty");
  }

  {
    ShenandoahGCPhase phase(phase_enqueue);
    rp->enqueue_discovered_references(&executor);
  }
}

class DoNothingClosure : public OopClosure {
public:
  void do_oop(oop* p)       {}
  void do_oop(narrowOop* p) {}
};

class ShenandoahWeakUpdateClosure : public OopClosure {
private:
  ShenandoahHeap* const _heap;

  template <class T>
  inline void do_oop_work(T* p) {
    oop o = _heap->maybe_update_with_forwarded(p);
    shenandoah_assert_marked_except(p, o, o == NULL);
  }

public:
  ShenandoahWeakUpdateClosure() : _heap(ShenandoahHeap::heap()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

void ShenandoahConcurrentMark::weak_roots_work(bool full_gc) {
  ShenandoahPhaseTimings::Phase phase = full_gc ?
                                        ShenandoahPhaseTimings::full_gc_weak_roots :
                                        ShenandoahPhaseTimings::weak_roots;
  ShenandoahGCPhase root_phase(phase);
  ShenandoahGCWorkerPhase worker_phase(phase);

  ShenandoahIsAliveSelector is_alive;
  DoNothingClosure cl;
  ShenandoahWeakRoots weak_roots(phase);
  weak_roots.weak_oops_do(is_alive.is_alive_closure(), &cl, 0);
}
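
// Precleaning support. The yield closure lets ReferenceProcessor abandon
// precleaning as soon as the GC is cancelled; the complete-gc closure drains
// queue 0, the only queue the single-threaded preclean pass uses.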
class ShenandoahCancelledGCYieldClosure : public YieldClosure {
private:
  ShenandoahHeap* const _heap;
public:
  ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
  virtual bool should_return() { return _heap->cancelled_gc(); }
};

class ShenandoahPrecleanCompleteGCClosure : public VoidClosure {
public:
  void do_void() {
    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
    assert(sh->process_references(), "why else would we be here?");
    ShenandoahTaskTerminator terminator(1, scm->task_queues());

    ReferenceProcessor* rp = sh->ref_processor();
    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(0, &terminator, rp,
                   false, // not cancellable
                   false); // do not do strdedup
  }
};

class ShenandoahPrecleanTask : public AbstractGangTask {
private:
  ReferenceProcessor* _rp;

public:
  ShenandoahPrecleanTask(ReferenceProcessor* rp) :
    AbstractGangTask("Precleaning task"),
    _rp(rp) {}

  void work(uint worker_id) {
    assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    assert(!sh->has_forwarded_objects(), "No forwarded objects expected here");

    ShenandoahObjToScanQueue* q = sh->concurrent_mark()->get_queue(worker_id);

    ShenandoahCancelledGCYieldClosure yield;
    ShenandoahPrecleanCompleteGCClosure complete_gc;

    ShenandoahIsAliveClosure is_alive;
    ShenandoahCMKeepAliveClosure keep_alive(q);
    ResourceMark rm;
    _rp->preclean_discovered_references(&is_alive, &keep_alive,
                                        &complete_gc, &yield,
                                        NULL, sh->shenandoah_policy()->tracer()->gc_id());
  }
};
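
// A minimal sketch of how precleaning is meant to be driven (an assumption
// based on the asserts in this file: the caller must check
// process_references() first, and the heap must have no forwarded objects):
//
//   if (heap->process_references()) {
//     heap->concurrent_mark()->preclean_weak_refs(); // concurrent, single worker
//   }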
void ShenandoahConcurrentMark::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This will filter out the references whose referents
  // are alive. Note that ReferenceProcessor already filters these out on reference
  // discovery, and the bulk of work is done there. This phase processes leftovers
  // that missed the initial filtering, i.e. when the referent was marked alive after
  // the reference was discovered by RP.

  assert(_heap->process_references(), "sanity");

  ReferenceProcessor* rp = _heap->ref_processor();

  assert(task_queues()->is_empty(), "Should be empty");

  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
  // queues and other goodies. When upstream ReferenceProcessor starts supporting
  // parallel precleans, we can extend this to more threads.
  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();
  assert(nworkers == 1, "This code uses only a single worker");
  task_queues()->reserve(nworkers);

  ShenandoahPrecleanTask task(rp);
  workers->run_task(&task);

  assert(task_queues()->is_empty(), "Should be empty");
}

void ShenandoahConcurrentMark::cancel() {
  // Clean up marking stacks.
  ShenandoahObjToScanQueueSet* queues = task_queues();
  queues->clear();

  // Cancel SATB buffers.
  JavaThread::satb_mark_queue_set().abandon_partial_marking();
}

ShenandoahObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
  assert(task_queues()->get_reserved() > worker_id, err_msg("No reserved queue for worker id: %d", worker_id));
  return _task_queues->queue(worker_id);
}

template <bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_prework(uint w, ShenandoahTaskTerminator* t, ReferenceProcessor* rp,
                                                 bool strdedup) {
  ShenandoahObjToScanQueue* q = get_queue(w);

  ShenandoahLiveData* ld = _heap->get_liveness_cache(w);

  // TODO: We can clean this up if we figure out how to do templated oop closures that
  // play nice with specialized_oop_iterators.
  if (_heap->unload_classes()) {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
        ShenandoahMarkUpdateRefsMetadataDedupClosure cl(q, dq, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
        ShenandoahMarkRefsMetadataDedupClosure cl(q, dq, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  } else {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
        ShenandoahMarkUpdateRefsDedupClosure cl(q, dq, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
        ShenandoahMarkRefsDedupClosure cl(q, dq, rp);
        mark_loop_work<ShenandoahMarkRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  }

  _heap->flush_liveness_cache(w);
}
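
// The marking loop proper: each worker first helps drain any still-unclaimed
// queues, then alternates between draining completed SATB buffers,
// popping/stealing up to ShenandoahMarkLoopStride tasks, and offering
// termination once an entire stride finds no work.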
template <class T, bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint worker_id, ShenandoahTaskTerminator* terminator) {
  int seed = 17;
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q;
  ShenandoahMarkTask t;

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
         "Need to reserve proper number of queues");

  q = queues->claim_next();
  while (q != NULL) {
    if (CANCELLABLE && heap->cancelled_gc()) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(t)) {
        do_task<T>(q, cl, live_data, &t);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }

  q = get_queue(worker_id);

  ShenandoahStrDedupQueue* dq = NULL;
  if (ShenandoahStringDedup::is_enabled()) {
    dq = ShenandoahStringDedup::queue(worker_id);
  }

  ShenandoahSATBBufferClosure drain_satb(q, dq);
  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->cancelled_gc()) {
      return;
    }

    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      if (q->pop(t) ||
          queues->steal(worker_id, &seed, t)) {
        do_task<T>(q, cl, live_data, &t);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No work encountered in current stride, try to terminate.
      ShenandoahTerminatorTerminator tt(heap);
      if (terminator->offer_termination(&tt)) return;
    }
  }
}

bool ShenandoahConcurrentMark::claim_codecache() {
  return _claimed_codecache.try_set();
}

void ShenandoahConcurrentMark::clear_claim_codecache() {
  _claimed_codecache.unset();
}