Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp
/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1MMUTracker.hpp"
#include "gc_implementation/g1/vm_operations_g1.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/vmThread.hpp"

// ======= Concurrent Mark Thread ========

// The CM thread is created when the G1 garbage collector is used

SurrogateLockerThread*
     ConcurrentMarkThread::_slt = NULL;

ConcurrentMarkThread::ConcurrentMarkThread(ConcurrentMark* cm) :
  ConcurrentGCThread(),
  _cm(cm),
  _state(Idle),
  _vtime_accum(0.0),
  _vtime_mark_accum(0.0) {
  create_and_start();
}

class CMCheckpointRootsFinalClosure: public VoidClosure {

  ConcurrentMark* _cm;
public:

  CMCheckpointRootsFinalClosure(ConcurrentMark* cm) :
    _cm(cm) {}

  void do_void(){
    _cm->checkpointRootsFinal(false); // !clear_all_soft_refs
  }
};

class CMCleanUp: public VoidClosure {
  ConcurrentMark* _cm;
public:

  CMCleanUp(ConcurrentMark* cm) :
    _cm(cm) {}

  void do_void(){
    _cm->cleanup();
  }
};


void ConcurrentMarkThread::run() {
  initialize_in_thread();
  _vtime_start = os::elapsedVTime();
  wait_for_universe_init();

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1_policy = g1h->g1_policy();
  G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
  Thread *current_thread = Thread::current();

  while (!_should_terminate) {
    // wait until started is set.
    sleepBeforeNextCycle();
    if (_should_terminate) {
      break;
    }

    {
      ResourceMark rm;
      HandleMark hm;
      double cycle_start = os::elapsedVTime();

      // We have to ensure that we finish scanning the root regions
      // before the next GC takes place. To ensure this we have to
      // make sure that we do not join the STS until the root regions
      // have been scanned. If we did then it's possible that a
      // subsequent GC could block us from joining the STS and proceed
      // without the root regions having been scanned, which would be a
      // correctness issue.

      double scan_start = os::elapsedTime();
      if (!cm()->has_aborted()) {
        if (G1Log::fine()) {
          gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
          gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
        }

        _cm->scanRootRegions();

        double scan_end = os::elapsedTime();
        if (G1Log::fine()) {
          gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
          gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]",
                                 scan_end - scan_start);
        }
      }

      double mark_start_sec = os::elapsedTime();
      if (G1Log::fine()) {
        gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
        gclog_or_tty->print_cr("[GC concurrent-mark-start]");
      }

      int iter = 0;
      do {
        iter++;
        if (!cm()->has_aborted()) {
          _cm->markFromRoots();
        }

        double mark_end_time = os::elapsedVTime();
        double mark_end_sec = os::elapsedTime();
        _vtime_mark_accum += (mark_end_time - cycle_start);
        if (!cm()->has_aborted()) {
          if (g1_policy->adaptive_young_list_length()) {
            double now = os::elapsedTime();
            double remark_prediction_ms = g1_policy->predict_remark_time_ms();
            jlong sleep_time_ms = mmu_tracker->when_ms(now, remark_prediction_ms);
            os::sleep(current_thread, sleep_time_ms, false);
          }

          if (G1Log::fine()) {
            gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
            gclog_or_tty->print_cr("[GC concurrent-mark-end, %1.7lf secs]",
                                   mark_end_sec - mark_start_sec);
          }

          CMCheckpointRootsFinalClosure final_cl(_cm);
          VM_CGC_Operation op(&final_cl, "GC remark", true /* needs_pll */);
          VMThread::execute(&op);
        }
        if (cm()->restart_for_overflow()) {
          if (G1TraceMarkStackOverflow) {
            gclog_or_tty->print_cr("Restarting conc marking because of MS overflow "
                                   "in remark (restart #%d).", iter);
          }
          if (G1Log::fine()) {
            gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
            gclog_or_tty->print_cr("[GC concurrent-mark-restart-for-overflow]");
          }
        }
      } while (cm()->restart_for_overflow());

      double end_time = os::elapsedVTime();
      // Update the total virtual time before doing this, since it will try
      // to measure it to get the vtime for this marking. We purposely
      // neglect the presumably-short "completeCleanup" phase here.
      _vtime_accum = (end_time - _vtime_start);

      if (!cm()->has_aborted()) {
        if (g1_policy->adaptive_young_list_length()) {
          double now = os::elapsedTime();
          double cleanup_prediction_ms = g1_policy->predict_cleanup_time_ms();
          jlong sleep_time_ms = mmu_tracker->when_ms(now, cleanup_prediction_ms);
          os::sleep(current_thread, sleep_time_ms, false);
        }

        CMCleanUp cl_cl(_cm);
        VM_CGC_Operation op(&cl_cl, "GC cleanup", false /* needs_pll */);
        VMThread::execute(&op);
      } else {
        // We don't want to update the marking status if a GC pause
        // is already underway.
        SuspendibleThreadSetJoiner sts;
        g1h->set_marking_complete();
      }

      // Check if cleanup set the free_regions_coming flag. If it
      // hasn't, we can just skip the next step.
      if (g1h->free_regions_coming()) {
        // The following will finish freeing up any regions that we
        // found to be empty during cleanup. We'll do this part
        // without joining the suspendible set. If an evacuation pause
        // takes place, then we would carry on freeing regions in
        // case they are needed by the pause. If a Full GC takes
        // place, it would wait for us to process the regions
        // reclaimed by cleanup.

        double cleanup_start_sec = os::elapsedTime();
        if (G1Log::fine()) {
          gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
          gclog_or_tty->print_cr("[GC concurrent-cleanup-start]");
        }

        // Now do the concurrent cleanup operation.
        _cm->completeCleanup();

        // Notify anyone who's waiting that there are no more free
        // regions coming. We have to do this before we join the STS
        // (in fact, we should not attempt to join the STS in the
        // interval between finishing the cleanup pause and clearing
        // the free_regions_coming flag) otherwise we might deadlock:
        // a GC worker could be blocked waiting for the notification
        // whereas this thread will be blocked for the pause to finish
        // while it's trying to join the STS, which is conditional on
        // the GC workers finishing.
        g1h->reset_free_regions_coming();

        double cleanup_end_sec = os::elapsedTime();
        if (G1Log::fine()) {
          gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
          gclog_or_tty->print_cr("[GC concurrent-cleanup-end, %1.7lf secs]",
                                 cleanup_end_sec - cleanup_start_sec);
        }
      }
      guarantee(cm()->cleanup_list_is_empty(),
                "at this point there should be no regions on the cleanup list");

      // There is a tricky race between recording that the concurrent
      // cleanup has completed and a potential Full GC starting around
      // the same time. We want to make sure that the Full GC calls
      // abort() on concurrent mark after
      // record_concurrent_mark_cleanup_completed(), since abort() is
      // the method that will reset the concurrent mark state. If we
      // end up calling record_concurrent_mark_cleanup_completed()
      // after abort() then we might incorrectly undo some of the work
      // abort() did. Checking the has_aborted() flag after joining
      // the STS allows the correct ordering of the two methods. There
      // are two scenarios:
      //
      // a) If we reach here before the Full GC, the fact that we have
      // joined the STS means that the Full GC cannot start until we
      // leave the STS, so record_concurrent_mark_cleanup_completed()
      // will complete before abort() is called.
      //
      // b) If we reach here during the Full GC, we'll be held up from
      // joining the STS until the Full GC is done, which means that
      // abort() will have completed and has_aborted() will return
      // true to prevent us from calling
      // record_concurrent_mark_cleanup_completed() (and, in fact, it's
      // not needed any more as the concurrent mark state has been
      // already reset).
      {
        SuspendibleThreadSetJoiner sts;
        if (!cm()->has_aborted()) {
          g1_policy->record_concurrent_mark_cleanup_completed();
        }
      }

      if (cm()->has_aborted()) {
        if (G1Log::fine()) {
          gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
          gclog_or_tty->print_cr("[GC concurrent-mark-abort]");
        }
      }

      // We now want to allow clearing of the marking bitmap to be
      // suspended by a collection pause.
      // We may have aborted just before the remark. Do not bother clearing the
      // bitmap then, as it has been done during mark abort.
      if (!cm()->has_aborted()) {
        SuspendibleThreadSetJoiner sts;
        _cm->clearNextBitmap();
      } else {
        assert(!G1VerifyBitmaps || _cm->nextMarkBitmapIsClear(), "Next mark bitmap must be clear");
      }
    }

    // Update the number of full collections that have been
    // completed. This will also notify the FullGCCount_lock in case a
    // Java thread is waiting for a full GC to happen (e.g., it
    // called System.gc() with +ExplicitGCInvokesConcurrent).
    {
      SuspendibleThreadSetJoiner sts;
      g1h->increment_old_marking_cycles_completed(true /* concurrent */);
      g1h->register_concurrent_cycle_end();
    }
  }
  assert(_should_terminate, "just checking");

  terminate();
}

void ConcurrentMarkThread::stop() {
  {
    MutexLockerEx ml(Terminator_lock);
    _should_terminate = true;
  }

  {
    MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag);
    CGC_lock->notify_all();
  }

  {
    MutexLockerEx ml(Terminator_lock);
    while (!_has_terminated) {
      Terminator_lock->wait();
    }
  }
}

void ConcurrentMarkThread::print() const {
  print_on(tty);
}

void ConcurrentMarkThread::print_on(outputStream* st) const {
  st->print("\"G1 Main Concurrent Mark GC Thread\" ");
  Thread::print_on(st);
  st->cr();
}

void ConcurrentMarkThread::sleepBeforeNextCycle() {
  // We join here because we don't want to do the "shouldConcurrentMark()"
  // below while the world is otherwise stopped.
  assert(!in_progress(), "should have been cleared");

  MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
  while (!started() && !_should_terminate) {
    CGC_lock->wait(Mutex::_no_safepoint_check_flag);
  }

  if (started()) {
    set_in_progress();
  }
}

// Note: As is the case with CMS - this method, although exported
// by the ConcurrentMarkThread, which is a non-JavaThread, can only
// be called by a JavaThread. Currently this is done at vm creation
// time (post-vm-init) by the main/Primordial (Java)Thread.
// XXX Consider changing this in the future to allow the CM thread
// itself to create this thread?
void ConcurrentMarkThread::makeSurrogateLockerThread(TRAPS) {
  assert(UseG1GC, "SLT thread needed only for concurrent GC");
  assert(THREAD->is_Java_thread(), "must be a Java thread");
  assert(_slt == NULL, "SLT already created");
  _slt = SurrogateLockerThread::make(THREAD);
}