// Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp
// (38920 views)
/*1* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.2* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.3*4* This code is free software; you can redistribute it and/or modify it5* under the terms of the GNU General Public License version 2 only, as6* published by the Free Software Foundation.7*8* This code is distributed in the hope that it will be useful, but WITHOUT9* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or10* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License11* version 2 for more details (a copy is included in the LICENSE file that12* accompanied this code).13*14* You should have received a copy of the GNU General Public License version15* 2 along with this work; if not, write to the Free Software Foundation,16* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.17*18* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA19* or visit www.oracle.com if you need additional information or have any20* questions.21*22*/2324#include "precompiled.hpp"25#include "gc_implementation/g1/concurrentG1Refine.hpp"26#include "gc_implementation/g1/concurrentG1RefineThread.hpp"27#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"28#include "gc_implementation/g1/g1CollectorPolicy.hpp"29#include "memory/resourceArea.hpp"30#include "runtime/handles.inline.hpp"31#include "runtime/mutexLocker.hpp"3233ConcurrentG1RefineThread::34ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread *next,35CardTableEntryClosure* refine_closure,36uint worker_id_offset, uint worker_id) :37ConcurrentGCThread(),38_refine_closure(refine_closure),39_worker_id_offset(worker_id_offset),40_worker_id(worker_id),41_active(false),42_next(next),43_monitor(NULL),44_cg1r(cg1r),45_vtime_accum(0.0)46{4748// Each thread has its own monitor. 
The i-th thread is responsible for signalling49// to thread i+1 if the number of buffers in the queue exceeds a threashold for this50// thread. Monitors are also used to wake up the threads during termination.51// The 0th worker in notified by mutator threads and has a special monitor.52// The last worker is used for young gen rset size sampling.53if (worker_id > 0) {54_monitor = new Monitor(Mutex::nonleaf, "Refinement monitor", true);55} else {56_monitor = DirtyCardQ_CBL_mon;57}58initialize();59create_and_start();60}6162void ConcurrentG1RefineThread::initialize() {63if (_worker_id < cg1r()->worker_thread_num()) {64// Current thread activation threshold65_threshold = MIN2<int>(cg1r()->thread_threshold_step() * (_worker_id + 1) + cg1r()->green_zone(),66cg1r()->yellow_zone());67// A thread deactivates once the number of buffer reached a deactivation threshold68_deactivation_threshold = MAX2<int>(_threshold - cg1r()->thread_threshold_step(), cg1r()->green_zone());69} else {70set_active(true);71}72}7374void ConcurrentG1RefineThread::sample_young_list_rs_lengths() {75SuspendibleThreadSetJoiner sts;76G1CollectedHeap* g1h = G1CollectedHeap::heap();77G1CollectorPolicy* g1p = g1h->g1_policy();78if (g1p->adaptive_young_list_length()) {79int regions_visited = 0;80g1h->young_list()->rs_length_sampling_init();81while (g1h->young_list()->rs_length_sampling_more()) {82g1h->young_list()->rs_length_sampling_next();83++regions_visited;8485// we try to yield every time we visit 10 regions86if (regions_visited == 10) {87if (sts.should_yield()) {88sts.yield();89// we just abandon the iteration90break;91}92regions_visited = 0;93}94}9596g1p->revise_young_list_target_length_if_necessary();97}98}99100void ConcurrentG1RefineThread::run_young_rs_sampling() {101DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();102_vtime_start = os::elapsedVTime();103while(!_should_terminate) {104sample_young_list_rs_lengths();105106if (os::supports_vtime()) {107_vtime_accum = (os::elapsedVTime() - 
_vtime_start);108} else {109_vtime_accum = 0.0;110}111112MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);113if (_should_terminate) {114break;115}116_monitor->wait(Mutex::_no_safepoint_check_flag, G1ConcRefinementServiceIntervalMillis);117}118}119120void ConcurrentG1RefineThread::wait_for_completed_buffers() {121DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();122MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);123while (!_should_terminate && !is_active()) {124_monitor->wait(Mutex::_no_safepoint_check_flag);125}126}127128bool ConcurrentG1RefineThread::is_active() {129DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();130return _worker_id > 0 ? _active : dcqs.process_completed_buffers();131}132133void ConcurrentG1RefineThread::activate() {134MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);135if (_worker_id > 0) {136if (G1TraceConcRefinement) {137DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();138gclog_or_tty->print_cr("G1-Refine-activated worker %d, on threshold %d, current %d",139_worker_id, _threshold, (int)dcqs.completed_buffers_num());140}141set_active(true);142} else {143DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();144dcqs.set_process_completed(true);145}146_monitor->notify();147}148149void ConcurrentG1RefineThread::deactivate() {150MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);151if (_worker_id > 0) {152if (G1TraceConcRefinement) {153DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();154gclog_or_tty->print_cr("G1-Refine-deactivated worker %d, off threshold %d, current %d",155_worker_id, _deactivation_threshold, (int)dcqs.completed_buffers_num());156}157set_active(false);158} else {159DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();160dcqs.set_process_completed(false);161}162}163164void ConcurrentG1RefineThread::run() {165initialize_in_thread();166wait_for_universe_init();167168if (_worker_id >= cg1r()->worker_thread_num()) 
{169run_young_rs_sampling();170terminate();171return;172}173174_vtime_start = os::elapsedVTime();175while (!_should_terminate) {176DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();177178// Wait for work179wait_for_completed_buffers();180181if (_should_terminate) {182break;183}184185{186SuspendibleThreadSetJoiner sts;187188do {189int curr_buffer_num = (int)dcqs.completed_buffers_num();190// If the number of the buffers falls down into the yellow zone,191// that means that the transition period after the evacuation pause has ended.192if (dcqs.completed_queue_padding() > 0 && curr_buffer_num <= cg1r()->yellow_zone()) {193dcqs.set_completed_queue_padding(0);194}195196if (_worker_id > 0 && curr_buffer_num <= _deactivation_threshold) {197// If the number of the buffer has fallen below our threshold198// we should deactivate. The predecessor will reactivate this199// thread should the number of the buffers cross the threshold again.200deactivate();201break;202}203204// Check if we need to activate the next thread.205if (_next != NULL && !_next->is_active() && curr_buffer_num > _next->_threshold) {206_next->activate();207}208} while (dcqs.apply_closure_to_completed_buffer(_refine_closure, _worker_id + _worker_id_offset, cg1r()->green_zone()));209210// We can exit the loop above while being active if there was a yield request.211if (is_active()) {212deactivate();213}214}215216if (os::supports_vtime()) {217_vtime_accum = (os::elapsedVTime() - _vtime_start);218} else {219_vtime_accum = 0.0;220}221}222assert(_should_terminate, "just checking");223terminate();224}225226void ConcurrentG1RefineThread::stop() {227// it is ok to take late safepoints here, if needed228{229MutexLockerEx mu(Terminator_lock);230_should_terminate = true;231}232233{234MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);235_monitor->notify();236}237238{239MutexLockerEx mu(Terminator_lock);240while (!_has_terminated) {241Terminator_lock->wait();242}243}244if (G1TraceConcRefinement) 
{245gclog_or_tty->print_cr("G1-Refine-stop");246}247}248249void ConcurrentG1RefineThread::print() const {250print_on(tty);251}252253void ConcurrentG1RefineThread::print_on(outputStream* st) const {254st->print("\"G1 Concurrent Refinement Thread#%d\" ", _worker_id);255Thread::print_on(st);256st->cr();257}258259260