Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp
38920 views
/*1* Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.2* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.3*4* This code is free software; you can redistribute it and/or modify it5* under the terms of the GNU General Public License version 2 only, as6* published by the Free Software Foundation.7*8* This code is distributed in the hope that it will be useful, but WITHOUT9* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or10* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License11* version 2 for more details (a copy is included in the LICENSE file that12* accompanied this code).13*14* You should have received a copy of the GNU General Public License version15* 2 along with this work; if not, write to the Free Software Foundation,16* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.17*18* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA19* or visit www.oracle.com if you need additional information or have any20* questions.21*22*/2324#include "precompiled.hpp"25#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"26#include "gc_implementation/parallelScavenge/gcTaskThread.hpp"27#include "gc_implementation/shared/adaptiveSizePolicy.hpp"28#include "memory/allocation.hpp"29#include "memory/allocation.inline.hpp"30#include "runtime/mutex.hpp"31#include "runtime/mutexLocker.hpp"32#include "runtime/orderAccess.inline.hpp"3334PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC3536//37// GCTask38//3940const char* GCTask::Kind::to_string(kind value) {41const char* result = "unknown GCTask kind";42switch (value) {43default:44result = "unknown GCTask kind";45break;46case unknown_task:47result = "unknown task";48break;49case ordinary_task:50result = "ordinary task";51break;52case barrier_task:53result = "barrier task";54break;55case noop_task:56result = "noop task";57break;58case idle_task:59result = "idle task";60break;61}62return result;63};6465GCTask::GCTask() :66_kind(Kind::ordinary_task),67_affinity(GCTaskManager::sentinel_worker()){68initialize();69}7071GCTask::GCTask(Kind::kind kind) :72_kind(kind),73_affinity(GCTaskManager::sentinel_worker()) {74initialize();75}7677GCTask::GCTask(uint affinity) :78_kind(Kind::ordinary_task),79_affinity(affinity) {80initialize();81}8283GCTask::GCTask(Kind::kind kind, uint affinity) :84_kind(kind),85_affinity(affinity) {86initialize();87}8889void GCTask::initialize() {90_older = NULL;91_newer = NULL;92}9394void GCTask::destruct() {95assert(older() == NULL, "shouldn't have an older task");96assert(newer() == NULL, "shouldn't have a newer task");97// Nothing to do.98}99100NOT_PRODUCT(101void GCTask::print(const char* message) const {102tty->print(INTPTR_FORMAT " <- " INTPTR_FORMAT "(%u) -> " INTPTR_FORMAT,103newer(), this, affinity(), older());104}105)106107//108// GCTaskQueue109//110111GCTaskQueue* GCTaskQueue::create() {112GCTaskQueue* result = new GCTaskQueue(false);113if (TraceGCTaskQueue) {114tty->print_cr("GCTaskQueue::create()"115" returns " INTPTR_FORMAT, result);116}117return result;118}119120GCTaskQueue* GCTaskQueue::create_on_c_heap() {121GCTaskQueue* result = new(ResourceObj::C_HEAP, mtGC) GCTaskQueue(true);122if (TraceGCTaskQueue) {123tty->print_cr("GCTaskQueue::create_on_c_heap()"124" returns " INTPTR_FORMAT,125result);126}127return result;128}129130GCTaskQueue::GCTaskQueue(bool on_c_heap) :131_is_c_heap_obj(on_c_heap) {132initialize();133if (TraceGCTaskQueue) {134tty->print_cr("[" INTPTR_FORMAT "]"135" GCTaskQueue::GCTaskQueue() constructor",136this);137}138}139140void GCTaskQueue::destruct() {141// Nothing to do.142}143144void GCTaskQueue::destroy(GCTaskQueue* that) {145if (TraceGCTaskQueue) {146tty->print_cr("[" INTPTR_FORMAT "]"147" GCTaskQueue::destroy()"148" is_c_heap_obj: %s",149that,150that->is_c_heap_obj() ? "true" : "false");151}152// That instance may have been allocated as a CHeapObj,153// in which case we have to free it explicitly.154if (that != NULL) {155that->destruct();156assert(that->is_empty(), "should be empty");157if (that->is_c_heap_obj()) {158FreeHeap(that);159}160}161}162163void GCTaskQueue::initialize() {164set_insert_end(NULL);165set_remove_end(NULL);166set_length(0);167}168169// Enqueue one task.170void GCTaskQueue::enqueue(GCTask* task) {171if (TraceGCTaskQueue) {172tty->print_cr("[" INTPTR_FORMAT "]"173" GCTaskQueue::enqueue(task: "174INTPTR_FORMAT ")",175this, task);176print("before:");177}178assert(task != NULL, "shouldn't have null task");179assert(task->older() == NULL, "shouldn't be on queue");180assert(task->newer() == NULL, "shouldn't be on queue");181task->set_newer(NULL);182task->set_older(insert_end());183if (is_empty()) {184set_remove_end(task);185} else {186insert_end()->set_newer(task);187}188set_insert_end(task);189increment_length();190verify_length();191if (TraceGCTaskQueue) {192print("after:");193}194}195196// Enqueue a whole list of tasks. Empties the argument list.197void GCTaskQueue::enqueue(GCTaskQueue* list) {198if (TraceGCTaskQueue) {199tty->print_cr("[" INTPTR_FORMAT "]"200" GCTaskQueue::enqueue(list: "201INTPTR_FORMAT ")",202this, list);203print("before:");204list->print("list:");205}206if (list->is_empty()) {207// Enqueuing the empty list: nothing to do.208return;209}210uint list_length = list->length();211if (is_empty()) {212// Enqueuing to empty list: just acquire elements.213set_insert_end(list->insert_end());214set_remove_end(list->remove_end());215set_length(list_length);216} else {217// Prepend argument list to our queue.218list->remove_end()->set_older(insert_end());219insert_end()->set_newer(list->remove_end());220set_insert_end(list->insert_end());221set_length(length() + list_length);222// empty the argument list.223}224list->initialize();225if (TraceGCTaskQueue) {226print("after:");227list->print("list:");228}229verify_length();230}231232// Dequeue one task.233GCTask* GCTaskQueue::dequeue() {234if (TraceGCTaskQueue) {235tty->print_cr("[" INTPTR_FORMAT "]"236" GCTaskQueue::dequeue()", this);237print("before:");238}239assert(!is_empty(), "shouldn't dequeue from empty list");240GCTask* result = remove();241assert(result != NULL, "shouldn't have NULL task");242if (TraceGCTaskQueue) {243tty->print_cr(" return: " INTPTR_FORMAT, result);244print("after:");245}246return result;247}248249// Dequeue one task, preferring one with affinity.250GCTask* GCTaskQueue::dequeue(uint affinity) {251if (TraceGCTaskQueue) {252tty->print_cr("[" INTPTR_FORMAT "]"253" GCTaskQueue::dequeue(%u)", this, affinity);254print("before:");255}256assert(!is_empty(), "shouldn't dequeue from empty list");257// Look down to the next barrier for a task with this affinity.258GCTask* result = NULL;259for (GCTask* element = remove_end();260element != NULL;261element = element->newer()) {262if (element->is_barrier_task()) {263// Don't consider barrier tasks, nor past them.264result = NULL;265break;266}267if (element->affinity() == affinity) {268result = remove(element);269break;270}271}272// If we didn't find anything with affinity, just take the next task.273if (result == NULL) {274result = remove();275}276if (TraceGCTaskQueue) {277tty->print_cr(" return: " INTPTR_FORMAT, result);278print("after:");279}280return result;281}282283GCTask* GCTaskQueue::remove() {284// Dequeue from remove end.285GCTask* result = remove_end();286assert(result != NULL, "shouldn't have null task");287assert(result->older() == NULL, "not the remove_end");288set_remove_end(result->newer());289if (remove_end() == NULL) {290assert(insert_end() == result, "not a singleton");291set_insert_end(NULL);292} else {293remove_end()->set_older(NULL);294}295result->set_newer(NULL);296decrement_length();297assert(result->newer() == NULL, "shouldn't be on queue");298assert(result->older() == NULL, "shouldn't be on queue");299verify_length();300return result;301}302303GCTask* GCTaskQueue::remove(GCTask* task) {304// This is slightly more work, and has slightly fewer asserts305// than removing from the remove end.306assert(task != NULL, "shouldn't have null task");307GCTask* result = task;308if (result->newer() != NULL) {309result->newer()->set_older(result->older());310} else {311assert(insert_end() == result, "not youngest");312set_insert_end(result->older());313}314if (result->older() != NULL) {315result->older()->set_newer(result->newer());316} else {317assert(remove_end() == result, "not oldest");318set_remove_end(result->newer());319}320result->set_newer(NULL);321result->set_older(NULL);322decrement_length();323verify_length();324return result;325}326327NOT_PRODUCT(328// Count the elements in the queue and verify the length against329// that count.330void GCTaskQueue::verify_length() const {331uint count = 0;332for (GCTask* element = insert_end();333element != NULL;334element = element->older()) {335336count++;337}338assert(count == length(), "Length does not match queue");339}340341void GCTaskQueue::print(const char* message) const {342tty->print_cr("[" INTPTR_FORMAT "] GCTaskQueue:"343" insert_end: " INTPTR_FORMAT344" remove_end: " INTPTR_FORMAT345" length: %d"346" %s",347this, insert_end(), remove_end(), length(), message);348uint count = 0;349for (GCTask* element = insert_end();350element != NULL;351element = element->older()) {352element->print(" ");353count++;354tty->cr();355}356tty->print("Total tasks: %d", count);357}358)359360//361// SynchronizedGCTaskQueue362//363364SynchronizedGCTaskQueue::SynchronizedGCTaskQueue(GCTaskQueue* queue_arg,365Monitor * lock_arg) :366_unsynchronized_queue(queue_arg),367_lock(lock_arg) {368assert(unsynchronized_queue() != NULL, "null queue");369assert(lock() != NULL, "null lock");370}371372SynchronizedGCTaskQueue::~SynchronizedGCTaskQueue() {373// Nothing to do.374}375376//377// GCTaskManager378//379GCTaskManager::GCTaskManager(uint workers) :380_workers(workers),381_active_workers(0),382_idle_workers(0),383_ndc(NULL) {384initialize();385}386387GCTaskManager::GCTaskManager(uint workers, NotifyDoneClosure* ndc) :388_workers(workers),389_active_workers(0),390_idle_workers(0),391_ndc(ndc) {392initialize();393}394395void GCTaskManager::initialize() {396if (TraceGCTaskManager) {397tty->print_cr("GCTaskManager::initialize: workers: %u", workers());398}399assert(workers() != 0, "no workers");400_monitor = new Monitor(Mutex::barrier, // rank401"GCTaskManager monitor", // name402Mutex::_allow_vm_block_flag); // allow_vm_block403// The queue for the GCTaskManager must be a CHeapObj.404GCTaskQueue* unsynchronized_queue = GCTaskQueue::create_on_c_heap();405_queue = SynchronizedGCTaskQueue::create(unsynchronized_queue, lock());406_noop_task = NoopGCTask::create_on_c_heap();407_idle_inactive_task = WaitForBarrierGCTask::create_on_c_heap();408_resource_flag = NEW_C_HEAP_ARRAY(bool, workers(), mtGC);409{410// Set up worker threads.411// Distribute the workers among the available processors,412// unless we were told not to, or if the os doesn't want to.413uint* processor_assignment = NEW_C_HEAP_ARRAY(uint, workers(), mtGC);414if (!BindGCTaskThreadsToCPUs ||415!os::distribute_processes(workers(), processor_assignment)) {416for (uint a = 0; a < workers(); a += 1) {417processor_assignment[a] = sentinel_worker();418}419}420_thread = NEW_C_HEAP_ARRAY(GCTaskThread*, workers(), mtGC);421for (uint t = 0; t < workers(); t += 1) {422set_thread(t, GCTaskThread::create(this, t, processor_assignment[t]));423}424if (TraceGCTaskThread) {425tty->print("GCTaskManager::initialize: distribution:");426for (uint t = 0; t < workers(); t += 1) {427tty->print(" %u", processor_assignment[t]);428}429tty->cr();430}431FREE_C_HEAP_ARRAY(uint, processor_assignment, mtGC);432}433reset_busy_workers();434set_unblocked();435for (uint w = 0; w < workers(); w += 1) {436set_resource_flag(w, false);437}438reset_delivered_tasks();439reset_completed_tasks();440reset_noop_tasks();441reset_barriers();442reset_emptied_queue();443for (uint s = 0; s < workers(); s += 1) {444thread(s)->start();445}446}447448GCTaskManager::~GCTaskManager() {449assert(busy_workers() == 0, "still have busy workers");450assert(queue()->is_empty(), "still have queued work");451NoopGCTask::destroy(_noop_task);452_noop_task = NULL;453WaitForBarrierGCTask::destroy(_idle_inactive_task);454_idle_inactive_task = NULL;455if (_thread != NULL) {456for (uint i = 0; i < workers(); i += 1) {457GCTaskThread::destroy(thread(i));458set_thread(i, NULL);459}460FREE_C_HEAP_ARRAY(GCTaskThread*, _thread, mtGC);461_thread = NULL;462}463if (_resource_flag != NULL) {464FREE_C_HEAP_ARRAY(bool, _resource_flag, mtGC);465_resource_flag = NULL;466}467if (queue() != NULL) {468GCTaskQueue* unsynchronized_queue = queue()->unsynchronized_queue();469GCTaskQueue::destroy(unsynchronized_queue);470SynchronizedGCTaskQueue::destroy(queue());471_queue = NULL;472}473if (monitor() != NULL) {474delete monitor();475_monitor = NULL;476}477}478479void GCTaskManager::set_active_gang() {480_active_workers =481AdaptiveSizePolicy::calc_active_workers(workers(),482active_workers(),483Threads::number_of_non_daemon_threads());484485assert(!all_workers_active() || active_workers() == ParallelGCThreads,486err_msg("all_workers_active() is incorrect: "487"active %d ParallelGCThreads %d", active_workers(),488ParallelGCThreads));489if (TraceDynamicGCThreads) {490gclog_or_tty->print_cr("GCTaskManager::set_active_gang(): "491"all_workers_active() %d workers %d "492"active %d ParallelGCThreads %d ",493all_workers_active(), workers(), active_workers(),494ParallelGCThreads);495}496}497498// Create IdleGCTasks for inactive workers.499// Creates tasks in a ResourceArea and assumes500// an appropriate ResourceMark.501void GCTaskManager::task_idle_workers() {502{503int more_inactive_workers = 0;504{505// Stop any idle tasks from exiting their IdleGCTask's506// and get the count for additional IdleGCTask's under507// the GCTaskManager's monitor so that the "more_inactive_workers"508// count is correct.509MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);510_idle_inactive_task->set_should_wait(true);511// active_workers are a number being requested. idle_workers512// are the number currently idle. If all the workers are being513// requested to be active but some are already idle, reduce514// the number of active_workers to be consistent with the515// number of idle_workers. The idle_workers are stuck in516// idle tasks and will no longer be release (since a new GC517// is starting). Try later to release enough idle_workers518// to allow the desired number of active_workers.519more_inactive_workers =520workers() - active_workers() - idle_workers();521if (more_inactive_workers < 0) {522int reduced_active_workers = active_workers() + more_inactive_workers;523set_active_workers(reduced_active_workers);524more_inactive_workers = 0;525}526if (TraceDynamicGCThreads) {527gclog_or_tty->print_cr("JT: %d workers %d active %d "528"idle %d more %d",529Threads::number_of_non_daemon_threads(),530workers(),531active_workers(),532idle_workers(),533more_inactive_workers);534}535}536GCTaskQueue* q = GCTaskQueue::create();537for(uint i = 0; i < (uint) more_inactive_workers; i++) {538q->enqueue(IdleGCTask::create_on_c_heap());539increment_idle_workers();540}541assert(workers() == active_workers() + idle_workers(),542"total workers should equal active + inactive");543add_list(q);544// GCTaskQueue* q was created in a ResourceArea so a545// destroy() call is not needed.546}547}548549void GCTaskManager::release_idle_workers() {550{551MutexLockerEx ml(monitor(),552Mutex::_no_safepoint_check_flag);553_idle_inactive_task->set_should_wait(false);554monitor()->notify_all();555// Release monitor556}557}558559void GCTaskManager::print_task_time_stamps() {560for(uint i=0; i<ParallelGCThreads; i++) {561GCTaskThread* t = thread(i);562t->print_task_time_stamps();563}564}565566void GCTaskManager::print_threads_on(outputStream* st) {567uint num_thr = workers();568for (uint i = 0; i < num_thr; i++) {569thread(i)->print_on(st);570st->cr();571}572}573574void GCTaskManager::threads_do(ThreadClosure* tc) {575assert(tc != NULL, "Null ThreadClosure");576uint num_thr = workers();577for (uint i = 0; i < num_thr; i++) {578tc->do_thread(thread(i));579}580}581582GCTaskThread* GCTaskManager::thread(uint which) {583assert(which < workers(), "index out of bounds");584assert(_thread[which] != NULL, "shouldn't have null thread");585return _thread[which];586}587588void GCTaskManager::set_thread(uint which, GCTaskThread* value) {589assert(which < workers(), "index out of bounds");590assert(value != NULL, "shouldn't have null thread");591_thread[which] = value;592}593594void GCTaskManager::add_task(GCTask* task) {595assert(task != NULL, "shouldn't have null task");596MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);597if (TraceGCTaskManager) {598tty->print_cr("GCTaskManager::add_task(" INTPTR_FORMAT " [%s])",599task, GCTask::Kind::to_string(task->kind()));600}601queue()->enqueue(task);602// Notify with the lock held to avoid missed notifies.603if (TraceGCTaskManager) {604tty->print_cr(" GCTaskManager::add_task (%s)->notify_all",605monitor()->name());606}607(void) monitor()->notify_all();608// Release monitor().609}610611void GCTaskManager::add_list(GCTaskQueue* list) {612assert(list != NULL, "shouldn't have null task");613MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);614if (TraceGCTaskManager) {615tty->print_cr("GCTaskManager::add_list(%u)", list->length());616}617queue()->enqueue(list);618// Notify with the lock held to avoid missed notifies.619if (TraceGCTaskManager) {620tty->print_cr(" GCTaskManager::add_list (%s)->notify_all",621monitor()->name());622}623(void) monitor()->notify_all();624// Release monitor().625}626627// GC workers wait in get_task() for new work to be added628// to the GCTaskManager's queue. When new work is added,629// a notify is sent to the waiting GC workers which then630// compete to get tasks. If a GC worker wakes up and there631// is no work on the queue, it is given a noop_task to execute632// and then loops to find more work.633634GCTask* GCTaskManager::get_task(uint which) {635GCTask* result = NULL;636// Grab the queue lock.637MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);638// Wait while the queue is block or639// there is nothing to do, except maybe release resources.640while (is_blocked() ||641(queue()->is_empty() && !should_release_resources(which))) {642if (TraceGCTaskManager) {643tty->print_cr("GCTaskManager::get_task(%u)"644" blocked: %s"645" empty: %s"646" release: %s",647which,648is_blocked() ? "true" : "false",649queue()->is_empty() ? "true" : "false",650should_release_resources(which) ? "true" : "false");651tty->print_cr(" => (%s)->wait()",652monitor()->name());653}654monitor()->wait(Mutex::_no_safepoint_check_flag, 0);655}656// We've reacquired the queue lock here.657// Figure out which condition caused us to exit the loop above.658if (!queue()->is_empty()) {659if (UseGCTaskAffinity) {660result = queue()->dequeue(which);661} else {662result = queue()->dequeue();663}664if (result->is_barrier_task()) {665assert(which != sentinel_worker(),666"blocker shouldn't be bogus");667set_blocking_worker(which);668}669} else {670// The queue is empty, but we were woken up.671// Just hand back a Noop task,672// in case someone wanted us to release resources, or whatever.673result = noop_task();674increment_noop_tasks();675}676assert(result != NULL, "shouldn't have null task");677if (TraceGCTaskManager) {678tty->print_cr("GCTaskManager::get_task(%u) => " INTPTR_FORMAT " [%s]",679which, result, GCTask::Kind::to_string(result->kind()));680tty->print_cr(" %s", result->name());681}682if (!result->is_idle_task()) {683increment_busy_workers();684increment_delivered_tasks();685}686return result;687// Release monitor().688}689690void GCTaskManager::note_completion(uint which) {691MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);692if (TraceGCTaskManager) {693tty->print_cr("GCTaskManager::note_completion(%u)", which);694}695// If we are blocked, check if the completing thread is the blocker.696if (blocking_worker() == which) {697assert(blocking_worker() != sentinel_worker(),698"blocker shouldn't be bogus");699increment_barriers();700set_unblocked();701}702increment_completed_tasks();703uint active = decrement_busy_workers();704if ((active == 0) && (queue()->is_empty())) {705increment_emptied_queue();706if (TraceGCTaskManager) {707tty->print_cr(" GCTaskManager::note_completion(%u) done", which);708}709// Notify client that we are done.710NotifyDoneClosure* ndc = notify_done_closure();711if (ndc != NULL) {712ndc->notify(this);713}714}715if (TraceGCTaskManager) {716tty->print_cr(" GCTaskManager::note_completion(%u) (%s)->notify_all",717which, monitor()->name());718tty->print_cr(" "719" blocked: %s"720" empty: %s"721" release: %s",722is_blocked() ? "true" : "false",723queue()->is_empty() ? "true" : "false",724should_release_resources(which) ? "true" : "false");725tty->print_cr(" "726" delivered: %u"727" completed: %u"728" barriers: %u"729" emptied: %u",730delivered_tasks(),731completed_tasks(),732barriers(),733emptied_queue());734}735// Tell everyone that a task has completed.736(void) monitor()->notify_all();737// Release monitor().738}739740uint GCTaskManager::increment_busy_workers() {741assert(queue()->own_lock(), "don't own the lock");742_busy_workers += 1;743return _busy_workers;744}745746uint GCTaskManager::decrement_busy_workers() {747assert(queue()->own_lock(), "don't own the lock");748assert(_busy_workers > 0, "About to make a mistake");749_busy_workers -= 1;750return _busy_workers;751}752753void GCTaskManager::release_all_resources() {754// If you want this to be done atomically, do it in a BarrierGCTask.755for (uint i = 0; i < workers(); i += 1) {756set_resource_flag(i, true);757}758}759760bool GCTaskManager::should_release_resources(uint which) {761// This can be done without a lock because each thread reads one element.762return resource_flag(which);763}764765void GCTaskManager::note_release(uint which) {766// This can be done without a lock because each thread writes one element.767set_resource_flag(which, false);768}769770// "list" contains tasks that are ready to execute. Those771// tasks are added to the GCTaskManager's queue of tasks and772// then the GC workers are notified that there is new work to773// do.774//775// Typically different types of tasks can be added to the "list".776// For example in PSScavenge OldToYoungRootsTask, SerialOldToYoungRootsTask,777// ScavengeRootsTask, and StealTask tasks are all added to the list778// and then the GC workers are notified of new work. The tasks are779// handed out in the order in which they are added to the list780// (although execution is not necessarily in that order). As long781// as any tasks are running the GCTaskManager will wait for execution782// to complete. GC workers that execute a stealing task remain in783// the stealing task until all stealing tasks have completed. The load784// balancing afforded by the stealing tasks work best if the stealing785// tasks are added last to the list.786787void GCTaskManager::execute_and_wait(GCTaskQueue* list) {788WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create();789list->enqueue(fin);790// The barrier task will be read by one of the GC791// workers once it is added to the list of tasks.792// Be sure that is globally visible before the793// GC worker reads it (which is after the task is added794// to the list of tasks below).795OrderAccess::storestore();796add_list(list);797fin->wait_for(true /* reset */);798// We have to release the barrier tasks!799WaitForBarrierGCTask::destroy(fin);800}801802bool GCTaskManager::resource_flag(uint which) {803assert(which < workers(), "index out of bounds");804return _resource_flag[which];805}806807void GCTaskManager::set_resource_flag(uint which, bool value) {808assert(which < workers(), "index out of bounds");809_resource_flag[which] = value;810}811812//813// NoopGCTask814//815816NoopGCTask* NoopGCTask::create() {817NoopGCTask* result = new NoopGCTask(false);818return result;819}820821NoopGCTask* NoopGCTask::create_on_c_heap() {822NoopGCTask* result = new(ResourceObj::C_HEAP, mtGC) NoopGCTask(true);823return result;824}825826void NoopGCTask::destroy(NoopGCTask* that) {827if (that != NULL) {828that->destruct();829if (that->is_c_heap_obj()) {830FreeHeap(that);831}832}833}834835void NoopGCTask::destruct() {836// This has to know it's superclass structure, just like the constructor.837this->GCTask::destruct();838// Nothing else to do.839}840841//842// IdleGCTask843//844845IdleGCTask* IdleGCTask::create() {846IdleGCTask* result = new IdleGCTask(false);847assert(UseDynamicNumberOfGCThreads,848"Should only be used with dynamic GC thread");849return result;850}851852IdleGCTask* IdleGCTask::create_on_c_heap() {853IdleGCTask* result = new(ResourceObj::C_HEAP, mtGC) IdleGCTask(true);854assert(UseDynamicNumberOfGCThreads,855"Should only be used with dynamic GC thread");856return result;857}858859void IdleGCTask::do_it(GCTaskManager* manager, uint which) {860WaitForBarrierGCTask* wait_for_task = manager->idle_inactive_task();861if (TraceGCTaskManager) {862tty->print_cr("[" INTPTR_FORMAT "]"863" IdleGCTask:::do_it()"864" should_wait: %s",865this, wait_for_task->should_wait() ? "true" : "false");866}867MutexLockerEx ml(manager->monitor(), Mutex::_no_safepoint_check_flag);868if (TraceDynamicGCThreads) {869gclog_or_tty->print_cr("--- idle %d", which);870}871// Increment has to be done when the idle tasks are created.872// manager->increment_idle_workers();873manager->monitor()->notify_all();874while (wait_for_task->should_wait()) {875if (TraceGCTaskManager) {876tty->print_cr("[" INTPTR_FORMAT "]"877" IdleGCTask::do_it()"878" [" INTPTR_FORMAT "] (%s)->wait()",879this, manager->monitor(), manager->monitor()->name());880}881manager->monitor()->wait(Mutex::_no_safepoint_check_flag, 0);882}883manager->decrement_idle_workers();884if (TraceDynamicGCThreads) {885gclog_or_tty->print_cr("--- release %d", which);886}887if (TraceGCTaskManager) {888tty->print_cr("[" INTPTR_FORMAT "]"889" IdleGCTask::do_it() returns"890" should_wait: %s",891this, wait_for_task->should_wait() ? "true" : "false");892}893// Release monitor().894}895896void IdleGCTask::destroy(IdleGCTask* that) {897if (that != NULL) {898that->destruct();899if (that->is_c_heap_obj()) {900FreeHeap(that);901}902}903}904905void IdleGCTask::destruct() {906// This has to know it's superclass structure, just like the constructor.907this->GCTask::destruct();908// Nothing else to do.909}910911//912// BarrierGCTask913//914915void BarrierGCTask::do_it(GCTaskManager* manager, uint which) {916// Wait for this to be the only busy worker.917// ??? I thought of having a StackObj class918// whose constructor would grab the lock and come to the barrier,919// and whose destructor would release the lock,920// but that seems like too much mechanism for two lines of code.921MutexLockerEx ml(manager->lock(), Mutex::_no_safepoint_check_flag);922do_it_internal(manager, which);923// Release manager->lock().924}925926void BarrierGCTask::do_it_internal(GCTaskManager* manager, uint which) {927// Wait for this to be the only busy worker.928assert(manager->monitor()->owned_by_self(), "don't own the lock");929assert(manager->is_blocked(), "manager isn't blocked");930while (manager->busy_workers() > 1) {931if (TraceGCTaskManager) {932tty->print_cr("BarrierGCTask::do_it(%u) waiting on %u workers",933which, manager->busy_workers());934}935manager->monitor()->wait(Mutex::_no_safepoint_check_flag, 0);936}937}938939void BarrierGCTask::destruct() {940this->GCTask::destruct();941// Nothing else to do.942}943944//945// ReleasingBarrierGCTask946//947948void ReleasingBarrierGCTask::do_it(GCTaskManager* manager, uint which) {949MutexLockerEx ml(manager->lock(), Mutex::_no_safepoint_check_flag);950do_it_internal(manager, which);951manager->release_all_resources();952// Release manager->lock().953}954955void ReleasingBarrierGCTask::destruct() {956this->BarrierGCTask::destruct();957// Nothing else to do.958}959960//961// NotifyingBarrierGCTask962//963964void NotifyingBarrierGCTask::do_it(GCTaskManager* manager, uint which) {965MutexLockerEx ml(manager->lock(), Mutex::_no_safepoint_check_flag);966do_it_internal(manager, which);967NotifyDoneClosure* ndc = notify_done_closure();968if (ndc != NULL) {969ndc->notify(manager);970}971// Release manager->lock().972}973974void NotifyingBarrierGCTask::destruct() {975this->BarrierGCTask::destruct();976// Nothing else to do.977}978979//980// WaitForBarrierGCTask981//982WaitForBarrierGCTask* WaitForBarrierGCTask::create() {983WaitForBarrierGCTask* result = new WaitForBarrierGCTask(false);984return result;985}986987WaitForBarrierGCTask* WaitForBarrierGCTask::create_on_c_heap() {988WaitForBarrierGCTask* result =989new (ResourceObj::C_HEAP, mtGC) WaitForBarrierGCTask(true);990return result;991}992993WaitForBarrierGCTask::WaitForBarrierGCTask(bool on_c_heap) :994_is_c_heap_obj(on_c_heap) {995_monitor = MonitorSupply::reserve();996set_should_wait(true);997if (TraceGCTaskManager) {998tty->print_cr("[" INTPTR_FORMAT "]"999" WaitForBarrierGCTask::WaitForBarrierGCTask()"1000" monitor: " INTPTR_FORMAT,1001this, monitor());1002}1003}10041005void WaitForBarrierGCTask::destroy(WaitForBarrierGCTask* that) {1006if (that != NULL) {1007if (TraceGCTaskManager) {1008tty->print_cr("[" INTPTR_FORMAT "]"1009" WaitForBarrierGCTask::destroy()"1010" is_c_heap_obj: %s"1011" monitor: " INTPTR_FORMAT,1012that,1013that->is_c_heap_obj() ? "true" : "false",1014that->monitor());1015}1016that->destruct();1017if (that->is_c_heap_obj()) {1018FreeHeap(that);1019}1020}1021}10221023void WaitForBarrierGCTask::destruct() {1024assert(monitor() != NULL, "monitor should not be NULL");1025if (TraceGCTaskManager) {1026tty->print_cr("[" INTPTR_FORMAT "]"1027" WaitForBarrierGCTask::destruct()"1028" monitor: " INTPTR_FORMAT,1029this, monitor());1030}1031this->BarrierGCTask::destruct();1032// Clean up that should be in the destructor,1033// except that ResourceMarks don't call destructors.1034if (monitor() != NULL) {1035MonitorSupply::release(monitor());1036}1037_monitor = (Monitor*) (uintptr_t) 0xDEAD000F;1038}10391040void WaitForBarrierGCTask::do_it(GCTaskManager* manager, uint which) {1041if (TraceGCTaskManager) {1042tty->print_cr("[" INTPTR_FORMAT "]"1043" WaitForBarrierGCTask::do_it() waiting for idle"1044" monitor: " INTPTR_FORMAT,1045this, monitor());1046}1047{1048// First, wait for the barrier to arrive.1049MutexLockerEx ml(manager->lock(), Mutex::_no_safepoint_check_flag);1050do_it_internal(manager, which);1051// Release manager->lock().1052}1053{1054// Then notify the waiter.1055MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);1056set_should_wait(false);1057// Waiter doesn't miss the notify in the wait_for method1058// since it checks the flag after grabbing the monitor.1059if (TraceGCTaskManager) {1060tty->print_cr("[" INTPTR_FORMAT "]"1061" WaitForBarrierGCTask::do_it()"1062" [" INTPTR_FORMAT "] (%s)->notify_all()",1063this, monitor(), monitor()->name());1064}1065monitor()->notify_all();1066// Release monitor().1067}1068}10691070void WaitForBarrierGCTask::wait_for(bool reset) {1071if (TraceGCTaskManager) {1072tty->print_cr("[" INTPTR_FORMAT "]"1073" WaitForBarrierGCTask::wait_for()"1074" should_wait: %s",1075this, should_wait() ? "true" : "false");1076}1077{1078// Grab the lock and check again.1079MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);1080while (should_wait()) {1081if (TraceGCTaskManager) {1082tty->print_cr("[" INTPTR_FORMAT "]"1083" WaitForBarrierGCTask::wait_for()"1084" [" INTPTR_FORMAT "] (%s)->wait()",1085this, monitor(), monitor()->name());1086}1087monitor()->wait(Mutex::_no_safepoint_check_flag, 0);1088}1089// Reset the flag in case someone reuses this task.1090if (reset) {1091set_should_wait(true);1092}1093if (TraceGCTaskManager) {1094tty->print_cr("[" INTPTR_FORMAT "]"1095" WaitForBarrierGCTask::wait_for() returns"1096" should_wait: %s",1097this, should_wait() ? "true" : "false");1098}1099// Release monitor().1100}1101}11021103Mutex* MonitorSupply::_lock = NULL;1104GrowableArray<Monitor*>* MonitorSupply::_freelist = NULL;11051106Monitor* MonitorSupply::reserve() {1107Monitor* result = NULL;1108// Lazy initialization: possible race.1109if (lock() == NULL) {1110_lock = new Mutex(Mutex::barrier, // rank1111"MonitorSupply mutex", // name1112Mutex::_allow_vm_block_flag); // allow_vm_block1113}1114{1115MutexLockerEx ml(lock());1116// Lazy initialization.1117if (freelist() == NULL) {1118_freelist =1119new(ResourceObj::C_HEAP, mtGC) GrowableArray<Monitor*>(ParallelGCThreads,1120true);1121}1122if (! freelist()->is_empty()) {1123result = freelist()->pop();1124} else {1125result = new Monitor(Mutex::barrier, // rank1126"MonitorSupply monitor", // name1127Mutex::_allow_vm_block_flag); // allow_vm_block1128}1129guarantee(result != NULL, "shouldn't return NULL");1130assert(!result->is_locked(), "shouldn't be locked");1131// release lock().1132}1133return result;1134}11351136void MonitorSupply::release(Monitor* instance) {1137assert(instance != NULL, "shouldn't release NULL");1138assert(!instance->is_locked(), "shouldn't be locked");1139{1140MutexLockerEx ml(lock());1141freelist()->push(instance);1142// release lock().1143}1144}114511461147