GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp

/*
 * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/gcTaskThread.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.inline.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

//
// GCTask
//
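// A GCTask is one unit of GC work. Each task has a Kind (ordinary, barrier,
// noop, idle) and an optional worker affinity, and carries the _older/_newer
// links used to chain it into a GCTaskQueue.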

const char* GCTask::Kind::to_string(kind value) {
  const char* result = "unknown GCTask kind";
  switch (value) {
  default:
    result = "unknown GCTask kind";
    break;
  case unknown_task:
    result = "unknown task";
    break;
  case ordinary_task:
    result = "ordinary task";
    break;
  case barrier_task:
    result = "barrier task";
    break;
  case noop_task:
    result = "noop task";
    break;
  case idle_task:
    result = "idle task";
    break;
  }
  return result;
};

GCTask::GCTask() :
  _kind(Kind::ordinary_task),
  _affinity(GCTaskManager::sentinel_worker()){
  initialize();
}

GCTask::GCTask(Kind::kind kind) :
  _kind(kind),
  _affinity(GCTaskManager::sentinel_worker()) {
  initialize();
}

GCTask::GCTask(uint affinity) :
  _kind(Kind::ordinary_task),
  _affinity(affinity) {
  initialize();
}

GCTask::GCTask(Kind::kind kind, uint affinity) :
  _kind(kind),
  _affinity(affinity) {
  initialize();
}

void GCTask::initialize() {
  _older = NULL;
  _newer = NULL;
}

void GCTask::destruct() {
  assert(older() == NULL, "shouldn't have an older task");
  assert(newer() == NULL, "shouldn't have a newer task");
  // Nothing to do.
}

NOT_PRODUCT(
void GCTask::print(const char* message) const {
  tty->print(INTPTR_FORMAT " <- " INTPTR_FORMAT "(%u) -> " INTPTR_FORMAT,
             newer(), this, affinity(), older());
}
)

//
// GCTaskQueue
//
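// A GCTaskQueue is a FIFO of GCTasks chained through the tasks' own
// _older/_newer pointers: enqueue() appends at insert_end() (the newest end)
// and remove() pops from remove_end() (the oldest end), so no separate link
// nodes are allocated.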

GCTaskQueue* GCTaskQueue::create() {
  GCTaskQueue* result = new GCTaskQueue(false);
  if (TraceGCTaskQueue) {
    tty->print_cr("GCTaskQueue::create()"
                  " returns " INTPTR_FORMAT, result);
  }
  return result;
}

GCTaskQueue* GCTaskQueue::create_on_c_heap() {
  GCTaskQueue* result = new(ResourceObj::C_HEAP, mtGC) GCTaskQueue(true);
  if (TraceGCTaskQueue) {
    tty->print_cr("GCTaskQueue::create_on_c_heap()"
                  " returns " INTPTR_FORMAT,
                  result);
  }
  return result;
}

GCTaskQueue::GCTaskQueue(bool on_c_heap) :
  _is_c_heap_obj(on_c_heap) {
  initialize();
  if (TraceGCTaskQueue) {
    tty->print_cr("[" INTPTR_FORMAT "]"
                  " GCTaskQueue::GCTaskQueue() constructor",
                  this);
  }
}

void GCTaskQueue::destruct() {
  // Nothing to do.
}

void GCTaskQueue::destroy(GCTaskQueue* that) {
  if (TraceGCTaskQueue) {
    tty->print_cr("[" INTPTR_FORMAT "]"
                  " GCTaskQueue::destroy()"
                  " is_c_heap_obj: %s",
                  that,
                  that->is_c_heap_obj() ? "true" : "false");
  }
  // That instance may have been allocated as a CHeapObj,
  // in which case we have to free it explicitly.
  if (that != NULL) {
    that->destruct();
    assert(that->is_empty(), "should be empty");
    if (that->is_c_heap_obj()) {
      FreeHeap(that);
    }
  }
}

void GCTaskQueue::initialize() {
  set_insert_end(NULL);
  set_remove_end(NULL);
  set_length(0);
}

// Enqueue one task.
void GCTaskQueue::enqueue(GCTask* task) {
  if (TraceGCTaskQueue) {
    tty->print_cr("[" INTPTR_FORMAT "]"
                  " GCTaskQueue::enqueue(task: "
                  INTPTR_FORMAT ")",
                  this, task);
    print("before:");
  }
  assert(task != NULL, "shouldn't have null task");
  assert(task->older() == NULL, "shouldn't be on queue");
  assert(task->newer() == NULL, "shouldn't be on queue");
  task->set_newer(NULL);
  task->set_older(insert_end());
  if (is_empty()) {
    set_remove_end(task);
  } else {
    insert_end()->set_newer(task);
  }
  set_insert_end(task);
  increment_length();
  verify_length();
  if (TraceGCTaskQueue) {
    print("after:");
  }
}

// Enqueue a whole list of tasks. Empties the argument list.
void GCTaskQueue::enqueue(GCTaskQueue* list) {
  if (TraceGCTaskQueue) {
    tty->print_cr("[" INTPTR_FORMAT "]"
                  " GCTaskQueue::enqueue(list: "
                  INTPTR_FORMAT ")",
                  this, list);
    print("before:");
    list->print("list:");
  }
  if (list->is_empty()) {
    // Enqueuing the empty list: nothing to do.
    return;
  }
  uint list_length = list->length();
  if (is_empty()) {
    // Enqueuing to empty list: just acquire elements.
    set_insert_end(list->insert_end());
    set_remove_end(list->remove_end());
    set_length(list_length);
  } else {
    // Prepend argument list to our queue.
    list->remove_end()->set_older(insert_end());
    insert_end()->set_newer(list->remove_end());
    set_insert_end(list->insert_end());
    set_length(length() + list_length);
    // empty the argument list.
  }
  list->initialize();
  if (TraceGCTaskQueue) {
    print("after:");
    list->print("list:");
  }
  verify_length();
}

// Dequeue one task.
GCTask* GCTaskQueue::dequeue() {
  if (TraceGCTaskQueue) {
    tty->print_cr("[" INTPTR_FORMAT "]"
                  " GCTaskQueue::dequeue()", this);
    print("before:");
  }
  assert(!is_empty(), "shouldn't dequeue from empty list");
  GCTask* result = remove();
  assert(result != NULL, "shouldn't have NULL task");
  if (TraceGCTaskQueue) {
    tty->print_cr(" return: " INTPTR_FORMAT, result);
    print("after:");
  }
  return result;
}

// Dequeue one task, preferring one with affinity.
GCTask* GCTaskQueue::dequeue(uint affinity) {
  if (TraceGCTaskQueue) {
    tty->print_cr("[" INTPTR_FORMAT "]"
                  " GCTaskQueue::dequeue(%u)", this, affinity);
    print("before:");
  }
  assert(!is_empty(), "shouldn't dequeue from empty list");
  // Look down to the next barrier for a task with this affinity.
  GCTask* result = NULL;
  for (GCTask* element = remove_end();
       element != NULL;
       element = element->newer()) {
    if (element->is_barrier_task()) {
      // Don't consider barrier tasks, nor past them.
      result = NULL;
      break;
    }
    if (element->affinity() == affinity) {
      result = remove(element);
      break;
    }
  }
  // If we didn't find anything with affinity, just take the next task.
  if (result == NULL) {
    result = remove();
  }
  if (TraceGCTaskQueue) {
    tty->print_cr(" return: " INTPTR_FORMAT, result);
    print("after:");
  }
  return result;
}

GCTask* GCTaskQueue::remove() {
  // Dequeue from remove end.
  GCTask* result = remove_end();
  assert(result != NULL, "shouldn't have null task");
  assert(result->older() == NULL, "not the remove_end");
  set_remove_end(result->newer());
  if (remove_end() == NULL) {
    assert(insert_end() == result, "not a singleton");
    set_insert_end(NULL);
  } else {
    remove_end()->set_older(NULL);
  }
  result->set_newer(NULL);
  decrement_length();
  assert(result->newer() == NULL, "shouldn't be on queue");
  assert(result->older() == NULL, "shouldn't be on queue");
  verify_length();
  return result;
}

GCTask* GCTaskQueue::remove(GCTask* task) {
  // This is slightly more work, and has slightly fewer asserts
  // than removing from the remove end.
  assert(task != NULL, "shouldn't have null task");
  GCTask* result = task;
  if (result->newer() != NULL) {
    result->newer()->set_older(result->older());
  } else {
    assert(insert_end() == result, "not youngest");
    set_insert_end(result->older());
  }
  if (result->older() != NULL) {
    result->older()->set_newer(result->newer());
  } else {
    assert(remove_end() == result, "not oldest");
    set_remove_end(result->newer());
  }
  result->set_newer(NULL);
  result->set_older(NULL);
  decrement_length();
  verify_length();
  return result;
}

NOT_PRODUCT(
// Count the elements in the queue and verify the length against
// that count.
void GCTaskQueue::verify_length() const {
  uint count = 0;
  for (GCTask* element = insert_end();
       element != NULL;
       element = element->older()) {

    count++;
  }
  assert(count == length(), "Length does not match queue");
}

void GCTaskQueue::print(const char* message) const {
  tty->print_cr("[" INTPTR_FORMAT "] GCTaskQueue:"
                " insert_end: " INTPTR_FORMAT
                " remove_end: " INTPTR_FORMAT
                " length: %d"
                " %s",
                this, insert_end(), remove_end(), length(), message);
  uint count = 0;
  for (GCTask* element = insert_end();
       element != NULL;
       element = element->older()) {
    element->print(" ");
    count++;
    tty->cr();
  }
  tty->print("Total tasks: %d", count);
}
)

//
// SynchronizedGCTaskQueue
//
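// A SynchronizedGCTaskQueue simply pairs an unsynchronized GCTaskQueue with
// a Monitor; the GCTaskManager holds that lock around every enqueue and
// dequeue so all of its workers can share the one queue.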

SynchronizedGCTaskQueue::SynchronizedGCTaskQueue(GCTaskQueue* queue_arg,
                                                 Monitor * lock_arg) :
  _unsynchronized_queue(queue_arg),
  _lock(lock_arg) {
  assert(unsynchronized_queue() != NULL, "null queue");
  assert(lock() != NULL, "null lock");
}

SynchronizedGCTaskQueue::~SynchronizedGCTaskQueue() {
  // Nothing to do.
}

//
// GCTaskManager
//
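// The GCTaskManager owns a fixed gang of GCTaskThreads, the synchronized
// task queue they all pull from, and the monitor used both to guard the
// queue and to park workers that are idle or waiting at a barrier.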
GCTaskManager::GCTaskManager(uint workers) :
  _workers(workers),
  _active_workers(0),
  _idle_workers(0),
  _ndc(NULL) {
  initialize();
}

GCTaskManager::GCTaskManager(uint workers, NotifyDoneClosure* ndc) :
  _workers(workers),
  _active_workers(0),
  _idle_workers(0),
  _ndc(ndc) {
  initialize();
}

void GCTaskManager::initialize() {
  if (TraceGCTaskManager) {
    tty->print_cr("GCTaskManager::initialize: workers: %u", workers());
  }
  assert(workers() != 0, "no workers");
  _monitor = new Monitor(Mutex::barrier, // rank
                         "GCTaskManager monitor", // name
                         Mutex::_allow_vm_block_flag); // allow_vm_block
  // The queue for the GCTaskManager must be a CHeapObj.
  GCTaskQueue* unsynchronized_queue = GCTaskQueue::create_on_c_heap();
  _queue = SynchronizedGCTaskQueue::create(unsynchronized_queue, lock());
  _noop_task = NoopGCTask::create_on_c_heap();
  _idle_inactive_task = WaitForBarrierGCTask::create_on_c_heap();
  _resource_flag = NEW_C_HEAP_ARRAY(bool, workers(), mtGC);
  {
    // Set up worker threads.
    //   Distribute the workers among the available processors,
    //   unless we were told not to, or if the os doesn't want to.
    uint* processor_assignment = NEW_C_HEAP_ARRAY(uint, workers(), mtGC);
    if (!BindGCTaskThreadsToCPUs ||
        !os::distribute_processes(workers(), processor_assignment)) {
      for (uint a = 0; a < workers(); a += 1) {
        processor_assignment[a] = sentinel_worker();
      }
    }
    _thread = NEW_C_HEAP_ARRAY(GCTaskThread*, workers(), mtGC);
    for (uint t = 0; t < workers(); t += 1) {
      set_thread(t, GCTaskThread::create(this, t, processor_assignment[t]));
    }
    if (TraceGCTaskThread) {
      tty->print("GCTaskManager::initialize: distribution:");
      for (uint t = 0; t < workers(); t += 1) {
        tty->print(" %u", processor_assignment[t]);
      }
      tty->cr();
    }
    FREE_C_HEAP_ARRAY(uint, processor_assignment, mtGC);
  }
  reset_busy_workers();
  set_unblocked();
  for (uint w = 0; w < workers(); w += 1) {
    set_resource_flag(w, false);
  }
  reset_delivered_tasks();
  reset_completed_tasks();
  reset_noop_tasks();
  reset_barriers();
  reset_emptied_queue();
  for (uint s = 0; s < workers(); s += 1) {
    thread(s)->start();
  }
}

GCTaskManager::~GCTaskManager() {
  assert(busy_workers() == 0, "still have busy workers");
  assert(queue()->is_empty(), "still have queued work");
  NoopGCTask::destroy(_noop_task);
  _noop_task = NULL;
  WaitForBarrierGCTask::destroy(_idle_inactive_task);
  _idle_inactive_task = NULL;
  if (_thread != NULL) {
    for (uint i = 0; i < workers(); i += 1) {
      GCTaskThread::destroy(thread(i));
      set_thread(i, NULL);
    }
    FREE_C_HEAP_ARRAY(GCTaskThread*, _thread, mtGC);
    _thread = NULL;
  }
  if (_resource_flag != NULL) {
    FREE_C_HEAP_ARRAY(bool, _resource_flag, mtGC);
    _resource_flag = NULL;
  }
  if (queue() != NULL) {
    GCTaskQueue* unsynchronized_queue = queue()->unsynchronized_queue();
    GCTaskQueue::destroy(unsynchronized_queue);
    SynchronizedGCTaskQueue::destroy(queue());
    _queue = NULL;
  }
  if (monitor() != NULL) {
    delete monitor();
    _monitor = NULL;
  }
}

void GCTaskManager::set_active_gang() {
  _active_workers =
    AdaptiveSizePolicy::calc_active_workers(workers(),
                                            active_workers(),
                                            Threads::number_of_non_daemon_threads());

  assert(!all_workers_active() || active_workers() == ParallelGCThreads,
         err_msg("all_workers_active() is incorrect: "
                 "active %d ParallelGCThreads %d", active_workers(),
                 ParallelGCThreads));
  if (TraceDynamicGCThreads) {
    gclog_or_tty->print_cr("GCTaskManager::set_active_gang(): "
                           "all_workers_active() %d workers %d "
                           "active %d ParallelGCThreads %d ",
                           all_workers_active(), workers(), active_workers(),
                           ParallelGCThreads);
  }
}

// Create IdleGCTasks for inactive workers.
// Creates tasks in a ResourceArea and assumes
// an appropriate ResourceMark.
void GCTaskManager::task_idle_workers() {
  {
    int more_inactive_workers = 0;
    {
      // Stop any idle tasks from exiting their IdleGCTask's
      // and get the count for additional IdleGCTask's under
      // the GCTaskManager's monitor so that the "more_inactive_workers"
      // count is correct.
      MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
      _idle_inactive_task->set_should_wait(true);
      // active_workers is the number being requested. idle_workers
      // is the number currently idle. If all the workers are being
      // requested to be active but some are already idle, reduce
      // the number of active_workers to be consistent with the
      // number of idle_workers. The idle_workers are stuck in
      // idle tasks and will no longer be released (since a new GC
      // is starting). Try later to release enough idle_workers
      // to allow the desired number of active_workers.
      more_inactive_workers =
        workers() - active_workers() - idle_workers();
      if (more_inactive_workers < 0) {
        int reduced_active_workers = active_workers() + more_inactive_workers;
        set_active_workers(reduced_active_workers);
        more_inactive_workers = 0;
      }
      if (TraceDynamicGCThreads) {
        gclog_or_tty->print_cr("JT: %d workers %d active %d "
                               "idle %d more %d",
                               Threads::number_of_non_daemon_threads(),
                               workers(),
                               active_workers(),
                               idle_workers(),
                               more_inactive_workers);
      }
    }
    GCTaskQueue* q = GCTaskQueue::create();
    for(uint i = 0; i < (uint) more_inactive_workers; i++) {
      q->enqueue(IdleGCTask::create_on_c_heap());
      increment_idle_workers();
    }
    assert(workers() == active_workers() + idle_workers(),
           "total workers should equal active + inactive");
    add_list(q);
    // GCTaskQueue* q was created in a ResourceArea so a
    // destroy() call is not needed.
  }
}

void GCTaskManager::release_idle_workers() {
  {
    MutexLockerEx ml(monitor(),
                     Mutex::_no_safepoint_check_flag);
    _idle_inactive_task->set_should_wait(false);
    monitor()->notify_all();
    // Release monitor
  }
}

void GCTaskManager::print_task_time_stamps() {
  for(uint i=0; i<ParallelGCThreads; i++) {
    GCTaskThread* t = thread(i);
    t->print_task_time_stamps();
  }
}

void GCTaskManager::print_threads_on(outputStream* st) {
  uint num_thr = workers();
  for (uint i = 0; i < num_thr; i++) {
    thread(i)->print_on(st);
    st->cr();
  }
}

void GCTaskManager::threads_do(ThreadClosure* tc) {
  assert(tc != NULL, "Null ThreadClosure");
  uint num_thr = workers();
  for (uint i = 0; i < num_thr; i++) {
    tc->do_thread(thread(i));
  }
}

GCTaskThread* GCTaskManager::thread(uint which) {
  assert(which < workers(), "index out of bounds");
  assert(_thread[which] != NULL, "shouldn't have null thread");
  return _thread[which];
}

void GCTaskManager::set_thread(uint which, GCTaskThread* value) {
  assert(which < workers(), "index out of bounds");
  assert(value != NULL, "shouldn't have null thread");
  _thread[which] = value;
}

void GCTaskManager::add_task(GCTask* task) {
  assert(task != NULL, "shouldn't have null task");
  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  if (TraceGCTaskManager) {
    tty->print_cr("GCTaskManager::add_task(" INTPTR_FORMAT " [%s])",
                  task, GCTask::Kind::to_string(task->kind()));
  }
  queue()->enqueue(task);
  // Notify with the lock held to avoid missed notifies.
  if (TraceGCTaskManager) {
    tty->print_cr(" GCTaskManager::add_task (%s)->notify_all",
                  monitor()->name());
  }
  (void) monitor()->notify_all();
  // Release monitor().
}

void GCTaskManager::add_list(GCTaskQueue* list) {
  assert(list != NULL, "shouldn't have null task");
  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  if (TraceGCTaskManager) {
    tty->print_cr("GCTaskManager::add_list(%u)", list->length());
  }
  queue()->enqueue(list);
  // Notify with the lock held to avoid missed notifies.
  if (TraceGCTaskManager) {
    tty->print_cr(" GCTaskManager::add_list (%s)->notify_all",
                  monitor()->name());
  }
  (void) monitor()->notify_all();
  // Release monitor().
}

// GC workers wait in get_task() for new work to be added
// to the GCTaskManager's queue. When new work is added,
// a notify is sent to the waiting GC workers which then
// compete to get tasks. If a GC worker wakes up and there
// is no work on the queue, it is given a noop_task to execute
// and then loops to find more work.
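//
// Roughly, each GCTaskThread spends its life in a loop of this shape (see
// gcTaskThread.cpp for the real loop, which also handles timing and
// resource-release bookkeeping):
//
//   for (;;) {
//     GCTask* task = manager->get_task(which);  // blocks until work arrives
//     task->do_it(manager, which);
//     manager->note_completion(which);
//   }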

GCTask* GCTaskManager::get_task(uint which) {
  GCTask* result = NULL;
  // Grab the queue lock.
  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  // Wait while the queue is blocked or
  // there is nothing to do, except maybe release resources.
  while (is_blocked() ||
         (queue()->is_empty() && !should_release_resources(which))) {
    if (TraceGCTaskManager) {
      tty->print_cr("GCTaskManager::get_task(%u)"
                    " blocked: %s"
                    " empty: %s"
                    " release: %s",
                    which,
                    is_blocked() ? "true" : "false",
                    queue()->is_empty() ? "true" : "false",
                    should_release_resources(which) ? "true" : "false");
      tty->print_cr(" => (%s)->wait()",
                    monitor()->name());
    }
    monitor()->wait(Mutex::_no_safepoint_check_flag, 0);
  }
  // We've reacquired the queue lock here.
  // Figure out which condition caused us to exit the loop above.
  if (!queue()->is_empty()) {
    if (UseGCTaskAffinity) {
      result = queue()->dequeue(which);
    } else {
      result = queue()->dequeue();
    }
    if (result->is_barrier_task()) {
      assert(which != sentinel_worker(),
             "blocker shouldn't be bogus");
      set_blocking_worker(which);
    }
  } else {
    // The queue is empty, but we were woken up.
    // Just hand back a Noop task,
    // in case someone wanted us to release resources, or whatever.
    result = noop_task();
    increment_noop_tasks();
  }
  assert(result != NULL, "shouldn't have null task");
  if (TraceGCTaskManager) {
    tty->print_cr("GCTaskManager::get_task(%u) => " INTPTR_FORMAT " [%s]",
                  which, result, GCTask::Kind::to_string(result->kind()));
    tty->print_cr(" %s", result->name());
  }
  if (!result->is_idle_task()) {
    increment_busy_workers();
    increment_delivered_tasks();
  }
  return result;
  // Release monitor().
}

void GCTaskManager::note_completion(uint which) {
  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  if (TraceGCTaskManager) {
    tty->print_cr("GCTaskManager::note_completion(%u)", which);
  }
  // If we are blocked, check if the completing thread is the blocker.
  if (blocking_worker() == which) {
    assert(blocking_worker() != sentinel_worker(),
           "blocker shouldn't be bogus");
    increment_barriers();
    set_unblocked();
  }
  increment_completed_tasks();
  uint active = decrement_busy_workers();
  if ((active == 0) && (queue()->is_empty())) {
    increment_emptied_queue();
    if (TraceGCTaskManager) {
      tty->print_cr(" GCTaskManager::note_completion(%u) done", which);
    }
    // Notify client that we are done.
    NotifyDoneClosure* ndc = notify_done_closure();
    if (ndc != NULL) {
      ndc->notify(this);
    }
  }
  if (TraceGCTaskManager) {
    tty->print_cr(" GCTaskManager::note_completion(%u) (%s)->notify_all",
                  which, monitor()->name());
    tty->print_cr(" "
                  " blocked: %s"
                  " empty: %s"
                  " release: %s",
                  is_blocked() ? "true" : "false",
                  queue()->is_empty() ? "true" : "false",
                  should_release_resources(which) ? "true" : "false");
    tty->print_cr(" "
                  " delivered: %u"
                  " completed: %u"
                  " barriers: %u"
                  " emptied: %u",
                  delivered_tasks(),
                  completed_tasks(),
                  barriers(),
                  emptied_queue());
  }
  // Tell everyone that a task has completed.
  (void) monitor()->notify_all();
  // Release monitor().
}

uint GCTaskManager::increment_busy_workers() {
  assert(queue()->own_lock(), "don't own the lock");
  _busy_workers += 1;
  return _busy_workers;
}

uint GCTaskManager::decrement_busy_workers() {
  assert(queue()->own_lock(), "don't own the lock");
  assert(_busy_workers > 0, "About to make a mistake");
  _busy_workers -= 1;
  return _busy_workers;
}

void GCTaskManager::release_all_resources() {
  // If you want this to be done atomically, do it in a BarrierGCTask.
  for (uint i = 0; i < workers(); i += 1) {
    set_resource_flag(i, true);
  }
}

bool GCTaskManager::should_release_resources(uint which) {
  // This can be done without a lock because each thread reads one element.
  return resource_flag(which);
}

void GCTaskManager::note_release(uint which) {
  // This can be done without a lock because each thread writes one element.
  set_resource_flag(which, false);
}

// "list" contains tasks that are ready to execute. Those
// tasks are added to the GCTaskManager's queue of tasks and
// then the GC workers are notified that there is new work to
// do.
//
// Typically different types of tasks can be added to the "list".
// For example in PSScavenge OldToYoungRootsTask, SerialOldToYoungRootsTask,
// ScavengeRootsTask, and StealTask tasks are all added to the list
// and then the GC workers are notified of new work. The tasks are
// handed out in the order in which they are added to the list
// (although execution is not necessarily in that order). As long
// as any tasks are running the GCTaskManager will wait for execution
// to complete. GC workers that execute a stealing task remain in
// the stealing task until all stealing tasks have completed. The load
// balancing afforded by the stealing tasks works best if the stealing
// tasks are added last to the list.
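//
// For illustration only (psScavenge.cpp has the real code), a client
// typically builds and runs a list like this:
//
//   GCTaskQueue* q = GCTaskQueue::create();
//   q->enqueue(new ScavengeRootsTask(...));  // roots tasks first
//   q->enqueue(new StealTask(...));          // stealing tasks last
//   gc_task_manager()->execute_and_wait(q);  // returns when all tasks finish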

void GCTaskManager::execute_and_wait(GCTaskQueue* list) {
  WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create();
  list->enqueue(fin);
  // The barrier task will be read by one of the GC
  // workers once it is added to the list of tasks.
  // Be sure that is globally visible before the
  // GC worker reads it (which is after the task is added
  // to the list of tasks below).
  OrderAccess::storestore();
  add_list(list);
  fin->wait_for(true /* reset */);
  // We have to release the barrier tasks!
  WaitForBarrierGCTask::destroy(fin);
}

bool GCTaskManager::resource_flag(uint which) {
  assert(which < workers(), "index out of bounds");
  return _resource_flag[which];
}

void GCTaskManager::set_resource_flag(uint which, bool value) {
  assert(which < workers(), "index out of bounds");
  _resource_flag[which] = value;
}

//
// NoopGCTask
//
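// A NoopGCTask does nothing; the manager keeps a single reusable instance
// and hands it to any worker that wakes up to find the queue empty (see
// get_task() above).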

NoopGCTask* NoopGCTask::create() {
  NoopGCTask* result = new NoopGCTask(false);
  return result;
}

NoopGCTask* NoopGCTask::create_on_c_heap() {
  NoopGCTask* result = new(ResourceObj::C_HEAP, mtGC) NoopGCTask(true);
  return result;
}

void NoopGCTask::destroy(NoopGCTask* that) {
  if (that != NULL) {
    that->destruct();
    if (that->is_c_heap_obj()) {
      FreeHeap(that);
    }
  }
}

void NoopGCTask::destruct() {
  // This has to know its superclass structure, just like the constructor.
  this->GCTask::destruct();
  // Nothing else to do.
}

//
// IdleGCTask
//
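// An IdleGCTask parks a surplus worker on the manager's monitor when
// UseDynamicNumberOfGCThreads has shrunk the active set; the worker stays
// in do_it() until release_idle_workers() clears should_wait() and wakes it.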

IdleGCTask* IdleGCTask::create() {
  IdleGCTask* result = new IdleGCTask(false);
  assert(UseDynamicNumberOfGCThreads,
         "Should only be used with dynamic GC thread");
  return result;
}

IdleGCTask* IdleGCTask::create_on_c_heap() {
  IdleGCTask* result = new(ResourceObj::C_HEAP, mtGC) IdleGCTask(true);
  assert(UseDynamicNumberOfGCThreads,
         "Should only be used with dynamic GC thread");
  return result;
}

void IdleGCTask::do_it(GCTaskManager* manager, uint which) {
  WaitForBarrierGCTask* wait_for_task = manager->idle_inactive_task();
  if (TraceGCTaskManager) {
    tty->print_cr("[" INTPTR_FORMAT "]"
                  " IdleGCTask::do_it()"
                  " should_wait: %s",
                  this, wait_for_task->should_wait() ? "true" : "false");
  }
  MutexLockerEx ml(manager->monitor(), Mutex::_no_safepoint_check_flag);
  if (TraceDynamicGCThreads) {
    gclog_or_tty->print_cr("--- idle %d", which);
  }
  // Increment has to be done when the idle tasks are created.
  // manager->increment_idle_workers();
  manager->monitor()->notify_all();
  while (wait_for_task->should_wait()) {
    if (TraceGCTaskManager) {
      tty->print_cr("[" INTPTR_FORMAT "]"
                    " IdleGCTask::do_it()"
                    " [" INTPTR_FORMAT "] (%s)->wait()",
                    this, manager->monitor(), manager->monitor()->name());
    }
    manager->monitor()->wait(Mutex::_no_safepoint_check_flag, 0);
  }
  manager->decrement_idle_workers();
  if (TraceDynamicGCThreads) {
    gclog_or_tty->print_cr("--- release %d", which);
  }
  if (TraceGCTaskManager) {
    tty->print_cr("[" INTPTR_FORMAT "]"
                  " IdleGCTask::do_it() returns"
                  " should_wait: %s",
                  this, wait_for_task->should_wait() ? "true" : "false");
  }
  // Release monitor().
}

void IdleGCTask::destroy(IdleGCTask* that) {
  if (that != NULL) {
    that->destruct();
    if (that->is_c_heap_obj()) {
      FreeHeap(that);
    }
  }
}

void IdleGCTask::destruct() {
  // This has to know its superclass structure, just like the constructor.
  this->GCTask::destruct();
  // Nothing else to do.
}

//
// BarrierGCTask
//
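// A BarrierGCTask's do_it() waits, under the manager's lock, until it is the
// only busy worker, i.e. until every task handed out before the barrier has
// completed; get_task() keeps the queue blocked while the barrier runs.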

void BarrierGCTask::do_it(GCTaskManager* manager, uint which) {
  // Wait for this to be the only busy worker.
  // ??? I thought of having a StackObj class
  //     whose constructor would grab the lock and come to the barrier,
  //     and whose destructor would release the lock,
  //     but that seems like too much mechanism for two lines of code.
  MutexLockerEx ml(manager->lock(), Mutex::_no_safepoint_check_flag);
  do_it_internal(manager, which);
  // Release manager->lock().
}

void BarrierGCTask::do_it_internal(GCTaskManager* manager, uint which) {
  // Wait for this to be the only busy worker.
  assert(manager->monitor()->owned_by_self(), "don't own the lock");
  assert(manager->is_blocked(), "manager isn't blocked");
  while (manager->busy_workers() > 1) {
    if (TraceGCTaskManager) {
      tty->print_cr("BarrierGCTask::do_it(%u) waiting on %u workers",
                    which, manager->busy_workers());
    }
    manager->monitor()->wait(Mutex::_no_safepoint_check_flag, 0);
  }
}

void BarrierGCTask::destruct() {
  this->GCTask::destruct();
  // Nothing else to do.
}

//
// ReleasingBarrierGCTask
//

void ReleasingBarrierGCTask::do_it(GCTaskManager* manager, uint which) {
  MutexLockerEx ml(manager->lock(), Mutex::_no_safepoint_check_flag);
  do_it_internal(manager, which);
  manager->release_all_resources();
  // Release manager->lock().
}

void ReleasingBarrierGCTask::destruct() {
  this->BarrierGCTask::destruct();
  // Nothing else to do.
}

//
// NotifyingBarrierGCTask
//

void NotifyingBarrierGCTask::do_it(GCTaskManager* manager, uint which) {
  MutexLockerEx ml(manager->lock(), Mutex::_no_safepoint_check_flag);
  do_it_internal(manager, which);
  NotifyDoneClosure* ndc = notify_done_closure();
  if (ndc != NULL) {
    ndc->notify(manager);
  }
  // Release manager->lock().
}

void NotifyingBarrierGCTask::destruct() {
  this->BarrierGCTask::destruct();
  // Nothing else to do.
}

//
// WaitForBarrierGCTask
//
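// A WaitForBarrierGCTask is a barrier task that also owns a monitor and a
// should_wait flag: a client enqueues it and blocks in wait_for(), and the
// worker that eventually runs do_it() clears the flag and notifies the
// client. execute_and_wait() above is the main user.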
WaitForBarrierGCTask* WaitForBarrierGCTask::create() {
  WaitForBarrierGCTask* result = new WaitForBarrierGCTask(false);
  return result;
}

WaitForBarrierGCTask* WaitForBarrierGCTask::create_on_c_heap() {
  WaitForBarrierGCTask* result =
    new (ResourceObj::C_HEAP, mtGC) WaitForBarrierGCTask(true);
  return result;
}

WaitForBarrierGCTask::WaitForBarrierGCTask(bool on_c_heap) :
  _is_c_heap_obj(on_c_heap) {
  _monitor = MonitorSupply::reserve();
  set_should_wait(true);
  if (TraceGCTaskManager) {
    tty->print_cr("[" INTPTR_FORMAT "]"
                  " WaitForBarrierGCTask::WaitForBarrierGCTask()"
                  " monitor: " INTPTR_FORMAT,
                  this, monitor());
  }
}

void WaitForBarrierGCTask::destroy(WaitForBarrierGCTask* that) {
  if (that != NULL) {
    if (TraceGCTaskManager) {
      tty->print_cr("[" INTPTR_FORMAT "]"
                    " WaitForBarrierGCTask::destroy()"
                    " is_c_heap_obj: %s"
                    " monitor: " INTPTR_FORMAT,
                    that,
                    that->is_c_heap_obj() ? "true" : "false",
                    that->monitor());
    }
    that->destruct();
    if (that->is_c_heap_obj()) {
      FreeHeap(that);
    }
  }
}

void WaitForBarrierGCTask::destruct() {
  assert(monitor() != NULL, "monitor should not be NULL");
  if (TraceGCTaskManager) {
    tty->print_cr("[" INTPTR_FORMAT "]"
                  " WaitForBarrierGCTask::destruct()"
                  " monitor: " INTPTR_FORMAT,
                  this, monitor());
  }
  this->BarrierGCTask::destruct();
  // Clean up that should be in the destructor,
  // except that ResourceMarks don't call destructors.
  if (monitor() != NULL) {
    MonitorSupply::release(monitor());
  }
  _monitor = (Monitor*) (uintptr_t) 0xDEAD000F;
}

void WaitForBarrierGCTask::do_it(GCTaskManager* manager, uint which) {
  if (TraceGCTaskManager) {
    tty->print_cr("[" INTPTR_FORMAT "]"
                  " WaitForBarrierGCTask::do_it() waiting for idle"
                  " monitor: " INTPTR_FORMAT,
                  this, monitor());
  }
  {
    // First, wait for the barrier to arrive.
    MutexLockerEx ml(manager->lock(), Mutex::_no_safepoint_check_flag);
    do_it_internal(manager, which);
    // Release manager->lock().
  }
  {
    // Then notify the waiter.
    MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
    set_should_wait(false);
    // Waiter doesn't miss the notify in the wait_for method
    // since it checks the flag after grabbing the monitor.
    if (TraceGCTaskManager) {
      tty->print_cr("[" INTPTR_FORMAT "]"
                    " WaitForBarrierGCTask::do_it()"
                    " [" INTPTR_FORMAT "] (%s)->notify_all()",
                    this, monitor(), monitor()->name());
    }
    monitor()->notify_all();
    // Release monitor().
  }
}

void WaitForBarrierGCTask::wait_for(bool reset) {
  if (TraceGCTaskManager) {
    tty->print_cr("[" INTPTR_FORMAT "]"
                  " WaitForBarrierGCTask::wait_for()"
                  " should_wait: %s",
                  this, should_wait() ? "true" : "false");
  }
  {
    // Grab the lock and check again.
    MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
    while (should_wait()) {
      if (TraceGCTaskManager) {
        tty->print_cr("[" INTPTR_FORMAT "]"
                      " WaitForBarrierGCTask::wait_for()"
                      " [" INTPTR_FORMAT "] (%s)->wait()",
                      this, monitor(), monitor()->name());
      }
      monitor()->wait(Mutex::_no_safepoint_check_flag, 0);
    }
    // Reset the flag in case someone reuses this task.
    if (reset) {
      set_should_wait(true);
    }
    if (TraceGCTaskManager) {
      tty->print_cr("[" INTPTR_FORMAT "]"
                    " WaitForBarrierGCTask::wait_for() returns"
                    " should_wait: %s",
                    this, should_wait() ? "true" : "false");
    }
    // Release monitor().
  }
}

Mutex* MonitorSupply::_lock = NULL;
GrowableArray<Monitor*>* MonitorSupply::_freelist = NULL;

Monitor* MonitorSupply::reserve() {
  Monitor* result = NULL;
  // Lazy initialization: possible race.
  if (lock() == NULL) {
    _lock = new Mutex(Mutex::barrier, // rank
                      "MonitorSupply mutex", // name
                      Mutex::_allow_vm_block_flag); // allow_vm_block
  }
  {
    MutexLockerEx ml(lock());
    // Lazy initialization.
    if (freelist() == NULL) {
      _freelist =
        new(ResourceObj::C_HEAP, mtGC) GrowableArray<Monitor*>(ParallelGCThreads,
                                                               true);
    }
    if (! freelist()->is_empty()) {
      result = freelist()->pop();
    } else {
      result = new Monitor(Mutex::barrier, // rank
                           "MonitorSupply monitor", // name
                           Mutex::_allow_vm_block_flag); // allow_vm_block
    }
    guarantee(result != NULL, "shouldn't return NULL");
    assert(!result->is_locked(), "shouldn't be locked");
    // release lock().
  }
  return result;
}

void MonitorSupply::release(Monitor* instance) {
  assert(instance != NULL, "shouldn't release NULL");
  assert(!instance->is_locked(), "shouldn't be locked");
  {
    MutexLockerEx ml(lock());
    freelist()->push(instance);
    // release lock().
  }
}