Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/openj9
Path: blob/master/runtime/gc_realtime/Scheduler.cpp
5985 views
1
/*******************************************************************************
2
* Copyright (c) 1991, 2020 IBM Corp. and others
3
*
4
* This program and the accompanying materials are made available under
5
* the terms of the Eclipse Public License 2.0 which accompanies this
6
* distribution and is available at https://www.eclipse.org/legal/epl-2.0/
7
* or the Apache License, Version 2.0 which accompanies this distribution and
8
* is available at https://www.apache.org/licenses/LICENSE-2.0.
9
*
10
* This Source Code may also be made available under the following
11
* Secondary Licenses when the conditions for such availability set
12
* forth in the Eclipse Public License, v. 2.0 are satisfied: GNU
13
* General Public License, version 2 with the GNU Classpath
14
* Exception [1] and GNU General Public License, version 2 with the
15
* OpenJDK Assembly Exception [2].
16
*
17
* [1] https://www.gnu.org/software/classpath/license.html
18
* [2] http://openjdk.java.net/legal/assembly-exception.html
19
*
20
* SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 OR GPL-2.0 WITH Classpath-exception-2.0 OR LicenseRef-GPL-2.0 WITH Assembly-exception
21
*******************************************************************************/
22
23
#include "omr.h"
24
#include "omrcfg.h"
25
#include "ModronAssertions.h"
26
27
#include <string.h>
28
29
#include "AtomicOperations.hpp"
30
#include "EnvironmentRealtime.hpp"
31
#include "GCCode.hpp"
32
#include "GCExtensionsBase.hpp"
33
#include "Heap.hpp"
34
#include "IncrementalParallelTask.hpp"
35
#include "MemoryPoolSegregated.hpp"
36
#include "MemorySubSpaceMetronome.hpp"
37
#include "Metronome.hpp"
38
#include "MetronomeAlarmThread.hpp"
39
#include "MetronomeDelegate.hpp"
40
#include "RealtimeGC.hpp"
41
#include "OSInterface.hpp"
42
#include "ParallelDispatcher.hpp"
43
#include "Scheduler.hpp"
44
#include "Timer.hpp"
45
#include "UtilizationTracker.hpp"
46
47
/**
48
* Initialization.
49
* @todo Provide method documentation
50
* @ingroup GC_Metronome methodGroup
51
*/
52
MM_Scheduler*
53
MM_Scheduler::newInstance(MM_EnvironmentBase *env, omrsig_handler_fn handler, void* handler_arg, uintptr_t defaultOSStackSize)
54
{
55
MM_Scheduler *scheduler = (MM_Scheduler *)env->getForge()->allocate(sizeof(MM_Scheduler), MM_AllocationCategory::FIXED, OMR_GET_CALLSITE());
56
if (scheduler) {
57
new(scheduler) MM_Scheduler(env, handler, handler_arg, defaultOSStackSize);
58
if (!scheduler->initialize(env)) {
59
scheduler->kill(env);
60
scheduler = NULL;
61
}
62
}
63
return scheduler;
64
}
65
66
/**
 * Shut down the receiver.
 * All cleanup is delegated to tearDown(); the instance memory is released
 * by the base-class teardown path invoked from there.
 * @ingroup GC_Metronome methodGroup
 */
void
MM_Scheduler::kill(MM_EnvironmentBase *env)
{
	tearDown(env);
}
76
77
/**
78
* Teardown
79
* @todo Provide method documentation
80
* @ingroup GC_Metronome methodGroup
81
*/
82
void
83
MM_Scheduler::tearDown(MM_EnvironmentBase *env)
84
{
85
if (_mainThreadMonitor) {
86
omrthread_monitor_destroy(_mainThreadMonitor);
87
}
88
if (NULL != _threadResumedTable) {
89
env->getForge()->free(_threadResumedTable);
90
_threadResumedTable = NULL;
91
}
92
if (NULL != _utilTracker) {
93
_utilTracker->kill(env);
94
}
95
MM_ParallelDispatcher::kill(env);
96
}
97
98
/**
 * Produce a human-readable key/value description of scheduler parameter `which`.
 * Used by showParameters() to enumerate all parameters in order.
 * @param which 0-based index of the parameter to describe
 * @param keyBuffer buffer receiving the parameter name
 * @param keyBufferSize size of keyBuffer in bytes
 * @param valueBuffer buffer receiving the formatted value
 * @param valueBufferSize size of valueBuffer in bytes
 * @return 1 if parameter `which` exists (both buffers filled), 0 past the last one
 */
uintptr_t
MM_Scheduler::getParameter(uintptr_t which, char *keyBuffer, I_32 keyBufferSize, char *valueBuffer, I_32 valueBufferSize)
{
	OMRPORT_ACCESS_FROM_OMRVM(_vm);
	switch (which) {
	case 0:
		omrstr_printf(keyBuffer, keyBufferSize, "Verbose Level");
		omrstr_printf(valueBuffer, valueBufferSize, "%d", verbose());
		return 1;
	case 1:
	{
		omrstr_printf(keyBuffer, keyBufferSize, "Scheduling Method");
		I_32 len = (I_32)omrstr_printf(valueBuffer, valueBufferSize, "TIME_BASED with ");
		/* NOTE(review): unsynchronized poll of _alarmThread; this spins until GC
		 * initialization publishes the alarm — confirm callers cannot reach here
		 * if initialization was aborted, else this loops forever. */
		while (_alarmThread == NULL || _alarmThread->_alarm == NULL) {
			/* Wait for GC to finish initializing */
			omrthread_sleep(100);
		}
		_alarmThread->_alarm->describe(OMRPORTLIB, &valueBuffer[len], valueBufferSize - len);
		return 1;
	}
	case 2:
		omrstr_printf(keyBuffer, keyBufferSize, "Time Window");
		omrstr_printf(valueBuffer, valueBufferSize, "%6.2f ms", window * 1.0e3);
		return 1;
	case 3:
		omrstr_printf(keyBuffer, keyBufferSize, "Target Utilization");
		omrstr_printf(valueBuffer, valueBufferSize, "%4.1f%%", _utilTracker->getTargetUtilization() * 1.0e2);
		return 1;
	case 4:
		omrstr_printf(keyBuffer, keyBufferSize, "Beat Size");
		omrstr_printf(valueBuffer, valueBufferSize, "%4.2f ms", beat * 1.0e3);
		return 1;
	case 5:
		omrstr_printf(keyBuffer, keyBufferSize, "Heap Size");
		omrstr_printf(valueBuffer, valueBufferSize, "%6.2f MB", ((double)(_extensions->memoryMax)) / (1 << 20));
		return 1;
	case 6:
		omrstr_printf(keyBuffer, keyBufferSize, "GC Trigger");
		omrstr_printf(valueBuffer, valueBufferSize, "%6.2f MB", _extensions->gcTrigger / (double) (1<<20));
		return 1;
	case 7:
		omrstr_printf(keyBuffer, keyBufferSize, "Headroom");
		omrstr_printf(valueBuffer, valueBufferSize, "%5.2f MB", _extensions->headRoom / (double) (1<<20));
		return 1;
	case 8:
		omrstr_printf(keyBuffer, keyBufferSize, "Number of GC Threads");
		/* gcThreadCount is uintptr_t: %zu avoids varargs truncation on 64-bit (was "%d") */
		omrstr_printf(valueBuffer, valueBufferSize, "%zu", _extensions->gcThreadCount);
		return 1;
	case 9:
		omrstr_printf(keyBuffer, keyBufferSize, "Regionsize");
		/* regionSize is uintptr_t: %zu avoids varargs truncation on 64-bit (was "%d") */
		omrstr_printf(valueBuffer, valueBufferSize, "%zu", _extensions->regionSize);
		return 1;
	}
	return 0;
}
152
153
void
154
MM_Scheduler::showParameters(MM_EnvironmentBase *env)
155
{
156
OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
157
omrtty_printf("****************************************************************************\n");
158
for (uintptr_t which=0; ; which++) {
159
char keyBuffer[256], valBuffer[256];
160
uintptr_t rc = getParameter(which, keyBuffer, sizeof(keyBuffer), valBuffer, sizeof(valBuffer));
161
if (rc == 0) { break; }
162
if (rc == 1) { omrtty_printf("%s: %s\n", keyBuffer, valBuffer); }
163
}
164
omrtty_printf("****************************************************************************\n");
165
}
166
167
void
168
MM_Scheduler::initializeForVirtualSTW(MM_GCExtensionsBase *ext)
169
{
170
ext->gcInitialTrigger = (uintptr_t) - 1;
171
ext->gcTrigger = ext->gcInitialTrigger;
172
ext->targetUtilizationPercentage = 0;
173
}
174
175
/**
 * Initialize the Scheduler: base dispatcher, time-window/beat parameters,
 * the utilization tracker, the thread-resumed table and the main-thread monitor.
 * On failure the caller (newInstance) invokes kill(), which frees whatever
 * was successfully created here.
 * @return true on success, false on any allocation/initialization failure
 * @ingroup GC_Metronome methodGroup
 */
bool
MM_Scheduler::initialize(MM_EnvironmentBase *env)
{
	if (!MM_ParallelDispatcher::initialize(env)) {
		return false;
	}

	/* Show GC parameters here before we enter real execution */
	/* convert the configured microsecond quantities to seconds (doubles) / nanoseconds */
	window = _extensions->timeWindowMicro / 1e6;
	beat = _extensions->beatMicro / 1e6;
	beatNanos = (U_64) (_extensions->beatMicro * 1e3);
	/* percentage -> fraction in [0,1] */
	_staticTargetUtilization = _extensions->targetUtilizationPercentage / 1e2;
	_utilTracker = MM_UtilizationTracker::newInstance(env, window, beatNanos, _staticTargetUtilization);
	if (NULL == _utilTracker) {
		goto error_no_memory;
	}

	/* Set up the table used for keeping track of which threads were resumed from suspended */
	_threadResumedTable = (bool*)env->getForge()->allocate(_threadCountMaximum * sizeof(bool), MM_AllocationCategory::FIXED, OMR_GET_CALLSITE());
	if (NULL == _threadResumedTable) {
		goto error_no_memory;
	}
	memset(_threadResumedTable, false, _threadCountMaximum * sizeof(bool));

	/* omrthread_monitor_init_with_name returns non-zero on failure */
	if (omrthread_monitor_init_with_name(&_mainThreadMonitor, 0, "MainThread")) {
		return false;
	}

	return true;

error_no_memory:
	return false;
}
214
215
/**
 * Late-binding hook called once the realtime collector exists: cache the
 * collector pointer and adopt its OS interface.
 */
void
MM_Scheduler::collectorInitialized(MM_RealtimeGC *gc) {
	_gc = gc;
	_osInterface = _gc->_osInterface;
}
220
221
void
222
MM_Scheduler::checkStartGC(MM_EnvironmentRealtime *env)
223
{
224
uintptr_t bytesInUse = _gc->_memoryPool->getBytesInUse();
225
if (isInitialized() && !isGCOn() && (bytesInUse > _extensions->gcTrigger)) {
226
startGC(env);
227
}
228
}
229
230
/* Races with other startGC's are ok
231
*
232
*/
233
void
234
MM_Scheduler::startGC(MM_EnvironmentBase *env)
235
{
236
OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
237
if (verbose() >= 3) {
238
omrtty_printf("GC request: %d Mb in use\n", _gc->_memoryPool->getBytesInUse() >> 20);
239
}
240
241
if (METRONOME_GC_OFF == MM_AtomicOperations::lockCompareExchangeU32(&_gcOn, METRONOME_GC_OFF, METRONOME_GC_ON)) {
242
if (_gc->isPreviousCycleBelowTrigger()) {
243
_gc->setPreviousCycleBelowTrigger(false);
244
TRIGGER_J9HOOK_MM_PRIVATE_METRONOME_TRIGGER_START(_extensions->privateHookInterface,
245
env->getOmrVMThread(), omrtime_hires_clock(),
246
J9HOOK_MM_PRIVATE_METRONOME_TRIGGER_START
247
);
248
}
249
}
250
}
251
252
/* External synchronization to make sure this does not race with startGC:
 * a plain store is sufficient here only because callers guarantee no
 * concurrent OFF->ON CAS is in flight.
 */
void
MM_Scheduler::stopGC(MM_EnvironmentBase *env)
{
	_gcOn = METRONOME_GC_OFF;
}
259
260
/**
 * @return true if a GC cycle is currently active (_gcOn flag set by startGC).
 */
bool
MM_Scheduler::isGCOn()
{
	return (METRONOME_GC_ON == _gcOn);
}
265
266
/**
 * Drive the next increment of an active GC cycle from a mutator (or alarm)
 * thread: depending on `reason`, either degrade the cycle to synchronous
 * completion, decide that the mutator may "double beat" instead, or wake the
 * main GC thread (requesting exclusive VM access on its behalf).
 * @param reason why the caller wants the GC to continue (OOM, system GC,
 *        work- or time-trigger)
 * @param resonParameter reason-specific detail recorded for sync-GC reporting
 *        (note: parameter name typo is in the declaration; kept as-is)
 * @param thr the calling VM thread, used for the exclusive-access pre/post calls
 * @param doRequestExclusiveVMAccess whether the main thread will need
 *        exclusive VM access for this increment
 * @return true if a GC increment was (or already is) in progress as a result,
 *         false if nothing was done (GC off, double-beat, or failed request)
 */
bool
MM_Scheduler::continueGC(MM_EnvironmentRealtime *env, GCReason reason, uintptr_t resonParameter, OMR_VMThread *thr, bool doRequestExclusiveVMAccess)
{
	uintptr_t gcPriority = 0;
	bool didGC = true;

	assert1(isInitialized());
	if (!isGCOn()) {
		return false;
	}

	if (_extensions->trackMutatorThreadCategory) {
		/* This thread is doing GC work, account for the time spent into the GC bucket */
		omrthread_set_category(omrthread_self(), J9THREAD_CATEGORY_SYSTEM_GC_THREAD, J9THREAD_TYPE_SET_GC);
	}

	_gc->getRealtimeDelegate()->preRequestExclusiveVMAccess(thr);

	/* Wake up only the main thread -- it is responsible for
	 * waking up any workers.
	 * Make sure _completeCurrentGCSynchronously and _mode are atomically changed.
	 */
	omrthread_monitor_enter(_mainThreadMonitor);
	switch(reason) {
	case OUT_OF_MEMORY_TRIGGER:
		/* For now we assume that OUT_OF_MEMORY trigger means perform
		 * a synchronous GC, but maybe we want a mode where we try one
		 * more time slice before degrading to synchronous.
		 */
		if(!_extensions->synchronousGCOnOOM) {
			break;
		}
		/* fall through */
	case SYSTEM_GC_TRIGGER:
		/* System garbage collects, if not disabled through the usual command lines,
		 * force a synchronous GC
		 */
		_completeCurrentGCSynchronously = true;
		_completeCurrentGCSynchronouslyReason = reason;
		_completeCurrentGCSynchronouslyReasonParameter = resonParameter;

		break;
	default: /* WORK_TRIGGER or TIME_TRIGGER */ {
		if(_threadWaitingOnMainThreadMonitor != NULL) {
			/* Check your timer again incase another thread beat you to checking for shouldMutatorDoubleBeat */
			if (env->getTimer()->hasTimeElapsed(getStartTimeOfCurrentMutatorSlice(), beatNanos)) {
				if (shouldMutatorDoubleBeat(_threadWaitingOnMainThreadMonitor, env->getTimer())) {
					/*
					 * Since the mutator should double beat signal the mutator threads to update their
					 * timer with the current time.
					 */
					setStartTimeOfCurrentMutatorSlice(env->getTimer()->getTimeInNanos());
					didGC = false;
					goto exit;
				}
			} else {
				/* the beat has not elapsed yet: nothing to do this time around */
				didGC = false;
				goto exit;
			}
		}
		break;
	}
	}
	if(_threadWaitingOnMainThreadMonitor == NULL) {
		/*
		 * The gc thread(s) are already awake and collecting (otherwise, the main
		 * gc thread would be waiting on the monitor).
		 * This also means that the application threads are already sleeping.
		 * So there is no need to put the application threads to sleep or to
		 * awaken the gc thread(s). However we return true to indicate that
		 * garbage collection is indeed taking place as requested.
		 */
		goto exit;
	}

	/* At this point main thread is blocked and cannot change _gcOn flag anymore.
	 * Check the flag again, since there is (a small) chance it may have changed since the last check
	 * (main thread, driven by mutators' could have finished the GC cycle)
	 */
	if (!isGCOn()) {
		didGC = false;
		goto exit;
	}

	_exclusiveVMAccessRequired = doRequestExclusiveVMAccess;

	_mode = WAKING_GC;

	if (_exclusiveVMAccessRequired) {
		/* initiate the request for exclusive VM access; this function does not wait for exclusive access to occur,
		 * that will be done by the main gc thread when it resumes activity after the mainThreadMonitor is notified
		 * We do not block. It's best effort. If the request is success full TRUE is returned via requested flag.
		 */
		if (FALSE == _gc->getRealtimeDelegate()->requestExclusiveVMAccess(_threadWaitingOnMainThreadMonitor, FALSE /* do not block */, &gcPriority)) {
			didGC = false;
			goto exit;
		}
		_gc->setGCThreadPriority(env->getOmrVMThread(), gcPriority);
	}

	omrthread_monitor_notify(_mainThreadMonitor);
	/* set the waiting thread to NULL while we are in the _mainThreadMonitor so that nobody else will notify the waiting thread */
	_threadWaitingOnMainThreadMonitor = NULL;

exit:
	if (_extensions->trackMutatorThreadCategory) {
		/* Done doing GC, reset the category back to the old one */
		omrthread_set_category(omrthread_self(), 0, J9THREAD_TYPE_SET_GC);
	}

	omrthread_monitor_exit(_mainThreadMonitor);
	_gc->getRealtimeDelegate()->postRequestExclusiveVMAccess(thr);

	return didGC;
}
381
382
/**
 * @return the number of threads participating in the environment's current
 *         task, or 1 when no task is dispatched (the caller is the only
 *         participant).
 */
uintptr_t
MM_Scheduler::getTaskThreadCount(MM_EnvironmentBase *env)
{
	MM_Task *currentTask = env->_currentTask;
	return (NULL == currentTask) ? 1 : currentTask->getThreadCount();
}
390
391
/**
 * Block the main GC thread until all mutators are stopped (exclusive VM
 * access acquired), transitioning _mode STOP_MUTATOR -> RUNNING_GC and
 * recording how long stopping the mutators took.
 */
void
MM_Scheduler::waitForMutatorsToStop(MM_EnvironmentRealtime *env)
{
	/* assumption: only main enters this */
	OMRPORT_ACCESS_FROM_ENVIRONMENT(env);

	/* we need to record how long it took to wait for the mutators to stop */
	U_64 exclusiveAccessTime = omrtime_hires_clock();

	/* The time before acquiring exclusive VM access is charged to the mutator but the time
	 * during the acquisition is conservatively charged entirely to the GC. */
	_utilTracker->addTimeSlice(env, env->getTimer(), true);
	omrthread_monitor_enter(_mainThreadMonitor);
	/* If main GC thread gets here without anybody requesting exclusive access for us
	 * (possible in a shutdown scenario after we kill alarm thread), the thread will request
	 * exclusive access for itself.
	 * requestExclusiveVMAccess is invoked atomically with _mode being set to WAKING_GC
	 * under mainThreadMonitor (see continueGC). Therefore, we check here if mode is not
	 * WAKING_GC, and only then we request exclusive assess for ourselves.
	 * TODO: This approach is just to fix some timing holes in shutdown. Consider removing this
	 * "if" statement and fix alarm thread not to die before requesting exclusive access for us.
	 */
	if (_mainThreadMustShutDown && _mode != WAKING_GC) {
		uintptr_t gcPriority = 0;
		_gc->getRealtimeDelegate()->requestExclusiveVMAccess(env, TRUE /* block */, &gcPriority);
		_gc->setGCThreadPriority(env->getOmrVMThread(), gcPriority);
	}
	/* Avoid another attempt to start up GC increment */
	_mode = STOP_MUTATOR;
	omrthread_monitor_exit(_mainThreadMonitor);

	/* completes (or performs) the acquisition initiated in continueGC or above */
	_gc->getRealtimeDelegate()->waitForExclusiveVMAccess(env, _exclusiveVMAccessRequired);

	_mode = RUNNING_GC;

	/* charge the whole stop-the-mutators interval to the stats block */
	_extensions->globalGCStats.metronomeStats._microsToStopMutators = omrtime_hires_delta(exclusiveAccessTime, omrtime_hires_clock(), OMRPORT_TIME_DELTA_IN_MICROSECONDS);
}
428
429
/**
 * Resume the mutators: flip _mode to WAKING_MUTATOR first, then release
 * exclusive VM access (order matters — mode must be published before
 * mutators can run again).
 */
void
MM_Scheduler::startMutators(MM_EnvironmentRealtime *env) {
	_mode = WAKING_MUTATOR;
	_gc->getRealtimeDelegate()->releaseExclusiveVMAccess(env, _exclusiveVMAccessRequired);
}
434
435
void
436
MM_Scheduler::startGCTime(MM_EnvironmentRealtime *env, bool isDoubleBeat)
437
{
438
if (env->isMainThread()) {
439
setStartTimeOfCurrentGCSlice(_utilTracker->addTimeSlice(env, env->getTimer(), false));
440
}
441
}
442
443
void
444
MM_Scheduler::stopGCTime(MM_EnvironmentRealtime *env)
445
{
446
if (env->isMainThread()) {
447
setStartTimeOfCurrentMutatorSlice(_utilTracker->addTimeSlice(env, env->getTimer(), false));
448
}
449
}
450
451
bool
452
MM_Scheduler::shouldGCDoubleBeat(MM_EnvironmentRealtime *env)
453
{
454
double targetUtilization = _utilTracker->getTargetUtilization();
455
if (targetUtilization <= 0.0) {
456
return true;
457
}
458
I_32 maximumAllowedConsecutiveBeats = (I_32) (1.0 / targetUtilization);
459
if (_currentConsecutiveBeats >= maximumAllowedConsecutiveBeats) {
460
return false;
461
}
462
/* Note that shouldGCDoubleBeat is only called by the main thread, this means we
463
* can call addTimeSlice without checking for isMainThread() */
464
_utilTracker->addTimeSlice(env, env->getTimer(), false);
465
double excessTime = (_utilTracker->getCurrentUtil() - targetUtilization) * window;
466
double excessBeats = excessTime / beat;
467
return (excessBeats >= 2.0);
468
}
469
470
bool
471
MM_Scheduler::shouldMutatorDoubleBeat(MM_EnvironmentRealtime *env, MM_Timer *timer)
472
{
473
_utilTracker->addTimeSlice(env, timer, true);
474
475
/* The call to currentUtil will modify the timeSlice array, so calls to shouldMutatorDoubleBeat
476
* must be protected by a mutex (which is indeed currently the case) */
477
double curUtil = _utilTracker->getCurrentUtil();
478
double excessTime = (curUtil - _utilTracker->getTargetUtilization()) * window;
479
double excessBeats = excessTime / beat;
480
return (excessBeats <= 1.0);
481
}
482
483
/**
 * Report the start of a GC increment: when the cycle has been degraded to
 * synchronous, latch the main-thread copy of that flag and emit the sync-GC
 * start traces first; then clear/emit the per-increment stats, hooks and
 * start charging GC time.
 */
void
MM_Scheduler::reportStartGCIncrement(MM_EnvironmentRealtime *env)
{
	OMRPORT_ACCESS_FROM_ENVIRONMENT(env);

	if(_completeCurrentGCSynchronously) {
		/* latch the flag into the main-thread-private copy used by yield logic */
		_completeCurrentGCSynchronouslyMainThreadCopy = true;
		U_64 exclusiveAccessTimeMicros = 0;
		U_64 meanExclusiveAccessIdleTimeMicros = 0;

		Trc_MM_SystemGCStart(env->getLanguageVMThread(),
			_extensions->heap->getApproximateActiveFreeMemorySize(MEMORY_TYPE_NEW),
			_extensions->heap->getActiveMemorySize(MEMORY_TYPE_NEW),
			_extensions->heap->getApproximateActiveFreeMemorySize(MEMORY_TYPE_OLD),
			_extensions->heap->getActiveMemorySize(MEMORY_TYPE_OLD),
			(_extensions-> largeObjectArea ? _extensions->heap->getApproximateActiveFreeLOAMemorySize(MEMORY_TYPE_OLD) : 0 ),
			(_extensions-> largeObjectArea ? _extensions->heap->getActiveLOAMemorySize(MEMORY_TYPE_OLD) : 0 )
		);

		exclusiveAccessTimeMicros = omrtime_hires_delta(0, env->getExclusiveAccessTime(), OMRPORT_TIME_DELTA_IN_MICROSECONDS);
		meanExclusiveAccessIdleTimeMicros = omrtime_hires_delta(0, env->getMeanExclusiveAccessIdleTime(), OMRPORT_TIME_DELTA_IN_MICROSECONDS);
		Trc_MM_ExclusiveAccess(env->getLanguageVMThread(),
			(U_32)(exclusiveAccessTimeMicros / 1000),
			(U_32)(exclusiveAccessTimeMicros % 1000),
			(U_32)(meanExclusiveAccessIdleTimeMicros / 1000),
			(U_32)(meanExclusiveAccessIdleTimeMicros % 1000),
			env->getExclusiveAccessHaltedThreads(),
			env->getLastExclusiveAccessResponder(),
			env->exclusiveAccessBeatenByOtherThread());

		_gc->reportSyncGCStart(env, _completeCurrentGCSynchronouslyReason, _completeCurrentGCSynchronouslyReasonParameter);
	}

	/* GC start/end are reported at each GC increment,
	 * not at the beginning/end of a GC cycle,
	 * since no Java code is supposed to run between those two events */
	_extensions->globalGCStats.metronomeStats.clearStart();
	_gc->reportGCStart(env);
	TRIGGER_J9HOOK_MM_PRIVATE_METRONOME_INCREMENT_START(_extensions->privateHookInterface, env->getOmrVMThread(), omrtime_hires_clock(), J9HOOK_MM_PRIVATE_METRONOME_INCREMENT_START, _extensions->globalGCStats.metronomeStats._microsToStopMutators);

	/* a fresh increment starts a new consecutive-beat run */
	_currentConsecutiveBeats = 1;
	startGCTime(env, false);

	_gc->flushCachesForGC(env);
}
528
529
/**
 * Report the end of a GC increment (and, when isCycleEnd, wind down any
 * synchronous-GC state before the increment-end hook fires).
 * @param isCycleEnd true when this increment also ends the GC cycle
 */
void
MM_Scheduler::reportStopGCIncrement(MM_EnvironmentRealtime *env, bool isCycleEnd)
{
	/* assumption: only main enters this */

	stopGCTime(env);

	/* This can not be combined with the reportGCCycleEnd below as it has to happen before
	 * the incrementEnd event is triggered.
	 */
	if (isCycleEnd) {
		if (_completeCurrentGCSynchronously) {
			/* The requests for Sync GC made at the very end of
			 * GC cycle might not had a chance to make the local copy
			 */
			if (_completeCurrentGCSynchronouslyMainThreadCopy) {
				Trc_MM_SystemGCEnd(env->getLanguageVMThread(),
					_extensions->heap->getApproximateActiveFreeMemorySize(MEMORY_TYPE_NEW),
					_extensions->heap->getActiveMemorySize(MEMORY_TYPE_NEW),
					_extensions->heap->getApproximateActiveFreeMemorySize(MEMORY_TYPE_OLD),
					_extensions->heap->getActiveMemorySize(MEMORY_TYPE_OLD),
					(_extensions->largeObjectArea ? _extensions->heap->getApproximateActiveFreeLOAMemorySize(MEMORY_TYPE_OLD) : 0 ),
					(_extensions->largeObjectArea ? _extensions->heap->getActiveLOAMemorySize(MEMORY_TYPE_OLD) : 0 )
				);
				_gc->reportSyncGCEnd(env);
				_completeCurrentGCSynchronouslyMainThreadCopy = false;
			}
			/* reset the shared sync-GC request state for the next cycle */
			_completeCurrentGCSynchronously = false;
			_completeCurrentGCSynchronouslyReason = UNKOWN_REASON;
		}
	}

	OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
	TRIGGER_J9HOOK_MM_PRIVATE_METRONOME_INCREMENT_END(_extensions->privateHookInterface, env->getOmrVMThread(), omrtime_hires_clock(), J9HOOK_MM_PRIVATE_METRONOME_INCREMENT_END,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	);

	/* GC start/end are reported at each GC increment,
	 * not at the beginning/end of a GC cycle,
	 * since no Java code is supposed to run between those two events */
	_gc->reportGCEnd(env);
	_extensions->globalGCStats.metronomeStats.clearEnd();
}
572
573
/**
 * Main-thread side of the increment handshake: resume the mutators, publish
 * MUTATOR mode together with this thread as the monitor waiter, then block
 * until continueGC (or shutdown) wakes us for the next increment.
 */
void
MM_Scheduler::restartMutatorsAndWait(MM_EnvironmentRealtime *env)
{
	startMutators(env);

	omrthread_monitor_enter(_mainThreadMonitor);
	/* Atomically change mode to MUTATOR and set threadWaitingOnMainThreadMonitor
	 * (only after the main is fully stoped, we switch from WAKING_MUTATOR to MUTATOR) */
	_mode = MUTATOR;
	_threadWaitingOnMainThreadMonitor = env;

	/* If we're shutting down, we don't want to wait. Note that this is safe
	 * since on shutdown, the only mutator thread left is the thread that is
	 * doing the shutdown.
	 */
	if (!_mainThreadMustShutDown) {
		omrthread_monitor_wait(_mainThreadMonitor);
		/* Main is awoken to either do another increment of GC or
		 * to shutdown (but never both)
		 */
		Assert_MM_true((isGCOn() && !_mainThreadMustShutDown) || (!_gcOn &&_mainThreadMustShutDown));
	}
	omrthread_monitor_exit(_mainThreadMonitor);
}
597
598
/**
 * Public entry point for the yield decision; all logic lives in
 * internalShouldGCYield().
 */
bool
MM_Scheduler::shouldGCYield(MM_EnvironmentRealtime *env, U_64 timeSlack)
{
	return internalShouldGCYield(env, timeSlack);
}
603
604
/**
 * Test whether it's time for the GC to yield, and whether yielding is currently enabled.
 * To enhance the generality of methods that may call this method, the call may occur on
 * a non-GC thread, in which case this method does nothing.
 * The order of the checks below is significant: synchronous-mode, thread type
 * and concurrent-phase short-circuits come before any time accounting.
 * @param timeSlack a slack factor to apply to time-based scheduling
 * @return true if the GC thread should yield, false otherwise
 */
MMINLINE bool
MM_Scheduler::internalShouldGCYield(MM_EnvironmentRealtime *env, U_64 timeSlack)
{
	if (_completeCurrentGCSynchronouslyMainThreadCopy) {
		/* If we have degraded to a synchronous GC, don't yield until finished */
		return false;
	}
	/* Be harmless when called indirectly on mutator thread */
	if (env->getThreadType() == MUTATOR_THREAD) {
		return false;
	}
	/* The GC does not have to yield when ConcurrentTracing or ConcurrentSweeping is
	 * enabled since the GC is not holding exclusive access.
	 */
	if (_gc->isCollectorConcurrentTracing() || _gc->isCollectorConcurrentSweeping()) {
		return false;
	}

	/* If at least one thread thinks we should yield, than all should yield.
	 * Discrepancy may happen due different timeSlack that GC threads may have */
	if (_shouldGCYield) {
		return true;
	}

	/* a pending distance-to-yield check defers the time-based decision */
	if (env->hasDistanceToYieldTimeCheck()) {
		return false;
	}

	I_64 nanosLeft = _utilTracker->getNanosLeft(env, getStartTimeOfCurrentGCSlice());
	if (nanosLeft > 0) {
		if ((U_64)nanosLeft > timeSlack) {
			return false;
		}
	}
	/* out of budget: publish the decision so every GC thread yields together */
	_shouldGCYield = true;
	return true;
}
650
651
/**
 * Thin forwarding wrapper around condYieldFromGC() (kept for callers bound
 * to this signature).
 */
bool
MM_Scheduler::condYieldFromGCWrapper(MM_EnvironmentBase *env, U_64 timeSlack)
{
	return condYieldFromGC(env, timeSlack);
}
656
657
/**
658
* Test whether it's time for the GC to yield, and whether yielding is currently enabled, and
659
* if appropriate actually do the yielding. To enhance the generality of methods that may
660
* call this method, the call may occur on a non-GC thread, in which case this method does
661
* nothing.
662
* @param location the phase of the GC during which this call is occurring (for tracing: in
663
* some cases may be approximate).
664
* @param timeSlack a slack factor to apply to time-based scheduling
665
* @return true if yielding actually occurred, false otherwise
666
*/
667
bool
668
MM_Scheduler::condYieldFromGC(MM_EnvironmentBase *envBase, U_64 timeSlack)
669
{
670
MM_EnvironmentRealtime *env = MM_EnvironmentRealtime::getEnvironment(envBase);
671
672
if (env->getYieldDisableDepth() > 0) {
673
return false;
674
}
675
if (!internalShouldGCYield(env, timeSlack)) {
676
return false;
677
}
678
679
yieldFromGC(env, true);
680
681
env->resetCurrentDistanceToYieldTimeCheck();
682
683
return true;
684
}
685
686
/**
 * Perform the actual yield from GC work. On the main thread: gather the
 * workers (via the yield collaborator, when present), then either take
 * another consecutive beat (double beat) or end this increment, let the
 * mutators run, and wait to resume the next increment. Worker threads simply
 * park on the collaborator until the main thread resumes them.
 * @param distanceChecked unused by the visible body (kept for interface compatibility)
 */
void MM_Scheduler::yieldFromGC(MM_EnvironmentRealtime *env, bool distanceChecked)
{
	assert(!_gc->isCollectorConcurrentTracing());
	assert(!_gc->isCollectorConcurrentSweeping());
	if (env->isMainThread()) {
		if (_yieldCollaborator) {
			/* wait for workers to yield/sync */
			_yieldCollaborator->yield(env);
		}

		/* decision is published to workers through _sharedBarrierState */
		_sharedBarrierState = shouldGCDoubleBeat(env);

		if (_sharedBarrierState) {
			_currentConsecutiveBeats += 1;
			startGCTime(env, true);
		} else {
			reportStopGCIncrement(env);
			env->reportScanningSuspended();
			Assert_MM_true(isGCOn());
			restartMutatorsAndWait(env);
			waitForMutatorsToStop(env);
			env->reportScanningResumed();
			reportStartGCIncrement(env);
			/* new increment: clear the collective yield request */
			_shouldGCYield = false;
		}

		if (_yieldCollaborator) {
			_yieldCollaborator->resumeWorkersFromYield(env);
		}

	} else {
		/* Worker only running here. _yieldCollaborator instance exists for sure */
		env->reportScanningSuspended();
		_yieldCollaborator->yield(env);
		env->reportScanningResumed();
	}
}
723
724
/**
 * Reserve `threadCount` worker slots for `task`, wake the participating
 * threads and install the task's yield collaborator. Status/task tables are
 * updated under _workerThreadMutex so workers observe a consistent dispatch.
 */
void
MM_Scheduler::prepareThreadsForTask(MM_EnvironmentBase *env, MM_Task *task, uintptr_t threadCount)
{
	omrthread_monitor_enter(_workerThreadMutex);
	_workerThreadsReservedForGC = true;

	task->setSynchronizeMutex(_synchronizeMutex);

	for (uintptr_t index=0; index < threadCount; index++) {
		_statusTable[index] = worker_status_reserved;
		_taskTable[index] = task;
	}

	wakeUpThreads(threadCount);
	omrthread_monitor_exit(_workerThreadMutex);

	/* tasks dispatched by this scheduler are incremental-parallel; adopt their collaborator */
	pushYieldCollaborator(((MM_IncrementalParallelTask *)task)->getYieldCollaborator());
}
742
743
void
744
MM_Scheduler::completeTask(MM_EnvironmentBase *env)
745
{
746
if (env->isMainThread()) {
747
popYieldCollaborator();
748
}
749
MM_ParallelDispatcher::completeTask(env);
750
}
751
752
/**
 * Start the GC worker threads, mark the scheduler initialized, and only then
 * start the metronome alarm thread (which drives time-based scheduling).
 * Refuses to start when more GC threads than processors are configured.
 * @return true on success, false on any startup failure (message printed)
 */
bool
MM_Scheduler::startUpThreads()
{
	OMRPORT_ACCESS_FROM_OMRVM(_vm);
	MM_EnvironmentRealtime env(_vm);

	if (_extensions->gcThreadCount > _osInterface->getNumbersOfProcessors()) {
		omrtty_printf("Please specify fewer GC threads than the number of physical processors.\n");
		return false;
	}

	/* Start up the GC threads */
	if (!MM_ParallelDispatcher::startUpThreads()) {
		return false;
	}

	/* At this point, all GC threads have signalled that they are ready.
	 * However, because Metronome uses omrthread_suspend/omrthread_resume to stop and
	 * start threads, there is a race: the thread may have been preempted after
	 * signalling but before suspending itself. An alternative may be to use
	 * omrthread_park/unpark.
	 */
	_isInitialized = true;

	/* Now that the GC threads are started, it is safe to start the alarm thread */
	_alarmThread = MM_MetronomeAlarmThread::newInstance(&env);
	if (_alarmThread == NULL) {
		omrtty_printf("Unable to initialize alarm thread for time-based GC scheduling\n");
		omrtty_printf("Most likely cause is non-supported version of OS\n");
		return false;
	}

	if (verbose() >= 1) {
		showParameters(&env);
	}

	return true;
}
790
791
/**
 * @copydoc MM_ParallelDispatcher::recomputeActiveThreadCount()
 * This function is called at the start of a complete GC cycle to calculate the number of
 * GC threads to use for the cycle. The realtime scheduler always uses the
 * full configured thread count.
 */
void
MM_Scheduler::recomputeActiveThreadCount(MM_EnvironmentBase *env)
{
	_activeThreadCount = _threadCount;
}
801
802
/**
 * @copydoc MM_ParallelDispatcher::getThreadPriority()
 * @return the priority GC threads are started with — one above the maximum
 *         user priority so GC work preempts application threads.
 */
uintptr_t
MM_Scheduler::getThreadPriority()
{
	/* this is the priority that the threads are started with */
	return J9THREAD_PRIORITY_USER_MAX + 1;
}
811
812
/**
 * @copydoc MM_MetronomeDispatcher::workerEntryPoint()
 * Worker thread main loop: wait (under _workerThreadMutex) for a task to be
 * dispatched, run it with the mutex released, then clean up — until the
 * status table marks this worker as dying.
 */
void
MM_Scheduler::workerEntryPoint(MM_EnvironmentBase *envModron)
{
	MM_EnvironmentRealtime *env = MM_EnvironmentRealtime::getEnvironment(envModron);

	uintptr_t workerID = env->getWorkerID();

	setThreadInitializationComplete(env);

	omrthread_monitor_enter(_workerThreadMutex);

	while(worker_status_dying != _statusTable[workerID]) {
		/* Wait for a task to be dispatched to the worker thread */
		while(worker_status_waiting == _statusTable[workerID]) {
			omrthread_monitor_wait(_workerThreadMutex);
		}

		if(worker_status_reserved == _statusTable[workerID]) {
			/* Found a task to dispatch to - do prep work for dispatch */
			acceptTask(env);
			/* run the task outside the mutex so other workers can dispatch too */
			omrthread_monitor_exit(_workerThreadMutex);

			env->_currentTask->run(env);

			omrthread_monitor_enter(_workerThreadMutex);
			/* Returned from task - do clean up work from dispatch */
			completeTask(env);
		}
	}
	omrthread_monitor_exit(_workerThreadMutex);
}
846
847
/**
 * @copydoc MM_ParallelDispatcher::mainEntryPoint()
 * Main GC thread loop: park on _mainThreadMonitor until the first wake-up
 * (from continueGC or shutdown), then alternate stop-mutators / collect /
 * restart-mutators increments until told to die.
 */
void
MM_Scheduler::mainEntryPoint(MM_EnvironmentBase *envModron)
{
	MM_EnvironmentRealtime *env = MM_EnvironmentRealtime::getEnvironment(envModron);

	setThreadInitializationComplete(env);

	/* publish ourselves as the waiter so continueGC can notify us */
	omrthread_monitor_enter(_mainThreadMonitor);
	_threadWaitingOnMainThreadMonitor = env;
	omrthread_monitor_wait(_mainThreadMonitor);
	omrthread_monitor_exit(_mainThreadMonitor);

	/* We want to execute the body of the do-while (run a gc) if a shutdown has
	 * been requested at the same time as the first gc. In other words, we want
	 * a gc to complete before shutting down.
	 *
	 * We however do not want to execute a gc if it hasn't been requested. The
	 * outer while loop guarantees this. It is a while loop (as opposed to an
	 * if) to cover the case of simultaneous gc/shutdown while waiting in
	 * stopGCIntervalAndWait. Again, we want to complete the gc in that case.
	 */
	while (isGCOn()) {
		do {
			/* Before starting a new GC, recompute the number of threads to use */
			recomputeActiveThreadCount(env);
			waitForMutatorsToStop(env);
			/* note that the cycle and increment start events will be posted from MM_RealtimeGC::internalPreCollect */
			_gc->_memorySubSpace->collect(env, _gcCode);
			restartMutatorsAndWait(env);

			/* We must also check for the _mainThreadMustShutDown flag since if we
			 * try to shutdown while we're in a stopGCIntervalAndWait, the GC will
			 * continue potentially changing the status of the main thread
			 */
		} while ((worker_status_dying != _statusTable[env->getWorkerID()] && !_mainThreadMustShutDown));
	}
	/* TODO: tear down the thread before exiting */
}
888
889
/**
890
* If there is an ongoing GC cycle complete it
891
*/
892
void
893
MM_Scheduler::completeCurrentGCSynchronously(MM_EnvironmentRealtime *env)
894
{
895
omrthread_monitor_enter(_vm->_gcCycleOnMonitor);
896
if (_vm->_gcCycleOn || isGCOn()) {
897
_completeCurrentGCSynchronously = true;
898
_completeCurrentGCSynchronouslyReason = VM_SHUTDOWN;
899
900
/* wait till get notified by main that the cycle is finished */
901
omrthread_monitor_wait(_vm->_gcCycleOnMonitor);
902
}
903
omrthread_monitor_exit(_vm->_gcCycleOnMonitor);
904
}
905
906
/**
907
* @copydoc MM_ParallelDispatcher::wakeUpThreads()
908
*/
909
void
910
MM_Scheduler::wakeUpThreads(uintptr_t count)
911
{
912
assert1(count > 0);
913
914
/* Resume the main thread */
915
omrthread_monitor_enter(_mainThreadMonitor);
916
omrthread_monitor_notify(_mainThreadMonitor);
917
omrthread_monitor_exit(_mainThreadMonitor);
918
919
if (count > 1) {
920
wakeUpWorkerThreads(count - 1);
921
}
922
}
923
924
/**
925
* Wakes up `count` worker threads. This function will actually busy wait until
926
* `count` number of workers have been resumed from the suspended state.
927
*
928
* @param count Number of worker threads to wake up
929
*/
930
void
931
MM_Scheduler::wakeUpWorkerThreads(uintptr_t count)
932
{
933
omrthread_monitor_notify_all(_workerThreadMutex);
934
}
935
936
/**
937
* @copydoc MM_ParallelDispatcher::shutDownThreads()
938
*/
939
void
940
MM_Scheduler::shutDownThreads()
941
{
942
/* This will stop threads from requesting another GC cycle to start*/
943
_isInitialized = false;
944
945
/* If the GC is currently in a Cycle complete it before we shutdown */
946
completeCurrentGCSynchronously();
947
948
/* Don't kill the main thread before the alarm thread since the alarm thread
949
* may still refer to the main thread if a continueGC happens to occur during
950
* shutdown.
951
*/
952
shutDownWorkerThreads();
953
954
/* Don't kill the alarm thread until after the GC worker threads, since it may
955
* be needed to drive a final synchronous GC */
956
if (_alarmThread) {
957
MM_EnvironmentBase env(_vm);
958
_alarmThread->kill(&env);
959
_alarmThread = NULL;
960
}
961
962
/* Now that the alarm and trace threads are killed, we can shutdown the main thread */
963
shutDownMainThread();
964
}
965
966
/**
967
* Signals the workers to shutdown, will block until they are all shutdown.
968
*
969
* @note Assumes all threads are live before the function is called (ie: this
970
* must be called before shutDownMainThread)
971
*/
972
void
973
MM_Scheduler::shutDownWorkerThreads()
974
{
975
/* If _threadShutdownCount is 1, only the main must shutdown, if 0,
976
* no shutdown required (happens when args passed to java are invalid
977
* so the vm doesn't actually start up, ex: -Xgc:threads=5 on a 4-way
978
* box)
979
*/
980
if (_threadShutdownCount <= 1) {
981
return;
982
}
983
984
omrthread_monitor_enter(_workerThreadMutex);
985
986
for (uintptr_t threadIndex = 1; threadIndex < _threadCountMaximum; threadIndex++) {
987
_statusTable[threadIndex] = worker_status_dying;
988
}
989
990
_threadCount = 1;
991
992
wakeUpWorkerThreads(_threadShutdownCount - 1);
993
994
omrthread_monitor_exit(_workerThreadMutex);
995
996
/* -1 because the thread shutdown count includes the main thread */
997
omrthread_monitor_enter(_dispatcherMonitor);
998
999
while (1 != _threadShutdownCount) {
1000
omrthread_monitor_wait(_dispatcherMonitor);
1001
}
1002
1003
omrthread_monitor_exit(_dispatcherMonitor);
1004
}
1005
1006
/**
1007
* Signals the main to shutdown, will block until the thread is shutdown.
1008
*
1009
* @note Assumes the main thread is the last gc thread left (ie: this must be
1010
* called after shutDownWorkerThreads)
1011
*/
1012
void
1013
MM_Scheduler::shutDownMainThread()
1014
{
1015
omrthread_monitor_enter(_workerThreadMutex);
1016
_statusTable[0] = worker_status_dying;
1017
omrthread_monitor_exit(_workerThreadMutex);
1018
1019
/* Note: Calling wakeUpThreads at this point would be unsafe since there is
1020
* more than 1 location where the main thread could be waiting and the one
1021
* in stopGcIntervalAndWait [which ultimately gets invoked by the
1022
* condYieldFromGC] assumes that a request for exclusive VM access on behalf
1023
* of the main has been made. Blindly notifying the monitor [as
1024
* wakeUpThreads does] would cause the main thread to wait for exclusive
1025
* access without requesting for it first, causing a hang.
1026
*/
1027
omrthread_monitor_enter(_mainThreadMonitor);
1028
_mainThreadMustShutDown = true;
1029
omrthread_monitor_notify(_mainThreadMonitor);
1030
omrthread_monitor_exit(_mainThreadMonitor);
1031
1032
omrthread_monitor_enter(_dispatcherMonitor);
1033
while (0 != _threadShutdownCount) {
1034
omrthread_monitor_wait(_dispatcherMonitor);
1035
}
1036
omrthread_monitor_exit(_dispatcherMonitor);
1037
}
1038
1039
/**
1040
* Check to see if it is time to do the next GC increment. If beatNanos time
1041
* has elapsed since the end of the last GC increment then start the next
1042
* increment now.
1043
*/
1044
void
1045
MM_Scheduler::startGCIfTimeExpired(MM_EnvironmentBase *envModron)
1046
{
1047
MM_EnvironmentRealtime *env = MM_EnvironmentRealtime::getEnvironment(envModron);
1048
if (isInitialized() && isGCOn() && env->getTimer()->hasTimeElapsed(getStartTimeOfCurrentMutatorSlice(), beatNanos)) {
1049
continueGC(env, TIME_TRIGGER, 0, env->getOmrVMThread(), true);
1050
}
1051
}
1052
1053
/**
 * Atomically bump the mutator thread count.
 * @return the value reported by MM_AtomicOperations::add for _mutatorCount
 */
uintptr_t
MM_Scheduler::incrementMutatorCount()
{
	return MM_AtomicOperations::add(&_mutatorCount, 1);
}
extern "C" {
1060
1061
void
1062
j9gc_startGCIfTimeExpired(OMR_VMThread* vmThread)
1063
{
1064
MM_EnvironmentRealtime *env = MM_EnvironmentRealtime::getEnvironment(vmThread);
1065
MM_Scheduler *scheduler = (MM_Scheduler *)env->getExtensions()->dispatcher;
1066
scheduler->startGCIfTimeExpired(env);
1067
}
1068
1069
}
1070
1071