GitHub Repository: PojavLauncherTeam/openj9
Path: blob/master/runtime/gc_realtime/RealtimeGC.cpp
/*******************************************************************************
 * Copyright (c) 1991, 2020 IBM Corp. and others
 *
 * This program and the accompanying materials are made available under
 * the terms of the Eclipse Public License 2.0 which accompanies this
 * distribution and is available at https://www.eclipse.org/legal/epl-2.0/
 * or the Apache License, Version 2.0 which accompanies this distribution and
 * is available at https://www.apache.org/licenses/LICENSE-2.0.
 *
 * This Source Code may also be made available under the following
 * Secondary Licenses when the conditions for such availability set
 * forth in the Eclipse Public License, v. 2.0 are satisfied: GNU
 * General Public License, version 2 with the GNU Classpath
 * Exception [1] and GNU General Public License, version 2 with the
 * OpenJDK Assembly Exception [2].
 *
 * [1] https://www.gnu.org/software/classpath/license.html
 * [2] http://openjdk.java.net/legal/assembly-exception.html
 *
 * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 OR GPL-2.0 WITH Classpath-exception-2.0 OR LicenseRef-GPL-2.0 WITH Assembly-exception
 *******************************************************************************/

#include "omr.h"
#include "omrcfg.h"
#include "gcutils.h"

#include <string.h>

#include "RealtimeGC.hpp"

#include "AllocateDescription.hpp"
#include "CycleState.hpp"
#include "EnvironmentRealtime.hpp"
#include "GlobalAllocationManagerSegregated.hpp"
#include "Heap.hpp"
#include "HeapRegionDescriptorRealtime.hpp"
#include "MemoryPoolSegregated.hpp"
#include "MemorySubSpace.hpp"
#include "modronapicore.hpp"
#include "OMRVMInterface.hpp"
#include "OSInterface.hpp"
#include "ParallelDispatcher.hpp"
#include "RealtimeMarkingScheme.hpp"
#include "RealtimeMarkTask.hpp"
#include "RealtimeSweepTask.hpp"
#include "ReferenceChainWalkerMarkMap.hpp"
#include "RememberedSetSATB.hpp"
#include "Scheduler.hpp"
#include "SegregatedAllocationInterface.hpp"
#include "SublistFragment.hpp"
#include "SweepSchemeRealtime.hpp"
#include "Task.hpp"
#include "WorkPacketsRealtime.hpp"

/* TuningFork name/version information for gc_staccato */
#define TUNINGFORK_STACCATO_EVENT_SPACE_NAME "com.ibm.realtime.vm.trace.gc.metronome"
#define TUNINGFORK_STACCATO_EVENT_SPACE_VERSION 200

MM_RealtimeGC *
MM_RealtimeGC::newInstance(MM_EnvironmentBase *env)
{
	MM_RealtimeGC *globalGC = (MM_RealtimeGC *)env->getForge()->allocate(sizeof(MM_RealtimeGC), MM_AllocationCategory::FIXED, OMR_GET_CALLSITE());
	if (globalGC) {
		new(globalGC) MM_RealtimeGC(env);
		if (!globalGC->initialize(env)) {
			globalGC->kill(env);
			globalGC = NULL;
		}
	}
	return globalGC;
}
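
/* Usage sketch (hypothetical call site, not from this file): the collector is created
 * through this factory rather than plain operator new, so storage comes from the OMR
 * forge and a failed initialize() tears the instance down immediately:
 *
 *   MM_RealtimeGC *gc = MM_RealtimeGC::newInstance(env);
 *   if (NULL == gc) {
 *       // allocation or initialize() failed; newInstance() already called kill()
 *   }
 */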

void
MM_RealtimeGC::kill(MM_EnvironmentBase *env)
{
	tearDown(env);
	env->getForge()->free(this);
}

void
MM_RealtimeGC::setGCThreadPriority(OMR_VMThread *vmThread, uintptr_t newGCThreadPriority)
{
	if (newGCThreadPriority == (uintptr_t)_currentGCThreadPriority) {
		return;
	}

	Trc_MM_GcThreadPriorityChanged(vmThread->_language_vmthread, newGCThreadPriority);

	/* Walk through all GC threads and set the priority */
	omrthread_t *gcThreadTable = _sched->getThreadTable();
	for (uintptr_t i = 0; i < _sched->threadCount(); i++) {
		omrthread_set_priority(gcThreadTable[i], newGCThreadPriority);
	}
	_currentGCThreadPriority = (intptr_t)newGCThreadPriority;
}

/**
 * Initialization.
 */
bool
MM_RealtimeGC::initialize(MM_EnvironmentBase *env)
{
	_gcPhase = GC_PHASE_IDLE;
	_extensions->realtimeGC = this;
	_allowGrowth = false;

	if (_extensions->gcTrigger == 0) {
		_extensions->gcTrigger = (_extensions->memoryMax / 2);
		_extensions->gcInitialTrigger = (_extensions->memoryMax / 2);
	}

	_extensions->distanceToYieldTimeCheck = 0;

	/* Only SRT passes this check, as the command line option to specify beatMicro is only enabled on SRT */
	if (METRONOME_DEFAULT_BEAT_MICRO != _extensions->beatMicro) {
		/* User-specified quanta time; adjust the related parameters */
		_extensions->timeWindowMicro = 20 * _extensions->beatMicro;
		/* Currently all supported SRT platforms (AIX and Linux) can only use HRT for the alarm thread
		 * implementation. The default HRT period is 1/3 of the default quanta: a 1 msec HRT period for
		 * a 3 msec quanta. We attempt to adjust the HRT period to 1/3 of the specified quanta.
		 */
		uintptr_t hrtPeriodMicro = _extensions->beatMicro / 3;
		if ((hrtPeriodMicro < METRONOME_DEFAULT_HRT_PERIOD_MICRO) && (METRONOME_DEFAULT_HRT_PERIOD_MICRO < _extensions->beatMicro)) {
			/* If the adjusted value is too small for the hires clock resolution, use the default HRT
			 * period, provided that the default period is smaller than the specified quanta time.
			 * Otherwise we fail to initialize the alarm thread with an error message.
			 */
			hrtPeriodMicro = METRONOME_DEFAULT_HRT_PERIOD_MICRO;
		}
		Assert_MM_true(0 != hrtPeriodMicro);
		_extensions->hrtPeriodMicro = hrtPeriodMicro;

		/* On Windows SRT we still use an interrupt-based alarm. Set the interrupt period the same as
		 * the hires timer period. We will fail to init the alarm if this is too small a resolution for Windows.
		 */
		_extensions->itPeriodMicro = _extensions->hrtPeriodMicro;

		/* If the user-specified pause time is larger than the default value, calculate whether there is
		 * an opportunity for the GC to check the time less often inside condYieldFromGC.
		 */
		if (METRONOME_DEFAULT_BEAT_MICRO < _extensions->beatMicro) {
			uintptr_t intervalToSkipYieldCheckMicro = _extensions->beatMicro - METRONOME_DEFAULT_BEAT_MICRO;
			uintptr_t maxInterYieldTimeMicro = INTER_YIELD_MAX_NS / 1000;
			_extensions->distanceToYieldTimeCheck = (U_32)(intervalToSkipYieldCheckMicro / maxInterYieldTimeMicro);
		}
	}
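
	/* Worked example (hypothetical numbers, assuming the defaults described above: a
	 * 3000us beat and a 1000us HRT period): with a user-specified beatMicro of 6000us,
	 * timeWindowMicro becomes 20 * 6000 = 120000us and the candidate hrtPeriodMicro is
	 * 6000 / 3 = 2000us; since 2000 is not below the 1000us default, it is kept. With a
	 * 1500us beat, the 500us candidate is below the default, so 1000us is used instead.
	 * For the 6000us beat, distanceToYieldTimeCheck works out to
	 * (6000 - 3000) / (INTER_YIELD_MAX_NS / 1000) skipped checks between time reads.
	 */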

	_osInterface = MM_OSInterface::newInstance(env);
	if (_osInterface == NULL) {
		return false;
	}

	_sched = (MM_Scheduler *)_extensions->dispatcher;

	_workPackets = allocateWorkPackets(env);
	if (_workPackets == NULL) {
		return false;
	}

	_markingScheme = MM_RealtimeMarkingScheme::newInstance(env, this);
	if (NULL == _markingScheme) {
		return false;
	}

	if (!_delegate.initialize(env, NULL, NULL)) {
		return false;
	}

	_sweepScheme = MM_SweepSchemeRealtime::newInstance(env, this, _sched, _markingScheme->getMarkMap());
	if (NULL == _sweepScheme) {
		return false;
	}

	if (!_realtimeDelegate.initialize(env)) {
		return false;
	}

	_extensions->sATBBarrierRememberedSet = MM_RememberedSetSATB::newInstance(env, _workPackets);
	if (NULL == _extensions->sATBBarrierRememberedSet) {
		return false;
	}

	_stopTracing = false;

	_sched->collectorInitialized(this);

	return true;
}

/**
 * Teardown.
 */
void
MM_RealtimeGC::tearDown(MM_EnvironmentBase *env)
{
	_delegate.tearDown(env);
	_realtimeDelegate.tearDown(env);

	if (NULL != _sched) {
		_sched->kill(env);
		_sched = NULL;
	}

	if (NULL != _osInterface) {
		_osInterface->kill(env);
		_osInterface = NULL;
	}

	if (NULL != _workPackets) {
		_workPackets->kill(env);
		_workPackets = NULL;
	}

	if (NULL != _markingScheme) {
		_markingScheme->kill(env);
		_markingScheme = NULL;
	}

	if (NULL != _sweepScheme) {
		_sweepScheme->kill(env);
		_sweepScheme = NULL;
	}

	if (NULL != _extensions->sATBBarrierRememberedSet) {
		_extensions->sATBBarrierRememberedSet->kill(env);
		_extensions->sATBBarrierRememberedSet = NULL;
	}
}

/**
 * @copydoc MM_GlobalCollector::mainSetupForGC()
 */
void
MM_RealtimeGC::mainSetupForGC(MM_EnvironmentBase *env)
{
	/* Reset memory pools of associated memory spaces */
	env->_cycleState->_activeSubSpace->reset();

	_workPackets->reset(env);

	/* Clear the GC stats structure */
	clearGCStats();

	_realtimeDelegate.mainSetupForGC(env);
}

/**
 * @copydoc MM_GlobalCollector::mainCleanupAfterGC()
 */
void
MM_RealtimeGC::mainCleanupAfterGC(MM_EnvironmentBase *env)
{
	_realtimeDelegate.mainCleanupAfterGC(env);
}

/**
 * Thread initialization (no-op for the realtime collector).
 */
void
MM_RealtimeGC::workerSetupForGC(MM_EnvironmentBase *env)
{
}

/**
 * Clear both the common and the realtime-specific GC statistics.
 */
void
MM_RealtimeGC::clearGCStats()
{
	_extensions->globalGCStats.clear();
	_realtimeDelegate.clearGCStats();
}

/**
 * Merge GC statistics (no-op for the realtime collector).
 */
void
MM_RealtimeGC::mergeGCStats(MM_EnvironmentBase *env)
{
}

uintptr_t
MM_RealtimeGC::verbose(MM_EnvironmentBase *env)
{
	return _sched->verbose();
}

/**
 * @note only called by main thread.
 */
void
MM_RealtimeGC::doAuxiliaryGCWork(MM_EnvironmentBase *env)
{
	_realtimeDelegate.doAuxiliaryGCWork(env);

	/* Restart the caches for all threads. */
	GC_OMRVMThreadListIterator vmThreadListIterator(_vm);
	OMR_VMThread *walkThread;
	while ((walkThread = vmThreadListIterator.nextOMRVMThread()) != NULL) {
		MM_EnvironmentBase *walkEnv = MM_EnvironmentBase::getEnvironment(walkThread);
		((MM_SegregatedAllocationInterface *)(walkEnv->_objectAllocationInterface))->restartCache(walkEnv);
	}

	mergeGCStats(env);
}

/**
 * Incremental Collector.
 * Employs a double write barrier that records the (new) values written by unscanned threads and
 * also the first (old) value overwritten by any thread (the latter as in a Yuasa barrier).
 * @note only called by main thread.
 */
void
MM_RealtimeGC::incrementalCollect(MM_EnvironmentRealtime *env)
{
	OMRPORT_ACCESS_FROM_ENVIRONMENT(env);

	mainSetupForGC(env);

	_realtimeDelegate.incrementalCollectStart(env);

	/* Make sure all threads notice GC is ongoing with a barrier. */
	_extensions->globalGCStats.gcCount++;
	if (verbose(env) >= 2) {
		omrtty_printf("RealtimeGC::incrementalCollect\n");
	}
	if (verbose(env) >= 3) {
		omrtty_printf("RealtimeGC::incrementalCollect setup and root phase\n");
	}
	if (env->_cycleState->_gcCode.isOutOfMemoryGC()) {
		env->_cycleState->_referenceObjectOptions |= MM_CycleState::references_soft_as_weak;
	}

	setCollectorRootMarking();

	reportMarkStart(env);
	MM_RealtimeMarkTask markTask(env, _sched, this, _markingScheme, env->_cycleState);
	_sched->run(env, &markTask);
	reportMarkEnd(env);

	_realtimeDelegate.incrementalCollect(env);

	/*
	 * Sweeping.
	 */
	reportSweepStart(env);
	MM_RealtimeSweepTask sweepTask(env, _sched, _sweepScheme);
	_sched->run(env, &sweepTask);
	reportSweepEnd(env);

	doAuxiliaryGCWork(env);

	/* Get all components to clean up after themselves at the end of a collect */
	mainCleanupAfterGC(env);

	_sched->condYieldFromGC(env);
	setCollectorIdle();

	if (verbose(env) >= 3) {
		omrtty_printf("RealtimeGC::incrementalCollect gc complete %d MB in use\n", _memoryPool->getBytesInUse() >> 20);
	}
}
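
/* Illustrative sketch of the double write barrier described above (hypothetical helper
 * names; the real barrier lives in the language-side delegate, not in this file). On a
 * reference store, every thread snapshots the old value (the Yuasa/SATB half), and a
 * thread whose stack has not yet been scanned also records the new value:
 *
 *   void doubleWriteBarrier(Thread *mutator, Object **slot, Object *newValue) {
 *       remember(*slot);          // Yuasa half: old value saved by all threads
 *       if (!mutator->isScanned()) {
 *           remember(newValue);   // second half: new values from unscanned threads
 *       }
 *       *slot = newValue;
 *   }
 */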

void
MM_RealtimeGC::flushCachedFullRegions(MM_EnvironmentBase *env)
{
	/* Delegate to the memory pool to perform the flushing of per-context full regions to the region pool */
	_memoryPool->flushCachedFullRegions(env);
}

/**
 * This function is called at the end of tracing when it is safe for threads to stop
 * allocating black and return to allocating white. It iterates through all the threads
 * and sets their allocationColor to GC_UNMARK. It also sets the new thread allocation
 * color to GC_UNMARK.
 **/
void
MM_RealtimeGC::allThreadsAllocateUnmarked(MM_EnvironmentBase *env)
{
	GC_OMRVMInterface::flushCachesForGC(env);
	GC_OMRVMThreadListIterator vmThreadListIterator(_vm);

	while (OMR_VMThread *aThread = vmThreadListIterator.nextOMRVMThread()) {
		MM_EnvironmentRealtime *threadEnv = MM_EnvironmentRealtime::getEnvironment(aThread);
		assume0(threadEnv->getAllocationColor() == GC_MARK);
		threadEnv->setAllocationColor(GC_UNMARK);
		threadEnv->setMonitorCacheCleared(FALSE);
	}
	_extensions->newThreadAllocationColor = GC_UNMARK;
}
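
/* Color protocol recap (terminology from the comment above; a summary, not additional
 * behavior): while tracing is active, mutators allocate "black" (GC_MARK) so brand-new
 * objects cannot be reclaimed by the in-flight mark; once tracing is done, this routine
 * flips every live thread, and the default for future threads, back to "white" (GC_UNMARK).
 */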

/****************************************
 * VM Garbage Collection API
 ****************************************
 */
/**
 * Prepare the cycle state and report cycle/increment start events before a collect.
 */
void
MM_RealtimeGC::internalPreCollect(MM_EnvironmentBase *env, MM_MemorySubSpace *subSpace, MM_AllocateDescription *allocDescription, U_32 gcCode)
{
	/* Set up the main thread cycle state */
	_cycleState = MM_CycleState();
	env->_cycleState = &_cycleState;
	env->_cycleState->_gcCode = MM_GCCode(gcCode);
	env->_cycleState->_type = _cycleType;
	env->_cycleState->_activeSubSpace = subSpace;

	/* If we are at an excessiveGC level beyond normal, then an aggressive GC is
	 * conducted to free up as much space as possible
	 */
	if (!env->_cycleState->_gcCode.isExplicitGC()) {
		if (excessive_gc_normal != _extensions->excessiveGCLevel) {
			/* convert the current mode to excessive GC mode */
			env->_cycleState->_gcCode = MM_GCCode(J9MMCONSTANT_IMPLICIT_GC_EXCESSIVE);
		}
	}

	/* The minimum free entry size is always re-adjusted at the end of a cycle.
	 * But if the current cycle is triggered due to OOM, at the start of the cycle
	 * set the minimum free entry size to the smallest size class: 16 bytes.
	 */
	if (env->_cycleState->_gcCode.isOutOfMemoryGC()) {
		_memoryPool->setMinimumFreeEntrySize((1 << J9VMGC_SIZECLASSES_LOG_SMALLEST));
	}

	MM_EnvironmentRealtime *rtEnv = MM_EnvironmentRealtime::getEnvironment(env);
	/* Having the heap walkable after the end of GC may be explicitly required through a command line option or by GC Check */
	if (rtEnv->getExtensions()->fixHeapForWalk) {
		_fixHeapForWalk = true;
	}
	/* We are about to collect, so generate the appropriate cycle start and increment start events */
	reportGCCycleStart(rtEnv);
	_sched->reportStartGCIncrement(rtEnv);
}

/**
 * Set up for GC (no-op for the realtime collector).
 */
void
MM_RealtimeGC::setupForGC(MM_EnvironmentBase *env)
{
}

/**
 * @note only called by main thread.
 */
bool
MM_RealtimeGC::internalGarbageCollect(MM_EnvironmentBase *env, MM_MemorySubSpace *subSpace, MM_AllocateDescription *allocDescription)
{
	MM_EnvironmentRealtime *envRealtime = MM_EnvironmentRealtime::getEnvironment(env);

	incrementalCollect(envRealtime);

	_extensions->heap->resetHeapStatistics(true);

	return true;
}

void
MM_RealtimeGC::internalPostCollect(MM_EnvironmentBase *env, MM_MemorySubSpace *subSpace)
{
	MM_GlobalCollector::internalPostCollect(env, subSpace);

	/* Reset fixHeapForWalk for the next cycle, no matter who set it */
	_fixHeapForWalk = false;

	/* Check if the user overrode the default minimumFreeEntrySize */
	if (_extensions->minimumFreeEntrySize != UDATA_MAX) {
		_memoryPool->setMinimumFreeEntrySize(_extensions->minimumFreeEntrySize);
	} else {
		/* Set it dynamically based on free heap after the end of collection */
		float percentFreeHeapAfterCollect = _extensions->heap->getApproximateActiveFreeMemorySize() * 100.0f / _extensions->heap->getMaximumMemorySize();
		_avgPercentFreeHeapAfterCollect = _avgPercentFreeHeapAfterCollect * 0.8f + percentFreeHeapAfterCollect * 0.2f;
		/* Has the percent range changed? (for example, from [80,90] down to [70,80]) */
		uintptr_t minFreeEntrySize = (uintptr_t)1 << (((uintptr_t)_avgPercentFreeHeapAfterCollect / 10) + 1);
		if (minFreeEntrySize != _memoryPool->getMinimumFreeEntrySize()) {
			/* Yes, it did => make sure it changed enough (more than 1% above or below the range boundary) to accept it (in the example, 78.9 is ok, but 79.1 is not) */
			if ((uintptr_t)_avgPercentFreeHeapAfterCollect % 10 >= 1 && (uintptr_t)_avgPercentFreeHeapAfterCollect % 10 < 9) {
				if (minFreeEntrySize < 16) {
					minFreeEntrySize = 0;
				}
				_memoryPool->setMinimumFreeEntrySize(minFreeEntrySize);
			}
		}
	}
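
	/* Worked example of the adjustment above (using the numbers from the source comment):
	 * suppose the pool currently sits in the [80,90) range, i.e. 1 << (8 + 1) = 512 bytes,
	 * and the moving average decays to 78.9. The candidate becomes 1 << (78/10 + 1) = 256,
	 * which differs from 512, and 78 % 10 = 8 falls in [1,9), so 256 is accepted. At 79.1
	 * the candidate is also 256, but 79 % 10 = 9 fails the hysteresis check (too close to
	 * the 80 boundary), so the old value is kept. Averages below 30% produce candidates
	 * under 16 bytes, which are clamped to 0 (no minimum).
	 */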

	/*
	 * MM_GC_CYCLE_END is hooked by external components (e.g. the JIT), which may cause the GC to yield while in the
	 * external callback. Yielding introduces additional METRONOME_INCREMENT_STOP/START verbose events, which must be
	 * processed before the very last METRONOME_INCREMENT_STOP event preceding the PRIVATE_GC_POST_CYCLE_END event.
	 * Otherwise the METRONOME_INCREMENT_START/END events become out of order and verbose GC will fail.
	 */
	reportGCCycleFinalIncrementEnding(env);

	MM_EnvironmentRealtime *rtEnv = MM_EnvironmentRealtime::getEnvironment(env);
	_sched->reportStopGCIncrement(rtEnv, true);
	_sched->setGCCode(MM_GCCode(J9MMCONSTANT_IMPLICIT_GC_DEFAULT));
	reportGCCycleEnd(rtEnv);
	/*
	 * We could potentially yield during reportGCCycleEnd (e.g. due to JIT callbacks), and the scheduler will only wake up the main thread if _gcOn is true.
	 * Turn off the _gcOn flag at the very last, after cycle end has been reported.
	 */
	_sched->stopGC(rtEnv);
	env->_cycleState->_activeSubSpace = NULL;
}

void
MM_RealtimeGC::reportGCCycleFinalIncrementEnding(MM_EnvironmentBase *env)
{
	OMRPORT_ACCESS_FROM_ENVIRONMENT(env);

	MM_CommonGCData commonData;
	TRIGGER_J9HOOK_MM_OMR_GC_CYCLE_END(
		_extensions->omrHookInterface,
		env->getOmrVMThread(),
		omrtime_hires_clock(),
		J9HOOK_MM_OMR_GC_CYCLE_END,
		_extensions->getHeap()->initializeCommonGCData(env, &commonData),
		env->_cycleState->_type,
		omrgc_condYieldFromGC
	);
}

/**
 * Report the start of a synchronous GC.
 * @ingroup GC_Metronome methodGroup
 */
void
MM_RealtimeGC::reportSyncGCStart(MM_EnvironmentBase *env, GCReason reason, uintptr_t reasonParameter)
{
	OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
	uintptr_t approximateFreeFreeMemorySize;
#if defined(OMR_GC_DYNAMIC_CLASS_UNLOADING)
	MM_ClassUnloadStats *classUnloadStats = &_extensions->globalGCStats.classUnloadStats;
#endif /* defined(OMR_GC_DYNAMIC_CLASS_UNLOADING) */

	approximateFreeFreeMemorySize = _extensions->heap->getApproximateActiveFreeMemorySize();

	Trc_MM_SynchGCStart(env->getLanguageVMThread(),
		reason,
		getGCReasonAsString(reason),
		reasonParameter,
		approximateFreeFreeMemorySize,
		0
	);

#if defined(OMR_GC_DYNAMIC_CLASS_UNLOADING)
	uintptr_t classLoaderUnloadedCount = isCollectorIdle() ? 0 : classUnloadStats->_classLoaderUnloadedCount;
	uintptr_t classesUnloadedCount = isCollectorIdle() ? 0 : classUnloadStats->_classesUnloadedCount;
	uintptr_t anonymousClassesUnloadedCount = isCollectorIdle() ? 0 : classUnloadStats->_anonymousClassesUnloadedCount;
#else /* defined(OMR_GC_DYNAMIC_CLASS_UNLOADING) */
	uintptr_t classLoaderUnloadedCount = 0;
	uintptr_t classesUnloadedCount = 0;
	uintptr_t anonymousClassesUnloadedCount = 0;
#endif /* defined(OMR_GC_DYNAMIC_CLASS_UNLOADING) */

	/* If the main thread was blocked at the end of GC, waiting for a new GC cycle,
	 * globalGCStats are not cleared yet. Thus, if we haven't started GC yet,
	 * just report 0s for the class loader unloaded counts */
	TRIGGER_J9HOOK_MM_PRIVATE_METRONOME_SYNCHRONOUS_GC_START(_extensions->privateHookInterface,
		env->getOmrVMThread(), omrtime_hires_clock(),
		J9HOOK_MM_PRIVATE_METRONOME_SYNCHRONOUS_GC_START, reason, reasonParameter,
		approximateFreeFreeMemorySize,
		0,
		classLoaderUnloadedCount,
		classesUnloadedCount,
		anonymousClassesUnloadedCount
	);
}

/**
 * Report the end of a synchronous GC.
 * @ingroup GC_Metronome methodGroup
 */
void
MM_RealtimeGC::reportSyncGCEnd(MM_EnvironmentBase *env)
{
	_realtimeDelegate.reportSyncGCEnd(env);
}

/**
 * Report the start of a GC cycle.
 * @ingroup GC_Metronome methodGroup
 */
void
MM_RealtimeGC::reportGCCycleStart(MM_EnvironmentBase *env)
{
	OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
	/* Let the VM know that a GC cycle is about to start. The JIT, in particular, uses this
	 * to avoid compiling while a GC cycle is on.
	 */
	omrthread_monitor_enter(env->getOmrVM()->_gcCycleOnMonitor);
	env->getOmrVM()->_gcCycleOn = 1;

	uintptr_t approximateFreeMemorySize = _memoryPool->getApproximateFreeMemorySize();

	Trc_MM_CycleStart(env->getLanguageVMThread(), env->_cycleState->_type, approximateFreeMemorySize);

	MM_CommonGCData commonData;

	TRIGGER_J9HOOK_MM_OMR_GC_CYCLE_START(
		_extensions->omrHookInterface,
		env->getOmrVMThread(),
		omrtime_hires_clock(),
		J9HOOK_MM_OMR_GC_CYCLE_START,
		_extensions->getHeap()->initializeCommonGCData(env, &commonData),
		env->_cycleState->_type
	);
	omrthread_monitor_exit(env->getOmrVM()->_gcCycleOnMonitor);
}

/**
 * Report the end of a GC cycle.
 * @ingroup GC_Metronome methodGroup
 */
void
MM_RealtimeGC::reportGCCycleEnd(MM_EnvironmentBase *env)
{
	OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
	omrthread_monitor_enter(env->getOmrVM()->_gcCycleOnMonitor);

	uintptr_t approximateFreeMemorySize = _memoryPool->getApproximateFreeMemorySize();

	Trc_MM_CycleEnd(env->getLanguageVMThread(), env->_cycleState->_type, approximateFreeMemorySize);

	MM_CommonGCData commonData;

	TRIGGER_J9HOOK_MM_PRIVATE_GC_POST_CYCLE_END(
		_extensions->privateHookInterface,
		env->getOmrVMThread(),
		omrtime_hires_clock(),
		J9HOOK_MM_PRIVATE_GC_POST_CYCLE_END,
		_extensions->getHeap()->initializeCommonGCData(env, &commonData),
		env->_cycleState->_type,
		_extensions->globalGCStats.workPacketStats.getSTWWorkStackOverflowOccured(),
		_extensions->globalGCStats.workPacketStats.getSTWWorkStackOverflowCount(),
		_extensions->globalGCStats.workPacketStats.getSTWWorkpacketCountAtOverflow(),
		_extensions->globalGCStats.fixHeapForWalkReason,
		_extensions->globalGCStats.fixHeapForWalkTime
	);

	/* If the GC cycle just finished, and a trigger start was previously generated, generate the trigger end now */
	if (_memoryPool->getBytesInUse() < _extensions->gcInitialTrigger) {
		_previousCycleBelowTrigger = true;
		TRIGGER_J9HOOK_MM_PRIVATE_METRONOME_TRIGGER_END(_extensions->privateHookInterface,
			env->getOmrVMThread(), omrtime_hires_clock(),
			J9HOOK_MM_PRIVATE_METRONOME_TRIGGER_END
		);
	}

	/* Let the VM (the JIT, in particular) know the GC cycle is finished. Do a monitor notify to
	 * unblock parties that waited for the cycle to complete
	 */
	env->getOmrVM()->_gcCycleOn = 0;
	omrthread_monitor_notify_all(env->getOmrVM()->_gcCycleOnMonitor);

	omrthread_monitor_exit(env->getOmrVM()->_gcCycleOnMonitor);
}

/**
 * Expand the marking scheme's (and, if present, the reference chain walker's) mark map
 * to cover a newly added heap range.
 * @ingroup GC_Metronome methodGroup
 */
bool
MM_RealtimeGC::heapAddRange(MM_EnvironmentBase *env, MM_MemorySubSpace *subspace, uintptr_t size, void *lowAddress, void *highAddress)
{
	bool result = _markingScheme->heapAddRange(env, subspace, size, lowAddress, highAddress);

	if (result) {
		if (NULL != _extensions->referenceChainWalkerMarkMap) {
			result = _extensions->referenceChainWalkerMarkMap->heapAddRange(env, size, lowAddress, highAddress);
			if (!result) {
				/* Expansion of the Reference Chain Walker mark map has failed;
				 * the marking scheme expansion must be reversed
				 */
				_markingScheme->heapRemoveRange(env, subspace, size, lowAddress, highAddress, NULL, NULL);
			}
		}
	}
	return result;
}

/**
 * Contract the mark maps when a heap range is removed.
 */
bool
MM_RealtimeGC::heapRemoveRange(
	MM_EnvironmentBase *env, MM_MemorySubSpace *subspace, uintptr_t size, void *lowAddress, void *highAddress,
	void *lowValidAddress, void *highValidAddress)
{
	bool result = _markingScheme->heapRemoveRange(env, subspace, size, lowAddress, highAddress, lowValidAddress, highValidAddress);

	if (NULL != _extensions->referenceChainWalkerMarkMap) {
		result = result && _extensions->referenceChainWalkerMarkMap->heapRemoveRange(env, size, lowAddress, highAddress, lowValidAddress, highValidAddress);
	}
	return result;
}

/**
 * Hook the sweep and marking schemes up to the segregated allocation manager at collector startup.
 */
bool
MM_RealtimeGC::collectorStartup(MM_GCExtensionsBase *extensions)
{
	((MM_GlobalAllocationManagerSegregated *)extensions->globalAllocationManager)->setSweepScheme(_sweepScheme);
	((MM_GlobalAllocationManagerSegregated *)extensions->globalAllocationManager)->setMarkingScheme(_markingScheme);
	return true;
}

/**
 * Collector shutdown (no-op for the realtime collector).
 */
void
MM_RealtimeGC::collectorShutdown(MM_GCExtensionsBase *extensions)
{
}

/**
 * Factory method for creating the work packets structure.
 *
 * @return the WorkPackets to be used for this Collector.
 */
MM_WorkPacketsRealtime *
MM_RealtimeGC::allocateWorkPackets(MM_EnvironmentBase *env)
{
	return MM_WorkPacketsRealtime::newInstance(env);
}

/**
 * Calls the Scheduler's yielding API to determine if the GC should yield.
 * @return true if the GC should yield, false otherwise
 */
bool
MM_RealtimeGC::shouldYield(MM_EnvironmentBase *env)
{
	return _sched->shouldGCYield(MM_EnvironmentRealtime::getEnvironment(env), 0);
}

/**
 * Yield from the GC by calling the Scheduler's API.
 */
void
MM_RealtimeGC::yield(MM_EnvironmentBase *env)
{
	_sched->yieldFromGC(MM_EnvironmentRealtime::getEnvironment(env));
}

/**
 * Yield only if the Scheduler deems yielding should occur at the time of the
 * call to this method.
 */
bool
MM_RealtimeGC::condYield(MM_EnvironmentBase *env, U_64 timeSlackNanoSec)
{
	return _sched->condYieldFromGC(MM_EnvironmentRealtime::getEnvironment(env), timeSlackNanoSec);
}
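
/* Usage sketch (hypothetical caller, not from this file): long-running GC loops are
 * expected to poll the scheduler periodically, e.g.
 *
 *   while (moreWorkRemains()) {
 *       doBoundedIncrementOfWork();
 *       realtimeGC->condYield(env, 0);  // yields only if the scheduler says to
 *   }
 *
 * shouldYield() and yield() expose the same decision and action separately.
 */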

bool
MM_RealtimeGC::isMarked(void *objectPtr)
{
	return _markingScheme->isMarked((omrobjectptr_t)(objectPtr));
}

void
MM_RealtimeGC::reportMarkStart(MM_EnvironmentBase *env)
{
	OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
	Trc_MM_MarkStart(env->getLanguageVMThread());

	TRIGGER_J9HOOK_MM_PRIVATE_MARK_START(
		_extensions->privateHookInterface,
		env->getOmrVMThread(),
		omrtime_hires_clock(),
		J9HOOK_MM_PRIVATE_MARK_START);
}

void
MM_RealtimeGC::reportMarkEnd(MM_EnvironmentBase *env)
{
	OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
	Trc_MM_MarkEnd(env->getLanguageVMThread());

	TRIGGER_J9HOOK_MM_PRIVATE_MARK_END(
		_extensions->privateHookInterface,
		env->getOmrVMThread(),
		omrtime_hires_clock(),
		J9HOOK_MM_PRIVATE_MARK_END);
}

void
MM_RealtimeGC::reportSweepStart(MM_EnvironmentBase *env)
{
	OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
	Trc_MM_SweepStart(env->getLanguageVMThread());

	TRIGGER_J9HOOK_MM_PRIVATE_SWEEP_START(
		_extensions->privateHookInterface,
		env->getOmrVMThread(),
		omrtime_hires_clock(),
		J9HOOK_MM_PRIVATE_SWEEP_START);
}

void
MM_RealtimeGC::reportSweepEnd(MM_EnvironmentBase *env)
{
	OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
	Trc_MM_SweepEnd(env->getLanguageVMThread());

	TRIGGER_J9HOOK_MM_PRIVATE_SWEEP_END(
		_extensions->privateHookInterface,
		env->getOmrVMThread(),
		omrtime_hires_clock(),
		J9HOOK_MM_PRIVATE_SWEEP_END);
}

void
MM_RealtimeGC::reportGCStart(MM_EnvironmentBase *env)
{
	uintptr_t scavengerCount = 0;
	OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
	Trc_MM_GlobalGCStart(env->getLanguageVMThread(), _extensions->globalGCStats.gcCount);

	TRIGGER_J9HOOK_MM_OMR_GLOBAL_GC_START(
		_extensions->omrHookInterface,
		env->getOmrVMThread(),
		omrtime_hires_clock(),
		J9HOOK_MM_OMR_GLOBAL_GC_START,
		_extensions->globalGCStats.gcCount,
		scavengerCount,
		env->_cycleState->_gcCode.isExplicitGC() ? 1 : 0,
		env->_cycleState->_gcCode.isAggressiveGC() ? 1 : 0,
		_bytesRequested);
}

void
MM_RealtimeGC::reportGCEnd(MM_EnvironmentBase *env)
{
	OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
	uintptr_t approximateNewActiveFreeMemorySize = _extensions->heap->getApproximateActiveFreeMemorySize(MEMORY_TYPE_NEW);
	uintptr_t newActiveMemorySize = _extensions->heap->getActiveMemorySize(MEMORY_TYPE_NEW);
	uintptr_t approximateOldActiveFreeMemorySize = _extensions->heap->getApproximateActiveFreeMemorySize(MEMORY_TYPE_OLD);
	uintptr_t oldActiveMemorySize = _extensions->heap->getActiveMemorySize(MEMORY_TYPE_OLD);
	uintptr_t approximateLoaActiveFreeMemorySize = (_extensions->largeObjectArea ? _extensions->heap->getApproximateActiveFreeLOAMemorySize(MEMORY_TYPE_OLD) : 0);
	uintptr_t loaActiveMemorySize = (_extensions->largeObjectArea ? _extensions->heap->getActiveLOAMemorySize(MEMORY_TYPE_OLD) : 0);

	/* Not including LOA in the total (it is already accounted for by OLD) */
	uintptr_t approximateTotalActiveFreeMemorySize = approximateNewActiveFreeMemorySize + approximateOldActiveFreeMemorySize;
	uintptr_t totalActiveMemorySizeTotal = newActiveMemorySize + oldActiveMemorySize;

	Trc_MM_GlobalGCEnd(env->getLanguageVMThread(),
		_extensions->globalGCStats.workPacketStats.getSTWWorkStackOverflowOccured(),
		_extensions->globalGCStats.workPacketStats.getSTWWorkStackOverflowCount(),
		approximateTotalActiveFreeMemorySize,
		totalActiveMemorySizeTotal
	);

	/* These are assigned to temporary variables out-of-line since some preprocessors get confused if you have directives in macros */
	uintptr_t approximateActiveFreeMemorySize = 0;
	uintptr_t activeMemorySize = 0;

	TRIGGER_J9HOOK_MM_OMR_GLOBAL_GC_END(
		_extensions->omrHookInterface,
		env->getOmrVMThread(),
		omrtime_hires_clock(),
		J9HOOK_MM_OMR_GLOBAL_GC_END,
		_extensions->globalGCStats.workPacketStats.getSTWWorkStackOverflowOccured(),
		_extensions->globalGCStats.workPacketStats.getSTWWorkStackOverflowCount(),
		_extensions->globalGCStats.workPacketStats.getSTWWorkpacketCountAtOverflow(),
		approximateNewActiveFreeMemorySize,
		newActiveMemorySize,
		approximateOldActiveFreeMemorySize,
		oldActiveMemorySize,
		(_extensions->largeObjectArea ? 1 : 0),
		approximateLoaActiveFreeMemorySize,
		loaActiveMemorySize,
		/* We can't just ask the heap for everything of type FIXED, because that includes scopes as well */
		approximateActiveFreeMemorySize,
		activeMemorySize,
		_extensions->globalGCStats.fixHeapForWalkReason,
		_extensions->globalGCStats.fixHeapForWalkTime
	);
}

/**
 * Enables the write barrier; this should be called at the beginning of the mark phase.
 */
void
MM_RealtimeGC::enableWriteBarrier(MM_EnvironmentBase *env)
{
	MM_GCExtensionsBase *extensions = env->getExtensions();
	extensions->sATBBarrierRememberedSet->restoreGlobalFragmentIndex(env);
}

/**
 * Disables the write barrier; this should be called at the end of the mark phase.
 */
void
MM_RealtimeGC::disableWriteBarrier(MM_EnvironmentBase *env)
{
	MM_GCExtensionsBase *extensions = env->getExtensions();
	extensions->sATBBarrierRememberedSet->preserveGlobalFragmentIndex(env);
}
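
/* Lifecycle sketch (the functions are real; the bracketing order is inferred from the
 * doc comments above, not shown in this file): a mark phase brackets the SATB barrier as
 *
 *   enableWriteBarrier(env);   // begin mark: mutators start recording into fragments
 *   ...root scanning and tracing...
 *   disableWriteBarrier(env);  // end mark: global fragment index preserved, barrier dormant
 */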

void
MM_RealtimeGC::flushRememberedSet(MM_EnvironmentRealtime *env)
{
	if (_workPackets->inUsePacketsAvailable(env)) {
		_workPackets->moveInUseToNonEmpty(env);
		_extensions->sATBBarrierRememberedSet->flushFragments(env);
	}
}

/**
 * Perform the tracing phase. For tracing to be complete, the work stack and rememberedSet
 * have to be empty and class tracing has to complete without marking any objects.
 *
 * If concurrentMarkingEnabled is true then tracing is completed concurrently.
 */
void
MM_RealtimeGC::completeMarking(MM_EnvironmentRealtime *env)
{
	do {
		if (env->_currentTask->synchronizeGCThreadsAndReleaseMain(env, UNIQUE_ID)) {
			flushRememberedSet(env);
			if (_extensions->concurrentTracingEnabled) {
				setCollectorConcurrentTracing();
				_realtimeDelegate.releaseExclusiveVMAccess(env, _sched->_exclusiveVMAccessRequired);
			} else {
				setCollectorTracing();
			}

			_moreTracingRequired = false;

			/* From this point on the Scheduler collaborates with WorkPacketsRealtime on yielding.
			 * Strictly speaking this should be done first thing in incrementalCompleteScan().
			 * However, it would require another synchronizeGCThreadsAndReleaseMain barrier.
			 * So we are just reusing the existing one.
			 */
			_sched->pushYieldCollaborator(_workPackets->getYieldCollaborator());

			env->_currentTask->releaseSynchronizedGCThreads(env);
		}

		if (_markingScheme->incrementalCompleteScan(env, MAX_UINT)) {
			_moreTracingRequired = true;
		}

		if (env->_currentTask->synchronizeGCThreadsAndReleaseMain(env, UNIQUE_ID)) {
			/* Restore the old Yield Collaborator */
			_sched->popYieldCollaborator();

			if (_extensions->concurrentTracingEnabled) {
				_realtimeDelegate.acquireExclusiveVMAccess(env, _sched->_exclusiveVMAccessRequired);
				setCollectorTracing();
			}
			_moreTracingRequired |= _realtimeDelegate.doTracing(env);

			/* The workStack and rememberedSet use the same workPackets
			 * as backing store. If all packets are empty this means the
			 * workStack and rememberedSet processing are both complete.
			 */
			_moreTracingRequired |= !_workPackets->isAllPacketsEmpty();
			env->_currentTask->releaseSynchronizedGCThreads(env);
		}
	} while (_moreTracingRequired);
}
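
/* Termination recap (a summary of the loop above, not additional behavior): a round is
 * final only when incrementalCompleteScan() finds no more work, the delegate's class
 * tracing (doTracing) marks nothing, and every work packet backing the work stack and
 * rememberedSet is empty; any one of the three re-arms _moreTracingRequired and forces
 * another round.
 */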

void
MM_RealtimeGC::enableDoubleBarrier(MM_EnvironmentBase *env)
{
	_realtimeDelegate.enableDoubleBarrier(env);
}

void
MM_RealtimeGC::disableDoubleBarrierOnThread(MM_EnvironmentBase *env, OMR_VMThread *vmThread)
{
	_realtimeDelegate.disableDoubleBarrierOnThread(env, vmThread);
}

void
MM_RealtimeGC::disableDoubleBarrier(MM_EnvironmentBase *env)
{
	_realtimeDelegate.disableDoubleBarrier(env);
}