/* Path: blob/master/runtime/gc_realtime/RealtimeGC.cpp */
/*******************************************************************************1* Copyright (c) 1991, 2020 IBM Corp. and others2*3* This program and the accompanying materials are made available under4* the terms of the Eclipse Public License 2.0 which accompanies this5* distribution and is available at https://www.eclipse.org/legal/epl-2.0/6* or the Apache License, Version 2.0 which accompanies this distribution and7* is available at https://www.apache.org/licenses/LICENSE-2.0.8*9* This Source Code may also be made available under the following10* Secondary Licenses when the conditions for such availability set11* forth in the Eclipse Public License, v. 2.0 are satisfied: GNU12* General Public License, version 2 with the GNU Classpath13* Exception [1] and GNU General Public License, version 2 with the14* OpenJDK Assembly Exception [2].15*16* [1] https://www.gnu.org/software/classpath/license.html17* [2] http://openjdk.java.net/legal/assembly-exception.html18*19* SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 OR GPL-2.0 WITH Classpath-exception-2.0 OR LicenseRef-GPL-2.0 WITH Assembly-exception20*******************************************************************************/2122#include "omr.h"23#include "omrcfg.h"24#include "gcutils.h"2526#include <string.h>2728#include "RealtimeGC.hpp"2930#include "AllocateDescription.hpp"31#include "CycleState.hpp"32#include "EnvironmentRealtime.hpp"33#include "GlobalAllocationManagerSegregated.hpp"34#include "Heap.hpp"35#include "HeapRegionDescriptorRealtime.hpp"36#include "MemoryPoolSegregated.hpp"37#include "MemorySubSpace.hpp"38#include "modronapicore.hpp"39#include "OMRVMInterface.hpp"40#include "OSInterface.hpp"41#include "ParallelDispatcher.hpp"42#include "RealtimeMarkingScheme.hpp"43#include "RealtimeMarkTask.hpp"44#include "RealtimeSweepTask.hpp"45#include "ReferenceChainWalkerMarkMap.hpp"46#include "RememberedSetSATB.hpp"47#include "Scheduler.hpp"48#include "SegregatedAllocationInterface.hpp"49#include 
"SublistFragment.hpp"50#include "SweepSchemeRealtime.hpp"51#include "Task.hpp"52#include "WorkPacketsRealtime.hpp"5354/* TuningFork name/version information for gc_staccato */55#define TUNINGFORK_STACCATO_EVENT_SPACE_NAME "com.ibm.realtime.vm.trace.gc.metronome"56#define TUNINGFORK_STACCATO_EVENT_SPACE_VERSION 2005758MM_RealtimeGC *59MM_RealtimeGC::newInstance(MM_EnvironmentBase *env)60{61MM_RealtimeGC *globalGC = (MM_RealtimeGC *)env->getForge()->allocate(sizeof(MM_RealtimeGC), MM_AllocationCategory::FIXED, OMR_GET_CALLSITE());62if (globalGC) {63new(globalGC) MM_RealtimeGC(env);64if (!globalGC->initialize(env)) {65globalGC->kill(env);66globalGC = NULL;67}68}69return globalGC;70}717273void74MM_RealtimeGC::kill(MM_EnvironmentBase *env)75{76tearDown(env);77env->getForge()->free(this);78}7980void81MM_RealtimeGC::setGCThreadPriority(OMR_VMThread *vmThread, uintptr_t newGCThreadPriority)82{83if(newGCThreadPriority == (uintptr_t) _currentGCThreadPriority) {84return;85}8687Trc_MM_GcThreadPriorityChanged(vmThread->_language_vmthread, newGCThreadPriority);8889/* Walk through all GC threads and set the priority */90omrthread_t* gcThreadTable = _sched->getThreadTable();91for (uintptr_t i = 0; i < _sched->threadCount(); i++) {92omrthread_set_priority(gcThreadTable[i], newGCThreadPriority);93}94_currentGCThreadPriority = (intptr_t) newGCThreadPriority;95}9697/**98* Initialization.99*/100bool101MM_RealtimeGC::initialize(MM_EnvironmentBase *env)102{103_gcPhase = GC_PHASE_IDLE;104_extensions->realtimeGC = this;105_allowGrowth = false;106107if (_extensions->gcTrigger == 0) {108_extensions->gcTrigger = (_extensions->memoryMax / 2);109_extensions->gcInitialTrigger = (_extensions->memoryMax / 2);110}111112_extensions->distanceToYieldTimeCheck = 0;113114/* Only SRT passes this check as the commandline option to specify beatMicro is only enabled on SRT */115if (METRONOME_DEFAULT_BEAT_MICRO != _extensions->beatMicro) {116/* User-specified quanta time, adjust related parameters 
*/117_extensions->timeWindowMicro = 20 * _extensions->beatMicro;118/* Currently all supported SRT platforms - AIX and Linux, can only use HRT for alarm thread implementation.119* The default value for HRT period is 1/3 of the default quanta: 1 msec for HRT period and 3 msec quanta,120* we will attempt to adjust the HRT period to 1/3 of the specified quanta.121*/122uintptr_t hrtPeriodMicro = _extensions->beatMicro / 3;123if ((hrtPeriodMicro < METRONOME_DEFAULT_HRT_PERIOD_MICRO) && (METRONOME_DEFAULT_HRT_PERIOD_MICRO < _extensions->beatMicro)) {124/* If the adjusted value is too small for the hires clock resolution, we will use the default HRT period provided that125* the default period is smaller than the quanta time specified.126* Otherwise we fail to initialize the alarm thread with an error message.127*/128hrtPeriodMicro = METRONOME_DEFAULT_HRT_PERIOD_MICRO;129}130Assert_MM_true(0 != hrtPeriodMicro);131_extensions->hrtPeriodMicro = hrtPeriodMicro;132133/* On Windows SRT we still use interrupt-based alarm. 
Set the interrupt period the same as hires timer period.134* We will fail to init the alarm if this is too small a resolution for Windows.135*/136_extensions->itPeriodMicro = _extensions->hrtPeriodMicro;137138/* if the pause time user specified is larger than the default value, calculate if there is opportunity139* for the GC to do time checking less often inside condYieldFromGC.140*/141if (METRONOME_DEFAULT_BEAT_MICRO < _extensions->beatMicro) {142uintptr_t intervalToSkipYieldCheckMicro = _extensions->beatMicro - METRONOME_DEFAULT_BEAT_MICRO;143uintptr_t maxInterYieldTimeMicro = INTER_YIELD_MAX_NS / 1000;144_extensions->distanceToYieldTimeCheck = (U_32)(intervalToSkipYieldCheckMicro / maxInterYieldTimeMicro);145}146}147148_osInterface = MM_OSInterface::newInstance(env);149if (_osInterface == NULL) {150return false;151}152153_sched = (MM_Scheduler *)_extensions->dispatcher;154155_workPackets = allocateWorkPackets(env);156if (_workPackets == NULL) {157return false;158}159160_markingScheme = MM_RealtimeMarkingScheme::newInstance(env, this);161if (NULL == _markingScheme) {162return false;163}164165if (!_delegate.initialize(env, NULL, NULL)) {166return false;167}168169_sweepScheme = MM_SweepSchemeRealtime::newInstance(env, this, _sched, _markingScheme->getMarkMap());170if(NULL == _sweepScheme) {171return false;172}173174if (!_realtimeDelegate.initialize(env)) {175return false;176}177178_extensions->sATBBarrierRememberedSet = MM_RememberedSetSATB::newInstance(env, _workPackets);179if (NULL == _extensions->sATBBarrierRememberedSet) {180return false;181}182183_stopTracing = false;184185_sched->collectorInitialized(this);186187return true;188}189190/**191* Initialization.192*/193void194MM_RealtimeGC::tearDown(MM_EnvironmentBase *env)195{196_delegate.tearDown(env);197_realtimeDelegate.tearDown(env);198199if(NULL != _sched) {200_sched->kill(env);201_sched = NULL;202}203204if(NULL != _osInterface) {205_osInterface->kill(env);206_osInterface = NULL;207}208209if(NULL != 
_workPackets) {210_workPackets->kill(env);211_workPackets = NULL;212}213214if (NULL != _markingScheme) {215_markingScheme->kill(env);216_markingScheme = NULL;217}218219if (NULL != _sweepScheme) {220_sweepScheme->kill(env);221_sweepScheme = NULL;222}223224if (NULL != _extensions->sATBBarrierRememberedSet) {225_extensions->sATBBarrierRememberedSet->kill(env);226_extensions->sATBBarrierRememberedSet = NULL;227}228}229230/**231* @copydoc MM_GlobalCollector::mainSetupForGC()232*/233void234MM_RealtimeGC::mainSetupForGC(MM_EnvironmentBase *env)235{236/* Reset memory pools of associated memory spaces */237env->_cycleState->_activeSubSpace->reset();238239_workPackets->reset(env);240241/* Clear the gc stats structure */242clearGCStats();243244_realtimeDelegate.mainSetupForGC(env);245}246247/**248* @copydoc MM_GlobalCollector::mainCleanupAfterGC()249*/250void251MM_RealtimeGC::mainCleanupAfterGC(MM_EnvironmentBase *env)252{253_realtimeDelegate.mainCleanupAfterGC(env);254}255256/**257* Thread initialization.258*/259void260MM_RealtimeGC::workerSetupForGC(MM_EnvironmentBase *env)261{262}263264/**265*/266void267MM_RealtimeGC::clearGCStats()268{269_extensions->globalGCStats.clear();270_realtimeDelegate.clearGCStats();271}272273/**274*/275void276MM_RealtimeGC::mergeGCStats(MM_EnvironmentBase *env)277{278}279280uintptr_t281MM_RealtimeGC::verbose(MM_EnvironmentBase *env) {282return _sched->verbose();283}284285/**286* @note only called by main thread.287*/288void289MM_RealtimeGC::doAuxiliaryGCWork(MM_EnvironmentBase *env)290{291_realtimeDelegate.doAuxiliaryGCWork(env);292293/* Restart the caches for all threads. 
*/294GC_OMRVMThreadListIterator vmThreadListIterator(_vm);295OMR_VMThread *walkThread;296while((walkThread = vmThreadListIterator.nextOMRVMThread()) != NULL) {297MM_EnvironmentBase *walkEnv = MM_EnvironmentBase::getEnvironment(walkThread);298((MM_SegregatedAllocationInterface *)(walkEnv->_objectAllocationInterface))->restartCache(walkEnv);299}300301mergeGCStats(env);302}303304/**305* Incremental Collector.306* Employs a double write barrier that saves overwriting (new) values from unscanned threads and307* also the first (old) value overwritten by all threads (the latter as in a Yuasa barrier).308* @note only called by main thread.309*/310void311MM_RealtimeGC::incrementalCollect(MM_EnvironmentRealtime *env)312{313OMRPORT_ACCESS_FROM_ENVIRONMENT(env);314315mainSetupForGC(env);316317_realtimeDelegate.incrementalCollectStart(env);318319/* Make sure all threads notice GC is ongoing with a barrier. */320_extensions->globalGCStats.gcCount++;321if (verbose(env) >= 2) {322omrtty_printf("RealtimeGC::incrementalCollect\n");323}324if (verbose(env) >= 3) {325omrtty_printf("RealtimeGC::incrementalCollect setup and root phase\n");326}327if (env->_cycleState->_gcCode.isOutOfMemoryGC()) {328env->_cycleState->_referenceObjectOptions |= MM_CycleState::references_soft_as_weak;329}330331setCollectorRootMarking();332333reportMarkStart(env);334MM_RealtimeMarkTask markTask(env, _sched, this, _markingScheme, env->_cycleState);335_sched->run(env, &markTask);336reportMarkEnd(env);337338339_realtimeDelegate.incrementalCollect(env);340341/*342* Sweeping.343*/344reportSweepStart(env);345MM_RealtimeSweepTask sweepTask(env, _sched, _sweepScheme);346_sched->run(env, &sweepTask);347reportSweepEnd(env);348349doAuxiliaryGCWork(env);350351/* Get all components to clean up after themselves at the end of a collect */352mainCleanupAfterGC(env);353354_sched->condYieldFromGC(env);355setCollectorIdle();356357if (verbose(env) >= 3) {358omrtty_printf("RealtimeGC::incrementalCollect gc complete %d MB in 
use\n", _memoryPool->getBytesInUse() >> 20);359}360}361362void363MM_RealtimeGC::flushCachedFullRegions(MM_EnvironmentBase *env)364{365/* delegate to the memory pool to perform the flushing of per-context full regions to the region pool */366_memoryPool->flushCachedFullRegions(env);367}368369/**370* This function is called at the end of tracing when it is safe for threads to stop371* allocating black and return to allocating white. It iterates through all the threads372* and sets their allocationColor to GC_UNMARK. It also sets the new thread allocation373* color to GC_UNMARK.374**/375void376MM_RealtimeGC::allThreadsAllocateUnmarked(MM_EnvironmentBase *env) {377GC_OMRVMInterface::flushCachesForGC(env);378GC_OMRVMThreadListIterator vmThreadListIterator(_vm);379380while(OMR_VMThread *aThread = vmThreadListIterator.nextOMRVMThread()) {381MM_EnvironmentRealtime *threadEnv = MM_EnvironmentRealtime::getEnvironment(aThread);382assume0(threadEnv->getAllocationColor() == GC_MARK);383threadEnv->setAllocationColor(GC_UNMARK);384threadEnv->setMonitorCacheCleared(FALSE);385}386_extensions->newThreadAllocationColor = GC_UNMARK;387}388389/****************************************390* VM Garbage Collection API391****************************************392*/393/**394*/395void396MM_RealtimeGC::internalPreCollect(MM_EnvironmentBase *env, MM_MemorySubSpace *subSpace, MM_AllocateDescription *allocDescription, U_32 gcCode)397{398/* Setup the main thread cycle state */399_cycleState = MM_CycleState();400env->_cycleState = &_cycleState;401env->_cycleState->_gcCode = MM_GCCode(gcCode);402env->_cycleState->_type = _cycleType;403env->_cycleState->_activeSubSpace = subSpace;404405/* If we are in an excessiveGC level beyond normal then an aggressive GC is406* conducted to free up as much space as possible407*/408if (!env->_cycleState->_gcCode.isExplicitGC()) {409if(excessive_gc_normal != _extensions->excessiveGCLevel) {410/* convert the current mode to excessive GC mode 
*/411env->_cycleState->_gcCode = MM_GCCode(J9MMCONSTANT_IMPLICIT_GC_EXCESSIVE);412}413}414415/* The minimum free entry size is always re-adjusted at the end of a cycle.416* But if the current cycle is triggered due to OOM, at the start of the cycle417* set the minimum free entry size to the smallest size class - 16 bytes.418*/419if (env->_cycleState->_gcCode.isOutOfMemoryGC()) {420_memoryPool->setMinimumFreeEntrySize((1 << J9VMGC_SIZECLASSES_LOG_SMALLEST));421}422423MM_EnvironmentRealtime *rtEnv = MM_EnvironmentRealtime::getEnvironment(env);424/* Having heap walkable after the end of GC may be explicitly required through command line option or GC Check*/425if (rtEnv->getExtensions()->fixHeapForWalk) {426_fixHeapForWalk = true;427}428/* we are about to collect so generate the appropriate cycle start and increment start events */429reportGCCycleStart(rtEnv);430_sched->reportStartGCIncrement(rtEnv);431}432433/**434*/435void436MM_RealtimeGC::setupForGC(MM_EnvironmentBase *env)437{438}439440/**441* @note only called by main thread.442*/443bool444MM_RealtimeGC::internalGarbageCollect(MM_EnvironmentBase *env, MM_MemorySubSpace *subSpace, MM_AllocateDescription *allocDescription)445{446MM_EnvironmentRealtime *envRealtime = MM_EnvironmentRealtime::getEnvironment(env);447448incrementalCollect(envRealtime);449450_extensions->heap->resetHeapStatistics(true);451452return true;453}454455void456MM_RealtimeGC::internalPostCollect(MM_EnvironmentBase *env, MM_MemorySubSpace *subSpace)457{458MM_GlobalCollector::internalPostCollect(env, subSpace);459460/* Reset fixHeapForWalk for the next cycle, no matter who set it */461_fixHeapForWalk = false;462463/* Check if user overrode the default minimumFreeEntrySize */464if (_extensions->minimumFreeEntrySize != UDATA_MAX) {465_memoryPool->setMinimumFreeEntrySize(_extensions->minimumFreeEntrySize);466} else {467/* Set it dynamically based on free heap after the end of collection */468float percentFreeHeapAfterCollect = 
_extensions->heap->getApproximateActiveFreeMemorySize() * 100.0f / _extensions->heap->getMaximumMemorySize();469_avgPercentFreeHeapAfterCollect = _avgPercentFreeHeapAfterCollect * 0.8f + percentFreeHeapAfterCollect * 0.2f;470/* Has percent range changed? (for example from [80,90] down to [70,80]) */471uintptr_t minFreeEntrySize = (uintptr_t)1 << (((uintptr_t)_avgPercentFreeHeapAfterCollect / 10) + 1);472if (minFreeEntrySize != _memoryPool->getMinimumFreeEntrySize()) {473/* Yes, it did => make sure it changed enough (more than 1% up or below the range boundary) to accept it (in the example, 78.9 is ok, but 79.1 is not */474if ((uintptr_t)_avgPercentFreeHeapAfterCollect % 10 >= 1 && (uintptr_t)_avgPercentFreeHeapAfterCollect % 10 < 9) {475if (minFreeEntrySize < 16) {476minFreeEntrySize = 0;477}478_memoryPool->setMinimumFreeEntrySize(minFreeEntrySize);479}480}481}482483/*484* MM_GC_CYCLE_END is hooked by external components (e.g. JIT), which may cause GC to yield while in the485* external callback. Yielding introduces additional METRONOME_INCREMENT_STOP/START verbose events, which must be486* processed before the very last METRONOME_INCREMENT_STOP event before the PRIVATE_GC_POST_CYCLE_END event. Otherwise487* the METRONOME_INCREMENT_START/END events become out of order and verbose GC will fail.488*/489reportGCCycleFinalIncrementEnding(env);490491MM_EnvironmentRealtime *rtEnv = MM_EnvironmentRealtime::getEnvironment(env);492_sched->reportStopGCIncrement(rtEnv, true);493_sched->setGCCode(MM_GCCode(J9MMCONSTANT_IMPLICIT_GC_DEFAULT));494reportGCCycleEnd(rtEnv);495/*496* We could potentially yield during reportGCCycleEnd (e.g. 
due to JIT callbacks) and the scheduler will only wake up the main if _gcOn is true.497* Turn off _gcOn flag at the very last, after cycle end has been reported.498*/499_sched->stopGC(rtEnv);500env->_cycleState->_activeSubSpace = NULL;501}502503void504MM_RealtimeGC::reportGCCycleFinalIncrementEnding(MM_EnvironmentBase *env)505{506OMRPORT_ACCESS_FROM_ENVIRONMENT(env);507508MM_CommonGCData commonData;509TRIGGER_J9HOOK_MM_OMR_GC_CYCLE_END(510_extensions->omrHookInterface,511env->getOmrVMThread(),512omrtime_hires_clock(),513J9HOOK_MM_OMR_GC_CYCLE_END,514_extensions->getHeap()->initializeCommonGCData(env, &commonData),515env->_cycleState->_type,516omrgc_condYieldFromGC517);518}519520/**521* @todo Provide method documentation522* @ingroup GC_Metronome methodGroup523*/524void525MM_RealtimeGC::reportSyncGCStart(MM_EnvironmentBase *env, GCReason reason, uintptr_t reasonParameter)526{527OMRPORT_ACCESS_FROM_ENVIRONMENT(env);528uintptr_t approximateFreeFreeMemorySize;529#if defined(OMR_GC_DYNAMIC_CLASS_UNLOADING)530MM_ClassUnloadStats *classUnloadStats = &_extensions->globalGCStats.classUnloadStats;531#endif /* defined(OMR_GC_DYNAMIC_CLASS_UNLOADING) */532533approximateFreeFreeMemorySize = _extensions->heap->getApproximateActiveFreeMemorySize();534535Trc_MM_SynchGCStart(env->getLanguageVMThread(),536reason,537getGCReasonAsString(reason),538reasonParameter,539approximateFreeFreeMemorySize,5400541);542543#if defined(OMR_GC_DYNAMIC_CLASS_UNLOADING)544uintptr_t classLoaderUnloadedCount = isCollectorIdle()?0:classUnloadStats->_classLoaderUnloadedCount;545uintptr_t classesUnloadedCount = isCollectorIdle()?0:classUnloadStats->_classesUnloadedCount;546uintptr_t anonymousClassesUnloadedCount = isCollectorIdle()?0:classUnloadStats->_anonymousClassesUnloadedCount;547#else /* defined(OMR_GC_DYNAMIC_CLASS_UNLOADING) */548uintptr_t classLoaderUnloadedCount = 0;549uintptr_t classesUnloadedCount = 0;550uintptr_t anonymousClassesUnloadedCount = 0;551#endif /* 
defined(OMR_GC_DYNAMIC_CLASS_UNLOADING) */552553/* If main thread was blocked at end of GC, waiting for a new GC cycle,554* globalGCStats are not cleared yet. Thus, if we haven't started GC yet,555* just report 0s for classLoaders unloaded count */556TRIGGER_J9HOOK_MM_PRIVATE_METRONOME_SYNCHRONOUS_GC_START(_extensions->privateHookInterface,557env->getOmrVMThread(), omrtime_hires_clock(),558J9HOOK_MM_PRIVATE_METRONOME_SYNCHRONOUS_GC_START, reason, reasonParameter,559approximateFreeFreeMemorySize,5600,561classLoaderUnloadedCount,562classesUnloadedCount,563anonymousClassesUnloadedCount564);565}566567/**568* @todo Provide method documentation569* @ingroup GC_Metronome methodGroup570*/571void572MM_RealtimeGC::reportSyncGCEnd(MM_EnvironmentBase *env)573{574_realtimeDelegate.reportSyncGCEnd(env);575}576577/**578* @todo Provide method documentation579* @ingroup GC_Metronome methodGroup580*/581void582MM_RealtimeGC::reportGCCycleStart(MM_EnvironmentBase *env)583{584OMRPORT_ACCESS_FROM_ENVIRONMENT(env);585/* Let VM know that GC cycle is about to start. 
JIT, in particular uses it,586* to not compile while GC cycle is on.587*/588omrthread_monitor_enter(env->getOmrVM()->_gcCycleOnMonitor);589env->getOmrVM()->_gcCycleOn = 1;590591uintptr_t approximateFreeMemorySize = _memoryPool->getApproximateFreeMemorySize();592593Trc_MM_CycleStart(env->getLanguageVMThread(), env->_cycleState->_type, approximateFreeMemorySize);594595MM_CommonGCData commonData;596597TRIGGER_J9HOOK_MM_OMR_GC_CYCLE_START(598_extensions->omrHookInterface,599env->getOmrVMThread(),600omrtime_hires_clock(),601J9HOOK_MM_OMR_GC_CYCLE_START,602_extensions->getHeap()->initializeCommonGCData(env, &commonData),603env->_cycleState->_type604);605omrthread_monitor_exit(env->getOmrVM()->_gcCycleOnMonitor);606}607608/**609* @todo Provide method documentation610* @ingroup GC_Metronome methodGroup611*/612void613MM_RealtimeGC::reportGCCycleEnd(MM_EnvironmentBase *env)614{615OMRPORT_ACCESS_FROM_ENVIRONMENT(env);616omrthread_monitor_enter(env->getOmrVM()->_gcCycleOnMonitor);617618uintptr_t approximateFreeMemorySize = _memoryPool->getApproximateFreeMemorySize();619620Trc_MM_CycleEnd(env->getLanguageVMThread(), env->_cycleState->_type, approximateFreeMemorySize);621622MM_CommonGCData commonData;623624TRIGGER_J9HOOK_MM_PRIVATE_GC_POST_CYCLE_END(625_extensions->privateHookInterface,626env->getOmrVMThread(),627omrtime_hires_clock(),628J9HOOK_MM_PRIVATE_GC_POST_CYCLE_END,629_extensions->getHeap()->initializeCommonGCData(env, &commonData),630env->_cycleState->_type,631_extensions->globalGCStats.workPacketStats.getSTWWorkStackOverflowOccured(),632_extensions->globalGCStats.workPacketStats.getSTWWorkStackOverflowCount(),633_extensions->globalGCStats.workPacketStats.getSTWWorkpacketCountAtOverflow(),634_extensions->globalGCStats.fixHeapForWalkReason,635_extensions->globalGCStats.fixHeapForWalkTime636);637638/* If GC cycle just finished, and trigger start was previously generated, generate trigger end now */639if (_memoryPool->getBytesInUse() < _extensions->gcInitialTrigger) 
{640_previousCycleBelowTrigger = true;641TRIGGER_J9HOOK_MM_PRIVATE_METRONOME_TRIGGER_END(_extensions->privateHookInterface,642env->getOmrVMThread(), omrtime_hires_clock(),643J9HOOK_MM_PRIVATE_METRONOME_TRIGGER_END644);645}646647/* Let VM (JIT, in particular) GC cycle is finished. Do a monitor notify, to648* unblock parties that waited for the cycle to complete649*/650env->getOmrVM()->_gcCycleOn = 0;651omrthread_monitor_notify_all(env->getOmrVM()->_gcCycleOnMonitor);652653omrthread_monitor_exit(env->getOmrVM()->_gcCycleOnMonitor);654}655656/**657* @todo Provide method documentation658* @ingroup GC_Metronome methodGroup659*/660bool661MM_RealtimeGC::heapAddRange(MM_EnvironmentBase *env, MM_MemorySubSpace *subspace, uintptr_t size, void *lowAddress, void *highAddress)662{663bool result = _markingScheme->heapAddRange(env, subspace, size, lowAddress, highAddress);664665if (result) {666if(NULL != _extensions->referenceChainWalkerMarkMap) {667result = _extensions->referenceChainWalkerMarkMap->heapAddRange(env, size, lowAddress, highAddress);668if (!result) {669/* Expansion of Reference Chain Walker Mark Map has failed670* Marking Scheme expansion must be reversed671*/672_markingScheme->heapRemoveRange(env, subspace, size, lowAddress, highAddress, NULL, NULL);673}674}675}676return result;677}678679/**680*/681bool682MM_RealtimeGC::heapRemoveRange(683MM_EnvironmentBase *env, MM_MemorySubSpace *subspace, uintptr_t size, void *lowAddress, void *highAddress,684void *lowValidAddress, void *highValidAddress)685{686bool result = _markingScheme->heapRemoveRange(env, subspace, size, lowAddress, highAddress, lowValidAddress, highValidAddress);687688if(NULL != _extensions->referenceChainWalkerMarkMap) {689result = result && _extensions->referenceChainWalkerMarkMap->heapRemoveRange(env, size, lowAddress, highAddress, lowValidAddress, highValidAddress);690}691return result;692}693694/**695*/696bool697MM_RealtimeGC::collectorStartup(MM_GCExtensionsBase* 
extensions)698{699((MM_GlobalAllocationManagerSegregated *) extensions->globalAllocationManager)->setSweepScheme(_sweepScheme);700((MM_GlobalAllocationManagerSegregated *) extensions->globalAllocationManager)->setMarkingScheme(_markingScheme);701return true;702}703704/**705*/706void707MM_RealtimeGC::collectorShutdown(MM_GCExtensionsBase *extensions)708{709}710711/**712* Factory method for creating the work packets structure.713*714* @return the WorkPackets to be used for this Collector.715*/716MM_WorkPacketsRealtime*717MM_RealtimeGC::allocateWorkPackets(MM_EnvironmentBase *env)718{719return MM_WorkPacketsRealtime::newInstance(env);720}721722/**723* Calls the Scheduler's yielding API to determine if the GC should yield.724* @return true if the GC should yield, false otherwise725*/726bool727MM_RealtimeGC::shouldYield(MM_EnvironmentBase *env)728{729return _sched->shouldGCYield(MM_EnvironmentRealtime::getEnvironment(env), 0);730}731732/**733* Yield from GC by calling the Scheduler's API.734*/735void736MM_RealtimeGC::yield(MM_EnvironmentBase *env)737{738_sched->yieldFromGC(MM_EnvironmentRealtime::getEnvironment(env));739}740741/**742* Yield only if the Scheduler deems yielding should occur at the time of the743* call to this method.744*/745bool746MM_RealtimeGC::condYield(MM_EnvironmentBase *env, U_64 timeSlackNanoSec)747{748return _sched->condYieldFromGC(MM_EnvironmentRealtime::getEnvironment(env), timeSlackNanoSec);749}750751bool752MM_RealtimeGC::isMarked(void *objectPtr)753{754return _markingScheme->isMarked((omrobjectptr_t)(objectPtr));755}756757void758MM_RealtimeGC::reportMarkStart(MM_EnvironmentBase *env)759{760OMRPORT_ACCESS_FROM_ENVIRONMENT(env);761Trc_MM_MarkStart(env->getLanguageVMThread());762763TRIGGER_J9HOOK_MM_PRIVATE_MARK_START(764_extensions->privateHookInterface,765env->getOmrVMThread(),766omrtime_hires_clock(),767J9HOOK_MM_PRIVATE_MARK_START);768}769770void771MM_RealtimeGC::reportMarkEnd(MM_EnvironmentBase 
*env)772{773OMRPORT_ACCESS_FROM_ENVIRONMENT(env);774Trc_MM_MarkEnd(env->getLanguageVMThread());775776TRIGGER_J9HOOK_MM_PRIVATE_MARK_END(777_extensions->privateHookInterface,778env->getOmrVMThread(),779omrtime_hires_clock(),780J9HOOK_MM_PRIVATE_MARK_END);781}782783void784MM_RealtimeGC::reportSweepStart(MM_EnvironmentBase *env)785{786OMRPORT_ACCESS_FROM_ENVIRONMENT(env);787Trc_MM_SweepStart(env->getLanguageVMThread());788789TRIGGER_J9HOOK_MM_PRIVATE_SWEEP_START(790_extensions->privateHookInterface,791env->getOmrVMThread(),792omrtime_hires_clock(),793J9HOOK_MM_PRIVATE_SWEEP_START);794}795796void797MM_RealtimeGC::reportSweepEnd(MM_EnvironmentBase *env)798{799OMRPORT_ACCESS_FROM_ENVIRONMENT(env);800Trc_MM_SweepEnd(env->getLanguageVMThread());801802TRIGGER_J9HOOK_MM_PRIVATE_SWEEP_END(803_extensions->privateHookInterface,804env->getOmrVMThread(),805omrtime_hires_clock(),806J9HOOK_MM_PRIVATE_SWEEP_END);807}808809void810MM_RealtimeGC::reportGCStart(MM_EnvironmentBase *env)811{812uintptr_t scavengerCount = 0;813OMRPORT_ACCESS_FROM_ENVIRONMENT(env);814Trc_MM_GlobalGCStart(env->getLanguageVMThread(), _extensions->globalGCStats.gcCount);815816TRIGGER_J9HOOK_MM_OMR_GLOBAL_GC_START(817_extensions->omrHookInterface,818env->getOmrVMThread(),819omrtime_hires_clock(),820J9HOOK_MM_OMR_GLOBAL_GC_START,821_extensions->globalGCStats.gcCount,822scavengerCount,823env->_cycleState->_gcCode.isExplicitGC() ? 1 : 0,824env->_cycleState->_gcCode.isAggressiveGC() ? 
1: 0,825_bytesRequested);826}827828void829MM_RealtimeGC::reportGCEnd(MM_EnvironmentBase *env)830{831OMRPORT_ACCESS_FROM_ENVIRONMENT(env);832uintptr_t approximateNewActiveFreeMemorySize = _extensions->heap->getApproximateActiveFreeMemorySize(MEMORY_TYPE_NEW);833uintptr_t newActiveMemorySize = _extensions->heap->getActiveMemorySize(MEMORY_TYPE_NEW);834uintptr_t approximateOldActiveFreeMemorySize = _extensions->heap->getApproximateActiveFreeMemorySize(MEMORY_TYPE_OLD);835uintptr_t oldActiveMemorySize = _extensions->heap->getActiveMemorySize(MEMORY_TYPE_OLD);836uintptr_t approximateLoaActiveFreeMemorySize = (_extensions->largeObjectArea ? _extensions->heap->getApproximateActiveFreeLOAMemorySize(MEMORY_TYPE_OLD) : 0 );837uintptr_t loaActiveMemorySize = (_extensions->largeObjectArea ? _extensions->heap->getActiveLOAMemorySize(MEMORY_TYPE_OLD) : 0 );838839/* not including LOA in total (already accounted by OLD */840uintptr_t approximateTotalActiveFreeMemorySize = approximateNewActiveFreeMemorySize + approximateOldActiveFreeMemorySize;841uintptr_t totalActiveMemorySizeTotal = newActiveMemorySize + oldActiveMemorySize;842843844Trc_MM_GlobalGCEnd(env->getLanguageVMThread(),845_extensions->globalGCStats.workPacketStats.getSTWWorkStackOverflowOccured(),846_extensions->globalGCStats.workPacketStats.getSTWWorkStackOverflowCount(),847approximateTotalActiveFreeMemorySize,848totalActiveMemorySizeTotal849);850851/* these are assigned to temporary variable out-of-line since some preprocessors get confused if you have directives in macros */852uintptr_t approximateActiveFreeMemorySize = 0;853uintptr_t activeMemorySize = 
0;854855TRIGGER_J9HOOK_MM_OMR_GLOBAL_GC_END(856_extensions->omrHookInterface,857env->getOmrVMThread(),858omrtime_hires_clock(),859J9HOOK_MM_OMR_GLOBAL_GC_END,860_extensions->globalGCStats.workPacketStats.getSTWWorkStackOverflowOccured(),861_extensions->globalGCStats.workPacketStats.getSTWWorkStackOverflowCount(),862_extensions->globalGCStats.workPacketStats.getSTWWorkpacketCountAtOverflow(),863approximateNewActiveFreeMemorySize,864newActiveMemorySize,865approximateOldActiveFreeMemorySize,866oldActiveMemorySize,867(_extensions-> largeObjectArea ? 1 : 0),868approximateLoaActiveFreeMemorySize,869loaActiveMemorySize,870/* We can't just ask the heap for everything of type FIXED, because that includes scopes as well */871approximateActiveFreeMemorySize,872activeMemorySize,873_extensions->globalGCStats.fixHeapForWalkReason,874_extensions->globalGCStats.fixHeapForWalkTime875);876}877878/**879* Enables the write barrier, this should be called at the beginning of the mark phase.880*/881void882MM_RealtimeGC::enableWriteBarrier(MM_EnvironmentBase* env)883{884MM_GCExtensionsBase* extensions = env->getExtensions();885extensions->sATBBarrierRememberedSet->restoreGlobalFragmentIndex(env);886}887888/**889* Disables the write barrier, this should be called at the end of the mark phase.890*/891void892MM_RealtimeGC::disableWriteBarrier(MM_EnvironmentBase* env)893{894MM_GCExtensionsBase* extensions = env->getExtensions();895extensions->sATBBarrierRememberedSet->preserveGlobalFragmentIndex(env);896}897898void899MM_RealtimeGC::flushRememberedSet(MM_EnvironmentRealtime *env)900{901if (_workPackets->inUsePacketsAvailable(env)) {902_workPackets->moveInUseToNonEmpty(env);903_extensions->sATBBarrierRememberedSet->flushFragments(env);904}905}906907/**908* Perform the tracing phase. 
For tracing to be complete the work stack and rememberedSet909* have to be empty and class tracing has to complete without marking any objects.910*911* If concurrentMarkingEnabled is true then tracing is completed concurrently.912*/913void914MM_RealtimeGC::completeMarking(MM_EnvironmentRealtime *env)915{916917do {918if (env->_currentTask->synchronizeGCThreadsAndReleaseMain(env, UNIQUE_ID)) {919flushRememberedSet(env);920if (_extensions->concurrentTracingEnabled) {921setCollectorConcurrentTracing();922_realtimeDelegate.releaseExclusiveVMAccess(env, _sched->_exclusiveVMAccessRequired);923} else {924setCollectorTracing();925}926927_moreTracingRequired = false;928929/* From this point on the Scheduler collaborates with WorkPacketsRealtime on yielding.930* Strictly speaking this should be done first thing in incrementalCompleteScan().931* However, it would require another synchronizeGCThreadsAndReleaseMain barrier.932* So we are just reusing the existing one.933*/934_sched->pushYieldCollaborator(_workPackets->getYieldCollaborator());935936env->_currentTask->releaseSynchronizedGCThreads(env);937}938939if(_markingScheme->incrementalCompleteScan(env, MAX_UINT)) {940_moreTracingRequired = true;941}942943if (env->_currentTask->synchronizeGCThreadsAndReleaseMain(env, UNIQUE_ID)) {944/* restore the old Yield Collaborator */945_sched->popYieldCollaborator();946947if (_extensions->concurrentTracingEnabled) {948_realtimeDelegate.acquireExclusiveVMAccess(env, _sched->_exclusiveVMAccessRequired);949setCollectorTracing();950}951_moreTracingRequired |= _realtimeDelegate.doTracing(env);952953/* the workStack and rememberedSet use the same workPackets954* as backing store. 
If all packets are empty this means the955* workStack and rememberedSet processing are both complete.956*/957_moreTracingRequired |= !_workPackets->isAllPacketsEmpty();958env->_currentTask->releaseSynchronizedGCThreads(env);959}960} while(_moreTracingRequired);961}962963void964MM_RealtimeGC::enableDoubleBarrier(MM_EnvironmentBase* env)965{966_realtimeDelegate.enableDoubleBarrier(env);967}968969void970MM_RealtimeGC::disableDoubleBarrierOnThread(MM_EnvironmentBase* env, OMR_VMThread *vmThread)971{972_realtimeDelegate.disableDoubleBarrierOnThread(env, vmThread);973}974975void976MM_RealtimeGC::disableDoubleBarrier(MM_EnvironmentBase* env)977{978_realtimeDelegate.disableDoubleBarrier(env);979}980981982983