Path: blob/master/runtime/gc_vlhgc/CopyForwardScheme.cpp
/*******************************************************************************
 * Copyright (c) 1991, 2022 IBM Corp. and others
 *
 * This program and the accompanying materials are made available under
 * the terms of the Eclipse Public License 2.0 which accompanies this
 * distribution and is available at https://www.eclipse.org/legal/epl-2.0/
 * or the Apache License, Version 2.0 which accompanies this distribution and
 * is available at https://www.apache.org/licenses/LICENSE-2.0.
 *
 * This Source Code may also be made available under the following
 * Secondary Licenses when the conditions for such availability set
 * forth in the Eclipse Public License, v. 2.0 are satisfied: GNU
 * General Public License, version 2 with the GNU Classpath
 * Exception [1] and GNU General Public License, version 2 with the
 * OpenJDK Assembly Exception [2].
 *
 * [1] https://www.gnu.org/software/classpath/license.html
 * [2] http://openjdk.java.net/legal/assembly-exception.html
 *
 * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 OR GPL-2.0 WITH Classpath-exception-2.0 OR LicenseRef-GPL-2.0 WITH Assembly-exception
 *******************************************************************************/

#include "j9.h"
#include "j9cfg.h"
#include "j9protos.h"
#include "j9consts.h"
#include "j2sever.h"
#include "modronopt.h"
#include "ModronAssertions.h"

#include <string.h>

#include "mmhook_internal.h"

#include "CopyForwardScheme.hpp"

#include "AllocateDescription.hpp"
#include "AllocationContextTarok.hpp"
#include "ArrayletLeafIterator.hpp"
#include "AtomicOperations.hpp"
#include "Bits.hpp"
#include "CardCleaner.hpp"
#include "CardListFlushTask.hpp"
#include "CardTable.hpp"
#include "ClassHeapIterator.hpp"
#include "ClassIterator.hpp"
#include "ClassLoaderClassesIterator.hpp"
#include "ClassLoaderIterator.hpp"
#include "ClassLoaderRememberedSet.hpp"
#include "CopyForwardSchemeTask.hpp"
#include "CompactGroupManager.hpp"
#include "CompactGroupPersistentStats.hpp"
#include "CompressedCardTable.hpp"
#include "CopyForwardCompactGroup.hpp"
#include "CopyForwardGMPCardCleaner.hpp"
#include "CopyForwardNoGMPCardCleaner.hpp"
#include "CopyScanCacheChunkVLHGCInHeap.hpp"
#include "CopyScanCacheListVLHGC.hpp"
#include "CopyScanCacheVLHGC.hpp"
#include "CycleState.hpp"
#include "EnvironmentBase.hpp"
#include "EnvironmentVLHGC.hpp"
#include "FinalizableObjectBuffer.hpp"
#include "FinalizableReferenceBuffer.hpp"
#include "FinalizeListManager.hpp"
#include "ForwardedHeader.hpp"
#include "GlobalAllocationManager.hpp"
#include "Heap.hpp"
#include "HeapMapIterator.hpp"
#include "HeapMapWordIterator.hpp"
#include "HeapRegionDescriptorVLHGC.hpp"
#include "HeapRegionIteratorVLHGC.hpp"
#include "HeapRegionManager.hpp"
#include "HotFieldUtil.hpp"
#include "InterRegionRememberedSet.hpp"
#include "MarkMap.hpp"
#include "MemorySpace.hpp"
#include "MemorySubSpace.hpp"
#include "ObjectAccessBarrier.hpp"
#include "ObjectAllocationInterface.hpp"
#include "ObjectHeapIteratorAddressOrderedList.hpp"
#include "ObjectIteratorState.hpp"
#include "ObjectModel.hpp"
#include "ParallelDispatcher.hpp"
#include "PacketSlotIterator.hpp"
#include "ParallelTask.hpp"
#include "ReferenceObjectBuffer.hpp"
#include "ReferenceObjectList.hpp"
#include "ReferenceStats.hpp"
#include "RegionBasedOverflowVLHGC.hpp"
#include "RootScanner.hpp"
#include "SlotObject.hpp"
#include "StackSlotValidator.hpp"
#include "SublistFragment.hpp"
#include "SublistIterator.hpp"
#include "SublistPool.hpp"
#include "SublistPuddle.hpp"
#include "SublistSlotIterator.hpp"
#include "SurvivorMemoryIterator.hpp"
#include "WorkPacketsIterator.hpp"
#include "WorkPacketsVLHGC.hpp"

#define INITIAL_FREE_HISTORY_WEIGHT ((float)0.8)
#define TENURE_BYTES_HISTORY_WEIGHT ((float)0.8)

#define SCAN_CACHES_PER_THREAD 1 /* each thread has 1 scan cache */
#define DEFERRED_CACHES_PER_THREAD 1 /* each thread has 1 deferred cache (hierarchical scan ordering only) */

#define SCAN_TO_COPY_CACHE_MAX_DISTANCE (UDATA_MAX)

/* VM Design 1774: Ideally we would pull these cache line values from the port library but this will suffice for
 * a quick implementation
 */
#if defined(AIXPPC) || defined(LINUXPPC)
#define CACHE_LINE_SIZE 128
#elif defined(J9ZOS390) || (defined(LINUX) && defined(S390))
#define CACHE_LINE_SIZE 256
#else
#define CACHE_LINE_SIZE 64
#endif
/* create macros to interpret the hot field descriptor */
#define HOTFIELD_SHOULD_ALIGN(descriptor) (0x1 == (0x1 & (descriptor)))
#define HOTFIELD_ALIGNMENT_BIAS(descriptor, heapObjectAlignment) (((descriptor) >> 1) * (heapObjectAlignment))

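/* Example: a descriptor of 0x5 has the low bit set, so HOTFIELD_SHOULD_ALIGN(0x5) is true,
 * and with an 8-byte heapObjectAlignment HOTFIELD_ALIGNMENT_BIAS(0x5, 8) yields (0x5 >> 1) * 8 = 16 bytes.
 */
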
"SublistPool.hpp"96#include "SublistPuddle.hpp"97#include "SublistSlotIterator.hpp"98#include "SurvivorMemoryIterator.hpp"99#include "WorkPacketsIterator.hpp"100#include "WorkPacketsVLHGC.hpp"101102#define INITIAL_FREE_HISTORY_WEIGHT ((float)0.8)103#define TENURE_BYTES_HISTORY_WEIGHT ((float)0.8)104105#define SCAN_CACHES_PER_THREAD 1 /* each thread has 1 scan cache */106#define DEFERRED_CACHES_PER_THREAD 1 /* each thread has 1 deferred cache (hierarchical scan ordering only) */107108#define SCAN_TO_COPY_CACHE_MAX_DISTANCE (UDATA_MAX)109110/* VM Design 1774: Ideally we would pull these cache line values from the port library but this will suffice for111* a quick implementation112*/113#if defined(AIXPPC) || defined(LINUXPPC)114#define CACHE_LINE_SIZE 128115#elif defined(J9ZOS390) || (defined(LINUX) && defined(S390))116#define CACHE_LINE_SIZE 256117#else118#define CACHE_LINE_SIZE 64119#endif120/* create macros to interpret the hot field descriptor */121#define HOTFIELD_SHOULD_ALIGN(descriptor) (0x1 == (0x1 & (descriptor)))122#define HOTFIELD_ALIGNMENT_BIAS(descriptor, heapObjectAlignment) (((descriptor) >> 1) * (heapObjectAlignment))123124/* give a name to the common context. Note that this may need to be stored locally and fetched, at start-up,125* if the common context disappears or becomes defined in a more complicated fashion126*/127#define COMMON_CONTEXT_INDEX 0128129/* If scavenger dynamicBreadthFirstScanOrdering and alwaysDepthCopyFirstOffset is enabled, always copy the first offset of each object after the object itself is copied */130#define DEFAULT_HOT_FIELD_OFFSET 1131132#define AllCompressedCardsInWordClean 0133#define AllCompressedCardsInByteClean 0134#define AllCompressedCardsInWordSurvivor UDATA_MAX135#define AllCompressedCardsInByteSurvivor U_8_MAX136#define CompressedCardSurvivor 1137138MM_CopyForwardScheme::MM_CopyForwardScheme(MM_EnvironmentVLHGC *env, MM_HeapRegionManager *manager)139: MM_BaseNonVirtual()140, _javaVM((J9JavaVM *)env->getLanguageVM())141, _extensions(MM_GCExtensions::getExtensions(env))142, _regionManager(manager)143, _interRegionRememberedSet(NULL)144, _reservedRegionList(NULL)145, _compactGroupMaxCount(MM_CompactGroupManager::getCompactGroupMaxCount(env))146, _phantomReferenceRegionsToProcess(0)147, _minCacheSize(0)148, _maxCacheSize(0)149, _dispatcher(_extensions->dispatcher)150, _cacheFreeList()151, _cacheScanLists(NULL)152, _scanCacheListSize(_extensions->_numaManager.getMaximumNodeNumber() + 1)153, _scanCacheWaitCount(0)154, _scanCacheMonitor(NULL)155, _workQueueWaitCountPtr(&_scanCacheWaitCount)156, _workQueueMonitorPtr(&_scanCacheMonitor)157, _doneIndex(0)158, _markMap(NULL)159, _heapBase(NULL)160, _heapTop(NULL)161, _abortFlag(false)162, _abortInProgress(false)163, _regionCountCannotBeEvacuated(0)164, _regionCountReservedNonEvacuated(0)165, _cacheLineAlignment(0)166, _clearableProcessingStarted(false)167#if defined(J9VM_GC_DYNAMIC_CLASS_UNLOADING)168, _dynamicClassUnloadingEnabled(false)169#endif /* J9VM_GC_DYNAMIC_CLASS_UNLOADING */170, _collectStringConstantsEnabled(false)171, _tracingEnabled(false)172, _commonContext(NULL)173, _compactGroupBlock(NULL)174, _arraySplitSize(0)175, _regionSublistContentionThreshold(0)176, _failedToExpand(false)177, _shouldScanFinalizableObjects(false)178, _objectAlignmentInBytes(env->getObjectAlignmentInBytes())179, _compressedSurvivorTable(NULL)180{181_typeId = __FUNCTION__;182}183184MM_CopyForwardScheme *185MM_CopyForwardScheme::newInstance(MM_EnvironmentVLHGC *env, MM_HeapRegionManager 
MM_CopyForwardScheme *
MM_CopyForwardScheme::newInstance(MM_EnvironmentVLHGC *env, MM_HeapRegionManager *manager)
{
	MM_CopyForwardScheme *scheme = (MM_CopyForwardScheme *)env->getForge()->allocate(sizeof(MM_CopyForwardScheme), MM_AllocationCategory::FIXED, J9_GET_CALLSITE());
	if (scheme) {
		new(scheme) MM_CopyForwardScheme(env, manager);
		if (!scheme->initialize(env)) {
			scheme->kill(env);
			scheme = NULL;
		}
	}
	return scheme;
}

void
MM_CopyForwardScheme::kill(MM_EnvironmentVLHGC *env)
{
	tearDown(env);
	env->getForge()->free(this);
}

bool
MM_CopyForwardScheme::initialize(MM_EnvironmentVLHGC *env)
{
	MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);

	if (!_cacheFreeList.initialize(env)) {
		return false;
	}
	UDATA listsToCreate = _scanCacheListSize;
	UDATA scanListsSizeInBytes = sizeof(MM_CopyScanCacheListVLHGC) * listsToCreate;
	_cacheScanLists = (MM_CopyScanCacheListVLHGC *)env->getForge()->allocate(scanListsSizeInBytes, MM_AllocationCategory::FIXED, J9_GET_CALLSITE());
	if (NULL == _cacheScanLists) {
		return false;
	}
	memset((void*)_cacheScanLists, 0x0, scanListsSizeInBytes);
	for (UDATA i = 0; i < listsToCreate; i++) {
		new(&_cacheScanLists[i]) MM_CopyScanCacheListVLHGC();
		if (!_cacheScanLists[i].initialize(env)) {
			/* if we failed part-way through the list, adjust the _scanCacheListSize since tearDown will otherwise fail to
			 * invoke on the entries in the array which didn't have their constructors called
			 */
			_scanCacheListSize = i + 1;
			return false;
		}
	}
	if(omrthread_monitor_init_with_name(&_scanCacheMonitor, 0, "MM_CopyForwardScheme::cache")) {
		return false;
	}

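	/* Rough illustration of the sizing below: with, say, 8 GC threads and 16 compact groups in breadth-first
	 * ordering, cachesPerThread is 1 (scan) + 16 (copy) = 17, so minCacheCount is 8 * 17 = 136 caches.
	 * heapCaches (memoryMax / tlhMaximumSize) usually dominates on large heaps; the larger of the two is used.
	 */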
	/* Get the estimated cache count required. The cachesPerThread argument is used to ensure there are at least enough active
	 * caches for all working threads (threadCount * cachesPerThread)
	 */
	UDATA threadCount = extensions->dispatcher->threadCountMaximum();
	UDATA compactGroupCount = MM_CompactGroupManager::getCompactGroupMaxCount(env);

	/* Each thread can have a scan cache and compactGroupCount copy caches. In hierarchical, there could also be a deferred cache. */
	UDATA cachesPerThread = SCAN_CACHES_PER_THREAD;
	cachesPerThread += compactGroupCount; /* copy caches */
	switch (_extensions->scavengerScanOrdering) {
	case MM_GCExtensions::OMR_GC_SCAVENGER_SCANORDERING_BREADTH_FIRST:
	case MM_GCExtensions::OMR_GC_SCAVENGER_SCANORDERING_DYNAMIC_BREADTH_FIRST:
		break;
	case MM_GCExtensions::OMR_GC_SCAVENGER_SCANORDERING_HIERARCHICAL:
		cachesPerThread += DEFERRED_CACHES_PER_THREAD;
		break;
	default:
		Assert_MM_unreachable();
		break;
	}

	UDATA minCacheCount = threadCount * cachesPerThread;

	/* Estimate how many caches we might need to describe the entire heap */
	UDATA heapCaches = extensions->memoryMax / extensions->tlhMaximumSize;

	/* use whichever value is higher */
	UDATA totalCacheCount = OMR_MAX(minCacheCount, heapCaches);

	if (!_cacheFreeList.resizeCacheEntries(env, totalCacheCount)) {
		return false;
	}

	/* Create and initialize the owned region lists to maintain resource for survivor area heap acquisition */
	_reservedRegionList = (MM_ReservedRegionListHeader *)env->getForge()->allocate(sizeof(MM_ReservedRegionListHeader) * _compactGroupMaxCount, MM_AllocationCategory::FIXED, J9_GET_CALLSITE());
	if(NULL == _reservedRegionList) {
		return false;
	}

	memset((void *)_reservedRegionList, 0, sizeof(MM_ReservedRegionListHeader) * _compactGroupMaxCount);
	for(UDATA index = 0; index < _compactGroupMaxCount; index++) {
		_reservedRegionList[index]._maxSublistCount = 1;
		_reservedRegionList[index]._sublistCount = 1;
		_reservedRegionList[index]._evacuateRegionCount = 0;
		for (UDATA sublistIndex = 0; sublistIndex < MM_ReservedRegionListHeader::MAX_SUBLISTS; sublistIndex++) {
			_reservedRegionList[index]._sublists[sublistIndex]._head = NULL;
			_reservedRegionList[index]._sublists[sublistIndex]._cacheAcquireCount = 0;
			_reservedRegionList[index]._sublists[sublistIndex]._cacheAcquireBytes = 0;
			if(!_reservedRegionList[index]._sublists[sublistIndex]._lock.initialize(env, &_extensions->lnrlOptions, "MM_CopyForwardScheme:_reservedRegionList[]._sublists[]._lock")) {
				return false;
			}
		}
		_reservedRegionList[index]._freeMemoryCandidates = NULL;
		_reservedRegionList[index]._freeMemoryCandidateCount = 0;
		if(!_reservedRegionList[index]._freeMemoryCandidatesLock.initialize(env, &_extensions->lnrlOptions, "MM_CopyForwardScheme:_reservedRegionList[]._freeMemoryCandidatesLock")) {
			return false;
		}
	}

	/* Set the min/max sizes for copy scan cache allocation when allocating a general purpose area (does not include non-standard sized objects) */
	_minCacheSize = _extensions->tlhMinimumSize;
	_maxCacheSize = _extensions->tlhMaximumSize;

	/* Cached pointer to the inter region remembered set */
	_interRegionRememberedSet = MM_GCExtensions::getExtensions(env)->interRegionRememberedSet;

	_cacheLineAlignment = CACHE_LINE_SIZE;

	/* TODO: how to determine this value? It should be large enough that each thread does
	 * real work, but small enough to give good sharing
	 */
	/* Note: this value should divide evenly into the arraylet leaf size so that each chunk
	 * is a block of contiguous memory
	 */
	_arraySplitSize = 4096;

	/* allocate the per-thread, per-compact-group data structures */
	Assert_MM_true(0 != _extensions->gcThreadCount);
	UDATA allocateSize = sizeof(MM_CopyForwardCompactGroup) * _extensions->gcThreadCount * _compactGroupMaxCount;
	_compactGroupBlock = (MM_CopyForwardCompactGroup *)_extensions->getForge()->allocate(allocateSize, MM_AllocationCategory::FIXED, J9_GET_CALLSITE());
	if (NULL == _compactGroupBlock) {
		return false;
	}

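	/* The compressed survivor table is a bitmap with one bit per card of the maximum physical heap range
	 * (hence the division by CARD_SIZE * BITS_PER_BYTE below). Bits are set for newly reserved survivor
	 * ranges in setCompressedSurvivorCards() and queried through isCompressedSurvivor().
	 */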
	/* Calculate compressed Survivor table size in bytes */
	UDATA compressedSurvivorTableSize = _extensions->heap->getMaximumPhysicalRange() / (CARD_SIZE * BITS_PER_BYTE);
	_compressedSurvivorTable = (UDATA *)env->getForge()->allocate(compressedSurvivorTableSize, MM_AllocationCategory::FIXED, J9_GET_CALLSITE());
	if (NULL == _compressedSurvivorTable) {
		return false;
	}

	return true;
}

void
MM_CopyForwardScheme::tearDown(MM_EnvironmentVLHGC *env)
{
	_cacheFreeList.tearDown(env);
	if (NULL != _cacheScanLists) {
		UDATA listCount = _scanCacheListSize;
		for (UDATA i = 0; i < listCount; i++) {
			_cacheScanLists[i].tearDown(env);
		}
		env->getForge()->free(_cacheScanLists);
		_cacheScanLists = NULL;
	}

	if (NULL != _scanCacheMonitor) {
		omrthread_monitor_destroy(_scanCacheMonitor);
		_scanCacheMonitor = NULL;
	}

	if(NULL != _reservedRegionList) {
		for(UDATA index = 0; index < _compactGroupMaxCount; index++) {
			for (UDATA sublistIndex = 0; sublistIndex < MM_ReservedRegionListHeader::MAX_SUBLISTS; sublistIndex++) {
				_reservedRegionList[index]._sublists[sublistIndex]._lock.tearDown();
			}
			_reservedRegionList[index]._freeMemoryCandidatesLock.tearDown();
		}
		env->getForge()->free(_reservedRegionList);
		_reservedRegionList = NULL;
	}

	if (NULL != _compactGroupBlock) {
		env->getForge()->free(_compactGroupBlock);
		_compactGroupBlock = NULL;
	}

	if (NULL != _compressedSurvivorTable) {
		env->getForge()->free(_compressedSurvivorTable);
		_compressedSurvivorTable = NULL;
	}
}

MM_AllocationContextTarok *
MM_CopyForwardScheme::getPreferredAllocationContext(MM_AllocationContextTarok *suggestedContext, J9Object *objectPtr)
{
	MM_AllocationContextTarok *preferredContext = suggestedContext;

	if (preferredContext == _commonContext) {
		preferredContext = getContextForHeapAddress(objectPtr);
	} /* no code beyond this point without modifying else statement below */
	return preferredContext;
}

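/* The abort flag is raised with a double-checked pattern: an unsynchronized read of _abortFlag,
 * then a re-check under _workQueueMonitorPtr before setting it. Only the thread that actually
 * transitions the flag records the statistic, emits the tracepoint and triggers the abort hook;
 * waiting threads are notified so they can drain out of the work monitor.
 */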
void
MM_CopyForwardScheme::raiseAbortFlag(MM_EnvironmentVLHGC *env)
{
	if (!_abortFlag) {
		bool didSetFlag = false;
		omrthread_monitor_enter(*_workQueueMonitorPtr);
		if (!_abortFlag) {
			_abortFlag = true;
			didSetFlag = true;
			/* if any threads are waiting, notify them so that they can get out of the monitor since nobody else is going to push work for them */
			if (0 != *_workQueueWaitCountPtr) {
				omrthread_monitor_notify_all(*_workQueueMonitorPtr);
			}
		}
		omrthread_monitor_exit(*_workQueueMonitorPtr);

		if (didSetFlag) {
			env->_copyForwardStats._aborted = true;

			Trc_MM_CopyForwardScheme_abortFlagRaised(env->getLanguageVMThread());
			PORT_ACCESS_FROM_ENVIRONMENT(env);
			TRIGGER_J9HOOK_MM_PRIVATE_COPY_FORWARD_ABORT(MM_GCExtensions::getExtensions(env)->privateHookInterface, env->getOmrVMThread(), j9time_hires_clock(), J9HOOK_MM_PRIVATE_COPY_FORWARD_ABORT);
		}
	}
}

/**
 * Clear any global stats associated with the copy forward scheme.
 */
void
MM_CopyForwardScheme::clearGCStats(MM_EnvironmentVLHGC *env)
{
	static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_vlhgcIncrementStats._copyForwardStats.clear();
	static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_vlhgcIncrementStats._workPacketStats.clear();
}

void
MM_CopyForwardScheme::updateLeafRegions(MM_EnvironmentVLHGC *env)
{
	GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);
	MM_HeapRegionDescriptorVLHGC *region = NULL;

	while(NULL != (region = regionIterator.nextRegion())) {
		if(region->isArrayletLeaf()) {
			J9Object *spineObject = (J9Object *)region->_allocateData.getSpine();
			Assert_MM_true(NULL != spineObject);

			J9Object *updatedSpineObject = updateForwardedPointer(spineObject);
			if(updatedSpineObject != spineObject) {
				MM_HeapRegionDescriptorVLHGC *spineRegion = (MM_HeapRegionDescriptorVLHGC*)_regionManager->tableDescriptorForAddress(spineObject);
				MM_HeapRegionDescriptorVLHGC *updatedSpineRegion = (MM_HeapRegionDescriptorVLHGC*)_regionManager->tableDescriptorForAddress(updatedSpineObject);

				Assert_MM_true(spineRegion->_markData._shouldMark);
				Assert_MM_true(spineRegion != updatedSpineRegion);
				Assert_MM_true(updatedSpineRegion->containsObjects());

				/* we need to move the leaf to another region's leaf list since its spine has moved */
				region->_allocateData.removeFromArrayletLeafList();
				region->_allocateData.addToArrayletLeafList(updatedSpineRegion);
				region->_allocateData.setSpine((J9IndexableObject *)updatedSpineObject);
			} else if (!isLiveObject(spineObject)) {
				Assert_MM_true(isObjectInEvacuateMemory(spineObject));
				/* the spine is in evacuate space so the arraylet is dead => recycle the leaf */
				/* remove arraylet leaf from list */
				region->_allocateData.removeFromArrayletLeafList();
				/* recycle */
				region->_allocateData.setSpine(NULL);
				region->getSubSpace()->recycleRegion(env, region);
			}
		}
	}
}

void
MM_CopyForwardScheme::preProcessRegions(MM_EnvironmentVLHGC *env)
{
	GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);
	MM_HeapRegionDescriptorVLHGC *region = NULL;

	UDATA ownableSynchronizerCandidates = 0;
	UDATA ownableSynchronizerCountInEden = 0;

	_regionCountCannotBeEvacuated = 0;

	while(NULL != (region = regionIterator.nextRegion())) {
		region->_copyForwardData._survivor = false;
		region->_copyForwardData._freshSurvivor = false;
		if(region->containsObjects()) {
			region->_copyForwardData._initialLiveSet = true;
			region->_copyForwardData._evacuateSet = region->_markData._shouldMark;
			if (region->_markData._shouldMark) {
				region->getUnfinalizedObjectList()->startUnfinalizedProcessing();
				ownableSynchronizerCandidates += region->getOwnableSynchronizerObjectList()->getObjectCount();
				if (region->isEden()) {
					ownableSynchronizerCountInEden += region->getOwnableSynchronizerObjectList()->getObjectCount();
				}
				region->getOwnableSynchronizerObjectList()->startOwnableSynchronizerProcessing();
				Assert_MM_true(region->getRememberedSetCardList()->isAccurate());
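				/* A marked region is excluded from evacuation (marked in place rather than copied) if JNI
				 * critical code is currently pinning it, if this cycle is not running copy-forward at all,
				 * or if the fvtest hybrid-ratio option forces/randomly selects it as a non-evacuate region.
				 */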
				if ((region->_criticalRegionsInUse > 0) || !env->_cycleState->_shouldRunCopyForward || (100 == _extensions->fvtest_forceCopyForwardHybridRatio) || (randomDecideForceNonEvacuatedRegion(_extensions->fvtest_forceCopyForwardHybridRatio))) {
					/* mark the region as noEvacuation for the copy forward collector */
					region->_markData._noEvacuation = true;
					_regionCountCannotBeEvacuated += 1;
				} else if ((_regionCountReservedNonEvacuated > 0) && region->isEden()){
					_regionCountReservedNonEvacuated -= 1;
					_regionCountCannotBeEvacuated += 1;
					region->_markData._noEvacuation = true;
				} else {
					region->_markData._noEvacuation = false;
				}
			}
		} else {
			region->_copyForwardData._evacuateSet = false;
		}

		region->getReferenceObjectList()->resetPriorLists();
		Assert_MM_false(region->_copyForwardData._requiresPhantomReferenceProcessing);
	}

	/* reset _regionCountReservedNonEvacuated */
	_regionCountReservedNonEvacuated = 0;
	/* ideally allocationStats._ownableSynchronizerObjectCount should be equal to ownableSynchronizerCountInEden,
	 * but a partially constructed ownableSynchronizerObject may have been moved during a previous PGC; notification for the new allocation happens after that GC,
	 * so it is counted as a new allocation but is no longer in an Eden region. Use a loose assertion for this special case.
	 */
	Assert_MM_true(_extensions->allocationStats._ownableSynchronizerObjectCount >= ownableSynchronizerCountInEden);
	static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_vlhgcIncrementStats._copyForwardStats._ownableSynchronizerCandidates = ownableSynchronizerCandidates;
}

void
MM_CopyForwardScheme::postProcessRegions(MM_EnvironmentVLHGC *env)
{
	GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);
	MM_HeapRegionDescriptorVLHGC *region = NULL;
	UDATA survivorSetRegionCount = 0;

	while(NULL != (region = regionIterator.nextRegion())) {
		MM_MemoryPool *pool = region->getMemoryPool();
		if (region->_copyForwardData._evacuateSet) {
			if (region->isEden()) {
				static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_vlhgcIncrementStats._copyForwardStats._edenEvacuateRegionCount += 1;
			} else {
				static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_vlhgcIncrementStats._copyForwardStats._nonEdenEvacuateRegionCount += 1;
			}
		} else if (region->isFreshSurvivorRegion()) {
			/* check Eden Survivor Regions */
			if (0 == region->getLogicalAge()) {
				static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_vlhgcIncrementStats._copyForwardStats._edenSurvivorRegionCount += 1;
			} else {
				static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_vlhgcIncrementStats._copyForwardStats._nonEdenSurvivorRegionCount += 1;
			}
		}

		/* Any region which is part of the survivor set should be set to "shouldMark" to appear as part of the collection set (and be swept, etc) */
		if(region->isSurvivorRegion()) {
			Assert_MM_true(region->containsObjects());
			Assert_MM_false(region->_copyForwardData._evacuateSet);
			Assert_MM_false(region->_markData._shouldMark);
			Assert_MM_false(region->_reclaimData._shouldReclaim);

			/* we do not count non-fresh regions, only regions that we acquired as free */
			if (region->isFreshSurvivorRegion()) {
				survivorSetRegionCount += 1;
			} else {
				((MM_MemoryPoolAddressOrderedList *)pool)->resetFirstUnalignedFreeEntry();
			}

			/* store back the remaining memory in the pool as free memory */
			region->_sweepData._alreadySwept = true;
			if (pool->getFreeMemoryAndDarkMatterBytes() == region->getSize()) {
				/* Collector converted this region from FREE/IDLE to ADDRESS_ORDERED, but never ended up using it
				 * (for example allocated some space but lost on forwarding the object). Convert it back to free.
				 */
				pool->reset(MM_MemoryPool::any);
				region->getSubSpace()->recycleRegion(env, region);
			} else {
				/* this is a non-empty merged region - estimate its age based on compact group */
				setAllocationAgeForMergedRegion(env, region);
			}
		}

		/* Clear any copy forward data */
		region->_copyForwardData._initialLiveSet = false;
		region->_copyForwardData._requiresPhantomReferenceProcessing = false;
		region->_copyForwardData._survivor = false;
		region->_copyForwardData._freshSurvivor = false;

		if (region->_copyForwardData._evacuateSet) {
			Assert_MM_true(region->_sweepData._alreadySwept);
			if (abortFlagRaised() || region->_markData._noEvacuation) {
				if (region->getRegionType() == MM_HeapRegionDescriptor::ADDRESS_ORDERED) {
					region->setRegionType(MM_HeapRegionDescriptor::ADDRESS_ORDERED_MARKED);
				} else {
					Assert_MM_true(region->getRegionType() == MM_HeapRegionDescriptor::ADDRESS_ORDERED_MARKED);
				}
				Assert_MM_false(region->_previousMarkMapCleared);
				/* we want to sweep and compact this region since we may have failed to completely evacuate it */
				Assert_MM_true(region->_markData._shouldMark);
				region->_sweepData._alreadySwept = false;
				region->_reclaimData._shouldReclaim = true;
			} else {
				pool->reset(MM_MemoryPool::any);
				region->getSubSpace()->recycleRegion(env, region);
			}
			region->_copyForwardData._evacuateSet = false;
		}
	}

	env->_cycleState->_pgcData._survivorSetRegionCount = survivorSetRegionCount;
	static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_vlhgcIncrementStats._copyForwardStats._nonEvacuateRegionCount = _regionCountCannotBeEvacuated;
}

/****************************************
 * Copy and Forward implementation
 ****************************************
 */

bool
MM_CopyForwardScheme::isLiveObject(J9Object *objectPtr)
{
	bool result = true;

	if(NULL != objectPtr) {
		Assert_MM_true(isHeapObject(objectPtr));

		if (!isObjectInSurvivorMemory(objectPtr)) {
			result = _markMap->isBitSet(objectPtr);
		}
	}

	return result;
}


MMINLINE bool
MM_CopyForwardScheme::isObjectInEvacuateMemory(J9Object *objectPtr)
{
	bool result = false;

	if(NULL != objectPtr) {
		result = isObjectInEvacuateMemoryNoCheck(objectPtr);
	}
	return result;
}

MMINLINE bool
MM_CopyForwardScheme::isObjectInEvacuateMemoryNoCheck(J9Object *objectPtr)
{
	bool result = false;

	MM_HeapRegionDescriptorVLHGC *region = NULL;
	region = (MM_HeapRegionDescriptorVLHGC *)_regionManager->tableDescriptorForAddress(objectPtr);
	result = region->_markData._shouldMark;
	return result;
}

MMINLINE bool
MM_CopyForwardScheme::isObjectInSurvivorMemory(J9Object *objectPtr)
{
	bool result = false;

	if(NULL != objectPtr) {
		MM_HeapRegionDescriptorVLHGC *region = NULL;
		region = (MM_HeapRegionDescriptorVLHGC *)_regionManager->tableDescriptorForAddress(objectPtr);
		Assert_MM_true(region->_copyForwardData._initialLiveSet || (!region->_markData._shouldMark && !region->_copyForwardData._initialLiveSet));
		result = region->isFreshSurvivorRegion();
		if (!result && region->isSurvivorRegion()) {
			result = isCompressedSurvivor((void*)objectPtr);
		}
	}
	return result;
}

MMINLINE bool
MM_CopyForwardScheme::isObjectInNurseryMemory(J9Object *objectPtr)
{
	bool result = false;

	if(NULL != objectPtr) {
		MM_HeapRegionDescriptorVLHGC *region = NULL;
		region = (MM_HeapRegionDescriptorVLHGC *)_regionManager->tableDescriptorForAddress(objectPtr);
		result = region->_markData._shouldMark || isObjectInSurvivorMemory(objectPtr);
	}
	return result;
}

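/* reinitCache() re-targets a per-compact-group copy cache at a newly reserved [base, top) extent and
 * resets the cached mark map head/tail slot indices that bound the atomic mark map updates for that extent.
 */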
MMINLINE void
MM_CopyForwardScheme::reinitCache(MM_EnvironmentVLHGC *env, MM_CopyScanCacheVLHGC *cache, void *base, void *top, UDATA compactGroup)
{
	MM_CopyForwardCompactGroup *compactGroupForMarkData = &(env->_copyForwardCompactGroups[compactGroup]);
	Assert_MM_true(cache == compactGroupForMarkData->_copyCache);
	cache->cacheBase = base;
	cache->cacheAlloc = base;
	cache->scanCurrent = base;
	cache->_hasPartiallyScannedObject = false;
	cache->cacheTop = top;

	/* set the mark map cached values to the initial state */
	/* Count one slot before the base in order to get the true atomic head location. Regions which do not start on a partial boundary will never see
	 * the slot previous.
	 */
	if(base == _heapBase) {
		/* Going below heap base would be strange - just use _heapTop which won't collide with anything */
		compactGroupForMarkData->_markMapAtomicHeadSlotIndex = _markMap->getSlotIndex((J9Object *)_heapTop);
	} else {
		compactGroupForMarkData->_markMapAtomicHeadSlotIndex = _markMap->getSlotIndex((J9Object *) (((UDATA)base) - _markMap->getObjectGrain()));
	}
	compactGroupForMarkData->_markMapAtomicTailSlotIndex = _markMap->getSlotIndex((J9Object *)top);
	compactGroupForMarkData->_markMapPGCSlotIndex = 0;
	compactGroupForMarkData->_markMapPGCBitMask = 0;
	compactGroupForMarkData->_markMapGMPSlotIndex = 0;
	compactGroupForMarkData->_markMapGMPBitMask = 0;

	Assert_MM_true(compactGroup < _compactGroupMaxCount);
	cache->_compactGroup = compactGroup;
	Assert_MM_true(0.0 == cache->_allocationAgeSizeProduct);

	MM_HeapRegionDescriptorVLHGC * region = (MM_HeapRegionDescriptorVLHGC *)_regionManager->tableDescriptorForAddress(cache->cacheBase);
	Trc_MM_CopyForwardScheme_reinitCache(env->getLanguageVMThread(), _regionManager->mapDescriptorToRegionTableIndex(region), cache,
			region->getAllocationAgeSizeProduct() / (1024 * 1024) / (1024 * 1024), (double)((UDATA)cache->cacheAlloc - (UDATA)region->getLowAddress()) / (1024 * 1024));

	/* store back the given flags */
	cache->flags = J9VM_MODRON_SCAVENGER_CACHE_TYPE_COPY | (cache->flags & J9VM_MODRON_SCAVENGER_CACHE_MASK_PERSISTENT);
}

MMINLINE void
MM_CopyForwardScheme::reinitArraySplitCache(MM_EnvironmentVLHGC *env, MM_CopyScanCacheVLHGC *cache, J9IndexableObject *array, UDATA nextIndex)
{
	cache->cacheBase = array;
	cache->cacheAlloc = array;
	cache->scanCurrent = array;
	cache->_hasPartiallyScannedObject = false;
	cache->cacheTop = array;
	cache->_arraySplitIndex = nextIndex;

	/* store back the appropriate flags */
	cache->flags = (J9VM_MODRON_SCAVENGER_CACHE_TYPE_SPLIT_ARRAY | J9VM_MODRON_SCAVENGER_CACHE_TYPE_CLEARED) | (cache->flags & J9VM_MODRON_SCAVENGER_CACHE_MASK_PERSISTENT);
}

void
MM_CopyForwardScheme::clearReservedRegionLists(MM_EnvironmentVLHGC *env)
{
	Trc_MM_CopyForwardScheme_clearReservedRegionLists_Entry(env->getLanguageVMThread(), _compactGroupMaxCount);

	for(UDATA index = 0; index < _compactGroupMaxCount; index++) {
		Trc_MM_CopyForwardScheme_clearReservedRegionLists_compactGroup(env->getLanguageVMThread(), index, _reservedRegionList[index]._evacuateRegionCount, _reservedRegionList[index]._sublistCount, _reservedRegionList[index]._maxSublistCount, _reservedRegionList[index]._freeMemoryCandidateCount);
		if (0 == _reservedRegionList[index]._freeMemoryCandidateCount) {
			Assert_MM_true(NULL == _reservedRegionList[index]._freeMemoryCandidates);
		} else {
			Assert_MM_true(NULL != _reservedRegionList[index]._freeMemoryCandidates);
		}

		for (UDATA sublistIndex = 0; sublistIndex < _reservedRegionList[index]._sublistCount; sublistIndex++) {
			MM_ReservedRegionListHeader::Sublist *regionList = &_reservedRegionList[index]._sublists[sublistIndex];
			MM_HeapRegionDescriptorVLHGC *region = regionList->_head;

			while(NULL != region) {
				MM_HeapRegionDescriptorVLHGC *next = region->_copyForwardData._nextRegion;

				releaseRegion(env, regionList, region);
				region = next;
			}

			if (0 != regionList->_cacheAcquireCount) {
				Trc_MM_CopyForwardScheme_clearReservedRegionLists_sublist(env->getLanguageVMThread(), index, sublistIndex, regionList->_cacheAcquireCount, regionList->_cacheAcquireBytes, regionList->_cacheAcquireBytes / regionList->_cacheAcquireCount);
			}

			regionList->_head = NULL;
			regionList->_cacheAcquireCount = 0;
			regionList->_cacheAcquireBytes = 0;
		}
		_reservedRegionList[index]._sublistCount = 1;
		_reservedRegionList[index]._maxSublistCount = 1;
		_reservedRegionList[index]._evacuateRegionCount = 0;
		_reservedRegionList[index]._freeMemoryCandidates = NULL;
		_reservedRegionList[index]._freeMemoryCandidateCount = 0;
	}

	Trc_MM_CopyForwardScheme_clearReservedRegionLists_Exit(env->getLanguageVMThread());
}

MM_HeapRegionDescriptorVLHGC *
MM_CopyForwardScheme::acquireEmptyRegion(MM_EnvironmentVLHGC *env, MM_ReservedRegionListHeader::Sublist *regionList, UDATA compactGroup)
{
	MM_HeapRegionDescriptorVLHGC *newRegion = NULL;

	if (!_failedToExpand) {
		UDATA allocationContextNumber = MM_CompactGroupManager::getAllocationContextNumberFromGroup(env, compactGroup);
		MM_AllocationContextTarok *allocationContext = (MM_AllocationContextTarok *)_extensions->globalAllocationManager->getAllocationContextByIndex(allocationContextNumber);

		newRegion = allocationContext->collectorAcquireRegion(env);

		if(NULL != newRegion) {
			MM_CycleState *cycleState = env->_cycleState;
			MM_CycleState *externalCycleState = env->_cycleState->_externalCycleState;

			/* a new region starts as ADDRESS_ORDERED but we will always have valid mark map data for this region so set its type now */
			newRegion->setMarkMapValid();
			if (newRegion->_previousMarkMapCleared) {
				newRegion->_previousMarkMapCleared = false;
			} else {
				cycleState->_markMap->setBitsForRegion(env, newRegion, true);
			}
			if (NULL != externalCycleState) {
				if (newRegion->_nextMarkMapCleared) {
					newRegion->_nextMarkMapCleared = false;
					if (_extensions->tarokEnableExpensiveAssertions) {
						Assert_MM_true(externalCycleState->_markMap->checkBitsForRegion(env, newRegion));
					}
				} else {
					externalCycleState->_markMap->setBitsForRegion(env, newRegion, true);
				}
			}

			Assert_MM_true(NULL == newRegion->getUnfinalizedObjectList()->getHeadOfList());
			Assert_MM_true(NULL == newRegion->getOwnableSynchronizerObjectList()->getHeadOfList());
			Assert_MM_false(newRegion->_markData._shouldMark);

			/*
			 * set logical age here to have compact groups working properly
			 * real allocation age will be updated after PGC
			 */
			UDATA logicalRegionAge = MM_CompactGroupManager::getRegionAgeFromGroup(env, compactGroup);
			newRegion->setAge(0, logicalRegionAge);

			Assert_MM_true(newRegion->getReferenceObjectList()->isSoftListEmpty());
			Assert_MM_true(newRegion->getReferenceObjectList()->isWeakListEmpty());
			Assert_MM_true(newRegion->getReferenceObjectList()->isPhantomListEmpty());

			setRegionAsSurvivor(env, newRegion, true);
			insertRegionIntoLockedList(env, regionList, newRegion);
		} else {
			/* record that we failed to expand so that we stop trying during this collection */
			_failedToExpand = true;
		}
	}

	return newRegion;
}

void
MM_CopyForwardScheme::insertRegionIntoLockedList(MM_EnvironmentVLHGC *env, MM_ReservedRegionListHeader::Sublist *regionList, MM_HeapRegionDescriptorVLHGC *newRegion)
{
	newRegion->_copyForwardData._nextRegion = regionList->_head;
	newRegion->_copyForwardData._previousRegion = NULL;

	if(NULL != regionList->_head) {
		regionList->_head->_copyForwardData._previousRegion = newRegion;
	}

	regionList->_head = newRegion;
}

void
MM_CopyForwardScheme::releaseRegion(MM_EnvironmentVLHGC *env, MM_ReservedRegionListHeader::Sublist *regionList, MM_HeapRegionDescriptorVLHGC *region)
{
	MM_HeapRegionDescriptorVLHGC *next = region->_copyForwardData._nextRegion;
	MM_HeapRegionDescriptorVLHGC *previous = region->_copyForwardData._previousRegion;

	if (NULL != next) {
		next->_copyForwardData._previousRegion = previous;
	}
	if (NULL != previous) {
		previous->_copyForwardData._nextRegion = next;
		Assert_MM_false(previous == previous->_copyForwardData._nextRegion);
	} else {
		regionList->_head = next;
	}
	region->_copyForwardData._nextRegion = NULL;
	region->_copyForwardData._previousRegion = NULL;
}

void *
MM_CopyForwardScheme::reserveMemoryForObject(MM_EnvironmentVLHGC *env, uintptr_t compactGroup, uintptr_t objectSize, MM_LightweightNonReentrantLock** listLock)
{
	MM_AllocateDescription allocDescription(objectSize, 0, false, false);
	uintptr_t sublistCount = _reservedRegionList[compactGroup]._sublistCount;
	Assert_MM_true(sublistCount <= MM_ReservedRegionListHeader::MAX_SUBLISTS);
	uintptr_t sublistIndex = env->getWorkerID() % sublistCount;
	MM_ReservedRegionListHeader::Sublist *regionList = &_reservedRegionList[compactGroup]._sublists[sublistIndex];
	void *result = NULL;

	/* Measure the number of acquires before and after we acquire the lock. If it changed, then there is probably contention on the lock. */
	uintptr_t acquireCountBefore = regionList->_cacheAcquireCount;
	regionList->_lock.acquire();
	uintptr_t acquireCountAfter = regionList->_cacheAcquireCount;

	/*
	 * 1. attempt to use an existing region
	 */
	MM_HeapRegionDescriptorVLHGC *region = regionList->_head;
	while ((NULL == result) && (NULL != region)) {
		MM_MemoryPool *memoryPool = region->getMemoryPool();
		Assert_MM_true(NULL != memoryPool);
		result = memoryPool->collectorAllocate(env, &allocDescription, false);
		if (NULL != result) {
			break;
		}
		region = region->_copyForwardData._nextRegion;
	}

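	/* Note: unlike the TLH path in reserveMemoryForCache(), regions that could not satisfy this
	 * (single-object) allocation stay on the sublist, since a later, smaller object may still fit in them.
	 */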
	/*
	 * 2. attempt to acquire a region from the free memory candidates list
	 */
	if ((NULL == result) && (NULL != _reservedRegionList[compactGroup]._freeMemoryCandidates)) {
		_reservedRegionList[compactGroup]._freeMemoryCandidatesLock.acquire();
		region = _reservedRegionList[compactGroup]._freeMemoryCandidates;
		MM_HeapRegionDescriptorVLHGC *resultRegion = NULL;
		while ((NULL == result) && (NULL != region)) {
			MM_MemoryPool *memoryPool = region->getMemoryPool();
			Assert_MM_true(NULL != memoryPool);

			((MM_MemoryPoolAddressOrderedList *)memoryPool)->initialFirstUnalignedFreeEntry();
			result = memoryPool->collectorAllocate(env, &allocDescription, false);

			if (NULL != result) {
				resultRegion = region;
				break;
			}
			region = region->_copyForwardData._nextRegion;
		}
		if (NULL != result) {
			/* remove this region from the common free memory candidates list and add it to our own sublist */
			Assert_MM_true(NULL != resultRegion);
			removeFreeMemoryCandidate(env, &_reservedRegionList[compactGroup], resultRegion);
			insertRegionIntoLockedList(env, regionList, resultRegion);
			convertFreeMemoryCandidateToSurvivorRegion(env, resultRegion);
		}
		_reservedRegionList[compactGroup]._freeMemoryCandidatesLock.release();
	}

	/*
	 * 3. attempt to acquire an empty region
	 */
	if (NULL == result) {
		region = acquireEmptyRegion(env, regionList, compactGroup);
		if(NULL != region) {
			MM_MemoryPool *memoryPool = region->getMemoryPool();
			Assert_MM_true(NULL != memoryPool);
			result = memoryPool->collectorAllocate(env, &allocDescription, false);
			Assert_MM_true(NULL != result); /* This should not have failed at this point */
		}
	}

	if (NULL != result) {
		regionList->_cacheAcquireCount += 1;
		regionList->_cacheAcquireBytes += allocDescription.getBytesRequested();
		setCompressedSurvivorCards(env, (void*)result, (void*) ((uintptr_t)result + allocDescription.getBytesRequested()));
	}

	regionList->_lock.release();
	*listLock = &regionList->_lock;

	Assert_MM_true(acquireCountBefore <= acquireCountAfter);
	if ((NULL != result) && (sublistCount < _reservedRegionList[compactGroup]._maxSublistCount)) {
		uintptr_t acceptableAcquireCountForContention = acquireCountBefore + _regionSublistContentionThreshold;
		if (acceptableAcquireCountForContention < acquireCountAfter) {
			/* contention detected on lock -- attempt to increase the number of sublists */
			MM_AtomicOperations::lockCompareExchange(&_reservedRegionList[compactGroup]._sublistCount, sublistCount, sublistCount + 1);
		}
	}

	return result;
}

bool
MM_CopyForwardScheme::reserveMemoryForCache(MM_EnvironmentVLHGC *env, uintptr_t compactGroup, uintptr_t maxCacheSize, void **addrBase, void **addrTop, MM_LightweightNonReentrantLock** listLock)
{
	MM_AllocateDescription allocDescription(maxCacheSize, 0, false, false);
	bool result = false;
	uintptr_t sublistCount = _reservedRegionList[compactGroup]._sublistCount;
	Assert_MM_true(sublistCount <= MM_ReservedRegionListHeader::MAX_SUBLISTS);
	uintptr_t sublistIndex = env->getWorkerID() % sublistCount;
	MM_ReservedRegionListHeader::Sublist *regionList = &_reservedRegionList[compactGroup]._sublists[sublistIndex];

	/* Measure the number of acquires before and after we acquire the lock. If it changed, then there is probably contention on the lock. */
	uintptr_t acquireCountBefore = regionList->_cacheAcquireCount;
	regionList->_lock.acquire();
	uintptr_t acquireCountAfter = regionList->_cacheAcquireCount;

	/*
	 * 1. attempt to use an existing region
	 */
	MM_HeapRegionDescriptorVLHGC *region = regionList->_head;
	while ((!result) && (NULL != region)) {
		MM_MemoryPool *memoryPool = region->getMemoryPool();
		Assert_MM_true(NULL != memoryPool);

		void *tlhBase = NULL;
		void *tlhTop = NULL;
		result = (NULL != memoryPool->collectorAllocateTLH(env, &allocDescription, maxCacheSize, tlhBase, tlhTop, false));

		MM_HeapRegionDescriptorVLHGC *next = region->_copyForwardData._nextRegion;
		if (result) {
			*addrBase = tlhBase;
			*addrTop = tlhTop;
			break;
		} else {
			releaseRegion(env, regionList, region);
		}
		region = next;
	}

	/*
	 * 2. attempt to acquire a region from the free memory candidates list
	 */
	if ((!result) && (NULL != _reservedRegionList[compactGroup]._freeMemoryCandidates)) {
		_reservedRegionList[compactGroup]._freeMemoryCandidatesLock.acquire();
		region = _reservedRegionList[compactGroup]._freeMemoryCandidates;
		MM_HeapRegionDescriptorVLHGC *resultRegion = NULL;
		while ((!result) && (NULL != region)) {
			MM_MemoryPool *memoryPool = region->getMemoryPool();
			Assert_MM_true(NULL != memoryPool);

			void *tlhBase = NULL;
			void *tlhTop = NULL;
			((MM_MemoryPoolAddressOrderedList *)memoryPool)->initialFirstUnalignedFreeEntry();
			result = (NULL != memoryPool->collectorAllocateTLH(env, &allocDescription, maxCacheSize, tlhBase, tlhTop, false));
			if (result) {
				*addrBase = tlhBase;
				*addrTop = tlhTop;
				/* remove this region from the common free memory candidates list and add it to our own sublist */
				removeFreeMemoryCandidate(env, &_reservedRegionList[compactGroup], region);
				insertRegionIntoLockedList(env, regionList, region);
				convertFreeMemoryCandidateToSurvivorRegion(env, region);
				break;

			}
			Assert_MM_true(0 == memoryPool->getActualFreeMemorySize());
			resultRegion = region;
			region = region->_copyForwardData._nextRegion;
			removeFreeMemoryCandidate(env, &_reservedRegionList[compactGroup], resultRegion);
		}
		_reservedRegionList[compactGroup]._freeMemoryCandidatesLock.release();
	}

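	/* Free memory candidates that could not satisfy the TLH request in step 2 above have no usable
	 * free memory left (see the assertion in the loop) and are removed from the candidate list so
	 * other threads do not rescan them.
	 */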
	/*
	 * 3. attempt to acquire an empty region
	 */
	if(!result) {
		region = acquireEmptyRegion(env, regionList, compactGroup);
		if(NULL != region) {
			MM_MemoryPool *memoryPool = region->getMemoryPool();
			Assert_MM_true(NULL != memoryPool);

			void *tlhBase = NULL;
			void *tlhTop = NULL;
			/* note that we called alignAllocationPointer on this pool when adding it to our copy-forward destination list so this address won't share a card with non-moving objects */
			result = (NULL != memoryPool->collectorAllocateTLH(env, &allocDescription, maxCacheSize, tlhBase, tlhTop, false));

			Assert_MM_true(result); /* This should not have failed at this point */

			*addrBase = tlhBase;
			*addrTop = tlhTop;
		}
	}

	if (result) {
		regionList->_cacheAcquireCount += 1;
		regionList->_cacheAcquireBytes += ((uintptr_t)*addrTop) - ((uintptr_t)*addrBase);
		setCompressedSurvivorCards(env, *addrBase, *addrTop);
	}

	regionList->_lock.release();
	*listLock = &regionList->_lock;

	Assert_MM_true(acquireCountBefore <= acquireCountAfter);
	if (result && (sublistCount < _reservedRegionList[compactGroup]._maxSublistCount)) {
		uintptr_t acceptableAcquireCountForContention = acquireCountBefore + _regionSublistContentionThreshold;
		if (acceptableAcquireCountForContention < acquireCountAfter) {
			/* contention detected on lock -- attempt to increase the number of sublists */
			MM_AtomicOperations::lockCompareExchange(&_reservedRegionList[compactGroup]._sublistCount, sublistCount, sublistCount + 1);
		}
	}

	return result;
}

MM_CopyScanCacheVLHGC *
MM_CopyForwardScheme::createScanCacheForOverflowInHeap(MM_EnvironmentVLHGC *env)
{
	bool const compressed = env->compressObjectReferences();
	MM_CopyScanCacheVLHGC * result = NULL;

	_cacheFreeList.lock();

	/* check to see if another thread already did this */
	result = _cacheFreeList.popCacheNoLock(env);
	/* find out how many bytes are required to allocate a chunk in the heap */
	UDATA cacheSizeInBytes = MM_CopyScanCacheChunkVLHGCInHeap::bytesRequiredToAllocateChunkInHeap(env);
	/* since we are allocating this in a part of the heap which the copy-forward mechanism will have to walk before it finishes, we need to hide this in a hole so add that header size */
	UDATA bytesToReserve = sizeof(MM_HeapLinkedFreeHeader) + cacheSizeInBytes;
	UDATA suggestedCompactGroup = 0;
	while ((NULL == result) && (suggestedCompactGroup < _compactGroupMaxCount)) {
		MM_LightweightNonReentrantLock *listLock = NULL;
		void *extentBase = reserveMemoryForObject(env, suggestedCompactGroup, bytesToReserve, &listLock);
		if (NULL != extentBase) {
			/* this is not object memory so account for it as free memory while we have the size */
			/* lock the region list for this group and write-back the memory we consumed as free space immediately (this is a rare case so the
			 * lock is an acceptable cost to avoid trying to defer the write-back of the free memory size since this case is unusual)
			 */
			Assert_MM_true(NULL != listLock);
			MM_HeapRegionDescriptorVLHGC *region = (MM_HeapRegionDescriptorVLHGC*)_regionManager->tableDescriptorForAddress(extentBase);
			MM_MemoryPool *pool = region->getMemoryPool();
			listLock->acquire();
			pool->incrementDarkMatterBytes(bytesToReserve);
			listLock->release();
			/* save out how much memory we wasted so the caller can account for it */
			memset(extentBase, 0x0, bytesToReserve);
			void *cacheBase = (void *)((MM_HeapLinkedFreeHeader *)extentBase + 1);
			MM_HeapLinkedFreeHeader::fillWithHoles(extentBase, bytesToReserve, compressed);
			result = _cacheFreeList.allocateCacheEntriesInExistingMemory(env, cacheBase, cacheSizeInBytes);
		}
		suggestedCompactGroup += 1;
	}

	_cacheFreeList.unlock();

	return result;
}

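/* Worked example of the sizing below: with a fragmentation target of, say, 5%, allowableFragmentation
 * is 0.10, so after copying 10 MB into a compact group the desired cache size is roughly 1 MB.
 * The per-thread survival estimate and the TLH minimum/maximum then clamp that value.
 */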
UDATA
MM_CopyForwardScheme::getDesiredCopyCacheSize(MM_EnvironmentVLHGC *env, UDATA compactGroup)
{
	/* The desired cache size is a fraction of the number of bytes we've copied so far.
	 * The upper bound on fragmentation is approximately this fraction, with the expected fragmentation about half of the fraction.
	 */
	const double allowableFragmentation = 2.0 * _extensions->tarokCopyForwardFragmentationTarget;
	const double bytesCopiedInCompactGroup = (double)(env->_copyForwardCompactGroups[compactGroup]._edenStats._copiedBytes + env->_copyForwardCompactGroups[compactGroup]._nonEdenStats._copiedBytes);
	UDATA desiredCacheSize = (UDATA)(allowableFragmentation * bytesCopiedInCompactGroup);
	MM_CompactGroupPersistentStats *stats = &(_extensions->compactGroupPersistentStats[compactGroup]);
	UDATA perThreadSurvivalEstimatedSize = (UDATA)(((double)stats->_measuredLiveBytesBeforeCollectInCollectedSet * stats->_historicalSurvivalRate * allowableFragmentation) / (double)env->_currentTask->getThreadCount());
	desiredCacheSize = OMR_MAX(desiredCacheSize, perThreadSurvivalEstimatedSize);
	desiredCacheSize = MM_Math::roundToCeiling(_objectAlignmentInBytes, desiredCacheSize);
	desiredCacheSize = OMR_MIN(desiredCacheSize, _maxCacheSize);
	desiredCacheSize = OMR_MAX(desiredCacheSize, _minCacheSize);
	return desiredCacheSize;
}

MM_CopyScanCacheVLHGC *
MM_CopyForwardScheme::reserveMemoryForCopy(MM_EnvironmentVLHGC *env, J9Object *objectToEvacuate, MM_AllocationContextTarok *reservingContext, uintptr_t objectReserveSizeInBytes)
{
	void *addrBase = NULL;
	void *addrTop = NULL;
	uintptr_t minimumRequiredCacheSize = objectReserveSizeInBytes;
	uintptr_t minimumSingleObjectAllocateSize = _extensions->tlhSurvivorDiscardThreshold;

	Assert_MM_objectAligned(env, objectReserveSizeInBytes);

	MM_HeapRegionDescriptorVLHGC *region = (MM_HeapRegionDescriptorVLHGC *)_regionManager->tableDescriptorForAddress(objectToEvacuate);
	uintptr_t compactGroup = MM_CompactGroupManager::getCompactGroupNumberInContext(env, region, reservingContext);
	MM_CopyForwardCompactGroup *copyForwardCompactGroup = &env->_copyForwardCompactGroups[compactGroup];

	Assert_MM_true(compactGroup < _compactGroupMaxCount);

	MM_CopyScanCacheVLHGC *copyCache = copyForwardCompactGroup->_copyCache;

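	/* The retry label below handles the case where a TLH-sized cache turns out to be smaller than the
	 * object being copied (possible when tlhSurvivorDiscardThreshold is raised): minimumSingleObjectAllocateSize
	 * is lowered to _minCacheSize so the next attempt prefers a single-object allocation of the required size.
	 */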
retry:
	/* A survivor copy scan cache exists - check if there is room */
	if ((NULL == copyCache) || (((uintptr_t)copyCache->cacheTop - (uintptr_t)copyCache->cacheAlloc) < minimumRequiredCacheSize)) {
		/* There is no room for current copy cache */
		MM_LightweightNonReentrantLock *listLock = NULL;
		if (minimumRequiredCacheSize < copyForwardCompactGroup->_failedAllocateSize) {
			/* try to use TLH remainder from previous discard */
			if (((uintptr_t)copyForwardCompactGroup->_TLHRemainderTop - (uintptr_t)copyForwardCompactGroup->_TLHRemainderBase) >= minimumRequiredCacheSize) {
				addrBase = copyForwardCompactGroup->_TLHRemainderBase;
				addrTop = copyForwardCompactGroup->_TLHRemainderTop;
				Assert_MM_true(NULL != copyForwardCompactGroup->_TLHRemainderBase);
				Assert_MM_true(NULL != copyForwardCompactGroup->_TLHRemainderTop);
				copyForwardCompactGroup->resetTLHRemainder();

				uintptr_t sublistCount = _reservedRegionList[compactGroup]._sublistCount;
				uintptr_t sublistIndex = env->getWorkerID() % sublistCount;
				MM_ReservedRegionListHeader::Sublist *regionList = &_reservedRegionList[compactGroup]._sublists[sublistIndex];
				listLock = &regionList->_lock;
			} else if (minimumSingleObjectAllocateSize < minimumRequiredCacheSize) {
				addrBase = reserveMemoryForObject(env, compactGroup, minimumRequiredCacheSize, &listLock);

				if (NULL != addrBase) {
					addrTop = (void *)((uintptr_t)addrBase + minimumRequiredCacheSize);
				} else {
					/* failed to allocate - set the threshold to short-circuit future alloc attempts */
					copyForwardCompactGroup->_failedAllocateSize = minimumRequiredCacheSize;
				}
			} else {
				UDATA desiredCacheSize = getDesiredCopyCacheSize(env, compactGroup);
				desiredCacheSize = OMR_MAX(desiredCacheSize, minimumRequiredCacheSize);
				if (!reserveMemoryForCache(env, compactGroup, desiredCacheSize, &addrBase, &addrTop, &listLock)) {
					/* failed to allocate - set the threshold to short-circuit future alloc attempts:
					 * we should never (in this GC) attempt to allocate a cache (TLH) from this compact group
					 */
					copyForwardCompactGroup->_failedAllocateSize = 0;
				}
			}
		}

		if (NULL != copyCache) {
			/* we can't use this cache as a destination so release local cache first. */
			MM_CopyScanCacheVLHGC * stoppedCache = stopCopyingIntoCache(env, compactGroup);
			Assert_MM_true(stoppedCache == copyCache);

			if (copyCache->isCurrentlyBeingScanned()) {
				/* this cache is already being scanned. The scanning thread will add it to the free list when it's finished */
				copyCache = NULL;
			} else {
				/* assert that deferred or scan cache is not this cache */
				Assert_MM_true(copyCache != env->_scanCache);
				Assert_MM_true(copyCache != env->_deferredScanCache);
				/* Either cache is completely scanned or it has never been scanned.
				 * If it has never been scanned, it is here that we should decide if there is scan work to do
				 * and whether to add to the scan list
				 */
				if (copyCache->isScanWorkAvailable()) {
					/* must not have local references still in use before adding to global list */
					Assert_MM_true(copyCache->cacheBase <= copyCache->cacheAlloc);
					Assert_MM_true(copyCache->cacheAlloc <= copyCache->cacheTop);
					Assert_MM_true(copyCache->scanCurrent <= copyCache->cacheAlloc);
#if defined(J9MODRON_TGC_PARALLEL_STATISTICS)
					env->_copyForwardStats._releaseScanListCount += 1;
#endif /* J9MODRON_TGC_PARALLEL_STATISTICS */
					addCacheEntryToScanCacheListAndNotify(env, copyCache);
					copyCache = NULL;
				} else {
					/* we have decided to stop copying into this cache so ensure that we won't try to keep using it as one (we will allocate a new cache structure if the allocate succeeds) */
					addCacheEntryToFreeCacheList(env, copyCache);
					copyCache = NULL;
				}
			}
		}

		if (NULL != addrBase) {
			/* allocate from reserveMemory or TLHRemainder */
			Assert_MM_true(NULL == copyCache);

			/* If we didn't already have a copy cache structure or dropped it earlier in the call, allocate a new one */
			copyCache = getFreeCache(env);
			if (NULL != copyCache) {
				copyForwardCompactGroup->_copyCache = copyCache;
				copyForwardCompactGroup->_copyCacheLock = listLock;
				reinitCache(env, copyCache, addrBase, addrTop, compactGroup);

				Assert_MM_true(NULL != listLock);
				Assert_MM_true(0 == copyForwardCompactGroup->_freeMemoryMeasured);
			} else {
				/* ensure that we have realized the abort flag (since getFreeCache only returns NULL if it had to abort) */
				Assert_MM_true(abortFlagRaised());
			}
		}

		if (NULL == copyCache) {
			/* Record stats */
			copyForwardCompactGroup->_failedCopiedObjects += 1;
			copyForwardCompactGroup->_failedCopiedBytes += objectReserveSizeInBytes;
		} else {
			Assert_MM_true(NULL != copyCache->cacheAlloc);
			Assert_MM_true(NULL != copyCache->cacheTop);
			Assert_MM_true(NULL != copyCache->cacheBase);
			if (((uintptr_t)addrTop - (uintptr_t)addrBase) < minimumRequiredCacheSize) {
				/* In case of an increased tlhSurvivorDiscardThreshold, we may prefer a TLH rather than a single object allocation,
				 * in which case the TLH cache may not be large enough to satisfy the allocation.
				 * We'll try again but force single object allocation.
				 * We could have detected the failed TLH allocate earlier in the method, but doing this late gives a chance
				 * for the current cache remainder or new cache to be preserved (if sufficiently large) as the thread's TLH remainder
				 */
				Assert_MM_true(_extensions->tlhSurvivorDiscardThreshold > _minCacheSize);
				minimumSingleObjectAllocateSize = _minCacheSize;
				addrBase = NULL;
				addrTop = NULL;

				goto retry;
			}
			if (_extensions->tarokEnableExpensiveAssertions) {
				/* verify that the mark map for this range is clear */
				Assert_MM_true(NULL == MM_HeapMapIterator(_extensions, _markMap, (UDATA*)copyCache->cacheAlloc, (UDATA*)copyCache->cacheTop, false).nextObject());
			}
		}
	}

	return copyCache;
}

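/* The copyAndForward() family below is the core forwarding primitive: if the referent lies in evacuate
 * space it is either already forwarded (the slot is simply updated) or copied via copy(); a false return
 * means the copy failed (abort path) and the caller must remember the object for re-scanning.
 */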
MMINLINE bool
MM_CopyForwardScheme::copyAndForward(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, volatile j9object_t* objectPtrIndirect, bool leafType)
{
	J9Object *originalObjectPtr = *objectPtrIndirect;
	J9Object *objectPtr = originalObjectPtr;
	bool success = true;

	if((NULL != objectPtr) && isObjectInEvacuateMemory(objectPtr)) {
		/* Object needs to be copied and forwarded. Check if the work has already been done */
		MM_ForwardedHeader forwardHeader(objectPtr, _extensions->compressObjectReferences());
		objectPtr = forwardHeader.getForwardedObject();

		if(NULL != objectPtr) {
			/* Object has been copied - update the forwarding information and return */
			*objectPtrIndirect = objectPtr;
		} else {
			Assert_GC_true_with_message(env, (UDATA)0x99669966 == _extensions->objectModel.getPreservedClass(&forwardHeader)->eyecatcher, "Invalid class in objectPtr=%p\n", originalObjectPtr);


			objectPtr = copy(env, reservingContext, &forwardHeader, leafType);
			if (NULL == objectPtr) {
				success = false;
			} else if (originalObjectPtr != objectPtr) {
				/* Update the slot */
				*objectPtrIndirect = objectPtr;
			}
		}
	}

	return success;
}

MMINLINE bool
MM_CopyForwardScheme::copyAndForward(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, J9Object *objectPtr, GC_SlotObject *slotObject, bool leafType)
{
	J9Object *value = slotObject->readReferenceFromSlot();
	J9Object *preservedValue = value;

	bool success = copyAndForward(env, reservingContext, &value, leafType);

	if (success) {
		if(preservedValue != value) {
			slotObject->writeReferenceToSlot(value);
		}
		_interRegionRememberedSet->rememberReferenceForCopyForward(env, objectPtr, value);
	} else {
		Assert_MM_false(_abortInProgress);
		Assert_MM_true(preservedValue == value);
		env->_workStack.push(env, objectPtr);
	}

	return success;
}

MMINLINE bool
MM_CopyForwardScheme::copyAndForward(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, J9Object *objectPtr, volatile j9object_t* slot)
{
	bool success = copyAndForward(env, reservingContext, slot);

	if (success) {
		_interRegionRememberedSet->rememberReferenceForCopyForward(env, objectPtr, *slot);
	} else {
		Assert_MM_false(_abortInProgress);
		/* Because there is a caller where the slot could be scanned by multiple threads at once, it is possible on failure that
		 * the value of the slot HAS in fact changed (another thread had room to satisfy the copy). Because of this, we cannot check whether the preserved
		 * slot value would be unchanged (unlike other copyAndForward() implementations).
		 */
		env->_workStack.push(env, objectPtr);
	}

	return success;
}

MMINLINE bool
MM_CopyForwardScheme::copyAndForwardPointerArray(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, J9IndexableObject *arrayPtr, UDATA startIndex, GC_SlotObject *slotObject)
{
	J9Object *value = slotObject->readReferenceFromSlot();
	J9Object *preservedValue = value;

	bool success = copyAndForward(env, reservingContext, &value);

	if (success) {
		if(preservedValue != value) {
			slotObject->writeReferenceToSlot(value);
		}
		_interRegionRememberedSet->rememberReferenceForCopyForward(env, (J9Object *)arrayPtr, value);
	} else {
		Assert_MM_false(_abortInProgress);
		Assert_MM_true(preservedValue == value);
		/* We push only the current split unit (from startIndex with size of arraySplit size).
		 * This is to avoid duplicate work which would otherwise be created,
		 * if each failed-to-scan-to-completion copy-scan cache had created the work unit till the end of the array
		 */
		void *element1 = (void *)arrayPtr;
		void *element2 = (void *)((startIndex << PACKET_ARRAY_SPLIT_SHIFT) | PACKET_ARRAY_SPLIT_TAG | PACKET_ARRAY_SPLIT_CURRENT_UNIT_ONLY_TAG);
		Assert_MM_true(startIndex == (((UDATA)element2) >> PACKET_ARRAY_SPLIT_SHIFT));
		env->_workStack.push(env, element1, element2);
	}

	return success;
}

MMINLINE bool
MM_CopyForwardScheme::copyAndForwardObjectClass(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, J9Object *objectPtr)
{
	bool success = true;

#if defined(J9VM_GC_DYNAMIC_CLASS_UNLOADING)
	_extensions->classLoaderRememberedSet->rememberInstance(env, objectPtr);
	if(isDynamicClassUnloadingEnabled()) {
		j9object_t classObject = (j9object_t)J9GC_J9OBJECT_CLAZZ(objectPtr, env)->classObject;
		Assert_MM_true(J9_INVALID_OBJECT != classObject);
		if (copyAndForward(env, reservingContext, &classObject)) {
			/* we don't need to update anything with the new address of the class object since objectPtr points at the immobile J9Class */
		} else {
			/* we failed to copy (and, therefore, mark) the class so we need to scan this object again */
			Assert_MM_false(_abortInProgress);
			env->_workStack.push(env, objectPtr);
			success = false;
		}
	}
#endif /* J9VM_GC_DYNAMIC_CLASS_UNLOADING */

	return success;
}

/**
 * Cleanup after CopyForward work is complete.
 * This should only be called once per collection by the main thread.
 */
void
MM_CopyForwardScheme::mainCleanupForCopyForward(MM_EnvironmentVLHGC *env)
{
	/* make sure that we have dropped any remaining references to any on-heap scan caches which we would have allocated if we hit overflow */
	_cacheFreeList.removeAllHeapAllocatedChunks(env);

	if (_extensions->tarokEnableExpensiveAssertions) {
		/* ensure that all managed caches have been returned to the free list */
		Assert_MM_true(_cacheFreeList.getTotalCacheCount() == _cacheFreeList.countCaches());
	}

	Assert_MM_true(static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_vlhgcIncrementStats._copyForwardStats._ownableSynchronizerCandidates >= static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_vlhgcIncrementStats._copyForwardStats._ownableSynchronizerSurvived);
}

/**
 * Initialize the copy forward scheme for a garbage collection.
 * Initialize all internal values to start a garbage collect. This should only be
 * called once per collection by the main thread.
 */
start a garbage collect. This should only be1406* called once per collection by the main thread.1407*/1408void1409MM_CopyForwardScheme::mainSetupForCopyForward(MM_EnvironmentVLHGC *env)1410{1411clearAbortFlag();1412_abortInProgress = false;1413_clearableProcessingStarted = false;1414_failedToExpand = false;1415_phantomReferenceRegionsToProcess = 0;14161417/* Sort all hot fields for all classes as dynamicBreadthFirstScanOrdering is enabled */1418if (MM_GCExtensions::OMR_GC_SCAVENGER_SCANORDERING_DYNAMIC_BREADTH_FIRST == _extensions->scavengerScanOrdering) {1419MM_HotFieldUtil::sortAllHotFieldData(_javaVM, _extensions->globalVLHGCStats.gcCount);1420}14211422/* Cache of the mark map */1423_markMap = env->_cycleState->_markMap;14241425/* Cache heap ranges for fast "valid object" checks (this can change in an expanding heap situation, so we refetch every cycle) */1426_heapBase = _extensions->heap->getHeapBase();1427_heapTop = _extensions->heap->getHeapTop();14281429/* Record any special action for clearing / unloading this cycle */1430#if defined(J9VM_GC_DYNAMIC_CLASS_UNLOADING)1431_dynamicClassUnloadingEnabled = env->_cycleState->_dynamicClassUnloadingEnabled;1432#endif /* J9VM_GC_DYNAMIC_CLASS_UNLOADING */1433_collectStringConstantsEnabled = _extensions->collectStringConstants;14341435/* ensure heap base is aligned to region size */1436UDATA heapBase = (UDATA)_extensions->heap->getHeapBase();1437UDATA regionSize = _regionManager->getRegionSize();1438Assert_MM_true((0 != regionSize) && (0 == (heapBase % regionSize)));14391440/* Reinitialize the _doneIndex */1441_doneIndex = 0;14421443/* Context 0 is currently our "common destination context" */1444_commonContext = (MM_AllocationContextTarok *)_extensions->globalAllocationManager->getAllocationContextByIndex(0);14451446/* We don't want to split too aggressively so take the base2 log of our thread count as our current contention trigger.1447* Note that this number could probably be improved upon but log2 "seemed" to make sense for contention measurement and1448* provided a measurable performance benefit in the tests we were running.1449*/1450_regionSublistContentionThreshold = MM_Math::floorLog2(_extensions->dispatcher->activeThreadCount());14511452_interRegionRememberedSet->setupForPartialCollect(env);14531454/* Record whether finalizable processing is required in this copy-forward collection */1455_shouldScanFinalizableObjects = _extensions->finalizeListManager->isFinalizableObjectProcessingRequired();14561457cleanCompressedSurvivorCardTable(env);1458}14591460/**1461* Per worker thread pre-gc initialization.1462*/1463void1464MM_CopyForwardScheme::workerSetupForCopyForward(MM_EnvironmentVLHGC *env)1465{1466/* Reset the copy caches */1467Assert_MM_true(NULL == env->_scanCache);1468Assert_MM_true(NULL == env->_deferredScanCache);14691470/* install this thread's compact group structures */1471Assert_MM_true(NULL == env->_copyForwardCompactGroups);1472Assert_MM_true(NULL != _compactGroupBlock);1473env->_copyForwardCompactGroups = &_compactGroupBlock[env->getWorkerID() * _compactGroupMaxCount];14741475for (UDATA compactGroup = 0; compactGroup < _compactGroupMaxCount; compactGroup++) {1476env->_copyForwardCompactGroups[compactGroup].initialize(env);1477}14781479Assert_MM_true(NULL == env->_lastOverflowedRsclWithReleasedBuffers);1480}14811482/**1483* Merge any per thread GC stats into the main stat structure.1484*/1485void1486MM_CopyForwardScheme::mergeGCStats(MM_EnvironmentVLHGC *env)1487{1488PORT_ACCESS_FROM_ENVIRONMENT(env);1489MM_CopyForwardStats 
*localStats = &env->_copyForwardStats;1490MM_CompactGroupPersistentStats *persistentStats = _extensions->compactGroupPersistentStats;14911492/* the following statistics are only updated at the merge point */1493Assert_MM_true(0 == localStats->_copyObjectsTotal);1494Assert_MM_true(0 == localStats->_copyBytesTotal);1495Assert_MM_true(0 == localStats->_copyDiscardBytesTotal);1496Assert_MM_true(0 == localStats->_copyObjectsEden);1497Assert_MM_true(0 == localStats->_copyBytesEden);1498Assert_MM_true(0 == localStats->_copyDiscardBytesEden);1499Assert_MM_true(0 == localStats->_copyObjectsNonEden);1500Assert_MM_true(0 == localStats->_copyBytesNonEden);1501Assert_MM_true(0 == localStats->_copyDiscardBytesNonEden);15021503/* sum up the per-compact group data before entering the lock */1504for (UDATA compactGroupNumber = 0; compactGroupNumber < _compactGroupMaxCount; compactGroupNumber++) {1505MM_CopyForwardCompactGroup *compactGroup = &env->_copyForwardCompactGroups[compactGroupNumber];1506UDATA totalCopiedBytes = compactGroup->_edenStats._copiedBytes + compactGroup->_nonEdenStats._copiedBytes;1507UDATA totalLiveBytes = compactGroup->_edenStats._liveBytes + compactGroup->_nonEdenStats._liveBytes;15081509localStats->_copyObjectsTotal += compactGroup->_edenStats._copiedObjects + compactGroup->_nonEdenStats._copiedObjects;1510localStats->_copyBytesTotal += totalCopiedBytes;1511localStats->_scanObjectsTotal += compactGroup->_edenStats._scannedObjects + compactGroup->_nonEdenStats._scannedObjects;1512localStats->_scanBytesTotal += compactGroup->_edenStats._scannedBytes + compactGroup->_nonEdenStats._scannedBytes;15131514localStats->_copyObjectsEden += compactGroup->_edenStats._copiedObjects;1515localStats->_copyBytesEden += compactGroup->_edenStats._copiedBytes;1516localStats->_scanObjectsEden += compactGroup->_edenStats._scannedObjects;1517localStats->_scanBytesEden += compactGroup->_edenStats._scannedBytes;15181519localStats->_copyObjectsNonEden += compactGroup->_nonEdenStats._copiedObjects;1520localStats->_copyBytesNonEden += compactGroup->_nonEdenStats._copiedBytes;1521localStats->_scanObjectsNonEden += compactGroup->_nonEdenStats._scannedObjects;1522localStats->_scanBytesNonEden += compactGroup->_nonEdenStats._scannedBytes;15231524localStats->_copyDiscardBytesTotal += compactGroup->_discardedBytes;1525localStats->_TLHRemainderCount += compactGroup->_TLHRemainderCount;15261527if (0 == MM_CompactGroupManager::getRegionAgeFromGroup(env, compactGroupNumber)) {1528localStats->_copyDiscardBytesEden += compactGroup->_discardedBytes;1529} else {1530localStats->_copyDiscardBytesNonEden += compactGroup->_discardedBytes;1531}15321533/* use an atomic since other threads may be doing this at the same time */1534if (0 != totalLiveBytes) {1535MM_AtomicOperations::add(&persistentStats[compactGroupNumber]._measuredBytesCopiedFromGroupDuringCopyForward, totalLiveBytes);1536}15371538if (0 != totalCopiedBytes) {1539MM_AtomicOperations::add(&persistentStats[compactGroupNumber]._measuredBytesCopiedToGroupDuringCopyForward, totalCopiedBytes);1540MM_AtomicOperations::addU64(&persistentStats[compactGroupNumber]._measuredAllocationAgeToGroupDuringCopyForward, compactGroup->_allocationAge);1541}15421543if (0 != (totalCopiedBytes + compactGroup->_discardedBytes)) {1544Trc_MM_CopyForwardScheme_mergeGCStats_efficiency(env->getLanguageVMThread(), compactGroupNumber, totalCopiedBytes, compactGroup->_discardedBytes, (double)(compactGroup->_discardedBytes) / (double)(totalCopiedBytes + 
compactGroup->_discardedBytes));1545}1546}15471548/* Protect the merge with the mutex (this is done by multiple threads in the parallel collector) */1549omrthread_monitor_enter(_extensions->gcStatsMutex);1550static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_vlhgcIncrementStats._copyForwardStats.merge(localStats);1551static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_vlhgcIncrementStats._workPacketStats.merge(&env->_workPacketStats);1552static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_vlhgcIncrementStats._irrsStats.merge(&env->_irrsStats);1553omrthread_monitor_exit(_extensions->gcStatsMutex);15541555/* record the thread-specific parallelism stats in the trace buffer. This partially duplicates info in -Xtgc:parallel */1556Trc_MM_CopyForwardScheme_parallelStats(1557env->getLanguageVMThread(),1558(U_32)env->getWorkerID(),1559(U_32)j9time_hires_delta(0, env->_copyForwardStats._workStallTime, J9PORT_TIME_DELTA_IN_MILLISECONDS),1560(U_32)j9time_hires_delta(0, env->_copyForwardStats._completeStallTime, J9PORT_TIME_DELTA_IN_MILLISECONDS),1561(U_32)j9time_hires_delta(0, env->_copyForwardStats._syncStallTime, J9PORT_TIME_DELTA_IN_MILLISECONDS),1562(U_32)j9time_hires_delta(0, env->_copyForwardStats._irrsStallTime, J9PORT_TIME_DELTA_IN_MILLISECONDS),1563(U_32)env->_copyForwardStats._workStallCount,1564(U_32)env->_copyForwardStats._completeStallCount,1565(U_32)env->_copyForwardStats._syncStallCount,1566(U_32)env->_copyForwardStats._irrsStallCount,1567env->_copyForwardStats._acquireFreeListCount,1568env->_copyForwardStats._releaseFreeListCount,1569env->_copyForwardStats._acquireScanListCount,1570env->_copyForwardStats._releaseScanListCount,1571env->_copyForwardStats._copiedArraysSplit);15721573if (env->_copyForwardStats._aborted) {1574Trc_MM_CopyForwardScheme_parallelStatsForAbort(1575env->getLanguageVMThread(),1576(U_32)env->getWorkerID(),1577(U_32)j9time_hires_delta(0, env->_workPacketStats._workStallTime, J9PORT_TIME_DELTA_IN_MILLISECONDS),1578(U_32)j9time_hires_delta(0, env->_workPacketStats._completeStallTime, J9PORT_TIME_DELTA_IN_MILLISECONDS),1579(U_32)j9time_hires_delta(0, env->_copyForwardStats._markStallTime, J9PORT_TIME_DELTA_IN_MILLISECONDS),1580(U_32)j9time_hires_delta(0, env->_copyForwardStats._abortStallTime, J9PORT_TIME_DELTA_IN_MILLISECONDS),1581(U_32)env->_workPacketStats._workStallCount,1582(U_32)env->_workPacketStats._completeStallCount,1583(U_32)env->_copyForwardStats._markStallCount,1584(U_32)env->_copyForwardStats._abortStallCount,1585env->_workPacketStats.workPacketsAcquired,1586env->_workPacketStats.workPacketsReleased,1587env->_workPacketStats.workPacketsExchanged,1588env->_copyForwardStats._markedArraysSplit);1589}1590}15911592void1593MM_CopyForwardScheme::copyForwardPreProcess(MM_EnvironmentVLHGC *env)1594{1595PORT_ACCESS_FROM_ENVIRONMENT(env);15961597/* stats management */1598static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_vlhgcIncrementStats._copyForwardStats._startTime = j9time_hires_clock();1599/* Clear the gc statistics */1600clearGCStats(env);16011602/* Perform any pre copy forwarding changes to the region set */1603preProcessRegions(env);16041605if (0 != _regionCountCannotBeEvacuated) {1606/* need to run Hybrid mode, reuse InputListMonitor for both workPackets and ScanCopyCache */1607_workQueueMonitorPtr = env->_cycleState->_workPackets->getInputListMonitorPtr();1608_workQueueWaitCountPtr = env->_cycleState->_workPackets->getInputListWaitCountPtr();1609}1610/* Perform any main-specific setup 
*/1611mainSetupForCopyForward(env);1612}16131614void1615MM_CopyForwardScheme::copyForwardPostProcess(MM_EnvironmentVLHGC *env)1616{1617PORT_ACCESS_FROM_ENVIRONMENT(env);16181619mainCleanupForCopyForward(env);16201621/* Record the completion time of the copy forward cycle */1622static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_vlhgcIncrementStats._copyForwardStats._endTime = j9time_hires_clock();16231624updateLeafRegions(env);16251626/* We used memory from the ACs for survivor space - make sure it doesn't hang around as allocation space */1627clearReservedRegionLists(env);1628_extensions->globalAllocationManager->flushAllocationContexts(env);16291630copyForwardCompletedSuccessfully(env);16311632if(_extensions->tarokEnableExpensiveAssertions) {1633/* Verify the result of the copy forward operation (heap integrity, etc) */1634verifyCopyForwardResult(MM_EnvironmentVLHGC::getEnvironment(env));1635}16361637if (0 != _regionCountCannotBeEvacuated) {1638_workQueueMonitorPtr = &_scanCacheMonitor;1639_workQueueWaitCountPtr = &_scanCacheWaitCount;1640}16411642/* Do any final work to regions in order to release them back to the main collector implementation */1643postProcessRegions(env);16441645static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_abortFlagRaisedDuringPGC = copyForwardCompletedSuccessfully(env);1646}16471648#if defined(OMR_GC_VLHGC_CONCURRENT_COPY_FORWARD)1649void1650MM_CopyForwardScheme::concurrentCopyForwardCollectionSet(MM_EnvironmentVLHGC *env)1651{1652/* isConcurrentCycleInProgress() tells us if this is the first PGC increment or not. If it is1653* we'll call copyForwardPreProcess(). isConcurrentCycleInProgress state/value will get updated1654* preventing copyForwardPreProcess from being called in subsequent increments. For initial increment,1655* isConcurrentCycleInProgress will change from false to true causing only preProcess step to1656* be performed */1657if (!isConcurrentCycleInProgress())1658{1659copyForwardPreProcess(env);1660}16611662/* Perform the copy forward. This step will update the isConcurrentCycleInProgress state/value.1663* Note: The following is temporary as this will be updated to call concurrent copy forward state machine */1664MM_CopyForwardSchemeTask copyForwardTask(env, _dispatcher, this, env->_cycleState);1665_dispatcher->run(env, &copyForwardTask);16661667/* isConcurrentCycleInProgress() tells us if this is the last PGC increment or not. 
If this is the1668* last increment, isConcurrentCycleInProgress state/value would have been updated from true to false,1669* which will cause the following copyForwardPostProcess step to be performed */1670if (!isConcurrentCycleInProgress())1671{1672copyForwardPostProcess(env);1673}1674}1675#endif /* defined(OMR_GC_VLHGC_CONCURRENT_COPY_FORWARD) */16761677void1678MM_CopyForwardScheme::copyForwardCollectionSet(MM_EnvironmentVLHGC *env)1679{1680copyForwardPreProcess(env);16811682/* And perform the copy forward */1683MM_CopyForwardSchemeTask copyForwardTask(env, _dispatcher, this, env->_cycleState);1684_dispatcher->run(env, &copyForwardTask);16851686copyForwardPostProcess(env);1687}16881689/**1690* Determine whether a copy forward that has been started did complete successfully.1691* @return true if the copyForward completed successfully, false otherwise.1692*/1693bool1694MM_CopyForwardScheme::copyForwardCompletedSuccessfully(MM_EnvironmentVLHGC *env)1695{1696return !abortFlagRaised();1697}16981699/****************************************1700* Copy-Scan Cache management1701****************************************1702* TODO: move all the CopyScanCache methods into the CopyScanCache class.1703*/17041705/* getFreeCache makes the assumption that there will be at least 1 entry on the scan list if there are no entries on the free list.1706* This requires that there be at least (N * _cachesPerThread) scan cache entries, where N is the number of threads (Main + workers)1707*/1708MM_CopyScanCacheVLHGC *1709MM_CopyForwardScheme::getFreeCache(MM_EnvironmentVLHGC *env)1710{1711#if defined(J9MODRON_TGC_PARALLEL_STATISTICS)1712env->_copyForwardStats._acquireFreeListCount += 1;1713#endif /* J9MODRON_TGC_PARALLEL_STATISTICS */1714/* Check the free list */1715MM_CopyScanCacheVLHGC *cache = _cacheFreeList.popCache(env);1716if(NULL != cache) {1717return cache;1718}17191720/* No thread can use more than _cachesPerThread cache entries at 1 time (flip, tenure, scan, large, possibly deferred)1721* So long as (N * _cachesPerThread) cache entries exist,1722* the head of the scan list will contain a valid entry */1723env->_copyForwardStats._scanCacheOverflow = true;17241725if (NULL == cache) {1726/* we couldn't get a free cache so we must be in an overflow scenario. 
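* (Caches created this way live in heap memory and are tracked by _cacheFreeList; mainCleanupForCopyForward() later drops them via removeAllHeapAllocatedChunks().)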
Try creating new cache structures on the heap */1727cache = createScanCacheForOverflowInHeap(env);1728if (NULL == cache) {1729/* we couldn't overflow so we have no choice but to abort the copy-forward */1730raiseAbortFlag(env);1731}1732}1733/* Overflow or abort was hit so alert other threads that are waiting */1734omrthread_monitor_enter(*_workQueueMonitorPtr);1735if(0 != *_workQueueWaitCountPtr) {1736omrthread_monitor_notify(*_workQueueMonitorPtr);1737}1738omrthread_monitor_exit(*_workQueueMonitorPtr);1739return cache;1740}17411742void1743MM_CopyForwardScheme::addCacheEntryToFreeCacheList(MM_EnvironmentVLHGC *env, MM_CopyScanCacheVLHGC *newCacheEntry)1744{1745_cacheFreeList.pushCache(env, newCacheEntry);1746}17471748void1749MM_CopyForwardScheme::addCacheEntryToScanCacheListAndNotify(MM_EnvironmentVLHGC *env, MM_CopyScanCacheVLHGC *newCacheEntry)1750{1751UDATA numaNode = _regionManager->tableDescriptorForAddress(newCacheEntry->scanCurrent)->getNumaNode();1752_cacheScanLists[numaNode].pushCache(env, newCacheEntry);1753if (0 != *_workQueueWaitCountPtr) {1754/* Added an entry to the scan list - notify any other threads that a new entry has appeared on the list */1755omrthread_monitor_enter(*_workQueueMonitorPtr);1756omrthread_monitor_notify(*_workQueueMonitorPtr);1757omrthread_monitor_exit(*_workQueueMonitorPtr);1758}1759}17601761void1762MM_CopyForwardScheme::flushCache(MM_EnvironmentVLHGC *env, MM_CopyScanCacheVLHGC *cache)1763{1764Assert_MM_false(cache->isSplitArray());1765if(0 == (cache->flags & J9VM_MODRON_SCAVENGER_CACHE_TYPE_COPY)) {1766if(0 == (cache->flags & J9VM_MODRON_SCAVENGER_CACHE_TYPE_CLEARED)) {1767clearCache(env, cache);1768}1769#if defined(J9MODRON_TGC_PARALLEL_STATISTICS)1770env->_copyForwardStats._releaseFreeListCount += 1;1771#endif /* J9MODRON_TGC_PARALLEL_STATISTICS */1772addCacheEntryToFreeCacheList(env, cache);1773}1774}17751776bool1777MM_CopyForwardScheme::clearCache(MM_EnvironmentVLHGC *env, MM_CopyScanCacheVLHGC *cache)1778{1779uintptr_t discardSize = (uintptr_t)cache->cacheTop - (uintptr_t)cache->cacheAlloc;1780Assert_MM_true(0 == (cache->flags & J9VM_MODRON_SCAVENGER_CACHE_TYPE_CLEARED));1781Assert_MM_false(cache->isSplitArray());1782bool remainderCreated = false;17831784UDATA compactGroup = cache->_compactGroup;1785Assert_MM_true(compactGroup < _compactGroupMaxCount);1786MM_CopyForwardCompactGroup *compactGroupForMarkData = &(env->_copyForwardCompactGroups[compactGroup]);17871788if (0 < discardSize) {1789if ((discardSize < env->getExtensions()->tlhSurvivorDiscardThreshold) ||1790(discardSize <= ((uintptr_t)compactGroupForMarkData->_TLHRemainderTop - (uintptr_t)compactGroupForMarkData->_TLHRemainderBase))) {1791/* Abandon the current entry in the cache */1792compactGroupForMarkData->discardTLHRemainder(env, cache->cacheAlloc, cache->cacheTop);1793} else {1794/* Abandon the current TLHRemainder if one exists */1795compactGroupForMarkData->discardTLHRemainder(env);1796remainderCreated = true;1797compactGroupForMarkData->setTLHRemainder(cache->cacheAlloc, cache->cacheTop);1798}1799}18001801/* Broadcast details of that portion of memory within which objects have been allocated */1802TRIGGER_J9HOOK_MM_PRIVATE_CACHE_CLEARED(_extensions->privateHookInterface, env->getOmrVMThread(), env->_cycleState->_activeSubSpace,1803cache->cacheBase, cache->cacheAlloc, cache->cacheTop);18041805cache->flags |= J9VM_MODRON_SCAVENGER_CACHE_TYPE_CLEARED;18061807return remainderCreated;1808}18091810MM_CopyScanCacheVLHGC *1811MM_CopyForwardScheme::stopCopyingIntoCache(MM_EnvironmentVLHGC 
*env, UDATA compactGroup)1812{1813MM_CopyScanCacheVLHGC *copyCache = env->_copyForwardCompactGroups[compactGroup]._copyCache;1814MM_LightweightNonReentrantLock *copyCacheLock = env->_copyForwardCompactGroups[compactGroup]._copyCacheLock;18151816if (NULL != copyCache) {1817Assert_MM_false(copyCache->isSplitArray());1818UDATA wastedMemory = env->_copyForwardCompactGroups[compactGroup]._freeMemoryMeasured;1819env->_copyForwardCompactGroups[compactGroup]._freeMemoryMeasured = 0;18201821MM_HeapRegionDescriptorVLHGC * region = (MM_HeapRegionDescriptorVLHGC *)_regionManager->tableDescriptorForAddress(copyCache->cacheBase);18221823/* atomically add (age * usedBytes) product from this cache to the regions product */1824double newAllocationAgeSizeProduct = region->atomicIncrementAllocationAgeSizeProduct(copyCache->_allocationAgeSizeProduct);1825region->updateAgeBounds(copyCache->_lowerAgeBound, copyCache->_upperAgeBound);18261827/* Return any remaining memory to the pool */1828discardRemainingCache(env, copyCache, copyCacheLock, wastedMemory);18291830Trc_MM_CopyForwardScheme_stopCopyingIntoCache(env->getLanguageVMThread(), _regionManager->mapDescriptorToRegionTableIndex(region), copyCache,1831(double)(newAllocationAgeSizeProduct - copyCache->_allocationAgeSizeProduct) / (1024 * 1024) / (1024 * 1024), (double)((UDATA)copyCache->cacheAlloc - (UDATA)region->getLowAddress()) / (1024 * 1024),1832(double)copyCache->_allocationAgeSizeProduct / (1024 * 1024) / (1024 * 1024), (double)copyCache->_objectSize / (1024 * 1024), (double)newAllocationAgeSizeProduct / (1024 * 1024) / (1024 * 1024));18331834copyCache->_allocationAgeSizeProduct = 0.0;1835copyCache->_objectSize = 0;1836copyCache->_lowerAgeBound = U_64_MAX;1837copyCache->_upperAgeBound = 0;18381839/* Push any cached mark map data out */1840flushCacheMarkMap(env, copyCache);1841/* Update a region's projected live bytes from copy cache*/1842updateProjectedLiveBytesFromCopyScanCache(env, copyCache);1843/* Clear the current entry in the cache */1844clearCache(env, copyCache);1845/* This is no longer a copy cache */1846copyCache->flags &= ~J9VM_MODRON_SCAVENGER_CACHE_TYPE_COPY;1847/* drop this cache from the env */1848env->_copyForwardCompactGroups[compactGroup]._copyCache = NULL;1849env->_copyForwardCompactGroups[compactGroup]._copyCacheLock = NULL;1850}1851return copyCache;1852}18531854void1855MM_CopyForwardScheme::updateProjectedLiveBytesFromCopyScanCache(MM_EnvironmentVLHGC *env, MM_CopyScanCacheVLHGC *cache)1856{1857MM_HeapRegionDescriptorVLHGC *region = (MM_HeapRegionDescriptorVLHGC*)_regionManager->tableDescriptorForAddress(cache->cacheBase);1858Assert_MM_true(region->isSurvivorRegion());1859UDATA consumedBytes = (UDATA) cache->cacheAlloc - (UDATA) cache->cacheBase;1860MM_AtomicOperations::add(&region->_projectedLiveBytes, consumedBytes);1861}18621863void1864MM_CopyForwardScheme::discardRemainingCache(MM_EnvironmentVLHGC *env, MM_CopyScanCacheVLHGC *cache, MM_LightweightNonReentrantLock *cacheLock, UDATA wastedMemory)1865{1866Assert_MM_false(cache->isSplitArray());1867if (0 != wastedMemory) {1868MM_HeapRegionDescriptorVLHGC *region = (MM_HeapRegionDescriptorVLHGC*)_regionManager->tableDescriptorForAddress(cache->cacheBase);1869MM_MemoryPool *pool = region->getMemoryPool();1870pool->incrementDarkMatterBytes(wastedMemory);1871}1872}18731874void1875MM_CopyForwardScheme::addCopyCachesToFreeList(MM_EnvironmentVLHGC *env)1876{1877for(UDATA index = 0; index < _compactGroupMaxCount; index++) {1878MM_CopyScanCacheVLHGC * copyCache = stopCopyingIntoCache(env, 
index);1879if (NULL != copyCache) {1880addCacheEntryToFreeCacheList(env, copyCache);1881}1882}1883}18841885J9Object *1886MM_CopyForwardScheme::updateForwardedPointer(J9Object *objectPtr)1887{1888J9Object *forwardPtr;18891890if(isObjectInEvacuateMemory(objectPtr)) {1891MM_ForwardedHeader forwardedHeader(objectPtr, _extensions->compressObjectReferences());1892forwardPtr = forwardedHeader.getForwardedObject();1893if(forwardPtr != NULL) {1894return forwardPtr;1895}1896}18971898return objectPtr;1899}19001901MMINLINE MM_AllocationContextTarok *1902MM_CopyForwardScheme::getContextForHeapAddress(void *address)1903{1904return ((MM_HeapRegionDescriptorVLHGC *)_regionManager->tableDescriptorForAddress(address))->_allocateData._owningContext;1905}19061907J9Object *1908MM_CopyForwardScheme::copy(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, MM_ForwardedHeader* forwardedHeader, bool leafType)1909{1910bool const compressed = env->compressObjectReferences();1911J9Object *result = NULL;1912J9Object *object = forwardedHeader->getObject();1913uintptr_t objectCopySizeInBytes = 0;1914uintptr_t objectReserveSizeInBytes = 0;19151916bool noEvacuation = false;1917if (0 != _regionCountCannotBeEvacuated) {1918noEvacuation = isObjectInNoEvacuationRegions(env, object);1919}19201921if (_abortInProgress || noEvacuation) {1922/* Once threads agreed that abort is in progress or the object is in noEvacuation region, only mark/push should be happening, no attempts even to allocate/copy */19231924if (_markMap->atomicSetBit(object)) {1925Assert_MM_false(MM_ForwardedHeader(object, compressed).isForwardedPointer());1926/* don't need to push leaf object in work stack */1927if (!leafType) {1928env->_workStack.push(env, object);1929}1930}19311932result = object;1933} else {1934uintptr_t hotFieldsDescriptor = 0;1935uintptr_t hotFieldsAlignment = 0;1936uintptr_t *hotFieldPadBase = NULL;1937uintptr_t hotFieldPadSize = 0;1938MM_CopyScanCacheVLHGC *copyCache = NULL;1939void *newCacheAlloc = NULL;1940GC_ObjectModel *objectModel = &_extensions->objectModel;19411942/* Object is in the evacuate space but not forwarded. 
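* Calculate its size, reserve space in a survivor copy cache for the appropriate compact group, then race to install the forwarding pointer; only the winning thread performs the memcpy and fix-ups below.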
*/1943objectModel->calculateObjectDetailsForCopy(env, forwardedHeader, &objectCopySizeInBytes, &objectReserveSizeInBytes, &hotFieldsDescriptor);19441945Assert_MM_objectAligned(env, objectReserveSizeInBytes);19461947#if defined(J9VM_INTERP_NATIVE_SUPPORT)1948/* adjust the reserved object's size if we are aligning hot fields and this class has a known hot field */1949if (_extensions->scavengerAlignHotFields && HOTFIELD_SHOULD_ALIGN(hotFieldsDescriptor)) {1950/* set the descriptor field if we should be aligning (since assuming that 0 means no is not safe) */1951hotFieldsAlignment = hotFieldsDescriptor;1952/* for simplicity, add the maximum padding we could need (and back off after allocation) */1953objectReserveSizeInBytes += (_cacheLineAlignment - _objectAlignmentInBytes);1954Assert_MM_objectAligned(env, objectReserveSizeInBytes);1955}1956#endif /* J9VM_INTERP_NATIVE_SUPPORT */19571958reservingContext = getPreferredAllocationContext(reservingContext, object);19591960copyCache = reserveMemoryForCopy(env, object, reservingContext, objectReserveSizeInBytes);19611962/* Check if memory was reserved successfully */1963if(NULL == copyCache) {1964raiseAbortFlag(env);1965Assert_MM_true(NULL == result);1966} else {1967Assert_MM_false(copyCache->isSplitArray());19681969/* Memory has been reserved */1970uintptr_t destinationCompactGroup = copyCache->_compactGroup;1971J9Object *destinationObjectPtr = (J9Object *)copyCache->cacheAlloc;1972Assert_MM_true(NULL != destinationObjectPtr);19731974/* now correct for the hot field alignment */1975#if defined(J9VM_INTERP_NATIVE_SUPPORT)1976if (0 != hotFieldsAlignment) {1977uintptr_t remainingInCacheLine = _cacheLineAlignment - ((uintptr_t)destinationObjectPtr % _cacheLineAlignment);1978uintptr_t alignmentBias = HOTFIELD_ALIGNMENT_BIAS(hotFieldsAlignment, _objectAlignmentInBytes);1979/* do alignment only if the object cannot fit in the remaining space in the cache line */1980if ((remainingInCacheLine < objectCopySizeInBytes) && (alignmentBias < remainingInCacheLine)) {1981hotFieldPadSize = ((remainingInCacheLine + _cacheLineAlignment) - (alignmentBias % _cacheLineAlignment)) % _cacheLineAlignment;1982hotFieldPadBase = (uintptr_t *)destinationObjectPtr;1983/* now fix the object pointer so that the hot field is aligned */1984destinationObjectPtr = (J9Object *)((uintptr_t)destinationObjectPtr + hotFieldPadSize);1985}1986/* and update the reserved size so that we "un-reserve" the extra memory we said we might need. 
This is done by1987* removing the excess reserve since we already accounted for the hotFieldPadSize by bumping the destination pointer1988* and now we need to revert to the amount needed for the object allocation and its array alignment so the rest of1989* the method continues to function without needing to know about this extra alignment calculation1990*/1991objectReserveSizeInBytes = objectReserveSizeInBytes - (_cacheLineAlignment - _objectAlignmentInBytes);1992}1993#endif /* J9VM_INTERP_NATIVE_SUPPORT */19941995/* and correct for the double array alignment */1996newCacheAlloc = (void *)((uintptr_t)destinationObjectPtr + objectReserveSizeInBytes);19971998/* Try to swap the forwarding pointer to the destination copy array into the source object */1999J9Object* originalDestinationObjectPtr = destinationObjectPtr;2000destinationObjectPtr = forwardedHeader->setForwardedObject(destinationObjectPtr);2001Assert_MM_true(NULL != destinationObjectPtr);2002if (destinationObjectPtr == originalDestinationObjectPtr) {2003/* Succeeded in forwarding the object - copy and adjust the age value */20042005#if defined(J9VM_INTERP_NATIVE_SUPPORT)2006if (NULL != hotFieldPadBase) {2007/* lay down a hole (XXX: This assumes that we are using AOL (address-ordered-list)) */2008MM_HeapLinkedFreeHeader::fillWithHoles(hotFieldPadBase, hotFieldPadSize, compressed);2009}2010#endif /* J9VM_INTERP_NATIVE_SUPPORT */20112012memcpy((void *)destinationObjectPtr, forwardedHeader->getObject(), objectCopySizeInBytes);20132014forwardedHeader->fixupForwardedObject(destinationObjectPtr);20152016if (objectModel->isIndexable(destinationObjectPtr)) {2017_extensions->indexableObjectModel.fixupInternalLeafPointersAfterCopy((J9IndexableObject *)destinationObjectPtr, (J9IndexableObject *)forwardedHeader->getObject());20182019/* Updates internal data address of indexable objects. Every indexable object has a void *dataAddr2020* that always points to the array data. It will always point to the address right after the header,2021* in case of contiguous data it will point to the data itself, and in case of discontiguous2022* arraylet it will point to the first arrayoid. dataAddr is only updated if dataAddr points to data2023* within heap. 
*/2024_extensions->indexableObjectModel.fixupDataAddr(destinationObjectPtr);2025}20262027objectModel->fixupHashFlagsAndSlot(forwardedHeader, destinationObjectPtr);20282029/* Update any mark maps and transfer card table data as appropriate for a successful copy */2030updateMarkMapAndCardTableOnCopy(env, forwardedHeader->getObject(), destinationObjectPtr, copyCache);20312032/* Move the cache allocate pointer to reflect the consumed memory */2033Assert_MM_true(copyCache->cacheAlloc <= copyCache->cacheTop);20342035if (_tracingEnabled) {2036PORT_ACCESS_FROM_ENVIRONMENT(env);2037j9tty_printf(PORTLIB, "Cache alloc: %p newAlloc: %p origO: %p copyO: %p\n", copyCache->cacheAlloc, newCacheAlloc, forwardedHeader->getObject(), destinationObjectPtr);2038}20392040copyCache->cacheAlloc = newCacheAlloc;2041Assert_MM_true(copyCache->cacheAlloc <= copyCache->cacheTop);20422043/* Update the stats */2044if (hotFieldPadSize > 0) {2045/* account for this as free memory */2046env->_copyForwardCompactGroups[destinationCompactGroup]._freeMemoryMeasured += hotFieldPadSize;2047}2048MM_HeapRegionDescriptorVLHGC * sourceRegion = (MM_HeapRegionDescriptorVLHGC *)_regionManager->tableDescriptorForAddress(object);2049uintptr_t sourceCompactGroup = MM_CompactGroupManager::getCompactGroupNumber(env, sourceRegion);2050if (sourceRegion->isEden()) {2051env->_copyForwardCompactGroups[sourceCompactGroup]._edenStats._liveObjects += 1;2052env->_copyForwardCompactGroups[sourceCompactGroup]._edenStats._liveBytes += objectCopySizeInBytes;2053env->_copyForwardCompactGroups[destinationCompactGroup]._edenStats._copiedObjects += 1;2054env->_copyForwardCompactGroups[destinationCompactGroup]._edenStats._copiedBytes += objectCopySizeInBytes;2055} else {2056env->_copyForwardCompactGroups[sourceCompactGroup]._nonEdenStats._liveObjects += 1;2057env->_copyForwardCompactGroups[sourceCompactGroup]._nonEdenStats._liveBytes += objectCopySizeInBytes;2058env->_copyForwardCompactGroups[destinationCompactGroup]._nonEdenStats._copiedObjects += 1;2059env->_copyForwardCompactGroups[destinationCompactGroup]._nonEdenStats._copiedBytes += objectCopySizeInBytes;2060}2061copyCache->_allocationAgeSizeProduct += ((double)objectReserveSizeInBytes * (double)sourceRegion->getAllocationAge());2062copyCache->_objectSize += objectReserveSizeInBytes;2063copyCache->_lowerAgeBound = OMR_MIN(copyCache->_lowerAgeBound, sourceRegion->getLowerAgeBound());2064copyCache->_upperAgeBound = OMR_MAX(copyCache->_upperAgeBound, sourceRegion->getUpperAgeBound());20652066#if defined(J9VM_GC_LEAF_BITS)2067if (_extensions->tarokEnableLeafFirstCopying) {2068copyLeafChildren(env, reservingContext, destinationObjectPtr);2069}2070#endif /* J9VM_GC_LEAF_BITS */2071/* depth copy the hot fields of an object if scavenger dynamicBreadthFirstScanOrdering is enabled */2072depthCopyHotFields(env, objectModel->getPreservedClass(forwardedHeader), destinationObjectPtr, reservingContext);2073}2074/* return value for updating the slot */2075result = destinationObjectPtr;2076}2077}20782079return result;2080}20812082#if defined(J9VM_GC_LEAF_BITS)2083void2084MM_CopyForwardScheme::copyLeafChildren(MM_EnvironmentVLHGC* env, MM_AllocationContextTarok *reservingContext, J9Object* objectPtr)2085{2086J9Class *clazz = J9GC_J9OBJECT_CLAZZ(objectPtr, env);2087if (GC_ObjectModel::SCAN_MIXED_OBJECT == _extensions->objectModel.getScanType(clazz)) {2088UDATA instanceLeafDescription = (UDATA)J9GC_J9OBJECT_CLAZZ(objectPtr, env)->instanceLeafDescription;2089/* For now we only support leaf children in small objects. 
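* (An immediate description has its low bit tagged and encodes the leaf slots as an inline bitmap in the remaining bits.)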
If the leaf description isn't immediate, ignore it to keep the code simple. */2090if (1 == (instanceLeafDescription & 1)) {2091bool const compressed = env->compressObjectReferences();2092fj9object_t* scanPtr = _extensions->mixedObjectModel.getHeadlessObject(objectPtr);2093UDATA leafBits = instanceLeafDescription >> 1;2094while (0 != leafBits) {2095if (1 == (leafBits & 1)) {2096/* Copy/Forward the slot reference and perform any inter-region remember work that is required */2097GC_SlotObject slotObject(_javaVM->omrVM, scanPtr);2098/* pass leaf flag into copy method for optimizing abort case and hybrid case (don't need to push leaf object in work stack) */2099copyAndForward(env, reservingContext, objectPtr, &slotObject, true);2100}2101leafBits >>= 1;2102scanPtr = GC_SlotObject::addToSlotAddress(scanPtr, 1, compressed);2103}2104}2105}2106}2107#endif /* J9VM_GC_LEAF_BITS */21082109MMINLINE void2110MM_CopyForwardScheme::depthCopyHotFields(MM_EnvironmentVLHGC *env, J9Class *clazz, J9Object *destinationObjectPtr, MM_AllocationContextTarok *reservingContext) {2111/* depth copy the hot fields of an object up to a depth specified by depthCopyMax */2112J9ClassHotFieldsInfo* hotFieldsInfo = clazz->hotFieldsInfo;2113if (env->_hotFieldCopyDepthCount < _extensions->depthCopyMax && NULL != hotFieldsInfo) {2114U_8 hotFieldOffset = hotFieldsInfo->hotFieldOffset1;2115if (U_8_MAX != hotFieldOffset) {2116copyHotField(env, destinationObjectPtr, hotFieldOffset, reservingContext);2117U_8 hotFieldOffset2 = hotFieldsInfo->hotFieldOffset2;2118if (U_8_MAX !=hotFieldOffset2) {2119copyHotField(env, destinationObjectPtr, hotFieldOffset2, reservingContext);2120U_8 hotFieldOffset3 = hotFieldsInfo->hotFieldOffset3;2121if (U_8_MAX != hotFieldOffset3) {2122copyHotField(env, destinationObjectPtr, hotFieldOffset3, reservingContext);2123}2124}2125} else if ((_extensions->alwaysDepthCopyFirstOffset) && (false == _extensions->objectModel.isIndexable(destinationObjectPtr))) {2126copyHotField(env, destinationObjectPtr, DEFAULT_HOT_FIELD_OFFSET, reservingContext);2127}2128}2129}21302131MMINLINE void2132MM_CopyForwardScheme::copyHotField(MM_EnvironmentVLHGC *env, J9Object *destinationObjectPtr, U_8 offset, MM_AllocationContextTarok *reservingContext) {2133bool const compressed = _extensions->compressObjectReferences();2134GC_SlotObject hotFieldObject(_javaVM->omrVM, GC_SlotObject::addToSlotAddress((fomrobject_t*)((uintptr_t)destinationObjectPtr), offset, compressed));2135omrobjectptr_t objectPtr = hotFieldObject.readReferenceFromSlot();2136if (isObjectInEvacuateMemory(objectPtr)) {2137/* Hot field needs to be copy and forwarded. 
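* Recursion depth is bounded by _extensions->depthCopyMax, tracked through env->_hotFieldCopyDepthCount.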
Check if the work has already been done */2138MM_ForwardedHeader forwardHeaderHotField(objectPtr, compressed);2139if (!forwardHeaderHotField.isForwardedPointer()) {2140env->_hotFieldCopyDepthCount += 1;2141copy(env, reservingContext, &forwardHeaderHotField);2142env->_hotFieldCopyDepthCount -= 1;2143}2144}2145}21462147void2148MM_CopyForwardScheme::flushCacheMarkMap(MM_EnvironmentVLHGC *env, MM_CopyScanCacheVLHGC *cache)2149{2150MM_CopyForwardCompactGroup *compactGroup = &(env->_copyForwardCompactGroups[cache->_compactGroup]);2151Assert_MM_true(cache == compactGroup->_copyCache);2152Assert_MM_false(UDATA_MAX == compactGroup->_markMapPGCSlotIndex); /* Safety check from flushing to see if somehow the cache is being resurrected */2153Assert_MM_false(UDATA_MAX == compactGroup->_markMapGMPSlotIndex); /* Safety check from flushing to see if somehow the cache is being resurrected */2154Assert_MM_false(cache->isSplitArray());21552156if(0 != compactGroup->_markMapPGCBitMask) {2157UDATA pgcFlushSlotIndex = compactGroup->_markMapPGCSlotIndex;2158if((pgcFlushSlotIndex == compactGroup->_markMapAtomicHeadSlotIndex) || (pgcFlushSlotIndex == compactGroup->_markMapAtomicTailSlotIndex)) {2159_markMap->atomicSetSlot(pgcFlushSlotIndex, compactGroup->_markMapPGCBitMask);2160} else {2161_markMap->setSlot(pgcFlushSlotIndex, compactGroup->_markMapPGCBitMask);2162}21632164/* We set the slot index to an invalid value to assert on later if seen */2165compactGroup->_markMapPGCSlotIndex = UDATA_MAX;2166compactGroup->_markMapPGCBitMask = 0;2167}21682169if(NULL != env->_cycleState->_externalCycleState) {2170if(0 != compactGroup->_markMapGMPBitMask) {2171UDATA gmpFlushSlotIndex = compactGroup->_markMapGMPSlotIndex;2172if((gmpFlushSlotIndex == compactGroup->_markMapAtomicHeadSlotIndex) || (gmpFlushSlotIndex == compactGroup->_markMapAtomicTailSlotIndex)) {2173env->_cycleState->_externalCycleState->_markMap->atomicSetSlot(gmpFlushSlotIndex, compactGroup->_markMapGMPBitMask);2174} else {2175env->_cycleState->_externalCycleState->_markMap->setSlot(gmpFlushSlotIndex, compactGroup->_markMapGMPBitMask);2176}21772178/* We set the slot index to an invalid value to assert on later if seen */2179compactGroup->_markMapGMPSlotIndex = UDATA_MAX;2180compactGroup->_markMapGMPBitMask = 0;2181}2182}21832184compactGroup->_markMapAtomicHeadSlotIndex = 0;2185compactGroup->_markMapAtomicTailSlotIndex = 0;2186}21872188void2189MM_CopyForwardScheme::updateMarkMapCache(MM_EnvironmentVLHGC *env, MM_MarkMap *markMap, J9Object *object,2190UDATA *slotIndexIndirect, UDATA *bitMaskIndirect, UDATA atomicHeadSlotIndex, UDATA atomicTailSlotIndex)2191{2192UDATA slotIndex = 0;2193UDATA bitMask = 0;21942195markMap->getSlotIndexAndMask(object, &slotIndex, &bitMask);21962197if(*slotIndexIndirect != slotIndex) {2198if(0 != *bitMaskIndirect) {2199UDATA flushSlotIndex = *slotIndexIndirect;2200if((flushSlotIndex == atomicHeadSlotIndex) || (flushSlotIndex == atomicTailSlotIndex)) {2201markMap->atomicSetSlot(flushSlotIndex, *bitMaskIndirect);2202} else {2203markMap->setSlot(flushSlotIndex, *bitMaskIndirect);2204}2205}2206*slotIndexIndirect = slotIndex;2207*bitMaskIndirect = bitMask;2208} else {2209*bitMaskIndirect |= bitMask;2210}2211}22122213void2214MM_CopyForwardScheme::updateMarkMapAndCardTableOnCopy(MM_EnvironmentVLHGC *env, J9Object *srcObject, J9Object *dstObject, MM_CopyScanCacheVLHGC *dstCache)2215{2216MM_CopyForwardCompactGroup *destinationGroup = &(env->_copyForwardCompactGroups[dstCache->_compactGroup]);2217Assert_MM_true(dstCache == 
destinationGroup->_copyCache);2218Assert_MM_false(UDATA_MAX == destinationGroup->_markMapPGCSlotIndex); /* Safety check from flushing to see if somehow the cache is being resurrected */2219Assert_MM_false(UDATA_MAX == destinationGroup->_markMapGMPSlotIndex); /* Safety check from flushing to see if somehow the cache is being resurrected */2220Assert_MM_false(dstCache->isSplitArray());22212222updateMarkMapCache(env, _markMap, dstObject, &destinationGroup->_markMapPGCSlotIndex, &destinationGroup->_markMapPGCBitMask, destinationGroup->_markMapAtomicHeadSlotIndex, destinationGroup->_markMapAtomicTailSlotIndex);22232224/* If there is an external cycle in progress, see if any information needs to be migrated */2225if(NULL != env->_cycleState->_externalCycleState) {2226MM_MarkMap *externalMap = env->_cycleState->_externalCycleState->_markMap;22272228if(externalMap->isBitSet(srcObject)) {2229/* The external cycle has already visited the live object - move the mark map and card information across */2230updateMarkMapCache(env, externalMap, dstObject, &destinationGroup->_markMapGMPSlotIndex, &destinationGroup->_markMapGMPBitMask, destinationGroup->_markMapAtomicHeadSlotIndex, destinationGroup->_markMapAtomicTailSlotIndex);22312232MM_CardTable *cardTable = _extensions->cardTable;2233Card *card = cardTable->heapAddrToCardAddr(env, srcObject);22342235switch(*card) {2236case CARD_GMP_MUST_SCAN:2237case CARD_DIRTY:2238{2239Card *dstCard = cardTable->heapAddrToCardAddr(env, dstObject);2240if(CARD_GMP_MUST_SCAN != *dstCard) {2241*dstCard = CARD_GMP_MUST_SCAN;2242}2243break;2244}2245case CARD_PGC_MUST_SCAN:2246case CARD_CLEAN:2247/* do nothing */2248break;2249default:2250Assert_MM_unreachable();2251}2252}2253}2254}22552256/****************************************2257* Object scan and copy routines2258****************************************2259*/2260MMINLINE void2261MM_CopyForwardScheme::scanOwnableSynchronizerObjectSlots(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, J9Object *objectPtr, ScanReason reason)2262{2263if (SCAN_REASON_COPYSCANCACHE == reason) {2264addOwnableSynchronizerObjectInList(env, objectPtr);2265} else if (SCAN_REASON_PACKET == reason) {2266if (isObjectInEvacuateMemoryNoCheck(objectPtr)) {2267addOwnableSynchronizerObjectInList(env, objectPtr);2268}2269}2270scanMixedObjectSlots(env, reservingContext, objectPtr, reason);2271}22722273/**2274* Iterate the slot reference and parse and pass leaf bit of the reference to copy forward2275* to avoid to push leaf object to work stack in case the reference need to be marked instead of copied.2276*/2277MMINLINE bool2278MM_CopyForwardScheme::iterateAndCopyforwardSlotReference(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, J9Object *objectPtr) {2279bool success = true;2280fj9object_t *endScanPtr;2281UDATA *descriptionPtr;2282UDATA descriptionBits;2283UDATA descriptionIndex;2284#if defined(J9VM_GC_LEAF_BITS)2285UDATA *leafPtr = (UDATA *)J9GC_J9OBJECT_CLAZZ(objectPtr, env)->instanceLeafDescription;2286UDATA leafBits;2287#endif /* J9VM_GC_LEAF_BITS */2288bool const compressed = env->compressObjectReferences();22892290/* Object slots */2291volatile fj9object_t* scanPtr = _extensions->mixedObjectModel.getHeadlessObject(objectPtr);2292UDATA objectSize = _extensions->mixedObjectModel.getSizeInBytesWithHeader(objectPtr);22932294endScanPtr = (fj9object_t*)(((U_8 *)objectPtr) + objectSize);2295descriptionPtr = (UDATA *)J9GC_J9OBJECT_CLAZZ(objectPtr, env)->instanceDescription;22962297if (((UDATA)descriptionPtr) & 1) 
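/* a low-tagged instanceDescription stores the slot bitmap inline; otherwise it points to an array of description words */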
{2298descriptionBits = ((UDATA)descriptionPtr) >> 1;2299#if defined(J9VM_GC_LEAF_BITS)2300leafBits = ((UDATA)leafPtr) >> 1;2301#endif /* J9VM_GC_LEAF_BITS */2302} else {2303descriptionBits = *descriptionPtr++;2304#if defined(J9VM_GC_LEAF_BITS)2305leafBits = *leafPtr++;2306#endif /* J9VM_GC_LEAF_BITS */2307}2308descriptionIndex = J9_OBJECT_DESCRIPTION_SIZE - 1;23092310while (success && (scanPtr < endScanPtr)) {2311/* Determine if the slot should be processed */2312if (descriptionBits & 1) {2313GC_SlotObject slotObject(_javaVM->omrVM, scanPtr);23142315/* Copy/Forward the slot reference and perform any inter-region remember work that is required */2316#if defined(J9VM_GC_LEAF_BITS)2317success = copyAndForward(env, reservingContext, objectPtr, &slotObject, 1 == (leafBits & 1));2318#else /* J9VM_GC_LEAF_BITS */2319success = copyAndForward(env, reservingContext, objectPtr, &slotObject);2320#endif /* J9VM_GC_LEAF_BITS */2321}2322descriptionBits >>= 1;2323#if defined(J9VM_GC_LEAF_BITS)2324leafBits >>= 1;2325#endif /* J9VM_GC_LEAF_BITS */2326if (descriptionIndex-- == 0) {2327descriptionBits = *descriptionPtr++;2328#if defined(J9VM_GC_LEAF_BITS)2329leafBits = *leafPtr++;2330#endif /* J9VM_GC_LEAF_BITS */2331descriptionIndex = J9_OBJECT_DESCRIPTION_SIZE - 1;2332}2333scanPtr = GC_SlotObject::addToSlotAddress((fomrobject_t*)scanPtr, 1, compressed);2334}2335return success;2336}23372338void2339MM_CopyForwardScheme::scanMixedObjectSlots(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, J9Object *objectPtr, ScanReason reason)2340{2341if(_tracingEnabled) {2342PORT_ACCESS_FROM_ENVIRONMENT(env);2343j9tty_printf(PORTLIB, "@%p\n", objectPtr);2344}23452346bool success = copyAndForwardObjectClass(env, reservingContext, objectPtr);23472348if (success) {2349/* Iteratoring and copyforwarding the slot reference with leaf bit */2350success = iterateAndCopyforwardSlotReference(env, reservingContext, objectPtr);2351}23522353updateScanStats(env, objectPtr, reason);2354}23552356void2357MM_CopyForwardScheme::scanReferenceObjectSlots(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, J9Object *objectPtr, ScanReason reason)2358{2359bool success = copyAndForwardObjectClass(env, reservingContext, objectPtr);23602361I_32 referenceState = J9GC_J9VMJAVALANGREFERENCE_STATE(env, objectPtr);23622363/* if the reference isn't part of the collection set, treat it as a strong reference */2364bool isReferenceInCollectionSet = isObjectInNurseryMemory(objectPtr);2365bool isReferenceCleared = (GC_ObjectModel::REF_STATE_CLEARED == referenceState) || (GC_ObjectModel::REF_STATE_ENQUEUED == referenceState);2366bool referentMustBeMarked = isReferenceCleared || !isReferenceInCollectionSet;2367bool referentMustBeCleared = false;2368if (isReferenceInCollectionSet) {2369UDATA referenceObjectOptions = env->_cycleState->_referenceObjectOptions;2370UDATA referenceObjectType = J9CLASS_FLAGS(J9GC_J9OBJECT_CLAZZ(objectPtr, env)) & J9AccClassReferenceMask;2371switch (referenceObjectType) {2372case J9AccClassReferenceWeak:2373referentMustBeCleared = (0 != (referenceObjectOptions & MM_CycleState::references_clear_weak)) ;2374break;2375case J9AccClassReferenceSoft:2376referentMustBeCleared = (0 != (referenceObjectOptions & MM_CycleState::references_clear_soft));2377referentMustBeMarked = referentMustBeMarked || (2378((0 == (referenceObjectOptions & MM_CycleState::references_soft_as_weak))2379&& ((UDATA)J9GC_J9VMJAVALANGSOFTREFERENCE_AGE(env, objectPtr) < 
_extensions->getDynamicMaxSoftReferenceAge())));2380break;2381case J9AccClassReferencePhantom:2382referentMustBeCleared = (0 != (referenceObjectOptions & MM_CycleState::references_clear_phantom));2383break;2384default:2385Assert_MM_unreachable();2386}2387}23882389GC_SlotObject referentPtr(_javaVM->omrVM, J9GC_J9VMJAVALANGREFERENCE_REFERENT_ADDRESS(env, objectPtr));23902391/* Iterating and copyforwarding regular reference slots, except the special (soft) referent slot. Not making use of leaf bit optimization,2392* sacrificing minor performance to avoid code complication. Could optimize later, if/when using ObjectScanner */2393GC_MixedObjectIterator mixedObjectIterator(_javaVM->omrVM, objectPtr);2394GC_SlotObject *slotObject = NULL;2395while (success && (NULL != (slotObject = mixedObjectIterator.nextSlot()))) {2396if ((slotObject->readAddressFromSlot() != referentPtr.readAddressFromSlot()) || referentMustBeMarked) {2397/* Copy/Forward the slot reference and perform any inter-region remember work that is required */2398success = copyAndForward(env, reservingContext, objectPtr, slotObject);2399}2400}24012402if (SCAN_REASON_OVERFLOWED_REGION == reason) {2403/* handled when we empty packet to overflow */2404} else {2405if (referentMustBeCleared) {2406Assert_MM_true(isReferenceInCollectionSet);2407referentPtr.writeReferenceToSlot(NULL);2408if (!isReferenceCleared) {2409J9GC_J9VMJAVALANGREFERENCE_STATE(env, objectPtr) = GC_ObjectModel::REF_STATE_CLEARED;2410}2411} else if (isReferenceInCollectionSet) {2412if (!isReferenceCleared) {2413if (success) {2414env->getGCEnvironment()->_referenceObjectBuffer->add(env, objectPtr);2415}2416}2417}2418}24192420updateScanStats(env, objectPtr, reason);2421}24222423UDATA2424MM_CopyForwardScheme::createNextSplitArrayWorkUnit(MM_EnvironmentVLHGC *env, J9IndexableObject *arrayPtr, UDATA startIndex, bool currentSplitUnitOnly)2425{2426UDATA sizeInElements = _extensions->indexableObjectModel.getSizeInElements(arrayPtr);2427UDATA slotsToScan = 0;24282429if (sizeInElements > 0) {2430Assert_MM_true(startIndex < sizeInElements);2431slotsToScan = sizeInElements - startIndex;24322433if (slotsToScan > _arraySplitSize) {2434slotsToScan = _arraySplitSize;24352436/* immediately make the next chunk available for another thread to start processing */2437UDATA nextIndex = startIndex + slotsToScan;2438Assert_MM_true(nextIndex < sizeInElements);24392440bool noEvacuation = false;2441if (0 != _regionCountCannotBeEvacuated) {2442noEvacuation = isObjectInNoEvacuationRegions(env, (J9Object *) arrayPtr);2443}24442445if (abortFlagRaised() || noEvacuation) {2446if (!currentSplitUnitOnly) {2447/* work stack driven */2448env->_workStack.push(env, (void *)arrayPtr, (void *)((nextIndex << PACKET_ARRAY_SPLIT_SHIFT) | PACKET_ARRAY_SPLIT_TAG));2449env->_workStack.flushOutputPacket(env);2450#if defined(J9MODRON_TGC_PARALLEL_STATISTICS)2451env->_copyForwardStats._markedArraysSplit += 1;2452#endif /* J9MODRON_TGC_PARALLEL_STATISTICS */2453}2454} else {2455Assert_MM_false(currentSplitUnitOnly);2456/* copy-scan cache driven */2457MM_CopyScanCacheVLHGC *splitCache = getFreeCache(env);2458if (NULL != splitCache) {2459reinitArraySplitCache(env, splitCache, arrayPtr, nextIndex);2460addCacheEntryToScanCacheListAndNotify(env, splitCache);2461#if defined(J9MODRON_TGC_PARALLEL_STATISTICS)2462env->_copyForwardStats._copiedArraysSplit += 1;2463#endif /* J9MODRON_TGC_PARALLEL_STATISTICS */2464} else {2465Assert_MM_true(_abortFlag);2466void *element1 = (void *)arrayPtr;2467void *element2 = (void *)((nextIndex << 
PACKET_ARRAY_SPLIT_SHIFT) | PACKET_ARRAY_SPLIT_TAG);2468Assert_MM_true(nextIndex == (((UDATA)element2) >> PACKET_ARRAY_SPLIT_SHIFT));2469env->_workStack.push(env, element1, element2);2470env->_workStack.flushOutputPacket(env);2471#if defined(J9MODRON_TGC_PARALLEL_STATISTICS)2472env->_copyForwardStats._markedArraysSplit += 1;2473#endif /* J9MODRON_TGC_PARALLEL_STATISTICS */2474Trc_MM_CopyForwardScheme_scanPointerArrayObjectSlotsSplit_failedToAllocateCache(env->getLanguageVMThread(), sizeInElements);2475}2476}2477}2478}24792480return slotsToScan;2481}2482UDATA2483MM_CopyForwardScheme::scanPointerArrayObjectSlotsSplit(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, J9IndexableObject *arrayPtr, UDATA startIndex, bool currentSplitUnitOnly)2484{2485if(_tracingEnabled) {2486PORT_ACCESS_FROM_ENVIRONMENT(env);2487j9tty_printf(PORTLIB, "#");2488}24892490/* there's no harm in remembering the array multiple times, so do this for each split chunk */2491bool success = copyAndForwardObjectClass(env, reservingContext, (J9Object *)arrayPtr);24922493UDATA slotsToScan = createNextSplitArrayWorkUnit(env, arrayPtr, startIndex, currentSplitUnitOnly);24942495if (slotsToScan > 0) {2496/* TODO: this iterator scans the array backwards - change it to forward, and optimize it since we can guarantee the range will be contiguous */2497GC_PointerArrayIterator pointerArrayIterator(_javaVM, (J9Object *)arrayPtr);2498pointerArrayIterator.setIndex(startIndex + slotsToScan);24992500for (UDATA scanCount = 0; success && (scanCount < slotsToScan); scanCount++) {2501GC_SlotObject *slotObject = pointerArrayIterator.nextSlot();2502if (NULL == slotObject) {2503/* this can happen if the array is only partially allocated */2504break;2505}25062507/* Copy/Forward the slot reference and perform any inter-region remember work that is required */2508success = copyAndForwardPointerArray(env, reservingContext, arrayPtr, startIndex, slotObject);2509}2510}25112512return slotsToScan;2513}25142515void2516MM_CopyForwardScheme::scanClassObjectSlots(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, J9Object *classObject, ScanReason reason)2517{2518scanMixedObjectSlots(env, reservingContext, classObject, reason);25192520J9Class *classPtr = J9VM_J9CLASS_FROM_HEAPCLASS((J9VMThread*)env->getLanguageVMThread(), classObject);25212522if (NULL != classPtr) {2523volatile j9object_t * slotPtr = NULL;2524bool success = true;25252526do {2527/*2528* Scan J9Class internals using general iterator2529* - scan static fields2530* - scan call sites2531* - scan MethodTypes2532* - scan VarHandle MethodTypes2533* - scan constant pool objects2534*/2535GC_ClassIterator classIterator(env, classPtr, false);2536while (success && (NULL != (slotPtr = classIterator.nextSlot()))) {2537/* Copy/Forward the slot reference and perform any inter-region remember work that is required */2538success = copyAndForward(env, reservingContext, classObject, slotPtr);2539}25402541/*2542* Usually we don't care about class-to-class references because they can be marked as part of a live classloader or found in the hash table.2543* However, we need to scan them in the case of anonymous classes. 
They are unloaded on an individual basis, so it is important to reach each one2544*/2545if (J9_ARE_ANY_BITS_SET(J9CLASS_EXTENDED_FLAGS(classPtr), J9ClassIsAnonymous)) {2546GC_ClassIteratorClassSlots classSlotIterator(_javaVM, classPtr);2547J9Class *classPtr;2548while (success && (NULL != (classPtr = classSlotIterator.nextSlot()))) {2549slotPtr = &(classPtr->classObject);2550/* Copy/Forward the slot reference and perform any inter-region remember work that is required */2551success = copyAndForward(env, reservingContext, classObject, slotPtr);2552}2553}25542555if (success) {2556/* we can safely ignore any classes referenced by the constant pool, since2557* these are guaranteed to be referenced by our class loader2558* except the anonymous case handled above2559*/2560/* By scanning the class object, we've committed to it either being in a card external to the collection set, or that it is already part of a copied set and2561* being scanned through the copy/scan cache. In either case, a simple pointer forward update is all that is required.2562*/2563classPtr->classObject = (j9object_t)updateForwardedPointer((J9Object *)classPtr->classObject);2564Assert_MM_true(isLiveObject((J9Object *)classPtr->classObject));2565}2566classPtr = classPtr->replacedClass;2567} while (success && (NULL != classPtr));2568}2569}25702571void2572MM_CopyForwardScheme::scanClassLoaderObjectSlots(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, J9Object *classLoaderObject, ScanReason reason)2573{2574scanMixedObjectSlots(env, reservingContext, classLoaderObject, reason);25752576J9ClassLoader *classLoader = J9VMJAVALANGCLASSLOADER_VMREF((J9VMThread*)env->getLanguageVMThread(), classLoaderObject);2577if (NULL != classLoader) {2578/* By scanning the class loader object, we've committed to it either being in a card external to the collection set, or that it is already part of a copied set and2579* being scanned through the copy/scan cache. 
In either case, a simple pointer forward update is all that is required.2580*/2581classLoader->classLoaderObject = updateForwardedPointer((J9Object *)classLoader->classLoaderObject);2582Assert_MM_true(isLiveObject((J9Object *)classLoader->classLoaderObject));25832584/* No lock is required because this only runs under exclusive access */2585/* (NULL == classLoader->classHashTable) is true ONLY for DEAD class loaders */2586Assert_MM_true(NULL != classLoader->classHashTable);25872588/* Do this for all classloaders except anonymous */2589if (0 == (classLoader->flags & J9CLASSLOADER_ANON_CLASS_LOADER)) {2590GC_ClassLoaderClassesIterator iterator(_extensions, classLoader);2591J9Class *clazz = NULL;2592bool success = true;2593while (success && (NULL != (clazz = iterator.nextClass()))) {2594Assert_MM_true(NULL != clazz->classObject);2595/* Copy/Forward the slot reference and perform any inter-region remember work that is required */2596success = copyAndForward(env, reservingContext, classLoaderObject, (J9Object **)&(clazz->classObject));2597}25982599if (NULL != classLoader->moduleHashTable) {2600J9HashTableState walkState;2601J9Module **modulePtr = (J9Module **)hashTableStartDo(classLoader->moduleHashTable, &walkState);2602while (success && (NULL != modulePtr)) {2603J9Module * const module = *modulePtr;2604success = copyAndForward(env, reservingContext, classLoaderObject, (J9Object **)&(module->moduleObject));2605if (success) {2606if (NULL != module->moduleName) {2607success = copyAndForward(env, reservingContext, classLoaderObject, (J9Object **)&(module->moduleName));2608}2609}2610if (success) {2611if (NULL != module->version) {2612success = copyAndForward(env, reservingContext, classLoaderObject, (J9Object **)&(module->version));2613}2614}2615modulePtr = (J9Module**)hashTableNextDo(&walkState);2616}26172618if (success && (classLoader == _javaVM->systemClassLoader)) {2619success = copyAndForward(env, reservingContext, classLoaderObject, (J9Object **)&(_javaVM->unamedModuleForSystemLoader->moduleObject));2620}2621}2622}2623}2624}26252626/****************************************2627* Scan completion routines2628****************************************2629*/2630bool2631MM_CopyForwardScheme::isScanCacheWorkAvailable(MM_CopyScanCacheListVLHGC *scanCacheList)2632{2633return !scanCacheList->isEmpty();2634}26352636bool2637MM_CopyForwardScheme::isAnyScanCacheWorkAvailable()2638{2639bool result = false;2640UDATA nodeLists = _scanCacheListSize;2641for (UDATA i = 0; (!result) && (i < nodeLists); i++) {2642result = isScanCacheWorkAvailable(&_cacheScanLists[i]);2643}2644return result;2645}26462647bool2648MM_CopyForwardScheme::isAnyScanWorkAvailable(MM_EnvironmentVLHGC *env)2649{2650return (isAnyScanCacheWorkAvailable() || ((0 != _regionCountCannotBeEvacuated) && !abortFlagRaised() && env->_workStack.inputPacketAvailableFromWorkPackets(env)));2651}26522653MM_CopyScanCacheVLHGC *2654MM_CopyForwardScheme::getSurvivorCacheForScan(MM_EnvironmentVLHGC *env)2655{2656MM_CopyScanCacheVLHGC *cache = NULL;26572658for(UDATA index = 0; index < _compactGroupMaxCount; index++) {2659cache = env->_copyForwardCompactGroups[index]._copyCache;2660if((NULL != cache) && cache->isScanWorkAvailable()) {2661return cache;2662}2663}26642665return NULL;2666}26672668MM_CopyForwardScheme::ScanReason2669MM_CopyForwardScheme::getNextWorkUnit(MM_EnvironmentVLHGC *env, UDATA preferredNumaNode)2670{2671env->_scanCache = NULL;2672ScanReason ret = SCAN_REASON_NONE;26732674MM_CopyScanCacheVLHGC *cache = NULL;2675/* Preference is to use survivor 
copy cache */2676if(NULL != (cache = getSurvivorCacheForScan(env))) {2677env->_scanCache = cache;2678ret = SCAN_REASON_COPYSCANCACHE;2679return ret;2680}26812682if (NULL != env->_deferredScanCache) {2683/* there is deferred scanning to do from partial depth first scanning */2684cache = (MM_CopyScanCacheVLHGC *)env->_deferredScanCache;2685env->_deferredScanCache = NULL;2686env->_scanCache = cache;2687ret = SCAN_REASON_COPYSCANCACHE;2688return ret;2689}26902691#if defined(J9MODRON_TGC_PARALLEL_STATISTICS)2692env->_copyForwardStats._acquireScanListCount += 1;2693#endif /* J9MODRON_TGC_PARALLEL_STATISTICS */26942695bool doneFlag = false;2696volatile UDATA doneIndex = _doneIndex;26972698while ((SCAN_REASON_NONE == ret) && !doneFlag) {2699if (SCAN_REASON_NONE == (ret = getNextWorkUnitNoWait(env, preferredNumaNode))) {2700omrthread_monitor_enter(*_workQueueMonitorPtr);2701*_workQueueWaitCountPtr += 1;27022703if(doneIndex == _doneIndex) {2704if((*_workQueueWaitCountPtr == env->_currentTask->getThreadCount()) && !isAnyScanWorkAvailable(env)) {2705*_workQueueWaitCountPtr = 0;2706_doneIndex += 1;2707omrthread_monitor_notify_all(*_workQueueMonitorPtr);2708} else {2709while(!isAnyScanWorkAvailable(env) && (doneIndex == _doneIndex)) {2710#if defined(J9MODRON_TGC_PARALLEL_STATISTICS)2711PORT_ACCESS_FROM_ENVIRONMENT(env);2712U_64 waitEndTime, waitStartTime;2713waitStartTime = j9time_hires_clock();2714#endif /* J9MODRON_TGC_PARALLEL_STATISTICS */2715omrthread_monitor_wait(*_workQueueMonitorPtr);2716#if defined(J9MODRON_TGC_PARALLEL_STATISTICS)2717waitEndTime = j9time_hires_clock();2718if (doneIndex == _doneIndex) {2719env->_copyForwardStats.addToWorkStallTime(waitStartTime, waitEndTime);2720} else {2721env->_copyForwardStats.addToCompleteStallTime(waitStartTime, waitEndTime);2722}2723#endif /* J9MODRON_TGC_PARALLEL_STATISTICS */2724}2725}2726}27272728/* Set the local done flag and if we are done and the waiting count is 0 (last thread) exit */2729doneFlag = (doneIndex != _doneIndex);2730if (!doneFlag) {2731*_workQueueWaitCountPtr -= 1;2732}2733omrthread_monitor_exit(*_workQueueMonitorPtr);2734}2735}27362737return ret;2738}27392740MM_CopyForwardScheme::ScanReason2741MM_CopyForwardScheme::getNextWorkUnitOnNode(MM_EnvironmentVLHGC *env, UDATA numaNode)2742{2743ScanReason ret = SCAN_REASON_NONE;27442745MM_CopyScanCacheVLHGC *cache = _cacheScanLists[numaNode].popCache(env);2746if(NULL != cache) {2747/* Check if there are threads waiting that should be notified because of pending entries */2748if((0 != *_workQueueWaitCountPtr) && isScanCacheWorkAvailable(&_cacheScanLists[numaNode])) {2749omrthread_monitor_enter(*_workQueueMonitorPtr);2750if(0 != *_workQueueWaitCountPtr) {2751omrthread_monitor_notify(*_workQueueMonitorPtr);2752}2753omrthread_monitor_exit(*_workQueueMonitorPtr);2754}2755env->_scanCache = cache;2756ret = SCAN_REASON_COPYSCANCACHE;2757}27582759return ret;2760}27612762MM_CopyForwardScheme::ScanReason2763MM_CopyForwardScheme::getNextWorkUnitNoWait(MM_EnvironmentVLHGC *env, UDATA preferredNumaNode)2764{2765UDATA nodeLists = _scanCacheListSize;2766ScanReason ret = SCAN_REASON_NONE;2767/* local node first */2768ret = getNextWorkUnitOnNode(env, preferredNumaNode);2769if (SCAN_REASON_NONE == ret) {2770/* we failed to find a scan cache on our preferred node */2771if (COMMON_CONTEXT_INDEX != preferredNumaNode) {2772/* try the common node */2773ret = getNextWorkUnitOnNode(env, COMMON_CONTEXT_INDEX);2774}2775/* now try the remaining nodes */2776UDATA nextNode = (preferredNumaNode + 1) % nodeLists;2777while 
((SCAN_REASON_NONE == ret) && (nextNode != preferredNumaNode)) {2778if (COMMON_CONTEXT_INDEX != nextNode) {2779ret = getNextWorkUnitOnNode(env, nextNode);2780}2781nextNode = (nextNode + 1) % nodeLists;2782}2783}2784if (SCAN_REASON_NONE == ret && (0 != _regionCountCannotBeEvacuated) && !abortFlagRaised()) {2785if (env->_workStack.retrieveInputPacket(env)) {2786ret = SCAN_REASON_PACKET;2787}2788}2789return ret;2790}27912792/**2793* Calculates distance from the allocation pointer to the scan pointer for the given cache.2794*2795* If the allocation pointer is less than or equal to the scan pointer, or the cache is NULL2796* the distance is set to the maximum unsigned UDATA, SCAN_TO_COPY_CACHE_MAX_DISTANCE.2797* @return distance calculated.2798*/2799MMINLINE UDATA2800MM_CopyForwardScheme::scanToCopyDistance(MM_CopyScanCacheVLHGC* cache)2801{2802if (cache == NULL) {2803return SCAN_TO_COPY_CACHE_MAX_DISTANCE;2804}2805IDATA distance = ((UDATA) cache->cacheAlloc) - ((UDATA) cache->scanCurrent);2806UDATA udistance;2807if (distance <= 0) {2808udistance = SCAN_TO_COPY_CACHE_MAX_DISTANCE;2809} else {2810udistance = distance;2811}2812return udistance;2813}28142815/**2816* For a given copyCache and scanCache (which may or may not also be a copy cache), return the2817* best cache for scanning of these two caches.2818*2819* If the copyCache has work to scan, and the scanCache is not a copy cache, then the copyCache is2820* the better one. If they are both copy caches (it is assumed the scanCache in this context has2821* work to scan), then the one with the shorter scanToCopyDistance is the better one to scan.2822*2823* @param copyCache the candidate copy cache2824* @param scanCache the current best scan cache, which may be updated.2825* @return true if the scanCache has been updated with the best cache to scan.2826*/2827MMINLINE bool2828MM_CopyForwardScheme::bestCacheForScanning(MM_CopyScanCacheVLHGC* copyCache, MM_CopyScanCacheVLHGC** scanCache)2829{2830if (!copyCache->isScanWorkAvailable()) {2831return false;2832}2833if (!((*scanCache)->flags & J9VM_MODRON_SCAVENGER_CACHE_TYPE_COPY)) {2834*scanCache = copyCache;2835return true;2836}2837if (scanToCopyDistance(copyCache) < scanToCopyDistance(*scanCache)) {2838*scanCache = copyCache;2839return true;2840}2841return false;2842}28432844MMINLINE bool2845MM_CopyForwardScheme::aliasToCopyCache(MM_EnvironmentVLHGC *env, MM_CopyScanCacheVLHGC** nextScanCache)2846{2847bool interruptScanning = false;28482849Assert_MM_unimplemented();2850#if 02851/* VMDESIGN 1359.2852* Only alias the _survivorCopyScanCache IF there are 0 threads waiting. If the current thread is the only producer and2853* it aliases it's survivor cache then it will be the only thread able to consume. 
This will alleviate the stalling issues2854* described in the above mentioned design.2855*/2856if (0 == *_workQueueWaitCountPtr) {2857interruptScanning = bestCacheForScanning(env->_survivorCopyScanCache, nextScanCache) || interruptScanning;2858}2859#endif /* 0 */28602861return interruptScanning;2862}28632864MMINLINE void2865MM_CopyForwardScheme::scanObject(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, J9Object *objectPtr, ScanReason reason)2866{2867J9Class* clazz = J9GC_J9OBJECT_CLAZZ(objectPtr, env);2868Assert_MM_mustBeClass(clazz);2869switch(_extensions->objectModel.getScanType(clazz)) {2870case GC_ObjectModel::SCAN_MIXED_OBJECT_LINKED:2871case GC_ObjectModel::SCAN_ATOMIC_MARKABLE_REFERENCE_OBJECT:2872case GC_ObjectModel::SCAN_MIXED_OBJECT:2873scanMixedObjectSlots(env, reservingContext, objectPtr, reason);2874break;2875case GC_ObjectModel::SCAN_OWNABLESYNCHRONIZER_OBJECT:2876scanOwnableSynchronizerObjectSlots(env, reservingContext, objectPtr, reason);2877break;2878case GC_ObjectModel::SCAN_REFERENCE_MIXED_OBJECT:2879scanReferenceObjectSlots(env, reservingContext, objectPtr, reason);2880break;2881case GC_ObjectModel::SCAN_CLASS_OBJECT:2882scanClassObjectSlots(env, reservingContext, objectPtr, reason);2883break;2884case GC_ObjectModel::SCAN_CLASSLOADER_OBJECT:2885scanClassLoaderObjectSlots(env, reservingContext, objectPtr, reason);2886break;2887case GC_ObjectModel::SCAN_POINTER_ARRAY_OBJECT:2888scanPointerArrayObjectSlots(env, reservingContext, (J9IndexableObject *)objectPtr, reason);2889break;2890case GC_ObjectModel::SCAN_PRIMITIVE_ARRAY_OBJECT:2891if (SCAN_REASON_DIRTY_CARD != reason) {2892/* since we copy arrays in the non-aborting case, count them as scanned in the abort case for symmetry */2893updateScanStats(env, objectPtr, reason);2894}2895break;2896default:2897Trc_MM_CopyForwardScheme_scanObject_invalid(env->getLanguageVMThread(), objectPtr, reason);2898Assert_MM_unreachable();2899}2900}29012902MMINLINE void2903MM_CopyForwardScheme::updateScanStats(MM_EnvironmentVLHGC *env, J9Object *objectPtr, ScanReason reason)2904{2905bool noEvacuation = false;2906if (0 != _regionCountCannotBeEvacuated) {2907noEvacuation = isObjectInNoEvacuationRegions(env, objectPtr);2908}29092910if (SCAN_REASON_DIRTY_CARD == reason) {2911UDATA objectSize = _extensions->objectModel.getSizeInBytesWithHeader(objectPtr);2912env->_copyForwardStats._objectsCardClean += 1;2913env->_copyForwardStats._bytesCardClean += objectSize;2914} else if (abortFlagRaised() || noEvacuation) {2915UDATA objectSize = _extensions->objectModel.getSizeInBytesWithHeader(objectPtr);2916Assert_MM_false(SCAN_REASON_DIRTY_CARD == reason);2917MM_HeapRegionDescriptorVLHGC * region = (MM_HeapRegionDescriptorVLHGC *)_regionManager->tableDescriptorForAddress(objectPtr);2918UDATA compactGroup = MM_CompactGroupManager::getCompactGroupNumber(env, region);2919if (region->isEden()) {2920env->_copyForwardCompactGroups[compactGroup]._edenStats._liveObjects += 1;2921env->_copyForwardCompactGroups[compactGroup]._edenStats._liveBytes += objectSize;2922env->_copyForwardCompactGroups[compactGroup]._edenStats._scannedObjects += 1;2923env->_copyForwardCompactGroups[compactGroup]._edenStats._scannedBytes += objectSize;2924} else {2925env->_copyForwardCompactGroups[compactGroup]._nonEdenStats._liveObjects += 1;2926env->_copyForwardCompactGroups[compactGroup]._nonEdenStats._liveBytes += objectSize;2927env->_copyForwardCompactGroups[compactGroup]._nonEdenStats._scannedObjects += 
1;
			env->_copyForwardCompactGroups[compactGroup]._nonEdenStats._scannedBytes += objectSize;
		}
	}

	/* else:
	 * if not abort, object is copied and stats are updated through copy method
	 * if abort, object is both copied and scanned, but we do not report those stats
	 */
}


void
MM_CopyForwardScheme::scanPointerArrayObjectSlots(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, J9IndexableObject *arrayPtr, ScanReason reason)
{
	UDATA index = 0;
	bool currentSplitUnitOnly = false;

	/* only the _abortInProgress==true or noEvacuation==true cases are expected here, but we should handle all exceptional cases (such as the abortFlagRaised() case) */
	if (SCAN_REASON_PACKET == reason) {
		UDATA peekValue = (UDATA)env->_workStack.peek(env);
		if ((PACKET_ARRAY_SPLIT_TAG == (peekValue & PACKET_ARRAY_SPLIT_TAG))) {
			UDATA workItem = (UDATA)env->_workStack.pop(env);
			index = workItem >> PACKET_ARRAY_SPLIT_SHIFT;
			currentSplitUnitOnly = ((PACKET_ARRAY_SPLIT_CURRENT_UNIT_ONLY_TAG == (peekValue & PACKET_ARRAY_SPLIT_CURRENT_UNIT_ONLY_TAG)));
		}
	}
	if (0 == index) {
		/* make sure we only record stats for the object once -- note that this means we might
		 * attribute the scanning cost to the wrong thread, but that's not really important
		 */
		updateScanStats(env, (J9Object*)arrayPtr, reason);
	}

	scanPointerArrayObjectSlotsSplit(env, reservingContext, arrayPtr, index, currentSplitUnitOnly);
}

/**
 * Scans all the objects to scan in the env->_scanCache and flushes the cache at the end.
 */
void
MM_CopyForwardScheme::completeScanCache(MM_EnvironmentVLHGC *env)
{
	MM_CopyScanCacheVLHGC *scanCache = (MM_CopyScanCacheVLHGC *)env->_scanCache;

	/* mark that cache is in use as a scan cache */
	scanCache->setCurrentlyBeingScanned();
	if (scanCache->isSplitArray()) {
		/* a scan cache can't be a split array and have generic work available */
		Assert_MM_false(scanCache->isScanWorkAvailable());
		MM_AllocationContextTarok *reservingContext = getContextForHeapAddress(scanCache->scanCurrent);
		J9IndexableObject *arrayObject = (J9IndexableObject *)scanCache->scanCurrent;
		UDATA nextIndex = scanCache->_arraySplitIndex;
		Assert_MM_true(0 != nextIndex);
		scanPointerArrayObjectSlotsSplit(env, reservingContext, arrayObject, nextIndex);
		scanCache->clearSplitArray();
	} else if (scanCache->isScanWorkAvailable()) {
		/* we want to perform a NUMA-aware analogue to "hierarchical scanning" so this scan cache should pull other objects into its node */
		MM_AllocationContextTarok *reservingContext = getContextForHeapAddress(scanCache->scanCurrent);
		do {
			GC_ObjectHeapIteratorAddressOrderedList heapChunkIterator(
				_extensions,
				(J9Object *)scanCache->scanCurrent,
				(J9Object *)scanCache->cacheAlloc, false);
			/* Advance the scan pointer to the top of the cache to signify that this has been scanned */
			scanCache->scanCurrent = scanCache->cacheAlloc;
			/* Scan the chunk for all live objects */
			J9Object *objectPtr = NULL;
			while((objectPtr = heapChunkIterator.nextObject()) != NULL) {
				scanObject(env, reservingContext, objectPtr, SCAN_REASON_COPYSCANCACHE);
			}
		} while(scanCache->isScanWorkAvailable());

	}
	/* mark cache as no longer in use for scanning */
	scanCache->clearCurrentlyBeingScanned();
	/* Done with the cache - build a free list entry in the hole, release the cache to the free list (if not used), and continue */
	flushCache(env, scanCache);
}
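
/**
 * Incrementally scan the reference slots of a mixed object, copying/forwarding each slot that is visited.
 * Scanning is interrupted if aliasToCopyCache() selects a different cache to scan; in that case the
 * iterator state is saved into the scan cache so that this object can be resumed later.
 * @param hasPartiallyScannedObject true if this object was partially scanned on a previous pass
 * @param nextScanCache the cache to switch to if scanning was interrupted
 * @return true if the object was only partially scanned (scanning was interrupted), false if it was fully scanned
 */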
MMINLINE bool
MM_CopyForwardScheme::incrementalScanMixedObjectSlots(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, MM_CopyScanCacheVLHGC* scanCache, J9Object *objectPtr,
	bool hasPartiallyScannedObject, MM_CopyScanCacheVLHGC** nextScanCache)
{
	GC_MixedObjectIterator mixedObjectIterator(_javaVM->omrVM);

	if (!hasPartiallyScannedObject) {
		/* finished previous object, step up for next one */
		mixedObjectIterator.initialize(_javaVM->omrVM, objectPtr);
	} else {
		/* retrieve partial scan state of cache */
		mixedObjectIterator.restore(&(scanCache->_objectIteratorState));
	}
	GC_SlotObject *slotObject;
	bool success = true;
	while (success && ((slotObject = mixedObjectIterator.nextSlot()) != NULL)) {
		/* Copy/Forward the slot reference and perform any inter-region remember work that is required */
		success = copyAndForward(env, reservingContext, objectPtr, slotObject);

		/* interrupt scanning this cache if it should be aliased or re-aliased */
		if (aliasToCopyCache(env, nextScanCache)) {
			/* save scan state of cache */
			mixedObjectIterator.save(&(scanCache->_objectIteratorState));
			return true;
		}
	}

	return false;
}

MMINLINE bool
MM_CopyForwardScheme::incrementalScanClassObjectSlots(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, MM_CopyScanCacheVLHGC* scanCache, J9Object *objectPtr,
	bool hasPartiallyScannedObject, MM_CopyScanCacheVLHGC** nextScanCache)
{
	/* NOTE: An incremental scan solution should be provided here. For now, just use a full scan and ignore any hierarchical needs. */
	scanClassObjectSlots(env, reservingContext, objectPtr);
	return false;
}

MMINLINE bool
MM_CopyForwardScheme::incrementalScanClassLoaderObjectSlots(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, MM_CopyScanCacheVLHGC* scanCache, J9Object *objectPtr,
	bool hasPartiallyScannedObject, MM_CopyScanCacheVLHGC** nextScanCache)
{
	/* NOTE: An incremental scan solution should be provided here. For now, just use a full scan and ignore any hierarchical needs.
*/3051scanClassLoaderObjectSlots(env, reservingContext, objectPtr);3052return false;3053}30543055MMINLINE bool3056MM_CopyForwardScheme::incrementalScanPointerArrayObjectSlots(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, MM_CopyScanCacheVLHGC* scanCache, J9Object *objectPtr,3057bool hasPartiallyScannedObject, MM_CopyScanCacheVLHGC** nextScanCache)3058{3059GC_PointerArrayIterator pointerArrayIterator(_javaVM);30603061if (!hasPartiallyScannedObject) {3062/* finished previous object, step up for next one */3063pointerArrayIterator.initialize(_javaVM, objectPtr);3064} else {3065/* retrieve partial scan state of cache */3066pointerArrayIterator.restore(&(scanCache->_objectIteratorState));3067}30683069GC_SlotObject *slotObject = NULL;3070bool success = true;30713072while (success && ((slotObject = pointerArrayIterator.nextSlot()) != NULL)) {3073/* Copy/Forward the slot reference and perform any inter-region remember work that is required */3074success = copyAndForward(env, reservingContext, objectPtr, slotObject);30753076/* interrupt scanning this cache if it should be aliased or re-aliased */3077if (aliasToCopyCache(env, nextScanCache)) {3078/* save scan state of cache */3079pointerArrayIterator.save(&(scanCache->_objectIteratorState));3080return true;3081}3082}30833084return false;3085}30863087MMINLINE bool3088MM_CopyForwardScheme::incrementalScanReferenceObjectSlots(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, MM_CopyScanCacheVLHGC* scanCache, J9Object *objectPtr,3089bool hasPartiallyScannedObject, MM_CopyScanCacheVLHGC** nextScanCache)3090{3091GC_MixedObjectIterator mixedObjectIterator(_javaVM->omrVM);3092fj9object_t *referentPtr = J9GC_J9VMJAVALANGREFERENCE_REFERENT_ADDRESS(env, objectPtr);3093bool referentMustBeMarked = false;30943095if (!hasPartiallyScannedObject) {3096/* finished previous object, step up for next one */3097mixedObjectIterator.initialize(_javaVM->omrVM, objectPtr);3098} else {3099/* retrieve partial scan state of cache */3100mixedObjectIterator.restore(&(scanCache->_objectIteratorState));3101}31023103if (J9AccClassReferenceSoft == (J9CLASS_FLAGS(J9GC_J9OBJECT_CLAZZ(objectPtr, env)) & J9AccClassReferenceMask)) {3104/* Object is a Soft Reference: mark it if not expired */3105U_32 age = J9GC_J9VMJAVALANGSOFTREFERENCE_AGE(env, objectPtr);3106referentMustBeMarked = age < _extensions->getDynamicMaxSoftReferenceAge();3107}31083109GC_SlotObject *slotObject;3110bool success = true;3111while (success && ((slotObject = mixedObjectIterator.nextSlot()) != NULL)) {3112if (((fj9object_t *)slotObject->readAddressFromSlot() != referentPtr) || referentMustBeMarked) {3113/* Copy/Forward the slot reference and perform any inter-region remember work that is required */3114success = copyAndForward(env, reservingContext, objectPtr, slotObject);31153116/* interrupt scanning this cache if it should be aliased or re-aliased */3117if (aliasToCopyCache(env, nextScanCache)) {3118/* save scan state of cache */3119mixedObjectIterator.save(&(scanCache->_objectIteratorState));3120return true;3121}3122}3123}31243125return false;3126}31273128void3129MM_CopyForwardScheme::incrementalScanCacheBySlot(MM_EnvironmentVLHGC *env)3130{3131MM_CopyScanCacheVLHGC* scanCache = (MM_CopyScanCacheVLHGC *)env->_scanCache;3132J9Object *objectPtr;3133MM_CopyScanCacheVLHGC* nextScanCache = scanCache;31343135nextCache:3136/* mark that cache is in use as a scan cache */3137scanCache->setCurrentlyBeingScanned();3138bool hasPartiallyScannedObject = 
scanCache->_hasPartiallyScannedObject;3139if (scanCache->isScanWorkAvailable()) {3140/* we want to perform a NUMA-aware analogue to "hierarchical scanning" so this scan cache should pull other objects into its node */3141MM_AllocationContextTarok *reservingContext = getContextForHeapAddress(env->_scanCache->scanCurrent);3142do {3143void *cacheAlloc = scanCache->cacheAlloc;3144GC_ObjectHeapIteratorAddressOrderedList heapChunkIterator(3145_extensions,3146(J9Object *)scanCache->scanCurrent,3147(J9Object *)cacheAlloc,3148false);31493150/* Scan the chunk for live objects, incrementally slot by slot */3151while ((objectPtr = heapChunkIterator.nextObject()) != NULL) {3152/* retrieve scan state of the scan cache */3153switch(_extensions->objectModel.getScanType(objectPtr)) {3154case GC_ObjectModel::SCAN_MIXED_OBJECT_LINKED:3155case GC_ObjectModel::SCAN_ATOMIC_MARKABLE_REFERENCE_OBJECT:3156case GC_ObjectModel::SCAN_MIXED_OBJECT:3157case GC_ObjectModel::SCAN_OWNABLESYNCHRONIZER_OBJECT:3158hasPartiallyScannedObject = incrementalScanMixedObjectSlots(env, reservingContext, scanCache, objectPtr, hasPartiallyScannedObject, &nextScanCache);3159break;3160case GC_ObjectModel::SCAN_CLASS_OBJECT:3161hasPartiallyScannedObject = incrementalScanClassObjectSlots(env, reservingContext, scanCache, objectPtr, hasPartiallyScannedObject, &nextScanCache);3162break;3163case GC_ObjectModel::SCAN_CLASSLOADER_OBJECT:3164hasPartiallyScannedObject = incrementalScanClassLoaderObjectSlots(env, reservingContext, scanCache, objectPtr, hasPartiallyScannedObject, &nextScanCache);3165break;3166case GC_ObjectModel::SCAN_POINTER_ARRAY_OBJECT:3167hasPartiallyScannedObject = incrementalScanPointerArrayObjectSlots(env, reservingContext, scanCache, objectPtr, hasPartiallyScannedObject, &nextScanCache);3168break;3169case GC_ObjectModel::SCAN_REFERENCE_MIXED_OBJECT:3170hasPartiallyScannedObject = incrementalScanReferenceObjectSlots(env, reservingContext, scanCache, objectPtr, hasPartiallyScannedObject, &nextScanCache);3171break;3172case GC_ObjectModel::SCAN_PRIMITIVE_ARRAY_OBJECT:3173continue;3174break;3175default:3176Assert_MM_unreachable();3177}31783179/* object was not completely scanned in order to interrupt scan */3180if (hasPartiallyScannedObject) {3181/* interrupt scan, save scan state of cache before deferring */3182scanCache->scanCurrent = objectPtr;3183scanCache->_hasPartiallyScannedObject = true;3184/* Only save scan cache if it is not a copy cache, and then don't add to scanlist - this3185* can cause contention, just defer to later time on same thread3186* if deferred cache is occupied, then queue current scan cache on scan list3187*/3188scanCache->clearCurrentlyBeingScanned();3189if (!(scanCache->flags & J9VM_MODRON_SCAVENGER_CACHE_TYPE_COPY)) {3190if (NULL == env->_deferredScanCache) {3191env->_deferredScanCache = scanCache;3192} else {3193#if defined(J9MODRON_TGC_PARALLEL_STATISTICS)3194env->_copyForwardStats._releaseScanListCount += 1;3195#endif /* J9MODRON_TGC_PARALLEL_STATISTICS */3196addCacheEntryToScanCacheListAndNotify(env, scanCache);3197}3198}3199env->_scanCache = scanCache = nextScanCache;3200goto nextCache;3201}3202}3203/* Advance the scan pointer for the objects that were scanned */3204scanCache->scanCurrent = cacheAlloc;3205} while (scanCache->isScanWorkAvailable());3206}3207/* although about to flush this cache, the flush occurs only if the cache is not in use3208* hence we still need to store the state of current scanning */3209scanCache->_hasPartiallyScannedObject = false;3210/* mark cache as no longer in use 
for scanning */3211scanCache->clearCurrentlyBeingScanned();3212/* Done with the cache - build a free list entry in the hole, release the cache to the free list (if not used), and continue */3213flushCache(env, scanCache);3214}32153216void3217MM_CopyForwardScheme::cleanOverflowedRegion(MM_EnvironmentVLHGC *env, MM_HeapRegionDescriptorVLHGC *region, U_8 flagToClean)3218{3219Assert_MM_true(region->containsObjects());3220/* do we need to clean this region? */3221U_8 flags = region->_markData._overflowFlags;3222if (flagToClean == (flags & flagToClean)) {3223/* Region must be cleaned */3224/* save back the new flags, first, in case we re-overflow in another thread (or this thread) */3225U_8 newFlags = flags & ~flagToClean;3226region->_markData._overflowFlags = newFlags;3227/* Force our write of the overflow flags from our cache and ensure that we have no stale mark map data before we walk */3228MM_AtomicOperations::sync();3229if (region->_copyForwardData._evacuateSet || region->isFreshSurvivorRegion()) {3230cleanOverflowInRange(env, (UDATA *)region->getLowAddress(), (UDATA *)region->getHighAddress());3231} else if (region->isSurvivorRegion()) {3232GC_SurvivorMemoryIterator survivorIterator(env, region, _compressedSurvivorTable);3233while (survivorIterator.next()) {3234cleanOverflowInRange(env, (UDATA *)survivorIterator.getCurrentLow(), (UDATA *)survivorIterator.getCurrentHigh());3235}3236}3237}3238}32393240bool3241MM_CopyForwardScheme::isWorkPacketsOverflow(MM_EnvironmentVLHGC *env)3242{3243MM_WorkPackets *packets = (MM_WorkPackets *)(env->_cycleState->_workPackets);3244bool result = false;3245if (packets->getOverflowFlag()) {3246result = true;3247}3248return result;3249}32503251bool3252MM_CopyForwardScheme::handleOverflow(MM_EnvironmentVLHGC *env)3253{3254MM_WorkPackets *packets = (MM_WorkPackets *)(env->_cycleState->_workPackets);3255bool result = false;32563257if (packets->getOverflowFlag()) {3258result = true;3259if (((MM_CopyForwardSchemeTask*)env->_currentTask)->synchronizeGCThreadsAndReleaseMainForMark(env, UNIQUE_ID)) {3260packets->clearOverflowFlag();3261env->_currentTask->releaseSynchronizedGCThreads(env);3262}3263/* our overflow handling mechanism is to set flags in the region descriptor so clean those regions */3264U_8 flagToRemove = MM_RegionBasedOverflowVLHGC::overflowFlagForCollectionType(env, env->_cycleState->_collectionType);3265GC_HeapRegionIteratorVLHGC regionIterator = GC_HeapRegionIteratorVLHGC(_regionManager);3266MM_HeapRegionDescriptorVLHGC *region = NULL;3267while (NULL != (region = regionIterator.nextRegion())) {3268if (region->containsObjects()) {3269if (J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {3270cleanOverflowedRegion(env, region, flagToRemove);3271}3272}3273}3274((MM_CopyForwardSchemeTask*)env->_currentTask)->synchronizeGCThreadsForMark(env, UNIQUE_ID);3275}3276return result;3277}32783279void3280MM_CopyForwardScheme::completeScanForAbort(MM_EnvironmentVLHGC *env)3281{3282/* From this point on, no copying should happen - reservingContext is irrelevant */3283MM_AllocationContextTarok *reservingContext = _commonContext;32843285J9Object *objectPtr = NULL;3286do {3287while (NULL != (objectPtr = (J9Object *)env->_workStack.pop(env))) {3288do {3289Assert_MM_false(MM_ForwardedHeader(objectPtr, _extensions->compressObjectReferences()).isForwardedPointer());3290scanObject(env, reservingContext, objectPtr, SCAN_REASON_PACKET);32913292objectPtr = (J9Object *)env->_workStack.popNoWait(env);3293} while (NULL != 
objectPtr);3294}3295((MM_CopyForwardSchemeTask*)env->_currentTask)->synchronizeGCThreadsForMark(env, UNIQUE_ID);3296} while (handleOverflow(env));3297}32983299void3300MM_CopyForwardScheme::completeScanWorkPacket(MM_EnvironmentVLHGC *env)3301{3302MM_AllocationContextTarok *reservingContext = _commonContext;3303J9Object *objectPtr = NULL;33043305while (NULL != (objectPtr = (J9Object *)env->_workStack.popNoWaitFromCurrentInputPacket(env))) {3306Assert_MM_false(MM_ForwardedHeader(objectPtr, _extensions->compressObjectReferences()).isForwardedPointer());3307scanObject(env, reservingContext, objectPtr, SCAN_REASON_PACKET);3308}3309}33103311void3312MM_CopyForwardScheme::completeScan(MM_EnvironmentVLHGC *env)3313{3314UDATA nodeOfThread = 0;33153316/* if we aren't using NUMA, we don't want to check the thread affinity since we will have only one list of scan caches */3317if (_extensions->_numaManager.isPhysicalNUMASupported()) {3318nodeOfThread = env->getNumaAffinity();3319Assert_MM_true(nodeOfThread <= _extensions->_numaManager.getMaximumNodeNumber());3320}3321ScanReason scanReason = SCAN_REASON_NONE;3322while(SCAN_REASON_NONE != (scanReason = getNextWorkUnit(env, nodeOfThread))) {3323if (SCAN_REASON_COPYSCANCACHE == scanReason) {3324Assert_MM_true(env->_scanCache->cacheBase <= env->_scanCache->cacheAlloc);3325Assert_MM_true(env->_scanCache->cacheAlloc <= env->_scanCache->cacheTop);3326Assert_MM_true(env->_scanCache->scanCurrent <= env->_scanCache->cacheAlloc);33273328switch (_extensions->scavengerScanOrdering) {3329case MM_GCExtensions::OMR_GC_SCAVENGER_SCANORDERING_BREADTH_FIRST:3330case MM_GCExtensions::OMR_GC_SCAVENGER_SCANORDERING_DYNAMIC_BREADTH_FIRST:3331completeScanCache(env);3332break;3333case MM_GCExtensions::OMR_GC_SCAVENGER_SCANORDERING_HIERARCHICAL:3334incrementalScanCacheBySlot(env);3335break;3336default:3337Assert_MM_unreachable();3338break;3339} /* end of switch on type of scan order */3340} else if (SCAN_REASON_PACKET == scanReason) {3341completeScanWorkPacket(env);3342}3343}33443345/* flush Mark Map caches before we start draining Work Stack (in case of Abort) */3346addCopyCachesToFreeList(env);33473348if (((MM_CopyForwardSchemeTask*)env->_currentTask)->synchronizeGCThreadsAndReleaseMainForAbort(env, UNIQUE_ID)) {3349if (abortFlagRaised()) {3350_abortInProgress = true;3351}3352/* using abort case to handle work packets overflow during copyforwardHybrid */3353if (!_abortInProgress && (0 != _regionCountCannotBeEvacuated) && isWorkPacketsOverflow(env)) {3354_abortInProgress = true;3355}3356env->_currentTask->releaseSynchronizedGCThreads(env);3357}33583359if(_abortInProgress) {3360completeScanForAbort(env);3361}3362}33633364MMINLINE void3365MM_CopyForwardScheme::addOwnableSynchronizerObjectInList(MM_EnvironmentVLHGC *env, j9object_t object)3366{3367if (NULL != _extensions->accessBarrier->isObjectInOwnableSynchronizerList(object)) {3368env->getGCEnvironment()->_ownableSynchronizerObjectBuffer->add(env, object);3369env->_copyForwardStats._ownableSynchronizerSurvived += 1;3370}3371}33723373#if defined(J9VM_GC_FINALIZATION)3374void3375MM_CopyForwardScheme::scanUnfinalizedObjects(MM_EnvironmentVLHGC *env)3376{3377/* ensure that all clearable processing is complete up to this point since this phase resurrects objects */3378env->_currentTask->synchronizeGCThreads(env, UNIQUE_ID);33793380GC_FinalizableObjectBuffer buffer(_extensions);3381MM_HeapRegionDescriptorVLHGC *region = NULL;3382GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);3383while(NULL != (region = 
regionIterator.nextRegion())) {3384if (region->_copyForwardData._evacuateSet && !region->getUnfinalizedObjectList()->wasEmpty()) {3385if (J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {3386J9Object *pointer = region->getUnfinalizedObjectList()->getPriorList();3387while (NULL != pointer) {3388bool finalizable = false;3389env->_copyForwardStats._unfinalizedCandidates += 1;33903391Assert_MM_true(region->isAddressInRegion(pointer));33923393/* NOTE: it is safe to read from the forwarded object since either:3394* 1. it was copied before unfinalized processing began, or3395* 2. it was copied by this thread.3396*/3397MM_ForwardedHeader forwardedHeader(pointer, _extensions->compressObjectReferences());3398J9Object* forwardedPtr = forwardedHeader.getForwardedObject();3399if (NULL == forwardedPtr) {3400if (_markMap->isBitSet(pointer)) {3401forwardedPtr = pointer;3402} else {3403Assert_MM_mustBeClass(_extensions->objectModel.getPreservedClass(&forwardedHeader));3404/* TODO: Use the context for the finalize thread */3405MM_AllocationContextTarok *reservingContext = getContextForHeapAddress(pointer);3406forwardedPtr = copy(env, reservingContext, &forwardedHeader);3407finalizable = true;34083409if (NULL == forwardedPtr) {3410/* We failed to copy the object. This must have caused an abort. This will be dealt with in scanUnfinalizedObjectsComplete */3411Assert_MM_false(_abortInProgress);3412Assert_MM_true(abortFlagRaised());3413forwardedPtr = pointer;3414}3415}3416}34173418J9Object* next = _extensions->accessBarrier->getFinalizeLink(forwardedPtr);3419if (finalizable) {3420/* object was not previously marked -- it is now finalizable so push it to the local buffer */3421env->_copyForwardStats._unfinalizedEnqueued += 1;3422buffer.add(env, forwardedPtr);3423env->_cycleState->_finalizationRequired = true;3424} else {3425env->getGCEnvironment()->_unfinalizedObjectBuffer->add(env, forwardedPtr);3426}34273428pointer = next;3429}34303431/* Flush the local buffer of finalizable objects to the global list.3432* This is done once per region to ensure that multi-tenant lists3433* only contain objects from the same allocation context3434*/3435buffer.flush(env);3436}3437}3438}34393440/* restore everything to a flushed state before exiting */3441env->getGCEnvironment()->_unfinalizedObjectBuffer->flush(env);3442}3443#endif /* J9VM_GC_FINALIZATION */34443445void3446MM_CopyForwardScheme::cleanCardTable(MM_EnvironmentVLHGC *env)3447{3448Assert_MM_true(MM_CycleState::CT_PARTIAL_GARBAGE_COLLECTION == env->_cycleState->_collectionType);3449if (NULL != env->_cycleState->_externalCycleState) {3450/* A GMP is in progress */3451MM_CopyForwardGMPCardCleaner cardCleaner(this);3452cleanCardTableForPartialCollect(env, &cardCleaner);3453} else {3454/* No GMP is in progress so we can clear more aggressively */3455MM_CopyForwardNoGMPCardCleaner cardCleaner(this);3456cleanCardTableForPartialCollect(env, &cardCleaner);3457}3458}34593460void3461MM_CopyForwardScheme::cleanCardTableForPartialCollect(MM_EnvironmentVLHGC *env, MM_CardCleaner *cardCleaner)3462{3463PORT_ACCESS_FROM_ENVIRONMENT(env);3464U_64 cleanStartTime = j9time_hires_clock();34653466bool gmpIsRunning = (NULL != env->_cycleState->_externalCycleState);3467MM_CardTable* cardTable = _extensions->cardTable;3468GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);3469MM_HeapRegionDescriptorVLHGC *region = NULL;3470while(NULL != (region = regionIterator.nextRegion())) {3471/* Don't include survivor regions as we scan - they don't need to be processed and this will throw off the work unit 
indices */3472if (region->containsObjects() && region->_copyForwardData._initialLiveSet) {3473if(J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {3474if (!region->_markData._shouldMark) {3475/* this region isn't part of the collection set, so it may have dirty or remembered cards in it. */3476cardTable->cleanCardsInRegion(env, cardCleaner, region);3477} else {3478/* this region is part of the collection set, so just change its dirty cards to clean (or GMP_MUST_SCAN) */3479void *low = region->getLowAddress();3480void *high = region->getHighAddress();3481Card *card = cardTable->heapAddrToCardAddr(env, low);3482Card *toCard = cardTable->heapAddrToCardAddr(env, high);34833484while (card < toCard) {3485Card fromState = *card;3486switch(fromState) {3487case CARD_PGC_MUST_SCAN:3488*card = CARD_CLEAN;3489break;3490case CARD_GMP_MUST_SCAN:3491/* This can only occur if a GMP is currently active, no transition is required */3492Assert_MM_true(gmpIsRunning);3493break;3494case CARD_DIRTY:3495if (gmpIsRunning) {3496*card = CARD_GMP_MUST_SCAN;3497} else {3498*card = CARD_CLEAN;3499}3500break;3501case CARD_CLEAN:3502/* do nothing */3503break;3504case CARD_REMEMBERED:3505/* card state valid if left over during aborted card cleaning */3506*card = CARD_CLEAN;3507break;3508case CARD_REMEMBERED_AND_GMP_SCAN:3509/* card state valid if left over during aborted card cleaning */3510Assert_MM_true(gmpIsRunning);3511*card = CARD_GMP_MUST_SCAN;3512break;3513default:3514Assert_MM_unreachable();3515}3516card += 1;3517}3518}3519}3520}3521}35223523U_64 cleanEndTime = j9time_hires_clock();3524env->_cardCleaningStats.addToCardCleaningTime(cleanStartTime, cleanEndTime);3525}35263527void3528MM_CopyForwardScheme::updateOrDeleteObjectsFromExternalCycle(MM_EnvironmentVLHGC *env)3529{3530/* this function has knowledge of the collection set, which is only valid during a PGC */3531Assert_MM_true(NULL != env->_cycleState->_externalCycleState);35323533MM_MarkMap *externalMarkMap = env->_cycleState->_externalCycleState->_markMap;3534Assert_MM_true(externalMarkMap != _markMap);35353536MM_HeapRegionDescriptorVLHGC *region = NULL;3537GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);3538while(NULL != (region = regionIterator.nextRegion())) {3539if(region->_markData._shouldMark) {3540if(J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {3541Assert_MM_true(region->_copyForwardData._initialLiveSet);3542Assert_MM_false(region->isSurvivorRegion());3543Assert_MM_true(region->containsObjects());35443545if(abortFlagRaised() || region->_markData._noEvacuation) {3546/* Walk the mark map range for the region and fixing mark bits to be the subset of the current mark map.3547* (Those bits that are cleared have been moved and their bits are already set).3548*/3549UDATA currentExternalIndex = externalMarkMap->getSlotIndex((J9Object *)region->getLowAddress());3550UDATA topExternalIndex = externalMarkMap->getSlotIndex((J9Object *)region->getHighAddress());3551UDATA currentIndex = _markMap->getSlotIndex((J9Object *)region->getLowAddress());35523553while(currentExternalIndex < topExternalIndex) {3554UDATA slot = externalMarkMap->getSlot(currentExternalIndex);3555if(0 != slot) {3556externalMarkMap->setSlot(currentExternalIndex, slot & _markMap->getSlot(currentIndex));3557}3558currentExternalIndex += 1;3559currentIndex += 1;3560}3561} else {3562Assert_MM_false(region->_nextMarkMapCleared);3563externalMarkMap->setBitsForRegion(env, region, true);3564}3565}3566}3567}35683569/* Mark map processing must be completed before we move to work packets 
*/3570env->_currentTask->synchronizeGCThreads(env, UNIQUE_ID);35713572/* Clear or update references on external cycle work packets, depending on whether the reference has been forwarded or not */3573UDATA totalCount = 0;3574UDATA deletedCount = 0;3575UDATA preservedCount = 0;3576MM_WorkPacketsIterator packetIterator(env, env->_cycleState->_externalCycleState->_workPackets);3577MM_Packet *packet = NULL;3578while (NULL != (packet = packetIterator.nextPacket(env))) {3579if (!packet->isEmpty()) {3580/* there is data in this packet so use it */3581if (J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {3582MM_PacketSlotIterator slotIterator(packet);3583J9Object **slot = NULL;3584while (NULL != (slot = slotIterator.nextSlot())) {3585J9Object *object = *slot;3586Assert_MM_true(NULL != object);3587if (PACKET_INVALID_OBJECT != (UDATA)object) {3588totalCount += 1;3589if(isLiveObject(object)) {3590Assert_MM_true(externalMarkMap->isBitSet(object));3591Assert_MM_true(_markMap->isBitSet(object));3592Assert_MM_mustBeClass(J9GC_J9OBJECT_CLAZZ(object, env));3593} else {3594Assert_MM_true(isObjectInEvacuateMemory(object));3595J9Object *forwardedObject = updateForwardedPointer(object);3596if(externalMarkMap->isBitSet(forwardedObject)) {3597Assert_MM_true(_markMap->isBitSet(forwardedObject));3598Assert_MM_mustBeClass(J9GC_J9OBJECT_CLAZZ(forwardedObject, env));3599preservedCount += 1;3600*slot = forwardedObject;3601} else {3602/* this object failed to survive the PGC cycle */3603Assert_MM_true(!_markMap->isBitSet(forwardedObject));3604deletedCount += 1;3605slotIterator.resetSplitTagIndexForObject(object, PACKET_INVALID_OBJECT);3606*slot = (J9Object*)PACKET_INVALID_OBJECT;3607}3608}3609}3610}3611}3612}3613}36143615Trc_MM_CopyForwardScheme_deleteDeadObjectsFromExternalCycle(env->getLanguageVMThread(), totalCount, deletedCount, preservedCount);3616}36173618bool3619MM_CopyForwardScheme::scanObjectsInRange(MM_EnvironmentVLHGC *env, void *lowAddress, void *highAddress, bool rememberedObjectsOnly)3620{3621/* we only support scanning exactly one card at a time */3622Assert_MM_true(0 == ((UDATA)lowAddress & (J9MODRON_HEAP_BYTES_PER_UDATA_OF_HEAP_MAP - 1)));3623Assert_MM_true(((UDATA)lowAddress + CARD_SIZE) == (UDATA)highAddress);3624/* card cleaning is done after stack processing so any objects we copy should be copied into the node which refers to them, even from cards */3625MM_AllocationContextTarok *reservingContext = getContextForHeapAddress(lowAddress);36263627if (rememberedObjectsOnly) {3628for (UDATA bias = 0; bias < CARD_SIZE; bias += J9MODRON_HEAP_BYTES_PER_UDATA_OF_HEAP_MAP) {3629void *scanAddress = (void *)((UDATA)lowAddress + bias);3630MM_HeapMapWordIterator markedObjectIterator(_markMap, scanAddress);3631J9Object *fromObject = NULL;3632while (NULL != (fromObject = markedObjectIterator.nextObject())) {3633/* this object needs to be re-scanned (to update next mark map and RSM) */3634if (_extensions->objectModel.isRemembered(fromObject)) {3635scanObject(env, reservingContext, fromObject, SCAN_REASON_DIRTY_CARD);36363637}3638}3639}3640} else {3641for (UDATA bias = 0; bias < CARD_SIZE; bias += J9MODRON_HEAP_BYTES_PER_UDATA_OF_HEAP_MAP) {3642void *scanAddress = (void *)((UDATA)lowAddress + bias);3643MM_HeapMapWordIterator markedObjectIterator(_markMap, scanAddress);3644J9Object *fromObject = NULL;3645while (NULL != (fromObject = markedObjectIterator.nextObject())) {3646/* this object needs to be re-scanned (to update next mark map and RSM) */3647scanObject(env, reservingContext, fromObject, 
SCAN_REASON_DIRTY_CARD);3648}3649}3650}3651/* we can only clean the card if we haven't raised the abort flag since we might have aborted in this thread3652* while processing the card while another thread copied an object that this card referred to. We need to3653* make sure that we re-clean this card in abort processing, in that case, so don't clean the card.3654* If an abort _is_ already in progress, however, no objects can be copied so we are safe to clean this card3655* knowing that all its objects have correct references.3656*/3657return _abortInProgress || !abortFlagRaised();3658}365936603661/**3662* The root set scanner for MM_CopyForwardScheme.3663* @copydoc MM_RootScanner3664* @ingroup GC_Modron_Standard3665*/3666class MM_CopyForwardSchemeRootScanner : public MM_RootScanner3667{3668private:3669MM_CopyForwardScheme *_copyForwardScheme; /**< Local reference back to the copy forward scheme driving the collection */36703671private:3672virtual void doSlot(J9Object **slotPtr) {3673if (NULL != *slotPtr) {3674/* we don't have the context of this slot so just relocate the object into the same node where we found it */3675MM_AllocationContextTarok *reservingContext = _copyForwardScheme->getContextForHeapAddress(*slotPtr);3676_copyForwardScheme->copyAndForward(MM_EnvironmentVLHGC::getEnvironment(_env), reservingContext, slotPtr);3677}3678}36793680virtual void doStackSlot(J9Object **slotPtr, void *walkState, const void* stackLocation) {3681if (_copyForwardScheme->isHeapObject(*slotPtr)) {3682/* heap object - validate and mark */3683Assert_MM_validStackSlot(MM_StackSlotValidator(MM_StackSlotValidator::COULD_BE_FORWARDED, *slotPtr, stackLocation, walkState).validate(_env));3684/* we know that threads are bound to nodes so relocalize this object into the node of the thread which directly references it */3685J9VMThread *thread = ((J9StackWalkState *)walkState)->currentThread;3686MM_AllocationContextTarok *reservingContext = (MM_AllocationContextTarok *)MM_EnvironmentVLHGC::getEnvironment(thread)->getAllocationContext();3687_copyForwardScheme->copyAndForward(MM_EnvironmentVLHGC::getEnvironment(_env), reservingContext, slotPtr);3688} else if (NULL != *slotPtr) {3689/* stack object - just validate */3690Assert_MM_validStackSlot(MM_StackSlotValidator(MM_StackSlotValidator::NOT_ON_HEAP, *slotPtr, stackLocation, walkState).validate(_env));3691}3692}36933694virtual void doVMThreadSlot(J9Object **slotPtr, GC_VMThreadIterator *vmThreadIterator) {3695if (_copyForwardScheme->isHeapObject(*slotPtr)) {3696/* we know that threads are bound to nodes so relocalize this object into the node of the thread which directly references it */3697J9VMThread *thread = vmThreadIterator->getVMThread();3698MM_AllocationContextTarok *reservingContext = (MM_AllocationContextTarok *)MM_EnvironmentVLHGC::getEnvironment(thread)->getAllocationContext();3699_copyForwardScheme->copyAndForward(MM_EnvironmentVLHGC::getEnvironment(_env), reservingContext, slotPtr);3700} else if (NULL != *slotPtr) {3701Assert_MM_true(vmthreaditerator_state_monitor_records == vmThreadIterator->getState());3702}3703}37043705virtual void doClass(J9Class *clazz) {3706/* Should never try to scan J9Class structures - these are handled by j.l.c and class loader references on the heap */3707Assert_MM_unreachable();3708}37093710virtual void doClassLoader(J9ClassLoader *classLoader) {3711if(0 == (classLoader->gcFlags & J9_GC_CLASS_LOADER_DEAD)) {3712/* until we decide if class loaders should be common, just relocate this object back into its existing node 
*/3713MM_AllocationContextTarok *reservingContext = _copyForwardScheme->getContextForHeapAddress(classLoader->classLoaderObject);3714_copyForwardScheme->copyAndForward(MM_EnvironmentVLHGC::getEnvironment(_env), reservingContext, &classLoader->classLoaderObject);3715}3716}37173718#if defined(J9VM_GC_FINALIZATION)3719virtual void doFinalizableObject(j9object_t object) {3720Assert_MM_unreachable();3721}37223723virtual void scanFinalizableObjects(MM_EnvironmentBase *env) {3724reportScanningStarted(RootScannerEntity_FinalizableObjects);3725/* synchronization can be expensive so skip it if there's no work to do */3726if (_copyForwardScheme->_shouldScanFinalizableObjects) {3727if (env->_currentTask->synchronizeGCThreadsAndReleaseSingleThread(env, UNIQUE_ID)) {3728_copyForwardScheme->scanFinalizableObjects(MM_EnvironmentVLHGC::getEnvironment(env));3729env->_currentTask->releaseSynchronizedGCThreads(env);3730}3731} else {3732/* double check that there really was no work to do */3733Assert_MM_true(!MM_GCExtensions::getExtensions(env)->finalizeListManager->isFinalizableObjectProcessingRequired());3734}3735reportScanningEnded(RootScannerEntity_FinalizableObjects);3736}3737#endif /* J9VM_GC_FINALIZATION */37383739public:3740MM_CopyForwardSchemeRootScanner(MM_EnvironmentVLHGC *env, MM_CopyForwardScheme *copyForwardScheme) :3741MM_RootScanner(env),3742_copyForwardScheme(copyForwardScheme)3743{3744_typeId = __FUNCTION__;3745};37463747/**3748* Scan all root set references from the VM into the heap.3749* For all slots that are hard root references into the heap, the appropriate slot handler will be called.3750*/3751void3752scanRoots(MM_EnvironmentBase *env)3753{3754/* threads and their stacks tell us more about NUMA affinity than anything else so ensure that we scan them first and process all scan caches that they produce before proceeding */3755scanThreads(env);3756_copyForwardScheme->completeScan(MM_EnvironmentVLHGC::getEnvironment(env));37573758Assert_MM_true(_classDataAsRoots == !_copyForwardScheme->isDynamicClassUnloadingEnabled());3759if (_classDataAsRoots) {3760/* The classLoaderObject of a class loader might be in the nursery, but a class loader3761* can never be in the remembered set, so include class loaders here.3762*/3763scanClassLoaders(env);3764}37653766#if defined(J9VM_GC_FINALIZATION)3767scanFinalizableObjects(env);3768#endif /* J9VM_GC_FINALIZATION */3769scanJNIGlobalReferences(env);37703771if(_stringTableAsRoot){3772scanStringTable(env);3773}3774}3775};37763777/**3778* The clearable root set scanner for MM_CopyForwardScheme.3779* @copydoc MM_RootScanner3780* @ingroup GC_Modron_Standard3781*/3782class MM_CopyForwardSchemeRootClearer : public MM_RootScanner3783{3784private:3785MM_CopyForwardScheme *_copyForwardScheme;37863787private:3788virtual void doSlot(J9Object **slotPtr) {3789Assert_MM_unreachable(); /* Should not have gotten here - how do you clear a generic slot? */3790}37913792virtual void doClass(J9Class *clazz) {3793Assert_MM_unreachable(); /* Should not have gotten here - how do you clear a class? 
*/3794}37953796virtual void scanSoftReferenceObjects(MM_EnvironmentBase *env) {3797reportScanningStarted(RootScannerEntity_SoftReferenceObjects);3798_copyForwardScheme->scanSoftReferenceObjects(MM_EnvironmentVLHGC::getEnvironment(env));3799reportScanningEnded(RootScannerEntity_SoftReferenceObjects);3800}38013802virtual CompletePhaseCode scanSoftReferencesComplete(MM_EnvironmentBase *env) {3803/* do nothing -- no new objects could have been discovered by soft reference processing */3804return complete_phase_OK;3805}38063807virtual void scanWeakReferenceObjects(MM_EnvironmentBase *env) {3808reportScanningStarted(RootScannerEntity_WeakReferenceObjects);3809_copyForwardScheme->scanWeakReferenceObjects(MM_EnvironmentVLHGC::getEnvironment(env));3810reportScanningEnded(RootScannerEntity_WeakReferenceObjects);3811}38123813virtual CompletePhaseCode scanWeakReferencesComplete(MM_EnvironmentBase *env) {3814/* No new objects could have been discovered by soft / weak reference processing,3815* but we must complete this phase prior to unfinalized processing to ensure that3816* finalizable referents get cleared */3817env->_currentTask->synchronizeGCThreads(env, UNIQUE_ID);3818return complete_phase_OK;3819}38203821#if defined(J9VM_GC_FINALIZATION)3822virtual void scanUnfinalizedObjects(MM_EnvironmentBase *env) {3823/* allow the scheme to handle this, since it knows which regions are interesting */3824reportScanningStarted(RootScannerEntity_UnfinalizedObjects);3825_copyForwardScheme->scanUnfinalizedObjects(MM_EnvironmentVLHGC::getEnvironment(env));3826reportScanningEnded(RootScannerEntity_UnfinalizedObjects);3827}38283829virtual CompletePhaseCode scanUnfinalizedObjectsComplete(MM_EnvironmentBase *env) {3830reportScanningStarted(RootScannerEntity_UnfinalizedObjectsComplete);3831/* ensure that all unfinalized processing is complete before we start marking additional objects */3832env->_currentTask->synchronizeGCThreads(env, UNIQUE_ID);38333834bool wasAbortAlreadyInProgress = _copyForwardScheme->_abortInProgress;3835_copyForwardScheme->completeScan(MM_EnvironmentVLHGC::getEnvironment(env));38363837if (!wasAbortAlreadyInProgress && _copyForwardScheme->_abortInProgress) {3838/* an abort occurred during unfinalized processing: there could be unscanned or unforwarded objects on the finalizable list */3839if (J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {3840/* since we know we're in abort handling mode and won't be copying any of these objects we don't need to synchronize here */3841_copyForwardScheme->scanFinalizableObjects(MM_EnvironmentVLHGC::getEnvironment(env));3842}3843_copyForwardScheme->completeScanForAbort(MM_EnvironmentVLHGC::getEnvironment(env));3844}3845reportScanningEnded(RootScannerEntity_UnfinalizedObjectsComplete);3846return complete_phase_OK;3847}3848#endif /* J9VM_GC_FINALIZATION */38493850virtual void scanOwnableSynchronizerObjects(MM_EnvironmentBase *env) {3851/* allow the scheme to handle this, since it knows which regions are interesting */3852/* empty, move ownable synchronizer processing in copy-continuous phase */3853}38543855virtual void scanPhantomReferenceObjects(MM_EnvironmentBase *env) {3856reportScanningStarted(RootScannerEntity_PhantomReferenceObjects);3857_copyForwardScheme->scanPhantomReferenceObjects(MM_EnvironmentVLHGC::getEnvironment(env));3858reportScanningEnded(RootScannerEntity_PhantomReferenceObjects);3859}38603861virtual CompletePhaseCode scanPhantomReferencesComplete(MM_EnvironmentBase *envBase) {3862MM_EnvironmentVLHGC *env = 
MM_EnvironmentVLHGC::getEnvironment(envBase);38633864reportScanningStarted(RootScannerEntity_PhantomReferenceObjectsComplete);3865env->_currentTask->synchronizeGCThreads(env, UNIQUE_ID);3866Assert_MM_true(MM_CycleState::references_clear_phantom == (env->_cycleState->_referenceObjectOptions & MM_CycleState::references_clear_phantom));38673868/* phantom reference processing may resurrect objects - scan them now */3869_copyForwardScheme->completeScan(env);38703871reportScanningEnded(RootScannerEntity_PhantomReferenceObjectsComplete);3872return complete_phase_OK;3873}38743875virtual void doMonitorReference(J9ObjectMonitor *objectMonitor, GC_HashTableIterator *monitorReferenceIterator) {3876J9ThreadAbstractMonitor * monitor = (J9ThreadAbstractMonitor*)objectMonitor->monitor;3877MM_EnvironmentVLHGC::getEnvironment(_env)->_copyForwardStats._monitorReferenceCandidates += 1;3878J9Object *objectPtr = (J9Object *)monitor->userData;3879if(!_copyForwardScheme->isLiveObject(objectPtr)) {3880Assert_MM_true(_copyForwardScheme->isObjectInEvacuateMemory(objectPtr));3881MM_ForwardedHeader forwardedHeader(objectPtr, _extensions->compressObjectReferences());3882J9Object *forwardPtr = forwardedHeader.getForwardedObject();3883if(NULL != forwardPtr) {3884monitor->userData = (UDATA)forwardPtr;3885} else {3886Assert_MM_mustBeClass(_extensions->objectModel.getPreservedClass(&forwardedHeader));3887monitorReferenceIterator->removeSlot();3888MM_EnvironmentVLHGC::getEnvironment(_env)->_copyForwardStats._monitorReferenceCleared += 1;3889/* We must call objectMonitorDestroy (as opposed to omrthread_monitor_destroy) when the3890* monitor is not internal to the GC3891*/3892static_cast<J9JavaVM*>(_omrVM->_language_vm)->internalVMFunctions->objectMonitorDestroy(static_cast<J9JavaVM*>(_omrVM->_language_vm), (J9VMThread *)_env->getLanguageVMThread(), (omrthread_monitor_t)monitor);3893}3894}3895}38963897virtual CompletePhaseCode scanMonitorReferencesComplete(MM_EnvironmentBase *envBase) {3898MM_EnvironmentVLHGC* env = MM_EnvironmentVLHGC::getEnvironment(envBase);3899reportScanningStarted(RootScannerEntity_MonitorReferenceObjectsComplete);3900((J9JavaVM *)env->getLanguageVM())->internalVMFunctions->objectMonitorDestroyComplete((J9JavaVM *)env->getLanguageVM(), (J9VMThread *)env->getLanguageVMThread());3901reportScanningEnded(RootScannerEntity_MonitorReferenceObjectsComplete);3902return complete_phase_OK;3903}39043905virtual void doJNIWeakGlobalReference(J9Object **slotPtr) {3906J9Object *objectPtr = *slotPtr;3907if(!_copyForwardScheme->isLiveObject(objectPtr)) {3908Assert_MM_true(_copyForwardScheme->isObjectInEvacuateMemory(objectPtr));3909MM_ForwardedHeader forwardedHeader(objectPtr, _extensions->compressObjectReferences());3910*slotPtr = forwardedHeader.getForwardedObject();3911}3912}39133914virtual void doStringTableSlot(J9Object **slotPtr, GC_StringTableIterator *stringTableIterator) {3915J9Object *objectPtr = *slotPtr;3916MM_EnvironmentVLHGC::getEnvironment(_env)->_copyForwardStats._stringConstantsCandidates += 1;3917if(!_copyForwardScheme->isLiveObject(objectPtr)) {3918Assert_MM_true(_copyForwardScheme->isObjectInEvacuateMemory(objectPtr));3919MM_ForwardedHeader forwardedHeader(objectPtr, _extensions->compressObjectReferences());3920objectPtr = forwardedHeader.getForwardedObject();3921if(NULL == objectPtr) {3922Assert_MM_mustBeClass(_extensions->objectModel.getPreservedClass(&forwardedHeader));3923MM_EnvironmentVLHGC::getEnvironment(_env)->_copyForwardStats._stringConstantsCleared += 
1;3924stringTableIterator->removeSlot();3925} else {3926*slotPtr = objectPtr;3927}3928}3929}39303931#if defined(J9VM_GC_ENABLE_DOUBLE_MAP)3932virtual void doDoubleMappedObjectSlot(J9Object *objectPtr, struct J9PortVmemIdentifier *identifier) {3933MM_EnvironmentVLHGC *env = MM_EnvironmentVLHGC::getEnvironment(_env);3934env->_copyForwardStats._doubleMappedArrayletsCandidates += 1;3935if (!_copyForwardScheme->isLiveObject(objectPtr)) {3936Assert_MM_true(_copyForwardScheme->isObjectInEvacuateMemory(objectPtr));3937MM_ForwardedHeader forwardedHeader(objectPtr, _extensions->compressObjectReferences());3938objectPtr = forwardedHeader.getForwardedObject();3939if (NULL == objectPtr) {3940Assert_MM_mustBeClass(_extensions->objectModel.getPreservedClass(&forwardedHeader));3941env->_copyForwardStats._doubleMappedArrayletsCleared += 1;3942OMRPORT_ACCESS_FROM_OMRVM(_omrVM);3943omrvmem_release_double_mapped_region(identifier->address, identifier->size, identifier);3944}3945}3946}3947#endif /* J9VM_GC_ENABLE_DOUBLE_MAP */39483949/**3950* @Clear the string table cache slot if the object is not marked3951*/3952virtual void doStringCacheTableSlot(J9Object **slotPtr) {3953J9Object *objectPtr = *slotPtr;3954if(!_copyForwardScheme->isLiveObject(objectPtr)) {3955Assert_MM_true(_copyForwardScheme->isObjectInEvacuateMemory(objectPtr));3956MM_ForwardedHeader forwardedHeader(objectPtr, _extensions->compressObjectReferences());3957*slotPtr = forwardedHeader.getForwardedObject();3958}3959}39603961#if defined(J9VM_OPT_JVMTI)3962virtual void doJVMTIObjectTagSlot(J9Object **slotPtr, GC_JVMTIObjectTagTableIterator *objectTagTableIterator)3963{3964J9Object *objectPtr = *slotPtr;3965if(!_copyForwardScheme->isLiveObject(objectPtr)) {3966Assert_MM_true(_copyForwardScheme->isObjectInEvacuateMemory(objectPtr));3967MM_ForwardedHeader forwardedHeader(objectPtr, _extensions->compressObjectReferences());3968*slotPtr = forwardedHeader.getForwardedObject();3969}3970}3971#endif /* J9VM_OPT_JVMTI */39723973#if defined(J9VM_GC_FINALIZATION)3974virtual void doFinalizableObject(j9object_t object) {3975Assert_MM_unreachable();3976}3977#endif /* J9VM_GC_FINALIZATION */39783979public:3980MM_CopyForwardSchemeRootClearer(MM_EnvironmentVLHGC *env, MM_CopyForwardScheme *copyForwardScheme) :3981MM_RootScanner(env),3982_copyForwardScheme(copyForwardScheme)3983{3984_typeId = __FUNCTION__;3985};3986};39873988void3989MM_CopyForwardScheme::clearMarkMapForPartialCollect(MM_EnvironmentVLHGC *env)3990{3991Assert_MM_true(MM_CycleState::CT_PARTIAL_GARBAGE_COLLECTION == env->_cycleState->_collectionType);39923993/* Walk the collection set to determine what ranges of the mark map should be cleared */3994MM_HeapRegionDescriptorVLHGC *region = NULL;3995GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);3996while(NULL != (region = regionIterator.nextRegion())) {3997if (region->_copyForwardData._evacuateSet) {3998if (J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {3999/* we start with an assumption that abort will occur, so we set _previousMarkMapCleared to false.4000* if not, the region will be recycled, in which moment the flag will turn to true4001*/4002if (region->_previousMarkMapCleared) {4003region->_previousMarkMapCleared = false;4004if (_extensions->tarokEnableExpensiveAssertions) {4005Assert_MM_true(_markMap->checkBitsForRegion(env, region));4006}4007/* TODO: need to handle region->hasValidMarkMap() case for optimum performance */4008/* consider remembering where the last allocated object is, to minimize clearing for regions with low occupancy (indeed, 
regions with low occupancy are rather good candidates for evacuation). */4009// } else if (region->hasValidMarkMap()) {4010} else {4011_markMap->setBitsForRegion(env, region, true);4012}4013}4014}4015}4016}40174018void4019MM_CopyForwardScheme::clearCardTableForPartialCollect(MM_EnvironmentVLHGC *env)4020{4021Assert_MM_true(MM_CycleState::CT_PARTIAL_GARBAGE_COLLECTION == env->_cycleState->_collectionType);4022bool gmpIsRunning = (NULL != env->_cycleState->_externalCycleState);40234024if (gmpIsRunning) {4025/* Walk the collection set to determine what ranges of the mark map should be cleared */4026MM_HeapRegionDescriptorVLHGC *region = NULL;4027GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);4028MM_CardTable *cardTable = _extensions->cardTable;4029while(NULL != (region = regionIterator.nextRegion())) {4030if (region->_copyForwardData._evacuateSet && !region->_markData._noEvacuation) {4031if(J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {4032void *low = region->getLowAddress();4033void *high = region->getHighAddress();4034Card *lowCard = cardTable->heapAddrToCardAddr(env, low);4035Card *highCard = cardTable->heapAddrToCardAddr(env, high);4036UDATA cardRangeSize = (UDATA)highCard - (UDATA)lowCard;4037memset(lowCard, CARD_CLEAN, cardRangeSize);4038}4039}4040}4041}4042}40434044void4045MM_CopyForwardScheme::workThreadGarbageCollect(MM_EnvironmentVLHGC *env)4046{4047/* GC init (set up per-invocation values) */4048workerSetupForCopyForward(env);40494050env->_workStack.prepareForWork(env, env->_cycleState->_workPackets);40514052/* pre-populate the _reservedRegionList with the flushed regions */4053/* this is a simple operation, so do it in one GC thread */4054if (J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {4055GC_HeapRegionIteratorVLHGC regionIterator(_regionManager, MM_HeapRegionDescriptor::MANAGED);4056MM_HeapRegionDescriptorVLHGC *region = NULL;4057while (NULL != (region = regionIterator.nextRegion())) {4058if (region->containsObjects()) {4059UDATA compactGroup = MM_CompactGroupManager::getCompactGroupNumber(env, region);4060if (region->_markData._shouldMark) {4061_reservedRegionList[compactGroup]._evacuateRegionCount += 1;4062} else {4063Assert_MM_true(MM_HeapRegionDescriptor::ADDRESS_ORDERED_MARKED == region->getRegionType());4064MM_MemoryPool *pool = region->getMemoryPool();4065/* only add regions with pools which could possibly satisfy a TLH allocation */4066if ((pool->getActualFreeMemorySize() >= pool->getMinimumFreeEntrySize()) &&4067((pool->getActualFreeMemorySize()/pool->getActualFreeEntryCount()) >= _extensions->freeSizeThresholdForSurvivor)4068) {4069Assert_MM_true(pool->getActualFreeMemorySize() < region->getSize());4070Assert_MM_false(region->isSurvivorRegion());4071insertFreeMemoryCandidate(env, &_reservedRegionList[compactGroup], region);4072}4073}4074}4075}40764077/* initialize the maximum number of sublists for each compact group; ensure that we try to produce fewer survivor regions than evacuate regions */4078for(UDATA index = 0; index < _compactGroupMaxCount; index++) {4079UDATA evacuateCount = _reservedRegionList[index]._evacuateRegionCount;4080/* Arbitrarily set the max to half the evacuate count. 
This means that, if it's possible, we'll use no more than half as many survivor regions as there were evacuate regions */
			UDATA maxSublistCount = evacuateCount / 2;
			maxSublistCount = OMR_MAX(maxSublistCount, 1);
			maxSublistCount = OMR_MIN(maxSublistCount, MM_ReservedRegionListHeader::MAX_SUBLISTS);
			_reservedRegionList[index]._maxSublistCount = maxSublistCount;
		}
	}

	/* another thread clears the class loader remembered set */
	if (_extensions->tarokEnableIncrementalClassGC) {
		if (J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {
			MM_ClassLoaderRememberedSet *classLoaderRememberedSet = _extensions->classLoaderRememberedSet;
			classLoaderRememberedSet->resetRegionsToClear(env);
			MM_HeapRegionDescriptorVLHGC *region = NULL;
			GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);
			while(NULL != (region = regionIterator.nextRegion())) {
				if (region->_markData._shouldMark) {
					classLoaderRememberedSet->prepareToClearRememberedSetForRegion(env, region);
				}
			}
			classLoaderRememberedSet->clearRememberedSets(env);
		}
	}


	/* We want to clear all out-going references from the nursery set since those regions
	 * will be walked and their precise out-going references will be used to reconstruct the RS
	 */
	_interRegionRememberedSet->clearFromRegionReferencesForCopyForward(env);

	clearMarkMapForPartialCollect(env);

	if (NULL != env->_cycleState->_externalCycleState) {
		rememberReferenceListsFromExternalCycle(env);
	}
	((MM_CopyForwardSchemeTask*)env->_currentTask)->synchronizeGCThreadsForInterRegionRememberedSet(env, UNIQUE_ID);

	/* Enable dynamicBreadthFirstScanOrdering depth copying if dynamicBreadthFirstScanOrdering is enabled */
	env->enableHotFieldDepthCopy();

	/* scan roots before cleaning the card table since the roots give us more concrete NUMA recommendations */
	scanRoots(env);

	cleanCardTable(env);

	completeScan(env);

	/* TODO: check if abort happened during root scanning/cardTable clearing (and optimize in any other way) */
	if(abortFlagRaised()) {
		Assert_MM_true(_abortInProgress);
		/* rescan to fix up root slots, but also to complete scanning of roots that we missed marking/pushing during the original root scan */
		scanRoots(env);

		cleanCardTable(env);

		completeScan(env);
	}
	/* Disable dynamicBreadthFirstScanOrdering depth copying after root scanning and main phase of PGC cycle */
	env->disableHotFieldDepthCopy();

	/* ensure that all buffers have been flushed before we start reference processing */
	env->getGCEnvironment()->_referenceObjectBuffer->flush(env);

	UDATA preservedGcReadBarrierType = 0;
	if(env->_currentTask->synchronizeGCThreadsAndReleaseSingleThread(env, UNIQUE_ID)) {
		_clearableProcessingStarted = true;

		/* During the clearable pass, GC threads can access clearable slots other than the one they are directly processing.
		 * Such other slots could still point to forwarded objects, and the forwarded pointer needs to be
		 * resolved (at least in a thread-local sense) to be able to access the object.
		 * An example of that is the string comparator, which may be used when removing
		 * an entry from the string table, as part of AVL rebalancing.
		 * The string comparator is also used in the context of a mutator thread when adding new elements,
		 * and it already uses the read barrier (to support concurrent evacuating GCs).
		 * That read barrier will do exactly what we need for our clearable pass (in fact it does more:
		 * it not only locally resolves the forwarded pointer, it also fixes the slot, which is correct for this pass, too). We just need
		 * to enable the read barrier, if it is not already enabled.
		 */
		preservedGcReadBarrierType = _javaVM->gcReadBarrierType;
		_javaVM->gcReadBarrierType = J9_GC_READ_BARRIER_TYPE_ALWAYS;

		/* Soft and weak references resurrected by finalization need to be cleared immediately since weak and soft processing has already completed.
		 * This has to be set before unfinalizable (and phantom) processing, because it can copy an object to a non-fresh region, in which case we do
		 * not want to put GMP refs into the REMEMBERED state (we want to have a chance to put them back into the INITIAL state).
		 */
		env->_cycleState->_referenceObjectOptions |= MM_CycleState::references_clear_soft;
		env->_cycleState->_referenceObjectOptions |= MM_CycleState::references_clear_weak;
		/* since we need a sync point here anyway, use this opportunity to determine which regions contain weak and soft references or unfinalized objects */
		/* (we can't do phantom references yet because unfinalized processing may find more of them) */
		MM_HeapRegionDescriptorVLHGC *region = NULL;
		GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);
		while(NULL != (region = regionIterator.nextRegion())) {
			if (region->isSurvivorRegion() || region->_copyForwardData._evacuateSet) {
				region->getReferenceObjectList()->startSoftReferenceProcessing();
				region->getReferenceObjectList()->startWeakReferenceProcessing();
			}
		}
		env->_currentTask->releaseSynchronizedGCThreads(env);
	}

	MM_CopyForwardSchemeRootClearer rootClearer(env, this);
	rootClearer.setStringTableAsRoot(!isCollectStringConstantsEnabled());
	rootClearer.scanClearable(env);

	/* Clearable must not uncover any new work */
	Assert_MM_true(NULL == env->_workStack.popNoWait(env));

	if(env->_currentTask->synchronizeGCThreadsAndReleaseSingleThread(env, UNIQUE_ID)) {
		_javaVM->gcReadBarrierType = preservedGcReadBarrierType;
		env->_currentTask->releaseSynchronizedGCThreads(env);
	}

	if(!abortFlagRaised()) {
		clearCardTableForPartialCollect(env);
	}

	/* make sure that we aren't leaving any stale scan work behind */
	Assert_MM_false(isAnyScanCacheWorkAvailable());

	if(NULL != env->_cycleState->_externalCycleState) {
		updateOrDeleteObjectsFromExternalCycle(env);
	}

	env->_workStack.flush(env);
	/* flush the buffer after the clearable phase --- cmvc 198798 */
	/* flush the ownable synchronizer object buffer after rebuilding the ownableSynchronizerObjectList during the main scan phase */
	env->getGCEnvironment()->_ownableSynchronizerObjectBuffer->flush(env);

	abandonTLHRemainders(env);

	/* No matter what happens, always sum up the gc stats */
	mergeGCStats(env);

	env->_copyForwardCompactGroups = NULL;

	return;
}
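/* Illustrative sketch, not part of the original file: the clearable-phase read barrier handling in
 * workThreadGarbageCollect() above saves the VM's gcReadBarrierType, forces it to
 * J9_GC_READ_BARRIER_TYPE_ALWAYS for the duration of scanClearable(), and then restores the saved
 * value. Expressed as a hypothetical RAII guard (the name and shape are assumptions, not existing
 * J9 API) the pattern looks roughly like this. The real code performs the save/restore inline inside
 * single-threaded sections because the override must span the whole parallel clearable pass rather
 * than a single C++ scope.
 */
struct SketchReadBarrierOverride {
	J9JavaVM *_vm; /**< VM whose read barrier mode is temporarily overridden */
	UDATA _savedType; /**< barrier mode captured at construction and restored at destruction */

	SketchReadBarrierOverride(J9JavaVM *vm, UDATA overrideType)
		: _vm(vm)
		, _savedType(vm->gcReadBarrierType)
	{
		_vm->gcReadBarrierType = overrideType;
	}

	~SketchReadBarrierOverride()
	{
		_vm->gcReadBarrierType = _savedType;
	}
};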
void
MM_CopyForwardScheme::scanRoots(MM_EnvironmentVLHGC* env)
{
	MM_CopyForwardSchemeRootScanner rootScanner(env, this);
	rootScanner.setStringTableAsRoot(!isCollectStringConstantsEnabled());
	rootScanner.setClassDataAsRoots(!isDynamicClassUnloadingEnabled());
	rootScanner.setIncludeStackFrameClassReferences(isDynamicClassUnloadingEnabled());

	rootScanner.scanRoots(env);

	/* Mark root set classes */
#if defined(J9VM_GC_DYNAMIC_CLASS_UNLOADING)
	if(isDynamicClassUnloadingEnabled()) {
		/* A single thread processes all class loaders, marking any loader which has instances outside of the collection set.
*/4232if (J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {4233bool foundSystemClassLoader = false;4234bool foundApplicationClassLoader = false;4235bool foundAnonymousClassLoader = false;42364237MM_ClassLoaderRememberedSet *classLoaderRememberedSet = _extensions->classLoaderRememberedSet;4238GC_ClassLoaderIterator classLoaderIterator(_javaVM->classLoaderBlocks);4239J9ClassLoader *classLoader = NULL;42404241while (NULL != (classLoader = classLoaderIterator.nextSlot())) {4242if (0 == (classLoader->gcFlags & J9_GC_CLASS_LOADER_DEAD)) {4243if(J9_ARE_ANY_BITS_SET(classLoader->flags, J9CLASSLOADER_ANON_CLASS_LOADER)) {4244foundAnonymousClassLoader = true;4245/* Anonymous classloader should be scanned on level of classes every time */4246GC_ClassLoaderSegmentIterator segmentIterator(classLoader, MEMORY_TYPE_RAM_CLASS);4247J9MemorySegment *segment = NULL;4248while(NULL != (segment = segmentIterator.nextSegment())) {4249GC_ClassHeapIterator classHeapIterator(_javaVM, segment);4250J9Class *clazz = NULL;4251while(NULL != (clazz = classHeapIterator.nextClass())) {4252if (classLoaderRememberedSet->isClassRemembered(env, clazz)) {4253MM_AllocationContextTarok *reservingContext = getContextForHeapAddress(clazz->classObject);4254copyAndForward(env, reservingContext, &clazz->classObject);4255}4256}4257}4258} else {4259if (classLoaderRememberedSet->isRemembered(env, classLoader)) {4260foundSystemClassLoader = foundSystemClassLoader || (classLoader == _javaVM->systemClassLoader);4261foundApplicationClassLoader = foundApplicationClassLoader || (classLoader == _javaVM->applicationClassLoader);4262if (NULL != classLoader->classLoaderObject) {4263/* until we decide if class loaders should be common, just relocate this object back into its existing node */4264MM_AllocationContextTarok *reservingContext = getContextForHeapAddress(classLoader->classLoaderObject);4265copyAndForward(env, reservingContext, &classLoader->classLoaderObject);4266} else {4267/* Only system/app classloaders can have a null classloader object (only during early bootstrap) */4268Assert_MM_true((classLoader == _javaVM->systemClassLoader) || (classLoader == _javaVM->applicationClassLoader));42694270/* We will never find the object for this class loader during scanning, so scan its class table immediately */4271GC_ClassLoaderClassesIterator iterator(_extensions, classLoader);4272J9Class *clazz = NULL;4273bool success = true;42744275while (success && (NULL != (clazz = iterator.nextClass()))) {4276Assert_MM_true(NULL != clazz->classObject);4277MM_AllocationContextTarok *clazzContext = getContextForHeapAddress(clazz->classObject);4278/* Copy/Forward the slot reference*/4279success = copyAndForward(env, clazzContext, (J9Object **)&(clazz->classObject));4280}42814282if (NULL != classLoader->moduleHashTable) {4283J9HashTableState walkState;4284J9Module **modulePtr = (J9Module **)hashTableStartDo(classLoader->moduleHashTable, &walkState);4285while (success && (NULL != modulePtr)) {4286J9Module * const module = *modulePtr;4287success = copyAndForward(env, getContextForHeapAddress(module->moduleObject), (J9Object **)&(module->moduleObject));4288if (success) {4289if (NULL != module->moduleName) {4290success = copyAndForward(env, getContextForHeapAddress(module->moduleName), (J9Object **)&(module->moduleName));4291}4292}4293if (success) {4294if (NULL != module->version) {4295success = copyAndForward(env, getContextForHeapAddress(module->version), (J9Object **)&(module->version));4296}4297}4298modulePtr = (J9Module**)hashTableNextDo(&walkState);4299}43004301if 
(success && (classLoader == _javaVM->systemClassLoader)) {4302success = copyAndForward(env, getContextForHeapAddress(_javaVM->unamedModuleForSystemLoader->moduleObject), (J9Object **)&(_javaVM->unamedModuleForSystemLoader->moduleObject));4303}4304}4305}4306}4307}4308}4309}43104311/* verify that we found the permanent class loaders in the above loop */4312Assert_MM_true(NULL != _javaVM->systemClassLoader);4313Assert_MM_true(foundSystemClassLoader);4314Assert_MM_true( (NULL == _javaVM->applicationClassLoader) || foundApplicationClassLoader );4315Assert_MM_true(NULL != _javaVM->anonClassLoader);4316Assert_MM_true(foundAnonymousClassLoader);4317}4318}4319#endif /* J9VM_GC_DYNAMIC_CLASS_UNLOADING */4320}43214322void4323MM_CopyForwardScheme::verifyDumpObjectDetails(MM_EnvironmentVLHGC *env, const char *title, J9Object *object)4324{4325PORT_ACCESS_FROM_ENVIRONMENT(env);43264327j9tty_printf(PORTLIB, "%s: %p\n", title, object);43284329if(NULL != object) {4330MM_HeapRegionDescriptorVLHGC *region = (MM_HeapRegionDescriptorVLHGC *)_regionManager->tableDescriptorForAddress(object);43314332j9tty_printf(PORTLIB, "\tregion:%p base:%p top:%p regionProperties:%u\n",4333region,4334region->getLowAddress(),4335region->getHighAddress(),4336region->getRegionProperties()4337);43384339j9tty_printf(PORTLIB, "\t\tbitSet:%c externalBitSet:%c shouldMark:%c initialLiveSet:%c survivorSet:%c freshSurvivorSet:%c age:%zu\n",4340_markMap->isBitSet(object) ? 'Y' : 'N',4341(NULL == env->_cycleState->_externalCycleState) ? 'N' : (env->_cycleState->_externalCycleState->_markMap->isBitSet(object) ? 'Y' : 'N'),4342region->_markData._shouldMark ? 'Y' : 'N',4343region->_copyForwardData._initialLiveSet ? 'Y' : 'N',4344region->isSurvivorRegion() ? 'Y' : 'N',4345region->isFreshSurvivorRegion() ? 'Y' : 'N',4346region->getLogicalAge()4347);4348}4349}43504351class MM_CopyForwardVerifyScanner : public MM_RootScanner4352{4353public:4354protected:4355private:4356MM_CopyForwardScheme *_copyForwardScheme; /**< Local reference back to the copy forward scheme driving the collection */43574358private:4359void verifyObject(J9Object **slotPtr)4360{4361MM_EnvironmentVLHGC *env = MM_EnvironmentVLHGC::getEnvironment(_env);43624363J9Object *objectPtr = *slotPtr;4364if(!_copyForwardScheme->_abortInProgress && !_copyForwardScheme->isObjectInNoEvacuationRegions(env, objectPtr) && _copyForwardScheme->verifyIsPointerInEvacute(env, objectPtr)) {4365PORT_ACCESS_FROM_ENVIRONMENT(env);4366j9tty_printf(PORTLIB, "Root slot points into evacuate! Slot %p dstObj %p. 
RootScannerEntity=%zu\n", slotPtr, objectPtr, (UDATA)_scanningEntity);4367Assert_MM_unreachable();4368}4369}43704371virtual void doSlot(J9Object **slotPtr) {4372verifyObject(slotPtr);4373}43744375virtual void doStackSlot(J9Object **slotPtr, void *walkState, const void* stackLocation) {4376if (_copyForwardScheme->isHeapObject(*slotPtr)) {4377/* heap object - validate and mark */4378Assert_MM_validStackSlot(MM_StackSlotValidator(MM_StackSlotValidator::COULD_BE_FORWARDED, *slotPtr, stackLocation, walkState).validate(_env));4379verifyObject(slotPtr);4380Assert_MM_mustBeClass(J9GC_J9OBJECT_CLAZZ_THREAD(*slotPtr, ((J9StackWalkState*)walkState)->walkThread));4381} else if (NULL != *slotPtr) {4382/* stack object - just validate */4383Assert_MM_validStackSlot(MM_StackSlotValidator(MM_StackSlotValidator::NOT_ON_HEAP, *slotPtr, stackLocation, walkState).validate(_env));4384}4385}43864387virtual void doVMThreadSlot(J9Object **slotPtr, GC_VMThreadIterator *vmThreadIterator) {4388if (_copyForwardScheme->isHeapObject(*slotPtr)) {4389verifyObject(slotPtr);4390Assert_MM_mustBeClass(J9GC_J9OBJECT_CLAZZ_THREAD(*slotPtr, vmThreadIterator->getVMThread()));4391} else if (NULL != *slotPtr) {4392Assert_MM_true(vmthreaditerator_state_monitor_records == vmThreadIterator->getState());4393Assert_MM_mustBeClass(J9GC_J9OBJECT_CLAZZ_THREAD(*slotPtr, vmThreadIterator->getVMThread()));4394}4395}43964397virtual void doClass(J9Class *clazz) {4398J9Object *classObject = (J9Object *)clazz->classObject;4399if(NULL != classObject) {4400if (_copyForwardScheme->isDynamicClassUnloadingEnabled() && !_copyForwardScheme->isLiveObject(classObject)) {4401/* don't verify garbage collected classes */4402} else {4403_copyForwardScheme->verifyClassObjectSlots(MM_EnvironmentVLHGC::getEnvironment(_env), classObject);4404}4405}4406}44074408virtual void doClassLoader(J9ClassLoader *classLoader) {4409J9Object *classLoaderObject = J9GC_J9CLASSLOADER_CLASSLOADEROBJECT(classLoader);4410if(NULL != classLoaderObject) {4411if (_copyForwardScheme->isDynamicClassUnloadingEnabled() && !_copyForwardScheme->isLiveObject(classLoaderObject)) {4412/* don't verify garbage collected class loaders */4413} else {4414verifyObject(J9GC_J9CLASSLOADER_CLASSLOADEROBJECT_EA(classLoader));4415}4416}4417}44184419#if defined(J9VM_GC_FINALIZATION)4420virtual void doUnfinalizedObject(J9Object *objectPtr, MM_UnfinalizedObjectList *list) {4421MM_EnvironmentVLHGC *env = MM_EnvironmentVLHGC::getEnvironment(_env);44224423if(!_copyForwardScheme->_abortInProgress && !_copyForwardScheme->isObjectInNoEvacuationRegions(env, objectPtr) && _copyForwardScheme->verifyIsPointerInEvacute(env, objectPtr)) {4424PORT_ACCESS_FROM_ENVIRONMENT(env);4425j9tty_printf(PORTLIB, "Unfinalized object list points into evacuate! list %p object %p\n", list, objectPtr);4426Assert_MM_unreachable();4427}4428}4429#endif /* J9VM_GC_FINALIZATION */44304431#if defined(J9VM_GC_FINALIZATION)4432virtual void doFinalizableObject(j9object_t objectPtr) {4433MM_EnvironmentVLHGC *env = MM_EnvironmentVLHGC::getEnvironment(_env);44344435if(!_copyForwardScheme->_abortInProgress && !_copyForwardScheme->isObjectInNoEvacuationRegions(env, objectPtr) && _copyForwardScheme->verifyIsPointerInEvacute(env, objectPtr)) {4436PORT_ACCESS_FROM_ENVIRONMENT(env);4437j9tty_printf(PORTLIB, "Finalizable object in evacuate! 
object %p\n", objectPtr);4438Assert_MM_unreachable();4439}4440}4441#endif /* J9VM_GC_FINALIZATION */44424443virtual void doOwnableSynchronizerObject(J9Object *objectPtr, MM_OwnableSynchronizerObjectList *list) {4444MM_EnvironmentVLHGC *env = MM_EnvironmentVLHGC::getEnvironment(_env);44454446if(!_copyForwardScheme->_abortInProgress && !_copyForwardScheme->isObjectInNoEvacuationRegions(env, objectPtr) && _copyForwardScheme->verifyIsPointerInEvacute(env, objectPtr)) {4447PORT_ACCESS_FROM_ENVIRONMENT(env);4448j9tty_printf(PORTLIB, "OwnableSynchronizer object list points into evacuate! list %p object %p\n", list, objectPtr);4449Assert_MM_unreachable();4450}4451}44524453public:4454MM_CopyForwardVerifyScanner(MM_EnvironmentVLHGC *env, MM_CopyForwardScheme *copyForwardScheme) :4455MM_RootScanner(env, true),4456_copyForwardScheme(copyForwardScheme)4457{4458_typeId = __FUNCTION__;4459};44604461protected:4462private:44634464};44654466void4467MM_CopyForwardScheme::verifyCopyForwardResult(MM_EnvironmentVLHGC *env)4468{4469/* Destination regions verifying their integrity */4470GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);4471MM_HeapRegionDescriptorVLHGC *region = NULL;44724473while(NULL != (region = regionIterator.nextRegion())) {4474if(region->isArrayletLeaf()) {4475J9Object *spineObject = (J9Object *)region->_allocateData.getSpine();4476Assert_MM_true(NULL != spineObject);4477/* the spine must be marked if it was copied as a live object or if we aborted the copy-forward */4478/* otherwise, it must not be forwarded (since that would imply that the spine survived but the pointer wasn't updated) */4479if(!_markMap->isBitSet(spineObject)) {4480MM_ForwardedHeader forwardedSpine(spineObject, _extensions->compressObjectReferences());4481if (forwardedSpine.isForwardedPointer()) {4482PORT_ACCESS_FROM_ENVIRONMENT(env);4483j9tty_printf(PORTLIB, "Spine pointer is not marked and is forwarded (leaf region's pointer to spine not updated)! 
Region %p Spine %p (should be %p)\n", region, spineObject, forwardedSpine.getForwardedObject());4484verifyDumpObjectDetails(env, "spineObject", spineObject);4485Assert_MM_unreachable();4486}4487}4488} else {4489if(region->containsObjects()) {4490if(region->isSurvivorRegion()) {4491if (region->isFreshSurvivorRegion()) {4492verifyChunkSlotsAndMapSlotsInRange(env, (UDATA *)region->getLowAddress(), (UDATA *)region->getHighAddress());4493} else {4494/* iterating from isCompressedSurvivor */4495GC_SurvivorMemoryIterator survivorIterator(env, region, _compressedSurvivorTable);4496while (survivorIterator.next()) {4497verifyChunkSlotsAndMapSlotsInRange(env, (UDATA *)survivorIterator.getCurrentLow(), (UDATA *)survivorIterator.getCurrentHigh());4498}4499}4500}45014502if(region->_copyForwardData._initialLiveSet) {4503/* iterating from isNotCompressedSurvivor */4504GC_SurvivorMemoryIterator survivorIterator(env, region, _compressedSurvivorTable, false);4505while (survivorIterator.next()) {4506verifyObjectsInRange(env, (UDATA *)survivorIterator.getCurrentLow(), (UDATA *)survivorIterator.getCurrentHigh());4507}4508}4509}4510}4511}45124513MM_CopyForwardVerifyScanner scanner(env, this);4514scanner.scanAllSlots(env);45154516if(NULL != env->_cycleState->_externalCycleState) {4517verifyExternalState(env);4518}4519}45204521void4522MM_CopyForwardScheme::verifyObject(MM_EnvironmentVLHGC *env, J9Object *objectPtr)4523{4524J9Class* clazz = J9GC_J9OBJECT_CLAZZ(objectPtr, env);4525Assert_MM_mustBeClass(clazz);4526switch(_extensions->objectModel.getScanType(clazz)) {4527case GC_ObjectModel::SCAN_MIXED_OBJECT_LINKED:4528case GC_ObjectModel::SCAN_ATOMIC_MARKABLE_REFERENCE_OBJECT:4529case GC_ObjectModel::SCAN_MIXED_OBJECT:4530case GC_ObjectModel::SCAN_OWNABLESYNCHRONIZER_OBJECT:4531verifyMixedObjectSlots(env, objectPtr);4532break;4533case GC_ObjectModel::SCAN_CLASS_OBJECT:4534verifyClassObjectSlots(env, objectPtr);4535break;4536case GC_ObjectModel::SCAN_CLASSLOADER_OBJECT:4537verifyClassLoaderObjectSlots(env, objectPtr);4538break;4539case GC_ObjectModel::SCAN_POINTER_ARRAY_OBJECT:4540verifyPointerArrayObjectSlots(env, objectPtr);4541break;4542case GC_ObjectModel::SCAN_REFERENCE_MIXED_OBJECT:4543verifyReferenceObjectSlots(env, objectPtr);4544break;4545case GC_ObjectModel::SCAN_PRIMITIVE_ARRAY_OBJECT:4546/* nothing to do */4547break;4548default:4549Assert_MM_unreachable();4550}4551}45524553void4554MM_CopyForwardScheme::verifyMixedObjectSlots(MM_EnvironmentVLHGC *env, J9Object *objectPtr)4555{4556GC_MixedObjectIterator mixedObjectIterator(_javaVM->omrVM, objectPtr);4557GC_SlotObject *slotObject = NULL;45584559while (NULL != (slotObject = mixedObjectIterator.nextSlot())) {4560J9Object *dstObject = slotObject->readReferenceFromSlot();4561if(!_abortInProgress && !isObjectInNoEvacuationRegions(env, dstObject) && verifyIsPointerInEvacute(env, dstObject)) {4562PORT_ACCESS_FROM_ENVIRONMENT(env);4563j9tty_printf(PORTLIB, "Mixed object slot points to evacuate! srcObj %p slot %p dstObj %p\n", objectPtr, slotObject->readAddressFromSlot(), dstObject);4564verifyDumpObjectDetails(env, "srcObj", objectPtr);4565verifyDumpObjectDetails(env, "dstObj", dstObject);4566Assert_MM_unreachable();4567}4568if((NULL != dstObject) && !_markMap->isBitSet(dstObject)) {4569PORT_ACCESS_FROM_ENVIRONMENT(env);4570j9tty_printf(PORTLIB, "Mixed object slot points to unmarked object! 
srcObj %p slot %p dstObj %p\n", objectPtr, slotObject->readAddressFromSlot(), dstObject);4571verifyDumpObjectDetails(env, "srcObj", objectPtr);4572verifyDumpObjectDetails(env, "dstObj", dstObject);4573Assert_MM_unreachable();4574}4575}4576}45774578void4579MM_CopyForwardScheme::verifyReferenceObjectSlots(MM_EnvironmentVLHGC *env, J9Object *objectPtr)4580{4581fj9object_t referentToken = J9GC_J9VMJAVALANGREFERENCE_REFERENT(env, objectPtr);4582J9Object* referentPtr = _extensions->accessBarrier->convertPointerFromToken(referentToken);4583if(!_abortInProgress && !isObjectInNoEvacuationRegions(env, referentPtr) && verifyIsPointerInEvacute(env, referentPtr)) {4584PORT_ACCESS_FROM_ENVIRONMENT(env);4585j9tty_printf(PORTLIB, "RefMixed referent slot points to evacuate! srcObj %p dstObj %p\n", objectPtr, referentPtr);4586Assert_MM_unreachable();4587}4588if((NULL != referentPtr) && !_markMap->isBitSet(referentPtr)) {4589PORT_ACCESS_FROM_ENVIRONMENT(env);4590j9tty_printf(PORTLIB, "RefMixed referent slot points to unmarked object! srcObj %p dstObj %p\n", objectPtr, referentPtr);4591verifyDumpObjectDetails(env, "srcObj", objectPtr);4592verifyDumpObjectDetails(env, "referentPtr", referentPtr);4593Assert_MM_unreachable();4594}45954596GC_MixedObjectIterator mixedObjectIterator(_javaVM->omrVM, objectPtr);4597GC_SlotObject *slotObject = NULL;45984599while (NULL != (slotObject = mixedObjectIterator.nextSlot())) {4600J9Object *dstObject = slotObject->readReferenceFromSlot();4601if(!_abortInProgress && !isObjectInNoEvacuationRegions(env, dstObject) && verifyIsPointerInEvacute(env, dstObject)) {4602PORT_ACCESS_FROM_ENVIRONMENT(env);4603j9tty_printf(PORTLIB, "RefMixed object slot points to evacuate! srcObj %p slot %p dstObj %p\n", objectPtr, slotObject->readAddressFromSlot(), dstObject);4604Assert_MM_unreachable();4605}4606if((NULL != dstObject) && !_markMap->isBitSet(dstObject)) {4607PORT_ACCESS_FROM_ENVIRONMENT(env);4608j9tty_printf(PORTLIB, "RefMixed object slot points to unmarked object! srcObj %p slot %p dstObj %p\n", objectPtr, slotObject->readAddressFromSlot(), dstObject);4609verifyDumpObjectDetails(env, "srcObj", objectPtr);4610verifyDumpObjectDetails(env, "dstPtr", dstObject);4611Assert_MM_unreachable();4612}4613}4614}46154616void4617MM_CopyForwardScheme::verifyPointerArrayObjectSlots(MM_EnvironmentVLHGC *env, J9Object *objectPtr)4618{4619GC_PointerArrayIterator pointerArrayIterator(_javaVM, objectPtr);4620GC_SlotObject *slotObject = NULL;46214622while((slotObject = pointerArrayIterator.nextSlot()) != NULL) {4623J9Object *dstObject = slotObject->readReferenceFromSlot();4624if(!_abortInProgress && !isObjectInNoEvacuationRegions(env, dstObject) && verifyIsPointerInEvacute(env, dstObject)) {4625PORT_ACCESS_FROM_ENVIRONMENT(env);4626j9tty_printf(PORTLIB, "Pointer array slot points to evacuate! srcObj %p slot %p dstObj %p\n", objectPtr, slotObject->readAddressFromSlot(), dstObject);4627Assert_MM_unreachable();4628}4629if((NULL != dstObject) && !_markMap->isBitSet(dstObject)) {4630PORT_ACCESS_FROM_ENVIRONMENT(env);4631j9tty_printf(PORTLIB, "Pointer array slot points to unmarked object! 
srcObj %p slot %p dstObj %p\n", objectPtr, slotObject->readAddressFromSlot(), dstObject);4632verifyDumpObjectDetails(env, "srcObj", objectPtr);4633verifyDumpObjectDetails(env, "dstObj", dstObject);4634Assert_MM_unreachable();4635}4636}4637}46384639void4640MM_CopyForwardScheme::verifyClassObjectSlots(MM_EnvironmentVLHGC *env, J9Object *classObject)4641{4642verifyMixedObjectSlots(env, classObject);46434644J9Class *classPtr = J9VM_J9CLASS_FROM_HEAPCLASS((J9VMThread*)env->getLanguageVMThread(), classObject);46454646if (NULL != classPtr) {4647volatile j9object_t * slotPtr = NULL;46484649do {4650/*4651* scan static fields4652*/4653GC_ClassStaticsIterator classStaticsIterator(env, classPtr);4654while(NULL != (slotPtr = classStaticsIterator.nextSlot())) {4655J9Object *dstObject = *slotPtr;4656if(!_abortInProgress && !isObjectInNoEvacuationRegions(env, dstObject) && verifyIsPointerInEvacute(env, dstObject)) {4657PORT_ACCESS_FROM_ENVIRONMENT(env);4658j9tty_printf(PORTLIB, "Class static slot points to evacuate! srcObj %p J9Class %p slot %p dstObj %p\n", classObject, classPtr, slotPtr, dstObject);4659Assert_MM_unreachable();4660}4661if((NULL != dstObject) && !_markMap->isBitSet(dstObject)) {4662PORT_ACCESS_FROM_ENVIRONMENT(env);4663j9tty_printf(PORTLIB, "Class static slot points to unmarked object! srcObj %p J9Class %p slot %p dstObj %p\n", classObject, classPtr, slotPtr, dstObject);4664verifyDumpObjectDetails(env, "classObject", classObject);4665verifyDumpObjectDetails(env, "dstObj", dstObject);4666Assert_MM_unreachable();4667}4668}46694670/*4671* scan call sites4672*/4673GC_CallSitesIterator callSitesIterator(classPtr);4674while(NULL != (slotPtr = callSitesIterator.nextSlot())) {4675J9Object *dstObject = *slotPtr;4676if(!_abortInProgress && !isObjectInNoEvacuationRegions(env, dstObject) && verifyIsPointerInEvacute(env, dstObject)) {4677PORT_ACCESS_FROM_ENVIRONMENT(env);4678j9tty_printf(PORTLIB, "Class call site slot points to evacuate! srcObj %p J9Class %p slot %p dstObj %p\n", classObject, classPtr, slotPtr, dstObject);4679Assert_MM_unreachable();4680}4681if((NULL != dstObject) && !_markMap->isBitSet(dstObject)) {4682PORT_ACCESS_FROM_ENVIRONMENT(env);4683j9tty_printf(PORTLIB, "Class call site slot points to unmarked object! srcObj %p J9Class %p slot %p dstObj %p\n", classObject, classPtr, slotPtr, dstObject);4684verifyDumpObjectDetails(env, "classObject", classObject);4685verifyDumpObjectDetails(env, "dstObj", dstObject);4686Assert_MM_unreachable();4687}4688}46894690/*4691* scan MethodTypes4692*/4693#if defined(J9VM_OPT_OPENJDK_METHODHANDLE)4694GC_MethodTypesIterator methodTypesIterator(classPtr->romClass->invokeCacheCount, classPtr->invokeCache);4695#else /* defined(J9VM_OPT_OPENJDK_METHODHANDLE) */4696GC_MethodTypesIterator methodTypesIterator(classPtr->romClass->methodTypeCount, classPtr->methodTypes);4697#endif /* defined(J9VM_OPT_OPENJDK_METHODHANDLE) */46984699while(NULL != (slotPtr = methodTypesIterator.nextSlot())) {4700J9Object *dstObject = *slotPtr;4701if(!_abortInProgress && !isObjectInNoEvacuationRegions(env, dstObject) && verifyIsPointerInEvacute(env, dstObject)) {4702PORT_ACCESS_FROM_ENVIRONMENT(env);4703j9tty_printf(PORTLIB, "Class MethodType slot points to evacuate! srcObj %p J9Class %p slot %p dstObj %p\n", classObject, classPtr, slotPtr, dstObject);4704Assert_MM_unreachable();4705}4706if((NULL != dstObject) && !_markMap->isBitSet(dstObject)) {4707PORT_ACCESS_FROM_ENVIRONMENT(env);4708j9tty_printf(PORTLIB, "Class MethodType slot points to unmarked object! 
srcObj %p J9Class %p slot %p dstObj %p\n", classObject, classPtr, slotPtr, dstObject);4709verifyDumpObjectDetails(env, "classObject", classObject);4710verifyDumpObjectDetails(env, "dstObj", dstObject);4711Assert_MM_unreachable();4712}4713}47144715/*4716* scan VarHandle MethodTypes4717*/4718#if defined(J9VM_OPT_METHOD_HANDLE)4719GC_MethodTypesIterator varHandleMethodTypesIterator(classPtr->romClass->varHandleMethodTypeCount, classPtr->varHandleMethodTypes);4720while(NULL != (slotPtr = varHandleMethodTypesIterator.nextSlot())) {4721J9Object *dstObject = *slotPtr;4722if(!_abortInProgress && !isObjectInNoEvacuationRegions(env, dstObject) && verifyIsPointerInEvacute(env, dstObject)) {4723PORT_ACCESS_FROM_ENVIRONMENT(env);4724j9tty_printf(PORTLIB, "Class MethodType slot points to evacuate! srcObj %p J9Class %p slot %p dstObj %p\n", classObject, classPtr, slotPtr, dstObject);4725Assert_MM_unreachable();4726}4727if((NULL != dstObject) && !_markMap->isBitSet(dstObject)) {4728PORT_ACCESS_FROM_ENVIRONMENT(env);4729j9tty_printf(PORTLIB, "Class MethodType slot points to unmarked object! srcObj %p J9Class %p slot %p dstObj %p\n", classObject, classPtr, slotPtr, dstObject);4730verifyDumpObjectDetails(env, "classObject", classObject);4731verifyDumpObjectDetails(env, "dstObj", dstObject);4732Assert_MM_unreachable();4733}4734}4735#endif /* defined(J9VM_OPT_METHOD_HANDLE) */47364737/*4738* scan constant pool objects4739*/4740/* we can safely ignore any classes referenced by the constant pool, since4741* these are guaranteed to be referenced by our class loader4742*/4743GC_ConstantPoolObjectSlotIterator constantPoolIterator(_javaVM, classPtr);4744while(NULL != (slotPtr = constantPoolIterator.nextSlot())) {4745J9Object *dstObject = *slotPtr;4746if(!_abortInProgress && !isObjectInNoEvacuationRegions(env, dstObject) && verifyIsPointerInEvacute(env, dstObject)) {4747PORT_ACCESS_FROM_ENVIRONMENT(env);4748j9tty_printf(PORTLIB, "Class CP slot points to evacuate! srcObj %p J9Class %p slot %p dstObj %p\n", classObject, classPtr, slotPtr, dstObject);4749Assert_MM_unreachable();4750}4751if((NULL != dstObject) && !_markMap->isBitSet(dstObject)) {4752PORT_ACCESS_FROM_ENVIRONMENT(env);4753j9tty_printf(PORTLIB, "Class CP slot points to unmarked object! 
srcObj %p J9Class %p slot %p dstObj %p\n", classObject, classPtr, slotPtr, dstObject);4754verifyDumpObjectDetails(env, "classObject", classObject);4755verifyDumpObjectDetails(env, "dstObj", dstObject);4756Assert_MM_unreachable();4757}4758}4759classPtr = classPtr->replacedClass;4760} while (NULL != classPtr);4761}4762}47634764void4765MM_CopyForwardScheme::verifyClassLoaderObjectSlots(MM_EnvironmentVLHGC *env, J9Object *classLoaderObject)4766{4767verifyMixedObjectSlots(env, classLoaderObject);47684769J9ClassLoader *classLoader = J9VMJAVALANGCLASSLOADER_VMREF((J9VMThread*)env->getLanguageVMThread(), classLoaderObject);4770if ((NULL != classLoader) && (0 == (classLoader->flags & J9CLASSLOADER_ANON_CLASS_LOADER))) {4771/* No lock is required because this only runs under exclusive access */4772/* (NULL == classLoader->classHashTable) is true ONLY for DEAD class loaders */4773Assert_MM_true(NULL != classLoader->classHashTable);4774GC_ClassLoaderClassesIterator iterator(_extensions, classLoader);4775J9Class *clazz = NULL;4776while (NULL != (clazz = iterator.nextClass())) {4777if (!_abortInProgress && !isObjectInNoEvacuationRegions(env, (J9Object *)clazz->classObject) && verifyIsPointerInEvacute(env, (J9Object *)clazz->classObject)) {4778PORT_ACCESS_FROM_ENVIRONMENT(env);4779j9tty_printf(PORTLIB, "Class loader table class object points to evacuate! srcObj %p clazz %p clazzObj %p\n", classLoaderObject, clazz, clazz->classObject);4780Assert_MM_unreachable();4781}4782if ((NULL != clazz->classObject) && !_markMap->isBitSet((J9Object *)clazz->classObject)) {4783PORT_ACCESS_FROM_ENVIRONMENT(env);4784j9tty_printf(PORTLIB, "Class loader table class object points to unmarked object! srcObj %p clazz %p clazzObj %p\n", classLoaderObject, clazz, clazz->classObject);4785verifyDumpObjectDetails(env, "classLoaderObject", classLoaderObject);4786verifyDumpObjectDetails(env, "classObject", (J9Object *)clazz->classObject);4787Assert_MM_unreachable();4788}4789}4790}4791}47924793void4794MM_CopyForwardScheme::verifyExternalState(MM_EnvironmentVLHGC *env)4795{4796/* this function has knowledge of the collection set, which is only valid during a PGC */4797Assert_MM_true(NULL != env->_cycleState->_externalCycleState);47984799MM_MarkMap *externalMarkMap = env->_cycleState->_externalCycleState->_markMap;4800Assert_MM_true(externalMarkMap != _markMap);48014802MM_HeapRegionDescriptorVLHGC *region = NULL;4803GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);4804while(NULL != (region = regionIterator.nextRegion())) {4805if(region->containsObjects()) {4806if(region->_markData._shouldMark) {4807Assert_MM_true(region->_copyForwardData._initialLiveSet);48084809if(_abortInProgress || region->_markData._noEvacuation) {4810MM_HeapMapIterator mapIterator(_extensions, externalMarkMap, (UDATA *)region->getLowAddress(), (UDATA *)region->getHighAddress(), false);4811J9Object *objectPtr = NULL;48124813while(NULL != (objectPtr = mapIterator.nextObject())) {4814Assert_MM_true(_markMap->isBitSet(objectPtr));4815}4816} else {4817/* Evacuate space - make sure the GMP mark map is clear */4818UDATA lowIndex = externalMarkMap->getSlotIndex((J9Object *)region->getLowAddress());4819UDATA highIndex = externalMarkMap->getSlotIndex((J9Object *)region->getHighAddress());48204821for(UDATA slotIndex = lowIndex; slotIndex < highIndex; slotIndex++) {4822Assert_MM_true(0 == externalMarkMap->getSlot(slotIndex));4823}4824}4825} else if (region->isSurvivorRegion()) {4826/* Survivor space - check that anything marked in the GMP map is also marked in the 
PGC map */4827if (region->isFreshSurvivorRegion()) {4828checkConsistencyGMPMapAndPGCMap(env, region, (UDATA *)region->getLowAddress(), (UDATA *)region->getHighAddress());4829} else {4830/* iterating from isCompressedSurvivor */4831GC_SurvivorMemoryIterator survivorIterator(env, region, _compressedSurvivorTable);4832while (survivorIterator.next()) {4833checkConsistencyGMPMapAndPGCMap(env, region, (UDATA *)survivorIterator.getCurrentLow(), (UDATA *)survivorIterator.getCurrentHigh());4834}4835}4836}4837}4838}48394840/* Check that no object in the work packets appears in the evacuate space.4841* If it appears in survivor, verify that both map bits are set.4842*/4843MM_WorkPacketsIterator packetIterator(env, env->_cycleState->_externalCycleState->_workPackets);4844MM_Packet *packet = NULL;4845while (NULL != (packet = packetIterator.nextPacket(env))) {4846if (!packet->isEmpty()) {4847/* there is data in this packet so use it */4848MM_PacketSlotIterator slotIterator(packet);4849J9Object **slot = NULL;4850while (NULL != (slot = slotIterator.nextSlot())) {4851J9Object *object = *slot;4852Assert_MM_true(NULL != object);4853if (PACKET_INVALID_OBJECT != (UDATA)object) {4854Assert_MM_false(!_abortInProgress && !isObjectInNoEvacuationRegions(env, object) && verifyIsPointerInEvacute(env, object));4855Assert_MM_true(!verifyIsPointerInSurvivor(env, object) || (_markMap->isBitSet(object) && externalMarkMap->isBitSet(object)));4856}4857}4858}4859}4860}48614862bool4863MM_CopyForwardScheme::verifyIsPointerInSurvivor(MM_EnvironmentVLHGC *env, J9Object *object)4864{4865if(NULL == object) {4866return false;4867}4868MM_HeapRegionDescriptorVLHGC *region = (MM_HeapRegionDescriptorVLHGC *)_regionManager->physicalTableDescriptorForAddress(object);4869bool result = region->isFreshSurvivorRegion();4870if (!result && region->isSurvivorRegion()) {4871result = isCompressedSurvivor((void*)object);4872}4873return result;4874}48754876bool4877MM_CopyForwardScheme::verifyIsPointerInEvacute(MM_EnvironmentVLHGC *env, J9Object *object)4878{4879if(NULL == object) {4880return false;4881}48824883MM_HeapRegionDescriptorVLHGC *region = NULL;4884region = (MM_HeapRegionDescriptorVLHGC *)_regionManager->physicalTableDescriptorForAddress(object);4885return region->_markData._shouldMark;4886}48874888void4889MM_CopyForwardScheme::verifyObjectsInRange(MM_EnvironmentVLHGC *env, UDATA *lowAddress, UDATA *highAddress)4890{4891MM_HeapMapIterator iterator(_extensions, _markMap, lowAddress, highAddress, false);4892J9Object *objectPtr = NULL;4893while (NULL != (objectPtr = (iterator.nextObject()))) {4894verifyObject(env, objectPtr);4895}4896}48974898void4899MM_CopyForwardScheme::verifyChunkSlotsAndMapSlotsInRange(MM_EnvironmentVLHGC *env, UDATA *lowAddress, UDATA *highAddress)4900{4901MM_HeapMapIterator mapIterator(_extensions, _markMap, lowAddress, highAddress, false);4902GC_ObjectHeapIteratorAddressOrderedList heapChunkIterator(_extensions, (J9Object *)lowAddress, (J9Object *)highAddress, false);4903J9Object *objectPtr = NULL;49044905while(NULL != (objectPtr = heapChunkIterator.nextObject())) {4906J9Object *mapObjectPtr = mapIterator.nextObject();49074908if(objectPtr != mapObjectPtr) {4909PORT_ACCESS_FROM_ENVIRONMENT(env);4910j9tty_printf(PORTLIB, "ChunkIterator and mapIterator did not match up during walk of survivor space! 
ChunkSlot %p MapSlot %p\n", objectPtr, mapObjectPtr);4911Assert_MM_unreachable();4912break;4913}4914verifyObject(env, objectPtr);4915}4916if(NULL != mapIterator.nextObject()) {4917PORT_ACCESS_FROM_ENVIRONMENT(env);4918j9tty_printf(PORTLIB, "Survivor space mapIterator did not end when the chunkIterator did!\n");4919Assert_MM_unreachable();4920}4921}49224923void4924MM_CopyForwardScheme:: cleanOverflowInRange(MM_EnvironmentVLHGC *env, UDATA *lowAddress, UDATA *highAddress)4925{4926/* At this point, no copying should happen, so that reservingContext is irrelevant */4927MM_AllocationContextTarok *reservingContext = _commonContext;4928MM_HeapMapIterator objectIterator = MM_HeapMapIterator(MM_GCExtensions::getExtensions(env), env->_cycleState->_markMap, lowAddress, highAddress);49294930J9Object *object = NULL;4931while (NULL != (object = objectIterator.nextObject())) {4932scanObject(env, reservingContext, object, SCAN_REASON_OVERFLOWED_REGION);4933}4934}49354936void4937MM_CopyForwardScheme::checkConsistencyGMPMapAndPGCMap(MM_EnvironmentVLHGC *env, MM_HeapRegionDescriptorVLHGC *region, UDATA *lowAddress, UDATA *highAddress)4938{4939MM_MarkMap *externalMarkMap = env->_cycleState->_externalCycleState->_markMap;4940MM_HeapMapIterator mapIterator(_extensions, externalMarkMap, lowAddress, highAddress, false);4941J9Object *objectPtr = NULL;49424943while(NULL != (objectPtr = mapIterator.nextObject())) {4944Assert_MM_true(_markMap->isBitSet(objectPtr));4945Assert_MM_true(objectPtr >= region->getLowAddress());4946Assert_MM_true(objectPtr < region->getHighAddress());4947}4948}49494950void4951MM_CopyForwardScheme::scanWeakReferenceObjects(MM_EnvironmentVLHGC *env)4952{4953Assert_MM_true(env->getGCEnvironment()->_referenceObjectBuffer->isEmpty());49544955MM_HeapRegionDescriptorVLHGC *region = NULL;4956GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);4957while(NULL != (region = regionIterator.nextRegion())) {4958if ((region->isSurvivorRegion() || region->_copyForwardData._evacuateSet) && !region->getReferenceObjectList()->wasWeakListEmpty()) {4959if(J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {4960processReferenceList(env, region, region->getReferenceObjectList()->getPriorWeakList(), &env->_copyForwardStats._weakReferenceStats);4961}4962}4963}49644965/* processReferenceList() may have pushed remembered references back onto the buffer if a GMP is active */4966env->getGCEnvironment()->_referenceObjectBuffer->flush(env);4967}49684969void4970MM_CopyForwardScheme::scanSoftReferenceObjects(MM_EnvironmentVLHGC *env)4971{4972Assert_MM_true(env->getGCEnvironment()->_referenceObjectBuffer->isEmpty());49734974MM_HeapRegionDescriptorVLHGC *region = NULL;4975GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);4976while(NULL != (region = regionIterator.nextRegion())) {4977if ((region->isSurvivorRegion() || region->_copyForwardData._evacuateSet) && !region->getReferenceObjectList()->wasSoftListEmpty()) {4978if(J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {4979processReferenceList(env, region, region->getReferenceObjectList()->getPriorSoftList(), &env->_copyForwardStats._softReferenceStats);4980}4981}4982}49834984/* processReferenceList() may have pushed remembered references back onto the buffer if a GMP is active */4985env->getGCEnvironment()->_referenceObjectBuffer->flush(env);4986}49874988void4989MM_CopyForwardScheme::scanPhantomReferenceObjects(MM_EnvironmentVLHGC *env)4990{4991/* unfinalized processing may discover more phantom reference objects 
*/4992env->getGCEnvironment()->_referenceObjectBuffer->flush(env);49934994if (env->_currentTask->synchronizeGCThreadsAndReleaseSingleThread(env, UNIQUE_ID)) {4995Assert_MM_true(0 == _phantomReferenceRegionsToProcess);4996env->_cycleState->_referenceObjectOptions |= MM_CycleState::references_clear_phantom;4997MM_HeapRegionDescriptorVLHGC *region = NULL;4998GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);4999while(NULL != (region = regionIterator.nextRegion())) {5000Assert_MM_true(region->getReferenceObjectList()->wasPhantomListEmpty());5001Assert_MM_false(region->_copyForwardData._requiresPhantomReferenceProcessing);5002if (region->isSurvivorRegion() || region->_copyForwardData._evacuateSet) {5003region->getReferenceObjectList()->startPhantomReferenceProcessing();5004if (!region->getReferenceObjectList()->wasPhantomListEmpty()) {5005region->_copyForwardData._requiresPhantomReferenceProcessing = true;5006_phantomReferenceRegionsToProcess += 1;5007}5008}5009}5010env->_currentTask->releaseSynchronizedGCThreads(env);5011}50125013UDATA phantomReferenceRegionsProcessed = 0;5014MM_HeapRegionDescriptorVLHGC *region = NULL;5015GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);5016while(NULL != (region = regionIterator.nextRegion())) {5017if (region->_copyForwardData._requiresPhantomReferenceProcessing) {5018Assert_MM_true(region->isSurvivorRegion() || region->_copyForwardData._evacuateSet);5019Assert_MM_false(region->getReferenceObjectList()->wasPhantomListEmpty());5020phantomReferenceRegionsProcessed += 1;5021if(J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {5022processReferenceList(env, region, region->getReferenceObjectList()->getPriorPhantomList(), &env->_copyForwardStats._phantomReferenceStats);5023}5024}5025}50265027Assert_MM_true(_phantomReferenceRegionsToProcess == phantomReferenceRegionsProcessed);50285029/* processReferenceList() may have pushed remembered references back onto the buffer if a GMP is active */5030env->getGCEnvironment()->_referenceObjectBuffer->flush(env);5031}50325033void5034MM_CopyForwardScheme::processReferenceList(MM_EnvironmentVLHGC *env, MM_HeapRegionDescriptorVLHGC* region, J9Object* headOfList, MM_ReferenceStats *referenceStats)5035{5036/* no list can possibly contain more reference objects than there are bytes in a region. 
 */
	const UDATA maxObjects = _regionManager->getRegionSize();
	UDATA objectsVisited = 0;
	GC_FinalizableReferenceBuffer buffer(_extensions);
	bool const compressed = env->compressObjectReferences();

	J9Object* referenceObj = headOfList;
	while (NULL != referenceObj) {
		Assert_MM_true(isLiveObject(referenceObj));

		objectsVisited += 1;
		referenceStats->_candidates += 1;

		Assert_MM_true(region->isAddressInRegion(referenceObj));
		Assert_MM_true(objectsVisited < maxObjects);

		J9Object* nextReferenceObj = _extensions->accessBarrier->getReferenceLink(referenceObj);

		GC_SlotObject referentSlotObject(_extensions->getOmrVM(), J9GC_J9VMJAVALANGREFERENCE_REFERENT_ADDRESS(env, referenceObj));
		J9Object *referent = referentSlotObject.readReferenceFromSlot();
		if (NULL != referent) {
			UDATA referenceObjectType = J9CLASS_FLAGS(J9GC_J9OBJECT_CLAZZ(referenceObj, env)) & J9AccClassReferenceMask;

			/* update the referent if it's been forwarded */
			MM_ForwardedHeader forwardedReferent(referent, compressed);
			if (forwardedReferent.isForwardedPointer()) {
				referent = forwardedReferent.getForwardedObject();
				referentSlotObject.writeReferenceToSlot(referent);
			} else {
				Assert_MM_mustBeClass(_extensions->objectModel.getPreservedClass(&forwardedReferent));
			}

			if (isLiveObject(referent)) {
				if (J9AccClassReferenceSoft == referenceObjectType) {
					U_32 age = J9GC_J9VMJAVALANGSOFTREFERENCE_AGE(env, referenceObj);
					if (age < _extensions->getMaxSoftReferenceAge()) {
						/* Soft reference hasn't aged sufficiently yet - increment the age */
						J9GC_J9VMJAVALANGSOFTREFERENCE_AGE(env, referenceObj) = age + 1;
					}
				}
				_interRegionRememberedSet->rememberReferenceForMark(env, referenceObj, referent);
			} else {
				Assert_MM_true(isObjectInEvacuateMemory(referent));
				/* transition the state to cleared */
				I_32 previousState = J9GC_J9VMJAVALANGREFERENCE_STATE(env, referenceObj);
				Assert_MM_true((GC_ObjectModel::REF_STATE_INITIAL == previousState) || (GC_ObjectModel::REF_STATE_REMEMBERED == previousState));

				referenceStats->_cleared += 1;
				J9GC_J9VMJAVALANGREFERENCE_STATE(env, referenceObj) = GC_ObjectModel::REF_STATE_CLEARED;

				/* A phantom reference keeps its referent alive in Java 8, but not in Java 9 and later */
				if ((J9AccClassReferencePhantom == referenceObjectType) && ((J2SE_VERSION(_javaVM) & J2SE_VERSION_MASK) <= J2SE_18)) {
					/* Scanning will be done after the enqueuing */
					copyAndForward(env, region->_allocateData._owningContext, referenceObj, &referentSlotObject);
					if (GC_ObjectModel::REF_STATE_REMEMBERED == previousState) {
						Assert_MM_true(NULL != env->_cycleState->_externalCycleState);
						/* We changed the state from REMEMBERED to CLEARED, so this will not be enqueued back onto the region's reference queue.
						 * However, GMP has to revisit this reference to mark the referent in its own mark map.
						 */
						_extensions->cardTable->dirtyCardWithValue(env, referenceObj, CARD_GMP_MUST_SCAN);
					}
				} else {
					referentSlotObject.writeReferenceToSlot(NULL);
				}

				/* Check if the reference has a queue */
				if (0 != J9GC_J9VMJAVALANGREFERENCE_QUEUE(env, referenceObj)) {
					/* Reference object can be enqueued onto the finalizable list */
					referenceStats->_enqueued += 1;
					buffer.add(env, referenceObj);
					env->_cycleState->_finalizationRequired = true;
				}
			}
		}

		switch (J9GC_J9VMJAVALANGREFERENCE_STATE(env, referenceObj)) {
		case GC_ObjectModel::REF_STATE_REMEMBERED:
			Assert_MM_true(NULL != env->_cycleState->_externalCycleState);
			/* This reference object was on a list of GMP reference objects at the start of the cycle. Restore it to its original condition. */
			J9GC_J9VMJAVALANGREFERENCE_STATE(env, referenceObj) = GC_ObjectModel::REF_STATE_INITIAL;
			env->getGCEnvironment()->_referenceObjectBuffer->add(env, referenceObj);
			break;
		case GC_ObjectModel::REF_STATE_CLEARED:
			break;
		case GC_ObjectModel::REF_STATE_INITIAL:
			/* if the object isn't in nursery space it should have been REMEMBERED */
			Assert_MM_true(isObjectInNurseryMemory(referenceObj));
			break;
		case GC_ObjectModel::REF_STATE_ENQUEUED:
			/* this object shouldn't have been on the list */
			Assert_MM_unreachable();
			break;
		default:
			Assert_MM_unreachable();
			break;
		}

		referenceObj = nextReferenceObj;
	}
	buffer.flush(env);
}
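/* Illustrative sketch, not part of the original file: the soft reference aging decision that
 * processReferenceList() above applies when the referent is still live, reduced to a standalone
 * helper (the name is hypothetical). A soft reference whose referent survives is simply aged by one
 * per cycle until it reaches the configured maximum; only references whose referent did not survive
 * the copy-forward are cleared and possibly enqueued.
 */
static U_32
sketchNextSoftReferenceAge(U_32 currentAge, U_32 maxSoftReferenceAge)
{
	U_32 nextAge = currentAge;
	if (currentAge < maxSoftReferenceAge) {
		/* not old enough to clear yet - age it by one cycle */
		nextAge = currentAge + 1;
	}
	return nextAge;
}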
void
MM_CopyForwardScheme::rememberReferenceList(MM_EnvironmentVLHGC *env, J9Object* headOfList)
{
	Assert_MM_true((NULL == headOfList) || (NULL != env->_cycleState->_externalCycleState));
	/* If phantom reference processing has already started this list will never be processed */
	Assert_MM_true(0 == _phantomReferenceRegionsToProcess);

	J9Object* referenceObj = headOfList;
	while (NULL != referenceObj) {
		J9Object* next = _extensions->accessBarrier->getReferenceLink(referenceObj);
		I_32 referenceState = J9GC_J9VMJAVALANGREFERENCE_STATE(env, referenceObj);
		switch (referenceState) {
		case GC_ObjectModel::REF_STATE_INITIAL:
			/* The reference object was on a list of GMP reference objects at the start of the cycle. Remember this */
			J9GC_J9VMJAVALANGREFERENCE_STATE(env, referenceObj) = GC_ObjectModel::REF_STATE_REMEMBERED;
			if (!isObjectInEvacuateMemory(referenceObj)) {
				Assert_MM_true(_markMap->isBitSet(referenceObj));
				Assert_MM_true(!isObjectInNurseryMemory(referenceObj));
				env->getGCEnvironment()->_referenceObjectBuffer->add(env, referenceObj);
			}
			break;
		case GC_ObjectModel::REF_STATE_CLEARED:
			/* The reference object was cleared (probably by an explicit call to the clear() Java API).
			 * No need to remember it, since it's already in its terminal state.
			 */
			break;
		case GC_ObjectModel::REF_STATE_ENQUEUED:
			/* The reference object was enqueued. This could have happened either
			 * 1) during a previous GC (+ finalization), in which case it was removed from the list at GC time, or
			 * 2) in Java through an explicit enqueue(), in which case it may still be in the list.
			 * An explicit enqueue() clears the reference queue field. So, if we still see it in the list, the queue must be null.
			 * This GC will rebuild the list, after which the reference must not be on the list anymore.
*5170*/5171Assert_MM_true(0 == J9GC_J9VMJAVALANGREFERENCE_QUEUE(env, referenceObj));5172break;5173case GC_ObjectModel::REF_STATE_REMEMBERED:5174/* The reference object must not already be remembered */5175default:5176PORT_ACCESS_FROM_ENVIRONMENT(env);5177j9tty_printf(PORTLIB, "rememberReferenceList referenceObj=%p, referenceState=%zu\n", referenceObj, referenceState);5178Assert_MM_unreachable();5179}5180referenceObj = next;5181}5182}51835184void5185MM_CopyForwardScheme::rememberReferenceListsFromExternalCycle(MM_EnvironmentVLHGC *env)5186{5187GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);5188MM_HeapRegionDescriptorVLHGC *region = NULL;5189while(NULL != (region = regionIterator.nextRegion())) {5190if (region->_markData._shouldMark) {5191if (J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {5192rememberAndResetReferenceLists(env, region);5193}5194}5195}5196}51975198void5199MM_CopyForwardScheme::rememberAndResetReferenceLists(MM_EnvironmentVLHGC *env, MM_HeapRegionDescriptorVLHGC *region)5200{5201MM_ReferenceObjectList *referenceObjectList = region->getReferenceObjectList();5202UDATA referenceObjectOptions = env->_cycleState->_referenceObjectOptions;52035204if (0 == (referenceObjectOptions & MM_CycleState::references_clear_weak)) {5205referenceObjectList->startWeakReferenceProcessing();5206J9Object* headOfList = referenceObjectList->getPriorWeakList();5207if (NULL != headOfList) {5208Trc_MM_CopyForwardScheme_rememberAndResetReferenceLists_rememberWeak(env->getLanguageVMThread(), region, headOfList);5209rememberReferenceList(env, headOfList);5210}5211}52125213if (0 == (referenceObjectOptions & MM_CycleState::references_clear_soft)) {5214referenceObjectList->startSoftReferenceProcessing();5215J9Object* headOfList = referenceObjectList->getPriorSoftList();5216if (NULL != headOfList) {5217Trc_MM_CopyForwardScheme_rememberAndResetReferenceLists_rememberSoft(env->getLanguageVMThread(), region, headOfList);5218rememberReferenceList(env, headOfList);5219}5220}52215222if (0 == (referenceObjectOptions & MM_CycleState::references_clear_phantom)) {5223referenceObjectList->startPhantomReferenceProcessing();5224J9Object* headOfList = referenceObjectList->getPriorPhantomList();5225if (NULL != headOfList) {5226Trc_MM_CopyForwardScheme_rememberAndResetReferenceLists_rememberPhantom(env->getLanguageVMThread(), region, headOfList);5227rememberReferenceList(env, headOfList);5228}5229}52305231referenceObjectList->resetPriorLists();5232}52335234#if defined(J9VM_GC_FINALIZATION)5235void5236MM_CopyForwardScheme::scanFinalizableObjects(MM_EnvironmentVLHGC *env)5237{5238GC_FinalizeListManager * finalizeListManager = _extensions->finalizeListManager;52395240/* If we're copying objects this code must be run single-threaded and we should only be here if work is actually required */5241/* This function is also used during abort; these assertions aren't applicable to that case because objects can't be copied during abort */5242Assert_MM_true(_abortInProgress || env->_currentTask->isSynchronized());5243Assert_MM_true(_abortInProgress || _shouldScanFinalizableObjects);5244Assert_MM_true(_abortInProgress || finalizeListManager->isFinalizableObjectProcessingRequired());52455246/* walk finalizable objects loaded by the system class loader */5247j9object_t systemObject = finalizeListManager->resetSystemFinalizableObjects();5248if (NULL != systemObject) {5249scanFinalizableList(env, systemObject);5250}52515252/* walk finalizable objects loaded by the all other class loaders */5253j9object_t defaultObject = 
finalizeListManager->resetDefaultFinalizableObjects();5254if (NULL != defaultObject) {5255scanFinalizableList(env, defaultObject);5256}5257525852595260{5261/* walk reference objects */5262GC_FinalizableReferenceBuffer referenceBuffer(_extensions);5263j9object_t referenceObject = finalizeListManager->resetReferenceObjects();5264while (NULL != referenceObject) {5265j9object_t next = NULL;5266if(!isLiveObject(referenceObject)) {5267Assert_MM_true(isObjectInEvacuateMemory(referenceObject));5268MM_ForwardedHeader forwardedHeader(referenceObject, _extensions->compressObjectReferences());5269if (!forwardedHeader.isForwardedPointer()) {5270Assert_MM_mustBeClass(_extensions->objectModel.getPreservedClass(&forwardedHeader));5271next = _extensions->accessBarrier->getReferenceLink(referenceObject);52725273MM_AllocationContextTarok *reservingContext = getContextForHeapAddress(referenceObject);5274J9Object* copyObject = copy(env, reservingContext, &forwardedHeader);5275if ( (NULL == copyObject) || (referenceObject == copyObject) ) {5276referenceBuffer.add(env, referenceObject);5277} else {5278/* It's only safe to copy objects on the finalizable list if we're in single threaded mode */5279Assert_MM_true(!_abortInProgress);5280referenceBuffer.add(env, copyObject);5281}5282} else {5283J9Object *forwardedPtr = forwardedHeader.getForwardedObject();5284Assert_MM_true(NULL != forwardedPtr);5285next = _extensions->accessBarrier->getReferenceLink(forwardedPtr);5286referenceBuffer.add(env, forwardedPtr);5287}5288} else {5289next = _extensions->accessBarrier->getReferenceLink(referenceObject);5290referenceBuffer.add(env, referenceObject);5291}52925293referenceObject = next;5294}5295referenceBuffer.flush(env);5296}5297}52985299void5300MM_CopyForwardScheme::scanFinalizableList(MM_EnvironmentVLHGC *env, j9object_t headObject)5301{5302GC_FinalizableObjectBuffer objectBuffer(_extensions);53035304while (NULL != headObject) {5305j9object_t next = NULL;53065307if(!isLiveObject(headObject)) {5308Assert_MM_true(isObjectInEvacuateMemory(headObject));5309MM_ForwardedHeader forwardedHeader(headObject, _extensions->compressObjectReferences());5310if (!forwardedHeader.isForwardedPointer()) {5311Assert_MM_mustBeClass(_extensions->objectModel.getPreservedClass(&forwardedHeader));5312next = _extensions->accessBarrier->getFinalizeLink(headObject);53135314MM_AllocationContextTarok *reservingContext = getContextForHeapAddress(headObject);5315J9Object* copyObject = copy(env, reservingContext, &forwardedHeader);5316if ( (NULL == copyObject) || (headObject == copyObject) ) {5317objectBuffer.add(env, headObject);5318} else {5319/* It's only safe to copy objects on the finalizable list if we're in single threaded mode */5320Assert_MM_true(!_abortInProgress);5321objectBuffer.add(env, copyObject);5322}5323} else {5324J9Object *forwardedPtr = forwardedHeader.getForwardedObject();5325Assert_MM_true(NULL != forwardedPtr);5326next = _extensions->accessBarrier->getFinalizeLink(forwardedPtr);5327objectBuffer.add(env, forwardedPtr);5328}5329} else {5330next = _extensions->accessBarrier->getFinalizeLink(headObject);5331objectBuffer.add(env, headObject);5332}53335334headObject = next;5335}53365337objectBuffer.flush(env);5338}5339#endif /* J9VM_GC_FINALIZATION */53405341void5342MM_CopyForwardScheme::removeFreeMemoryCandidate(MM_EnvironmentVLHGC* env, MM_ReservedRegionListHeader* regionList, MM_HeapRegionDescriptorVLHGC *region)5343{5344Assert_MM_true(NULL != regionList->_freeMemoryCandidates);5345Assert_MM_true(0 < 
void
MM_CopyForwardScheme::removeFreeMemoryCandidate(MM_EnvironmentVLHGC* env, MM_ReservedRegionListHeader* regionList, MM_HeapRegionDescriptorVLHGC *region)
{
	Assert_MM_true(NULL != regionList->_freeMemoryCandidates);
	Assert_MM_true(0 < regionList->_freeMemoryCandidateCount);

	regionList->_freeMemoryCandidateCount -= 1;

	MM_HeapRegionDescriptorVLHGC *next = region->_copyForwardData._nextRegion;
	MM_HeapRegionDescriptorVLHGC *previous = region->_copyForwardData._previousRegion;
	if (NULL != next) {
		next->_copyForwardData._previousRegion = previous;
	}
	if (NULL != previous) {
		previous->_copyForwardData._nextRegion = next;
		Assert_MM_true(previous != previous->_copyForwardData._nextRegion);
	} else {
		Assert_MM_true(region == regionList->_freeMemoryCandidates);
		regionList->_freeMemoryCandidates = next;
	}
}

void
MM_CopyForwardScheme::insertFreeMemoryCandidate(MM_EnvironmentVLHGC* env, MM_ReservedRegionListHeader* regionList, MM_HeapRegionDescriptorVLHGC *region)
{
	region->_copyForwardData._nextRegion = regionList->_freeMemoryCandidates;
	region->_copyForwardData._previousRegion = NULL;
	if (NULL != regionList->_freeMemoryCandidates) {
		regionList->_freeMemoryCandidates->_copyForwardData._previousRegion = region;
	}
	regionList->_freeMemoryCandidates = region;
	regionList->_freeMemoryCandidateCount += 1;
}

void
MM_CopyForwardScheme::convertFreeMemoryCandidateToSurvivorRegion(MM_EnvironmentVLHGC* env, MM_HeapRegionDescriptorVLHGC *region)
{
	Trc_MM_CopyForwardScheme_convertFreeMemoryCandidateToSurvivorRegion_Entry(env->getLanguageVMThread(), region);
	Assert_MM_true(NULL != region);
	Assert_MM_true(MM_HeapRegionDescriptor::ADDRESS_ORDERED_MARKED == region->getRegionType());
	Assert_MM_false(region->isSurvivorRegion());
	Assert_MM_false(region->isFreshSurvivorRegion());

	setRegionAsSurvivor(env, region, false);

	/* TODO: Remembering does not really have to be done under a lock, but the dual (prev, current) list implementation indirectly forces us to do it this way. */
	rememberAndResetReferenceLists(env, region);

	Trc_MM_CopyForwardScheme_convertFreeMemoryCandidateToSurvivorRegion_Exit(env->getLanguageVMThread());
}
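
/**
 * Flag the region as a survivor region for the current copy-forward. The region's allocation age is
 * converted into a (usedBytes * age) product so that the ages of caches copied into the region can be
 * merged incrementally; setAllocationAgeForMergedRegion() converts the product back to a pure age at
 * the end of the GC.
 */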
void
MM_CopyForwardScheme::setRegionAsSurvivor(MM_EnvironmentVLHGC* env, MM_HeapRegionDescriptorVLHGC *region, bool freshSurvivor)
{
	UDATA usedBytes = region->getSize() - region->getMemoryPool()->getFreeMemoryAndDarkMatterBytes();

	/* Convert the allocation age into a (usedBytes * age) product. It will be converted back to a pure age at the end of the GC.
	 * In the meantime, as caches are allocated from the region, their ages will be merged into the product.
	 */
	double allocationAgeSizeProduct = (double)usedBytes * (double)region->getAllocationAge();

	Trc_MM_CopyForwardScheme_setRegionAsSurvivor(env->getLanguageVMThread(), _regionManager->mapDescriptorToRegionTableIndex(region), MM_CompactGroupManager::getCompactGroupNumber(env, region),
			(double)region->getAllocationAge() / (1024 * 1024), (double)usedBytes / (1024 * 1024), allocationAgeSizeProduct / (1024 * 1024) / (1024 * 1024));

	Assert_MM_true(0.0 == region->getAllocationAgeSizeProduct());
	region->setAllocationAgeSizeProduct(allocationAgeSizeProduct);
	if (freshSurvivor) {
		region->resetAgeBounds();
	}

	/* update the pool so it only knows about the free memory occurring before the survivor base. We will add whatever we don't use at the end of the copy-forward */
	Assert_MM_false(region->_copyForwardData._requiresPhantomReferenceProcessing);
	region->_copyForwardData._survivor = true;
	region->_copyForwardData._freshSurvivor = freshSurvivor;
}

void
MM_CopyForwardScheme::setAllocationAgeForMergedRegion(MM_EnvironmentVLHGC* env, MM_HeapRegionDescriptorVLHGC *region)
{
	UDATA compactGroup = MM_CompactGroupManager::getCompactGroupNumber(env, region);
	UDATA usedBytes = region->getSize() - region->getMemoryPool()->getFreeMemoryAndDarkMatterBytes();

	Assert_MM_true(0 != usedBytes);

	/* convert the allocation age product (usedBytes * age) back to a pure age */
	U_64 newAllocationAge = (U_64)(region->getAllocationAgeSizeProduct() / (double)usedBytes);

	Trc_MM_CopyForwardScheme_setAllocationAgeForMergedRegion(env->getLanguageVMThread(), _regionManager->mapDescriptorToRegionTableIndex(region), compactGroup,
			region->getAllocationAgeSizeProduct() / (1024 * 1024) / (1024 * 1024), (double)usedBytes / (1024 * 1024), (double)newAllocationAge / (1024 * 1024),
			(double)region->getLowerAgeBound() / (1024 * 1024), (double)region->getUpperAgeBound() / (1024 * 1024));

	if (_extensions->tarokAllocationAgeEnabled) {
		Assert_MM_true(newAllocationAge < _extensions->compactGroupPersistentStats[compactGroup]._maxAllocationAge);
		Assert_MM_true((MM_CompactGroupManager::getRegionAgeFromGroup(env, compactGroup) == 0) || (newAllocationAge >= _extensions->compactGroupPersistentStats[compactGroup - 1]._maxAllocationAge));
	}

	UDATA logicalAge = 0;
	if (_extensions->tarokAllocationAgeEnabled) {
		logicalAge = MM_CompactGroupManager::calculateLogicalAgeForRegion(env, newAllocationAge);
	} else {
		logicalAge = MM_CompactGroupManager::getRegionAgeFromGroup(env, compactGroup);
	}

	region->setAge(newAllocationAge, logicalAge);
	/* reset the aging auxiliary data for future use */
	region->setAllocationAgeSizeProduct(0.0);
}

bool
MM_CopyForwardScheme::isObjectInNoEvacuationRegions(MM_EnvironmentVLHGC *env, J9Object *objectPtr)
{
	if ((NULL == objectPtr) || (0 == _regionCountCannotBeEvacuated)) {
		return false;
	}
	MM_HeapRegionDescriptorVLHGC *region = (MM_HeapRegionDescriptorVLHGC *)_regionManager->tableDescriptorForAddress(objectPtr);
	return region->_markData._noEvacuation;
}

bool
MM_CopyForwardScheme::randomDecideForceNonEvacuatedRegion(UDATA ratio)
{
	bool ret = false;
	if ((0 < ratio) && (ratio <= 100)) {
		ret = ((UDATA)(rand() % 100) <= (UDATA)(ratio - 1));
	}
	return ret;
}

MMINLINE bool
MM_CopyForwardScheme::isCompressedSurvivor(void *heapAddr)
{
	UDATA compressedCardOffset = ((UDATA)heapAddr - (UDATA)_heapBase) / CARD_SIZE;
	UDATA compressedCardIndex = compressedCardOffset / COMPRESSED_CARDS_PER_WORD;
	UDATA compressedSurvivorWord = _compressedSurvivorTable[compressedCardIndex];
	bool isSurvivor = false;

	if (AllCompressedCardsInWordClean != compressedSurvivorWord) {
		UDATA bit = compressedCardOffset % COMPRESSED_CARDS_PER_WORD;
		isSurvivor = (CompressedCardSurvivor == ((compressedSurvivorWord >> bit) & 1));
	}
	return isSurvivor;
}
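
/*
 * Illustrative example of the compressed survivor card mapping (values depend on the build configuration):
 * assuming CARD_SIZE is 512 bytes and COMPRESSED_CARDS_PER_WORD is 64 on a 64-bit build, an address at
 * _heapBase + 0x12345 lies in compressed card offset 0x12345 / 512 = 0x91 (145), which is bit 145 % 64 = 17
 * of word 145 / 64 = 2 in _compressedSurvivorTable.
 */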
/**
 * Set the bits in the compressed survivor table (one bit per card) that correspond to the given heap address
 * range. The table identifies whether a live object is in survivor memory during the current PGC.
 * setCompressedSurvivorCards() is called when free memory is acquired from a region and when preserved
 * TLH remainders are prepared.
 */
MMINLINE void
MM_CopyForwardScheme::setCompressedSurvivorCards(MM_EnvironmentVLHGC *env, void *startHeapAddress, void *endHeapAddress)
{
	UDATA compressedCardStartOffset = ((UDATA)startHeapAddress - (UDATA)_heapBase) / CARD_SIZE;
	UDATA compressedCardStartIndex = compressedCardStartOffset / COMPRESSED_CARDS_PER_WORD;
	UDATA compressedCardEndOffset = (((UDATA)endHeapAddress - (UDATA)_heapBase) + (CARD_SIZE - 1)) / CARD_SIZE;
	UDATA compressedCardEndIndex = compressedCardEndOffset / COMPRESSED_CARDS_PER_WORD;
	UDATA mask = 1;
	UDATA endOfWord = ((UDATA)1) << (COMPRESSED_CARDS_PER_WORD - 1);
	UDATA compressedSurvivorWord = AllCompressedCardsInWordClean;

	UDATA *compressedSurvivor = &_compressedSurvivorTable[compressedCardStartIndex];

	UDATA shiftStart = compressedCardStartOffset % COMPRESSED_CARDS_PER_WORD;
	mask = mask << shiftStart;
	UDATA offset = compressedCardStartOffset;
	UDATA idx = compressedCardStartIndex;
	if (idx == compressedCardEndIndex) {
		endOfWord = ((UDATA)1) << ((compressedCardEndOffset - 1) % COMPRESSED_CARDS_PER_WORD);
	}
	while (offset < compressedCardEndOffset) {
		/* invert bit */
		compressedSurvivorWord ^= mask;

		if (mask == endOfWord) {
			/* only the first and last words need atomic updates; intermediate words are written non-atomically */
			if ((idx != compressedCardStartIndex) && (idx != compressedCardEndIndex)) {
				*compressedSurvivor |= compressedSurvivorWord;
			} else {
				/* atomic update */
				volatile UDATA *localAddr = compressedSurvivor;
				UDATA oldValue = *localAddr;
				UDATA newValue = oldValue | compressedSurvivorWord;
				if (newValue != oldValue) {
					while ((MM_AtomicOperations::lockCompareExchange(localAddr, oldValue, newValue)) != oldValue) {
						oldValue = *localAddr;
						newValue = oldValue | compressedSurvivorWord;
					}
				}
			}
			compressedSurvivor++;
			compressedSurvivorWord = AllCompressedCardsInWordClean;
			idx += 1;
			if (idx == compressedCardEndIndex) {
				endOfWord = ((UDATA)1) << ((compressedCardEndOffset - 1) % COMPRESSED_CARDS_PER_WORD);
			}
			mask = 1;
		} else {
			/* mask for the next bit to handle */
			mask = mask << 1;
		}
		offset += 1;
	}
}

MMINLINE void
MM_CopyForwardScheme::cleanCompressedSurvivorCardTable(MM_EnvironmentVLHGC *env)
{
	UDATA compressedSurvivorTableSize = _extensions->heap->getMaximumPhysicalRange() / (CARD_SIZE * BITS_PER_BYTE);
	memset((void*)_compressedSurvivorTable, AllCompressedCardsInByteClean, compressedSurvivorTableSize);
}

void
MM_CopyForwardScheme::abandonTLHRemainders(MM_EnvironmentVLHGC *env)
{
	for (UDATA compactGroup = 0; compactGroup < _compactGroupMaxCount; compactGroup++) {
		MM_CopyForwardCompactGroup *copyForwardCompactGroup = &env->_copyForwardCompactGroups[compactGroup];
		if (_extensions->recycleRemainders) {
			if ((MM_CompactGroupManager::getRegionAgeFromGroup(env, compactGroup) >= _extensions->tarokNurseryMaxAge._valueSpecified) &&
					(copyForwardCompactGroup->getTLHRemainderSize() >= _extensions->minimumFreeSizeForSurvivor)) {
				copyForwardCompactGroup->recycleTLHRemainder(env);
			} else {
				copyForwardCompactGroup->discardTLHRemainder(env);
			}
		} else {
			copyForwardCompactGroup->discardTLHRemainder(env);
		}
	}
}