Path: blob/master/runtime/gc_vlhgc/AllocationContextBalanced.cpp
/*******************************************************************************
 * Copyright (c) 1991, 2021 IBM Corp. and others
 *
 * This program and the accompanying materials are made available under
 * the terms of the Eclipse Public License 2.0 which accompanies this
 * distribution and is available at https://www.eclipse.org/legal/epl-2.0/
 * or the Apache License, Version 2.0 which accompanies this distribution and
 * is available at https://www.apache.org/licenses/LICENSE-2.0.
 *
 * This Source Code may also be made available under the following
 * Secondary Licenses when the conditions for such availability set
 * forth in the Eclipse Public License, v. 2.0 are satisfied: GNU
 * General Public License, version 2 with the GNU Classpath
 * Exception [1] and GNU General Public License, version 2 with the
 * OpenJDK Assembly Exception [2].
 *
 * [1] https://www.gnu.org/software/classpath/license.html
 * [2] http://openjdk.java.net/legal/assembly-exception.html
 *
 * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 OR GPL-2.0 WITH Classpath-exception-2.0 OR LicenseRef-GPL-2.0 WITH Assembly-exception
 *******************************************************************************/

#include "j9.h"
#include "j9cfg.h"
#include "j9protos.h"
#include "j9consts.h"
#include "modronopt.h"
#include "ModronAssertions.h"

#include "AllocationContextBalanced.hpp"

#include "AllocateDescription.hpp"
#include "AllocationContextTarok.hpp"
#include "CardTable.hpp"
#include "EnvironmentBase.hpp"
#include "EnvironmentVLHGC.hpp"
#include "HeapRegionDescriptorVLHGC.hpp"
#include "HeapRegionManager.hpp"
#include "MemoryPool.hpp"
#include "MemorySubSpaceTarok.hpp"
#include "ObjectAllocationInterface.hpp"

MM_AllocationContextBalanced *
MM_AllocationContextBalanced::newInstance(MM_EnvironmentBase *env, MM_MemorySubSpaceTarok *subspace, UDATA numaNode, UDATA allocationContextNumber)
{
	MM_AllocationContextBalanced *context = (MM_AllocationContextBalanced *)env->getForge()->allocate(sizeof(MM_AllocationContextBalanced), MM_AllocationCategory::FIXED, J9_GET_CALLSITE());
	if (context) {
		new(context) MM_AllocationContextBalanced(subspace, numaNode, allocationContextNumber);
		if (!context->initialize(env)) {
			context->kill(env);
			context = NULL;
		}
	}
	return context;
}

/**
 * Initialization.
 */
bool
MM_AllocationContextBalanced::initialize(MM_EnvironmentBase *env)
{
	if (!MM_AllocationContext::initialize(env)) {
		return false;
	}

	MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);
	if (!_contextLock.initialize(env, &extensions->lnrlOptions, "MM_AllocationContextBalanced:_contextLock")) {
		return false;
	}
	if (!_freeListLock.initialize(env, &extensions->lnrlOptions, "MM_AllocationContextBalanced:_freeListLock")) {
		return false;
	}

	UDATA freeProcessorNodeCount = 0;
	J9MemoryNodeDetail const *freeProcessorNodes = extensions->_numaManager.getFreeProcessorPool(&freeProcessorNodeCount);
	/* our local cache needs +1 since we reserve a slot for each context to use */
	_freeProcessorNodeCount = freeProcessorNodeCount + 1;
	UDATA arraySizeInBytes = sizeof(UDATA) * _freeProcessorNodeCount;
	_freeProcessorNodes = (UDATA *)env->getForge()->allocate(arraySizeInBytes, MM_AllocationCategory::FIXED, J9_GET_CALLSITE());
	if (NULL != _freeProcessorNodes) {
		memset(_freeProcessorNodes, 0x0, arraySizeInBytes);
		_freeProcessorNodes[0] = getNumaNode();
		for (UDATA i = 0; i < freeProcessorNodeCount; i++) {
			/* we save at i+1 since index 0 is reserved */
			_freeProcessorNodes[i+1] = freeProcessorNodes[i].j9NodeNumber;
		}
	} else {
		return false;
	}

	_cachedReplenishPoint = this;
	_heapRegionManager = MM_GCExtensions::getExtensions(env)->heapRegionManager;

	return true;
}

/**
 * Shut down.
 */
void
MM_AllocationContextBalanced::tearDown(MM_EnvironmentBase *env)
{
	Assert_MM_true(NULL == _allocationRegion);
	Assert_MM_true(NULL == _nonFullRegions.peekFirstRegion());
	Assert_MM_true(NULL == _discardRegionList.peekFirstRegion());

	_contextLock.tearDown();
	_freeListLock.tearDown();

	if (NULL != _freeProcessorNodes) {
		env->getForge()->free(_freeProcessorNodes);
		_freeProcessorNodes = NULL;
	}

	MM_AllocationContext::tearDown(env);
}

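/**
 * Move every region this context owns for active allocation (the current allocation region,
 * the non-full list and the discard list) onto the flushed list and reset the cached
 * replenish point. After this call the context holds no allocatable memory.
 */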
void
MM_AllocationContextBalanced::flushInternal(MM_EnvironmentBase *env)
{
	/* flush all the regions we own for active allocation */
	if (NULL != _allocationRegion) {
		MM_MemoryPool *pool = _allocationRegion->getMemoryPool();
		Assert_MM_true(NULL != pool);
		UDATA allocatableBytes = pool->getActualFreeMemorySize();
		_freeMemorySize -= allocatableBytes;
		_flushedRegions.insertRegion(_allocationRegion);
		_allocationRegion = NULL;
		Trc_MM_AllocationContextBalanced_flushInternal_clearAllocationRegion(env->getLanguageVMThread(), this);
	}
	MM_HeapRegionDescriptorVLHGC *walk = _nonFullRegions.peekFirstRegion();
	while (NULL != walk) {
		Assert_MM_true(this == walk->_allocateData._owningContext);
		MM_HeapRegionDescriptorVLHGC *next = _nonFullRegions.peekRegionAfter(walk);
		_nonFullRegions.removeRegion(walk);
		MM_MemoryPool *pool = walk->getMemoryPool();
		Assert_MM_true(NULL != pool);
		UDATA allocatableBytes = pool->getActualFreeMemorySize();
		_freeMemorySize -= allocatableBytes;
		_flushedRegions.insertRegion(walk);
		walk = next;
	}
	/* flush all the regions we own which were no longer candidates for allocation */
	walk = _discardRegionList.peekFirstRegion();
	while (NULL != walk) {
		Assert_MM_true(this == walk->_allocateData._owningContext);
		MM_HeapRegionDescriptorVLHGC *next = _discardRegionList.peekRegionAfter(walk);
		_discardRegionList.removeRegion(walk);
		MM_MemoryPool *pool = walk->getMemoryPool();
		Assert_MM_true(NULL != pool);
		pool->recalculateMemoryPoolStatistics(env);
		_flushedRegions.insertRegion(walk);
		walk = next;
	}
	_cachedReplenishPoint = this;
	Assert_MM_true(0 == _freeMemorySize);
}

void
MM_AllocationContextBalanced::flush(MM_EnvironmentBase *env)
{
	flushInternal(env);
}

void
MM_AllocationContextBalanced::flushForShutdown(MM_EnvironmentBase *env)
{
	flushInternal(env);
}

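/**
 * Allocate a TLH under the common lock, replenishing the active region on failure.
 * If replenishment also fails and shouldCollectOnFailure is set, fall back to the
 * subspace's replenishAllocationContextFailed() path (which may invoke the collector).
 */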
void *
MM_AllocationContextBalanced::allocateTLH(MM_EnvironmentBase *env, MM_AllocateDescription *allocateDescription, MM_ObjectAllocationInterface *objectAllocationInterface, bool shouldCollectOnFailure)
{
	void *result = NULL;
	lockCommon();
	result = lockedAllocateTLH(env, allocateDescription, objectAllocationInterface);
	/* if we failed, try to replenish */
	if (NULL == result) {
		result = lockedReplenishAndAllocate(env, objectAllocationInterface, allocateDescription, MM_MemorySubSpace::ALLOCATION_TYPE_TLH);
	}
	unlockCommon();
	/* if that still fails, try to invoke the collector */
	if (shouldCollectOnFailure && (NULL == result)) {
		result = _subspace->replenishAllocationContextFailed(env, _subspace, this, objectAllocationInterface, allocateDescription, MM_MemorySubSpace::ALLOCATION_TYPE_TLH);
	}
	return result;
}

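/**
 * TLH allocation helper, called with the context lock held: try the current allocation
 * region first, then walk the non-full region list. Regions which cannot satisfy a TLH
 * allocation are moved to the discard list; a region which succeeds becomes the new
 * fast-path allocation region.
 */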
void *
MM_AllocationContextBalanced::lockedAllocateTLH(MM_EnvironmentBase *env, MM_AllocateDescription *allocateDescription, MM_ObjectAllocationInterface *objectAllocationInterface)
{
	void *result = NULL;
	/* first, try allocating the TLH in our _allocationRegion (fast-path) */
	if (NULL != _allocationRegion) {
		MM_MemoryPool *memoryPool = _allocationRegion->getMemoryPool();
		Assert_MM_true(NULL != memoryPool);
		UDATA spaceBefore = memoryPool->getActualFreeMemorySize();
		result = objectAllocationInterface->allocateTLH(env, allocateDescription, _subspace, memoryPool);
		UDATA spaceAfter = memoryPool->getActualFreeMemorySize();
		if (NULL == result) {
			/* this region isn't useful so remove it from our list for consideration and add it to our discard list */
			Assert_MM_true(spaceAfter < memoryPool->getMinimumFreeEntrySize());
			Assert_MM_true(spaceBefore == spaceAfter);
			_freeMemorySize -= spaceBefore;
			_discardRegionList.insertRegion(_allocationRegion);
			_allocationRegion = NULL;
			Trc_MM_AllocationContextBalanced_lockedAllocateTLH_clearAllocationRegion(env->getLanguageVMThread(), this);
		} else {
			Assert_MM_true(spaceBefore > spaceAfter);
			_freeMemorySize -= (spaceBefore - spaceAfter);
		}
	}
	/* if we couldn't satisfy the allocate, go to the non-full region list (slow-path) before failing over into replenishment */
	if (NULL == result) {
		/* scan through our regions which are still active for allocation and attempt the TLH allocation in each. Any which are too full or fragmented to satisfy a TLH allocation must be moved to the "discard" list so we won't consider them for allocation until after the next collection */
		MM_HeapRegionDescriptorVLHGC *region = _nonFullRegions.peekFirstRegion();
		while ((NULL == result) && (NULL != region)) {
			MM_MemoryPool *memoryPool = region->getMemoryPool();
			Assert_MM_true(NULL != memoryPool);
			UDATA spaceBefore = memoryPool->getActualFreeMemorySize();
			result = objectAllocationInterface->allocateTLH(env, allocateDescription, _subspace, memoryPool);
			UDATA spaceAfter = memoryPool->getActualFreeMemorySize();
			MM_HeapRegionDescriptorVLHGC *next = _nonFullRegions.peekRegionAfter(region);
			/* remove this region from the list since we are either discarding it or re-promoting it to the fast-path */
			_nonFullRegions.removeRegion(region);
			if (NULL == result) {
				/* this region isn't useful so remove it from our list for consideration and add it to our discard list */
				Assert_MM_true(spaceAfter < memoryPool->getMinimumFreeEntrySize());
				Assert_MM_true(spaceBefore == spaceAfter);
				_freeMemorySize -= spaceBefore;
				_discardRegionList.insertRegion(region);
			} else {
				Assert_MM_true(spaceBefore > spaceAfter);
				_freeMemorySize -= (spaceBefore - spaceAfter);
				/* we succeeded so this region is a good choice for future fast-path allocations */
				Assert_MM_true(NULL == _allocationRegion);
				_allocationRegion = region;
				Trc_MM_AllocationContextBalanced_lockedAllocateTLH_setAllocationRegion(env->getLanguageVMThread(), this, region);
			}
			region = next;
		}
	}
	return result;
}

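/**
 * Allocate a single object under the common lock, replenishing and then (optionally)
 * invoking the collector path on failure. On success the allocate description is
 * stamped with the subspace and its object flags.
 */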
void *
MM_AllocationContextBalanced::allocateObject(MM_EnvironmentBase *env, MM_AllocateDescription *allocateDescription, bool shouldCollectOnFailure)
{
	void *result = NULL;
	lockCommon();
	result = lockedAllocateObject(env, allocateDescription);
	/* if we failed, try to replenish */
	if (NULL == result) {
		result = lockedReplenishAndAllocate(env, NULL, allocateDescription, MM_MemorySubSpace::ALLOCATION_TYPE_OBJECT);
	}
	unlockCommon();
	/* if that still fails, try to invoke the collector */
	if (shouldCollectOnFailure && (NULL == result)) {
		result = _subspace->replenishAllocationContextFailed(env, _subspace, this, NULL, allocateDescription, MM_MemorySubSpace::ALLOCATION_TYPE_OBJECT);
	}
	if (NULL != result) {
		allocateDescription->setObjectFlags(_subspace->getObjectFlags());
		allocateDescription->setMemorySubSpace(_subspace);
	}
	return result;
}

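/**
 * Object allocation helper, called with the context lock held: try the current allocation
 * region, then each region on the non-full list. A failed one-off object allocation demotes
 * the allocation region to the non-full list but does not discard it.
 */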
void *
MM_AllocationContextBalanced::lockedAllocateObject(MM_EnvironmentBase *env, MM_AllocateDescription *allocateDescription)
{
	Assert_MM_true(allocateDescription->getContiguousBytes() <= _heapRegionManager->getRegionSize());

	void *result = NULL;
	/* first, try allocating the object in our _allocationRegion (fast-path) */
	if (NULL != _allocationRegion) {
		MM_MemoryPool *memoryPool = _allocationRegion->getMemoryPool();
		Assert_MM_true(NULL != memoryPool);
		UDATA spaceBefore = memoryPool->getActualFreeMemorySize();
		result = memoryPool->allocateObject(env, allocateDescription);
		UDATA spaceAfter = memoryPool->getActualFreeMemorySize();
		if (NULL == result) {
			Assert_MM_true(spaceBefore == spaceAfter);
			/* if we failed the allocate, move the region into the non-full list since a TLH allocate can consume any space remaining, prior to discarding */
			_nonFullRegions.insertRegion(_allocationRegion);
			_allocationRegion = NULL;
			Trc_MM_AllocationContextBalanced_lockedAllocateObject_clearAllocationRegion(env->getLanguageVMThread(), this);
		} else {
			Assert_MM_true(spaceBefore > spaceAfter);
			_freeMemorySize -= (spaceBefore - spaceAfter);
		}
	}
	/* if we couldn't satisfy the allocate, go to the non-full region list (slow-path) before failing over into replenishment */
	if (NULL == result) {
		Assert_MM_true(NULL == _allocationRegion);
		/* scan through our active region list and attempt the allocation in each. Failing to satisfy a one-off object allocation, such as this, will not force a region into the discard list, however */
		MM_HeapRegionDescriptorVLHGC *region = _nonFullRegions.peekFirstRegion();
		while ((NULL == result) && (NULL != region)) {
			MM_MemoryPool *memoryPool = region->getMemoryPool();
			Assert_MM_true(NULL != memoryPool);
			UDATA spaceBefore = memoryPool->getActualFreeMemorySize();
			result = memoryPool->allocateObject(env, allocateDescription);
			if (NULL != result) {
				UDATA spaceAfter = memoryPool->getActualFreeMemorySize();
				Assert_MM_true(spaceBefore > spaceAfter);
				_freeMemorySize -= (spaceBefore - spaceAfter);
			}
			region = _nonFullRegions.peekRegionAfter(region);
		}
	} else {
		Assert_MM_true(NULL != _allocationRegion);
	}
	return result;
}

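/**
 * Allocate a region-sized arraylet leaf. Leaf regions are not cached by this context,
 * so this always replenishes through the subspace (and may invoke the collector path),
 * then zeroes the leaf outside of the context lock.
 */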
void *
MM_AllocationContextBalanced::allocateArrayletLeaf(MM_EnvironmentBase *env, MM_AllocateDescription *allocateDescription, bool shouldCollectOnFailure)
{
	/* this AC implementation doesn't try to cache leaf regions so just call into the subspace to hand us a region and then we will use it in lockedAllocateArrayletLeaf */
	void *result = NULL;
	lockCommon();
	result = lockedReplenishAndAllocate(env, NULL, allocateDescription, MM_MemorySubSpace::ALLOCATION_TYPE_LEAF);
	unlockCommon();
	/* if that fails, try to invoke the collector */
	if (shouldCollectOnFailure && (NULL == result)) {
		result = _subspace->replenishAllocationContextFailed(env, _subspace, this, NULL, allocateDescription, MM_MemorySubSpace::ALLOCATION_TYPE_LEAF);
	}
	if (NULL != result) {
		/* zero the leaf here since we are not under any of the context or exclusive locks */
		OMRZeroMemory(result, _heapRegionManager->getRegionSize());
	}
	return result;
}

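/**
 * Convert the given free region into an arraylet leaf for the spine recorded in the
 * allocate description, link it onto the spine region's leaf list (taking the common
 * context's lock if the spine has migrated there) and return the leaf's base address.
 */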
void *
MM_AllocationContextBalanced::lockedAllocateArrayletLeaf(MM_EnvironmentBase *envBase, MM_AllocateDescription *allocateDescription, MM_HeapRegionDescriptorVLHGC *freeRegionForArrayletLeaf)
{
	MM_EnvironmentVLHGC *env = MM_EnvironmentVLHGC::getEnvironment(envBase);

	Assert_MM_true(NULL != freeRegionForArrayletLeaf);
	Assert_MM_true(MM_HeapRegionDescriptor::FREE == freeRegionForArrayletLeaf->getRegionType());

	J9IndexableObject *spine = allocateDescription->getSpine();
	Assert_MM_true(NULL != spine);

	/* cache the allocate data pointer since we need to use it in several operations */
	MM_HeapRegionDataForAllocate *leafAllocateData = &(freeRegionForArrayletLeaf->_allocateData);
	/* ask the region to become a leaf type */
	leafAllocateData->taskAsArrayletLeaf(env);
	/* look up the spine region since we need to add this region to its leaf list */
	MM_HeapRegionDescriptorVLHGC *spineRegion = (MM_HeapRegionDescriptorVLHGC *)_heapRegionManager->tableDescriptorForAddress(spine);
	/* the leaf requires a pointer back to the spine object so that it can verify its liveness elsewhere in the collector */
	leafAllocateData->setSpine(spine);
	freeRegionForArrayletLeaf->resetAge(env, (U_64)_subspace->getBytesRemainingBeforeTaxation());
	/* add the leaf to the spine region's leaf list */
	/* We own the lock on the spine region's context when this call is made so we can safely manipulate this list.
	 * An exceptional scenario: A thread allocates a spine (and possibly a few arraylets), but does not complete the allocation. A global GC (or a series of regular PGCs) occurs
	 * that ages regions out to max age. The spine moves into a common context. Now, we successfully resume the leaf allocation, but the common lock that
	 * we already hold is no longer sufficient. We need to additionally acquire the common context's common lock, since multiple spines from different ACs could have come into this state,
	 * and, worse, multiple spines originally allocated from different ACs may end up in a single common context region.
	 */

	MM_AllocationContextTarok *spineContext = spineRegion->_allocateData._owningContext;
	if (this != spineContext) {
		Assert_MM_true(env->getCommonAllocationContext() == spineContext);
		/* The common allocation context is always an instance of AllocationContextBalanced */
		((MM_AllocationContextBalanced *)spineContext)->lockCommon();
	}

	leafAllocateData->addToArrayletLeafList(spineRegion);

	if (this != spineContext) {
		/* The common allocation context is always an instance of AllocationContextBalanced */
		((MM_AllocationContextBalanced *)spineContext)->unlockCommon();
	}

	/* return the base address of the leaf (the caller is responsible for zeroing it) */
	return freeRegionForArrayletLeaf->getLowAddress();
}

MM_HeapRegionDescriptorVLHGC *
MM_AllocationContextBalanced::collectorAcquireRegion(MM_EnvironmentBase *env)
{
	MM_HeapRegionDescriptorVLHGC *region = NULL;
	region = internalCollectorAcquireRegion(env);
	return region;
}

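/**
 * Acquire a region on behalf of the collector: replenish the active region without
 * consuming the taxation threshold, expanding the subspace as needed, then move the
 * acquired region directly onto the flushed list before returning it.
 */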
MM_HeapRegionDescriptorVLHGC *
MM_AllocationContextBalanced::internalCollectorAcquireRegion(MM_EnvironmentBase *env)
{
	MM_HeapRegionDescriptorVLHGC *region = NULL;

	lockCommon();
	Assert_MM_true(NULL == _nonFullRegions.peekFirstRegion());
	do {
		Assert_MM_true(NULL == _allocationRegion);
		region = internalReplenishActiveRegion(env, false);
		/* If we failed to allocate a region, attempt to expand.
		 * If we successfully expanded, we are not yet guaranteed that a retry on replenish will succeed,
		 * since another thread from another AC may steal the expanded regions. Thus we keep expanding
		 * until we succeed in replenishing or no more expansion is possible.
		 * This AC may not be the one receiving the expanded region, so this problem exists even without
		 * stealing.
		 */
	} while ((NULL == region) && (0 != _subspace->collectorExpand(env)));

	if (NULL != region) {
		Assert_MM_true(NULL == _nonFullRegions.peekFirstRegion());
		Assert_MM_true(region == _allocationRegion);
		UDATA regionSize = _heapRegionManager->getRegionSize();
		_freeMemorySize -= regionSize;

		_allocationRegion = NULL;
		Trc_MM_AllocationContextBalanced_internalCollectorAcquireRegion_clearAllocationRegion(env->getLanguageVMThread(), this);
		Assert_MM_true(NULL != region->getMemoryPool());
		_flushedRegions.insertRegion(region);
	}
	unlockCommon();

	return region;
}

void *
MM_AllocationContextBalanced::allocate(MM_EnvironmentBase *env, MM_ObjectAllocationInterface *objectAllocationInterface, MM_AllocateDescription *allocateDescription, MM_MemorySubSpace::AllocationType allocationType)
{
	void *result = NULL;
	switch (allocationType) {
	case MM_MemorySubSpace::ALLOCATION_TYPE_TLH:
		result = allocateTLH(env, allocateDescription, objectAllocationInterface, false);
		break;
	case MM_MemorySubSpace::ALLOCATION_TYPE_OBJECT:
		result = allocateObject(env, allocateDescription, false);
		break;
	case MM_MemorySubSpace::ALLOCATION_TYPE_LEAF:
		result = allocateArrayletLeaf(env, allocateDescription, false);
		break;
	default:
		Assert_MM_unreachable();
		break;
	}
	return result;
}

void *
MM_AllocationContextBalanced::lockedAllocate(MM_EnvironmentBase *env, MM_ObjectAllocationInterface *objectAllocationInterface, MM_AllocateDescription *allocateDescription, MM_MemorySubSpace::AllocationType allocationType)
{
	void *result = NULL;
	switch (allocationType) {
	case MM_MemorySubSpace::ALLOCATION_TYPE_OBJECT:
		result = lockedAllocateObject(env, allocateDescription);
		break;
	case MM_MemorySubSpace::ALLOCATION_TYPE_TLH:
		result = lockedAllocateTLH(env, allocateDescription, objectAllocationInterface);
		break;
	case MM_MemorySubSpace::ALLOCATION_TYPE_LEAF:
		/* callers allocating an arraylet leaf should call lockedAllocateArrayletLeaf() directly */
		Assert_MM_unreachable();
		break;
	default:
		Assert_MM_unreachable();
	}
	return result;
}

void
MM_AllocationContextBalanced::setNextSibling(MM_AllocationContextBalanced *sibling)
{
	Assert_MM_true(NULL == _nextSibling);
	_nextSibling = sibling;
	Assert_MM_true(NULL != _nextSibling);
}

void
MM_AllocationContextBalanced::setStealingCousin(MM_AllocationContextBalanced *cousin)
{
	Assert_MM_true(NULL == _stealingCousin);
	_stealingCousin = cousin;
	_nextToSteal = cousin;
	Assert_MM_true(NULL != _stealingCousin);
}

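/**
 * Accept a region being returned to its original owning context and route it to the
 * appropriate list: address-ordered regions become idle memory-pool regions, arraylet
 * leaves are returned to the free list, and recycling a FREE region is a caller error.
 */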
void
MM_AllocationContextBalanced::recycleRegion(MM_EnvironmentVLHGC *env, MM_HeapRegionDescriptorVLHGC *region)
{
	MM_HeapRegionDataForAllocate *allocateData = &region->_allocateData;
	MM_AllocationContextTarok *owningContext = allocateData->_owningContext;
	MM_AllocationContextTarok *originalOwningContext = allocateData->_originalOwningContext;
	Assert_MM_true((this == owningContext) || (this == originalOwningContext));
	Assert_MM_true(region->getNumaNode() == getNumaNode());
	if (NULL == originalOwningContext) {
		originalOwningContext = owningContext;
	}
	Assert_MM_true(this == originalOwningContext);

	/* the region is being returned to us, set the fields appropriately before returning it to the list */
	allocateData->_originalOwningContext = NULL;
	allocateData->_owningContext = this;

	switch (region->getRegionType()) {
	case MM_HeapRegionDescriptor::ADDRESS_ORDERED:
	case MM_HeapRegionDescriptor::ADDRESS_ORDERED_MARKED:
	{
		owningContext->removeRegionFromFlushedList(region);
		allocateData->taskAsIdlePool(env);
		_freeListLock.acquire();
		_idleMPRegions.insertRegion(region);
		_freeListLock.release();
		MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);
		if (extensions->tarokEnableExpensiveAssertions) {
			void *low = region->getLowAddress();
			void *high = region->getHighAddress();
			MM_CardTable *cardTable = extensions->cardTable;
			Card *card = cardTable->heapAddrToCardAddr(env, low);
			Card *toCard = cardTable->heapAddrToCardAddr(env, high);

			while (card < toCard) {
				Assert_MM_true(CARD_CLEAN == *card);
				card += 1;
			}
		}
	}
		break;
	case MM_HeapRegionDescriptor::ARRAYLET_LEAF:
		Assert_MM_true(NULL == allocateData->getNextArrayletLeafRegion());
		Assert_MM_true(NULL == allocateData->getSpine());

		if (MM_GCExtensions::getExtensions(env)->tarokDebugEnabled) {
			/* poison the unused region so we can identify it in a crash (to be removed when 1953 is stable) */
			memset(region->getLowAddress(), 0x0F, region->getSize());
		}
		allocateData->taskAsFreePool(env);
		/* now, return the region to our free list */
		addRegionToFreeList(env, region);
		break;
	case MM_HeapRegionDescriptor::FREE:
		/* calling recycle on a free region implies an incorrect assumption in the caller */
		Assert_MM_unreachable();
		break;
	default:
		Assert_MM_unreachable();
	}
}

void
MM_AllocationContextBalanced::tearDownRegion(MM_EnvironmentBase *env, MM_HeapRegionDescriptorVLHGC *region)
{
	MM_MemoryPoolAddressOrderedList *memoryPool = (MM_MemoryPoolAddressOrderedList *)region->getMemoryPool();
	if (NULL != memoryPool) {
		memoryPool->tearDown(env);
		region->setMemoryPool(NULL);
	}
}

void
MM_AllocationContextBalanced::addRegionToFreeList(MM_EnvironmentBase *env, MM_HeapRegionDescriptorVLHGC *region)
{
	Assert_MM_true(MM_HeapRegionDescriptor::FREE == region->getRegionType());
	Assert_MM_true(getNumaNode() == region->getNumaNode());
	Assert_MM_true(NULL == region->_allocateData._originalOwningContext);
	_freeListLock.acquire();
	_freeRegions.insertRegion(region);
	_freeListLock.release();
}

void
MM_AllocationContextBalanced::resetLargestFreeEntry()
{
	lockCommon();
	if (NULL != _allocationRegion) {
		_allocationRegion->getMemoryPool()->resetLargestFreeEntry();
	}
	MM_HeapRegionDescriptorVLHGC *region = _nonFullRegions.peekFirstRegion();
	while (NULL != region) {
		region->getMemoryPool()->resetLargestFreeEntry();
		region = _nonFullRegions.peekRegionAfter(region);
	}
	region = _discardRegionList.peekFirstRegion();
	while (NULL != region) {
		region->getMemoryPool()->resetLargestFreeEntry();
		region = _discardRegionList.peekRegionAfter(region);
	}
	region = _flushedRegions.peekFirstRegion();
	while (NULL != region) {
		region->getMemoryPool()->resetLargestFreeEntry();
		region = _flushedRegions.peekRegionAfter(region);
	}
	unlockCommon();
}

UDATA
MM_AllocationContextBalanced::getLargestFreeEntry()
{
	UDATA largest = 0;

	lockCommon();
	/* if we have a free region, the largest free entry is the region size */
	MM_HeapRegionDescriptorVLHGC *free = _idleMPRegions.peekFirstRegion();
	if (NULL == free) {
		free = _freeRegions.peekFirstRegion();
	}
	if (NULL != free) {
		largest = free->getSize();
	} else {
		if (NULL != _allocationRegion) {
			MM_MemoryPool *memoryPool = _allocationRegion->getMemoryPool();
			Assert_MM_true(NULL != memoryPool);
			UDATA candidate = memoryPool->getLargestFreeEntry();
			largest = candidate;
		}
		MM_HeapRegionDescriptorVLHGC *region = _nonFullRegions.peekFirstRegion();
		while (NULL != region) {
			MM_MemoryPool *memoryPool = region->getMemoryPool();
			Assert_MM_true(NULL != memoryPool);
			UDATA candidate = memoryPool->getLargestFreeEntry();
			largest = OMR_MAX(largest, candidate);
			region = _nonFullRegions.peekRegionAfter(region);
		}
		region = _flushedRegions.peekFirstRegion();
		while (NULL != region) {
			MM_MemoryPool *memoryPool = region->getMemoryPool();
			Assert_MM_true(NULL != memoryPool);
			UDATA candidate = memoryPool->getLargestFreeEntry();
			largest = OMR_MAX(largest, candidate);
			region = _flushedRegions.peekRegionAfter(region);
		}
	}
	unlockCommon();

	return largest;
}

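/**
 * Acquire a memory-pool region for the requesting context, first from contexts on our own
 * NUMA node and then, if that fails, by "stealing" from other nodes via the stealing-cousin
 * ring (recording the original owner so the region is identifiable as foreign).
 */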
MM_HeapRegionDescriptorVLHGC *
MM_AllocationContextBalanced::acquireMPRegionFromHeap(MM_EnvironmentBase *env, MM_MemorySubSpace *subspace, MM_AllocationContextTarok *requestingContext)
{
	MM_HeapRegionDescriptorVLHGC *region = acquireMPRegionFromNode(env, subspace, requestingContext);

	/* _nextToSteal will be this if NUMA is not enabled */
	if ((NULL == region) && (_nextToSteal != this)) {
		MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);
		Assert_MM_true(0 != extensions->_numaManager.getAffinityLeaderCount());
		/* we didn't get any memory yet we are in a NUMA system so we should steal from a foreign node */
		MM_AllocationContextBalanced *firstTheftAttempt = _nextToSteal;
		do {
			region = _nextToSteal->acquireMPRegionFromNode(env, subspace, requestingContext);
			if (NULL != region) {
				/* make sure that we record the original owner so that the region can be identified as foreign */
				Assert_MM_true(NULL == region->_allocateData._originalOwningContext);
				region->_allocateData._originalOwningContext = _nextToSteal;
			}
			/* advance to the next node whether we succeeded or not since we want to distribute our "theft" as evenly as possible */
			_nextToSteal = _nextToSteal->getStealingCousin();
			if (this == _nextToSteal) {
				/* never try to steal from ourselves since that wouldn't be possible and the code interprets this case as a uniform system */
				_nextToSteal = _nextToSteal->getStealingCousin();
			}
		} while ((NULL == region) && (firstTheftAttempt != _nextToSteal));
	}

	return region;
}

MM_HeapRegionDescriptorVLHGC *
MM_AllocationContextBalanced::acquireFreeRegionFromHeap(MM_EnvironmentBase *env)
{
	MM_HeapRegionDescriptorVLHGC *region = acquireFreeRegionFromNode(env);

	/* _nextToSteal will be this if NUMA is not enabled */
	if ((NULL == region) && (_nextToSteal != this)) {
		MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);
		Assert_MM_true(0 != extensions->_numaManager.getAffinityLeaderCount());
		/* we didn't get any memory yet we are in a NUMA system so we should steal from a foreign node */
		MM_AllocationContextBalanced *firstTheftAttempt = _nextToSteal;
		do {
			region = _nextToSteal->acquireFreeRegionFromNode(env);
			if (NULL != region) {
				region->_allocateData._originalOwningContext = _nextToSteal;
			}
			/* advance to the next node whether we succeeded or not since we want to distribute our "theft" as evenly as possible */
			_nextToSteal = _nextToSteal->getStealingCousin();
			if (this == _nextToSteal) {
				/* never try to steal from ourselves since that wouldn't be possible and the code interprets this case as a uniform system */
				_nextToSteal = _nextToSteal->getStealingCousin();
			}
		} while ((NULL == region) && (firstTheftAttempt != _nextToSteal));
	}

	return region;
}

MM_HeapRegionDescriptorVLHGC *
MM_AllocationContextBalanced::acquireMPRegionFromNode(MM_EnvironmentBase *env, MM_MemorySubSpace *subSpace, MM_AllocationContextTarok *requestingContext)
{
	Trc_MM_AllocationContextBalanced_acquireMPBPRegionFromNode_Entry(env->getLanguageVMThread(), this, requestingContext);
	/* this can only be called on the context itself or through stealing cousin relationships */
	Assert_MM_true((this == requestingContext) || (getNumaNode() != requestingContext->getNumaNode()));

	MM_HeapRegionDescriptorVLHGC *region = _cachedReplenishPoint->acquireMPRegionFromContext(env, subSpace, requestingContext);
	MM_AllocationContextBalanced *targetContext = _cachedReplenishPoint->getNextSibling();
	while ((NULL == region) && (targetContext != this)) {
		region = targetContext->acquireMPRegionFromContext(env, subSpace, requestingContext);
		if (NULL != region) {
			_cachedReplenishPoint = targetContext;
		}
		targetContext = targetContext->getNextSibling();
	}
	if (NULL != region) {
		/* Regions made available for allocation are identified by their region type (ADDRESS_ORDERED, as opposed to ADDRESS_ORDERED_MARKED) */
		Assert_MM_true(MM_HeapRegionDescriptor::ADDRESS_ORDERED == region->getRegionType());
		Assert_MM_true(requestingContext == region->_allocateData._owningContext);
		Assert_MM_true(getNumaNode() == region->getNumaNode());
	}
	Trc_MM_AllocationContextBalanced_acquireMPBPRegionFromNode_Exit(env->getLanguageVMThread(), region);
	return region;
}

MM_HeapRegionDescriptorVLHGC *
MM_AllocationContextBalanced::acquireMPRegionFromContext(MM_EnvironmentBase *envBase, MM_MemorySubSpace *subSpace, MM_AllocationContextTarok *requestingContext)
{
	MM_EnvironmentVLHGC *env = MM_EnvironmentVLHGC::getEnvironment(envBase);

	_freeListLock.acquire();
	MM_HeapRegionDescriptorVLHGC *region = _idleMPRegions.peekFirstRegion();
	if (NULL != region) {
		_idleMPRegions.removeRegion(region);
	} else {
		region = _freeRegions.peekFirstRegion();
		if (NULL != region) {
			_freeRegions.removeRegion(region);
		}
	}
	_freeListLock.release();
	if (NULL != region) {
		if (MM_HeapRegionDescriptor::FREE == region->getRegionType()) {
			if (region->_allocateData.taskAsMemoryPool(env, requestingContext)) {
				/* this is a new region. Initialize it for the given pool */
				region->resetAge(env, (U_64)_subspace->getBytesRemainingBeforeTaxation());
				MM_MemoryPool *mpaol = region->getMemoryPool();
				mpaol->setSubSpace(subSpace);
				mpaol->expandWithRange(env, region->getSize(), region->getLowAddress(), region->getHighAddress(), false);
				mpaol->recalculateMemoryPoolStatistics(env);
			} else {
				/* something went wrong so put the region back in the free list and return NULL (even though the region might have been found in another context, where we put it back is largely arbitrary and this path should never actually be taken) */
				addRegionToFreeList(env, region);
				region = NULL;
			}
		} else if (MM_HeapRegionDescriptor::ADDRESS_ORDERED_IDLE == region->getRegionType()) {
			bool success = region->_allocateData.taskAsMemoryPool(env, requestingContext);
			/* we can't fail to convert an IDLE region to an active one */
			Assert_MM_true(success);
			/* also add this region into our owned region list */
			region->resetAge(env, (U_64)_subspace->getBytesRemainingBeforeTaxation());
			region->_allocateData._owningContext = requestingContext;
			MM_MemoryPool *pool = region->getMemoryPool();
			Assert_MM_true(subSpace == pool->getSubSpace());
			pool->rebuildFreeListInRegion(env, region, NULL);
			pool->recalculateMemoryPoolStatistics(env);
			Assert_MM_true(pool->getLargestFreeEntry() == region->getSize());
		} else {
			Assert_MM_unreachable();
		}
		if (NULL != region) {
			Assert_MM_true(getNumaNode() == region->getNumaNode());
			Assert_MM_true(NULL == region->_allocateData._originalOwningContext);
		}
	}
	return region;
}

MM_HeapRegionDescriptorVLHGC *
MM_AllocationContextBalanced::acquireFreeRegionFromNode(MM_EnvironmentBase *env)
{
	MM_HeapRegionDescriptorVLHGC *region = _cachedReplenishPoint->acquireFreeRegionFromContext(env);
	MM_AllocationContextBalanced *targetContext = _cachedReplenishPoint->getNextSibling();
	while ((NULL == region) && (targetContext != this)) {
		region = targetContext->acquireFreeRegionFromContext(env);
		if (NULL != region) {
			_cachedReplenishPoint = targetContext;
		}
		targetContext = targetContext->getNextSibling();
	}
	if (NULL != region) {
		Assert_MM_true(getNumaNode() == region->getNumaNode());
	}
	return region;
}

MM_HeapRegionDescriptorVLHGC *
MM_AllocationContextBalanced::acquireFreeRegionFromContext(MM_EnvironmentBase *env)
{
	_freeListLock.acquire();
	MM_HeapRegionDescriptorVLHGC *region = _freeRegions.peekFirstRegion();
	if (NULL != region) {
		_freeRegions.removeRegion(region);
	} else {
		region = _idleMPRegions.peekFirstRegion();
		if (NULL != region) {
			_idleMPRegions.removeRegion(region);
			region->_allocateData.taskAsFreePool(env);
		}
	}
	_freeListLock.release();
	if (NULL != region) {
		Assert_MM_true(getNumaNode() == region->getNumaNode());
	}
	return region;
}

void
MM_AllocationContextBalanced::lockCommon()
{
	_contextLock.acquire();
}

void
MM_AllocationContextBalanced::unlockCommon()
{
	_contextLock.release();
}

UDATA
MM_AllocationContextBalanced::getFreeMemorySize()
{
	UDATA regionSize = _heapRegionManager->getRegionSize();
	UDATA freeRegions = getFreeRegionCount();
	return _freeMemorySize + (freeRegions * regionSize);
}

UDATA
MM_AllocationContextBalanced::getFreeRegionCount()
{
	return _idleMPRegions.listSize() + _freeRegions.listSize();
}

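/**
 * Reconcile this context's region lists with a region whose ownership was changed
 * elsewhere (for example by compaction planning): remove it from our flushed list
 * and hand it to the new owning context.
 */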
void
MM_AllocationContextBalanced::migrateRegionToAllocationContext(MM_HeapRegionDescriptorVLHGC *region, MM_AllocationContextTarok *newOwner)
{
	/*
	 * This is the point where we reconcile the data held in the region descriptors and the contexts. Prior to this, compaction planning may have decided
	 * to migrate a region into a new context but couldn't update the contexts' meta-structures due to performance concerns around the locks required to
	 * manipulate the lists. After this call returns, the region's meta-data will be consistent with its owning context.
	 */
	if (region->containsObjects()) {
		Assert_MM_true(NULL != region->getMemoryPool());
		_flushedRegions.removeRegion(region);
		Assert_MM_true(region->_allocateData._owningContext == newOwner);
		newOwner->acceptMigratingRegion(region);
	} else if (region->isArrayletLeaf()) {
		/* nothing to do */
	} else {
		Assert_MM_unreachable();
	}
	/* we can only do direct migration between contexts with the same NUMA properties, at this time (note that 0 is special since it can accept memory from any node) */
	Assert_MM_true((region->getNumaNode() == newOwner->getNumaNode()) || (0 == newOwner->getNumaNode()));
}

void
MM_AllocationContextBalanced::acceptMigratingRegion(MM_HeapRegionDescriptorVLHGC *region)
{
	_flushedRegions.insertRegion(region);
}

void
MM_AllocationContextBalanced::resetHeapStatistics(bool globalCollect)
{
	lockCommon();
	if (NULL != _allocationRegion) {
		_allocationRegion->getMemoryPool()->resetHeapStatistics(globalCollect);
	}
	MM_HeapRegionDescriptorVLHGC *region = _nonFullRegions.peekFirstRegion();
	while (NULL != region) {
		region->getMemoryPool()->resetHeapStatistics(globalCollect);
		region = _nonFullRegions.peekRegionAfter(region);
	}
	region = _discardRegionList.peekFirstRegion();
	while (NULL != region) {
		region->getMemoryPool()->resetHeapStatistics(globalCollect);
		region = _discardRegionList.peekRegionAfter(region);
	}
	region = _flushedRegions.peekFirstRegion();
	while (NULL != region) {
		region->getMemoryPool()->resetHeapStatistics(globalCollect);
		region = _flushedRegions.peekRegionAfter(region);
	}
	unlockCommon();
}

void
MM_AllocationContextBalanced::mergeHeapStats(MM_HeapStats *heapStats, UDATA includeMemoryType)
{
	lockCommon();
	if (NULL != _allocationRegion) {
		_allocationRegion->getMemoryPool()->mergeHeapStats(heapStats, true);
	}
	MM_HeapRegionDescriptorVLHGC *region = _nonFullRegions.peekFirstRegion();
	while (NULL != region) {
		region->getMemoryPool()->mergeHeapStats(heapStats, true);
		region = _nonFullRegions.peekRegionAfter(region);
	}
	region = _discardRegionList.peekFirstRegion();
	while (NULL != region) {
		region->getMemoryPool()->mergeHeapStats(heapStats, true);
		region = _discardRegionList.peekRegionAfter(region);
	}
	region = _flushedRegions.peekFirstRegion();
	while (NULL != region) {
		region->getMemoryPool()->mergeHeapStats(heapStats, true);
		region = _flushedRegions.peekRegionAfter(region);
	}
	unlockCommon();
}

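/**
 * Replenish and retry an allocation which failed, with the context lock already held:
 * leaf allocations consume taxation and take a free region directly, while object and
 * TLH allocations replenish the active allocation region and retry through lockedAllocate().
 */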
void *
MM_AllocationContextBalanced::lockedReplenishAndAllocate(MM_EnvironmentBase *env, MM_ObjectAllocationInterface *objectAllocationInterface, MM_AllocateDescription *allocateDescription, MM_MemorySubSpace::AllocationType allocationType)
{
	void *result = NULL;
	MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);
	UDATA regionSize = extensions->regionSize;

	UDATA contiguousAllocationSize;
	if (MM_MemorySubSpace::ALLOCATION_TYPE_LEAF == allocationType) {
		contiguousAllocationSize = regionSize;
	} else {
		contiguousAllocationSize = allocateDescription->getContiguousBytes();
	}

	Trc_MM_AllocationContextBalanced_lockedReplenishAndAllocate_Entry(env->getLanguageVMThread(), regionSize, contiguousAllocationSize);

	if (MM_MemorySubSpace::ALLOCATION_TYPE_LEAF == allocationType) {
		if (_subspace->consumeFromTaxationThreshold(env, regionSize)) {
			/* acquire a free region */
			MM_HeapRegionDescriptorVLHGC *leafRegion = acquireFreeRegionFromHeap(env);
			if (NULL != leafRegion) {
				result = lockedAllocateArrayletLeaf(env, allocateDescription, leafRegion);
				leafRegion->_allocateData._owningContext = this;
				Assert_MM_true(leafRegion->getLowAddress() == result);
				Trc_MM_AllocationContextBalanced_lockedReplenishAndAllocate_acquiredFreeRegion(env->getLanguageVMThread(), regionSize);
			}
		}
	} else {
		Assert_MM_true(NULL == _allocationRegion);
		MM_HeapRegionDescriptorVLHGC *newRegion = internalReplenishActiveRegion(env, true);
		if (NULL != newRegion) {
			/* the new region must be our current allocation region and it must be completely empty */
			Assert_MM_true(_allocationRegion == newRegion);
			Assert_MM_true(newRegion->getMemoryPool()->getActualFreeMemorySize() == newRegion->getSize());

			result = lockedAllocate(env, objectAllocationInterface, allocateDescription, allocationType);
			Assert_MM_true(NULL != result);
		}
	}

	if (NULL != result) {
		Trc_MM_AllocationContextBalanced_lockedReplenishAndAllocate_Success(env->getLanguageVMThread());
	} else {
		Trc_MM_AllocationContextBalanced_lockedReplenishAndAllocate_Failure(env->getLanguageVMThread());
	}

	return result;
}

MM_HeapRegionDescriptorVLHGC *
MM_AllocationContextBalanced::internalReplenishActiveRegion(MM_EnvironmentBase *env, bool payTax)
{
	MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);
	UDATA regionSize = extensions->regionSize;
	MM_HeapRegionDescriptorVLHGC *newRegion = NULL;

	Assert_MM_true(NULL == _allocationRegion);

	if (!payTax || _subspace->consumeFromTaxationThreshold(env, regionSize)) {
		newRegion = acquireMPRegionFromHeap(env, _subspace, this);
		if (NULL != newRegion) {
			Trc_MM_AllocationContextBalanced_internalReplenishActiveRegion_convertedFreeRegion(env->getLanguageVMThread(), newRegion, regionSize);
			_allocationRegion = newRegion;
			Trc_MM_AllocationContextBalanced_internalReplenishActiveRegion_setAllocationRegion(env->getLanguageVMThread(), this, newRegion);
			_freeMemorySize += newRegion->getMemoryPool()->getActualFreeMemorySize();
		}
	}

	Assert_MM_true(newRegion == _allocationRegion);

	return newRegion;
}

void
MM_AllocationContextBalanced::accountForRegionLocation(MM_HeapRegionDescriptorVLHGC *region, UDATA *localCount, UDATA *foreignCount)
{
	Assert_MM_true((NULL == region->_allocateData._owningContext) || (this == region->_allocateData._owningContext));
	if (NULL == region->_allocateData._originalOwningContext) {
		/* local */
		*localCount += 1;
		Assert_MM_true(region->getNumaNode() == getNumaNode());
	} else {
		/* foreign (stolen) */
		*foreignCount += 1;
		Assert_MM_true(region->getNumaNode() != getNumaNode());
	}
}

void
MM_AllocationContextBalanced::countRegionsInList(MM_RegionListTarok *list, UDATA *localCount, UDATA *foreignCount)
{
	MM_HeapRegionDescriptorVLHGC *region = list->peekFirstRegion();
	while (NULL != region) {
		accountForRegionLocation(region, localCount, foreignCount);
		region = list->peekRegionAfter(region);
	}
}

void
MM_AllocationContextBalanced::getRegionCount(UDATA *localCount, UDATA *foreignCount)
{
	if (NULL != _allocationRegion) {
		accountForRegionLocation(_allocationRegion, localCount, foreignCount);
	}
	countRegionsInList(&_nonFullRegions, localCount, foreignCount);
	countRegionsInList(&_discardRegionList, localCount, foreignCount);
	countRegionsInList(&_flushedRegions, localCount, foreignCount);
	countRegionsInList(&_freeRegions, localCount, foreignCount);
	countRegionsInList(&_idleMPRegions, localCount, foreignCount);
}

void
MM_AllocationContextBalanced::removeRegionFromFlushedList(MM_HeapRegionDescriptorVLHGC *region)
{
	_flushedRegions.removeRegion(region);
}

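/**
 * Select a region this context can give up for heap contraction, preferring completely
 * free regions over idle memory-pool regions (which are more valuable to keep).
 */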
MM_HeapRegionDescriptorVLHGC *
MM_AllocationContextBalanced::selectRegionForContraction(MM_EnvironmentBase *env)
{
	/* since we know this is only called during contraction we could skip the lock, but we would rather be safe */
	_freeListLock.acquire();

	/* prefer free regions since idle MPAOL regions are more valuable to us */
	MM_HeapRegionDescriptorVLHGC *region = _freeRegions.peekFirstRegion();
	if (NULL != region) {
		_freeRegions.removeRegion(region);
	} else {
		region = _idleMPRegions.peekFirstRegion();
		if (NULL != region) {
			_idleMPRegions.removeRegion(region);
			region->_allocateData.taskAsFreePool(env);
		}
	}
	if (NULL != region) {
		Assert_MM_true(getNumaNode() == region->getNumaNode());
		Assert_MM_true(MM_HeapRegionDescriptor::FREE == region->getRegionType());
	}

	_freeListLock.release();

	return region;
}

bool
MM_AllocationContextBalanced::setNumaAffinityForThread(MM_EnvironmentBase *env)
{
	bool success = true;

	bool hasPhysicalNUMASupport = MM_GCExtensions::getExtensions(env)->_numaManager.isPhysicalNUMASupported();
	if (hasPhysicalNUMASupport && (0 != getNumaNode())) {
		/* TODO: should we try to read the affinity first and find the best node? */
		success = env->setNumaAffinity(_freeProcessorNodes, _freeProcessorNodeCount);
	}

	return success;
}