Path: blob/master/runtime/gc_vlhgc/GlobalAllocationManagerTarok.cpp
5986 views
/*******************************************************************************
 * Copyright (c) 1991, 2021 IBM Corp. and others
 *
 * This program and the accompanying materials are made available under
 * the terms of the Eclipse Public License 2.0 which accompanies this
 * distribution and is available at https://www.eclipse.org/legal/epl-2.0/
 * or the Apache License, Version 2.0 which accompanies this distribution and
 * is available at https://www.apache.org/licenses/LICENSE-2.0.
 *
 * This Source Code may also be made available under the following
 * Secondary Licenses when the conditions for such availability set
 * forth in the Eclipse Public License, v. 2.0 are satisfied: GNU
 * General Public License, version 2 with the GNU Classpath
 * Exception [1] and GNU General Public License, version 2 with the
 * OpenJDK Assembly Exception [2].
 *
 * [1] https://www.gnu.org/software/classpath/license.html
 * [2] http://openjdk.java.net/legal/assembly-exception.html
 *
 * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 OR GPL-2.0 WITH Classpath-exception-2.0 OR LicenseRef-GPL-2.0 WITH Assembly-exception
 *******************************************************************************/

#include "j9.h"
#include "j9cfg.h"

#include "GlobalAllocationManagerTarok.hpp"

#include "AllocationContextBalanced.hpp"
#include "AllocationContextTarok.hpp"
#include "EnvironmentBase.hpp"
#include "HeapRegionIteratorVLHGC.hpp"
#include "HeapRegionManager.hpp"
#include "IncrementalGenerationalGC.hpp"
#include "VMThreadListIterator.hpp"
#include "Wildcard.hpp"

/* the common context is used for the main thread, specifically, so it is not equivalent to other contexts in the list */
#define COMMON_CONTEXT_INDEX 0

/**
 * Forge-allocate, placement-construct and initialize a new instance.
 * On initialize() failure the partially-built instance is killed and NULL is returned.
 */
MM_GlobalAllocationManagerTarok*
MM_GlobalAllocationManagerTarok::newInstance(MM_EnvironmentBase *env)
{
	MM_GlobalAllocationManagerTarok *allocationManager = (MM_GlobalAllocationManagerTarok *)env->getForge()->allocate(sizeof(MM_GlobalAllocationManagerTarok), MM_AllocationCategory::FIXED, J9_GET_CALLSITE());
	if (NULL != allocationManager) {
		allocationManager = new(allocationManager) MM_GlobalAllocationManagerTarok(env);
		if (!allocationManager->initialize(env)) {
			allocationManager->kill(env);
			allocationManager = NULL;
		}
	}
	return allocationManager;
}

/**
 * Tear down and free this instance (mirror of newInstance()).
 */
void
MM_GlobalAllocationManagerTarok::kill(MM_EnvironmentBase *env)
{
	tearDown(env);
	env->getForge()->free(this);
}

/**
 * Decide whether the calling thread should be bound to the common allocation
 * context rather than a per-node context.  True when either:
 *  - tarokAttachedThreadsAreCommon is set and the thread is an attached
 *    (but not system) thread, per its privateFlags; or
 *  - the thread object's class name matches one of the wildcard patterns
 *    supplied via -XXgc:numaCommonThreadClass=.
 */
bool
MM_GlobalAllocationManagerTarok::shouldIdentifyThreadAsCommon(MM_EnvironmentBase *env)
{
	bool result = false;

	if (_extensions->tarokAttachedThreadsAreCommon) {
		/* attached == flag set, system == flag clear; both bits are masked so a system thread never matches */
		result = (J9_PRIVATE_FLAGS_ATTACHED_THREAD == ((J9_PRIVATE_FLAGS_ATTACHED_THREAD | J9_PRIVATE_FLAGS_SYSTEM_THREAD) & ((J9VMThread *)env->getLanguageVMThread())->privateFlags));
	}

	if (!result) {
		/* determine if the thread's class matches any of the wildcards specified using -XXgc:numaCommonThreadClass= */
		J9Object* threadObject = ((J9VMThread *)env->getLanguageVMThread())->threadObject;
		if (NULL != threadObject) {
			J9Class *threadClass = J9GC_J9OBJECT_CLAZZ(threadObject, env);
			J9UTF8* classNameUTF8 = J9ROMCLASS_CLASSNAME(threadClass->romClass);
			MM_Wildcard *wildcard = MM_GCExtensions::getExtensions(_extensions)->numaCommonThreadClassNamePatterns;
			while (!result && (NULL != wildcard)) {
				result = wildcard->match((char*)J9UTF8_DATA(classNameUTF8), J9UTF8_LENGTH(classNameUTF8));
				wildcard = wildcard->_next;
			}
		}
	}

	return result;
}

/**
 * Associate the env with an allocation context.  Common threads (and the
 * single-context case) get the context in slot COMMON_CONTEXT_INDEX; all
 * other threads are dealt contexts 1..count-1 round-robin via
 * _nextAllocationContext, and have their NUMA affinity set to match.
 * @return true if the env accepted the chosen context.
 * NOTE(review): _nextAllocationContext is read-modify-written without visible
 * locking here — presumably callers serialize acquisition; confirm at call sites.
 */
bool
MM_GlobalAllocationManagerTarok::acquireAllocationContext(MM_EnvironmentBase *env)
{
	Assert_MM_true(NULL == env->getAllocationContext());
	MM_AllocationContextTarok *context = NULL;
	if ((1 == _managedAllocationContextCount) || shouldIdentifyThreadAsCommon(env)) {
		context = (MM_AllocationContextTarok*)_managedAllocationContexts[COMMON_CONTEXT_INDEX];
		/* attached threads get the common context */
		env->setAllocationContext(context);
	} else {
		UDATA index = _nextAllocationContext;
		/* context 0 is the common context so clamp the range of worker contexts above 0 */
		_nextAllocationContext = (index + 1) % (_managedAllocationContextCount - 1);
		UDATA thisIndex = index + 1;
		Assert_MM_true(COMMON_CONTEXT_INDEX != thisIndex);
		context = (MM_AllocationContextTarok*)_managedAllocationContexts[thisIndex];
		env->setAllocationContext(context);
		context->setNumaAffinityForThread(env);
	}

	/* every thread, common or not, can reach the common context through the env */
	env->setCommonAllocationContext(_managedAllocationContexts[COMMON_CONTEXT_INDEX]);

	/* this check is kind of gratuitous but it ensures that the env accepted the context we gave it and that this GAM is correctly initialized */
	return (context == env->getAllocationContext());
}

/**
 * Release the env's context binding.  The context itself is shared and is not
 * torn down here.
 */
void
MM_GlobalAllocationManagerTarok::releaseAllocationContext(MM_EnvironmentBase *env)
{
	/* just disassociate the env from this context */
	env->setAllocationContext(NULL);
}

/**
 * Build the managed context array: one common context (slot 0, node 0) plus
 * one context per NUMA affinity leader.  Also builds _perNodeContextSets,
 * indexed by j9NodeNumber, used by expand() to route regions to the context
 * owning their node.  Contexts are chained as "stealing cousins" in a ring
 * (each points at the previously created one, and the common context closes
 * the ring).  Returns false on any allocation failure; partially-created
 * state is left for tearDown() to reclaim.
 */
bool
MM_GlobalAllocationManagerTarok::initializeAllocationContexts(MM_EnvironmentBase *env, MM_MemorySubSpaceTarok *subspace)
{
	UDATA allocationSize = sizeof(MM_AllocationContextBalanced *) * _managedAllocationContextCount;
	MM_AllocationContextBalanced **contexts = (MM_AllocationContextBalanced **)env->getForge()->allocate(allocationSize, MM_AllocationCategory::FIXED, J9_GET_CALLSITE());
	if (NULL == contexts) {
		return false;
	}
	memset(contexts, 0, allocationSize);
	_managedAllocationContexts = (MM_AllocationContext **)contexts;

	UDATA affinityLeaderCount = 0;
	J9MemoryNodeDetail const* affinityLeaders = _extensions->_numaManager.getAffinityLeaders(&affinityLeaderCount);
	Assert_MM_true((1 + affinityLeaderCount) == _managedAllocationContextCount);
	UDATA forceNode = _extensions->fvtest_tarokForceNUMANode;

	/* create the array of contexts indexed by node */
	UDATA maximumNodeNumberOwningMemory = 0;
	if (UDATA_MAX == forceNode) {
		for (UDATA i = 0; i < affinityLeaderCount; i++) {
			maximumNodeNumberOwningMemory = OMR_MAX(maximumNodeNumberOwningMemory, affinityLeaders[i].j9NodeNumber);
		}
	} else {
		maximumNodeNumberOwningMemory = forceNode;
	}
	UDATA owningByNodeSize = sizeof(MM_AllocationContextBalanced *) * (maximumNodeNumberOwningMemory + 1);
	_perNodeContextSets = (MM_AllocationContextBalanced **)env->getForge()->allocate(owningByNodeSize, MM_AllocationCategory::FIXED, J9_GET_CALLSITE());
	if (NULL == _perNodeContextSets) {
		return false;
	}
	memset(_perNodeContextSets, 0x0, owningByNodeSize);

	/* create the common context */
	MM_AllocationContextBalanced *commonContext = MM_AllocationContextBalanced::newInstance(env, subspace, 0, COMMON_CONTEXT_INDEX);
	if (NULL == commonContext) {
		return false;
	}
	contexts[COMMON_CONTEXT_INDEX] = commonContext;
	commonContext->setNextSibling(commonContext);
	_perNodeContextSets[0] = commonContext;
	/* the slot-0 store above is only correct while the common context index is 0 */
	Assert_MM_true(0 == COMMON_CONTEXT_INDEX);
	UDATA nextContextIndex = 1;

	/* create affinity leader contexts */
	for (UDATA i = 0; i < affinityLeaderCount; i++) {
		UDATA numaNode = 0;
		if (UDATA_MAX == forceNode) {
			numaNode = affinityLeaders[i].j9NodeNumber;
		} else {
			/* NOTE(review): fvtest-only path — every leader context maps to the same
			 * forced node, so _perNodeContextSets[numaNode] below is overwritten each
			 * iteration (last context wins); presumably intended for testing only
			 */
			numaNode = forceNode;
		}
		MM_AllocationContextBalanced *context = MM_AllocationContextBalanced::newInstance(env, subspace, numaNode, nextContextIndex);
		if (NULL == context) {
			return false;
		}
		/* sibling relationship is for lock splitting but not currently in use so just short-circuit it */
		context->setNextSibling(context);
		_perNodeContextSets[numaNode] = context;
		/* every context is the cousin of the one before it */
		context->setStealingCousin(contexts[nextContextIndex - 1]);
		contexts[nextContextIndex] = context;
		nextContextIndex += 1;
	}

	/* close the cousin ring: common context steals from the last context created */
	commonContext->setStealingCousin(contexts[nextContextIndex - 1]);
	/* seed the round-robin cursor; fvtest_tarokFirstContext lets tests pick the starting worker context */
	_nextAllocationContext = (1 == _managedAllocationContextCount) ? 0 : (_extensions->fvtest_tarokFirstContext % (_managedAllocationContextCount - 1));

	return true;
}

/**
 * Initialize base state, the managed context count, and the runtime exec
 * manager.  The final assert guards against (contextCount+1)*tarokRegionMaxAge
 * overflowing UDATA arithmetic elsewhere.
 */
bool
MM_GlobalAllocationManagerTarok::initialize(MM_EnvironmentBase *env)
{
	bool result = MM_GlobalAllocationManager::initialize(env);
	if (result) {
		_managedAllocationContextCount = calculateIdealManagedContextCount(_extensions);
	}
	if (result) {
		result = _runtimeExecManager.initialize(env);
	}

	if (result) {
		Assert_MM_true((UDATA_MAX / (getTotalAllocationContextCount() + 1)) > _extensions->tarokRegionMaxAge);
	}

	return result;
}

/**
 * Tear down a GAM instance
 */
void
MM_GlobalAllocationManagerTarok::tearDown(MM_EnvironmentBase *env)
{
	if (NULL != _managedAllocationContexts) {
		/* kill any contexts that initializeAllocationContexts() managed to create (array was zeroed, so NULL entries are safe to skip) */
		for (UDATA i = 0; i < _managedAllocationContextCount; i++) {
			if (NULL != _managedAllocationContexts[i]) {
				_managedAllocationContexts[i]->kill(env);
				_managedAllocationContexts[i] = NULL;
			}
		}

		env->getForge()->free(_managedAllocationContexts);
		_managedAllocationContexts = NULL;
	}

	if (NULL != _perNodeContextSets) {
		/* _perNodeContextSets only aliases contexts killed above, so just free the array */
		env->getForge()->free(_perNodeContextSets);
		_perNodeContextSets = NULL;
	}

	_runtimeExecManager.tearDown(env);

	MM_GlobalAllocationManager::tearDown(env);
}

/**
 * Print current counters for AC region count and resets the counters afterwards
 * (hook callback for global GC start/end events; asserts on any other event).
 */
void
MM_GlobalAllocationManagerTarok::printAllocationContextStats(MM_EnvironmentBase *env, UDATA eventNum, J9HookInterface** hookInterface)
{
	PORT_ACCESS_FROM_ENVIRONMENT(env);
	UDATA totalRegionCount = 0;
	const char *eventName = " ";
	J9HookInterface** externalHookInterface = MM_GCExtensions::getExtensions(env)->getOmrHookInterface();

	if ((eventNum == J9HOOK_MM_OMR_GLOBAL_GC_START) && (hookInterface == externalHookInterface)) {
		eventName = "GCSTART";
	} else if ((eventNum == J9HOOK_MM_OMR_GLOBAL_GC_END) && (hookInterface == externalHookInterface)) {
		eventName = "GCEND  ";
	} else {
		Assert_MM_unreachable();
	}

	/* reset per-context counters before re-deriving them from the thread list and region table */
	for (UDATA i = 0; i < _managedAllocationContextCount; i++) {
		MM_AllocationContextTarok *ac = (MM_AllocationContextTarok *)_managedAllocationContexts[i];
		ac->resetRegionCount(MM_HeapRegionDescriptor::ADDRESS_ORDERED);
		ac->resetRegionCount(MM_HeapRegionDescriptor::ADDRESS_ORDERED_IDLE);
		ac->resetRegionCount(MM_HeapRegionDescriptor::ADDRESS_ORDERED_MARKED);
		ac->resetThreadCount();
	}

	/* count mutator threads per context */
	GC_VMThreadListIterator threadIterator((J9JavaVM *)env->getLanguageVM());
	J9VMThread * walkThread = NULL;
	while (NULL != (walkThread = threadIterator.nextVMThread())) {
		MM_EnvironmentBase *envThread = MM_EnvironmentBase::getEnvironment(walkThread->omrVMThread);
		if ((envThread->getThreadType() == MUTATOR_THREAD)) {
			((MM_AllocationContextTarok *)envThread->getAllocationContext())->incThreadCount();
		}
	}

	/* count regions (those with a memory pool) against their owning context */
	GC_HeapRegionIteratorVLHGC regionIterator(MM_GCExtensions::getExtensions(env)->heapRegionManager);
	MM_HeapRegionDescriptorVLHGC *region = NULL;
	while (NULL != (region = regionIterator.nextRegion())) {
		if (NULL != region->getMemoryPool()) {
			region->_allocateData._owningContext->incRegionCount(region->getRegionType());
		}
	}

	for (UDATA i = 0; i < _managedAllocationContextCount; i++) {
		MM_AllocationContextTarok *ac = (MM_AllocationContextTarok *)_managedAllocationContexts[i];
		UDATA acRegionCount = ac->getRegionCount(MM_HeapRegionDescriptor::ADDRESS_ORDERED);
		acRegionCount += ac->getRegionCount(MM_HeapRegionDescriptor::ADDRESS_ORDERED_IDLE);
		acRegionCount += ac->getRegionCount(MM_HeapRegionDescriptor::ADDRESS_ORDERED_MARKED);
		totalRegionCount += acRegionCount;
		UDATA localCount = 0;
		UDATA foreignCount = 0;
		ac->getRegionCount(&localCount, &foreignCount);

		j9tty_printf(PORTLIB, "AC %3d %s MPAOL regionCount %5d (AO %5d AO_IDLE %5d AO_MARKED %5d) mutatorCount %3d numaNode %d (%d local, %d foreign)\n",
				i, eventName, acRegionCount, ac->getRegionCount(MM_HeapRegionDescriptor::ADDRESS_ORDERED),
				ac->getRegionCount(MM_HeapRegionDescriptor::ADDRESS_ORDERED_IDLE), ac->getRegionCount(MM_HeapRegionDescriptor::ADDRESS_ORDERED_MARKED),
				ac->getThreadCount(), ac->getNumaNode(), localCount, foreignCount);
	}

	j9tty_printf(PORTLIB, "AC sum %s MPAOL regionCount %5d (total %d) \n", eventName, totalRegionCount, MM_GCExtensions::getExtensions(env)->heapRegionManager->getTableRegionCount());
}

/**
 * Sum of free memory (bytes) across all managed contexts.
 */
UDATA
MM_GlobalAllocationManagerTarok::getActualFreeMemorySize()
{
	UDATA freeMemory = 0;
	for (UDATA i = 0; i < _managedAllocationContextCount; i++) {
		freeMemory += ((MM_AllocationContextTarok *)_managedAllocationContexts[i])->getFreeMemorySize();
	}
	return freeMemory;
}

/**
 * Sum of free regions across all managed contexts.
 */
UDATA
MM_GlobalAllocationManagerTarok::getFreeRegionCount()
{
	UDATA freeRegions = 0;
	for (UDATA i = 0; i < _managedAllocationContextCount; i++) {
		freeRegions += ((MM_AllocationContextTarok *)_managedAllocationContexts[i])->getFreeRegionCount();
	}
	return freeRegions;
}

/**
 * The "approximate" figure is simply the exact one for this manager.
 */
UDATA
MM_GlobalAllocationManagerTarok::getApproximateFreeMemorySize()
{
	return getActualFreeMemorySize();
}

/**
 * Reset the largest-free-entry statistic on every managed context.
 */
void
MM_GlobalAllocationManagerTarok::resetLargestFreeEntry()
{
	for (UDATA i = 0; i < _managedAllocationContextCount; i++) {
		((MM_AllocationContextTarok *)_managedAllocationContexts[i])->resetLargestFreeEntry();
	}
}

/**
 * Hand a newly committed free region to the context owning its NUMA node,
 * then rotate that node's sibling ring so successive expansions are spread
 * across the node's contexts.
 */
void
MM_GlobalAllocationManagerTarok::expand(MM_EnvironmentBase *env, MM_HeapRegionDescriptorVLHGC *region)
{
	/* we can only work on committed, free regions */
	Assert_MM_true(region->isCommitted());
	Assert_MM_true(MM_HeapRegionDescriptor::FREE == region->getRegionType());

	/* find the node to which this region has been bound */
	UDATA nodeNumber = region->getNumaNode();
	MM_AllocationContextBalanced *targetContext = _perNodeContextSets[nodeNumber];
	targetContext->addRegionToFreeList(env, region);
	/* now "rotate the wheel" so that the next expansion into this node will be distributed to the next context */
	_perNodeContextSets[nodeNumber] = targetContext->getNextSibling();
}

/**
 * Largest free entry (bytes) over all managed contexts.
 */
UDATA
MM_GlobalAllocationManagerTarok::getLargestFreeEntry()
{
	UDATA largest = 0;
	for (UDATA i = 0; i < _managedAllocationContextCount; i++) {
		UDATA candidate = ((MM_AllocationContextTarok *)_managedAllocationContexts[i])->getLargestFreeEntry();
		largest = OMR_MAX(largest, candidate);
	}
	return largest;
}

/**
 * Reset heap statistics on every managed context.
 */
void
MM_GlobalAllocationManagerTarok::resetHeapStatistics(bool globalCollect)
{
	for (UDATA i = 0; i < _managedAllocationContextCount; i++) {
		((MM_AllocationContextTarok *)_managedAllocationContexts[i])->resetHeapStatistics(globalCollect);
	}
}

/**
 * Merge heap statistics from every managed context into heapStats.
 */
void
MM_GlobalAllocationManagerTarok::mergeHeapStats(MM_HeapStats *heapStats, UDATA includeMemoryType)
{
	for (UDATA i = 0; i < _managedAllocationContextCount; i++) {
		((MM_AllocationContextTarok *)_managedAllocationContexts[i])->mergeHeapStats(heapStats, includeMemoryType);
	}
}

/**
 * Total context count == managed context count for this manager (no
 * unmanaged contexts are added here).
 */
UDATA
MM_GlobalAllocationManagerTarok::calculateIdealTotalContextCount(MM_GCExtensions *extensions)
{
	UDATA totalCount = calculateIdealManagedContextCount(extensions);
	return totalCount;
}

/**
 * Ideal managed context count: one common context plus one per NUMA affinity
 * leader, capped so contexts cannot hold more than 1/8 of the heap's regions
 * in slack, and floored at 1.
 */
UDATA
MM_GlobalAllocationManagerTarok::calculateIdealManagedContextCount(MM_GCExtensionsBase *extensions)
{
	UDATA affinityLeaderCount = extensions->_numaManager.getAffinityLeaderCount();
	UDATA desiredAllocationContextCount = 1 + affinityLeaderCount;
	UDATA regionCount = extensions->memoryMax / extensions->regionSize;
	/* heuristic -- ACs are permitted to waste up to 1/8th of the heap in slack regions. This number may need to be adjusted */
	UDATA maxAllocationContextCount = regionCount / 8;
	return OMR_MAX(1, OMR_MIN(desiredAllocationContextCount, maxAllocationContextCount));
}

/**
 * Linear search for the managed context bound to numaNode; asserts that one
 * exists (callers must only pass nodes that own a context).
 */
MM_AllocationContextBalanced *
MM_GlobalAllocationManagerTarok::getAllocationContextForNumaNode(UDATA numaNode)
{
	MM_AllocationContextBalanced * result = NULL;
	for (UDATA i = 0; i < _managedAllocationContextCount; i++) {
		MM_AllocationContextBalanced *allocationContext = (MM_AllocationContextBalanced*)_managedAllocationContexts[i];
		if (allocationContext->getNumaNode() == numaNode) {
			result = allocationContext;
			break;
		}
	}
	Assert_MM_true(NULL != result);
	return result;
}