Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/openj9
Path: blob/master/runtime/gc_vlhgc/GlobalAllocationManagerTarok.cpp
5986 views
1
/*******************************************************************************
 * Copyright (c) 1991, 2021 IBM Corp. and others
 *
 * This program and the accompanying materials are made available under
 * the terms of the Eclipse Public License 2.0 which accompanies this
 * distribution and is available at https://www.eclipse.org/legal/epl-2.0/
 * or the Apache License, Version 2.0 which accompanies this distribution and
 * is available at https://www.apache.org/licenses/LICENSE-2.0.
 *
 * This Source Code may also be made available under the following
 * Secondary Licenses when the conditions for such availability set
 * forth in the Eclipse Public License, v. 2.0 are satisfied: GNU
 * General Public License, version 2 with the GNU Classpath
 * Exception [1] and GNU General Public License, version 2 with the
 * OpenJDK Assembly Exception [2].
 *
 * [1] https://www.gnu.org/software/classpath/license.html
 * [2] http://openjdk.java.net/legal/assembly-exception.html
 *
 * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 OR GPL-2.0 WITH Classpath-exception-2.0 OR LicenseRef-GPL-2.0 WITH Assembly-exception
 *******************************************************************************/
22
23
24
#include "j9.h"
25
#include "j9cfg.h"
26
27
#include "GlobalAllocationManagerTarok.hpp"
28
29
#include "AllocationContextBalanced.hpp"
30
#include "AllocationContextTarok.hpp"
31
#include "EnvironmentBase.hpp"
32
#include "HeapRegionIteratorVLHGC.hpp"
33
#include "HeapRegionManager.hpp"
34
#include "IncrementalGenerationalGC.hpp"
35
#include "VMThreadListIterator.hpp"
36
#include "Wildcard.hpp"
37
38
/* the common context is used for the main thread, specifically, so it is not equivalent to other contexts in the list */
39
#define COMMON_CONTEXT_INDEX 0
40
41
42
MM_GlobalAllocationManagerTarok*
MM_GlobalAllocationManagerTarok::newInstance(MM_EnvironmentBase *env)
{
	/* Allocate raw storage from the forge, placement-new the manager into it,
	 * then run initialize(); on failure, kill() releases the partial instance
	 * and NULL is returned to the caller.
	 */
	void *memory = env->getForge()->allocate(sizeof(MM_GlobalAllocationManagerTarok), MM_AllocationCategory::FIXED, J9_GET_CALLSITE());
	MM_GlobalAllocationManagerTarok *allocationManager = NULL;
	if (NULL != memory) {
		allocationManager = new(memory) MM_GlobalAllocationManagerTarok(env);
		if (!allocationManager->initialize(env)) {
			allocationManager->kill(env);
			allocationManager = NULL;
		}
	}
	return allocationManager;
}
55
56
void
57
MM_GlobalAllocationManagerTarok::kill(MM_EnvironmentBase *env)
58
{
59
tearDown(env);
60
env->getForge()->free(this);
61
}
62
63
bool
64
MM_GlobalAllocationManagerTarok::shouldIdentifyThreadAsCommon(MM_EnvironmentBase *env)
65
{
66
bool result = false;
67
68
if (_extensions->tarokAttachedThreadsAreCommon) {
69
result = (J9_PRIVATE_FLAGS_ATTACHED_THREAD == ((J9_PRIVATE_FLAGS_ATTACHED_THREAD | J9_PRIVATE_FLAGS_SYSTEM_THREAD) & ((J9VMThread *)env->getLanguageVMThread())->privateFlags));
70
}
71
72
if (!result) {
73
/* determine if the thread's class matches any of the wildcards specified using -XXgc:numaCommonThreadClass= */
74
J9Object* threadObject = ((J9VMThread *)env->getLanguageVMThread())->threadObject;
75
if (NULL != threadObject) {
76
J9Class *threadClass = J9GC_J9OBJECT_CLAZZ(threadObject, env);
77
J9UTF8* classNameUTF8 = J9ROMCLASS_CLASSNAME(threadClass->romClass);
78
MM_Wildcard *wildcard = MM_GCExtensions::getExtensions(_extensions)->numaCommonThreadClassNamePatterns;
79
while (!result && (NULL != wildcard)) {
80
result = wildcard->match((char*)J9UTF8_DATA(classNameUTF8), J9UTF8_LENGTH(classNameUTF8));
81
wildcard = wildcard->_next;
82
}
83
}
84
}
85
86
return result;
87
}
88
89
bool
90
MM_GlobalAllocationManagerTarok::acquireAllocationContext(MM_EnvironmentBase *env)
91
{
92
Assert_MM_true(NULL == env->getAllocationContext());
93
MM_AllocationContextTarok *context = NULL;
94
if ((1 == _managedAllocationContextCount) || shouldIdentifyThreadAsCommon(env)) {
95
context = (MM_AllocationContextTarok*)_managedAllocationContexts[COMMON_CONTEXT_INDEX];
96
/* attached threads get the common context */
97
env->setAllocationContext(context);
98
} else {
99
UDATA index = _nextAllocationContext;
100
/* context 0 is the common context so clamp the range of worker contexts above 0 */
101
_nextAllocationContext = (index + 1) % (_managedAllocationContextCount - 1);
102
UDATA thisIndex = index + 1;
103
Assert_MM_true(COMMON_CONTEXT_INDEX != thisIndex);
104
context = (MM_AllocationContextTarok*)_managedAllocationContexts[thisIndex];
105
env->setAllocationContext(context);
106
context->setNumaAffinityForThread(env);
107
}
108
109
env->setCommonAllocationContext(_managedAllocationContexts[COMMON_CONTEXT_INDEX]);
110
111
/* this check is kind of gratuitous but it ensures that the env accepted the context we gave it and that this GAM is correctly initialized */
112
return (context == env->getAllocationContext());
113
}
114
115
void
116
MM_GlobalAllocationManagerTarok::releaseAllocationContext(MM_EnvironmentBase *env)
117
{
118
/* just disassociate the env from this context */
119
env->setAllocationContext(NULL);
120
}
121
122
bool
MM_GlobalAllocationManagerTarok::initializeAllocationContexts(MM_EnvironmentBase *env, MM_MemorySubSpaceTarok *subspace)
{
	/*
	 * Build the managed context array: slot 0 (COMMON_CONTEXT_INDEX) holds the common
	 * context and each subsequent slot holds one context per NUMA affinity leader.
	 * Also builds _perNodeContextSets, a table indexed by j9NodeNumber that maps a
	 * NUMA node to (one member of) its context ring.
	 * Returns false on any allocation failure, leaving partially-built state for
	 * tearDown() to reclaim.
	 */
	UDATA allocationSize = sizeof(MM_AllocationContextBalanced *) * _managedAllocationContextCount;
	MM_AllocationContextBalanced **contexts = (MM_AllocationContextBalanced **)env->getForge()->allocate(allocationSize, MM_AllocationCategory::FIXED, J9_GET_CALLSITE());
	if (NULL == contexts) {
		return false;
	}
	memset(contexts, 0, allocationSize);
	_managedAllocationContexts = (MM_AllocationContext **)contexts;

	UDATA affinityLeaderCount = 0;
	J9MemoryNodeDetail const* affinityLeaders = _extensions->_numaManager.getAffinityLeaders(&affinityLeaderCount);
	/* the managed count must have been sized as (common + one per affinity leader) */
	Assert_MM_true((1 + affinityLeaderCount) == _managedAllocationContextCount);
	/* fvtest knob: force all contexts onto one node; UDATA_MAX means "not set" */
	UDATA forceNode = _extensions->fvtest_tarokForceNUMANode;

	/* size the per-node table to the highest node number that owns memory */
	UDATA maximumNodeNumberOwningMemory = 0;
	if (UDATA_MAX == forceNode) {
		for (UDATA i = 0; i < affinityLeaderCount; i++) {
			maximumNodeNumberOwningMemory = OMR_MAX(maximumNodeNumberOwningMemory, affinityLeaders[i].j9NodeNumber);
		}
	} else {
		maximumNodeNumberOwningMemory = forceNode;
	}
	UDATA owningByNodeSize = sizeof(MM_AllocationContextBalanced *) * (maximumNodeNumberOwningMemory + 1);
	_perNodeContextSets = (MM_AllocationContextBalanced **)env->getForge()->allocate(owningByNodeSize, MM_AllocationCategory::FIXED, J9_GET_CALLSITE());
	if (NULL == _perNodeContextSets) {
		return false;
	}
	memset(_perNodeContextSets, 0x0, owningByNodeSize);

	/* create the common context on node 0; it is its own (single-element) sibling ring */
	MM_AllocationContextBalanced *commonContext = MM_AllocationContextBalanced::newInstance(env, subspace, 0, COMMON_CONTEXT_INDEX);
	if (NULL == commonContext) {
		return false;
	}
	contexts[COMMON_CONTEXT_INDEX] = commonContext;
	commonContext->setNextSibling(commonContext);
	_perNodeContextSets[0] = commonContext;
	Assert_MM_true(0 == COMMON_CONTEXT_INDEX);
	UDATA nextContextIndex = 1;

	/* create affinity leader contexts, one per leader, chained to their predecessor */
	for (UDATA i = 0; i < affinityLeaderCount; i++) {
		UDATA numaNode = 0;
		if (UDATA_MAX == forceNode) {
			numaNode = affinityLeaders[i].j9NodeNumber;
		} else {
			numaNode = forceNode;
		}
		MM_AllocationContextBalanced *context = MM_AllocationContextBalanced::newInstance(env, subspace, numaNode, nextContextIndex);
		if (NULL == context) {
			return false;
		}
		/* sibling relationship is for lock splitting but not currently in use so just short-circuit it */
		context->setNextSibling(context);
		_perNodeContextSets[numaNode] = context;
		/* every context is the cousin of the one before it */
		context->setStealingCousin(contexts[nextContextIndex - 1]);
		contexts[nextContextIndex] = context;
		nextContextIndex += 1;
	}

	/* close the cousin chain into a cycle: the common context steals from the last one created */
	commonContext->setStealingCousin(contexts[nextContextIndex - 1]);
	/* seed the round-robin cursor used by acquireAllocationContext (fvtest_tarokFirstContext can bias it) */
	_nextAllocationContext = (1 == _managedAllocationContextCount) ? 0 : (_extensions->fvtest_tarokFirstContext % (_managedAllocationContextCount - 1));

	return true;
}
192
193
bool
194
MM_GlobalAllocationManagerTarok::initialize(MM_EnvironmentBase *env)
195
{
196
bool result = MM_GlobalAllocationManager::initialize(env);
197
if (result) {
198
_managedAllocationContextCount = calculateIdealManagedContextCount(_extensions);
199
}
200
if (result) {
201
result = _runtimeExecManager.initialize(env);
202
}
203
204
if (result) {
205
Assert_MM_true((UDATA_MAX / (getTotalAllocationContextCount() + 1)) > _extensions->tarokRegionMaxAge);
206
}
207
208
209
return result;
210
}
211
212
/**
213
* Tear down a GAM instance
214
*/
215
void
216
MM_GlobalAllocationManagerTarok::tearDown(MM_EnvironmentBase *env)
217
{
218
if (NULL != _managedAllocationContexts) {
219
for (UDATA i = 0; i < _managedAllocationContextCount; i++) {
220
if (NULL != _managedAllocationContexts[i]) {
221
_managedAllocationContexts[i]->kill(env);
222
_managedAllocationContexts[i] = NULL;
223
}
224
}
225
226
env->getForge()->free(_managedAllocationContexts);
227
_managedAllocationContexts = NULL;
228
}
229
230
231
if (NULL != _perNodeContextSets) {
232
env->getForge()->free(_perNodeContextSets);
233
_perNodeContextSets = NULL;
234
}
235
236
_runtimeExecManager.tearDown(env);
237
238
239
MM_GlobalAllocationManager::tearDown(env);
240
}
241
242
/**
243
* Print current counters for AC region count and resets the counters afterwards
244
*/
245
void
246
MM_GlobalAllocationManagerTarok::printAllocationContextStats(MM_EnvironmentBase *env, UDATA eventNum, J9HookInterface** hookInterface)
247
{
248
PORT_ACCESS_FROM_ENVIRONMENT(env);
249
UDATA totalRegionCount = 0;
250
const char *eventName = " ";
251
J9HookInterface** externalHookInterface = MM_GCExtensions::getExtensions(env)->getOmrHookInterface();
252
253
if ((eventNum == J9HOOK_MM_OMR_GLOBAL_GC_START) && (hookInterface == externalHookInterface)) {
254
eventName = "GCSTART";
255
} else if ((eventNum == J9HOOK_MM_OMR_GLOBAL_GC_END) && (hookInterface == externalHookInterface)) {
256
eventName = "GCEND ";
257
} else {
258
Assert_MM_unreachable();
259
}
260
261
for (UDATA i = 0; i < _managedAllocationContextCount; i++) {
262
MM_AllocationContextTarok *ac = (MM_AllocationContextTarok *)_managedAllocationContexts[i];
263
ac->resetRegionCount(MM_HeapRegionDescriptor::ADDRESS_ORDERED);
264
ac->resetRegionCount(MM_HeapRegionDescriptor::ADDRESS_ORDERED_IDLE);
265
ac->resetRegionCount(MM_HeapRegionDescriptor::ADDRESS_ORDERED_MARKED);
266
ac->resetThreadCount();
267
}
268
269
GC_VMThreadListIterator threadIterator((J9JavaVM *)env->getLanguageVM());
270
J9VMThread * walkThread = NULL;
271
while (NULL != (walkThread = threadIterator.nextVMThread())) {
272
MM_EnvironmentBase *envThread = MM_EnvironmentBase::getEnvironment(walkThread->omrVMThread);
273
if ((envThread->getThreadType() == MUTATOR_THREAD)) {
274
((MM_AllocationContextTarok *)envThread->getAllocationContext())->incThreadCount();
275
}
276
}
277
278
GC_HeapRegionIteratorVLHGC regionIterator(MM_GCExtensions::getExtensions(env)->heapRegionManager);
279
MM_HeapRegionDescriptorVLHGC *region = NULL;
280
while (NULL != (region = regionIterator.nextRegion())) {
281
if (NULL != region->getMemoryPool()) {
282
region->_allocateData._owningContext->incRegionCount(region->getRegionType());
283
}
284
}
285
286
for (UDATA i = 0; i < _managedAllocationContextCount; i++) {
287
MM_AllocationContextTarok *ac = (MM_AllocationContextTarok *)_managedAllocationContexts[i];
288
UDATA acRegionCount = ac->getRegionCount(MM_HeapRegionDescriptor::ADDRESS_ORDERED);
289
acRegionCount += ac->getRegionCount(MM_HeapRegionDescriptor::ADDRESS_ORDERED_IDLE);
290
acRegionCount += ac->getRegionCount(MM_HeapRegionDescriptor::ADDRESS_ORDERED_MARKED);
291
totalRegionCount += acRegionCount;
292
UDATA localCount = 0;
293
UDATA foreignCount = 0;
294
ac->getRegionCount(&localCount, &foreignCount);
295
296
j9tty_printf(PORTLIB, "AC %3d %s MPAOL regionCount %5d (AO %5d AO_IDLE %5d AO_MARKED %5d) mutatorCount %3d numaNode %d (%d local, %d foreign)\n",
297
i, eventName, acRegionCount, ac->getRegionCount(MM_HeapRegionDescriptor::ADDRESS_ORDERED),
298
ac->getRegionCount(MM_HeapRegionDescriptor::ADDRESS_ORDERED_IDLE), ac->getRegionCount(MM_HeapRegionDescriptor::ADDRESS_ORDERED_MARKED),
299
ac->getThreadCount(), ac->getNumaNode(), localCount, foreignCount);
300
}
301
302
j9tty_printf(PORTLIB, "AC sum %s MPAOL regionCount %5d (total %d) \n", eventName, totalRegionCount, MM_GCExtensions::getExtensions(env)->heapRegionManager->getTableRegionCount());
303
}
304
305
UDATA
MM_GlobalAllocationManagerTarok::getActualFreeMemorySize()
{
	/* total free memory is the sum over every managed allocation context */
	UDATA freeMemoryTotal = 0;
	for (UDATA contextIndex = 0; contextIndex < _managedAllocationContextCount; contextIndex++) {
		MM_AllocationContextTarok *context = (MM_AllocationContextTarok *)_managedAllocationContexts[contextIndex];
		freeMemoryTotal += context->getFreeMemorySize();
	}
	return freeMemoryTotal;
}
314
315
UDATA
MM_GlobalAllocationManagerTarok::getFreeRegionCount()
{
	/* total free regions is the sum over every managed allocation context */
	UDATA regionTotal = 0;
	UDATA contextIndex = 0;
	while (contextIndex < _managedAllocationContextCount) {
		regionTotal += ((MM_AllocationContextTarok *)_managedAllocationContexts[contextIndex])->getFreeRegionCount();
		contextIndex += 1;
	}
	return regionTotal;
}
324
325
UDATA
MM_GlobalAllocationManagerTarok::getApproximateFreeMemorySize()
{
	/* no cheaper approximation is maintained — return the exact per-context sum */
	return getActualFreeMemorySize();
}
330
331
void
332
MM_GlobalAllocationManagerTarok::resetLargestFreeEntry()
333
{
334
for (UDATA i = 0; i < _managedAllocationContextCount; i++) {
335
((MM_AllocationContextTarok *)_managedAllocationContexts[i])->resetLargestFreeEntry();
336
}
337
}
338
void
339
MM_GlobalAllocationManagerTarok::expand(MM_EnvironmentBase *env, MM_HeapRegionDescriptorVLHGC *region)
340
{
341
/* we can only work on committed, free regions */
342
Assert_MM_true(region->isCommitted());
343
Assert_MM_true(MM_HeapRegionDescriptor::FREE == region->getRegionType());
344
345
/* find the node to which this region has been bound */
346
UDATA nodeNumber = region->getNumaNode();
347
MM_AllocationContextBalanced *targetContext = _perNodeContextSets[nodeNumber];
348
targetContext->addRegionToFreeList(env, region);
349
/* now "rotate the wheel" so that the next expansion into this node will be distributed to the next context */
350
_perNodeContextSets[nodeNumber] = targetContext->getNextSibling();
351
}
352
UDATA
MM_GlobalAllocationManagerTarok::getLargestFreeEntry()
{
	/* the manager-wide largest free entry is the maximum over all contexts */
	UDATA best = 0;
	for (UDATA contextIndex = 0; contextIndex < _managedAllocationContextCount; contextIndex++) {
		MM_AllocationContextTarok *context = (MM_AllocationContextTarok *)_managedAllocationContexts[contextIndex];
		UDATA candidate = context->getLargestFreeEntry();
		if (candidate > best) {
			best = candidate;
		}
	}
	return best;
}
362
void
363
MM_GlobalAllocationManagerTarok::resetHeapStatistics(bool globalCollect)
364
{
365
for (UDATA i = 0; i < _managedAllocationContextCount; i++) {
366
((MM_AllocationContextTarok *)_managedAllocationContexts[i])->resetHeapStatistics(globalCollect);
367
}
368
}
369
void
370
MM_GlobalAllocationManagerTarok::mergeHeapStats(MM_HeapStats *heapStats, UDATA includeMemoryType)
371
{
372
for (UDATA i = 0; i < _managedAllocationContextCount; i++) {
373
((MM_AllocationContextTarok *)_managedAllocationContexts[i])->mergeHeapStats(heapStats, includeMemoryType);
374
}
375
}
376
377
UDATA
MM_GlobalAllocationManagerTarok::calculateIdealTotalContextCount(MM_GCExtensions *extensions)
{
	/* currently the total is exactly the managed count — no extra context kinds exist */
	return calculateIdealManagedContextCount(extensions);
}
384
385
UDATA
MM_GlobalAllocationManagerTarok::calculateIdealManagedContextCount(MM_GCExtensionsBase *extensions)
{
	/* ideal: one context per NUMA affinity leader, plus the common context */
	UDATA desiredCount = 1 + extensions->_numaManager.getAffinityLeaderCount();
	/* heuristic -- ACs are permitted to waste up to 1/8th of the heap in slack regions.
	 * This number may need to be adjusted */
	UDATA regionCount = extensions->memoryMax / extensions->regionSize;
	UDATA cappedCount = regionCount / 8;
	/* always return at least one context, even on tiny heaps */
	return OMR_MAX(1, OMR_MIN(desiredCount, cappedCount));
}
395
396
397
MM_AllocationContextBalanced *
MM_GlobalAllocationManagerTarok::getAllocationContextForNumaNode(UDATA numaNode)
{
	/* linear scan of the managed contexts for one bound to the requested node;
	 * a matching context must exist or we assert */
	MM_AllocationContextBalanced *match = NULL;
	for (UDATA contextIndex = 0; (NULL == match) && (contextIndex < _managedAllocationContextCount); contextIndex++) {
		MM_AllocationContextBalanced *candidate = (MM_AllocationContextBalanced *)_managedAllocationContexts[contextIndex];
		if (candidate->getNumaNode() == numaNode) {
			match = candidate;
		}
	}
	Assert_MM_true(NULL != match);
	return match;
}
411
412
413