GitHub Repository: PojavLauncherTeam/openj9
Path: blob/master/runtime/gc_vlhgc/AllocationContextBalanced.cpp
/*******************************************************************************
 * Copyright (c) 1991, 2021 IBM Corp. and others
 *
 * This program and the accompanying materials are made available under
 * the terms of the Eclipse Public License 2.0 which accompanies this
 * distribution and is available at https://www.eclipse.org/legal/epl-2.0/
 * or the Apache License, Version 2.0 which accompanies this distribution and
 * is available at https://www.apache.org/licenses/LICENSE-2.0.
 *
 * This Source Code may also be made available under the following
 * Secondary Licenses when the conditions for such availability set
 * forth in the Eclipse Public License, v. 2.0 are satisfied: GNU
 * General Public License, version 2 with the GNU Classpath
 * Exception [1] and GNU General Public License, version 2 with the
 * OpenJDK Assembly Exception [2].
 *
 * [1] https://www.gnu.org/software/classpath/license.html
 * [2] http://openjdk.java.net/legal/assembly-exception.html
 *
 * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 OR GPL-2.0 WITH Classpath-exception-2.0 OR LicenseRef-GPL-2.0 WITH Assembly-exception
 *******************************************************************************/

#include "j9.h"
#include "j9cfg.h"
#include "j9protos.h"
#include "j9consts.h"
#include "modronopt.h"
#include "ModronAssertions.h"

#include "AllocationContextBalanced.hpp"

#include "AllocateDescription.hpp"
#include "AllocationContextTarok.hpp"
#include "CardTable.hpp"
#include "EnvironmentBase.hpp"
#include "EnvironmentVLHGC.hpp"
#include "HeapRegionDescriptorVLHGC.hpp"
#include "HeapRegionManager.hpp"
#include "MemoryPool.hpp"
#include "MemorySubSpaceTarok.hpp"
#include "ObjectAllocationInterface.hpp"

MM_AllocationContextBalanced *
MM_AllocationContextBalanced::newInstance(MM_EnvironmentBase *env, MM_MemorySubSpaceTarok *subspace, UDATA numaNode, UDATA allocationContextNumber)
{
	MM_AllocationContextBalanced *context = (MM_AllocationContextBalanced *)env->getForge()->allocate(sizeof(MM_AllocationContextBalanced), MM_AllocationCategory::FIXED, J9_GET_CALLSITE());
	if (context) {
		new(context) MM_AllocationContextBalanced(subspace, numaNode, allocationContextNumber);
		if (!context->initialize(env)) {
			context->kill(env);
			context = NULL;
		}
	}
	return context;
}

/**
 * Initialization.
 */
bool
MM_AllocationContextBalanced::initialize(MM_EnvironmentBase *env)
{
	if (!MM_AllocationContext::initialize(env)) {
		return false;
	}

	MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);
	if (!_contextLock.initialize(env, &extensions->lnrlOptions, "MM_AllocationContextBalanced:_contextLock")) {
		return false;
	}
	if (!_freeListLock.initialize(env, &extensions->lnrlOptions, "MM_AllocationContextBalanced:_freeListLock")) {
		return false;
	}

	UDATA freeProcessorNodeCount = 0;
	J9MemoryNodeDetail const *freeProcessorNodes = extensions->_numaManager.getFreeProcessorPool(&freeProcessorNodeCount);
	/* our local cache needs +1 since slot 0 is reserved for this context's own node */
	_freeProcessorNodeCount = freeProcessorNodeCount + 1;
	UDATA arraySizeInBytes = sizeof(UDATA) * _freeProcessorNodeCount;
	_freeProcessorNodes = (UDATA *)env->getForge()->allocate(arraySizeInBytes, MM_AllocationCategory::FIXED, J9_GET_CALLSITE());
	if (NULL != _freeProcessorNodes) {
		memset(_freeProcessorNodes, 0x0, arraySizeInBytes);
		_freeProcessorNodes[0] = getNumaNode();
		for (UDATA i = 0; i < freeProcessorNodeCount; i++) {
			/* we save at i+1 since index 0 is reserved */
			_freeProcessorNodes[i+1] = freeProcessorNodes[i].j9NodeNumber;
		}
	} else {
		return false;
	}

	_cachedReplenishPoint = this;
	_heapRegionManager = MM_GCExtensions::getExtensions(env)->heapRegionManager;

	return true;
}

/**
 * Shut down.
 */
void
MM_AllocationContextBalanced::tearDown(MM_EnvironmentBase *env)
{
	Assert_MM_true(NULL == _allocationRegion);
	Assert_MM_true(NULL == _nonFullRegions.peekFirstRegion());
	Assert_MM_true(NULL == _discardRegionList.peekFirstRegion());

	_contextLock.tearDown();
	_freeListLock.tearDown();

	if (NULL != _freeProcessorNodes) {
		env->getForge()->free(_freeProcessorNodes);
		_freeProcessorNodes = NULL;
	}

	MM_AllocationContext::tearDown(env);
}

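/**
 * Move every region this context owns for active allocation (the current allocation region,
 * the non-full list and the discard list) onto the flushed list and reduce the context's
 * free memory accounting to zero.
 */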
void
MM_AllocationContextBalanced::flushInternal(MM_EnvironmentBase *env)
{
	/* flush all the regions we own for active allocation */
	if (NULL != _allocationRegion) {
		MM_MemoryPool *pool = _allocationRegion->getMemoryPool();
		Assert_MM_true(NULL != pool);
		UDATA allocatableBytes = pool->getActualFreeMemorySize();
		_freeMemorySize -= allocatableBytes;
		_flushedRegions.insertRegion(_allocationRegion);
		_allocationRegion = NULL;
		Trc_MM_AllocationContextBalanced_flushInternal_clearAllocationRegion(env->getLanguageVMThread(), this);
	}
	MM_HeapRegionDescriptorVLHGC *walk = _nonFullRegions.peekFirstRegion();
	while (NULL != walk) {
		Assert_MM_true(this == walk->_allocateData._owningContext);
		MM_HeapRegionDescriptorVLHGC *next = _nonFullRegions.peekRegionAfter(walk);
		_nonFullRegions.removeRegion(walk);
		MM_MemoryPool *pool = walk->getMemoryPool();
		Assert_MM_true(NULL != pool);
		UDATA allocatableBytes = pool->getActualFreeMemorySize();
		_freeMemorySize -= allocatableBytes;
		_flushedRegions.insertRegion(walk);
		walk = next;
	}
	/* flush all the regions we own which are no longer candidates for allocation */
	walk = _discardRegionList.peekFirstRegion();
	while (NULL != walk) {
		Assert_MM_true(this == walk->_allocateData._owningContext);
		MM_HeapRegionDescriptorVLHGC *next = _discardRegionList.peekRegionAfter(walk);
		_discardRegionList.removeRegion(walk);
		MM_MemoryPool *pool = walk->getMemoryPool();
		Assert_MM_true(NULL != pool);
		pool->recalculateMemoryPoolStatistics(env);
		_flushedRegions.insertRegion(walk);
		walk = next;
	}
	_cachedReplenishPoint = this;
	Assert_MM_true(0 == _freeMemorySize);
}

void
MM_AllocationContextBalanced::flush(MM_EnvironmentBase *env)
{
	flushInternal(env);
}

void
MM_AllocationContextBalanced::flushForShutdown(MM_EnvironmentBase *env)
{
	flushInternal(env);
}

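/**
 * Allocate a TLH under the context lock, replenishing the active region on failure and, when
 * shouldCollectOnFailure is set, falling back to the subspace's collect-and-retry path.
 */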
void *
MM_AllocationContextBalanced::allocateTLH(MM_EnvironmentBase *env, MM_AllocateDescription *allocateDescription, MM_ObjectAllocationInterface *objectAllocationInterface, bool shouldCollectOnFailure)
{
	void *result = NULL;
	lockCommon();
	result = lockedAllocateTLH(env, allocateDescription, objectAllocationInterface);
	/* if we failed, try to replenish */
	if (NULL == result) {
		result = lockedReplenishAndAllocate(env, objectAllocationInterface, allocateDescription, MM_MemorySubSpace::ALLOCATION_TYPE_TLH);
	}
	unlockCommon();
	/* if that still fails, try to invoke the collector */
	if (shouldCollectOnFailure && (NULL == result)) {
		result = _subspace->replenishAllocationContextFailed(env, _subspace, this, objectAllocationInterface, allocateDescription, MM_MemorySubSpace::ALLOCATION_TYPE_TLH);
	}
	return result;
}

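/**
 * TLH allocation with the context lock held: try the current allocation region first, then walk
 * the non-full list, discarding regions which can no longer satisfy a TLH and promoting a
 * successful region back to the fast path.
 */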
void *
MM_AllocationContextBalanced::lockedAllocateTLH(MM_EnvironmentBase *env, MM_AllocateDescription *allocateDescription, MM_ObjectAllocationInterface *objectAllocationInterface)
{
	void *result = NULL;
	/* first, try allocating the TLH in our _allocationRegion (fast-path) */
	if (NULL != _allocationRegion) {
		MM_MemoryPool *memoryPool = _allocationRegion->getMemoryPool();
		Assert_MM_true(NULL != memoryPool);
		UDATA spaceBefore = memoryPool->getActualFreeMemorySize();
		result = objectAllocationInterface->allocateTLH(env, allocateDescription, _subspace, memoryPool);
		UDATA spaceAfter = memoryPool->getActualFreeMemorySize();
		if (NULL == result) {
			/* this region isn't useful so remove it from our list for consideration and add it to our discard list */
			Assert_MM_true(spaceAfter < memoryPool->getMinimumFreeEntrySize());
			Assert_MM_true(spaceBefore == spaceAfter);
			_freeMemorySize -= spaceBefore;
			_discardRegionList.insertRegion(_allocationRegion);
			_allocationRegion = NULL;
			Trc_MM_AllocationContextBalanced_lockedAllocateTLH_clearAllocationRegion(env->getLanguageVMThread(), this);
		} else {
			Assert_MM_true(spaceBefore > spaceAfter);
			_freeMemorySize -= (spaceBefore - spaceAfter);
		}
	}
	/* if we couldn't satisfy the allocate, go to the non-full region list (slow-path) before failing over into replenishment */
	if (NULL == result) {
		/* scan through our regions which are still active for allocation and attempt the TLH allocation in each. Any which are too full or fragmented to satisfy a TLH allocation must be moved to the "discard" list so we won't consider them for allocation until after the next collection */
		MM_HeapRegionDescriptorVLHGC *region = _nonFullRegions.peekFirstRegion();
		while ((NULL == result) && (NULL != region)) {
			MM_MemoryPool *memoryPool = region->getMemoryPool();
			Assert_MM_true(NULL != memoryPool);
			UDATA spaceBefore = memoryPool->getActualFreeMemorySize();
			result = objectAllocationInterface->allocateTLH(env, allocateDescription, _subspace, memoryPool);
			UDATA spaceAfter = memoryPool->getActualFreeMemorySize();
			MM_HeapRegionDescriptorVLHGC *next = _nonFullRegions.peekRegionAfter(region);
			/* remove this region from the list since we are either discarding it or re-promoting it to the fast-path */
			_nonFullRegions.removeRegion(region);
			if (NULL == result) {
				/* this region isn't useful so remove it from our list for consideration and add it to our discard list */
				Assert_MM_true(spaceAfter < memoryPool->getMinimumFreeEntrySize());
				Assert_MM_true(spaceBefore == spaceAfter);
				_freeMemorySize -= spaceBefore;
				_discardRegionList.insertRegion(region);
			} else {
				Assert_MM_true(spaceBefore > spaceAfter);
				_freeMemorySize -= (spaceBefore - spaceAfter);
				/* we succeeded so this region is a good choice for future fast-path allocations */
				Assert_MM_true(NULL == _allocationRegion);
				_allocationRegion = region;
				Trc_MM_AllocationContextBalanced_lockedAllocateTLH_setAllocationRegion(env->getLanguageVMThread(), this, region);
			}
			region = next;
		}
	}
	return result;
}

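/**
 * Allocate a single object under the context lock, replenishing the active region on failure
 * and, when shouldCollectOnFailure is set, falling back to the subspace's collect-and-retry
 * path. On success, the allocate description is stamped with the subspace and object flags.
 */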
void *
MM_AllocationContextBalanced::allocateObject(MM_EnvironmentBase *env, MM_AllocateDescription *allocateDescription, bool shouldCollectOnFailure)
{
	void *result = NULL;
	lockCommon();
	result = lockedAllocateObject(env, allocateDescription);
	/* if we failed, try to replenish */
	if (NULL == result) {
		result = lockedReplenishAndAllocate(env, NULL, allocateDescription, MM_MemorySubSpace::ALLOCATION_TYPE_OBJECT);
	}
	unlockCommon();
	/* if that still fails, try to invoke the collector */
	if (shouldCollectOnFailure && (NULL == result)) {
		result = _subspace->replenishAllocationContextFailed(env, _subspace, this, NULL, allocateDescription, MM_MemorySubSpace::ALLOCATION_TYPE_OBJECT);
	}
	if (NULL != result) {
		allocateDescription->setObjectFlags(_subspace->getObjectFlags());
		allocateDescription->setMemorySubSpace(_subspace);
	}
	return result;
}

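/**
 * Object allocation with the context lock held: try the current allocation region, moving it to
 * the non-full list if it cannot satisfy the request (a later TLH allocate may still consume its
 * remaining space), then walk the non-full list without discarding any region.
 */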
void *
MM_AllocationContextBalanced::lockedAllocateObject(MM_EnvironmentBase *env, MM_AllocateDescription *allocateDescription)
{
	Assert_MM_true(allocateDescription->getContiguousBytes() <= _heapRegionManager->getRegionSize());

	void *result = NULL;
	/* first, try allocating the object in our _allocationRegion (fast-path) */
	if (NULL != _allocationRegion) {
		MM_MemoryPool *memoryPool = _allocationRegion->getMemoryPool();
		Assert_MM_true(NULL != memoryPool);
		UDATA spaceBefore = memoryPool->getActualFreeMemorySize();
		result = memoryPool->allocateObject(env, allocateDescription);
		UDATA spaceAfter = memoryPool->getActualFreeMemorySize();
		if (NULL == result) {
			Assert_MM_true(spaceBefore == spaceAfter);
			/* if we failed the allocate, move the region into the non-full list since a TLH allocate can consume any space remaining, prior to discarding */
			_nonFullRegions.insertRegion(_allocationRegion);
			_allocationRegion = NULL;
			Trc_MM_AllocationContextBalanced_lockedAllocateObject_clearAllocationRegion(env->getLanguageVMThread(), this);
		} else {
			Assert_MM_true(spaceBefore > spaceAfter);
			_freeMemorySize -= (spaceBefore - spaceAfter);
		}
	}
	/* if we couldn't satisfy the allocate, go to the non-full region list (slow-path) before failing over into replenishment */
	if (NULL == result) {
		Assert_MM_true(NULL == _allocationRegion);
		/* scan through our active region list and attempt the allocation in each. Failing to satisfy a one-off object allocation, such as this, will not force a region into the discard list, however */
		MM_HeapRegionDescriptorVLHGC *region = _nonFullRegions.peekFirstRegion();
		while ((NULL == result) && (NULL != region)) {
			MM_MemoryPool *memoryPool = region->getMemoryPool();
			Assert_MM_true(NULL != memoryPool);
			UDATA spaceBefore = memoryPool->getActualFreeMemorySize();
			result = memoryPool->allocateObject(env, allocateDescription);
			if (NULL != result) {
				UDATA spaceAfter = memoryPool->getActualFreeMemorySize();
				Assert_MM_true(spaceBefore > spaceAfter);
				_freeMemorySize -= (spaceBefore - spaceAfter);
			}
			region = _nonFullRegions.peekRegionAfter(region);
		}
	} else {
		Assert_MM_true(NULL != _allocationRegion);
	}
	return result;
}

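/**
 * Allocate one region-sized arraylet leaf. Leaf regions are not cached by this context, so the
 * request always goes through lockedReplenishAndAllocate (and, if that fails and
 * shouldCollectOnFailure is set, the collector). The returned leaf is zeroed outside the locks.
 */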
void *
MM_AllocationContextBalanced::allocateArrayletLeaf(MM_EnvironmentBase *env, MM_AllocateDescription *allocateDescription, bool shouldCollectOnFailure)
{
	/* this AC implementation doesn't try to cache leaf regions so just call into the subspace to hand us a region and then we will use it in lockedAllocateArrayletLeaf */
	void *result = NULL;
	lockCommon();
	result = lockedReplenishAndAllocate(env, NULL, allocateDescription, MM_MemorySubSpace::ALLOCATION_TYPE_LEAF);
	unlockCommon();
	/* if that fails, try to invoke the collector */
	if (shouldCollectOnFailure && (NULL == result)) {
		result = _subspace->replenishAllocationContextFailed(env, _subspace, this, NULL, allocateDescription, MM_MemorySubSpace::ALLOCATION_TYPE_LEAF);
	}
	if (NULL != result) {
		/* zero the leaf here since we are not under any of the context or exclusive locks */
		OMRZeroMemory(result, _heapRegionManager->getRegionSize());
	}
	return result;
}

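/**
 * Convert the given free region into an arraylet leaf for the spine described by
 * allocateDescription and chain it onto the spine region's leaf list, taking the common
 * context's lock when the spine region has migrated to the common context.
 */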
void *
MM_AllocationContextBalanced::lockedAllocateArrayletLeaf(MM_EnvironmentBase *envBase, MM_AllocateDescription *allocateDescription, MM_HeapRegionDescriptorVLHGC *freeRegionForArrayletLeaf)
{
	MM_EnvironmentVLHGC *env = MM_EnvironmentVLHGC::getEnvironment(envBase);

	Assert_MM_true(NULL != freeRegionForArrayletLeaf);
	Assert_MM_true(MM_HeapRegionDescriptor::FREE == freeRegionForArrayletLeaf->getRegionType());

	J9IndexableObject *spine = allocateDescription->getSpine();
	Assert_MM_true(NULL != spine);

	/* cache the allocate data pointer since we need to use it in several operations */
	MM_HeapRegionDataForAllocate *leafAllocateData = &(freeRegionForArrayletLeaf->_allocateData);
	/* ask the region to become a leaf type */
	leafAllocateData->taskAsArrayletLeaf(env);
	/* look up the spine region since we need to add this region to its leaf list */
	MM_HeapRegionDescriptorVLHGC *spineRegion = (MM_HeapRegionDescriptorVLHGC *)_heapRegionManager->tableDescriptorForAddress(spine);
	/* the leaf requires a pointer back to the spine object so that it can verify its liveness elsewhere in the collector */
	leafAllocateData->setSpine(spine);
	freeRegionForArrayletLeaf->resetAge(env, (U_64)_subspace->getBytesRemainingBeforeTaxation());
	/* add the leaf to the spine region's leaf list */
	/* We own the lock on the spine region's context when this call is made, so we can safely manipulate this list.
	 * An exceptional scenario: a thread allocates a spine (and possibly a few arraylets) but does not complete the allocation. A global GC (or a series of regular PGCs) occurs
	 * that ages regions out to max age. The spine moves into a common context. Now, we successfully resume the leaf allocation, but the common lock that
	 * we already hold is no longer sufficient. We must additionally acquire the common context's common lock, since multiple spines from different ACs could have reached this state,
	 * and, worse, multiple spines originally allocated from different ACs may end up in a single common context region.
	 */

	MM_AllocationContextTarok *spineContext = spineRegion->_allocateData._owningContext;
	if (this != spineContext) {
		Assert_MM_true(env->getCommonAllocationContext() == spineContext);
		/* The common allocation context is always an instance of AllocationContextBalanced */
		((MM_AllocationContextBalanced *)spineContext)->lockCommon();
	}

	leafAllocateData->addToArrayletLeafList(spineRegion);

	if (this != spineContext) {
		/* The common allocation context is always an instance of AllocationContextBalanced */
		((MM_AllocationContextBalanced *)spineContext)->unlockCommon();
	}

	/* return the base address of the leaf (the caller zeroes the leaf memory) */
	return freeRegionForArrayletLeaf->getLowAddress();
}

MM_HeapRegionDescriptorVLHGC *
MM_AllocationContextBalanced::collectorAcquireRegion(MM_EnvironmentBase *env)
{
	MM_HeapRegionDescriptorVLHGC *region = NULL;
	region = internalCollectorAcquireRegion(env);
	return region;
}

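/**
 * Acquire a completely empty region on behalf of the collector: replenish the active region
 * without paying taxation, expanding the subspace and retrying as required, then immediately
 * move the acquired region to the flushed list so mutators cannot allocate from it.
 */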
MM_HeapRegionDescriptorVLHGC *
MM_AllocationContextBalanced::internalCollectorAcquireRegion(MM_EnvironmentBase *env)
{
	MM_HeapRegionDescriptorVLHGC *region = NULL;

	lockCommon();
	Assert_MM_true(NULL == _nonFullRegions.peekFirstRegion());
	do {
		Assert_MM_true(NULL == _allocationRegion);
		region = internalReplenishActiveRegion(env, false);
		/* If we failed to acquire a region, attempt to expand.
		 * Even after a successful expansion, we are not yet guaranteed that a retry of the replenish will succeed,
		 * since a thread from another AC may steal the expanded regions. Thus we keep expanding
		 * until we succeed in replenishing or no more expansion is possible.
		 * This AC may not be the one receiving the expanded region, so this problem exists even without
		 * stealing.
		 */
	} while ((NULL == region) && (0 != _subspace->collectorExpand(env)));

	if (NULL != region) {
		Assert_MM_true(NULL == _nonFullRegions.peekFirstRegion());
		Assert_MM_true(region == _allocationRegion);
		UDATA regionSize = _heapRegionManager->getRegionSize();
		_freeMemorySize -= regionSize;

		_allocationRegion = NULL;
		Trc_MM_AllocationContextBalanced_internalCollectorAcquireRegion_clearAllocationRegion(env->getLanguageVMThread(), this);
		Assert_MM_true(NULL != region->getMemoryPool());
		_flushedRegions.insertRegion(region);
	}
	unlockCommon();

	return region;
}

void *
MM_AllocationContextBalanced::allocate(MM_EnvironmentBase *env, MM_ObjectAllocationInterface *objectAllocationInterface, MM_AllocateDescription *allocateDescription, MM_MemorySubSpace::AllocationType allocationType)
{
	void *result = NULL;
	switch (allocationType) {
	case MM_MemorySubSpace::ALLOCATION_TYPE_TLH:
		result = allocateTLH(env, allocateDescription, objectAllocationInterface, false);
		break;
	case MM_MemorySubSpace::ALLOCATION_TYPE_OBJECT:
		result = allocateObject(env, allocateDescription, false);
		break;
	case MM_MemorySubSpace::ALLOCATION_TYPE_LEAF:
		result = allocateArrayletLeaf(env, allocateDescription, false);
		break;
	default:
		Assert_MM_unreachable();
		break;
	}
	return result;
}

void *
MM_AllocationContextBalanced::lockedAllocate(MM_EnvironmentBase *env, MM_ObjectAllocationInterface *objectAllocationInterface, MM_AllocateDescription *allocateDescription, MM_MemorySubSpace::AllocationType allocationType)
{
	void *result = NULL;
	switch (allocationType) {
	case MM_MemorySubSpace::ALLOCATION_TYPE_OBJECT:
		result = lockedAllocateObject(env, allocateDescription);
		break;
	case MM_MemorySubSpace::ALLOCATION_TYPE_TLH:
		result = lockedAllocateTLH(env, allocateDescription, objectAllocationInterface);
		break;
	case MM_MemorySubSpace::ALLOCATION_TYPE_LEAF:
		/* callers allocating an arraylet leaf should call lockedAllocateArrayletLeaf() directly */
		Assert_MM_unreachable();
		break;
	default:
		Assert_MM_unreachable();
	}
	return result;
}

void
MM_AllocationContextBalanced::setNextSibling(MM_AllocationContextBalanced *sibling)
{
	Assert_MM_true(NULL == _nextSibling);
	_nextSibling = sibling;
	Assert_MM_true(NULL != _nextSibling);
}

void
MM_AllocationContextBalanced::setStealingCousin(MM_AllocationContextBalanced *cousin)
{
	Assert_MM_true(NULL == _stealingCousin);
	_stealingCousin = cousin;
	_nextToSteal = cousin;
	Assert_MM_true(NULL != _stealingCousin);
}

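/**
 * Accept a region which is being returned to this (its original owning) context: an
 * address-ordered region is idled onto the idle list, while an arraylet leaf is returned to the
 * free list; in either case this context is re-established as the owner.
 */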
void
MM_AllocationContextBalanced::recycleRegion(MM_EnvironmentVLHGC *env, MM_HeapRegionDescriptorVLHGC *region)
{
	MM_HeapRegionDataForAllocate *allocateData = &region->_allocateData;
	MM_AllocationContextTarok *owningContext = allocateData->_owningContext;
	MM_AllocationContextTarok *originalOwningContext = allocateData->_originalOwningContext;
	Assert_MM_true((this == owningContext) || (this == originalOwningContext));
	Assert_MM_true(region->getNumaNode() == getNumaNode());
	if (NULL == originalOwningContext) {
		originalOwningContext = owningContext;
	}
	Assert_MM_true(this == originalOwningContext);

	/* the region is being returned to us, set the fields appropriately before returning it to the list */
	allocateData->_originalOwningContext = NULL;
	allocateData->_owningContext = this;

	switch (region->getRegionType()) {
	case MM_HeapRegionDescriptor::ADDRESS_ORDERED:
	case MM_HeapRegionDescriptor::ADDRESS_ORDERED_MARKED:
	{
		owningContext->removeRegionFromFlushedList(region);
		allocateData->taskAsIdlePool(env);
		_freeListLock.acquire();
		_idleMPRegions.insertRegion(region);
		_freeListLock.release();
		MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);
		if (extensions->tarokEnableExpensiveAssertions) {
			void *low = region->getLowAddress();
			void *high = region->getHighAddress();
			MM_CardTable *cardTable = extensions->cardTable;
			Card *card = cardTable->heapAddrToCardAddr(env, low);
			Card *toCard = cardTable->heapAddrToCardAddr(env, high);

			while (card < toCard) {
				Assert_MM_true(CARD_CLEAN == *card);
				card += 1;
			}
		}
	}
		break;
	case MM_HeapRegionDescriptor::ARRAYLET_LEAF:
		Assert_MM_true(NULL == allocateData->getNextArrayletLeafRegion());
		Assert_MM_true(NULL == allocateData->getSpine());

		if (MM_GCExtensions::getExtensions(env)->tarokDebugEnabled) {
			/* poison the unused region so we can identify it in a crash (to be removed when 1953 is stable) */
			memset(region->getLowAddress(), 0x0F, region->getSize());
		}
		allocateData->taskAsFreePool(env);
		/* now, return the region to our free list */
		addRegionToFreeList(env, region);
		break;
	case MM_HeapRegionDescriptor::FREE:
		/* calling recycle on a free region implies an incorrect assumption in the caller */
		Assert_MM_unreachable();
		break;
	default:
		Assert_MM_unreachable();
	}
}

void
MM_AllocationContextBalanced::tearDownRegion(MM_EnvironmentBase *env, MM_HeapRegionDescriptorVLHGC *region)
{
	MM_MemoryPoolAddressOrderedList *memoryPool = (MM_MemoryPoolAddressOrderedList *)region->getMemoryPool();
	if (NULL != memoryPool) {
		memoryPool->tearDown(env);
		region->setMemoryPool(NULL);
	}
}

void
MM_AllocationContextBalanced::addRegionToFreeList(MM_EnvironmentBase *env, MM_HeapRegionDescriptorVLHGC *region)
{
	Assert_MM_true(MM_HeapRegionDescriptor::FREE == region->getRegionType());
	Assert_MM_true(getNumaNode() == region->getNumaNode());
	Assert_MM_true(NULL == region->_allocateData._originalOwningContext);
	_freeListLock.acquire();
	_freeRegions.insertRegion(region);
	_freeListLock.release();
}

void
MM_AllocationContextBalanced::resetLargestFreeEntry()
{
	lockCommon();
	if (NULL != _allocationRegion) {
		_allocationRegion->getMemoryPool()->resetLargestFreeEntry();
	}
	MM_HeapRegionDescriptorVLHGC *region = _nonFullRegions.peekFirstRegion();
	while (NULL != region) {
		region->getMemoryPool()->resetLargestFreeEntry();
		region = _nonFullRegions.peekRegionAfter(region);
	}
	region = _discardRegionList.peekFirstRegion();
	while (NULL != region) {
		region->getMemoryPool()->resetLargestFreeEntry();
		region = _discardRegionList.peekRegionAfter(region);
	}
	region = _flushedRegions.peekFirstRegion();
	while (NULL != region) {
		region->getMemoryPool()->resetLargestFreeEntry();
		region = _flushedRegions.peekRegionAfter(region);
	}
	unlockCommon();
}

UDATA
MM_AllocationContextBalanced::getLargestFreeEntry()
{
	UDATA largest = 0;

	lockCommon();
	/* if we have a free region, largest free entry is the region size */
	MM_HeapRegionDescriptorVLHGC *free = _idleMPRegions.peekFirstRegion();
	if (NULL == free) {
		free = _freeRegions.peekFirstRegion();
	}
	if (NULL != free) {
		largest = free->getSize();
	} else {
		if (NULL != _allocationRegion) {
			MM_MemoryPool *memoryPool = _allocationRegion->getMemoryPool();
			Assert_MM_true(NULL != memoryPool);
			UDATA candidate = memoryPool->getLargestFreeEntry();
			largest = candidate;
		}
		MM_HeapRegionDescriptorVLHGC *region = _nonFullRegions.peekFirstRegion();
		while (NULL != region) {
			MM_MemoryPool *memoryPool = region->getMemoryPool();
			Assert_MM_true(NULL != memoryPool);
			UDATA candidate = memoryPool->getLargestFreeEntry();
			largest = OMR_MAX(largest, candidate);
			region = _nonFullRegions.peekRegionAfter(region);
		}
		region = _flushedRegions.peekFirstRegion();
		while (NULL != region) {
			MM_MemoryPool *memoryPool = region->getMemoryPool();
			Assert_MM_true(NULL != memoryPool);
			UDATA candidate = memoryPool->getLargestFreeEntry();
			largest = OMR_MAX(largest, candidate);
			region = _flushedRegions.peekRegionAfter(region);
		}
	}
	unlockCommon();

	return largest;
}

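/**
 * Acquire a region backed by an active memory pool, preferring this context's NUMA node and then
 * "stealing" from cousin contexts on other nodes, recording the original owner on any stolen
 * region so that it can later be identified as foreign.
 */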
MM_HeapRegionDescriptorVLHGC *
MM_AllocationContextBalanced::acquireMPRegionFromHeap(MM_EnvironmentBase *env, MM_MemorySubSpace *subspace, MM_AllocationContextTarok *requestingContext)
{
	MM_HeapRegionDescriptorVLHGC *region = acquireMPRegionFromNode(env, subspace, requestingContext);

	/* _nextToSteal will be this if NUMA is not enabled */
	if ((NULL == region) && (_nextToSteal != this)) {
		MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);
		Assert_MM_true(0 != extensions->_numaManager.getAffinityLeaderCount());
		/* we didn't get any memory yet we are in a NUMA system so we should steal from a foreign node */
		MM_AllocationContextBalanced *firstTheftAttempt = _nextToSteal;
		do {
			region = _nextToSteal->acquireMPRegionFromNode(env, subspace, requestingContext);
			if (NULL != region) {
				/* make sure that we record the original owner so that the region can be identified as foreign */
				Assert_MM_true(NULL == region->_allocateData._originalOwningContext);
				region->_allocateData._originalOwningContext = _nextToSteal;
			}
			/* advance to the next node whether we succeeded or not since we want to distribute our "theft" as evenly as possible */
			_nextToSteal = _nextToSteal->getStealingCousin();
			if (this == _nextToSteal) {
				/* never try to steal from ourselves since that wouldn't be possible and the code interprets this case as a uniform system */
				_nextToSteal = _nextToSteal->getStealingCousin();
			}
		} while ((NULL == region) && (firstTheftAttempt != _nextToSteal));
	}

	return region;
}

MM_HeapRegionDescriptorVLHGC *
MM_AllocationContextBalanced::acquireFreeRegionFromHeap(MM_EnvironmentBase *env)
{
	MM_HeapRegionDescriptorVLHGC *region = acquireFreeRegionFromNode(env);

	/* _nextToSteal will be this if NUMA is not enabled */
	if ((NULL == region) && (_nextToSteal != this)) {
		MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);
		Assert_MM_true(0 != extensions->_numaManager.getAffinityLeaderCount());
		/* we didn't get any memory yet we are in a NUMA system so we should steal from a foreign node */
		MM_AllocationContextBalanced *firstTheftAttempt = _nextToSteal;
		do {
			region = _nextToSteal->acquireFreeRegionFromNode(env);
			if (NULL != region) {
				region->_allocateData._originalOwningContext = _nextToSteal;
			}
			/* advance to the next node whether we succeeded or not since we want to distribute our "theft" as evenly as possible */
			_nextToSteal = _nextToSteal->getStealingCousin();
			if (this == _nextToSteal) {
				/* never try to steal from ourselves since that wouldn't be possible and the code interprets this case as a uniform system */
				_nextToSteal = _nextToSteal->getStealingCousin();
			}
		} while ((NULL == region) && (firstTheftAttempt != _nextToSteal));
	}

	return region;
}

MM_HeapRegionDescriptorVLHGC *
MM_AllocationContextBalanced::acquireMPRegionFromNode(MM_EnvironmentBase *env, MM_MemorySubSpace *subSpace, MM_AllocationContextTarok *requestingContext)
{
	Trc_MM_AllocationContextBalanced_acquireMPBPRegionFromNode_Entry(env->getLanguageVMThread(), this, requestingContext);
	/* this can only be called on the context itself or through stealing cousin relationships */
	Assert_MM_true((this == requestingContext) || (getNumaNode() != requestingContext->getNumaNode()));

	MM_HeapRegionDescriptorVLHGC *region = _cachedReplenishPoint->acquireMPRegionFromContext(env, subSpace, requestingContext);
	MM_AllocationContextBalanced *targetContext = _cachedReplenishPoint->getNextSibling();
	while ((NULL == region) && (targetContext != this)) {
		region = targetContext->acquireMPRegionFromContext(env, subSpace, requestingContext);
		if (NULL != region) {
			_cachedReplenishPoint = targetContext;
		}
		targetContext = targetContext->getNextSibling();
	}
	if (NULL != region) {
		/* Regions made available for allocation are identified by their region type (ADDRESS_ORDERED, as opposed to ADDRESS_ORDERED_MARKED) */
		Assert_MM_true(MM_HeapRegionDescriptor::ADDRESS_ORDERED == region->getRegionType());
		Assert_MM_true(requestingContext == region->_allocateData._owningContext);
		Assert_MM_true(getNumaNode() == region->getNumaNode());
	}
	Trc_MM_AllocationContextBalanced_acquireMPBPRegionFromNode_Exit(env->getLanguageVMThread(), region);
	return region;
}

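/**
 * Take one region from this context's idle or free lists and make its memory pool ready for
 * allocation on behalf of requestingContext: a FREE region is tasked as a new pool and expanded,
 * while an ADDRESS_ORDERED_IDLE region has its free list rebuilt.
 */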
MM_HeapRegionDescriptorVLHGC *
MM_AllocationContextBalanced::acquireMPRegionFromContext(MM_EnvironmentBase *envBase, MM_MemorySubSpace *subSpace, MM_AllocationContextTarok *requestingContext)
{
	MM_EnvironmentVLHGC *env = MM_EnvironmentVLHGC::getEnvironment(envBase);

	_freeListLock.acquire();
	MM_HeapRegionDescriptorVLHGC *region = _idleMPRegions.peekFirstRegion();
	if (NULL != region) {
		_idleMPRegions.removeRegion(region);
	} else {
		region = _freeRegions.peekFirstRegion();
		if (NULL != region) {
			_freeRegions.removeRegion(region);
		}
	}
	_freeListLock.release();
	if (NULL != region) {
		if (MM_HeapRegionDescriptor::FREE == region->getRegionType()) {
			if (region->_allocateData.taskAsMemoryPool(env, requestingContext)) {
				/* this is a new region. Initialize it for the given pool */
				region->resetAge(env, (U_64)_subspace->getBytesRemainingBeforeTaxation());
				MM_MemoryPool *mpaol = region->getMemoryPool();
				mpaol->setSubSpace(subSpace);
				mpaol->expandWithRange(env, region->getSize(), region->getLowAddress(), region->getHighAddress(), false);
				mpaol->recalculateMemoryPoolStatistics(env);
			} else {
				/* something went wrong so put the region back in the free list and return NULL (even though the region might have been found in another context, where we put it back is largely arbitrary and this path should never actually be taken) */
				addRegionToFreeList(env, region);
				region = NULL;
			}
		} else if (MM_HeapRegionDescriptor::ADDRESS_ORDERED_IDLE == region->getRegionType()) {
			bool success = region->_allocateData.taskAsMemoryPool(env, requestingContext);
			/* we can't fail to convert an IDLE region to an active one */
			Assert_MM_true(success);
			/* also add this region into our owned region list */
			region->resetAge(env, (U_64)_subspace->getBytesRemainingBeforeTaxation());
			region->_allocateData._owningContext = requestingContext;
			MM_MemoryPool *pool = region->getMemoryPool();
			Assert_MM_true(subSpace == pool->getSubSpace());
			pool->rebuildFreeListInRegion(env, region, NULL);
			pool->recalculateMemoryPoolStatistics(env);
			Assert_MM_true(pool->getLargestFreeEntry() == region->getSize());
		} else {
			Assert_MM_unreachable();
		}
		if (NULL != region) {
			Assert_MM_true(getNumaNode() == region->getNumaNode());
			Assert_MM_true(NULL == region->_allocateData._originalOwningContext);
		}
	}
	return region;
}

MM_HeapRegionDescriptorVLHGC *
MM_AllocationContextBalanced::acquireFreeRegionFromNode(MM_EnvironmentBase *env)
{
	MM_HeapRegionDescriptorVLHGC *region = _cachedReplenishPoint->acquireFreeRegionFromContext(env);
	MM_AllocationContextBalanced *targetContext = _cachedReplenishPoint->getNextSibling();
	while ((NULL == region) && (targetContext != this)) {
		region = targetContext->acquireFreeRegionFromContext(env);
		if (NULL != region) {
			_cachedReplenishPoint = targetContext;
		}
		targetContext = targetContext->getNextSibling();
	}
	if (NULL != region) {
		Assert_MM_true(getNumaNode() == region->getNumaNode());
	}
	return region;
}

MM_HeapRegionDescriptorVLHGC *
MM_AllocationContextBalanced::acquireFreeRegionFromContext(MM_EnvironmentBase *env)
{
	_freeListLock.acquire();
	MM_HeapRegionDescriptorVLHGC *region = _freeRegions.peekFirstRegion();
	if (NULL != region) {
		_freeRegions.removeRegion(region);
	} else {
		region = _idleMPRegions.peekFirstRegion();
		if (NULL != region) {
			_idleMPRegions.removeRegion(region);
			region->_allocateData.taskAsFreePool(env);
		}
	}
	_freeListLock.release();
	if (NULL != region) {
		Assert_MM_true(getNumaNode() == region->getNumaNode());
	}
	return region;
}

void
MM_AllocationContextBalanced::lockCommon()
{
	_contextLock.acquire();
}

void
MM_AllocationContextBalanced::unlockCommon()
{
	_contextLock.release();
}

UDATA
MM_AllocationContextBalanced::getFreeMemorySize()
{
	UDATA regionSize = _heapRegionManager->getRegionSize();
	UDATA freeRegions = getFreeRegionCount();
	return _freeMemorySize + (freeRegions * regionSize);
}

UDATA
MM_AllocationContextBalanced::getFreeRegionCount()
{
	return _idleMPRegions.listSize() + _freeRegions.listSize();
}

void
MM_AllocationContextBalanced::migrateRegionToAllocationContext(MM_HeapRegionDescriptorVLHGC *region, MM_AllocationContextTarok *newOwner)
{
	/*
	 * This is the point where we reconcile the data held in the region descriptors and the contexts. Prior to this, compaction planning may have decided
	 * to migrate a region into a new context but couldn't update the contexts' meta-structures due to performance concerns around the locks required to
	 * manipulate the lists. After this call returns, the region's meta-data will be consistent with its owning context.
	 */
	if (region->containsObjects()) {
		Assert_MM_true(NULL != region->getMemoryPool());
		_flushedRegions.removeRegion(region);
		Assert_MM_true(region->_allocateData._owningContext == newOwner);
		newOwner->acceptMigratingRegion(region);
	} else if (region->isArrayletLeaf()) {
		/* nothing to do */
	} else {
		Assert_MM_unreachable();
	}
	/* we can only do direct migration between contexts with the same NUMA properties, at this time (note that 0 is special since it can accept memory from any node) */
	Assert_MM_true((region->getNumaNode() == newOwner->getNumaNode()) || (0 == newOwner->getNumaNode()));
}

void
MM_AllocationContextBalanced::acceptMigratingRegion(MM_HeapRegionDescriptorVLHGC *region)
{
	_flushedRegions.insertRegion(region);
}

void
MM_AllocationContextBalanced::resetHeapStatistics(bool globalCollect)
{
	lockCommon();
	if (NULL != _allocationRegion) {
		_allocationRegion->getMemoryPool()->resetHeapStatistics(globalCollect);
	}
	MM_HeapRegionDescriptorVLHGC *region = _nonFullRegions.peekFirstRegion();
	while (NULL != region) {
		region->getMemoryPool()->resetHeapStatistics(globalCollect);
		region = _nonFullRegions.peekRegionAfter(region);
	}
	region = _discardRegionList.peekFirstRegion();
	while (NULL != region) {
		region->getMemoryPool()->resetHeapStatistics(globalCollect);
		region = _discardRegionList.peekRegionAfter(region);
	}
	region = _flushedRegions.peekFirstRegion();
	while (NULL != region) {
		region->getMemoryPool()->resetHeapStatistics(globalCollect);
		region = _flushedRegions.peekRegionAfter(region);
	}
	unlockCommon();
}

void
MM_AllocationContextBalanced::mergeHeapStats(MM_HeapStats *heapStats, UDATA includeMemoryType)
{
	lockCommon();
	if (NULL != _allocationRegion) {
		_allocationRegion->getMemoryPool()->mergeHeapStats(heapStats, true);
	}
	MM_HeapRegionDescriptorVLHGC *region = _nonFullRegions.peekFirstRegion();
	while (NULL != region) {
		region->getMemoryPool()->mergeHeapStats(heapStats, true);
		region = _nonFullRegions.peekRegionAfter(region);
	}
	region = _discardRegionList.peekFirstRegion();
	while (NULL != region) {
		region->getMemoryPool()->mergeHeapStats(heapStats, true);
		region = _discardRegionList.peekRegionAfter(region);
	}
	region = _flushedRegions.peekFirstRegion();
	while (NULL != region) {
		region->getMemoryPool()->mergeHeapStats(heapStats, true);
		region = _flushedRegions.peekRegionAfter(region);
	}
	unlockCommon();
}

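/**
 * Called with the context lock held after a failed allocation: for arraylet leaves, pay taxation
 * for one region, acquire a free region and task it as the leaf; otherwise replenish the active
 * allocation region (paying taxation) and retry the allocation from it.
 */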
void *
MM_AllocationContextBalanced::lockedReplenishAndAllocate(MM_EnvironmentBase *env, MM_ObjectAllocationInterface *objectAllocationInterface, MM_AllocateDescription *allocateDescription, MM_MemorySubSpace::AllocationType allocationType)
{
	void *result = NULL;
	MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);
	UDATA regionSize = extensions->regionSize;

	UDATA contiguousAllocationSize;
	if (MM_MemorySubSpace::ALLOCATION_TYPE_LEAF == allocationType) {
		contiguousAllocationSize = regionSize;
	} else {
		contiguousAllocationSize = allocateDescription->getContiguousBytes();
	}

	Trc_MM_AllocationContextBalanced_lockedReplenishAndAllocate_Entry(env->getLanguageVMThread(), regionSize, contiguousAllocationSize);

	if (MM_MemorySubSpace::ALLOCATION_TYPE_LEAF == allocationType) {
		if (_subspace->consumeFromTaxationThreshold(env, regionSize)) {
			/* acquire a free region */
			MM_HeapRegionDescriptorVLHGC *leafRegion = acquireFreeRegionFromHeap(env);
			if (NULL != leafRegion) {
				result = lockedAllocateArrayletLeaf(env, allocateDescription, leafRegion);
				leafRegion->_allocateData._owningContext = this;
				Assert_MM_true(leafRegion->getLowAddress() == result);
				Trc_MM_AllocationContextBalanced_lockedReplenishAndAllocate_acquiredFreeRegion(env->getLanguageVMThread(), regionSize);
			}
		}
	} else {
		Assert_MM_true(NULL == _allocationRegion);
		MM_HeapRegionDescriptorVLHGC *newRegion = internalReplenishActiveRegion(env, true);
		if (NULL != newRegion) {
			/* the new region must be our current allocation region and it must be completely empty */
			Assert_MM_true(_allocationRegion == newRegion);
			Assert_MM_true(newRegion->getMemoryPool()->getActualFreeMemorySize() == newRegion->getSize());

			result = lockedAllocate(env, objectAllocationInterface, allocateDescription, allocationType);
			Assert_MM_true(NULL != result);
		}
	}

	if (NULL != result) {
		Trc_MM_AllocationContextBalanced_lockedReplenishAndAllocate_Success(env->getLanguageVMThread());
	} else {
		Trc_MM_AllocationContextBalanced_lockedReplenishAndAllocate_Failure(env->getLanguageVMThread());
	}

	return result;
}

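/**
 * Install a fresh region as the context's active allocation region, optionally paying one
 * region's worth of taxation first, and credit its free space to the context's accounting.
 */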
MM_HeapRegionDescriptorVLHGC *
MM_AllocationContextBalanced::internalReplenishActiveRegion(MM_EnvironmentBase *env, bool payTax)
{
	MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);
	UDATA regionSize = extensions->regionSize;
	MM_HeapRegionDescriptorVLHGC *newRegion = NULL;

	Assert_MM_true(NULL == _allocationRegion);

	if (!payTax || _subspace->consumeFromTaxationThreshold(env, regionSize)) {
		newRegion = acquireMPRegionFromHeap(env, _subspace, this);
		if (NULL != newRegion) {
			Trc_MM_AllocationContextBalanced_internalReplenishActiveRegion_convertedFreeRegion(env->getLanguageVMThread(), newRegion, regionSize);
			_allocationRegion = newRegion;
			Trc_MM_AllocationContextBalanced_internalReplenishActiveRegion_setAllocationRegion(env->getLanguageVMThread(), this, newRegion);
			_freeMemorySize += newRegion->getMemoryPool()->getActualFreeMemorySize();
		}
	}

	Assert_MM_true(newRegion == _allocationRegion);

	return newRegion;
}

void
MM_AllocationContextBalanced::accountForRegionLocation(MM_HeapRegionDescriptorVLHGC *region, UDATA *localCount, UDATA *foreignCount)
{
	Assert_MM_true((NULL == region->_allocateData._owningContext) || (this == region->_allocateData._owningContext));
	if (NULL == region->_allocateData._originalOwningContext) {
		/* local */
		*localCount += 1;
		Assert_MM_true(region->getNumaNode() == getNumaNode());
	} else {
		/* foreign (stolen) */
		*foreignCount += 1;
		Assert_MM_true(region->getNumaNode() != getNumaNode());
	}
}

void
MM_AllocationContextBalanced::countRegionsInList(MM_RegionListTarok *list, UDATA *localCount, UDATA *foreignCount)
{
	MM_HeapRegionDescriptorVLHGC *region = list->peekFirstRegion();
	while (NULL != region) {
		accountForRegionLocation(region, localCount, foreignCount);
		region = list->peekRegionAfter(region);
	}
}

void
MM_AllocationContextBalanced::getRegionCount(UDATA *localCount, UDATA *foreignCount)
{
	if (NULL != _allocationRegion) {
		accountForRegionLocation(_allocationRegion, localCount, foreignCount);
	}
	countRegionsInList(&_nonFullRegions, localCount, foreignCount);
	countRegionsInList(&_discardRegionList, localCount, foreignCount);
	countRegionsInList(&_flushedRegions, localCount, foreignCount);
	countRegionsInList(&_freeRegions, localCount, foreignCount);
	countRegionsInList(&_idleMPRegions, localCount, foreignCount);
}

void
MM_AllocationContextBalanced::removeRegionFromFlushedList(MM_HeapRegionDescriptorVLHGC *region)
{
	_flushedRegions.removeRegion(region);
}

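/**
 * Hand back one unused region for heap contraction, preferring the free list over the more
 * valuable idle MPAOL list; an idle region is re-tasked as free pool before being returned.
 */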
MM_HeapRegionDescriptorVLHGC *
MM_AllocationContextBalanced::selectRegionForContraction(MM_EnvironmentBase *env)
{
	/* since we know this is only called during contraction we could skip the lock, but we would rather be safe */
	_freeListLock.acquire();

	/* prefer free regions since idle MPAOL regions are more valuable to us */
	MM_HeapRegionDescriptorVLHGC *region = _freeRegions.peekFirstRegion();
	if (NULL != region) {
		_freeRegions.removeRegion(region);
	} else {
		region = _idleMPRegions.peekFirstRegion();
		if (NULL != region) {
			_idleMPRegions.removeRegion(region);
			region->_allocateData.taskAsFreePool(env);
		}
	}
	if (NULL != region) {
		Assert_MM_true(getNumaNode() == region->getNumaNode());
		Assert_MM_true(MM_HeapRegionDescriptor::FREE == region->getRegionType());
	}

	_freeListLock.release();

	return region;
}

bool
MM_AllocationContextBalanced::setNumaAffinityForThread(MM_EnvironmentBase *env)
{
	bool success = true;

	bool hasPhysicalNUMASupport = MM_GCExtensions::getExtensions(env)->_numaManager.isPhysicalNUMASupported();
	if (hasPhysicalNUMASupport && (0 != getNumaNode())) {
		/* TODO: should we try to read the affinity first and find the best node? */
		success = env->setNumaAffinity(_freeProcessorNodes, _freeProcessorNodeCount);
	}

	return success;
}