GitHub Repository: PojavLauncherTeam/openj9
Path: blob/master/runtime/gc_vlhgc/CopyForwardScheme.cpp
/*******************************************************************************
* Copyright (c) 1991, 2022 IBM Corp. and others
*
* This program and the accompanying materials are made available under
* the terms of the Eclipse Public License 2.0 which accompanies this
* distribution and is available at https://www.eclipse.org/legal/epl-2.0/
* or the Apache License, Version 2.0 which accompanies this distribution and
* is available at https://www.apache.org/licenses/LICENSE-2.0.
*
* This Source Code may also be made available under the following
* Secondary Licenses when the conditions for such availability set
* forth in the Eclipse Public License, v. 2.0 are satisfied: GNU
* General Public License, version 2 with the GNU Classpath
* Exception [1] and GNU General Public License, version 2 with the
* OpenJDK Assembly Exception [2].
*
* [1] https://www.gnu.org/software/classpath/license.html
* [2] http://openjdk.java.net/legal/assembly-exception.html
*
* SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 OR GPL-2.0 WITH Classpath-exception-2.0 OR LicenseRef-GPL-2.0 WITH Assembly-exception
*******************************************************************************/

#include "j9.h"
#include "j9cfg.h"
#include "j9protos.h"
#include "j9consts.h"
#include "j2sever.h"
#include "modronopt.h"
#include "ModronAssertions.h"

#include <string.h>

#include "mmhook_internal.h"

#include "CopyForwardScheme.hpp"

#include "AllocateDescription.hpp"
#include "AllocationContextTarok.hpp"
#include "ArrayletLeafIterator.hpp"
#include "AtomicOperations.hpp"
#include "Bits.hpp"
#include "CardCleaner.hpp"
#include "CardListFlushTask.hpp"
#include "CardTable.hpp"
#include "ClassHeapIterator.hpp"
#include "ClassIterator.hpp"
#include "ClassLoaderClassesIterator.hpp"
#include "ClassLoaderIterator.hpp"
#include "ClassLoaderRememberedSet.hpp"
#include "CopyForwardSchemeTask.hpp"
#include "CompactGroupManager.hpp"
#include "CompactGroupPersistentStats.hpp"
#include "CompressedCardTable.hpp"
#include "CopyForwardCompactGroup.hpp"
#include "CopyForwardGMPCardCleaner.hpp"
#include "CopyForwardNoGMPCardCleaner.hpp"
#include "CopyScanCacheChunkVLHGCInHeap.hpp"
#include "CopyScanCacheListVLHGC.hpp"
#include "CopyScanCacheVLHGC.hpp"
#include "CycleState.hpp"
#include "EnvironmentBase.hpp"
#include "EnvironmentVLHGC.hpp"
#include "FinalizableObjectBuffer.hpp"
#include "FinalizableReferenceBuffer.hpp"
#include "FinalizeListManager.hpp"
#include "ForwardedHeader.hpp"
#include "GlobalAllocationManager.hpp"
#include "Heap.hpp"
#include "HeapMapIterator.hpp"
#include "HeapMapWordIterator.hpp"
#include "HeapRegionDescriptorVLHGC.hpp"
#include "HeapRegionIteratorVLHGC.hpp"
#include "HeapRegionManager.hpp"
#include "HotFieldUtil.hpp"
#include "InterRegionRememberedSet.hpp"
#include "MarkMap.hpp"
#include "MemorySpace.hpp"
#include "MemorySubSpace.hpp"
#include "ObjectAccessBarrier.hpp"
#include "ObjectAllocationInterface.hpp"
#include "ObjectHeapIteratorAddressOrderedList.hpp"
#include "ObjectIteratorState.hpp"
#include "ObjectModel.hpp"
#include "ParallelDispatcher.hpp"
#include "PacketSlotIterator.hpp"
#include "ParallelTask.hpp"
#include "ReferenceObjectBuffer.hpp"
#include "ReferenceObjectList.hpp"
#include "ReferenceStats.hpp"
#include "RegionBasedOverflowVLHGC.hpp"
#include "RootScanner.hpp"
#include "SlotObject.hpp"
#include "StackSlotValidator.hpp"
#include "SublistFragment.hpp"
#include "SublistIterator.hpp"
#include "SublistPool.hpp"
#include "SublistPuddle.hpp"
#include "SublistSlotIterator.hpp"
#include "SurvivorMemoryIterator.hpp"
#include "WorkPacketsIterator.hpp"
#include "WorkPacketsVLHGC.hpp"

#define INITIAL_FREE_HISTORY_WEIGHT ((float)0.8)
#define TENURE_BYTES_HISTORY_WEIGHT ((float)0.8)

#define SCAN_CACHES_PER_THREAD 1 /* each thread has 1 scan cache */
#define DEFERRED_CACHES_PER_THREAD 1 /* each thread has 1 deferred cache (hierarchical scan ordering only) */

#define SCAN_TO_COPY_CACHE_MAX_DISTANCE (UDATA_MAX)

/* VM Design 1774: Ideally we would pull these cache line values from the port library, but this will suffice for
* a quick implementation.
*/
#if defined(AIXPPC) || defined(LINUXPPC)
#define CACHE_LINE_SIZE 128
#elif defined(J9ZOS390) || (defined(LINUX) && defined(S390))
#define CACHE_LINE_SIZE 256
#else
#define CACHE_LINE_SIZE 64
#endif
/* create macros to interpret the hot field descriptor */
#define HOTFIELD_SHOULD_ALIGN(descriptor) (0x1 == (0x1 & (descriptor)))
#define HOTFIELD_ALIGNMENT_BIAS(descriptor, heapObjectAlignment) (((descriptor) >> 1) * (heapObjectAlignment))

/* give a name to the common context. Note that this may need to be stored locally and fetched at start-up
* if the common context disappears or becomes defined in a more complicated fashion.
*/
#define COMMON_CONTEXT_INDEX 0

/* If scavenger dynamicBreadthFirstScanOrdering and alwaysDepthCopyFirstOffset are enabled, always copy the first offset of each object after the object itself is copied */
#define DEFAULT_HOT_FIELD_OFFSET 1

#define AllCompressedCardsInWordClean 0
#define AllCompressedCardsInByteClean 0
#define AllCompressedCardsInWordSurvivor UDATA_MAX
#define AllCompressedCardsInByteSurvivor U_8_MAX
#define CompressedCardSurvivor 1

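/* A minimal usage sketch for the hot field descriptor macros above, illustrative only: the
* descriptor and alignment values passed in are assumptions for the example, not values taken
* from this file.
*/
#if 0
static uintptr_t
exampleHotFieldAlignmentBias(uintptr_t hotFieldDescriptor, uintptr_t heapObjectAlignmentInBytes)
{
	uintptr_t biasInBytes = 0;
	if (HOTFIELD_SHOULD_ALIGN(hotFieldDescriptor)) {
		/* bit 0 requests alignment; the remaining bits encode the bias in units of heap object alignment */
		biasInBytes = HOTFIELD_ALIGNMENT_BIAS(hotFieldDescriptor, heapObjectAlignmentInBytes);
	}
	return biasInBytes;
}
#endif /* 0 */
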
MM_CopyForwardScheme::MM_CopyForwardScheme(MM_EnvironmentVLHGC *env, MM_HeapRegionManager *manager)
140
: MM_BaseNonVirtual()
141
, _javaVM((J9JavaVM *)env->getLanguageVM())
142
, _extensions(MM_GCExtensions::getExtensions(env))
143
, _regionManager(manager)
144
, _interRegionRememberedSet(NULL)
145
, _reservedRegionList(NULL)
146
, _compactGroupMaxCount(MM_CompactGroupManager::getCompactGroupMaxCount(env))
147
, _phantomReferenceRegionsToProcess(0)
148
, _minCacheSize(0)
149
, _maxCacheSize(0)
150
, _dispatcher(_extensions->dispatcher)
151
, _cacheFreeList()
152
, _cacheScanLists(NULL)
153
, _scanCacheListSize(_extensions->_numaManager.getMaximumNodeNumber() + 1)
154
, _scanCacheWaitCount(0)
155
, _scanCacheMonitor(NULL)
156
, _workQueueWaitCountPtr(&_scanCacheWaitCount)
157
, _workQueueMonitorPtr(&_scanCacheMonitor)
158
, _doneIndex(0)
159
, _markMap(NULL)
160
, _heapBase(NULL)
161
, _heapTop(NULL)
162
, _abortFlag(false)
163
, _abortInProgress(false)
164
, _regionCountCannotBeEvacuated(0)
165
, _regionCountReservedNonEvacuated(0)
166
, _cacheLineAlignment(0)
167
, _clearableProcessingStarted(false)
168
#if defined(J9VM_GC_DYNAMIC_CLASS_UNLOADING)
169
, _dynamicClassUnloadingEnabled(false)
170
#endif /* J9VM_GC_DYNAMIC_CLASS_UNLOADING */
171
, _collectStringConstantsEnabled(false)
172
, _tracingEnabled(false)
173
, _commonContext(NULL)
174
, _compactGroupBlock(NULL)
175
, _arraySplitSize(0)
176
, _regionSublistContentionThreshold(0)
177
, _failedToExpand(false)
178
, _shouldScanFinalizableObjects(false)
179
, _objectAlignmentInBytes(env->getObjectAlignmentInBytes())
180
, _compressedSurvivorTable(NULL)
181
{
182
_typeId = __FUNCTION__;
183
}
184
185
MM_CopyForwardScheme *
186
MM_CopyForwardScheme::newInstance(MM_EnvironmentVLHGC *env, MM_HeapRegionManager *manager)
187
{
188
MM_CopyForwardScheme *scheme = (MM_CopyForwardScheme *)env->getForge()->allocate(sizeof(MM_CopyForwardScheme), MM_AllocationCategory::FIXED, J9_GET_CALLSITE());
189
if (scheme) {
190
new(scheme) MM_CopyForwardScheme(env, manager);
191
if (!scheme->initialize(env)) {
192
scheme->kill(env);
193
scheme = NULL;
194
}
195
}
196
return scheme;
197
}
198
199
void
200
MM_CopyForwardScheme::kill(MM_EnvironmentVLHGC *env)
201
{
202
tearDown(env);
203
env->getForge()->free(this);
204
}
205
206
bool
207
MM_CopyForwardScheme::initialize(MM_EnvironmentVLHGC *env)
208
{
209
MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);
210
211
if (!_cacheFreeList.initialize(env)) {
212
return false;
213
}
214
UDATA listsToCreate = _scanCacheListSize;
215
UDATA scanListsSizeInBytes = sizeof(MM_CopyScanCacheListVLHGC) * listsToCreate;
216
_cacheScanLists = (MM_CopyScanCacheListVLHGC *)env->getForge()->allocate(scanListsSizeInBytes, MM_AllocationCategory::FIXED, J9_GET_CALLSITE());
217
if (NULL == _cacheScanLists) {
218
return false;
219
}
220
memset((void*)_cacheScanLists, 0x0, scanListsSizeInBytes);
221
for (UDATA i = 0; i < listsToCreate; i++) {
222
new(&_cacheScanLists[i]) MM_CopyScanCacheListVLHGC();
223
if (!_cacheScanLists[i].initialize(env)) {
224
/* if we failed part-way through the list, adjust _scanCacheListSize so that tearDown does not
* try to tear down entries in the array whose constructors were never called
*/
227
_scanCacheListSize = i + 1;
228
return false;
229
}
230
}
231
if(omrthread_monitor_init_with_name(&_scanCacheMonitor, 0, "MM_CopyForwardScheme::cache")) {
232
return false;
233
}
234
235
/* Get the estimated cache count required. The cachesPerThread argument is used to ensure there are at least enough active
236
* caches for all working threads (threadCount * cachesPerThread)
237
*/
238
UDATA threadCount = extensions->dispatcher->threadCountMaximum();
239
UDATA compactGroupCount = MM_CompactGroupManager::getCompactGroupMaxCount(env);
240
241
/* Each thread can have a scan cache and compactGroupCount copy caches. In hierarchical, there could also be a deferred cache. */
242
UDATA cachesPerThread = SCAN_CACHES_PER_THREAD;
243
cachesPerThread += compactGroupCount; /* copy caches */
244
switch (_extensions->scavengerScanOrdering) {
245
case MM_GCExtensions::OMR_GC_SCAVENGER_SCANORDERING_BREADTH_FIRST:
246
case MM_GCExtensions::OMR_GC_SCAVENGER_SCANORDERING_DYNAMIC_BREADTH_FIRST:
247
break;
248
case MM_GCExtensions::OMR_GC_SCAVENGER_SCANORDERING_HIERARCHICAL:
249
cachesPerThread += DEFERRED_CACHES_PER_THREAD;
250
break;
251
default:
252
Assert_MM_unreachable();
253
break;
254
}
255
256
UDATA minCacheCount = threadCount * cachesPerThread;
257
258
/* Estimate how many caches we might need to describe the entire heap */
259
UDATA heapCaches = extensions->memoryMax / extensions->tlhMaximumSize;
260
261
/* use whichever value is higher */
262
UDATA totalCacheCount = OMR_MAX(minCacheCount, heapCaches);
263
264
if (!_cacheFreeList.resizeCacheEntries(env, totalCacheCount)) {
265
return false;
266
}
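/* A worked example of the cache count estimate above, using invented numbers (not values from
* this file): 64 GC threads, 8 compact groups, breadth-first scan ordering, a 4GB maximum heap
* and a 128KB maximum TLH.
*/
#if 0
static uintptr_t
exampleTotalCacheCount()
{
	uintptr_t threadCount = 64;
	uintptr_t cachesPerThread = SCAN_CACHES_PER_THREAD + 8; /* one scan cache plus one copy cache per compact group */
	uintptr_t minCacheCount = threadCount * cachesPerThread; /* 64 * 9 = 576 */
	uintptr_t heapCaches = (4 * 1024 * 1024) / 128; /* heap size in KB / maximum TLH in KB = 32768 */
	return (minCacheCount > heapCaches) ? minCacheCount : heapCaches; /* use whichever is higher: 32768 */
}
#endif /* 0 */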
267
268
/* Create and initialize the owned region lists to maintain resources for survivor area heap acquisition */
269
_reservedRegionList = (MM_ReservedRegionListHeader *)env->getForge()->allocate(sizeof(MM_ReservedRegionListHeader) * _compactGroupMaxCount, MM_AllocationCategory::FIXED, J9_GET_CALLSITE());
270
if(NULL == _reservedRegionList) {
271
return false;
272
}
273
274
memset((void *)_reservedRegionList, 0, sizeof(MM_ReservedRegionListHeader) * _compactGroupMaxCount);
275
for(UDATA index = 0; index < _compactGroupMaxCount; index++) {
276
_reservedRegionList[index]._maxSublistCount = 1;
277
_reservedRegionList[index]._sublistCount = 1;
278
_reservedRegionList[index]._evacuateRegionCount = 0;
279
for (UDATA sublistIndex = 0; sublistIndex < MM_ReservedRegionListHeader::MAX_SUBLISTS; sublistIndex++) {
280
_reservedRegionList[index]._sublists[sublistIndex]._head = NULL;
281
_reservedRegionList[index]._sublists[sublistIndex]._cacheAcquireCount = 0;
282
_reservedRegionList[index]._sublists[sublistIndex]._cacheAcquireBytes = 0;
283
if(!_reservedRegionList[index]._sublists[sublistIndex]._lock.initialize(env, &_extensions->lnrlOptions, "MM_CopyForwardScheme:_reservedRegionList[]._sublists[]._lock")) {
284
return false;
285
}
286
}
287
_reservedRegionList[index]._freeMemoryCandidates = NULL;
288
_reservedRegionList[index]._freeMemoryCandidateCount = 0;
289
if(!_reservedRegionList[index]._freeMemoryCandidatesLock.initialize(env, &_extensions->lnrlOptions, "MM_CopyForwardScheme:_reservedRegionList[]._freeMemoryCandidatesLock")) {
290
return false;
291
}
292
}
293
294
/* Set the min/max sizes for copy scan cache allocation when allocating a general purpose area (does not include non-standard sized objects) */
295
_minCacheSize = _extensions->tlhMinimumSize;
296
_maxCacheSize = _extensions->tlhMaximumSize;
297
298
/* Cached pointer to the inter region remembered set */
299
_interRegionRememberedSet = MM_GCExtensions::getExtensions(env)->interRegionRememberedSet;
300
301
_cacheLineAlignment = CACHE_LINE_SIZE;
302
303
/* TODO: how to determine this value? It should be large enough that each thread does
304
* real work, but small enough to give good sharing
305
*/
306
/* Note: this value should divide evenly into the arraylet leaf size so that each chunk
307
* is a block of contiguous memory
308
*/
309
_arraySplitSize = 4096;
310
311
/* allocate the per-thread, per-compact-group data structures */
312
Assert_MM_true(0 != _extensions->gcThreadCount);
313
UDATA allocateSize = sizeof(MM_CopyForwardCompactGroup) * _extensions->gcThreadCount * _compactGroupMaxCount;
314
_compactGroupBlock = (MM_CopyForwardCompactGroup *)_extensions->getForge()->allocate(allocateSize, MM_AllocationCategory::FIXED, J9_GET_CALLSITE());
315
if (NULL == _compactGroupBlock) {
316
return false;
317
}
318
319
/* Calculate compressed Survivor table size in bytes */
320
UDATA compressedSurvivorTableSize = _extensions->heap->getMaximumPhysicalRange() / (CARD_SIZE * BITS_PER_BYTE);
321
_compressedSurvivorTable = (UDATA *)env->getForge()->allocate(compressedSurvivorTableSize, MM_AllocationCategory::FIXED, J9_GET_CALLSITE());
322
if (NULL == _compressedSurvivorTable) {
323
return false;
324
}
325
326
return true;
327
}
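/* A worked example of the compressed survivor table sizing in initialize() above, using invented
* numbers (not values from this file): a 4GB maximum physical heap range and an assumed 512-byte
* card size. One bit is kept per card, so the table needs (heap size / card size) / 8 bytes.
*/
#if 0
static uint64_t
exampleCompressedSurvivorTableSizeInBytes()
{
	uint64_t maximumPhysicalRange = 4ULL * 1024 * 1024 * 1024; /* 4GB */
	uint64_t cardSize = 512; /* assumed for the example; the real value comes from CARD_SIZE */
	uint64_t bitsPerByte = 8;
	return maximumPhysicalRange / (cardSize * bitsPerByte); /* 1MB of table for a 4GB heap */
}
#endif /* 0 */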
328
329
void
330
MM_CopyForwardScheme::tearDown(MM_EnvironmentVLHGC *env)
331
{
332
_cacheFreeList.tearDown(env);
333
if (NULL != _cacheScanLists) {
334
UDATA listCount = _scanCacheListSize;
335
for (UDATA i = 0; i < listCount; i++) {
336
_cacheScanLists[i].tearDown(env);
337
}
338
env->getForge()->free(_cacheScanLists);
339
_cacheScanLists = NULL;
340
}
341
342
if (NULL != _scanCacheMonitor) {
343
omrthread_monitor_destroy(_scanCacheMonitor);
344
_scanCacheMonitor = NULL;
345
}
346
347
if(NULL != _reservedRegionList) {
348
for(UDATA index = 0; index < _compactGroupMaxCount; index++) {
349
for (UDATA sublistIndex = 0; sublistIndex < MM_ReservedRegionListHeader::MAX_SUBLISTS; sublistIndex++) {
350
_reservedRegionList[index]._sublists[sublistIndex]._lock.tearDown();
351
}
352
_reservedRegionList[index]._freeMemoryCandidatesLock.tearDown();
353
}
354
env->getForge()->free(_reservedRegionList);
355
_reservedRegionList = NULL;
356
}
357
358
if (NULL != _compactGroupBlock) {
359
env->getForge()->free(_compactGroupBlock);
360
_compactGroupBlock = NULL;
361
}
362
363
if (NULL != _compressedSurvivorTable) {
364
env->getForge()->free(_compressedSurvivorTable);
365
_compressedSurvivorTable = NULL;
366
}
367
}
368
369
MM_AllocationContextTarok *
370
MM_CopyForwardScheme::getPreferredAllocationContext(MM_AllocationContextTarok *suggestedContext, J9Object *objectPtr)
371
{
372
MM_AllocationContextTarok *preferredContext = suggestedContext;
373
374
if (preferredContext == _commonContext) {
375
preferredContext = getContextForHeapAddress(objectPtr);
376
} /* no code beyond this point without modifying else statement below */
377
return preferredContext;
378
}
379
380
void
381
MM_CopyForwardScheme::raiseAbortFlag(MM_EnvironmentVLHGC *env)
382
{
383
if (!_abortFlag) {
384
bool didSetFlag = false;
385
omrthread_monitor_enter(*_workQueueMonitorPtr);
386
if (!_abortFlag) {
387
_abortFlag = true;
388
didSetFlag = true;
389
/* if any threads are waiting, notify them so that they can get out of the monitor since nobody else is going to push work for them */
390
if (0 != *_workQueueWaitCountPtr) {
391
omrthread_monitor_notify_all(*_workQueueMonitorPtr);
392
}
393
}
394
omrthread_monitor_exit(*_workQueueMonitorPtr);
395
396
if (didSetFlag) {
397
env->_copyForwardStats._aborted = true;
398
399
Trc_MM_CopyForwardScheme_abortFlagRaised(env->getLanguageVMThread());
400
PORT_ACCESS_FROM_ENVIRONMENT(env);
401
TRIGGER_J9HOOK_MM_PRIVATE_COPY_FORWARD_ABORT(MM_GCExtensions::getExtensions(env)->privateHookInterface, env->getOmrVMThread(), j9time_hires_clock(), J9HOOK_MM_PRIVATE_COPY_FORWARD_ABORT);
402
}
403
}
404
}
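/* A minimal sketch of the double-checked flag-raise pattern used in raiseAbortFlag() above
* (the monitor, flag and wait-count parameters are invented for the example): the unsynchronized
* first read keeps the common already-raised path off the monitor, and waiters are notified
* because no further work will be pushed for them once the abort is raised.
*/
#if 0
static void
exampleRaiseFlagOnce(omrthread_monitor_t monitor, volatile bool *flag, volatile uintptr_t *waitCount)
{
	if (!*flag) {
		omrthread_monitor_enter(monitor);
		if (!*flag) {
			*flag = true;
			if (0 != *waitCount) {
				omrthread_monitor_notify_all(monitor);
			}
		}
		omrthread_monitor_exit(monitor);
	}
}
#endif /* 0 */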
405
406
/**
407
* Clear any global stats associated with the copy forward scheme.
408
*/
409
void
410
MM_CopyForwardScheme::clearGCStats(MM_EnvironmentVLHGC *env)
411
{
412
static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_vlhgcIncrementStats._copyForwardStats.clear();
413
static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_vlhgcIncrementStats._workPacketStats.clear();
414
}
415
416
void
417
MM_CopyForwardScheme::updateLeafRegions(MM_EnvironmentVLHGC *env)
418
{
419
GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);
420
MM_HeapRegionDescriptorVLHGC *region = NULL;
421
422
while(NULL != (region = regionIterator.nextRegion())) {
423
if(region->isArrayletLeaf()) {
424
J9Object *spineObject = (J9Object *)region->_allocateData.getSpine();
425
Assert_MM_true(NULL != spineObject);
426
427
J9Object *updatedSpineObject = updateForwardedPointer(spineObject);
428
if(updatedSpineObject != spineObject) {
429
MM_HeapRegionDescriptorVLHGC *spineRegion = (MM_HeapRegionDescriptorVLHGC*)_regionManager->tableDescriptorForAddress(spineObject);
430
MM_HeapRegionDescriptorVLHGC *updatedSpineRegion = (MM_HeapRegionDescriptorVLHGC*)_regionManager->tableDescriptorForAddress(updatedSpineObject);
431
432
Assert_MM_true(spineRegion->_markData._shouldMark);
433
Assert_MM_true(spineRegion != updatedSpineRegion);
434
Assert_MM_true(updatedSpineRegion->containsObjects());
435
436
/* we need to move the leaf to another region's leaf list since its spine has moved */
437
region->_allocateData.removeFromArrayletLeafList();
438
region->_allocateData.addToArrayletLeafList(updatedSpineRegion);
439
region->_allocateData.setSpine((J9IndexableObject *)updatedSpineObject);
440
} else if (!isLiveObject(spineObject)) {
441
Assert_MM_true(isObjectInEvacuateMemory(spineObject));
442
/* the spine is in evacuate space so the arraylet is dead => recycle the leaf */
443
/* remove arraylet leaf from list */
444
region->_allocateData.removeFromArrayletLeafList();
445
/* recycle */
446
region->_allocateData.setSpine(NULL);
447
region->getSubSpace()->recycleRegion(env, region);
448
}
449
}
450
}
451
}
452
453
void
454
MM_CopyForwardScheme::preProcessRegions(MM_EnvironmentVLHGC *env)
455
{
456
GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);
457
MM_HeapRegionDescriptorVLHGC *region = NULL;
458
459
UDATA ownableSynchronizerCandidates = 0;
460
UDATA ownableSynchronizerCountInEden = 0;
461
462
_regionCountCannotBeEvacuated = 0;
463
464
while(NULL != (region = regionIterator.nextRegion())) {
465
region->_copyForwardData._survivor = false;
466
region->_copyForwardData._freshSurvivor = false;
467
if(region->containsObjects()) {
468
region->_copyForwardData._initialLiveSet = true;
469
region->_copyForwardData._evacuateSet = region->_markData._shouldMark;
470
if (region->_markData._shouldMark) {
471
region->getUnfinalizedObjectList()->startUnfinalizedProcessing();
472
ownableSynchronizerCandidates += region->getOwnableSynchronizerObjectList()->getObjectCount();
473
if (region->isEden()) {
474
ownableSynchronizerCountInEden += region->getOwnableSynchronizerObjectList()->getObjectCount();
475
}
476
region->getOwnableSynchronizerObjectList()->startOwnableSynchronizerProcessing();
477
Assert_MM_true(region->getRememberedSetCardList()->isAccurate());
478
if ((region->_criticalRegionsInUse > 0) || !env->_cycleState->_shouldRunCopyForward || (100 == _extensions->fvtest_forceCopyForwardHybridRatio) || (randomDecideForceNonEvacuatedRegion(_extensions->fvtest_forceCopyForwardHybridRatio))) {
479
/* mark the region as noEvacuation for the copy-forward collector */
480
region->_markData._noEvacuation = true;
481
_regionCountCannotBeEvacuated += 1;
482
} else if ((_regionCountReservedNonEvacuated > 0) && region->isEden()){
483
_regionCountReservedNonEvacuated -= 1;
484
_regionCountCannotBeEvacuated += 1;
485
region->_markData._noEvacuation = true;
486
} else {
487
region->_markData._noEvacuation = false;
488
}
489
}
490
} else {
491
region->_copyForwardData._evacuateSet = false;
492
}
493
494
region->getReferenceObjectList()->resetPriorLists();
495
Assert_MM_false(region->_copyForwardData._requiresPhantomReferenceProcessing);
496
}
497
498
/* reset _regionCountReservedNonEvacuated */
499
_regionCountReservedNonEvacuated = 0;
500
/* Ideally allocationStats._ownableSynchronizerObjectCount should equal ownableSynchronizerCountInEden.
* However, if a partially constructed ownableSynchronizerObject was moved during a previous PGC, the notification for the
* new allocation happens after that GC, so the object is counted as a new allocation but is not in an Eden region.
* Use a loose assertion to cover this special case.
*/
504
Assert_MM_true(_extensions->allocationStats._ownableSynchronizerObjectCount >= ownableSynchronizerCountInEden);
505
static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_vlhgcIncrementStats._copyForwardStats._ownableSynchronizerCandidates = ownableSynchronizerCandidates;
506
}
507
508
void
509
MM_CopyForwardScheme::postProcessRegions(MM_EnvironmentVLHGC *env)
510
{
511
GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);
512
MM_HeapRegionDescriptorVLHGC *region = NULL;
513
UDATA survivorSetRegionCount = 0;
514
515
while(NULL != (region = regionIterator.nextRegion())) {
516
MM_MemoryPool *pool = region->getMemoryPool();
517
if (region->_copyForwardData._evacuateSet) {
518
if (region->isEden()) {
519
static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_vlhgcIncrementStats._copyForwardStats._edenEvacuateRegionCount += 1;
520
} else {
521
static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_vlhgcIncrementStats._copyForwardStats._nonEdenEvacuateRegionCount += 1;
522
}
523
} else if (region->isFreshSurvivorRegion()) {
524
/* check Eden Survivor Regions */
525
if (0 == region->getLogicalAge()) {
526
static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_vlhgcIncrementStats._copyForwardStats._edenSurvivorRegionCount += 1;
527
} else {
528
static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_vlhgcIncrementStats._copyForwardStats._nonEdenSurvivorRegionCount += 1;
529
}
530
}
531
532
/* Any region which is part of the survivor set should be set to "shouldMark" to appear as part of the collection set (and be swept, etc) */
533
if(region->isSurvivorRegion()) {
534
Assert_MM_true(region->containsObjects());
535
Assert_MM_false(region->_copyForwardData._evacuateSet);
536
Assert_MM_false(region->_markData._shouldMark);
537
Assert_MM_false(region->_reclaimData._shouldReclaim);
538
539
/* we do not count non-fresh regions, only regions that we acquired as free */
540
if (region->isFreshSurvivorRegion()) {
541
survivorSetRegionCount += 1;
542
} else {
543
((MM_MemoryPoolAddressOrderedList *)pool)->resetFirstUnalignedFreeEntry();
544
}
545
546
/* store back the remaining memory in the pool as free memory */
547
region->_sweepData._alreadySwept = true;
548
if (pool->getFreeMemoryAndDarkMatterBytes() == region->getSize()) {
549
/* The collector converted this region from FREE/IDLE to ADDRESS_ORDERED but never ended up using it
* (for example, it allocated some space but lost the race to forward the object). Convert it back to free.
*/
552
pool->reset(MM_MemoryPool::any);
553
region->getSubSpace()->recycleRegion(env, region);
554
} else {
555
/* this is a non-empty merged region - estimate its age based on its compact group */
556
setAllocationAgeForMergedRegion(env, region);
557
}
558
}
559
560
/* Clear any copy forward data */
561
region->_copyForwardData._initialLiveSet = false;
562
region->_copyForwardData._requiresPhantomReferenceProcessing = false;
563
region->_copyForwardData._survivor = false;
564
region->_copyForwardData._freshSurvivor = false;
565
566
if (region->_copyForwardData._evacuateSet) {
567
Assert_MM_true(region->_sweepData._alreadySwept);
568
if (abortFlagRaised() || region->_markData._noEvacuation) {
569
if (region->getRegionType() == MM_HeapRegionDescriptor::ADDRESS_ORDERED) {
570
region->setRegionType(MM_HeapRegionDescriptor::ADDRESS_ORDERED_MARKED);
571
} else {
572
Assert_MM_true(region->getRegionType() == MM_HeapRegionDescriptor::ADDRESS_ORDERED_MARKED);
573
}
574
Assert_MM_false(region->_previousMarkMapCleared);
575
/* we want to sweep and compact this region since we may have failed to completely evacuate it */
576
Assert_MM_true(region->_markData._shouldMark);
577
region->_sweepData._alreadySwept = false;
578
region->_reclaimData._shouldReclaim = true;
579
} else {
580
pool->reset(MM_MemoryPool::any);
581
region->getSubSpace()->recycleRegion(env, region);
582
}
583
region->_copyForwardData._evacuateSet = false;
584
}
585
}
586
587
env->_cycleState->_pgcData._survivorSetRegionCount = survivorSetRegionCount;
588
static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_vlhgcIncrementStats._copyForwardStats._nonEvacuateRegionCount = _regionCountCannotBeEvacuated;
589
}
590
591
/****************************************
592
* Copy and Forward implementation
593
****************************************
594
*/
595
596
bool
597
MM_CopyForwardScheme::isLiveObject(J9Object *objectPtr)
598
{
599
bool result = true;
600
601
if(NULL != objectPtr) {
602
Assert_MM_true(isHeapObject(objectPtr));
603
604
if (!isObjectInSurvivorMemory(objectPtr)) {
605
result = _markMap->isBitSet(objectPtr);
606
}
607
}
608
609
return result;
610
}
611
612
613
MMINLINE bool
614
MM_CopyForwardScheme::isObjectInEvacuateMemory(J9Object *objectPtr)
615
{
616
bool result = false;
617
618
if(NULL != objectPtr) {
619
result = isObjectInEvacuateMemoryNoCheck(objectPtr);
620
}
621
return result;
622
}
623
624
MMINLINE bool
625
MM_CopyForwardScheme::isObjectInEvacuateMemoryNoCheck(J9Object *objectPtr)
626
{
627
bool result = false;
628
629
MM_HeapRegionDescriptorVLHGC *region = NULL;
630
region = (MM_HeapRegionDescriptorVLHGC *)_regionManager->tableDescriptorForAddress(objectPtr);
631
result = region->_markData._shouldMark;
632
return result;
633
}
634
635
MMINLINE bool
636
MM_CopyForwardScheme::isObjectInSurvivorMemory(J9Object *objectPtr)
637
{
638
bool result = false;
639
640
if(NULL != objectPtr) {
641
MM_HeapRegionDescriptorVLHGC *region = NULL;
642
region = (MM_HeapRegionDescriptorVLHGC *)_regionManager->tableDescriptorForAddress(objectPtr);
643
Assert_MM_true(region->_copyForwardData._initialLiveSet || (!region->_markData._shouldMark && !region->_copyForwardData._initialLiveSet));
644
result = region->isFreshSurvivorRegion();
645
if (!result && region->isSurvivorRegion()) {
646
result = isCompressedSurvivor((void*)objectPtr);
647
}
648
}
649
return result;
650
}
651
652
MMINLINE bool
653
MM_CopyForwardScheme::isObjectInNurseryMemory(J9Object *objectPtr)
654
{
655
bool result = false;
656
657
if(NULL != objectPtr) {
658
MM_HeapRegionDescriptorVLHGC *region = NULL;
659
region = (MM_HeapRegionDescriptorVLHGC *)_regionManager->tableDescriptorForAddress(objectPtr);
660
result = region->_markData._shouldMark || isObjectInSurvivorMemory(objectPtr);
661
}
662
return result;
663
}
664
665
MMINLINE void
666
MM_CopyForwardScheme::reinitCache(MM_EnvironmentVLHGC *env, MM_CopyScanCacheVLHGC *cache, void *base, void *top, UDATA compactGroup)
667
{
668
MM_CopyForwardCompactGroup *compactGroupForMarkData = &(env->_copyForwardCompactGroups[compactGroup]);
669
Assert_MM_true(cache == compactGroupForMarkData->_copyCache);
670
cache->cacheBase = base;
671
cache->cacheAlloc = base;
672
cache->scanCurrent = base;
673
cache->_hasPartiallyScannedObject = false;
674
cache->cacheTop = top;
675
676
/* set the mark map cached values to the initial state */
677
/* Count one slot before the base in order to get the true atomic head location. Regions that do not start on a partial boundary will
* never see the previous slot.
*/
680
if(base == _heapBase) {
681
/* Going below heap base would be strange - just use _heapTop which won't collide with anything */
682
compactGroupForMarkData->_markMapAtomicHeadSlotIndex = _markMap->getSlotIndex((J9Object *)_heapTop);
683
} else {
684
compactGroupForMarkData->_markMapAtomicHeadSlotIndex = _markMap->getSlotIndex((J9Object *) (((UDATA)base) - _markMap->getObjectGrain()));
685
}
686
compactGroupForMarkData->_markMapAtomicTailSlotIndex = _markMap->getSlotIndex((J9Object *)top);
687
compactGroupForMarkData->_markMapPGCSlotIndex = 0;
688
compactGroupForMarkData->_markMapPGCBitMask = 0;
689
compactGroupForMarkData->_markMapGMPSlotIndex = 0;
690
compactGroupForMarkData->_markMapGMPBitMask = 0;
691
692
Assert_MM_true(compactGroup < _compactGroupMaxCount);
693
cache->_compactGroup = compactGroup;
694
Assert_MM_true(0.0 == cache->_allocationAgeSizeProduct);
695
696
MM_HeapRegionDescriptorVLHGC * region = (MM_HeapRegionDescriptorVLHGC *)_regionManager->tableDescriptorForAddress(cache->cacheBase);
697
Trc_MM_CopyForwardScheme_reinitCache(env->getLanguageVMThread(), _regionManager->mapDescriptorToRegionTableIndex(region), cache,
698
region->getAllocationAgeSizeProduct() / (1024 * 1024) / (1024 * 1024), (double)((UDATA)cache->cacheAlloc - (UDATA)region->getLowAddress()) / (1024 * 1024));
699
700
/* store back the given flags */
701
cache->flags = J9VM_MODRON_SCAVENGER_CACHE_TYPE_COPY | (cache->flags & J9VM_MODRON_SCAVENGER_CACHE_MASK_PERSISTENT);
702
}
703
704
MMINLINE void
705
MM_CopyForwardScheme::reinitArraySplitCache(MM_EnvironmentVLHGC *env, MM_CopyScanCacheVLHGC *cache, J9IndexableObject *array, UDATA nextIndex)
706
{
707
cache->cacheBase = array;
708
cache->cacheAlloc = array;
709
cache->scanCurrent = array;
710
cache->_hasPartiallyScannedObject = false;
711
cache->cacheTop = array;
712
cache->_arraySplitIndex = nextIndex;
713
714
/* store back the appropriate flags */
715
cache->flags = (J9VM_MODRON_SCAVENGER_CACHE_TYPE_SPLIT_ARRAY | J9VM_MODRON_SCAVENGER_CACHE_TYPE_CLEARED) | (cache->flags & J9VM_MODRON_SCAVENGER_CACHE_MASK_PERSISTENT);
716
}
717
718
void
719
MM_CopyForwardScheme::clearReservedRegionLists(MM_EnvironmentVLHGC *env)
720
{
721
Trc_MM_CopyForwardScheme_clearReservedRegionLists_Entry(env->getLanguageVMThread(), _compactGroupMaxCount);
722
723
for(UDATA index = 0; index < _compactGroupMaxCount; index++) {
724
Trc_MM_CopyForwardScheme_clearReservedRegionLists_compactGroup(env->getLanguageVMThread(), index, _reservedRegionList[index]._evacuateRegionCount, _reservedRegionList[index]._sublistCount, _reservedRegionList[index]._maxSublistCount, _reservedRegionList[index]._freeMemoryCandidateCount);
725
if (0 == _reservedRegionList[index]._freeMemoryCandidateCount) {
726
Assert_MM_true(NULL == _reservedRegionList[index]._freeMemoryCandidates);
727
} else {
728
Assert_MM_true(NULL != _reservedRegionList[index]._freeMemoryCandidates);
729
}
730
731
for (UDATA sublistIndex = 0; sublistIndex < _reservedRegionList[index]._sublistCount; sublistIndex++) {
732
MM_ReservedRegionListHeader::Sublist *regionList = &_reservedRegionList[index]._sublists[sublistIndex];
733
MM_HeapRegionDescriptorVLHGC *region = regionList->_head;
734
735
while(NULL != region) {
736
MM_HeapRegionDescriptorVLHGC *next = region->_copyForwardData._nextRegion;
737
738
releaseRegion(env, regionList, region);
739
region = next;
740
}
741
742
if (0 != regionList->_cacheAcquireCount) {
743
Trc_MM_CopyForwardScheme_clearReservedRegionLists_sublist(env->getLanguageVMThread(), index, sublistIndex, regionList->_cacheAcquireCount, regionList->_cacheAcquireBytes, regionList->_cacheAcquireBytes / regionList->_cacheAcquireCount);
744
}
745
746
regionList->_head = NULL;
747
regionList->_cacheAcquireCount = 0;
748
regionList->_cacheAcquireBytes = 0;
749
}
750
_reservedRegionList[index]._sublistCount = 1;
751
_reservedRegionList[index]._maxSublistCount = 1;
752
_reservedRegionList[index]._evacuateRegionCount = 0;
753
_reservedRegionList[index]._freeMemoryCandidates = NULL;
754
_reservedRegionList[index]._freeMemoryCandidateCount = 0;
755
}
756
757
Trc_MM_CopyForwardScheme_clearReservedRegionLists_Exit(env->getLanguageVMThread());
758
}
759
760
MM_HeapRegionDescriptorVLHGC *
761
MM_CopyForwardScheme::acquireEmptyRegion(MM_EnvironmentVLHGC *env, MM_ReservedRegionListHeader::Sublist *regionList, UDATA compactGroup)
762
{
763
MM_HeapRegionDescriptorVLHGC *newRegion = NULL;
764
765
if (!_failedToExpand) {
766
UDATA allocationContextNumber = MM_CompactGroupManager::getAllocationContextNumberFromGroup(env, compactGroup);
767
MM_AllocationContextTarok *allocationContext = (MM_AllocationContextTarok *)_extensions->globalAllocationManager->getAllocationContextByIndex(allocationContextNumber);
768
769
newRegion = allocationContext->collectorAcquireRegion(env);
770
771
if(NULL != newRegion) {
772
MM_CycleState *cycleState = env->_cycleState;
773
MM_CycleState *externalCycleState = env->_cycleState->_externalCycleState;
774
775
/* a new region starts as ADDRESS_ORDERED but we will always have valid mark map data for this region so set its type now */
776
newRegion->setMarkMapValid();
777
if (newRegion->_previousMarkMapCleared) {
778
newRegion->_previousMarkMapCleared = false;
779
} else {
780
cycleState->_markMap->setBitsForRegion(env, newRegion, true);
781
}
782
if (NULL != externalCycleState) {
783
if (newRegion->_nextMarkMapCleared) {
784
newRegion->_nextMarkMapCleared = false;
785
if (_extensions->tarokEnableExpensiveAssertions) {
786
Assert_MM_true(externalCycleState->_markMap->checkBitsForRegion(env, newRegion));
787
}
788
} else {
789
externalCycleState->_markMap->setBitsForRegion(env, newRegion, true);
790
}
791
}
792
793
Assert_MM_true(NULL == newRegion->getUnfinalizedObjectList()->getHeadOfList());
794
Assert_MM_true(NULL == newRegion->getOwnableSynchronizerObjectList()->getHeadOfList());
795
Assert_MM_false(newRegion->_markData._shouldMark);
796
797
/*
* Set the logical age here so that compact groups work properly;
* the real allocation age will be updated after the PGC.
*/
801
UDATA logicalRegionAge = MM_CompactGroupManager::getRegionAgeFromGroup(env, compactGroup);
802
newRegion->setAge(0, logicalRegionAge);
803
804
Assert_MM_true(newRegion->getReferenceObjectList()->isSoftListEmpty());
805
Assert_MM_true(newRegion->getReferenceObjectList()->isWeakListEmpty());
806
Assert_MM_true(newRegion->getReferenceObjectList()->isPhantomListEmpty());
807
808
setRegionAsSurvivor(env, newRegion, true);
809
insertRegionIntoLockedList(env, regionList, newRegion);
810
} else {
811
/* record that we failed to expand so that we stop trying during this collection */
812
_failedToExpand = true;
813
}
814
}
815
816
return newRegion;
817
}
818
819
void
820
MM_CopyForwardScheme::insertRegionIntoLockedList(MM_EnvironmentVLHGC *env, MM_ReservedRegionListHeader::Sublist *regionList, MM_HeapRegionDescriptorVLHGC *newRegion)
821
{
822
newRegion->_copyForwardData._nextRegion = regionList->_head;
823
newRegion->_copyForwardData._previousRegion = NULL;
824
825
if(NULL != regionList->_head) {
826
regionList->_head->_copyForwardData._previousRegion = newRegion;
827
}
828
829
regionList->_head = newRegion;
830
}
831
832
void
833
MM_CopyForwardScheme::releaseRegion(MM_EnvironmentVLHGC *env, MM_ReservedRegionListHeader::Sublist *regionList, MM_HeapRegionDescriptorVLHGC *region)
834
{
835
MM_HeapRegionDescriptorVLHGC *next = region->_copyForwardData._nextRegion;
836
MM_HeapRegionDescriptorVLHGC *previous = region->_copyForwardData._previousRegion;
837
838
if (NULL != next) {
839
next->_copyForwardData._previousRegion = previous;
840
}
841
if (NULL != previous) {
842
previous->_copyForwardData._nextRegion = next;
843
Assert_MM_false(previous == previous->_copyForwardData._nextRegion);
844
} else {
845
regionList->_head = next;
846
}
847
region->_copyForwardData._nextRegion = NULL;
848
region->_copyForwardData._previousRegion = NULL;
849
}
850
851
void *
852
MM_CopyForwardScheme::reserveMemoryForObject(MM_EnvironmentVLHGC *env, uintptr_t compactGroup, uintptr_t objectSize, MM_LightweightNonReentrantLock** listLock)
853
{
854
MM_AllocateDescription allocDescription(objectSize, 0, false, false);
855
uintptr_t sublistCount = _reservedRegionList[compactGroup]._sublistCount;
856
Assert_MM_true(sublistCount <= MM_ReservedRegionListHeader::MAX_SUBLISTS);
857
uintptr_t sublistIndex = env->getWorkerID() % sublistCount;
858
MM_ReservedRegionListHeader::Sublist *regionList = &_reservedRegionList[compactGroup]._sublists[sublistIndex];
859
void *result = NULL;
860
861
/* Measure the number of acquires before and after we acquire the lock. If it changed, then there is probably contention on the lock. */
862
uintptr_t acquireCountBefore = regionList->_cacheAcquireCount;
863
regionList->_lock.acquire();
864
uintptr_t acquireCountAfter = regionList->_cacheAcquireCount;
865
866
/*
867
* 1. attempt to use an existing region
868
*/
869
MM_HeapRegionDescriptorVLHGC *region = regionList->_head;
870
while ((NULL == result) && (NULL != region)) {
871
MM_MemoryPool *memoryPool = region->getMemoryPool();
872
Assert_MM_true(NULL != memoryPool);
873
result = memoryPool->collectorAllocate(env, &allocDescription, false);
874
if (NULL != result) {
875
break;
876
}
877
region = region->_copyForwardData._nextRegion;
878
}
879
880
/*
881
* 2. attempt to acquire a region from the free memory candidates list
882
*/
883
if ((NULL == result) && (NULL != _reservedRegionList[compactGroup]._freeMemoryCandidates)) {
884
_reservedRegionList[compactGroup]._freeMemoryCandidatesLock.acquire();
885
region = _reservedRegionList[compactGroup]._freeMemoryCandidates;
886
MM_HeapRegionDescriptorVLHGC *resultRegion = NULL;
887
while ((NULL == result) && (NULL != region)) {
888
MM_MemoryPool *memoryPool = region->getMemoryPool();
889
Assert_MM_true(NULL != memoryPool);
890
891
((MM_MemoryPoolAddressOrderedList *)memoryPool)->initialFirstUnalignedFreeEntry();
892
result = memoryPool->collectorAllocate(env, &allocDescription, false);
893
894
if (NULL != result) {
895
resultRegion = region;
896
break;
897
}
898
region = region->_copyForwardData._nextRegion;
899
}
900
if (NULL != result) {
901
/* remove this region from the common free memory candidates list and add it to our own sublist */
902
Assert_MM_true(NULL != resultRegion);
903
removeFreeMemoryCandidate(env, &_reservedRegionList[compactGroup], resultRegion);
904
insertRegionIntoLockedList(env, regionList, resultRegion);
905
convertFreeMemoryCandidateToSurvivorRegion(env, resultRegion);
906
}
907
_reservedRegionList[compactGroup]._freeMemoryCandidatesLock.release();
908
}
909
910
/*
911
* 3. attempt to acquire an empty region
912
*/
913
if (NULL == result) {
914
region = acquireEmptyRegion(env, regionList, compactGroup);
915
if(NULL != region) {
916
MM_MemoryPool *memoryPool = region->getMemoryPool();
917
Assert_MM_true(NULL != memoryPool);
918
result = memoryPool->collectorAllocate(env, &allocDescription, false);
919
Assert_MM_true(NULL != result); /* This should not have failed at this point */
920
}
921
}
922
923
if (NULL != result) {
924
regionList->_cacheAcquireCount += 1;
925
regionList->_cacheAcquireBytes += allocDescription.getBytesRequested();
926
setCompressedSurvivorCards(env, (void*)result, (void*) ((uintptr_t)result + allocDescription.getBytesRequested()));
927
}
928
929
regionList->_lock.release();
930
*listLock = &regionList->_lock;
931
932
Assert_MM_true(acquireCountBefore <= acquireCountAfter);
933
if ((NULL != result) && (sublistCount < _reservedRegionList[compactGroup]._maxSublistCount)) {
934
uintptr_t acceptableAcquireCountForContention = acquireCountBefore + _regionSublistContentionThreshold;
935
if (acceptableAcquireCountForContention < acquireCountAfter) {
936
/* contention detected on lock -- attempt to increase the number of sublists */
937
MM_AtomicOperations::lockCompareExchange(&_reservedRegionList[compactGroup]._sublistCount, sublistCount, sublistCount + 1);
938
}
939
}
940
941
return result;
942
}
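/* A minimal sketch of the lock-contention heuristic used in reserveMemoryForObject() and
* reserveMemoryForCache() above (parameter names invented for the example): sample the list's
* acquire counter before and after taking its lock, and if other threads advanced it past the
* contention threshold while we waited, try to grow the sublist count with a single
* compare-and-swap. Losing the race is fine - some other thread grew the list instead.
*/
#if 0
static void
exampleGrowSublistsOnContention(volatile uintptr_t *sublistCount, uintptr_t maxSublistCount,
		uintptr_t acquireCountBefore, uintptr_t acquireCountAfter, uintptr_t contentionThreshold)
{
	uintptr_t observedCount = *sublistCount;
	if ((observedCount < maxSublistCount) && ((acquireCountBefore + contentionThreshold) < acquireCountAfter)) {
		MM_AtomicOperations::lockCompareExchange(sublistCount, observedCount, observedCount + 1);
	}
}
#endif /* 0 */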
943
944
bool
945
MM_CopyForwardScheme::reserveMemoryForCache(MM_EnvironmentVLHGC *env, uintptr_t compactGroup, uintptr_t maxCacheSize, void **addrBase, void **addrTop, MM_LightweightNonReentrantLock** listLock)
946
{
947
MM_AllocateDescription allocDescription(maxCacheSize, 0, false, false);
948
bool result = false;
949
uintptr_t sublistCount = _reservedRegionList[compactGroup]._sublistCount;
950
Assert_MM_true(sublistCount <= MM_ReservedRegionListHeader::MAX_SUBLISTS);
951
uintptr_t sublistIndex = env->getWorkerID() % sublistCount;
952
MM_ReservedRegionListHeader::Sublist *regionList = &_reservedRegionList[compactGroup]._sublists[sublistIndex];
953
954
/* Measure the number of acquires before and after we acquire the lock. If it changed, then there is probably contention on the lock. */
955
uintptr_t acquireCountBefore = regionList->_cacheAcquireCount;
956
regionList->_lock.acquire();
957
uintptr_t acquireCountAfter = regionList->_cacheAcquireCount;
958
959
/*
960
* 1. attempt to use an existing region
961
*/
962
MM_HeapRegionDescriptorVLHGC *region = regionList->_head;
963
while ((!result) && (NULL != region)) {
964
MM_MemoryPool *memoryPool = region->getMemoryPool();
965
Assert_MM_true(NULL != memoryPool);
966
967
void *tlhBase = NULL;
968
void *tlhTop = NULL;
969
result = (NULL != memoryPool->collectorAllocateTLH(env, &allocDescription, maxCacheSize, tlhBase, tlhTop, false));
970
971
MM_HeapRegionDescriptorVLHGC *next = region->_copyForwardData._nextRegion;
972
if (result) {
973
*addrBase = tlhBase;
974
*addrTop = tlhTop;
975
break;
976
} else {
977
releaseRegion(env, regionList, region);
978
}
979
region = next;
980
}
981
982
/*
983
* 2. attempt to acquire a region from the free memory candidates list
984
*/
985
if ((!result) && (NULL != _reservedRegionList[compactGroup]._freeMemoryCandidates)) {
986
_reservedRegionList[compactGroup]._freeMemoryCandidatesLock.acquire();
987
region = _reservedRegionList[compactGroup]._freeMemoryCandidates;
988
MM_HeapRegionDescriptorVLHGC *resultRegion = NULL;
989
while ((!result) && (NULL != region)) {
990
MM_MemoryPool *memoryPool = region->getMemoryPool();
991
Assert_MM_true(NULL != memoryPool);
992
993
void *tlhBase = NULL;
994
void *tlhTop = NULL;
995
((MM_MemoryPoolAddressOrderedList *)memoryPool)->initialFirstUnalignedFreeEntry();
996
result = (NULL != memoryPool->collectorAllocateTLH(env, &allocDescription, maxCacheSize, tlhBase, tlhTop, false));
997
if (result) {
998
*addrBase = tlhBase;
999
*addrTop = tlhTop;
1000
/* remove this region from the common free memory candidates list and add it to our own sublist */
1001
removeFreeMemoryCandidate(env, &_reservedRegionList[compactGroup], region);
1002
insertRegionIntoLockedList(env, regionList, region);
1003
convertFreeMemoryCandidateToSurvivorRegion(env, region);
1004
break;
1005
1006
}
1007
Assert_MM_true(0 == memoryPool->getActualFreeMemorySize());
1008
resultRegion = region;
1009
region = region->_copyForwardData._nextRegion;
1010
removeFreeMemoryCandidate(env, &_reservedRegionList[compactGroup], resultRegion);
1011
}
1012
_reservedRegionList[compactGroup]._freeMemoryCandidatesLock.release();
1013
}
1014
1015
/*
1016
* 3. attempt to acquire an empty region
1017
*/
1018
if(!result) {
1019
region = acquireEmptyRegion(env, regionList, compactGroup);
1020
if(NULL != region) {
1021
MM_MemoryPool *memoryPool = region->getMemoryPool();
1022
Assert_MM_true(NULL != memoryPool);
1023
1024
void *tlhBase = NULL;
1025
void *tlhTop = NULL;
1026
/* note that we called alignAllocationPointer on this pool when adding it to our copy-forward destination list so this address won't share a card with non-moving objects */
1027
result = (NULL != memoryPool->collectorAllocateTLH(env, &allocDescription, maxCacheSize, tlhBase, tlhTop, false));
1028
1029
Assert_MM_true(result); /* This should not have failed at this point */
1030
1031
*addrBase = tlhBase;
1032
*addrTop = tlhTop;
1033
}
1034
}
1035
1036
if (result) {
1037
regionList->_cacheAcquireCount += 1;
1038
regionList->_cacheAcquireBytes += ((uintptr_t)*addrTop) - ((uintptr_t)*addrBase);
1039
setCompressedSurvivorCards(env, *addrBase, *addrTop);
1040
}
1041
1042
regionList->_lock.release();
1043
*listLock = &regionList->_lock;
1044
1045
Assert_MM_true(acquireCountBefore <= acquireCountAfter);
1046
if (result && (sublistCount < _reservedRegionList[compactGroup]._maxSublistCount)) {
1047
uintptr_t acceptableAcquireCountForContention = acquireCountBefore + _regionSublistContentionThreshold;
1048
if (acceptableAcquireCountForContention < acquireCountAfter) {
1049
/* contention detected on lock -- attempt to increase the number of sublists */
1050
MM_AtomicOperations::lockCompareExchange(&_reservedRegionList[compactGroup]._sublistCount, sublistCount, sublistCount + 1);
1051
}
1052
}
1053
1054
return result;
1055
}
1056
1057
MM_CopyScanCacheVLHGC *
1058
MM_CopyForwardScheme::createScanCacheForOverflowInHeap(MM_EnvironmentVLHGC *env)
1059
{
1060
bool const compressed = env->compressObjectReferences();
1061
MM_CopyScanCacheVLHGC * result = NULL;
1062
1063
_cacheFreeList.lock();
1064
1065
/* check to see if another thread already did this */
1066
result = _cacheFreeList.popCacheNoLock(env);
1067
/* find out how many bytes are required to allocate a chunk in the heap */
1068
UDATA cacheSizeInBytes = MM_CopyScanCacheChunkVLHGCInHeap::bytesRequiredToAllocateChunkInHeap(env);
1069
/* since we are allocating this in a part of the heap which the copy-forward mechanism will have to walk before it finishes, we need to hide it in a hole, so add that header size */
1070
UDATA bytesToReserve = sizeof(MM_HeapLinkedFreeHeader) + cacheSizeInBytes;
1071
UDATA suggestedCompactGroup = 0;
1072
while ((NULL == result) && (suggestedCompactGroup < _compactGroupMaxCount)) {
1073
MM_LightweightNonReentrantLock *listLock = NULL;
1074
void *extentBase = reserveMemoryForObject(env, suggestedCompactGroup, bytesToReserve, &listLock);
1075
if (NULL != extentBase) {
1076
/* this is not object memory so account for it as free memory while we have the size */
1077
/* lock the region list for this group and write back the memory we consumed as free space immediately (this is a rare case, so the
* lock is an acceptable cost and avoids having to defer the write-back of the free memory size)
*/
1080
Assert_MM_true(NULL != listLock);
1081
MM_HeapRegionDescriptorVLHGC *region = (MM_HeapRegionDescriptorVLHGC*)_regionManager->tableDescriptorForAddress(extentBase);
1082
MM_MemoryPool *pool = region->getMemoryPool();
1083
listLock->acquire();
1084
pool->incrementDarkMatterBytes(bytesToReserve);
1085
listLock->release();
1086
/* save out how much memory we wasted so the caller can account for it */
1087
memset(extentBase, 0x0, bytesToReserve);
1088
void *cacheBase = (void *)((MM_HeapLinkedFreeHeader *)extentBase + 1);
1089
MM_HeapLinkedFreeHeader::fillWithHoles(extentBase, bytesToReserve, compressed);
1090
result = _cacheFreeList.allocateCacheEntriesInExistingMemory(env, cacheBase, cacheSizeInBytes);
1091
}
1092
suggestedCompactGroup += 1;
1093
}
1094
1095
_cacheFreeList.unlock();
1096
1097
return result;
1098
}
1099
1100
UDATA
1101
MM_CopyForwardScheme::getDesiredCopyCacheSize(MM_EnvironmentVLHGC *env, UDATA compactGroup)
1102
{
1103
/* The desired cache size is a fraction of the number of bytes we've copied so far.
1104
* The upper bound on fragmentation is approximately this fraction, with the expected fragmentation about half of the fraction.
1105
*/
1106
const double allowableFragmentation = 2.0 * _extensions->tarokCopyForwardFragmentationTarget;
1107
const double bytesCopiedInCompactGroup = (double)(env->_copyForwardCompactGroups[compactGroup]._edenStats._copiedBytes + env->_copyForwardCompactGroups[compactGroup]._nonEdenStats._copiedBytes);
1108
UDATA desiredCacheSize = (UDATA)(allowableFragmentation * bytesCopiedInCompactGroup);
1109
MM_CompactGroupPersistentStats *stats = &(_extensions->compactGroupPersistentStats[compactGroup]);
1110
UDATA perThreadSurvivalEstimatedSize = (UDATA)(((double)stats->_measuredLiveBytesBeforeCollectInCollectedSet * stats->_historicalSurvivalRate * allowableFragmentation) / (double)env->_currentTask->getThreadCount());
1111
desiredCacheSize = OMR_MAX(desiredCacheSize, perThreadSurvivalEstimatedSize);
1112
desiredCacheSize = MM_Math::roundToCeiling(_objectAlignmentInBytes, desiredCacheSize);
1113
desiredCacheSize = OMR_MIN(desiredCacheSize, _maxCacheSize);
1114
desiredCacheSize = OMR_MAX(desiredCacheSize, _minCacheSize);
1115
return desiredCacheSize;
1116
}
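/* A worked example of the sizing policy in getDesiredCopyCacheSize() above, using invented
* numbers (not values from this file): a 5% fragmentation target, 20MB already copied in this
* compact group, and 8KB/128KB minimum/maximum cache sizes. The survival-rate term is omitted
* for brevity.
*/
#if 0
static uintptr_t
exampleDesiredCopyCacheSize()
{
	double allowableFragmentation = 2.0 * 0.05; /* twice the fragmentation target */
	double bytesCopiedInCompactGroup = 20.0 * 1024 * 1024;
	uintptr_t desiredCacheSize = (uintptr_t)(allowableFragmentation * bytesCopiedInCompactGroup); /* ~2MB */
	uintptr_t maxCacheSize = 128 * 1024;
	uintptr_t minCacheSize = 8 * 1024;
	desiredCacheSize = (desiredCacheSize > maxCacheSize) ? maxCacheSize : desiredCacheSize;
	desiredCacheSize = (desiredCacheSize < minCacheSize) ? minCacheSize : desiredCacheSize;
	return desiredCacheSize; /* 128KB - the cap dominates once enough has been copied */
}
#endif /* 0 */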
1117
1118
MM_CopyScanCacheVLHGC *
1119
MM_CopyForwardScheme::reserveMemoryForCopy(MM_EnvironmentVLHGC *env, J9Object *objectToEvacuate, MM_AllocationContextTarok *reservingContext, uintptr_t objectReserveSizeInBytes)
1120
{
1121
void *addrBase = NULL;
1122
void *addrTop = NULL;
1123
uintptr_t minimumRequiredCacheSize = objectReserveSizeInBytes;
1124
uintptr_t minimumSingleObjectAllocateSize = _extensions->tlhSurvivorDiscardThreshold;
1125
1126
Assert_MM_objectAligned(env, objectReserveSizeInBytes);
1127
1128
MM_HeapRegionDescriptorVLHGC *region = (MM_HeapRegionDescriptorVLHGC *)_regionManager->tableDescriptorForAddress(objectToEvacuate);
1129
uintptr_t compactGroup = MM_CompactGroupManager::getCompactGroupNumberInContext(env, region, reservingContext);
1130
MM_CopyForwardCompactGroup *copyForwardCompactGroup = &env->_copyForwardCompactGroups[compactGroup];
1131
1132
Assert_MM_true(compactGroup < _compactGroupMaxCount);
1133
1134
MM_CopyScanCacheVLHGC *copyCache = copyForwardCompactGroup->_copyCache;
1135
1136
retry:
1137
/* A survivor copy scan cache exists - check if there is room */
1138
if ((NULL == copyCache) || (((uintptr_t)copyCache->cacheTop - (uintptr_t)copyCache->cacheAlloc) < minimumRequiredCacheSize)) {
1139
/* There is no room for current copy cache */
1140
MM_LightweightNonReentrantLock *listLock = NULL;
1141
if (minimumRequiredCacheSize < copyForwardCompactGroup->_failedAllocateSize) {
1142
/* try to use TLH remainder from previous discard */
1143
if (((uintptr_t)copyForwardCompactGroup->_TLHRemainderTop - (uintptr_t)copyForwardCompactGroup->_TLHRemainderBase) >= minimumRequiredCacheSize) {
1144
addrBase = copyForwardCompactGroup->_TLHRemainderBase;
1145
addrTop = copyForwardCompactGroup->_TLHRemainderTop;
1146
Assert_MM_true(NULL != copyForwardCompactGroup->_TLHRemainderBase);
1147
Assert_MM_true(NULL != copyForwardCompactGroup->_TLHRemainderTop);
1148
copyForwardCompactGroup->resetTLHRemainder();
1149
1150
uintptr_t sublistCount = _reservedRegionList[compactGroup]._sublistCount;
1151
uintptr_t sublistIndex = env->getWorkerID() % sublistCount;
1152
MM_ReservedRegionListHeader::Sublist *regionList = &_reservedRegionList[compactGroup]._sublists[sublistIndex];
1153
listLock = &regionList->_lock;
1154
} else if (minimumSingleObjectAllocateSize < minimumRequiredCacheSize) {
1155
addrBase = reserveMemoryForObject(env, compactGroup, minimumRequiredCacheSize, &listLock);
1156
1157
if (NULL != addrBase) {
1158
addrTop = (void *)((uintptr_t)addrBase + minimumRequiredCacheSize);
1159
} else {
1160
/* failed to allocate - set the threshold to short-circuit future alloc attempts */
1161
copyForwardCompactGroup->_failedAllocateSize = minimumRequiredCacheSize;
1162
}
1163
} else {
1164
UDATA desiredCacheSize = getDesiredCopyCacheSize(env, compactGroup);
1165
desiredCacheSize = OMR_MAX(desiredCacheSize, minimumRequiredCacheSize);
1166
if (!reserveMemoryForCache(env, compactGroup, desiredCacheSize, &addrBase, &addrTop, &listLock)) {
1167
/* failed to allocate - set the threshold to short-circuit future alloc attempts:
* we should never (in this GC) attempt to allocate a cache (TLH) from this compact group
*/
1170
copyForwardCompactGroup->_failedAllocateSize = 0;
1171
}
1172
}
1173
}
1174
1175
if (NULL != copyCache) {
1176
/* we can't use this cache as a destination so release local cache first. */
1177
MM_CopyScanCacheVLHGC * stoppedCache = stopCopyingIntoCache(env, compactGroup);
1178
Assert_MM_true(stoppedCache == copyCache);
1179
1180
if (copyCache->isCurrentlyBeingScanned()) {
1181
/* this cache is already being scanned. The scanning thread will add it to the free list when it's finished */
1182
copyCache = NULL;
1183
} else {
1184
/* assert that deferred or scan cache is not this cache */
1185
Assert_MM_true(copyCache != env->_scanCache);
1186
Assert_MM_true(copyCache != env->_deferredScanCache);
1187
/* Either cache is completely scanned or it has never been scanned.
1188
* If it has never been scanned, it is here that we should decide if there is scan work to do
1189
* and whether to add to the scan list
1190
*/
1191
if (copyCache->isScanWorkAvailable()) {
1192
/* must not have local references still in use before adding to global list */
1193
Assert_MM_true(copyCache->cacheBase <= copyCache->cacheAlloc);
1194
Assert_MM_true(copyCache->cacheAlloc <= copyCache->cacheTop);
1195
Assert_MM_true(copyCache->scanCurrent <= copyCache->cacheAlloc);
1196
#if defined(J9MODRON_TGC_PARALLEL_STATISTICS)
1197
env->_copyForwardStats._releaseScanListCount += 1;
1198
#endif /* J9MODRON_TGC_PARALLEL_STATISTICS */
1199
addCacheEntryToScanCacheListAndNotify(env, copyCache);
1200
copyCache = NULL;
1201
} else {
1202
/* we have decided to stop copying into this cache so ensure that we won't try to keep using it as one (we will allocate a new cache structure if the allocate succeeds) */
1203
addCacheEntryToFreeCacheList(env, copyCache);
1204
copyCache = NULL;
1205
}
1206
}
1207
}
1208
1209
if (NULL != addrBase) {
1210
/* allocate from reserveMemory or TLHRemainder */
1211
Assert_MM_true(NULL == copyCache);
1212
1213
/* If we didn't already have a copy cache structure or dropped it earlier in the call, allocate a new one */
1214
copyCache = getFreeCache(env);
1215
if (NULL != copyCache) {
1216
copyForwardCompactGroup->_copyCache = copyCache;
1217
copyForwardCompactGroup->_copyCacheLock = listLock;
1218
reinitCache(env, copyCache, addrBase, addrTop, compactGroup);
1219
1220
Assert_MM_true(NULL != listLock);
1221
Assert_MM_true(0 == copyForwardCompactGroup->_freeMemoryMeasured);
1222
} else {
1223
/* ensure that we have realized the abort flag (since getFreeCache only returns NULL if it had to abort) */
1224
Assert_MM_true(abortFlagRaised());
1225
}
1226
}
1227
1228
if (NULL == copyCache) {
1229
/* Record stats */
1230
copyForwardCompactGroup->_failedCopiedObjects += 1;
1231
copyForwardCompactGroup->_failedCopiedBytes += objectReserveSizeInBytes;
1232
} else {
1233
Assert_MM_true(NULL != copyCache->cacheAlloc);
1234
Assert_MM_true(NULL != copyCache->cacheTop);
1235
Assert_MM_true(NULL != copyCache->cacheBase);
1236
if (((uintptr_t)addrTop - (uintptr_t)addrBase) < minimumRequiredCacheSize) {
1237
/* With an increased tlhSurvivorDiscardThreshold we may prefer a TLH over a single-object allocation,
* in which case the TLH cache may not be large enough to satisfy the allocation.
* We'll try again but force single-object allocation.
* We could have detected the failed TLH allocate earlier in the method, but doing it this late gives the
* current cache remainder or the new cache a chance to be preserved (if sufficiently large) as the thread's TLH remainder.
*/
1243
Assert_MM_true(_extensions->tlhSurvivorDiscardThreshold > _minCacheSize);
1244
minimumSingleObjectAllocateSize = _minCacheSize;
1245
addrBase = NULL;
1246
addrTop = NULL;
1247
1248
goto retry;
1249
}
1250
if (_extensions->tarokEnableExpensiveAssertions) {
1251
/* verify that the mark map for this range is clear */
1252
Assert_MM_true(NULL == MM_HeapMapIterator(_extensions, _markMap, (UDATA*)copyCache->cacheAlloc, (UDATA*)copyCache->cacheTop, false).nextObject());
1253
}
1254
}
1255
}
1256
1257
return copyCache;
1258
}
1259
1260
MMINLINE bool
1261
MM_CopyForwardScheme::copyAndForward(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, volatile j9object_t* objectPtrIndirect, bool leafType)
1262
{
1263
J9Object *originalObjectPtr = *objectPtrIndirect;
1264
J9Object *objectPtr = originalObjectPtr;
1265
bool success = true;
1266
1267
if((NULL != objectPtr) && isObjectInEvacuateMemory(objectPtr)) {
1268
/* Object needs to be copied and forwarded. Check if the work has already been done */
1269
MM_ForwardedHeader forwardHeader(objectPtr, _extensions->compressObjectReferences());
1270
objectPtr = forwardHeader.getForwardedObject();
1271
1272
if(NULL != objectPtr) {
1273
/* Object has been copied - update the forwarding information and return */
1274
*objectPtrIndirect = objectPtr;
1275
} else {
1276
Assert_GC_true_with_message(env, (UDATA)0x99669966 == _extensions->objectModel.getPreservedClass(&forwardHeader)->eyecatcher, "Invalid class in objectPtr=%p\n", originalObjectPtr);
1277
1278
1279
objectPtr = copy(env, reservingContext, &forwardHeader, leafType);
1280
if (NULL == objectPtr) {
1281
success = false;
1282
} else if (originalObjectPtr != objectPtr) {
1283
/* Update the slot */
1284
*objectPtrIndirect = objectPtr;
1285
}
1286
}
1287
}
1288
1289
return success;
1290
}
1291
1292
MMINLINE bool
1293
MM_CopyForwardScheme::copyAndForward(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, J9Object *objectPtr, GC_SlotObject *slotObject, bool leafType)
1294
{
1295
J9Object *value = slotObject->readReferenceFromSlot();
1296
J9Object *preservedValue = value;
1297
1298
bool success = copyAndForward(env, reservingContext, &value, leafType);
1299
1300
if (success) {
1301
if(preservedValue != value) {
1302
slotObject->writeReferenceToSlot(value);
1303
}
1304
_interRegionRememberedSet->rememberReferenceForCopyForward(env, objectPtr, value);
1305
} else {
1306
Assert_MM_false(_abortInProgress);
1307
Assert_MM_true(preservedValue == value);
1308
env->_workStack.push(env, objectPtr);
1309
}
1310
1311
return success;
1312
}
1313
1314
MMINLINE bool
1315
MM_CopyForwardScheme::copyAndForward(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, J9Object *objectPtr, volatile j9object_t* slot)
1316
{
1317
bool success = copyAndForward(env, reservingContext, slot);
1318
1319
if (success) {
1320
_interRegionRememberedSet->rememberReferenceForCopyForward(env, objectPtr, *slot);
1321
} else {
1322
Assert_MM_false(_abortInProgress);
1323
/* Because there is a caller where the slot could be scanned by multiple threads at once, it is possible on failure that
1324
* the value of the slot HAS in fact changed (another thread had room to satisfy the copy). Because of this, we cannot check whether the preserved
1325
* slot value would be unchanged (unlike other copyAndForward() implementations).
1326
*/
1327
env->_workStack.push(env, objectPtr);
1328
}
1329
1330
return success;
1331
}
1332
1333
MMINLINE bool
1334
MM_CopyForwardScheme::copyAndForwardPointerArray(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, J9IndexableObject *arrayPtr, UDATA startIndex, GC_SlotObject *slotObject)
1335
{
1336
J9Object *value = slotObject->readReferenceFromSlot();
1337
J9Object *preservedValue = value;
1338
1339
bool success = copyAndForward(env, reservingContext, &value);
1340
1341
if (success) {
1342
if(preservedValue != value) {
1343
slotObject->writeReferenceToSlot(value);
1344
}
1345
_interRegionRememberedSet->rememberReferenceForCopyForward(env, (J9Object *)arrayPtr, value);
1346
} else {
1347
Assert_MM_false(_abortInProgress);
1348
Assert_MM_true(preservedValue == value);
1349
/* We push only the current split unit (from startIndex, with a size of one array-split unit).
1350
* This is to avoid duplicate work which would otherwise be created,
1351
* if each failed-to-scan-to-completion copy-scan cache created a work unit extending to the end of the array
1352
*/
1353
void *element1 = (void *)arrayPtr;
1354
void *element2 = (void *)((startIndex << PACKET_ARRAY_SPLIT_SHIFT) | PACKET_ARRAY_SPLIT_TAG | PACKET_ARRAY_SPLIT_CURRENT_UNIT_ONLY_TAG);
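/* The split-array work unit packs the element index into the upper bits (index << PACKET_ARRAY_SPLIT_SHIFT)
 * and the tag bits into the low bits; consumers recover the index by shifting right by PACKET_ARRAY_SPLIT_SHIFT,
 * as the assertion below checks.
 */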
1355
Assert_MM_true(startIndex == (((UDATA)element2) >> PACKET_ARRAY_SPLIT_SHIFT));
1356
env->_workStack.push(env, element1, element2);
1357
}
1358
1359
return success;
1360
}
1361
1362
MMINLINE bool
1363
MM_CopyForwardScheme::copyAndForwardObjectClass(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, J9Object *objectPtr)
1364
{
1365
bool success = true;
1366
1367
#if defined(J9VM_GC_DYNAMIC_CLASS_UNLOADING)
1368
_extensions->classLoaderRememberedSet->rememberInstance(env, objectPtr);
1369
if(isDynamicClassUnloadingEnabled()) {
1370
j9object_t classObject = (j9object_t)J9GC_J9OBJECT_CLAZZ(objectPtr, env)->classObject;
1371
Assert_MM_true(J9_INVALID_OBJECT != classObject);
1372
if (copyAndForward(env, reservingContext, &classObject)) {
1373
/* we don't need to update anything with the new address of the class object since objectPtr points at the immobile J9Class */
1374
} else {
1375
/* we failed to copy (and, therefore, mark) the class so we need to scan this object again */
1376
Assert_MM_false(_abortInProgress);
1377
env->_workStack.push(env, objectPtr);
1378
success = false;
1379
}
1380
}
1381
#endif /* J9VM_GC_DYNAMIC_CLASS_UNLOADING */
1382
1383
return success;
1384
}
1385
1386
/**
1387
* Cleanup after CopyForward work is complete.
1388
* This should only be called once per collection by the main thread.
1389
*/
1390
void
1391
MM_CopyForwardScheme::mainCleanupForCopyForward(MM_EnvironmentVLHGC *env)
1392
{
1393
/* make sure that we have dropped any remaining references to any on-heap scan caches which we would have allocated if we hit overflow */
1394
_cacheFreeList.removeAllHeapAllocatedChunks(env);
1395
1396
if (_extensions->tarokEnableExpensiveAssertions) {
1397
/* ensure that all managed caches have been returned to the free list */
1398
Assert_MM_true(_cacheFreeList.getTotalCacheCount() == _cacheFreeList.countCaches());
1399
}
1400
1401
Assert_MM_true(static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_vlhgcIncrementStats._copyForwardStats._ownableSynchronizerCandidates >= static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_vlhgcIncrementStats._copyForwardStats._ownableSynchronizerSurvived);
1402
}
1403
1404
/**
1405
* Initialize the copy forward scheme for a garbage collection.
1406
* Initialize all internal values to start a garbage collection. This should only be
1407
* called once per collection by the main thread.
1408
*/
1409
void
1410
MM_CopyForwardScheme::mainSetupForCopyForward(MM_EnvironmentVLHGC *env)
1411
{
1412
clearAbortFlag();
1413
_abortInProgress = false;
1414
_clearableProcessingStarted = false;
1415
_failedToExpand = false;
1416
_phantomReferenceRegionsToProcess = 0;
1417
1418
/* Sort all hot fields for all classes if dynamicBreadthFirstScanOrdering is enabled */
1419
if (MM_GCExtensions::OMR_GC_SCAVENGER_SCANORDERING_DYNAMIC_BREADTH_FIRST == _extensions->scavengerScanOrdering) {
1420
MM_HotFieldUtil::sortAllHotFieldData(_javaVM, _extensions->globalVLHGCStats.gcCount);
1421
}
1422
1423
/* Cache of the mark map */
1424
_markMap = env->_cycleState->_markMap;
1425
1426
/* Cache heap ranges for fast "valid object" checks (this can change in an expanding heap situation, so we refetch every cycle) */
1427
_heapBase = _extensions->heap->getHeapBase();
1428
_heapTop = _extensions->heap->getHeapTop();
1429
1430
/* Record any special action for clearing / unloading this cycle */
1431
#if defined(J9VM_GC_DYNAMIC_CLASS_UNLOADING)
1432
_dynamicClassUnloadingEnabled = env->_cycleState->_dynamicClassUnloadingEnabled;
1433
#endif /* J9VM_GC_DYNAMIC_CLASS_UNLOADING */
1434
_collectStringConstantsEnabled = _extensions->collectStringConstants;
1435
1436
/* ensure heap base is aligned to region size */
1437
UDATA heapBase = (UDATA)_extensions->heap->getHeapBase();
1438
UDATA regionSize = _regionManager->getRegionSize();
1439
Assert_MM_true((0 != regionSize) && (0 == (heapBase % regionSize)));
1440
1441
/* Reinitialize the _doneIndex */
1442
_doneIndex = 0;
1443
1444
/* Context 0 is currently our "common destination context" */
1445
_commonContext = (MM_AllocationContextTarok *)_extensions->globalAllocationManager->getAllocationContextByIndex(0);
1446
1447
/* We don't want to split too aggressively so take the base2 log of our thread count as our current contention trigger.
1448
* Note that this number could probably be improved upon but log2 "seemed" to make sense for contention measurement and
1449
* provided a measurable performance benefit in the tests we were running.
1450
*/
1451
_regionSublistContentionThreshold = MM_Math::floorLog2(_extensions->dispatcher->activeThreadCount());
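/* Illustrative arithmetic only: with 16 active GC threads the contention trigger is floorLog2(16) == 4,
 * and with 64 threads it is 6, so a region sublist is split only after several observed contentions
 * rather than on the first one.
 */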
1452
1453
_interRegionRememberedSet->setupForPartialCollect(env);
1454
1455
/* Record whether finalizable processing is required in this copy-forward collection */
1456
_shouldScanFinalizableObjects = _extensions->finalizeListManager->isFinalizableObjectProcessingRequired();
1457
1458
cleanCompressedSurvivorCardTable(env);
1459
}
1460
1461
/**
1462
* Per worker thread pre-gc initialization.
1463
*/
1464
void
1465
MM_CopyForwardScheme::workerSetupForCopyForward(MM_EnvironmentVLHGC *env)
1466
{
1467
/* Reset the copy caches */
1468
Assert_MM_true(NULL == env->_scanCache);
1469
Assert_MM_true(NULL == env->_deferredScanCache);
1470
1471
/* install this thread's compact group structures */
1472
Assert_MM_true(NULL == env->_copyForwardCompactGroups);
1473
Assert_MM_true(NULL != _compactGroupBlock);
1474
env->_copyForwardCompactGroups = &_compactGroupBlock[env->getWorkerID() * _compactGroupMaxCount];
1475
1476
for (UDATA compactGroup = 0; compactGroup < _compactGroupMaxCount; compactGroup++) {
1477
env->_copyForwardCompactGroups[compactGroup].initialize(env);
1478
}
1479
1480
Assert_MM_true(NULL == env->_lastOverflowedRsclWithReleasedBuffers);
1481
}
1482
1483
/**
1484
* Merge any per thread GC stats into the main stat structure.
1485
*/
1486
void
1487
MM_CopyForwardScheme::mergeGCStats(MM_EnvironmentVLHGC *env)
1488
{
1489
PORT_ACCESS_FROM_ENVIRONMENT(env);
1490
MM_CopyForwardStats *localStats = &env->_copyForwardStats;
1491
MM_CompactGroupPersistentStats *persistentStats = _extensions->compactGroupPersistentStats;
1492
1493
/* the following statistics are only updated at the merge point */
1494
Assert_MM_true(0 == localStats->_copyObjectsTotal);
1495
Assert_MM_true(0 == localStats->_copyBytesTotal);
1496
Assert_MM_true(0 == localStats->_copyDiscardBytesTotal);
1497
Assert_MM_true(0 == localStats->_copyObjectsEden);
1498
Assert_MM_true(0 == localStats->_copyBytesEden);
1499
Assert_MM_true(0 == localStats->_copyDiscardBytesEden);
1500
Assert_MM_true(0 == localStats->_copyObjectsNonEden);
1501
Assert_MM_true(0 == localStats->_copyBytesNonEden);
1502
Assert_MM_true(0 == localStats->_copyDiscardBytesNonEden);
1503
1504
/* sum up the per-compact group data before entering the lock */
1505
for (UDATA compactGroupNumber = 0; compactGroupNumber < _compactGroupMaxCount; compactGroupNumber++) {
1506
MM_CopyForwardCompactGroup *compactGroup = &env->_copyForwardCompactGroups[compactGroupNumber];
1507
UDATA totalCopiedBytes = compactGroup->_edenStats._copiedBytes + compactGroup->_nonEdenStats._copiedBytes;
1508
UDATA totalLiveBytes = compactGroup->_edenStats._liveBytes + compactGroup->_nonEdenStats._liveBytes;
1509
1510
localStats->_copyObjectsTotal += compactGroup->_edenStats._copiedObjects + compactGroup->_nonEdenStats._copiedObjects;
1511
localStats->_copyBytesTotal += totalCopiedBytes;
1512
localStats->_scanObjectsTotal += compactGroup->_edenStats._scannedObjects + compactGroup->_nonEdenStats._scannedObjects;
1513
localStats->_scanBytesTotal += compactGroup->_edenStats._scannedBytes + compactGroup->_nonEdenStats._scannedBytes;
1514
1515
localStats->_copyObjectsEden += compactGroup->_edenStats._copiedObjects;
1516
localStats->_copyBytesEden += compactGroup->_edenStats._copiedBytes;
1517
localStats->_scanObjectsEden += compactGroup->_edenStats._scannedObjects;
1518
localStats->_scanBytesEden += compactGroup->_edenStats._scannedBytes;
1519
1520
localStats->_copyObjectsNonEden += compactGroup->_nonEdenStats._copiedObjects;
1521
localStats->_copyBytesNonEden += compactGroup->_nonEdenStats._copiedBytes;
1522
localStats->_scanObjectsNonEden += compactGroup->_nonEdenStats._scannedObjects;
1523
localStats->_scanBytesNonEden += compactGroup->_nonEdenStats._scannedBytes;
1524
1525
localStats->_copyDiscardBytesTotal += compactGroup->_discardedBytes;
1526
localStats->_TLHRemainderCount += compactGroup->_TLHRemainderCount;
1527
1528
if (0 == MM_CompactGroupManager::getRegionAgeFromGroup(env, compactGroupNumber)) {
1529
localStats->_copyDiscardBytesEden += compactGroup->_discardedBytes;
1530
} else {
1531
localStats->_copyDiscardBytesNonEden += compactGroup->_discardedBytes;
1532
}
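/* A compact group with region age 0 corresponds to Eden, so its discarded bytes are attributed to the
 * Eden totals; all older ages fall into the non-Eden totals.
 */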
1533
1534
/* use an atomic since other threads may be doing this at the same time */
1535
if (0 != totalLiveBytes) {
1536
MM_AtomicOperations::add(&persistentStats[compactGroupNumber]._measuredBytesCopiedFromGroupDuringCopyForward, totalLiveBytes);
1537
}
1538
1539
if (0 != totalCopiedBytes) {
1540
MM_AtomicOperations::add(&persistentStats[compactGroupNumber]._measuredBytesCopiedToGroupDuringCopyForward, totalCopiedBytes);
1541
MM_AtomicOperations::addU64(&persistentStats[compactGroupNumber]._measuredAllocationAgeToGroupDuringCopyForward, compactGroup->_allocationAge);
1542
}
1543
1544
if (0 != (totalCopiedBytes + compactGroup->_discardedBytes)) {
1545
Trc_MM_CopyForwardScheme_mergeGCStats_efficiency(env->getLanguageVMThread(), compactGroupNumber, totalCopiedBytes, compactGroup->_discardedBytes, (double)(compactGroup->_discardedBytes) / (double)(totalCopiedBytes + compactGroup->_discardedBytes));
1546
}
1547
}
1548
1549
/* Protect the merge with the mutex (this is done by multiple threads in the parallel collector) */
1550
omrthread_monitor_enter(_extensions->gcStatsMutex);
1551
static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_vlhgcIncrementStats._copyForwardStats.merge(localStats);
1552
static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_vlhgcIncrementStats._workPacketStats.merge(&env->_workPacketStats);
1553
static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_vlhgcIncrementStats._irrsStats.merge(&env->_irrsStats);
1554
omrthread_monitor_exit(_extensions->gcStatsMutex);
1555
1556
/* record the thread-specific parallelism stats in the trace buffer. This partially duplicates info in -Xtgc:parallel */
1557
Trc_MM_CopyForwardScheme_parallelStats(
1558
env->getLanguageVMThread(),
1559
(U_32)env->getWorkerID(),
1560
(U_32)j9time_hires_delta(0, env->_copyForwardStats._workStallTime, J9PORT_TIME_DELTA_IN_MILLISECONDS),
1561
(U_32)j9time_hires_delta(0, env->_copyForwardStats._completeStallTime, J9PORT_TIME_DELTA_IN_MILLISECONDS),
1562
(U_32)j9time_hires_delta(0, env->_copyForwardStats._syncStallTime, J9PORT_TIME_DELTA_IN_MILLISECONDS),
1563
(U_32)j9time_hires_delta(0, env->_copyForwardStats._irrsStallTime, J9PORT_TIME_DELTA_IN_MILLISECONDS),
1564
(U_32)env->_copyForwardStats._workStallCount,
1565
(U_32)env->_copyForwardStats._completeStallCount,
1566
(U_32)env->_copyForwardStats._syncStallCount,
1567
(U_32)env->_copyForwardStats._irrsStallCount,
1568
env->_copyForwardStats._acquireFreeListCount,
1569
env->_copyForwardStats._releaseFreeListCount,
1570
env->_copyForwardStats._acquireScanListCount,
1571
env->_copyForwardStats._releaseScanListCount,
1572
env->_copyForwardStats._copiedArraysSplit);
1573
1574
if (env->_copyForwardStats._aborted) {
1575
Trc_MM_CopyForwardScheme_parallelStatsForAbort(
1576
env->getLanguageVMThread(),
1577
(U_32)env->getWorkerID(),
1578
(U_32)j9time_hires_delta(0, env->_workPacketStats._workStallTime, J9PORT_TIME_DELTA_IN_MILLISECONDS),
1579
(U_32)j9time_hires_delta(0, env->_workPacketStats._completeStallTime, J9PORT_TIME_DELTA_IN_MILLISECONDS),
1580
(U_32)j9time_hires_delta(0, env->_copyForwardStats._markStallTime, J9PORT_TIME_DELTA_IN_MILLISECONDS),
1581
(U_32)j9time_hires_delta(0, env->_copyForwardStats._abortStallTime, J9PORT_TIME_DELTA_IN_MILLISECONDS),
1582
(U_32)env->_workPacketStats._workStallCount,
1583
(U_32)env->_workPacketStats._completeStallCount,
1584
(U_32)env->_copyForwardStats._markStallCount,
1585
(U_32)env->_copyForwardStats._abortStallCount,
1586
env->_workPacketStats.workPacketsAcquired,
1587
env->_workPacketStats.workPacketsReleased,
1588
env->_workPacketStats.workPacketsExchanged,
1589
env->_copyForwardStats._markedArraysSplit);
1590
}
1591
}
1592
1593
void
1594
MM_CopyForwardScheme::copyForwardPreProcess(MM_EnvironmentVLHGC *env)
1595
{
1596
PORT_ACCESS_FROM_ENVIRONMENT(env);
1597
1598
/* stats management */
1599
static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_vlhgcIncrementStats._copyForwardStats._startTime = j9time_hires_clock();
1600
/* Clear the gc statistics */
1601
clearGCStats(env);
1602
1603
/* Perform any pre copy forwarding changes to the region set */
1604
preProcessRegions(env);
1605
1606
if (0 != _regionCountCannotBeEvacuated) {
1607
/* need to run Hybrid mode, reuse InputListMonitor for both workPackets and ScanCopyCache */
1608
_workQueueMonitorPtr = env->_cycleState->_workPackets->getInputListMonitorPtr();
1609
_workQueueWaitCountPtr = env->_cycleState->_workPackets->getInputListWaitCountPtr();
1610
}
1611
/* Perform any main-specific setup */
1612
mainSetupForCopyForward(env);
1613
}
1614
1615
void
1616
MM_CopyForwardScheme::copyForwardPostProcess(MM_EnvironmentVLHGC *env)
1617
{
1618
PORT_ACCESS_FROM_ENVIRONMENT(env);
1619
1620
mainCleanupForCopyForward(env);
1621
1622
/* Record the completion time of the copy forward cycle */
1623
static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_vlhgcIncrementStats._copyForwardStats._endTime = j9time_hires_clock();
1624
1625
updateLeafRegions(env);
1626
1627
/* We used memory from the ACs for survivor space - make sure it doesn't hang around as allocation space */
1628
clearReservedRegionLists(env);
1629
_extensions->globalAllocationManager->flushAllocationContexts(env);
1630
1631
copyForwardCompletedSuccessfully(env);
1632
1633
if(_extensions->tarokEnableExpensiveAssertions) {
1634
/* Verify the result of the copy forward operation (heap integrity, etc) */
1635
verifyCopyForwardResult(MM_EnvironmentVLHGC::getEnvironment(env));
1636
}
1637
1638
if (0 != _regionCountCannotBeEvacuated) {
1639
_workQueueMonitorPtr = &_scanCacheMonitor;
1640
_workQueueWaitCountPtr = &_scanCacheWaitCount;
1641
}
1642
1643
/* Do any final work to regions in order to release them back to the main collector implementation */
1644
postProcessRegions(env);
1645
1646
static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_abortFlagRaisedDuringPGC = copyForwardCompletedSuccessfully(env);
1647
}
1648
1649
#if defined(OMR_GC_VLHGC_CONCURRENT_COPY_FORWARD)
1650
void
1651
MM_CopyForwardScheme::concurrentCopyForwardCollectionSet(MM_EnvironmentVLHGC *env)
1652
{
1653
/* isConcurrentCycleInProgress() tells us if this is the first PGC increment or not. If it is
1654
* we'll call copyForwardPreProcess(). isConcurrentCycleInProgress state/value will get updated
1655
* preventing copyForwardPreProcess from being called in subsequent increments. For the initial increment,
1656
* isConcurrentCycleInProgress will change from false to true, causing only the preProcess step to
1657
* be performed */
1658
if (!isConcurrentCycleInProgress())
1659
{
1660
copyForwardPreProcess(env);
1661
}
1662
1663
/* Perform the copy forward. This step will update the isConcurrentCycleInProgress state/value.
1664
* Note: the following is temporary; it will be updated to call the concurrent copy-forward state machine */
1665
MM_CopyForwardSchemeTask copyForwardTask(env, _dispatcher, this, env->_cycleState);
1666
_dispatcher->run(env, &copyForwardTask);
1667
1668
/* isConcurrentCycleInProgress() tells us if this is the last PGC increment or not. If this is the
1669
* last increment, the isConcurrentCycleInProgress state/value will have been updated from true to false,
1670
* which will cause the following copyForwardPostProcess step to be performed */
1671
if (!isConcurrentCycleInProgress())
1672
{
1673
copyForwardPostProcess(env);
1674
}
1675
}
1676
#endif /* defined(OMR_GC_VLHGC_CONCURRENT_COPY_FORWARD) */
1677
1678
void
1679
MM_CopyForwardScheme::copyForwardCollectionSet(MM_EnvironmentVLHGC *env)
1680
{
1681
copyForwardPreProcess(env);
1682
1683
/* And perform the copy forward */
1684
MM_CopyForwardSchemeTask copyForwardTask(env, _dispatcher, this, env->_cycleState);
1685
_dispatcher->run(env, &copyForwardTask);
1686
1687
copyForwardPostProcess(env);
1688
}
1689
1690
/**
1691
* Determine whether a copy forward that has been started did complete successfully.
1692
* @return true if the copyForward completed successfully, false otherwise.
1693
*/
1694
bool
1695
MM_CopyForwardScheme::copyForwardCompletedSuccessfully(MM_EnvironmentVLHGC *env)
1696
{
1697
return !abortFlagRaised();
1698
}
1699
1700
/****************************************
1701
* Copy-Scan Cache management
1702
****************************************
1703
* TODO: move all the CopyScanCache methods into the CopyScanCache class.
1704
*/
1705
1706
/* getFreeCache makes the assumption that there will be at least 1 entry on the scan list if there are no entries on the free list.
1707
* This requires that there be at least (N * _cachesPerThread) scan cache entries, where N is the number of threads (main + workers)
1708
*/
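/* Illustrative sizing only: with 8 GC threads and, say, 5 caches per thread (flip, tenure, scan, large,
 * deferred), at least 8 * 5 = 40 cache entries must exist so that the head of a scan list is guaranteed
 * to hold a valid entry whenever the free list is empty.
 */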
1709
MM_CopyScanCacheVLHGC *
1710
MM_CopyForwardScheme::getFreeCache(MM_EnvironmentVLHGC *env)
1711
{
1712
#if defined(J9MODRON_TGC_PARALLEL_STATISTICS)
1713
env->_copyForwardStats._acquireFreeListCount += 1;
1714
#endif /* J9MODRON_TGC_PARALLEL_STATISTICS */
1715
/* Check the free list */
1716
MM_CopyScanCacheVLHGC *cache = _cacheFreeList.popCache(env);
1717
if(NULL != cache) {
1718
return cache;
1719
}
1720
1721
/* No thread can use more than _cachesPerThread cache entries at 1 time (flip, tenure, scan, large, possibly deferred)
1722
* So long as (N * _cachesPerThread) cache entries exist,
1723
* the head of the scan list will contain a valid entry */
1724
env->_copyForwardStats._scanCacheOverflow = true;
1725
1726
if (NULL == cache) {
1727
/* we couldn't get a free cache so we must be in an overflow scenario. Try creating new cache structures on the heap */
1728
cache = createScanCacheForOverflowInHeap(env);
1729
if (NULL == cache) {
1730
/* we couldn't overflow so we have no choice but to abort the copy-forward */
1731
raiseAbortFlag(env);
1732
}
1733
}
1734
/* Overflow or abort was hit so alert other threads that are waiting */
1735
omrthread_monitor_enter(*_workQueueMonitorPtr);
1736
if(0 != *_workQueueWaitCountPtr) {
1737
omrthread_monitor_notify(*_workQueueMonitorPtr);
1738
}
1739
omrthread_monitor_exit(*_workQueueMonitorPtr);
1740
return cache;
1741
}
1742
1743
void
1744
MM_CopyForwardScheme::addCacheEntryToFreeCacheList(MM_EnvironmentVLHGC *env, MM_CopyScanCacheVLHGC *newCacheEntry)
1745
{
1746
_cacheFreeList.pushCache(env, newCacheEntry);
1747
}
1748
1749
void
1750
MM_CopyForwardScheme::addCacheEntryToScanCacheListAndNotify(MM_EnvironmentVLHGC *env, MM_CopyScanCacheVLHGC *newCacheEntry)
1751
{
1752
UDATA numaNode = _regionManager->tableDescriptorForAddress(newCacheEntry->scanCurrent)->getNumaNode();
1753
_cacheScanLists[numaNode].pushCache(env, newCacheEntry);
1754
if (0 != *_workQueueWaitCountPtr) {
1755
/* Added an entry to the scan list - notify any other threads that a new entry has appeared on the list */
1756
omrthread_monitor_enter(*_workQueueMonitorPtr);
1757
omrthread_monitor_notify(*_workQueueMonitorPtr);
1758
omrthread_monitor_exit(*_workQueueMonitorPtr);
1759
}
1760
}
1761
1762
void
1763
MM_CopyForwardScheme::flushCache(MM_EnvironmentVLHGC *env, MM_CopyScanCacheVLHGC *cache)
1764
{
1765
Assert_MM_false(cache->isSplitArray());
1766
if(0 == (cache->flags & J9VM_MODRON_SCAVENGER_CACHE_TYPE_COPY)) {
1767
if(0 == (cache->flags & J9VM_MODRON_SCAVENGER_CACHE_TYPE_CLEARED)) {
1768
clearCache(env, cache);
1769
}
1770
#if defined(J9MODRON_TGC_PARALLEL_STATISTICS)
1771
env->_copyForwardStats._releaseFreeListCount += 1;
1772
#endif /* J9MODRON_TGC_PARALLEL_STATISTICS */
1773
addCacheEntryToFreeCacheList(env, cache);
1774
}
1775
}
1776
1777
bool
1778
MM_CopyForwardScheme::clearCache(MM_EnvironmentVLHGC *env, MM_CopyScanCacheVLHGC *cache)
1779
{
1780
uintptr_t discardSize = (uintptr_t)cache->cacheTop - (uintptr_t)cache->cacheAlloc;
1781
Assert_MM_true(0 == (cache->flags & J9VM_MODRON_SCAVENGER_CACHE_TYPE_CLEARED));
1782
Assert_MM_false(cache->isSplitArray());
1783
bool remainderCreated = false;
1784
1785
UDATA compactGroup = cache->_compactGroup;
1786
Assert_MM_true(compactGroup < _compactGroupMaxCount);
1787
MM_CopyForwardCompactGroup *compactGroupForMarkData = &(env->_copyForwardCompactGroups[compactGroup]);
1788
1789
if (0 < discardSize) {
1790
if ((discardSize < env->getExtensions()->tlhSurvivorDiscardThreshold) ||
1791
(discardSize <= ((uintptr_t)compactGroupForMarkData->_TLHRemainderTop - (uintptr_t)compactGroupForMarkData->_TLHRemainderBase))) {
1792
/* Abandon the current entry in the cache */
1793
compactGroupForMarkData->discardTLHRemainder(env, cache->cacheAlloc, cache->cacheTop);
1794
} else {
1795
/* Abandon the current TLHRemainder if one exists */
1796
compactGroupForMarkData->discardTLHRemainder(env);
1797
remainderCreated = true;
1798
compactGroupForMarkData->setTLHRemainder(cache->cacheAlloc, cache->cacheTop);
1799
}
1800
}
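/* Summary of the policy above: the unused tail of the cache is kept as the thread's TLH remainder only
 * when it is at least tlhSurvivorDiscardThreshold bytes and larger than the remainder currently held;
 * otherwise that tail is abandoned.
 */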
1801
1802
/* Broadcast details of that portion of memory within which objects have been allocated */
1803
TRIGGER_J9HOOK_MM_PRIVATE_CACHE_CLEARED(_extensions->privateHookInterface, env->getOmrVMThread(), env->_cycleState->_activeSubSpace,
1804
cache->cacheBase, cache->cacheAlloc, cache->cacheTop);
1805
1806
cache->flags |= J9VM_MODRON_SCAVENGER_CACHE_TYPE_CLEARED;
1807
1808
return remainderCreated;
1809
}
1810
1811
MM_CopyScanCacheVLHGC *
1812
MM_CopyForwardScheme::stopCopyingIntoCache(MM_EnvironmentVLHGC *env, UDATA compactGroup)
1813
{
1814
MM_CopyScanCacheVLHGC *copyCache = env->_copyForwardCompactGroups[compactGroup]._copyCache;
1815
MM_LightweightNonReentrantLock *copyCacheLock = env->_copyForwardCompactGroups[compactGroup]._copyCacheLock;
1816
1817
if (NULL != copyCache) {
1818
Assert_MM_false(copyCache->isSplitArray());
1819
UDATA wastedMemory = env->_copyForwardCompactGroups[compactGroup]._freeMemoryMeasured;
1820
env->_copyForwardCompactGroups[compactGroup]._freeMemoryMeasured = 0;
1821
1822
MM_HeapRegionDescriptorVLHGC * region = (MM_HeapRegionDescriptorVLHGC *)_regionManager->tableDescriptorForAddress(copyCache->cacheBase);
1823
1824
/* atomically add the (age * usedBytes) product from this cache to the region's product */
1825
double newAllocationAgeSizeProduct = region->atomicIncrementAllocationAgeSizeProduct(copyCache->_allocationAgeSizeProduct);
1826
region->updateAgeBounds(copyCache->_lowerAgeBound, copyCache->_upperAgeBound);
1827
1828
/* Return any remaining memory to the pool */
1829
discardRemainingCache(env, copyCache, copyCacheLock, wastedMemory);
1830
1831
Trc_MM_CopyForwardScheme_stopCopyingIntoCache(env->getLanguageVMThread(), _regionManager->mapDescriptorToRegionTableIndex(region), copyCache,
1832
(double)(newAllocationAgeSizeProduct - copyCache->_allocationAgeSizeProduct) / (1024 * 1024) / (1024 * 1024), (double)((UDATA)copyCache->cacheAlloc - (UDATA)region->getLowAddress()) / (1024 * 1024),
1833
(double)copyCache->_allocationAgeSizeProduct / (1024 * 1024) / (1024 * 1024), (double)copyCache->_objectSize / (1024 * 1024), (double)newAllocationAgeSizeProduct / (1024 * 1024) / (1024 * 1024));
1834
1835
copyCache->_allocationAgeSizeProduct = 0.0;
1836
copyCache->_objectSize = 0;
1837
copyCache->_lowerAgeBound = U_64_MAX;
1838
copyCache->_upperAgeBound = 0;
1839
1840
/* Push any cached mark map data out */
1841
flushCacheMarkMap(env, copyCache);
1842
/* Update the region's projected live bytes from the copy cache */
1843
updateProjectedLiveBytesFromCopyScanCache(env, copyCache);
1844
/* Clear the current entry in the cache */
1845
clearCache(env, copyCache);
1846
/* This is no longer a copy cache */
1847
copyCache->flags &= ~J9VM_MODRON_SCAVENGER_CACHE_TYPE_COPY;
1848
/* drop this cache from the env */
1849
env->_copyForwardCompactGroups[compactGroup]._copyCache = NULL;
1850
env->_copyForwardCompactGroups[compactGroup]._copyCacheLock = NULL;
1851
}
1852
return copyCache;
1853
}
1854
1855
void
1856
MM_CopyForwardScheme::updateProjectedLiveBytesFromCopyScanCache(MM_EnvironmentVLHGC *env, MM_CopyScanCacheVLHGC *cache)
1857
{
1858
MM_HeapRegionDescriptorVLHGC *region = (MM_HeapRegionDescriptorVLHGC*)_regionManager->tableDescriptorForAddress(cache->cacheBase);
1859
Assert_MM_true(region->isSurvivorRegion());
1860
UDATA consumedBytes = (UDATA) cache->cacheAlloc - (UDATA) cache->cacheBase;
1861
MM_AtomicOperations::add(&region->_projectedLiveBytes, consumedBytes);
1862
}
1863
1864
void
1865
MM_CopyForwardScheme::discardRemainingCache(MM_EnvironmentVLHGC *env, MM_CopyScanCacheVLHGC *cache, MM_LightweightNonReentrantLock *cacheLock, UDATA wastedMemory)
1866
{
1867
Assert_MM_false(cache->isSplitArray());
1868
if (0 != wastedMemory) {
1869
MM_HeapRegionDescriptorVLHGC *region = (MM_HeapRegionDescriptorVLHGC*)_regionManager->tableDescriptorForAddress(cache->cacheBase);
1870
MM_MemoryPool *pool = region->getMemoryPool();
1871
pool->incrementDarkMatterBytes(wastedMemory);
1872
}
1873
}
1874
1875
void
1876
MM_CopyForwardScheme::addCopyCachesToFreeList(MM_EnvironmentVLHGC *env)
1877
{
1878
for(UDATA index = 0; index < _compactGroupMaxCount; index++) {
1879
MM_CopyScanCacheVLHGC * copyCache = stopCopyingIntoCache(env, index);
1880
if (NULL != copyCache) {
1881
addCacheEntryToFreeCacheList(env, copyCache);
1882
}
1883
}
1884
}
1885
1886
J9Object *
1887
MM_CopyForwardScheme::updateForwardedPointer(J9Object *objectPtr)
1888
{
1889
J9Object *forwardPtr;
1890
1891
if(isObjectInEvacuateMemory(objectPtr)) {
1892
MM_ForwardedHeader forwardedHeader(objectPtr, _extensions->compressObjectReferences());
1893
forwardPtr = forwardedHeader.getForwardedObject();
1894
if(forwardPtr != NULL) {
1895
return forwardPtr;
1896
}
1897
}
1898
1899
return objectPtr;
1900
}
1901
1902
MMINLINE MM_AllocationContextTarok *
1903
MM_CopyForwardScheme::getContextForHeapAddress(void *address)
1904
{
1905
return ((MM_HeapRegionDescriptorVLHGC *)_regionManager->tableDescriptorForAddress(address))->_allocateData._owningContext;
1906
}
1907
1908
J9Object *
1909
MM_CopyForwardScheme::copy(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, MM_ForwardedHeader* forwardedHeader, bool leafType)
1910
{
1911
bool const compressed = env->compressObjectReferences();
1912
J9Object *result = NULL;
1913
J9Object *object = forwardedHeader->getObject();
1914
uintptr_t objectCopySizeInBytes = 0;
1915
uintptr_t objectReserveSizeInBytes = 0;
1916
1917
bool noEvacuation = false;
1918
if (0 != _regionCountCannotBeEvacuated) {
1919
noEvacuation = isObjectInNoEvacuationRegions(env, object);
1920
}
1921
1922
if (_abortInProgress || noEvacuation) {
1923
/* Once threads agreed that abort is in progress or the object is in noEvacuation region, only mark/push should be happening, no attempts even to allocate/copy */
1924
1925
if (_markMap->atomicSetBit(object)) {
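/* atomicSetBit() returns true only for the thread that actually transitions the bit, so exactly one
 * thread marks the object and (for non-leaf objects) pushes it for scanning.
 */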
1926
Assert_MM_false(MM_ForwardedHeader(object, compressed).isForwardedPointer());
1927
/* no need to push a leaf object onto the work stack */
1928
if (!leafType) {
1929
env->_workStack.push(env, object);
1930
}
1931
}
1932
1933
result = object;
1934
} else {
1935
uintptr_t hotFieldsDescriptor = 0;
1936
uintptr_t hotFieldsAlignment = 0;
1937
uintptr_t *hotFieldPadBase = NULL;
1938
uintptr_t hotFieldPadSize = 0;
1939
MM_CopyScanCacheVLHGC *copyCache = NULL;
1940
void *newCacheAlloc = NULL;
1941
GC_ObjectModel *objectModel = &_extensions->objectModel;
1942
1943
/* Object is in the evacuate space but not forwarded. */
1944
objectModel->calculateObjectDetailsForCopy(env, forwardedHeader, &objectCopySizeInBytes, &objectReserveSizeInBytes, &hotFieldsDescriptor);
1945
1946
Assert_MM_objectAligned(env, objectReserveSizeInBytes);
1947
1948
#if defined(J9VM_INTERP_NATIVE_SUPPORT)
1949
/* adjust the reserved object's size if we are aligning hot fields and this class has a known hot field */
1950
if (_extensions->scavengerAlignHotFields && HOTFIELD_SHOULD_ALIGN(hotFieldsDescriptor)) {
1951
/* set the descriptor field if we should be aligning (since assuming that 0 means no is not safe) */
1952
hotFieldsAlignment = hotFieldsDescriptor;
1953
/* for simplicity, add the maximum padding we could need (and back off after allocation) */
1954
objectReserveSizeInBytes += (_cacheLineAlignment - _objectAlignmentInBytes);
1955
Assert_MM_objectAligned(env, objectReserveSizeInBytes);
1956
}
1957
#endif /* J9VM_INTERP_NATIVE_SUPPORT */
1958
1959
reservingContext = getPreferredAllocationContext(reservingContext, object);
1960
1961
copyCache = reserveMemoryForCopy(env, object, reservingContext, objectReserveSizeInBytes);
1962
1963
/* Check if memory was reserved successfully */
1964
if(NULL == copyCache) {
1965
raiseAbortFlag(env);
1966
Assert_MM_true(NULL == result);
1967
} else {
1968
Assert_MM_false(copyCache->isSplitArray());
1969
1970
/* Memory has been reserved */
1971
uintptr_t destinationCompactGroup = copyCache->_compactGroup;
1972
J9Object *destinationObjectPtr = (J9Object *)copyCache->cacheAlloc;
1973
Assert_MM_true(NULL != destinationObjectPtr);
1974
1975
/* now correct for the hot field alignment */
1976
#if defined(J9VM_INTERP_NATIVE_SUPPORT)
1977
if (0 != hotFieldsAlignment) {
1978
uintptr_t remainingInCacheLine = _cacheLineAlignment - ((uintptr_t)destinationObjectPtr % _cacheLineAlignment);
1979
uintptr_t alignmentBias = HOTFIELD_ALIGNMENT_BIAS(hotFieldsAlignment, _objectAlignmentInBytes);
1980
/* do alignment only if the object cannot fit in the remaining space in the cache line */
1981
if ((remainingInCacheLine < objectCopySizeInBytes) && (alignmentBias < remainingInCacheLine)) {
1982
hotFieldPadSize = ((remainingInCacheLine + _cacheLineAlignment) - (alignmentBias % _cacheLineAlignment)) % _cacheLineAlignment;
1983
hotFieldPadBase = (uintptr_t *)destinationObjectPtr;
1984
/* now fix the object pointer so that the hot field is aligned */
1985
destinationObjectPtr = (J9Object *)((uintptr_t)destinationObjectPtr + hotFieldPadSize);
1986
}
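/* Illustrative arithmetic (assumed values): with a 64-byte cache line, a destination at offset 40 within
 * the line, and an alignment bias of 8, the pad is ((24 + 64) - 8) % 64 = 16 bytes, which moves the object
 * to offset 56 so the hot field at bias 8 starts exactly on the next cache-line boundary.
 */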
1987
/* and update the reserved size so that we "un-reserve" the extra memory we said we might need. This is done by
1988
* removing the excess reserve since we already accounted for the hotFieldPadSize by bumping the destination pointer
1989
* and now we need to revert to the amount needed for the object allocation and its array alignment so the rest of
1990
* the method continues to function without needing to know about this extra alignment calculation
1991
*/
1992
objectReserveSizeInBytes = objectReserveSizeInBytes - (_cacheLineAlignment - _objectAlignmentInBytes);
1993
}
1994
#endif /* J9VM_INTERP_NATIVE_SUPPORT */
1995
1996
/* and correct for the double array alignment */
1997
newCacheAlloc = (void *)((uintptr_t)destinationObjectPtr + objectReserveSizeInBytes);
1998
1999
/* Try to swap the forwarding pointer to the destination copy array into the source object */
2000
J9Object* originalDestinationObjectPtr = destinationObjectPtr;
2001
destinationObjectPtr = forwardedHeader->setForwardedObject(destinationObjectPtr);
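/* setForwardedObject() attempts to atomically install the forwarding pointer; it returns our destination
 * if this thread won the race, or the address installed by the winning thread otherwise, which is why the
 * result is compared against originalDestinationObjectPtr below.
 */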
2002
Assert_MM_true(NULL != destinationObjectPtr);
2003
if (destinationObjectPtr == originalDestinationObjectPtr) {
2004
/* Succeeded in forwarding the object - copy and adjust the age value */
2005
2006
#if defined(J9VM_INTERP_NATIVE_SUPPORT)
2007
if (NULL != hotFieldPadBase) {
2008
/* lay down a hole (XXX: This assumes that we are using AOL (address-ordered-list)) */
2009
MM_HeapLinkedFreeHeader::fillWithHoles(hotFieldPadBase, hotFieldPadSize, compressed);
2010
}
2011
#endif /* J9VM_INTERP_NATIVE_SUPPORT */
2012
2013
memcpy((void *)destinationObjectPtr, forwardedHeader->getObject(), objectCopySizeInBytes);
2014
2015
forwardedHeader->fixupForwardedObject(destinationObjectPtr);
2016
2017
if (objectModel->isIndexable(destinationObjectPtr)) {
2018
_extensions->indexableObjectModel.fixupInternalLeafPointersAfterCopy((J9IndexableObject *)destinationObjectPtr, (J9IndexableObject *)forwardedHeader->getObject());
2019
2020
* Updates the internal data address of indexable objects. Every indexable object has a void *dataAddr
2021
* that always points to the array data. It will always point to the address right after the header,
2022
* in the case of contiguous data it will point to the data itself, and in the case of a discontiguous
2023
* arraylet it will point to the first arrayoid. dataAddr is only updated if it points to data
2024
* within the heap. */
2025
_extensions->indexableObjectModel.fixupDataAddr(destinationObjectPtr);
2026
}
2027
2028
objectModel->fixupHashFlagsAndSlot(forwardedHeader, destinationObjectPtr);
2029
2030
/* Update any mark maps and transfer card table data as appropriate for a successful copy */
2031
updateMarkMapAndCardTableOnCopy(env, forwardedHeader->getObject(), destinationObjectPtr, copyCache);
2032
2033
/* Move the cache allocate pointer to reflect the consumed memory */
2034
Assert_MM_true(copyCache->cacheAlloc <= copyCache->cacheTop);
2035
2036
if (_tracingEnabled) {
2037
PORT_ACCESS_FROM_ENVIRONMENT(env);
2038
j9tty_printf(PORTLIB, "Cache alloc: %p newAlloc: %p origO: %p copyO: %p\n", copyCache->cacheAlloc, newCacheAlloc, forwardedHeader->getObject(), destinationObjectPtr);
2039
}
2040
2041
copyCache->cacheAlloc = newCacheAlloc;
2042
Assert_MM_true(copyCache->cacheAlloc <= copyCache->cacheTop);
2043
2044
/* Update the stats */
2045
if (hotFieldPadSize > 0) {
2046
/* account for this as free memory */
2047
env->_copyForwardCompactGroups[destinationCompactGroup]._freeMemoryMeasured += hotFieldPadSize;
2048
}
2049
MM_HeapRegionDescriptorVLHGC * sourceRegion = (MM_HeapRegionDescriptorVLHGC *)_regionManager->tableDescriptorForAddress(object);
2050
uintptr_t sourceCompactGroup = MM_CompactGroupManager::getCompactGroupNumber(env, sourceRegion);
2051
if (sourceRegion->isEden()) {
2052
env->_copyForwardCompactGroups[sourceCompactGroup]._edenStats._liveObjects += 1;
2053
env->_copyForwardCompactGroups[sourceCompactGroup]._edenStats._liveBytes += objectCopySizeInBytes;
2054
env->_copyForwardCompactGroups[destinationCompactGroup]._edenStats._copiedObjects += 1;
2055
env->_copyForwardCompactGroups[destinationCompactGroup]._edenStats._copiedBytes += objectCopySizeInBytes;
2056
} else {
2057
env->_copyForwardCompactGroups[sourceCompactGroup]._nonEdenStats._liveObjects += 1;
2058
env->_copyForwardCompactGroups[sourceCompactGroup]._nonEdenStats._liveBytes += objectCopySizeInBytes;
2059
env->_copyForwardCompactGroups[destinationCompactGroup]._nonEdenStats._copiedObjects += 1;
2060
env->_copyForwardCompactGroups[destinationCompactGroup]._nonEdenStats._copiedBytes += objectCopySizeInBytes;
2061
}
2062
copyCache->_allocationAgeSizeProduct += ((double)objectReserveSizeInBytes * (double)sourceRegion->getAllocationAge());
2063
copyCache->_objectSize += objectReserveSizeInBytes;
2064
copyCache->_lowerAgeBound = OMR_MIN(copyCache->_lowerAgeBound, sourceRegion->getLowerAgeBound());
2065
copyCache->_upperAgeBound = OMR_MAX(copyCache->_upperAgeBound, sourceRegion->getUpperAgeBound());
2066
2067
#if defined(J9VM_GC_LEAF_BITS)
2068
if (_extensions->tarokEnableLeafFirstCopying) {
2069
copyLeafChildren(env, reservingContext, destinationObjectPtr);
2070
}
2071
#endif /* J9VM_GC_LEAF_BITS */
2072
/* depth copy the hot fields of an object if scavenger dynamicBreadthFirstScanOrdering is enabled */
2073
depthCopyHotFields(env, objectModel->getPreservedClass(forwardedHeader), destinationObjectPtr, reservingContext);
2074
}
2075
/* return value for updating the slot */
2076
result = destinationObjectPtr;
2077
}
2078
}
2079
2080
return result;
2081
}
2082
2083
#if defined(J9VM_GC_LEAF_BITS)
2084
void
2085
MM_CopyForwardScheme::copyLeafChildren(MM_EnvironmentVLHGC* env, MM_AllocationContextTarok *reservingContext, J9Object* objectPtr)
2086
{
2087
J9Class *clazz = J9GC_J9OBJECT_CLAZZ(objectPtr, env);
2088
if (GC_ObjectModel::SCAN_MIXED_OBJECT == _extensions->objectModel.getScanType(clazz)) {
2089
UDATA instanceLeafDescription = (UDATA)J9GC_J9OBJECT_CLAZZ(objectPtr, env)->instanceLeafDescription;
2090
/* For now we only support leaf children in small objects. If the leaf description isn't immediate, ignore it to keep the code simple. */
2091
if (1 == (instanceLeafDescription & 1)) {
2092
bool const compressed = env->compressObjectReferences();
2093
fj9object_t* scanPtr = _extensions->mixedObjectModel.getHeadlessObject(objectPtr);
2094
UDATA leafBits = instanceLeafDescription >> 1;
2095
while (0 != leafBits) {
2096
if (1 == (leafBits & 1)) {
2097
/* Copy/Forward the slot reference and perform any inter-region remember work that is required */
2098
GC_SlotObject slotObject(_javaVM->omrVM, scanPtr);
2099
/* pass the leaf flag into the copy method to optimize the abort and hybrid cases (no need to push leaf objects onto the work stack) */
2100
copyAndForward(env, reservingContext, objectPtr, &slotObject, true);
2101
}
2102
leafBits >>= 1;
2103
scanPtr = GC_SlotObject::addToSlotAddress(scanPtr, 1, compressed);
2104
}
2105
}
2106
}
2107
}
2108
#endif /* J9VM_GC_LEAF_BITS */
2109
2110
MMINLINE void
2111
MM_CopyForwardScheme::depthCopyHotFields(MM_EnvironmentVLHGC *env, J9Class *clazz, J9Object *destinationObjectPtr, MM_AllocationContextTarok *reservingContext) {
2112
/* depth copy the hot fields of an object up to a depth specified by depthCopyMax */
2113
J9ClassHotFieldsInfo* hotFieldsInfo = clazz->hotFieldsInfo;
2114
if (env->_hotFieldCopyDepthCount < _extensions->depthCopyMax && NULL != hotFieldsInfo) {
2115
U_8 hotFieldOffset = hotFieldsInfo->hotFieldOffset1;
2116
if (U_8_MAX != hotFieldOffset) {
2117
copyHotField(env, destinationObjectPtr, hotFieldOffset, reservingContext);
2118
U_8 hotFieldOffset2 = hotFieldsInfo->hotFieldOffset2;
2119
if (U_8_MAX !=hotFieldOffset2) {
2120
copyHotField(env, destinationObjectPtr, hotFieldOffset2, reservingContext);
2121
U_8 hotFieldOffset3 = hotFieldsInfo->hotFieldOffset3;
2122
if (U_8_MAX != hotFieldOffset3) {
2123
copyHotField(env, destinationObjectPtr, hotFieldOffset3, reservingContext);
2124
}
2125
}
2126
} else if ((_extensions->alwaysDepthCopyFirstOffset) && (false == _extensions->objectModel.isIndexable(destinationObjectPtr))) {
2127
copyHotField(env, destinationObjectPtr, DEFAULT_HOT_FIELD_OFFSET, reservingContext);
2128
}
2129
}
2130
}
2131
2132
MMINLINE void
2133
MM_CopyForwardScheme::copyHotField(MM_EnvironmentVLHGC *env, J9Object *destinationObjectPtr, U_8 offset, MM_AllocationContextTarok *reservingContext) {
2134
bool const compressed = _extensions->compressObjectReferences();
2135
GC_SlotObject hotFieldObject(_javaVM->omrVM, GC_SlotObject::addToSlotAddress((fomrobject_t*)((uintptr_t)destinationObjectPtr), offset, compressed));
2136
omrobjectptr_t objectPtr = hotFieldObject.readReferenceFromSlot();
2137
if (isObjectInEvacuateMemory(objectPtr)) {
2138
/* Hot field needs to be copied and forwarded. Check if the work has already been done */
2139
MM_ForwardedHeader forwardHeaderHotField(objectPtr, compressed);
2140
if (!forwardHeaderHotField.isForwardedPointer()) {
2141
env->_hotFieldCopyDepthCount += 1;
2142
copy(env, reservingContext, &forwardHeaderHotField);
2143
env->_hotFieldCopyDepthCount -= 1;
2144
}
2145
}
2146
}
2147
2148
void
2149
MM_CopyForwardScheme::flushCacheMarkMap(MM_EnvironmentVLHGC *env, MM_CopyScanCacheVLHGC *cache)
2150
{
2151
MM_CopyForwardCompactGroup *compactGroup = &(env->_copyForwardCompactGroups[cache->_compactGroup]);
2152
Assert_MM_true(cache == compactGroup->_copyCache);
2153
Assert_MM_false(UDATA_MAX == compactGroup->_markMapPGCSlotIndex); /* Safety check from flushing to see if somehow the cache is being resurrected */
2154
Assert_MM_false(UDATA_MAX == compactGroup->_markMapGMPSlotIndex); /* Safety check from flushing to see if somehow the cache is being resurrected */
2155
Assert_MM_false(cache->isSplitArray());
2156
2157
if(0 != compactGroup->_markMapPGCBitMask) {
2158
UDATA pgcFlushSlotIndex = compactGroup->_markMapPGCSlotIndex;
2159
if((pgcFlushSlotIndex == compactGroup->_markMapAtomicHeadSlotIndex) || (pgcFlushSlotIndex == compactGroup->_markMapAtomicTailSlotIndex)) {
2160
_markMap->atomicSetSlot(pgcFlushSlotIndex, compactGroup->_markMapPGCBitMask);
2161
} else {
2162
_markMap->setSlot(pgcFlushSlotIndex, compactGroup->_markMapPGCBitMask);
2163
}
2164
2165
/* We set the slot index to an invalid value to assert on later if seen */
2166
compactGroup->_markMapPGCSlotIndex = UDATA_MAX;
2167
compactGroup->_markMapPGCBitMask = 0;
2168
}
2169
2170
if(NULL != env->_cycleState->_externalCycleState) {
2171
if(0 != compactGroup->_markMapGMPBitMask) {
2172
UDATA gmpFlushSlotIndex = compactGroup->_markMapGMPSlotIndex;
2173
if((gmpFlushSlotIndex == compactGroup->_markMapAtomicHeadSlotIndex) || (gmpFlushSlotIndex == compactGroup->_markMapAtomicTailSlotIndex)) {
2174
env->_cycleState->_externalCycleState->_markMap->atomicSetSlot(gmpFlushSlotIndex, compactGroup->_markMapGMPBitMask);
2175
} else {
2176
env->_cycleState->_externalCycleState->_markMap->setSlot(gmpFlushSlotIndex, compactGroup->_markMapGMPBitMask);
2177
}
2178
2179
/* We set the slot index to an invalid value to assert on later if seen */
2180
compactGroup->_markMapGMPSlotIndex = UDATA_MAX;
2181
compactGroup->_markMapGMPBitMask = 0;
2182
}
2183
}
2184
2185
compactGroup->_markMapAtomicHeadSlotIndex = 0;
2186
compactGroup->_markMapAtomicTailSlotIndex = 0;
2187
}
2188
2189
void
2190
MM_CopyForwardScheme::updateMarkMapCache(MM_EnvironmentVLHGC *env, MM_MarkMap *markMap, J9Object *object,
2191
UDATA *slotIndexIndirect, UDATA *bitMaskIndirect, UDATA atomicHeadSlotIndex, UDATA atomicTailSlotIndex)
2192
{
2193
UDATA slotIndex = 0;
2194
UDATA bitMask = 0;
2195
2196
markMap->getSlotIndexAndMask(object, &slotIndex, &bitMask);
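/* Mark bits are batched per copy cache: bits that fall in the same mark map word are OR'd into the cached
 * mask and only written out when the word index changes (or when the cache is flushed). The head and tail
 * words of the cache's range may be shared with other threads, so those are written with an atomic update.
 */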
2197
2198
if(*slotIndexIndirect != slotIndex) {
2199
if(0 != *bitMaskIndirect) {
2200
UDATA flushSlotIndex = *slotIndexIndirect;
2201
if((flushSlotIndex == atomicHeadSlotIndex) || (flushSlotIndex == atomicTailSlotIndex)) {
2202
markMap->atomicSetSlot(flushSlotIndex, *bitMaskIndirect);
2203
} else {
2204
markMap->setSlot(flushSlotIndex, *bitMaskIndirect);
2205
}
2206
}
2207
*slotIndexIndirect = slotIndex;
2208
*bitMaskIndirect = bitMask;
2209
} else {
2210
*bitMaskIndirect |= bitMask;
2211
}
2212
}
2213
2214
void
2215
MM_CopyForwardScheme::updateMarkMapAndCardTableOnCopy(MM_EnvironmentVLHGC *env, J9Object *srcObject, J9Object *dstObject, MM_CopyScanCacheVLHGC *dstCache)
2216
{
2217
MM_CopyForwardCompactGroup *destinationGroup = &(env->_copyForwardCompactGroups[dstCache->_compactGroup]);
2218
Assert_MM_true(dstCache == destinationGroup->_copyCache);
2219
Assert_MM_false(UDATA_MAX == destinationGroup->_markMapPGCSlotIndex); /* Safety check from flushing to see if somehow the cache is being resurrected */
2220
Assert_MM_false(UDATA_MAX == destinationGroup->_markMapGMPSlotIndex); /* Safety check from flushing to see if somehow the cache is being resurrected */
2221
Assert_MM_false(dstCache->isSplitArray());
2222
2223
updateMarkMapCache(env, _markMap, dstObject, &destinationGroup->_markMapPGCSlotIndex, &destinationGroup->_markMapPGCBitMask, destinationGroup->_markMapAtomicHeadSlotIndex, destinationGroup->_markMapAtomicTailSlotIndex);
2224
2225
/* If there is an external cycle in progress, see if any information needs to be migrated */
2226
if(NULL != env->_cycleState->_externalCycleState) {
2227
MM_MarkMap *externalMap = env->_cycleState->_externalCycleState->_markMap;
2228
2229
if(externalMap->isBitSet(srcObject)) {
2230
/* The external cycle has already visited the live object - move the mark map and card information across */
2231
updateMarkMapCache(env, externalMap, dstObject, &destinationGroup->_markMapGMPSlotIndex, &destinationGroup->_markMapGMPBitMask, destinationGroup->_markMapAtomicHeadSlotIndex, destinationGroup->_markMapAtomicTailSlotIndex);
2232
2233
MM_CardTable *cardTable = _extensions->cardTable;
2234
Card *card = cardTable->heapAddrToCardAddr(env, srcObject);
2235
2236
switch(*card) {
2237
case CARD_GMP_MUST_SCAN:
2238
case CARD_DIRTY:
2239
{
2240
Card *dstCard = cardTable->heapAddrToCardAddr(env, dstObject);
2241
if(CARD_GMP_MUST_SCAN != *dstCard) {
2242
*dstCard = CARD_GMP_MUST_SCAN;
2243
}
2244
break;
2245
}
2246
case CARD_PGC_MUST_SCAN:
2247
case CARD_CLEAN:
2248
/* do nothing */
2249
break;
2250
default:
2251
Assert_MM_unreachable();
2252
}
2253
}
2254
}
2255
}
2256
2257
/****************************************
2258
* Object scan and copy routines
2259
****************************************
2260
*/
2261
MMINLINE void
2262
MM_CopyForwardScheme::scanOwnableSynchronizerObjectSlots(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, J9Object *objectPtr, ScanReason reason)
2263
{
2264
if (SCAN_REASON_COPYSCANCACHE == reason) {
2265
addOwnableSynchronizerObjectInList(env, objectPtr);
2266
} else if (SCAN_REASON_PACKET == reason) {
2267
if (isObjectInEvacuateMemoryNoCheck(objectPtr)) {
2268
addOwnableSynchronizerObjectInList(env, objectPtr);
2269
}
2270
}
2271
scanMixedObjectSlots(env, reservingContext, objectPtr, reason);
2272
}
2273
2274
/**
2275
* Iterate the object's reference slots and pass each reference's leaf bit to the copy-forward logic
2276
* to avoid pushing leaf objects onto the work stack when the reference needs to be marked instead of copied.
2277
*/
2278
MMINLINE bool
2279
MM_CopyForwardScheme::iterateAndCopyforwardSlotReference(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, J9Object *objectPtr) {
2280
bool success = true;
2281
fj9object_t *endScanPtr;
2282
UDATA *descriptionPtr;
2283
UDATA descriptionBits;
2284
UDATA descriptionIndex;
2285
#if defined(J9VM_GC_LEAF_BITS)
2286
UDATA *leafPtr = (UDATA *)J9GC_J9OBJECT_CLAZZ(objectPtr, env)->instanceLeafDescription;
2287
UDATA leafBits;
2288
#endif /* J9VM_GC_LEAF_BITS */
2289
bool const compressed = env->compressObjectReferences();
2290
2291
/* Object slots */
2292
volatile fj9object_t* scanPtr = _extensions->mixedObjectModel.getHeadlessObject(objectPtr);
2293
UDATA objectSize = _extensions->mixedObjectModel.getSizeInBytesWithHeader(objectPtr);
2294
2295
endScanPtr = (fj9object_t*)(((U_8 *)objectPtr) + objectSize);
2296
descriptionPtr = (UDATA *)J9GC_J9OBJECT_CLAZZ(objectPtr, env)->instanceDescription;
2297
2298
if (((UDATA)descriptionPtr) & 1) {
2299
descriptionBits = ((UDATA)descriptionPtr) >> 1;
2300
#if defined(J9VM_GC_LEAF_BITS)
2301
leafBits = ((UDATA)leafPtr) >> 1;
2302
#endif /* J9VM_GC_LEAF_BITS */
2303
} else {
2304
descriptionBits = *descriptionPtr++;
2305
#if defined(J9VM_GC_LEAF_BITS)
2306
leafBits = *leafPtr++;
2307
#endif /* J9VM_GC_LEAF_BITS */
2308
}
2309
descriptionIndex = J9_OBJECT_DESCRIPTION_SIZE - 1;
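/* The instance description is either an immediate bitmap (low bit set, consumed by shifting it out above)
 * or a pointer to an array of description words; each bit corresponds to one object slot and a set bit
 * means the slot holds an object reference, so only those slots are copy-forwarded below.
 */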
2310
2311
while (success && (scanPtr < endScanPtr)) {
2312
/* Determine if the slot should be processed */
2313
if (descriptionBits & 1) {
2314
GC_SlotObject slotObject(_javaVM->omrVM, scanPtr);
2315
2316
/* Copy/Forward the slot reference and perform any inter-region remember work that is required */
2317
#if defined(J9VM_GC_LEAF_BITS)
2318
success = copyAndForward(env, reservingContext, objectPtr, &slotObject, 1 == (leafBits & 1));
2319
#else /* J9VM_GC_LEAF_BITS */
2320
success = copyAndForward(env, reservingContext, objectPtr, &slotObject);
2321
#endif /* J9VM_GC_LEAF_BITS */
2322
}
2323
descriptionBits >>= 1;
2324
#if defined(J9VM_GC_LEAF_BITS)
2325
leafBits >>= 1;
2326
#endif /* J9VM_GC_LEAF_BITS */
2327
if (descriptionIndex-- == 0) {
2328
descriptionBits = *descriptionPtr++;
2329
#if defined(J9VM_GC_LEAF_BITS)
2330
leafBits = *leafPtr++;
2331
#endif /* J9VM_GC_LEAF_BITS */
2332
descriptionIndex = J9_OBJECT_DESCRIPTION_SIZE - 1;
2333
}
2334
scanPtr = GC_SlotObject::addToSlotAddress((fomrobject_t*)scanPtr, 1, compressed);
2335
}
2336
return success;
2337
}
2338
2339
void
2340
MM_CopyForwardScheme::scanMixedObjectSlots(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, J9Object *objectPtr, ScanReason reason)
2341
{
2342
if(_tracingEnabled) {
2343
PORT_ACCESS_FROM_ENVIRONMENT(env);
2344
j9tty_printf(PORTLIB, "@%p\n", objectPtr);
2345
}
2346
2347
bool success = copyAndForwardObjectClass(env, reservingContext, objectPtr);
2348
2349
if (success) {
2350
/* Iterate and copy-forward the slot references using the leaf bits */
2351
success = iterateAndCopyforwardSlotReference(env, reservingContext, objectPtr);
2352
}
2353
2354
updateScanStats(env, objectPtr, reason);
2355
}
2356
2357
void
2358
MM_CopyForwardScheme::scanReferenceObjectSlots(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, J9Object *objectPtr, ScanReason reason)
2359
{
2360
bool success = copyAndForwardObjectClass(env, reservingContext, objectPtr);
2361
2362
I_32 referenceState = J9GC_J9VMJAVALANGREFERENCE_STATE(env, objectPtr);
2363
2364
/* if the reference isn't part of the collection set, treat it as a strong reference */
2365
bool isReferenceInCollectionSet = isObjectInNurseryMemory(objectPtr);
2366
bool isReferenceCleared = (GC_ObjectModel::REF_STATE_CLEARED == referenceState) || (GC_ObjectModel::REF_STATE_ENQUEUED == referenceState);
2367
bool referentMustBeMarked = isReferenceCleared || !isReferenceInCollectionSet;
2368
bool referentMustBeCleared = false;
2369
if (isReferenceInCollectionSet) {
2370
UDATA referenceObjectOptions = env->_cycleState->_referenceObjectOptions;
2371
UDATA referenceObjectType = J9CLASS_FLAGS(J9GC_J9OBJECT_CLAZZ(objectPtr, env)) & J9AccClassReferenceMask;
2372
switch (referenceObjectType) {
2373
case J9AccClassReferenceWeak:
2374
referentMustBeCleared = (0 != (referenceObjectOptions & MM_CycleState::references_clear_weak)) ;
2375
break;
2376
case J9AccClassReferenceSoft:
2377
referentMustBeCleared = (0 != (referenceObjectOptions & MM_CycleState::references_clear_soft));
2378
referentMustBeMarked = referentMustBeMarked || (
2379
((0 == (referenceObjectOptions & MM_CycleState::references_soft_as_weak))
2380
&& ((UDATA)J9GC_J9VMJAVALANGSOFTREFERENCE_AGE(env, objectPtr) < _extensions->getDynamicMaxSoftReferenceAge())));
2381
break;
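/* A soft referent is treated as strong while its age is below the dynamic maximum soft reference age,
 * unless soft references are being treated as weak for this cycle (references_soft_as_weak).
 */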
2382
case J9AccClassReferencePhantom:
2383
referentMustBeCleared = (0 != (referenceObjectOptions & MM_CycleState::references_clear_phantom));
2384
break;
2385
default:
2386
Assert_MM_unreachable();
2387
}
2388
}
2389
2390
GC_SlotObject referentPtr(_javaVM->omrVM, J9GC_J9VMJAVALANGREFERENCE_REFERENT_ADDRESS(env, objectPtr));
2391
2392
/* Iterate and copy-forward regular reference slots, except the special (soft) referent slot. The leaf bit optimization is not used here,
2393
* sacrificing minor performance to avoid code complication. Could optimize later, if/when using ObjectScanner */
2394
GC_MixedObjectIterator mixedObjectIterator(_javaVM->omrVM, objectPtr);
2395
GC_SlotObject *slotObject = NULL;
2396
while (success && (NULL != (slotObject = mixedObjectIterator.nextSlot()))) {
2397
if ((slotObject->readAddressFromSlot() != referentPtr.readAddressFromSlot()) || referentMustBeMarked) {
2398
/* Copy/Forward the slot reference and perform any inter-region remember work that is required */
2399
success = copyAndForward(env, reservingContext, objectPtr, slotObject);
2400
}
2401
}
2402
2403
if (SCAN_REASON_OVERFLOWED_REGION == reason) {
2404
/* handled when we empty packet to overflow */
2405
} else {
2406
if (referentMustBeCleared) {
2407
Assert_MM_true(isReferenceInCollectionSet);
2408
referentPtr.writeReferenceToSlot(NULL);
2409
if (!isReferenceCleared) {
2410
J9GC_J9VMJAVALANGREFERENCE_STATE(env, objectPtr) = GC_ObjectModel::REF_STATE_CLEARED;
2411
}
2412
} else if (isReferenceInCollectionSet) {
2413
if (!isReferenceCleared) {
2414
if (success) {
2415
env->getGCEnvironment()->_referenceObjectBuffer->add(env, objectPtr);
2416
}
2417
}
2418
}
2419
}
2420
2421
updateScanStats(env, objectPtr, reason);
2422
}
2423
2424
UDATA
2425
MM_CopyForwardScheme::createNextSplitArrayWorkUnit(MM_EnvironmentVLHGC *env, J9IndexableObject *arrayPtr, UDATA startIndex, bool currentSplitUnitOnly)
2426
{
2427
UDATA sizeInElements = _extensions->indexableObjectModel.getSizeInElements(arrayPtr);
2428
UDATA slotsToScan = 0;
2429
2430
if (sizeInElements > 0) {
2431
Assert_MM_true(startIndex < sizeInElements);
2432
slotsToScan = sizeInElements - startIndex;
2433
2434
if (slotsToScan > _arraySplitSize) {
2435
slotsToScan = _arraySplitSize;
2436
2437
/* immediately make the next chunk available for another thread to start processing */
2438
UDATA nextIndex = startIndex + slotsToScan;
2439
Assert_MM_true(nextIndex < sizeInElements);
2440
2441
bool noEvacuation = false;
2442
if (0 != _regionCountCannotBeEvacuated) {
2443
noEvacuation = isObjectInNoEvacuationRegions(env, (J9Object *) arrayPtr);
2444
}
2445
2446
if (abortFlagRaised() || noEvacuation) {
2447
if (!currentSplitUnitOnly) {
2448
/* work stack driven */
2449
env->_workStack.push(env, (void *)arrayPtr, (void *)((nextIndex << PACKET_ARRAY_SPLIT_SHIFT) | PACKET_ARRAY_SPLIT_TAG));
2450
env->_workStack.flushOutputPacket(env);
2451
#if defined(J9MODRON_TGC_PARALLEL_STATISTICS)
2452
env->_copyForwardStats._markedArraysSplit += 1;
2453
#endif /* J9MODRON_TGC_PARALLEL_STATISTICS */
2454
}
2455
} else {
2456
Assert_MM_false(currentSplitUnitOnly);
2457
/* copy-scan cache driven */
2458
MM_CopyScanCacheVLHGC *splitCache = getFreeCache(env);
2459
if (NULL != splitCache) {
2460
reinitArraySplitCache(env, splitCache, arrayPtr, nextIndex);
2461
addCacheEntryToScanCacheListAndNotify(env, splitCache);
2462
#if defined(J9MODRON_TGC_PARALLEL_STATISTICS)
2463
env->_copyForwardStats._copiedArraysSplit += 1;
2464
#endif /* J9MODRON_TGC_PARALLEL_STATISTICS */
2465
} else {
2466
Assert_MM_true(_abortFlag);
2467
void *element1 = (void *)arrayPtr;
2468
void *element2 = (void *)((nextIndex << PACKET_ARRAY_SPLIT_SHIFT) | PACKET_ARRAY_SPLIT_TAG);
2469
Assert_MM_true(nextIndex == (((UDATA)element2) >> PACKET_ARRAY_SPLIT_SHIFT));
2470
env->_workStack.push(env, element1, element2);
2471
env->_workStack.flushOutputPacket(env);
2472
#if defined(J9MODRON_TGC_PARALLEL_STATISTICS)
2473
env->_copyForwardStats._markedArraysSplit += 1;
2474
#endif /* J9MODRON_TGC_PARALLEL_STATISTICS */
2475
Trc_MM_CopyForwardScheme_scanPointerArrayObjectSlotsSplit_failedToAllocateCache(env->getLanguageVMThread(), sizeInElements);
2476
}
2477
}
2478
}
2479
}
2480
2481
return slotsToScan;
2482
}
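/*
 * Illustrative sketch of the array-split work item encoding used above: the next index is
 * packed above the tag bits with (nextIndex << PACKET_ARRAY_SPLIT_SHIFT) | PACKET_ARRAY_SPLIT_TAG,
 * and scanPointerArrayObjectSlots recovers the index with the matching shift. The constant
 * values below are placeholders for illustration; the real PACKET_ARRAY_SPLIT_* constants are
 * defined in the VM headers.
 */
#if 0
#include <cassert>
#include <cstdint>

static const uintptr_t SKETCH_ARRAY_SPLIT_SHIFT = 2;
static const uintptr_t SKETCH_ARRAY_SPLIT_TAG = 0x1;

static uintptr_t
sketchEncodeSplitWorkItem(uintptr_t nextIndex)
{
	/* pack the next index above the tag bits so the low bits identify the work item type */
	return (nextIndex << SKETCH_ARRAY_SPLIT_SHIFT) | SKETCH_ARRAY_SPLIT_TAG;
}

static uintptr_t
sketchDecodeSplitWorkItem(uintptr_t workItem)
{
	assert(SKETCH_ARRAY_SPLIT_TAG == (workItem & SKETCH_ARRAY_SPLIT_TAG));
	return workItem >> SKETCH_ARRAY_SPLIT_SHIFT;
}
#endif /* 0 - illustrative sketch only */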
2483
UDATA
2484
MM_CopyForwardScheme::scanPointerArrayObjectSlotsSplit(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, J9IndexableObject *arrayPtr, UDATA startIndex, bool currentSplitUnitOnly)
2485
{
2486
if(_tracingEnabled) {
2487
PORT_ACCESS_FROM_ENVIRONMENT(env);
2488
j9tty_printf(PORTLIB, "#");
2489
}
2490
2491
/* there's no harm in remembering the array multiple times, so do this for each split chunk */
2492
bool success = copyAndForwardObjectClass(env, reservingContext, (J9Object *)arrayPtr);
2493
2494
UDATA slotsToScan = createNextSplitArrayWorkUnit(env, arrayPtr, startIndex, currentSplitUnitOnly);
2495
2496
if (slotsToScan > 0) {
2497
/* TODO: this iterator scans the array backwards - change it to forward, and optimize it since we can guarantee the range will be contiguous */
2498
GC_PointerArrayIterator pointerArrayIterator(_javaVM, (J9Object *)arrayPtr);
2499
pointerArrayIterator.setIndex(startIndex + slotsToScan);
2500
2501
for (UDATA scanCount = 0; success && (scanCount < slotsToScan); scanCount++) {
2502
GC_SlotObject *slotObject = pointerArrayIterator.nextSlot();
2503
if (NULL == slotObject) {
2504
/* this can happen if the array is only partially allocated */
2505
break;
2506
}
2507
2508
/* Copy/Forward the slot reference and perform any inter-region remember work that is required */
2509
success = copyAndForwardPointerArray(env, reservingContext, arrayPtr, startIndex, slotObject);
2510
}
2511
}
2512
2513
return slotsToScan;
2514
}
2515
2516
void
2517
MM_CopyForwardScheme::scanClassObjectSlots(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, J9Object *classObject, ScanReason reason)
2518
{
2519
scanMixedObjectSlots(env, reservingContext, classObject, reason);
2520
2521
J9Class *classPtr = J9VM_J9CLASS_FROM_HEAPCLASS((J9VMThread*)env->getLanguageVMThread(), classObject);
2522
2523
if (NULL != classPtr) {
2524
volatile j9object_t * slotPtr = NULL;
2525
bool success = true;
2526
2527
do {
2528
/*
2529
* Scan J9Class internals using general iterator
2530
* - scan statics fields
2531
* - scan call sites
2532
* - scan MethodTypes
2533
* - scan VarHandle MethodTypes
2534
* - scan constants pool objects
2535
*/
2536
GC_ClassIterator classIterator(env, classPtr, false);
2537
while (success && (NULL != (slotPtr = classIterator.nextSlot()))) {
2538
/* Copy/Forward the slot reference and perform any inter-region remember work that is required */
2539
success = copyAndForward(env, reservingContext, classObject, slotPtr);
2540
}
2541
2542
/*
2543
* Usually we don't care about class to class references because its can be marked as a part of alive classloader or find in Hash Table
2544
* However we need to scan them for case of Anonymous classes. Its are unloaded on individual basis so it is important to reach each one
2545
*/
2546
if (J9_ARE_ANY_BITS_SET(J9CLASS_EXTENDED_FLAGS(classPtr), J9ClassIsAnonymous)) {
2547
GC_ClassIteratorClassSlots classSlotIterator(_javaVM, classPtr);
2548
J9Class *referencedClass = NULL;
2549
while (success && (NULL != (referencedClass = classSlotIterator.nextSlot()))) {
2550
slotPtr = &(referencedClass->classObject);
2551
/* Copy/Forward the slot reference and perform any inter-region remember work that is required */
2552
success = copyAndForward(env, reservingContext, classObject, slotPtr);
2553
}
2554
}
2555
2556
if (success) {
2557
/* we can safely ignore any classes referenced by the constant pool, since
2558
* these are guaranteed to be referenced by our class loader
2559
* except for the anonymous case, which is handled above
2560
*/
2561
/* By scanning the class object, we've committed to it either being in a card external to the collection set, or that it is already part of a copied set and
2562
* being scanned through the copy/scan cache. In either case, a simple pointer forward update is all that is required.
2563
*/
2564
classPtr->classObject = (j9object_t)updateForwardedPointer((J9Object *)classPtr->classObject);
2565
Assert_MM_true(isLiveObject((J9Object *)classPtr->classObject));
2566
}
2567
classPtr = classPtr->replacedClass;
2568
} while (success && (NULL != classPtr));
2569
}
2570
}
2571
2572
void
2573
MM_CopyForwardScheme::scanClassLoaderObjectSlots(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, J9Object *classLoaderObject, ScanReason reason)
2574
{
2575
scanMixedObjectSlots(env, reservingContext, classLoaderObject, reason);
2576
2577
J9ClassLoader *classLoader = J9VMJAVALANGCLASSLOADER_VMREF((J9VMThread*)env->getLanguageVMThread(), classLoaderObject);
2578
if (NULL != classLoader) {
2579
/* By scanning the class loader object, we've committed to it either being in a card external to the collection set, or that it is already part of a copied set and
2580
* being scanned through the copy/scan cache. In either case, a simple pointer forward update is all that is required.
2581
*/
2582
classLoader->classLoaderObject = updateForwardedPointer((J9Object *)classLoader->classLoaderObject);
2583
Assert_MM_true(isLiveObject((J9Object *)classLoader->classLoaderObject));
2584
2585
/* No lock is required because this only runs under exclusive access */
2586
/* (NULL == classLoader->classHashTable) is true ONLY for DEAD class loaders */
2587
Assert_MM_true(NULL != classLoader->classHashTable);
2588
2589
/* Do this for all classloaders except anonymous */
2590
if (0 == (classLoader->flags & J9CLASSLOADER_ANON_CLASS_LOADER)) {
2591
GC_ClassLoaderClassesIterator iterator(_extensions, classLoader);
2592
J9Class *clazz = NULL;
2593
bool success = true;
2594
while (success && (NULL != (clazz = iterator.nextClass()))) {
2595
Assert_MM_true(NULL != clazz->classObject);
2596
/* Copy/Forward the slot reference and perform any inter-region remember work that is required */
2597
success = copyAndForward(env, reservingContext, classLoaderObject, (J9Object **)&(clazz->classObject));
2598
}
2599
2600
if (NULL != classLoader->moduleHashTable) {
2601
J9HashTableState walkState;
2602
J9Module **modulePtr = (J9Module **)hashTableStartDo(classLoader->moduleHashTable, &walkState);
2603
while (success && (NULL != modulePtr)) {
2604
J9Module * const module = *modulePtr;
2605
success = copyAndForward(env, reservingContext, classLoaderObject, (J9Object **)&(module->moduleObject));
2606
if (success) {
2607
if (NULL != module->moduleName) {
2608
success = copyAndForward(env, reservingContext, classLoaderObject, (J9Object **)&(module->moduleName));
2609
}
2610
}
2611
if (success) {
2612
if (NULL != module->version) {
2613
success = copyAndForward(env, reservingContext, classLoaderObject, (J9Object **)&(module->version));
2614
}
2615
}
2616
modulePtr = (J9Module**)hashTableNextDo(&walkState);
2617
}
2618
2619
if (success && (classLoader == _javaVM->systemClassLoader)) {
2620
success = copyAndForward(env, reservingContext, classLoaderObject, (J9Object **)&(_javaVM->unamedModuleForSystemLoader->moduleObject));
2621
}
2622
}
2623
}
2624
}
2625
}
2626
2627
/****************************************
2628
* Scan completion routines
2629
****************************************
2630
*/
2631
bool
2632
MM_CopyForwardScheme::isScanCacheWorkAvailable(MM_CopyScanCacheListVLHGC *scanCacheList)
2633
{
2634
return !scanCacheList->isEmpty();
2635
}
2636
2637
bool
2638
MM_CopyForwardScheme::isAnyScanCacheWorkAvailable()
2639
{
2640
bool result = false;
2641
UDATA nodeLists = _scanCacheListSize;
2642
for (UDATA i = 0; (!result) && (i < nodeLists); i++) {
2643
result = isScanCacheWorkAvailable(&_cacheScanLists[i]);
2644
}
2645
return result;
2646
}
2647
2648
bool
2649
MM_CopyForwardScheme::isAnyScanWorkAvailable(MM_EnvironmentVLHGC *env)
2650
{
2651
return (isAnyScanCacheWorkAvailable() || ((0 != _regionCountCannotBeEvacuated) && !abortFlagRaised() && env->_workStack.inputPacketAvailableFromWorkPackets(env)));
2652
}
2653
2654
MM_CopyScanCacheVLHGC *
2655
MM_CopyForwardScheme::getSurvivorCacheForScan(MM_EnvironmentVLHGC *env)
2656
{
2657
MM_CopyScanCacheVLHGC *cache = NULL;
2658
2659
for(UDATA index = 0; index < _compactGroupMaxCount; index++) {
2660
cache = env->_copyForwardCompactGroups[index]._copyCache;
2661
if((NULL != cache) && cache->isScanWorkAvailable()) {
2662
return cache;
2663
}
2664
}
2665
2666
return NULL;
2667
}
2668
2669
MM_CopyForwardScheme::ScanReason
2670
MM_CopyForwardScheme::getNextWorkUnit(MM_EnvironmentVLHGC *env, UDATA preferredNumaNode)
2671
{
2672
env->_scanCache = NULL;
2673
ScanReason ret = SCAN_REASON_NONE;
2674
2675
MM_CopyScanCacheVLHGC *cache = NULL;
2676
/* Preference is to use survivor copy cache */
2677
if(NULL != (cache = getSurvivorCacheForScan(env))) {
2678
env->_scanCache = cache;
2679
ret = SCAN_REASON_COPYSCANCACHE;
2680
return ret;
2681
}
2682
2683
if (NULL != env->_deferredScanCache) {
2684
/* there is deferred scanning to do from partial depth first scanning */
2685
cache = (MM_CopyScanCacheVLHGC *)env->_deferredScanCache;
2686
env->_deferredScanCache = NULL;
2687
env->_scanCache = cache;
2688
ret = SCAN_REASON_COPYSCANCACHE;
2689
return ret;
2690
}
2691
2692
#if defined(J9MODRON_TGC_PARALLEL_STATISTICS)
2693
env->_copyForwardStats._acquireScanListCount += 1;
2694
#endif /* J9MODRON_TGC_PARALLEL_STATISTICS */
2695
2696
bool doneFlag = false;
2697
volatile UDATA doneIndex = _doneIndex;
2698
2699
while ((SCAN_REASON_NONE == ret) && !doneFlag) {
2700
if (SCAN_REASON_NONE == (ret = getNextWorkUnitNoWait(env, preferredNumaNode))) {
2701
omrthread_monitor_enter(*_workQueueMonitorPtr);
2702
*_workQueueWaitCountPtr += 1;
2703
2704
if(doneIndex == _doneIndex) {
2705
if((*_workQueueWaitCountPtr == env->_currentTask->getThreadCount()) && !isAnyScanWorkAvailable(env)) {
2706
*_workQueueWaitCountPtr = 0;
2707
_doneIndex += 1;
2708
omrthread_monitor_notify_all(*_workQueueMonitorPtr);
2709
} else {
2710
while(!isAnyScanWorkAvailable(env) && (doneIndex == _doneIndex)) {
2711
#if defined(J9MODRON_TGC_PARALLEL_STATISTICS)
2712
PORT_ACCESS_FROM_ENVIRONMENT(env);
2713
U_64 waitEndTime, waitStartTime;
2714
waitStartTime = j9time_hires_clock();
2715
#endif /* J9MODRON_TGC_PARALLEL_STATISTICS */
2716
omrthread_monitor_wait(*_workQueueMonitorPtr);
2717
#if defined(J9MODRON_TGC_PARALLEL_STATISTICS)
2718
waitEndTime = j9time_hires_clock();
2719
if (doneIndex == _doneIndex) {
2720
env->_copyForwardStats.addToWorkStallTime(waitStartTime, waitEndTime);
2721
} else {
2722
env->_copyForwardStats.addToCompleteStallTime(waitStartTime, waitEndTime);
2723
}
2724
#endif /* J9MODRON_TGC_PARALLEL_STATISTICS */
2725
}
2726
}
2727
}
2728
2729
/* Set the local done flag and if we are done and the waiting count is 0 (last thread) exit */
2730
doneFlag = (doneIndex != _doneIndex);
2731
if (!doneFlag) {
2732
*_workQueueWaitCountPtr -= 1;
2733
}
2734
omrthread_monitor_exit(*_workQueueMonitorPtr);
2735
}
2736
}
2737
2738
return ret;
2739
}
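/*
 * Illustrative sketch of the done-index termination protocol implemented above: an idle
 * thread increments the wait count under the monitor; the last idle thread (wait count equals
 * the thread count and no scan work exists anywhere) bumps the done index and wakes everyone,
 * and a woken thread treats a changed done index as "phase complete". The members and helper
 * below are hypothetical stand-ins - the real code uses omrthread monitors and the scheme's
 * own fields.
 */
#if 0
#include <condition_variable>
#include <cstddef>
#include <mutex>

struct SketchTerminator {
	std::mutex monitor;
	std::condition_variable condition;
	size_t waitCount;
	size_t doneIndex;
	size_t threadCount;

	/* Returns true if the caller should treat the scan phase as complete. */
	bool waitForWorkOrDone(bool (*anyScanWorkAvailable)())
	{
		std::unique_lock<std::mutex> lock(monitor);
		size_t localDoneIndex = doneIndex;
		waitCount += 1;
		if (localDoneIndex == doneIndex) {
			if ((waitCount == threadCount) && !anyScanWorkAvailable()) {
				/* last idle thread: declare the phase done and wake everyone */
				waitCount = 0;
				doneIndex += 1;
				condition.notify_all();
			} else {
				while (!anyScanWorkAvailable() && (localDoneIndex == doneIndex)) {
					condition.wait(lock);
				}
			}
		}
		bool done = (localDoneIndex != doneIndex);
		if (!done) {
			waitCount -= 1;
		}
		return done;
	}
};
#endif /* 0 - illustrative sketch only */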
2740
2741
MM_CopyForwardScheme::ScanReason
2742
MM_CopyForwardScheme::getNextWorkUnitOnNode(MM_EnvironmentVLHGC *env, UDATA numaNode)
2743
{
2744
ScanReason ret = SCAN_REASON_NONE;
2745
2746
MM_CopyScanCacheVLHGC *cache = _cacheScanLists[numaNode].popCache(env);
2747
if(NULL != cache) {
2748
/* Check if there are threads waiting that should be notified because of pending entries */
2749
if((0 != *_workQueueWaitCountPtr) && isScanCacheWorkAvailable(&_cacheScanLists[numaNode])) {
2750
omrthread_monitor_enter(*_workQueueMonitorPtr);
2751
if(0 != *_workQueueWaitCountPtr) {
2752
omrthread_monitor_notify(*_workQueueMonitorPtr);
2753
}
2754
omrthread_monitor_exit(*_workQueueMonitorPtr);
2755
}
2756
env->_scanCache = cache;
2757
ret = SCAN_REASON_COPYSCANCACHE;
2758
}
2759
2760
return ret;
2761
}
2762
2763
MM_CopyForwardScheme::ScanReason
2764
MM_CopyForwardScheme::getNextWorkUnitNoWait(MM_EnvironmentVLHGC *env, UDATA preferredNumaNode)
2765
{
2766
UDATA nodeLists = _scanCacheListSize;
2767
ScanReason ret = SCAN_REASON_NONE;
2768
/* local node first */
2769
ret = getNextWorkUnitOnNode(env, preferredNumaNode);
2770
if (SCAN_REASON_NONE == ret) {
2771
/* we failed to find a scan cache on our preferred node */
2772
if (COMMON_CONTEXT_INDEX != preferredNumaNode) {
2773
/* try the common node */
2774
ret = getNextWorkUnitOnNode(env, COMMON_CONTEXT_INDEX);
2775
}
2776
/* now try the remaining nodes */
2777
UDATA nextNode = (preferredNumaNode + 1) % nodeLists;
2778
while ((SCAN_REASON_NONE == ret) && (nextNode != preferredNumaNode)) {
2779
if (COMMON_CONTEXT_INDEX != nextNode) {
2780
ret = getNextWorkUnitOnNode(env, nextNode);
2781
}
2782
nextNode = (nextNode + 1) % nodeLists;
2783
}
2784
}
2785
if (SCAN_REASON_NONE == ret && (0 != _regionCountCannotBeEvacuated) && !abortFlagRaised()) {
2786
if (env->_workStack.retrieveInputPacket(env)) {
2787
ret = SCAN_REASON_PACKET;
2788
}
2789
}
2790
return ret;
2791
}
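/*
 * Illustrative sketch of the scan-list search order used by getNextWorkUnitNoWait: the
 * caller's preferred NUMA node first, then the common context list, then the remaining
 * nodes round-robin starting at preferredNumaNode + 1. The common-list index used here is
 * an assumed placeholder; the real COMMON_CONTEXT_INDEX belongs to the scheme.
 */
#if 0
#include <cstddef>
#include <vector>

static const size_t SKETCH_COMMON_CONTEXT_INDEX = 0;

static std::vector<size_t>
sketchScanListVisitOrder(size_t preferredNumaNode, size_t nodeListCount)
{
	std::vector<size_t> order;
	order.push_back(preferredNumaNode);
	if (SKETCH_COMMON_CONTEXT_INDEX != preferredNumaNode) {
		order.push_back(SKETCH_COMMON_CONTEXT_INDEX);
	}
	size_t nextNode = (preferredNumaNode + 1) % nodeListCount;
	while (nextNode != preferredNumaNode) {
		if (SKETCH_COMMON_CONTEXT_INDEX != nextNode) {
			order.push_back(nextNode);
		}
		nextNode = (nextNode + 1) % nodeListCount;
	}
	return order;
}
/* e.g. sketchScanListVisitOrder(2, 4) visits lists 2, 0, 3, 1 (0 being the common list here). */
#endif /* 0 - illustrative sketch only */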
2792
2793
/**
2794
* Calculates distance from the allocation pointer to the scan pointer for the given cache.
2795
*
2796
* If the allocation pointer is less than or equal to the scan pointer, or the cache is NULL,
2797
* the distance is set to the maximum unsigned UDATA, SCAN_TO_COPY_CACHE_MAX_DISTANCE.
2798
* @return distance calculated.
2799
*/
2800
MMINLINE UDATA
2801
MM_CopyForwardScheme::scanToCopyDistance(MM_CopyScanCacheVLHGC* cache)
2802
{
2803
if (cache == NULL) {
2804
return SCAN_TO_COPY_CACHE_MAX_DISTANCE;
2805
}
2806
IDATA distance = ((UDATA) cache->cacheAlloc) - ((UDATA) cache->scanCurrent);
2807
UDATA udistance;
2808
if (distance <= 0) {
2809
udistance = SCAN_TO_COPY_CACHE_MAX_DISTANCE;
2810
} else {
2811
udistance = distance;
2812
}
2813
return udistance;
2814
}
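/*
 * Illustrative standalone version of the scan-to-copy distance above, using plain pointers
 * instead of MM_CopyScanCacheVLHGC: the heuristic favours caches whose unscanned range
 * (scanCurrent up to cacheAlloc) is small, so freshly copied objects are scanned while they
 * are still cache-hot. The names and the max-distance stand-in below are placeholders.
 */
#if 0
#include <cstddef>
#include <cstdint>
#include <limits>

static const uintptr_t SKETCH_MAX_DISTANCE = std::numeric_limits<uintptr_t>::max();

static uintptr_t
sketchScanToCopyDistance(void *scanCurrent, void *cacheAlloc)
{
	if ((NULL == scanCurrent) || (NULL == cacheAlloc)) {
		return SKETCH_MAX_DISTANCE;
	}
	intptr_t distance = (intptr_t)((uintptr_t)cacheAlloc - (uintptr_t)scanCurrent);
	if (distance <= 0) {
		/* nothing left to scan (or inconsistent inputs): treat as the furthest possible cache */
		return SKETCH_MAX_DISTANCE;
	}
	return (uintptr_t)distance;
}
#endif /* 0 - illustrative sketch only */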
2815
2816
/**
2817
* For a given copyCache and scanCache (which may or may not also be a copy cache), return the
2818
* best cache for scanning of these two caches.
2819
*
2820
* If the copyCache has work to scan, and the scanCache is not a copy cache, then the copyCache is
2821
* the better one. If they are both copy caches (it is assumed the scanCache in this context has
2822
* work to scan), then the one with the shorter scanToCopyDistance is the better one to scan.
2823
*
2824
* @param copyCache the candidate copy cache
2825
* @param scanCache the current best scan cache, which may be updated.
2826
* @return true if the scanCache has been updated with the best cache to scan.
2827
*/
2828
MMINLINE bool
2829
MM_CopyForwardScheme::bestCacheForScanning(MM_CopyScanCacheVLHGC* copyCache, MM_CopyScanCacheVLHGC** scanCache)
2830
{
2831
if (!copyCache->isScanWorkAvailable()) {
2832
return false;
2833
}
2834
if (!((*scanCache)->flags & J9VM_MODRON_SCAVENGER_CACHE_TYPE_COPY)) {
2835
*scanCache = copyCache;
2836
return true;
2837
}
2838
if (scanToCopyDistance(copyCache) < scanToCopyDistance(*scanCache)) {
2839
*scanCache = copyCache;
2840
return true;
2841
}
2842
return false;
2843
}
2844
2845
MMINLINE bool
2846
MM_CopyForwardScheme::aliasToCopyCache(MM_EnvironmentVLHGC *env, MM_CopyScanCacheVLHGC** nextScanCache)
2847
{
2848
bool interruptScanning = false;
2849
2850
Assert_MM_unimplemented();
2851
#if 0
2852
/* VMDESIGN 1359.
2853
* Only alias the _survivorCopyScanCache IF there are 0 threads waiting. If the current thread is the only producer and
2854
* it aliases its survivor cache then it will be the only thread able to consume. This will alleviate the stalling issues
2855
* described in the above-mentioned design.
2856
*/
2857
if (0 == *_workQueueWaitCountPtr) {
2858
interruptScanning = bestCacheForScanning(env->_survivorCopyScanCache, nextScanCache) || interruptScanning;
2859
}
2860
#endif /* 0 */
2861
2862
return interruptScanning;
2863
}
2864
2865
MMINLINE void
2866
MM_CopyForwardScheme::scanObject(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, J9Object *objectPtr, ScanReason reason)
2867
{
2868
J9Class* clazz = J9GC_J9OBJECT_CLAZZ(objectPtr, env);
2869
Assert_MM_mustBeClass(clazz);
2870
switch(_extensions->objectModel.getScanType(clazz)) {
2871
case GC_ObjectModel::SCAN_MIXED_OBJECT_LINKED:
2872
case GC_ObjectModel::SCAN_ATOMIC_MARKABLE_REFERENCE_OBJECT:
2873
case GC_ObjectModel::SCAN_MIXED_OBJECT:
2874
scanMixedObjectSlots(env, reservingContext, objectPtr, reason);
2875
break;
2876
case GC_ObjectModel::SCAN_OWNABLESYNCHRONIZER_OBJECT:
2877
scanOwnableSynchronizerObjectSlots(env, reservingContext, objectPtr, reason);
2878
break;
2879
case GC_ObjectModel::SCAN_REFERENCE_MIXED_OBJECT:
2880
scanReferenceObjectSlots(env, reservingContext, objectPtr, reason);
2881
break;
2882
case GC_ObjectModel::SCAN_CLASS_OBJECT:
2883
scanClassObjectSlots(env, reservingContext, objectPtr, reason);
2884
break;
2885
case GC_ObjectModel::SCAN_CLASSLOADER_OBJECT:
2886
scanClassLoaderObjectSlots(env, reservingContext, objectPtr, reason);
2887
break;
2888
case GC_ObjectModel::SCAN_POINTER_ARRAY_OBJECT:
2889
scanPointerArrayObjectSlots(env, reservingContext, (J9IndexableObject *)objectPtr, reason);
2890
break;
2891
case GC_ObjectModel::SCAN_PRIMITIVE_ARRAY_OBJECT:
2892
if (SCAN_REASON_DIRTY_CARD != reason) {
2893
/* since we copy arrays in the non-aborting case, count them as scanned in the abort case for symmetry */
2894
updateScanStats(env, objectPtr, reason);
2895
}
2896
break;
2897
default:
2898
Trc_MM_CopyForwardScheme_scanObject_invalid(env->getLanguageVMThread(), objectPtr, reason);
2899
Assert_MM_unreachable();
2900
}
2901
}
2902
2903
MMINLINE void
2904
MM_CopyForwardScheme::updateScanStats(MM_EnvironmentVLHGC *env, J9Object *objectPtr, ScanReason reason)
2905
{
2906
bool noEvacuation = false;
2907
if (0 != _regionCountCannotBeEvacuated) {
2908
noEvacuation = isObjectInNoEvacuationRegions(env, objectPtr);
2909
}
2910
2911
if (SCAN_REASON_DIRTY_CARD == reason) {
2912
UDATA objectSize = _extensions->objectModel.getSizeInBytesWithHeader(objectPtr);
2913
env->_copyForwardStats._objectsCardClean += 1;
2914
env->_copyForwardStats._bytesCardClean += objectSize;
2915
} else if (abortFlagRaised() || noEvacuation) {
2916
UDATA objectSize = _extensions->objectModel.getSizeInBytesWithHeader(objectPtr);
2917
Assert_MM_false(SCAN_REASON_DIRTY_CARD == reason);
2918
MM_HeapRegionDescriptorVLHGC * region = (MM_HeapRegionDescriptorVLHGC *)_regionManager->tableDescriptorForAddress(objectPtr);
2919
UDATA compactGroup = MM_CompactGroupManager::getCompactGroupNumber(env, region);
2920
if (region->isEden()) {
2921
env->_copyForwardCompactGroups[compactGroup]._edenStats._liveObjects += 1;
2922
env->_copyForwardCompactGroups[compactGroup]._edenStats._liveBytes += objectSize;
2923
env->_copyForwardCompactGroups[compactGroup]._edenStats._scannedObjects += 1;
2924
env->_copyForwardCompactGroups[compactGroup]._edenStats._scannedBytes += objectSize;
2925
} else {
2926
env->_copyForwardCompactGroups[compactGroup]._nonEdenStats._liveObjects += 1;
2927
env->_copyForwardCompactGroups[compactGroup]._nonEdenStats._liveBytes += objectSize;
2928
env->_copyForwardCompactGroups[compactGroup]._nonEdenStats._scannedObjects += 1;
2929
env->_copyForwardCompactGroups[compactGroup]._nonEdenStats._scannedBytes += objectSize;
2930
}
2931
}
2932
2933
/* else:
2934
* if not abort, object is copied and stats are updated through copy method
2935
* if abort, object is both copied and scanned, but we do not report those stats
2936
*/
2937
}
2938
2939
2940
void
2941
MM_CopyForwardScheme::scanPointerArrayObjectSlots(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, J9IndexableObject *arrayPtr, ScanReason reason)
2942
{
2943
UDATA index = 0;
2944
bool currentSplitUnitOnly = false;
2945
2946
/* only the _abortInProgress==true or noEvacuation==true cases are expected here, but we should handle all of the exceptional cases (such as the abortFlagRaised() case) */
2947
if (SCAN_REASON_PACKET == reason) {
2948
UDATA peekValue = (UDATA)env->_workStack.peek(env);
2949
if ((PACKET_ARRAY_SPLIT_TAG == (peekValue & PACKET_ARRAY_SPLIT_TAG))) {
2950
UDATA workItem = (UDATA)env->_workStack.pop(env);
2951
index = workItem >> PACKET_ARRAY_SPLIT_SHIFT;
2952
currentSplitUnitOnly = ((PACKET_ARRAY_SPLIT_CURRENT_UNIT_ONLY_TAG == (peekValue & PACKET_ARRAY_SPLIT_CURRENT_UNIT_ONLY_TAG)));
2953
}
2954
}
2955
if (0 == index) {
2956
/* make sure we only record stats for the object once -- note that this means we might
2957
* attribute the scanning cost to the wrong thread, but that's not really important
2958
*/
2959
updateScanStats(env, (J9Object*)arrayPtr, reason);
2960
}
2961
2962
scanPointerArrayObjectSlotsSplit(env, reservingContext, arrayPtr, index, currentSplitUnitOnly);
2963
}
2964
2965
/**
2966
* Scans all the objects to scan in the env->_scanCache and flushes the cache at the end.
2967
*/
2968
void
2969
MM_CopyForwardScheme::completeScanCache(MM_EnvironmentVLHGC *env)
2970
{
2971
MM_CopyScanCacheVLHGC *scanCache = (MM_CopyScanCacheVLHGC *)env->_scanCache;
2972
2973
/* mark that cache is in use as a scan cache */
2974
scanCache->setCurrentlyBeingScanned();
2975
if (scanCache->isSplitArray()) {
2976
/* a scan cache can't be a split array and have generic work available */
2977
Assert_MM_false(scanCache->isScanWorkAvailable());
2978
MM_AllocationContextTarok *reservingContext = getContextForHeapAddress(scanCache->scanCurrent);
2979
J9IndexableObject *arrayObject = (J9IndexableObject *)scanCache->scanCurrent;
2980
UDATA nextIndex = scanCache->_arraySplitIndex;
2981
Assert_MM_true(0 != nextIndex);
2982
scanPointerArrayObjectSlotsSplit(env, reservingContext, arrayObject, nextIndex);
2983
scanCache->clearSplitArray();
2984
} else if (scanCache->isScanWorkAvailable()) {
2985
/* we want to perform a NUMA-aware analogue to "hierarchical scanning" so this scan cache should pull other objects into its node */
2986
MM_AllocationContextTarok *reservingContext = getContextForHeapAddress(scanCache->scanCurrent);
2987
do {
2988
GC_ObjectHeapIteratorAddressOrderedList heapChunkIterator(
2989
_extensions,
2990
(J9Object *)scanCache->scanCurrent,
2991
(J9Object *)scanCache->cacheAlloc, false);
2992
/* Advance the scan pointer to the top of the cache to signify that this has been scanned */
2993
scanCache->scanCurrent = scanCache->cacheAlloc;
2994
/* Scan the chunk for all live objects */
2995
J9Object *objectPtr = NULL;
2996
while((objectPtr = heapChunkIterator.nextObject()) != NULL) {
2997
scanObject(env, reservingContext, objectPtr, SCAN_REASON_COPYSCANCACHE);
2998
}
2999
} while(scanCache->isScanWorkAvailable());
3000
3001
}
3002
/* mark cache as no longer in use for scanning */
3003
scanCache->clearCurrentlyBeingScanned();
3004
/* Done with the cache - build a free list entry in the hole, release the cache to the free list (if not used), and continue */
3005
flushCache(env, scanCache);
3006
}
3007
3008
MMINLINE bool
3009
MM_CopyForwardScheme::incrementalScanMixedObjectSlots(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, MM_CopyScanCacheVLHGC* scanCache, J9Object *objectPtr,
3010
bool hasPartiallyScannedObject, MM_CopyScanCacheVLHGC** nextScanCache)
3011
{
3012
GC_MixedObjectIterator mixedObjectIterator(_javaVM->omrVM);
3013
3014
if (!hasPartiallyScannedObject) {
3015
/* finished previous object, step up for next one */
3016
mixedObjectIterator.initialize(_javaVM->omrVM, objectPtr);
3017
} else {
3018
/* retrieve partial scan state of cache */
3019
mixedObjectIterator.restore(&(scanCache->_objectIteratorState));
3020
}
3021
GC_SlotObject *slotObject;
3022
bool success = true;
3023
while (success && ((slotObject = mixedObjectIterator.nextSlot()) != NULL)) {
3024
/* Copy/Forward the slot reference and perform any inter-region remember work that is required */
3025
success = copyAndForward(env, reservingContext, objectPtr, slotObject);
3026
3027
/* interrupt scanning this cache if it should be aliased or re-aliased */
3028
if (aliasToCopyCache(env, nextScanCache)) {
3029
/* save scan state of cache */
3030
mixedObjectIterator.save(&(scanCache->_objectIteratorState));
3031
return true;
3032
}
3033
}
3034
3035
return false;
3036
}
3037
3038
MMINLINE bool
3039
MM_CopyForwardScheme::incrementalScanClassObjectSlots(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, MM_CopyScanCacheVLHGC* scanCache, J9Object *objectPtr,
3040
bool hasPartiallyScannedObject, MM_CopyScanCacheVLHGC** nextScanCache)
3041
{
3042
/* NOTE: An incremental scan solution should be provided here. For now, just use a full scan and ignore any hierarchical needs. */
3043
scanClassObjectSlots(env, reservingContext, objectPtr);
3044
return false;
3045
}
3046
3047
MMINLINE bool
3048
MM_CopyForwardScheme::incrementalScanClassLoaderObjectSlots(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, MM_CopyScanCacheVLHGC* scanCache, J9Object *objectPtr,
3049
bool hasPartiallyScannedObject, MM_CopyScanCacheVLHGC** nextScanCache)
3050
{
3051
/* NOTE: An incremental scan solution should be provided here. For now, just use a full scan and ignore any hierarchical needs. */
3052
scanClassLoaderObjectSlots(env, reservingContext, objectPtr);
3053
return false;
3054
}
3055
3056
MMINLINE bool
3057
MM_CopyForwardScheme::incrementalScanPointerArrayObjectSlots(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, MM_CopyScanCacheVLHGC* scanCache, J9Object *objectPtr,
3058
bool hasPartiallyScannedObject, MM_CopyScanCacheVLHGC** nextScanCache)
3059
{
3060
GC_PointerArrayIterator pointerArrayIterator(_javaVM);
3061
3062
if (!hasPartiallyScannedObject) {
3063
/* finished previous object, step up for next one */
3064
pointerArrayIterator.initialize(_javaVM, objectPtr);
3065
} else {
3066
/* retrieve partial scan state of cache */
3067
pointerArrayIterator.restore(&(scanCache->_objectIteratorState));
3068
}
3069
3070
GC_SlotObject *slotObject = NULL;
3071
bool success = true;
3072
3073
while (success && ((slotObject = pointerArrayIterator.nextSlot()) != NULL)) {
3074
/* Copy/Forward the slot reference and perform any inter-region remember work that is required */
3075
success = copyAndForward(env, reservingContext, objectPtr, slotObject);
3076
3077
/* interrupt scanning this cache if it should be aliased or re-aliased */
3078
if (aliasToCopyCache(env, nextScanCache)) {
3079
/* save scan state of cache */
3080
pointerArrayIterator.save(&(scanCache->_objectIteratorState));
3081
return true;
3082
}
3083
}
3084
3085
return false;
3086
}
3087
3088
MMINLINE bool
3089
MM_CopyForwardScheme::incrementalScanReferenceObjectSlots(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, MM_CopyScanCacheVLHGC* scanCache, J9Object *objectPtr,
3090
bool hasPartiallyScannedObject, MM_CopyScanCacheVLHGC** nextScanCache)
3091
{
3092
GC_MixedObjectIterator mixedObjectIterator(_javaVM->omrVM);
3093
fj9object_t *referentPtr = J9GC_J9VMJAVALANGREFERENCE_REFERENT_ADDRESS(env, objectPtr);
3094
bool referentMustBeMarked = false;
3095
3096
if (!hasPartiallyScannedObject) {
3097
/* finished previous object, step up for next one */
3098
mixedObjectIterator.initialize(_javaVM->omrVM, objectPtr);
3099
} else {
3100
/* retrieve partial scan state of cache */
3101
mixedObjectIterator.restore(&(scanCache->_objectIteratorState));
3102
}
3103
3104
if (J9AccClassReferenceSoft == (J9CLASS_FLAGS(J9GC_J9OBJECT_CLAZZ(objectPtr, env)) & J9AccClassReferenceMask)) {
3105
/* Object is a Soft Reference: mark it if not expired */
3106
U_32 age = J9GC_J9VMJAVALANGSOFTREFERENCE_AGE(env, objectPtr);
3107
referentMustBeMarked = age < _extensions->getDynamicMaxSoftReferenceAge();
3108
}
3109
3110
GC_SlotObject *slotObject;
3111
bool success = true;
3112
while (success && ((slotObject = mixedObjectIterator.nextSlot()) != NULL)) {
3113
if (((fj9object_t *)slotObject->readAddressFromSlot() != referentPtr) || referentMustBeMarked) {
3114
/* Copy/Forward the slot reference and perform any inter-region remember work that is required */
3115
success = copyAndForward(env, reservingContext, objectPtr, slotObject);
3116
3117
/* interrupt scanning this cache if it should be aliased or re-aliased */
3118
if (aliasToCopyCache(env, nextScanCache)) {
3119
/* save scan state of cache */
3120
mixedObjectIterator.save(&(scanCache->_objectIteratorState));
3121
return true;
3122
}
3123
}
3124
}
3125
3126
return false;
3127
}
3128
3129
void
3130
MM_CopyForwardScheme::incrementalScanCacheBySlot(MM_EnvironmentVLHGC *env)
3131
{
3132
MM_CopyScanCacheVLHGC* scanCache = (MM_CopyScanCacheVLHGC *)env->_scanCache;
3133
J9Object *objectPtr;
3134
MM_CopyScanCacheVLHGC* nextScanCache = scanCache;
3135
3136
nextCache:
3137
/* mark that cache is in use as a scan cache */
3138
scanCache->setCurrentlyBeingScanned();
3139
bool hasPartiallyScannedObject = scanCache->_hasPartiallyScannedObject;
3140
if (scanCache->isScanWorkAvailable()) {
3141
/* we want to perform a NUMA-aware analogue to "hierarchical scanning" so this scan cache should pull other objects into its node */
3142
MM_AllocationContextTarok *reservingContext = getContextForHeapAddress(env->_scanCache->scanCurrent);
3143
do {
3144
void *cacheAlloc = scanCache->cacheAlloc;
3145
GC_ObjectHeapIteratorAddressOrderedList heapChunkIterator(
3146
_extensions,
3147
(J9Object *)scanCache->scanCurrent,
3148
(J9Object *)cacheAlloc,
3149
false);
3150
3151
/* Scan the chunk for live objects, incrementally slot by slot */
3152
while ((objectPtr = heapChunkIterator.nextObject()) != NULL) {
3153
/* retrieve scan state of the scan cache */
3154
switch(_extensions->objectModel.getScanType(objectPtr)) {
3155
case GC_ObjectModel::SCAN_MIXED_OBJECT_LINKED:
3156
case GC_ObjectModel::SCAN_ATOMIC_MARKABLE_REFERENCE_OBJECT:
3157
case GC_ObjectModel::SCAN_MIXED_OBJECT:
3158
case GC_ObjectModel::SCAN_OWNABLESYNCHRONIZER_OBJECT:
3159
hasPartiallyScannedObject = incrementalScanMixedObjectSlots(env, reservingContext, scanCache, objectPtr, hasPartiallyScannedObject, &nextScanCache);
3160
break;
3161
case GC_ObjectModel::SCAN_CLASS_OBJECT:
3162
hasPartiallyScannedObject = incrementalScanClassObjectSlots(env, reservingContext, scanCache, objectPtr, hasPartiallyScannedObject, &nextScanCache);
3163
break;
3164
case GC_ObjectModel::SCAN_CLASSLOADER_OBJECT:
3165
hasPartiallyScannedObject = incrementalScanClassLoaderObjectSlots(env, reservingContext, scanCache, objectPtr, hasPartiallyScannedObject, &nextScanCache);
3166
break;
3167
case GC_ObjectModel::SCAN_POINTER_ARRAY_OBJECT:
3168
hasPartiallyScannedObject = incrementalScanPointerArrayObjectSlots(env, reservingContext, scanCache, objectPtr, hasPartiallyScannedObject, &nextScanCache);
3169
break;
3170
case GC_ObjectModel::SCAN_REFERENCE_MIXED_OBJECT:
3171
hasPartiallyScannedObject = incrementalScanReferenceObjectSlots(env, reservingContext, scanCache, objectPtr, hasPartiallyScannedObject, &nextScanCache);
3172
break;
3173
case GC_ObjectModel::SCAN_PRIMITIVE_ARRAY_OBJECT:
3174
continue;
3175
break;
3176
default:
3177
Assert_MM_unreachable();
3178
}
3179
3180
/* object was not completely scanned in order to interrupt scan */
3181
if (hasPartiallyScannedObject) {
3182
/* interrupt scan, save scan state of cache before deferring */
3183
scanCache->scanCurrent = objectPtr;
3184
scanCache->_hasPartiallyScannedObject = true;
3185
/* Only save the scan cache if it is not a copy cache, and do not add it to the scan list - that
3186
 * can cause contention; instead, defer it to a later time on the same thread.
3187
 * If the deferred cache slot is already occupied, queue the current scan cache on the scan list.
3188
*/
3189
scanCache->clearCurrentlyBeingScanned();
3190
if (!(scanCache->flags & J9VM_MODRON_SCAVENGER_CACHE_TYPE_COPY)) {
3191
if (NULL == env->_deferredScanCache) {
3192
env->_deferredScanCache = scanCache;
3193
} else {
3194
#if defined(J9MODRON_TGC_PARALLEL_STATISTICS)
3195
env->_copyForwardStats._releaseScanListCount += 1;
3196
#endif /* J9MODRON_TGC_PARALLEL_STATISTICS */
3197
addCacheEntryToScanCacheListAndNotify(env, scanCache);
3198
}
3199
}
3200
env->_scanCache = scanCache = nextScanCache;
3201
goto nextCache;
3202
}
3203
}
3204
/* Advance the scan pointer for the objects that were scanned */
3205
scanCache->scanCurrent = cacheAlloc;
3206
} while (scanCache->isScanWorkAvailable());
3207
}
3208
/* although we are about to flush this cache, the flush occurs only if the cache is not in use,
3209
 * hence we still need to store the state of the current scan */
3210
scanCache->_hasPartiallyScannedObject = false;
3211
/* mark cache as no longer in use for scanning */
3212
scanCache->clearCurrentlyBeingScanned();
3213
/* Done with the cache - build a free list entry in the hole, release the cache to the free list (if not used), and continue */
3214
flushCache(env, scanCache);
3215
}
3216
3217
void
3218
MM_CopyForwardScheme::cleanOverflowedRegion(MM_EnvironmentVLHGC *env, MM_HeapRegionDescriptorVLHGC *region, U_8 flagToClean)
3219
{
3220
Assert_MM_true(region->containsObjects());
3221
/* do we need to clean this region? */
3222
U_8 flags = region->_markData._overflowFlags;
3223
if (flagToClean == (flags & flagToClean)) {
3224
/* Region must be cleaned */
3225
/* save back the new flags, first, in case we re-overflow in another thread (or this thread) */
3226
U_8 newFlags = flags & ~flagToClean;
3227
region->_markData._overflowFlags = newFlags;
3228
/* Force our write of the overflow flags from our cache and ensure that we have no stale mark map data before we walk */
3229
MM_AtomicOperations::sync();
3230
if (region->_copyForwardData._evacuateSet || region->isFreshSurvivorRegion()) {
3231
cleanOverflowInRange(env, (UDATA *)region->getLowAddress(), (UDATA *)region->getHighAddress());
3232
} else if (region->isSurvivorRegion()) {
3233
GC_SurvivorMemoryIterator survivorIterator(env, region, _compressedSurvivorTable);
3234
while (survivorIterator.next()) {
3235
cleanOverflowInRange(env, (UDATA *)survivorIterator.getCurrentLow(), (UDATA *)survivorIterator.getCurrentHigh());
3236
}
3237
}
3238
}
3239
}
3240
3241
bool
3242
MM_CopyForwardScheme::isWorkPacketsOverflow(MM_EnvironmentVLHGC *env)
3243
{
3244
MM_WorkPackets *packets = (MM_WorkPackets *)(env->_cycleState->_workPackets);
3245
bool result = false;
3246
if (packets->getOverflowFlag()) {
3247
result = true;
3248
}
3249
return result;
3250
}
3251
3252
bool
3253
MM_CopyForwardScheme::handleOverflow(MM_EnvironmentVLHGC *env)
3254
{
3255
MM_WorkPackets *packets = (MM_WorkPackets *)(env->_cycleState->_workPackets);
3256
bool result = false;
3257
3258
if (packets->getOverflowFlag()) {
3259
result = true;
3260
if (((MM_CopyForwardSchemeTask*)env->_currentTask)->synchronizeGCThreadsAndReleaseMainForMark(env, UNIQUE_ID)) {
3261
packets->clearOverflowFlag();
3262
env->_currentTask->releaseSynchronizedGCThreads(env);
3263
}
3264
/* our overflow handling mechanism is to set flags in the region descriptor so clean those regions */
3265
U_8 flagToRemove = MM_RegionBasedOverflowVLHGC::overflowFlagForCollectionType(env, env->_cycleState->_collectionType);
3266
GC_HeapRegionIteratorVLHGC regionIterator = GC_HeapRegionIteratorVLHGC(_regionManager);
3267
MM_HeapRegionDescriptorVLHGC *region = NULL;
3268
while (NULL != (region = regionIterator.nextRegion())) {
3269
if (region->containsObjects()) {
3270
if (J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {
3271
cleanOverflowedRegion(env, region, flagToRemove);
3272
}
3273
}
3274
}
3275
((MM_CopyForwardSchemeTask*)env->_currentTask)->synchronizeGCThreadsForMark(env, UNIQUE_ID);
3276
}
3277
return result;
3278
}
3279
3280
void
3281
MM_CopyForwardScheme::completeScanForAbort(MM_EnvironmentVLHGC *env)
3282
{
3283
/* From this point on, no copying should happen - reservingContext is irrelevant */
3284
MM_AllocationContextTarok *reservingContext = _commonContext;
3285
3286
J9Object *objectPtr = NULL;
3287
do {
3288
while (NULL != (objectPtr = (J9Object *)env->_workStack.pop(env))) {
3289
do {
3290
Assert_MM_false(MM_ForwardedHeader(objectPtr, _extensions->compressObjectReferences()).isForwardedPointer());
3291
scanObject(env, reservingContext, objectPtr, SCAN_REASON_PACKET);
3292
3293
objectPtr = (J9Object *)env->_workStack.popNoWait(env);
3294
} while (NULL != objectPtr);
3295
}
3296
((MM_CopyForwardSchemeTask*)env->_currentTask)->synchronizeGCThreadsForMark(env, UNIQUE_ID);
3297
} while (handleOverflow(env));
3298
}
3299
3300
void
3301
MM_CopyForwardScheme::completeScanWorkPacket(MM_EnvironmentVLHGC *env)
3302
{
3303
MM_AllocationContextTarok *reservingContext = _commonContext;
3304
J9Object *objectPtr = NULL;
3305
3306
while (NULL != (objectPtr = (J9Object *)env->_workStack.popNoWaitFromCurrentInputPacket(env))) {
3307
Assert_MM_false(MM_ForwardedHeader(objectPtr, _extensions->compressObjectReferences()).isForwardedPointer());
3308
scanObject(env, reservingContext, objectPtr, SCAN_REASON_PACKET);
3309
}
3310
}
3311
3312
void
3313
MM_CopyForwardScheme::completeScan(MM_EnvironmentVLHGC *env)
3314
{
3315
UDATA nodeOfThread = 0;
3316
3317
/* if we aren't using NUMA, we don't want to check the thread affinity since we will have only one list of scan caches */
3318
if (_extensions->_numaManager.isPhysicalNUMASupported()) {
3319
nodeOfThread = env->getNumaAffinity();
3320
Assert_MM_true(nodeOfThread <= _extensions->_numaManager.getMaximumNodeNumber());
3321
}
3322
ScanReason scanReason = SCAN_REASON_NONE;
3323
while(SCAN_REASON_NONE != (scanReason = getNextWorkUnit(env, nodeOfThread))) {
3324
if (SCAN_REASON_COPYSCANCACHE == scanReason) {
3325
Assert_MM_true(env->_scanCache->cacheBase <= env->_scanCache->cacheAlloc);
3326
Assert_MM_true(env->_scanCache->cacheAlloc <= env->_scanCache->cacheTop);
3327
Assert_MM_true(env->_scanCache->scanCurrent <= env->_scanCache->cacheAlloc);
3328
3329
switch (_extensions->scavengerScanOrdering) {
3330
case MM_GCExtensions::OMR_GC_SCAVENGER_SCANORDERING_BREADTH_FIRST:
3331
case MM_GCExtensions::OMR_GC_SCAVENGER_SCANORDERING_DYNAMIC_BREADTH_FIRST:
3332
completeScanCache(env);
3333
break;
3334
case MM_GCExtensions::OMR_GC_SCAVENGER_SCANORDERING_HIERARCHICAL:
3335
incrementalScanCacheBySlot(env);
3336
break;
3337
default:
3338
Assert_MM_unreachable();
3339
break;
3340
} /* end of switch on type of scan order */
3341
} else if (SCAN_REASON_PACKET == scanReason) {
3342
completeScanWorkPacket(env);
3343
}
3344
}
3345
3346
/* flush Mark Map caches before we start draining Work Stack (in case of Abort) */
3347
addCopyCachesToFreeList(env);
3348
3349
if (((MM_CopyForwardSchemeTask*)env->_currentTask)->synchronizeGCThreadsAndReleaseMainForAbort(env, UNIQUE_ID)) {
3350
if (abortFlagRaised()) {
3351
_abortInProgress = true;
3352
}
3353
/* use the abort path to handle work packet overflow during copyforwardHybrid */
3354
if (!_abortInProgress && (0 != _regionCountCannotBeEvacuated) && isWorkPacketsOverflow(env)) {
3355
_abortInProgress = true;
3356
}
3357
env->_currentTask->releaseSynchronizedGCThreads(env);
3358
}
3359
3360
if(_abortInProgress) {
3361
completeScanForAbort(env);
3362
}
3363
}
3364
3365
MMINLINE void
3366
MM_CopyForwardScheme::addOwnableSynchronizerObjectInList(MM_EnvironmentVLHGC *env, j9object_t object)
3367
{
3368
if (NULL != _extensions->accessBarrier->isObjectInOwnableSynchronizerList(object)) {
3369
env->getGCEnvironment()->_ownableSynchronizerObjectBuffer->add(env, object);
3370
env->_copyForwardStats._ownableSynchronizerSurvived += 1;
3371
}
3372
}
3373
3374
#if defined(J9VM_GC_FINALIZATION)
3375
void
3376
MM_CopyForwardScheme::scanUnfinalizedObjects(MM_EnvironmentVLHGC *env)
3377
{
3378
/* ensure that all clearable processing is complete up to this point since this phase resurrects objects */
3379
env->_currentTask->synchronizeGCThreads(env, UNIQUE_ID);
3380
3381
GC_FinalizableObjectBuffer buffer(_extensions);
3382
MM_HeapRegionDescriptorVLHGC *region = NULL;
3383
GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);
3384
while(NULL != (region = regionIterator.nextRegion())) {
3385
if (region->_copyForwardData._evacuateSet && !region->getUnfinalizedObjectList()->wasEmpty()) {
3386
if (J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {
3387
J9Object *pointer = region->getUnfinalizedObjectList()->getPriorList();
3388
while (NULL != pointer) {
3389
bool finalizable = false;
3390
env->_copyForwardStats._unfinalizedCandidates += 1;
3391
3392
Assert_MM_true(region->isAddressInRegion(pointer));
3393
3394
/* NOTE: it is safe to read from the forwarded object since either:
3395
* 1. it was copied before unfinalized processing began, or
3396
* 2. it was copied by this thread.
3397
*/
3398
MM_ForwardedHeader forwardedHeader(pointer, _extensions->compressObjectReferences());
3399
J9Object* forwardedPtr = forwardedHeader.getForwardedObject();
3400
if (NULL == forwardedPtr) {
3401
if (_markMap->isBitSet(pointer)) {
3402
forwardedPtr = pointer;
3403
} else {
3404
Assert_MM_mustBeClass(_extensions->objectModel.getPreservedClass(&forwardedHeader));
3405
/* TODO: Use the context for the finalize thread */
3406
MM_AllocationContextTarok *reservingContext = getContextForHeapAddress(pointer);
3407
forwardedPtr = copy(env, reservingContext, &forwardedHeader);
3408
finalizable = true;
3409
3410
if (NULL == forwardedPtr) {
3411
/* We failed to copy the object. This must have caused an abort. This will be dealt with in scanUnfinalizedObjectsComplete */
3412
Assert_MM_false(_abortInProgress);
3413
Assert_MM_true(abortFlagRaised());
3414
forwardedPtr = pointer;
3415
}
3416
}
3417
}
3418
3419
J9Object* next = _extensions->accessBarrier->getFinalizeLink(forwardedPtr);
3420
if (finalizable) {
3421
/* object was not previously marked -- it is now finalizable so push it to the local buffer */
3422
env->_copyForwardStats._unfinalizedEnqueued += 1;
3423
buffer.add(env, forwardedPtr);
3424
env->_cycleState->_finalizationRequired = true;
3425
} else {
3426
env->getGCEnvironment()->_unfinalizedObjectBuffer->add(env, forwardedPtr);
3427
}
3428
3429
pointer = next;
3430
}
3431
3432
/* Flush the local buffer of finalizable objects to the global list.
3433
* This is done once per region to ensure that multi-tenant lists
3434
* only contain objects from the same allocation context
3435
*/
3436
buffer.flush(env);
3437
}
3438
}
3439
}
3440
3441
/* restore everything to a flushed state before exiting */
3442
env->getGCEnvironment()->_unfinalizedObjectBuffer->flush(env);
3443
}
3444
#endif /* J9VM_GC_FINALIZATION */
3445
3446
void
3447
MM_CopyForwardScheme::cleanCardTable(MM_EnvironmentVLHGC *env)
3448
{
3449
Assert_MM_true(MM_CycleState::CT_PARTIAL_GARBAGE_COLLECTION == env->_cycleState->_collectionType);
3450
if (NULL != env->_cycleState->_externalCycleState) {
3451
/* A GMP is in progress */
3452
MM_CopyForwardGMPCardCleaner cardCleaner(this);
3453
cleanCardTableForPartialCollect(env, &cardCleaner);
3454
} else {
3455
/* No GMP is in progress so we can clear more aggressively */
3456
MM_CopyForwardNoGMPCardCleaner cardCleaner(this);
3457
cleanCardTableForPartialCollect(env, &cardCleaner);
3458
}
3459
}
3460
3461
void
3462
MM_CopyForwardScheme::cleanCardTableForPartialCollect(MM_EnvironmentVLHGC *env, MM_CardCleaner *cardCleaner)
3463
{
3464
PORT_ACCESS_FROM_ENVIRONMENT(env);
3465
U_64 cleanStartTime = j9time_hires_clock();
3466
3467
bool gmpIsRunning = (NULL != env->_cycleState->_externalCycleState);
3468
MM_CardTable* cardTable = _extensions->cardTable;
3469
GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);
3470
MM_HeapRegionDescriptorVLHGC *region = NULL;
3471
while(NULL != (region = regionIterator.nextRegion())) {
3472
/* Don't include survivor regions as we scan - they don't need to be processed, and including them would throw off the work unit indices */
3473
if (region->containsObjects() && region->_copyForwardData._initialLiveSet) {
3474
if(J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {
3475
if (!region->_markData._shouldMark) {
3476
/* this region isn't part of the collection set, so it may have dirty or remembered cards in it. */
3477
cardTable->cleanCardsInRegion(env, cardCleaner, region);
3478
} else {
3479
/* this region is part of the collection set, so just change its dirty cards to clean (or GMP_MUST_SCAN) */
3480
void *low = region->getLowAddress();
3481
void *high = region->getHighAddress();
3482
Card *card = cardTable->heapAddrToCardAddr(env, low);
3483
Card *toCard = cardTable->heapAddrToCardAddr(env, high);
3484
3485
while (card < toCard) {
3486
Card fromState = *card;
3487
switch(fromState) {
3488
case CARD_PGC_MUST_SCAN:
3489
*card = CARD_CLEAN;
3490
break;
3491
case CARD_GMP_MUST_SCAN:
3492
/* This can only occur if a GMP is currently active, no transition is required */
3493
Assert_MM_true(gmpIsRunning);
3494
break;
3495
case CARD_DIRTY:
3496
if (gmpIsRunning) {
3497
*card = CARD_GMP_MUST_SCAN;
3498
} else {
3499
*card = CARD_CLEAN;
3500
}
3501
break;
3502
case CARD_CLEAN:
3503
/* do nothing */
3504
break;
3505
case CARD_REMEMBERED:
3506
/* card state valid if left over during aborted card cleaning */
3507
*card = CARD_CLEAN;
3508
break;
3509
case CARD_REMEMBERED_AND_GMP_SCAN:
3510
/* card state valid if left over during aborted card cleaning */
3511
Assert_MM_true(gmpIsRunning);
3512
*card = CARD_GMP_MUST_SCAN;
3513
break;
3514
default:
3515
Assert_MM_unreachable();
3516
}
3517
card += 1;
3518
}
3519
}
3520
}
3521
}
3522
}
3523
3524
U_64 cleanEndTime = j9time_hires_clock();
3525
env->_cardCleaningStats.addToCardCleaningTime(cleanStartTime, cleanEndTime);
3526
}
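/*
 * Illustrative summary of the collection-set card transitions performed above: PGC_MUST_SCAN
 * and REMEMBERED cards become CLEAN, DIRTY cards become GMP_MUST_SCAN while a GMP is running
 * (otherwise CLEAN), and the GMP states are preserved. The enum below is a placeholder; the
 * real Card values are single-byte constants from the card table code.
 */
#if 0
enum SketchCardState {
	SKETCH_CARD_CLEAN,
	SKETCH_CARD_DIRTY,
	SKETCH_CARD_PGC_MUST_SCAN,
	SKETCH_CARD_GMP_MUST_SCAN,
	SKETCH_CARD_REMEMBERED,
	SKETCH_CARD_REMEMBERED_AND_GMP_SCAN
};

static SketchCardState
sketchTransitionCollectionSetCard(SketchCardState fromState, bool gmpIsRunning)
{
	switch (fromState) {
	case SKETCH_CARD_PGC_MUST_SCAN:
	case SKETCH_CARD_REMEMBERED:
		return SKETCH_CARD_CLEAN;
	case SKETCH_CARD_DIRTY:
		return gmpIsRunning ? SKETCH_CARD_GMP_MUST_SCAN : SKETCH_CARD_CLEAN;
	case SKETCH_CARD_GMP_MUST_SCAN:
	case SKETCH_CARD_REMEMBERED_AND_GMP_SCAN:
		/* these states are only valid while a GMP is active; both end up as GMP_MUST_SCAN */
		return SKETCH_CARD_GMP_MUST_SCAN;
	case SKETCH_CARD_CLEAN:
	default:
		return SKETCH_CARD_CLEAN;
	}
}
#endif /* 0 - illustrative sketch only */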
3527
3528
void
3529
MM_CopyForwardScheme::updateOrDeleteObjectsFromExternalCycle(MM_EnvironmentVLHGC *env)
3530
{
3531
/* this function has knowledge of the collection set, which is only valid during a PGC */
3532
Assert_MM_true(NULL != env->_cycleState->_externalCycleState);
3533
3534
MM_MarkMap *externalMarkMap = env->_cycleState->_externalCycleState->_markMap;
3535
Assert_MM_true(externalMarkMap != _markMap);
3536
3537
MM_HeapRegionDescriptorVLHGC *region = NULL;
3538
GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);
3539
while(NULL != (region = regionIterator.nextRegion())) {
3540
if(region->_markData._shouldMark) {
3541
if(J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {
3542
Assert_MM_true(region->_copyForwardData._initialLiveSet);
3543
Assert_MM_false(region->isSurvivorRegion());
3544
Assert_MM_true(region->containsObjects());
3545
3546
if(abortFlagRaised() || region->_markData._noEvacuation) {
3547
/* Walk the mark map range for the region, fixing mark bits to be the subset of the current mark map.
3548
 * (Objects whose bits are cleared have been moved, and the bits at their new locations are already set.)
3549
*/
3550
UDATA currentExternalIndex = externalMarkMap->getSlotIndex((J9Object *)region->getLowAddress());
3551
UDATA topExternalIndex = externalMarkMap->getSlotIndex((J9Object *)region->getHighAddress());
3552
UDATA currentIndex = _markMap->getSlotIndex((J9Object *)region->getLowAddress());
3553
3554
while(currentExternalIndex < topExternalIndex) {
3555
UDATA slot = externalMarkMap->getSlot(currentExternalIndex);
3556
if(0 != slot) {
3557
externalMarkMap->setSlot(currentExternalIndex, slot & _markMap->getSlot(currentIndex));
3558
}
3559
currentExternalIndex += 1;
3560
currentIndex += 1;
3561
}
3562
} else {
3563
Assert_MM_false(region->_nextMarkMapCleared);
3564
externalMarkMap->setBitsForRegion(env, region, true);
3565
}
3566
}
3567
}
3568
}
3569
3570
/* Mark map processing must be completed before we move to work packets */
3571
env->_currentTask->synchronizeGCThreads(env, UNIQUE_ID);
3572
3573
/* Clear or update references on external cycle work packets, depending on whether the reference has been forwarded or not */
3574
UDATA totalCount = 0;
3575
UDATA deletedCount = 0;
3576
UDATA preservedCount = 0;
3577
MM_WorkPacketsIterator packetIterator(env, env->_cycleState->_externalCycleState->_workPackets);
3578
MM_Packet *packet = NULL;
3579
while (NULL != (packet = packetIterator.nextPacket(env))) {
3580
if (!packet->isEmpty()) {
3581
/* there is data in this packet so use it */
3582
if (J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {
3583
MM_PacketSlotIterator slotIterator(packet);
3584
J9Object **slot = NULL;
3585
while (NULL != (slot = slotIterator.nextSlot())) {
3586
J9Object *object = *slot;
3587
Assert_MM_true(NULL != object);
3588
if (PACKET_INVALID_OBJECT != (UDATA)object) {
3589
totalCount += 1;
3590
if(isLiveObject(object)) {
3591
Assert_MM_true(externalMarkMap->isBitSet(object));
3592
Assert_MM_true(_markMap->isBitSet(object));
3593
Assert_MM_mustBeClass(J9GC_J9OBJECT_CLAZZ(object, env));
3594
} else {
3595
Assert_MM_true(isObjectInEvacuateMemory(object));
3596
J9Object *forwardedObject = updateForwardedPointer(object);
3597
if(externalMarkMap->isBitSet(forwardedObject)) {
3598
Assert_MM_true(_markMap->isBitSet(forwardedObject));
3599
Assert_MM_mustBeClass(J9GC_J9OBJECT_CLAZZ(forwardedObject, env));
3600
preservedCount += 1;
3601
*slot = forwardedObject;
3602
} else {
3603
/* this object failed to survive the PGC cycle */
3604
Assert_MM_true(!_markMap->isBitSet(forwardedObject));
3605
deletedCount += 1;
3606
slotIterator.resetSplitTagIndexForObject(object, PACKET_INVALID_OBJECT);
3607
*slot = (J9Object*)PACKET_INVALID_OBJECT;
3608
}
3609
}
3610
}
3611
}
3612
}
3613
}
3614
}
3615
3616
Trc_MM_CopyForwardScheme_deleteDeadObjectsFromExternalCycle(env->getLanguageVMThread(), totalCount, deletedCount, preservedCount);
3617
}
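/*
 * Illustrative word-level view of the external mark map fix-up above: for regions that could
 * not be evacuated, each external (GMP) mark map word is intersected with the corresponding
 * word of the PGC mark map, so the GMP map keeps bits only for objects that are still live in
 * place. This assumes both maps cover the range with the same word geometry, as the real maps do.
 */
#if 0
#include <cstddef>
#include <cstdint>

static void
sketchIntersectMarkMapWords(uintptr_t *externalMapWords, const uintptr_t *currentMapWords, size_t wordCount)
{
	for (size_t i = 0; i < wordCount; i++) {
		uintptr_t externalWord = externalMapWords[i];
		if (0 != externalWord) {
			/* keep only the bits that are also set in the current (PGC) mark map */
			externalMapWords[i] = externalWord & currentMapWords[i];
		}
	}
}
#endif /* 0 - illustrative sketch only */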
3618
3619
bool
3620
MM_CopyForwardScheme::scanObjectsInRange(MM_EnvironmentVLHGC *env, void *lowAddress, void *highAddress, bool rememberedObjectsOnly)
3621
{
3622
/* we only support scanning exactly one card at a time */
3623
Assert_MM_true(0 == ((UDATA)lowAddress & (J9MODRON_HEAP_BYTES_PER_UDATA_OF_HEAP_MAP - 1)));
3624
Assert_MM_true(((UDATA)lowAddress + CARD_SIZE) == (UDATA)highAddress);
3625
/* card cleaning is done after stack processing so any objects we copy should be copied into the node which refers to them, even from cards */
3626
MM_AllocationContextTarok *reservingContext = getContextForHeapAddress(lowAddress);
3627
3628
if (rememberedObjectsOnly) {
3629
for (UDATA bias = 0; bias < CARD_SIZE; bias += J9MODRON_HEAP_BYTES_PER_UDATA_OF_HEAP_MAP) {
3630
void *scanAddress = (void *)((UDATA)lowAddress + bias);
3631
MM_HeapMapWordIterator markedObjectIterator(_markMap, scanAddress);
3632
J9Object *fromObject = NULL;
3633
while (NULL != (fromObject = markedObjectIterator.nextObject())) {
3634
/* this object needs to be re-scanned (to update next mark map and RSM) */
3635
if (_extensions->objectModel.isRemembered(fromObject)) {
3636
scanObject(env, reservingContext, fromObject, SCAN_REASON_DIRTY_CARD);
3637
3638
}
3639
}
3640
}
3641
} else {
3642
for (UDATA bias = 0; bias < CARD_SIZE; bias += J9MODRON_HEAP_BYTES_PER_UDATA_OF_HEAP_MAP) {
3643
void *scanAddress = (void *)((UDATA)lowAddress + bias);
3644
MM_HeapMapWordIterator markedObjectIterator(_markMap, scanAddress);
3645
J9Object *fromObject = NULL;
3646
while (NULL != (fromObject = markedObjectIterator.nextObject())) {
3647
/* this object needs to be re-scanned (to update next mark map and RSM) */
3648
scanObject(env, reservingContext, fromObject, SCAN_REASON_DIRTY_CARD);
3649
}
3650
}
3651
}
3652
/* we can only clean the card if we haven't raised the abort flag since we might have aborted in this thread
3653
* while processing the card while another thread copied an object that this card referred to. We need to
3654
* make sure that we re-clean this card in abort processing, in that case, so don't clean the card.
3655
* If an abort _is_ already in progress, however, no objects can be copied so we are safe to clean this card
3656
* knowing that all its objects have correct references.
3657
*/
3658
return _abortInProgress || !abortFlagRaised();
3659
}
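/*
 * Illustrative sketch of the card walk above: scanObjectsInRange covers exactly one card and
 * advances in strides of J9MODRON_HEAP_BYTES_PER_UDATA_OF_HEAP_MAP, so each step corresponds
 * to one mark-map word and only the objects whose bits are set in that word are visited. The
 * geometry assumed below (512-byte cards, 8 heap bytes per mark bit) is for illustration only.
 */
#if 0
#include <cstdint>
#include <cstdio>

static void
sketchWalkCardByMapWord(uintptr_t cardBase)
{
	const uintptr_t cardSize = 512;
	const uintptr_t heapBytesPerMapWord = sizeof(uintptr_t) * 8 /* bits per word */ * 8 /* heap bytes per bit */;

	for (uintptr_t bias = 0; bias < cardSize; bias += heapBytesPerMapWord) {
		uintptr_t chunkBase = cardBase + bias;
		/* the real code builds an MM_HeapMapWordIterator over this chunk and scans each marked object */
		printf("scan the mark-map word covering heap [%p, %p)\n",
			(void *)chunkBase, (void *)(chunkBase + heapBytesPerMapWord));
	}
}
#endif /* 0 - illustrative sketch only */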
3660
3661
3662
/**
3663
* The root set scanner for MM_CopyForwardScheme.
3664
* @copydoc MM_RootScanner
3665
* @ingroup GC_Modron_Standard
3666
*/
3667
class MM_CopyForwardSchemeRootScanner : public MM_RootScanner
3668
{
3669
private:
3670
MM_CopyForwardScheme *_copyForwardScheme; /**< Local reference back to the copy forward scheme driving the collection */
3671
3672
private:
3673
virtual void doSlot(J9Object **slotPtr) {
3674
if (NULL != *slotPtr) {
3675
/* we don't have the context of this slot so just relocate the object into the same node where we found it */
3676
MM_AllocationContextTarok *reservingContext = _copyForwardScheme->getContextForHeapAddress(*slotPtr);
3677
_copyForwardScheme->copyAndForward(MM_EnvironmentVLHGC::getEnvironment(_env), reservingContext, slotPtr);
3678
}
3679
}
3680
3681
virtual void doStackSlot(J9Object **slotPtr, void *walkState, const void* stackLocation) {
3682
if (_copyForwardScheme->isHeapObject(*slotPtr)) {
3683
/* heap object - validate and mark */
3684
Assert_MM_validStackSlot(MM_StackSlotValidator(MM_StackSlotValidator::COULD_BE_FORWARDED, *slotPtr, stackLocation, walkState).validate(_env));
3685
/* we know that threads are bound to nodes so relocalize this object into the node of the thread which directly references it */
3686
J9VMThread *thread = ((J9StackWalkState *)walkState)->currentThread;
3687
MM_AllocationContextTarok *reservingContext = (MM_AllocationContextTarok *)MM_EnvironmentVLHGC::getEnvironment(thread)->getAllocationContext();
3688
_copyForwardScheme->copyAndForward(MM_EnvironmentVLHGC::getEnvironment(_env), reservingContext, slotPtr);
3689
} else if (NULL != *slotPtr) {
3690
/* stack object - just validate */
3691
Assert_MM_validStackSlot(MM_StackSlotValidator(MM_StackSlotValidator::NOT_ON_HEAP, *slotPtr, stackLocation, walkState).validate(_env));
3692
}
3693
}
3694
3695
virtual void doVMThreadSlot(J9Object **slotPtr, GC_VMThreadIterator *vmThreadIterator) {
3696
if (_copyForwardScheme->isHeapObject(*slotPtr)) {
3697
/* we know that threads are bound to nodes so relocalize this object into the node of the thread which directly references it */
3698
J9VMThread *thread = vmThreadIterator->getVMThread();
3699
MM_AllocationContextTarok *reservingContext = (MM_AllocationContextTarok *)MM_EnvironmentVLHGC::getEnvironment(thread)->getAllocationContext();
3700
_copyForwardScheme->copyAndForward(MM_EnvironmentVLHGC::getEnvironment(_env), reservingContext, slotPtr);
3701
} else if (NULL != *slotPtr) {
3702
Assert_MM_true(vmthreaditerator_state_monitor_records == vmThreadIterator->getState());
3703
}
3704
}
3705
3706
virtual void doClass(J9Class *clazz) {
3707
/* Should never try to scan J9Class structures - these are handled by j.l.c and class loader references on the heap */
3708
Assert_MM_unreachable();
3709
}
3710
3711
virtual void doClassLoader(J9ClassLoader *classLoader) {
3712
if(0 == (classLoader->gcFlags & J9_GC_CLASS_LOADER_DEAD)) {
3713
/* until we decide if class loaders should be common, just relocate this object back into its existing node */
3714
MM_AllocationContextTarok *reservingContext = _copyForwardScheme->getContextForHeapAddress(classLoader->classLoaderObject);
3715
_copyForwardScheme->copyAndForward(MM_EnvironmentVLHGC::getEnvironment(_env), reservingContext, &classLoader->classLoaderObject);
3716
}
3717
}
3718
3719
#if defined(J9VM_GC_FINALIZATION)
3720
virtual void doFinalizableObject(j9object_t object) {
3721
Assert_MM_unreachable();
3722
}
3723
3724
virtual void scanFinalizableObjects(MM_EnvironmentBase *env) {
3725
reportScanningStarted(RootScannerEntity_FinalizableObjects);
3726
/* synchronization can be expensive so skip it if there's no work to do */
3727
if (_copyForwardScheme->_shouldScanFinalizableObjects) {
3728
if (env->_currentTask->synchronizeGCThreadsAndReleaseSingleThread(env, UNIQUE_ID)) {
3729
_copyForwardScheme->scanFinalizableObjects(MM_EnvironmentVLHGC::getEnvironment(env));
3730
env->_currentTask->releaseSynchronizedGCThreads(env);
3731
}
3732
} else {
3733
/* double check that there really was no work to do */
3734
Assert_MM_true(!MM_GCExtensions::getExtensions(env)->finalizeListManager->isFinalizableObjectProcessingRequired());
3735
}
3736
reportScanningEnded(RootScannerEntity_FinalizableObjects);
3737
}
3738
#endif /* J9VM_GC_FINALIZATION */
3739
3740
public:
3741
MM_CopyForwardSchemeRootScanner(MM_EnvironmentVLHGC *env, MM_CopyForwardScheme *copyForwardScheme) :
3742
MM_RootScanner(env),
3743
_copyForwardScheme(copyForwardScheme)
3744
{
3745
_typeId = __FUNCTION__;
3746
};
3747
3748
/**
3749
* Scan all root set references from the VM into the heap.
3750
* For all slots that are hard root references into the heap, the appropriate slot handler will be called.
3751
*/
3752
void
3753
scanRoots(MM_EnvironmentBase *env)
3754
{
3755
/* threads and their stacks tell us more about NUMA affinity than anything else so ensure that we scan them first and process all scan caches that they produce before proceeding */
3756
scanThreads(env);
3757
_copyForwardScheme->completeScan(MM_EnvironmentVLHGC::getEnvironment(env));
3758
3759
Assert_MM_true(_classDataAsRoots == !_copyForwardScheme->isDynamicClassUnloadingEnabled());
3760
if (_classDataAsRoots) {
3761
/* The classLoaderObject of a class loader might be in the nursery, but a class loader
3762
* can never be in the remembered set, so include class loaders here.
3763
*/
3764
scanClassLoaders(env);
3765
}
3766
3767
#if defined(J9VM_GC_FINALIZATION)
3768
scanFinalizableObjects(env);
3769
#endif /* J9VM_GC_FINALIZATION */
3770
scanJNIGlobalReferences(env);
3771
3772
if(_stringTableAsRoot){
3773
scanStringTable(env);
3774
}
3775
}
3776
};
3777
3778
/**
3779
* The clearable root set scanner for MM_CopyForwardScheme.
3780
* @copydoc MM_RootScanner
3781
* @ingroup GC_Modron_Standard
3782
*/
3783
class MM_CopyForwardSchemeRootClearer : public MM_RootScanner
3784
{
3785
private:
3786
MM_CopyForwardScheme *_copyForwardScheme;
3787
3788
private:
3789
virtual void doSlot(J9Object **slotPtr) {
3790
Assert_MM_unreachable(); /* Should not have gotten here - how do you clear a generic slot? */
3791
}
3792
3793
virtual void doClass(J9Class *clazz) {
3794
Assert_MM_unreachable(); /* Should not have gotten here - how do you clear a class? */
3795
}
3796
3797
virtual void scanSoftReferenceObjects(MM_EnvironmentBase *env) {
3798
reportScanningStarted(RootScannerEntity_SoftReferenceObjects);
3799
_copyForwardScheme->scanSoftReferenceObjects(MM_EnvironmentVLHGC::getEnvironment(env));
3800
reportScanningEnded(RootScannerEntity_SoftReferenceObjects);
3801
}
3802
3803
virtual CompletePhaseCode scanSoftReferencesComplete(MM_EnvironmentBase *env) {
3804
/* do nothing -- no new objects could have been discovered by soft reference processing */
3805
return complete_phase_OK;
3806
}
3807
3808
virtual void scanWeakReferenceObjects(MM_EnvironmentBase *env) {
3809
reportScanningStarted(RootScannerEntity_WeakReferenceObjects);
3810
_copyForwardScheme->scanWeakReferenceObjects(MM_EnvironmentVLHGC::getEnvironment(env));
3811
reportScanningEnded(RootScannerEntity_WeakReferenceObjects);
3812
}
3813
3814
virtual CompletePhaseCode scanWeakReferencesComplete(MM_EnvironmentBase *env) {
3815
/* No new objects could have been discovered by soft / weak reference processing,
3816
* but we must complete this phase prior to unfinalized processing to ensure that
3817
* finalizable referents get cleared */
3818
env->_currentTask->synchronizeGCThreads(env, UNIQUE_ID);
3819
return complete_phase_OK;
3820
}
3821
3822
#if defined(J9VM_GC_FINALIZATION)
3823
virtual void scanUnfinalizedObjects(MM_EnvironmentBase *env) {
3824
/* allow the scheme to handle this, since it knows which regions are interesting */
3825
reportScanningStarted(RootScannerEntity_UnfinalizedObjects);
3826
_copyForwardScheme->scanUnfinalizedObjects(MM_EnvironmentVLHGC::getEnvironment(env));
3827
reportScanningEnded(RootScannerEntity_UnfinalizedObjects);
3828
}
3829
3830
virtual CompletePhaseCode scanUnfinalizedObjectsComplete(MM_EnvironmentBase *env) {
3831
reportScanningStarted(RootScannerEntity_UnfinalizedObjectsComplete);
3832
/* ensure that all unfinalized processing is complete before we start marking additional objects */
3833
env->_currentTask->synchronizeGCThreads(env, UNIQUE_ID);
3834
3835
bool wasAbortAlreadyInProgress = _copyForwardScheme->_abortInProgress;
3836
_copyForwardScheme->completeScan(MM_EnvironmentVLHGC::getEnvironment(env));
3837
3838
if (!wasAbortAlreadyInProgress && _copyForwardScheme->_abortInProgress) {
3839
/* an abort occurred during unfinalized processing: there could be unscanned or unforwarded objects on the finalizable list */
3840
if (J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {
3841
/* since we know we're in abort handling mode and won't be copying any of these objects we don't need to synchronize here */
3842
_copyForwardScheme->scanFinalizableObjects(MM_EnvironmentVLHGC::getEnvironment(env));
3843
}
3844
_copyForwardScheme->completeScanForAbort(MM_EnvironmentVLHGC::getEnvironment(env));
3845
}
3846
reportScanningEnded(RootScannerEntity_UnfinalizedObjectsComplete);
3847
return complete_phase_OK;
3848
}
3849
#endif /* J9VM_GC_FINALIZATION */
3850
3851
virtual void scanOwnableSynchronizerObjects(MM_EnvironmentBase *env) {
3852
/* allow the scheme to handle this, since it knows which regions are interesting */
3853
/* intentionally empty: ownable synchronizer processing is handled in the copy-continuous phase */
3854
}
3855
3856
virtual void scanPhantomReferenceObjects(MM_EnvironmentBase *env) {
3857
reportScanningStarted(RootScannerEntity_PhantomReferenceObjects);
3858
_copyForwardScheme->scanPhantomReferenceObjects(MM_EnvironmentVLHGC::getEnvironment(env));
3859
reportScanningEnded(RootScannerEntity_PhantomReferenceObjects);
3860
}
3861
3862
virtual CompletePhaseCode scanPhantomReferencesComplete(MM_EnvironmentBase *envBase) {
3863
MM_EnvironmentVLHGC *env = MM_EnvironmentVLHGC::getEnvironment(envBase);
3864
3865
reportScanningStarted(RootScannerEntity_PhantomReferenceObjectsComplete);
3866
env->_currentTask->synchronizeGCThreads(env, UNIQUE_ID);
3867
Assert_MM_true(MM_CycleState::references_clear_phantom == (env->_cycleState->_referenceObjectOptions & MM_CycleState::references_clear_phantom));
3868
3869
/* phantom reference processing may resurrect objects - scan them now */
3870
_copyForwardScheme->completeScan(env);
3871
3872
reportScanningEnded(RootScannerEntity_PhantomReferenceObjectsComplete);
3873
return complete_phase_OK;
3874
}
3875
3876
virtual void doMonitorReference(J9ObjectMonitor *objectMonitor, GC_HashTableIterator *monitorReferenceIterator) {
3877
J9ThreadAbstractMonitor * monitor = (J9ThreadAbstractMonitor*)objectMonitor->monitor;
3878
MM_EnvironmentVLHGC::getEnvironment(_env)->_copyForwardStats._monitorReferenceCandidates += 1;
3879
J9Object *objectPtr = (J9Object *)monitor->userData;
3880
if(!_copyForwardScheme->isLiveObject(objectPtr)) {
3881
Assert_MM_true(_copyForwardScheme->isObjectInEvacuateMemory(objectPtr));
3882
MM_ForwardedHeader forwardedHeader(objectPtr, _extensions->compressObjectReferences());
3883
J9Object *forwardPtr = forwardedHeader.getForwardedObject();
3884
if(NULL != forwardPtr) {
3885
monitor->userData = (UDATA)forwardPtr;
3886
} else {
3887
Assert_MM_mustBeClass(_extensions->objectModel.getPreservedClass(&forwardedHeader));
3888
monitorReferenceIterator->removeSlot();
3889
MM_EnvironmentVLHGC::getEnvironment(_env)->_copyForwardStats._monitorReferenceCleared += 1;
3890
/* We must call objectMonitorDestroy (as opposed to omrthread_monitor_destroy) when the
3891
* monitor is not internal to the GC
3892
*/
3893
static_cast<J9JavaVM*>(_omrVM->_language_vm)->internalVMFunctions->objectMonitorDestroy(static_cast<J9JavaVM*>(_omrVM->_language_vm), (J9VMThread *)_env->getLanguageVMThread(), (omrthread_monitor_t)monitor);
3894
}
3895
}
3896
}
3897
3898
virtual CompletePhaseCode scanMonitorReferencesComplete(MM_EnvironmentBase *envBase) {
3899
MM_EnvironmentVLHGC* env = MM_EnvironmentVLHGC::getEnvironment(envBase);
3900
reportScanningStarted(RootScannerEntity_MonitorReferenceObjectsComplete);
3901
((J9JavaVM *)env->getLanguageVM())->internalVMFunctions->objectMonitorDestroyComplete((J9JavaVM *)env->getLanguageVM(), (J9VMThread *)env->getLanguageVMThread());
3902
reportScanningEnded(RootScannerEntity_MonitorReferenceObjectsComplete);
3903
return complete_phase_OK;
3904
}
3905
3906
virtual void doJNIWeakGlobalReference(J9Object **slotPtr) {
3907
J9Object *objectPtr = *slotPtr;
3908
if(!_copyForwardScheme->isLiveObject(objectPtr)) {
3909
Assert_MM_true(_copyForwardScheme->isObjectInEvacuateMemory(objectPtr));
3910
MM_ForwardedHeader forwardedHeader(objectPtr, _extensions->compressObjectReferences());
3911
*slotPtr = forwardedHeader.getForwardedObject();
3912
}
3913
}
3914
3915
virtual void doStringTableSlot(J9Object **slotPtr, GC_StringTableIterator *stringTableIterator) {
3916
J9Object *objectPtr = *slotPtr;
3917
MM_EnvironmentVLHGC::getEnvironment(_env)->_copyForwardStats._stringConstantsCandidates += 1;
3918
if(!_copyForwardScheme->isLiveObject(objectPtr)) {
3919
Assert_MM_true(_copyForwardScheme->isObjectInEvacuateMemory(objectPtr));
3920
MM_ForwardedHeader forwardedHeader(objectPtr, _extensions->compressObjectReferences());
3921
objectPtr = forwardedHeader.getForwardedObject();
3922
if(NULL == objectPtr) {
3923
Assert_MM_mustBeClass(_extensions->objectModel.getPreservedClass(&forwardedHeader));
3924
MM_EnvironmentVLHGC::getEnvironment(_env)->_copyForwardStats._stringConstantsCleared += 1;
3925
stringTableIterator->removeSlot();
3926
} else {
3927
*slotPtr = objectPtr;
3928
}
3929
}
3930
}
3931
3932
#if defined(J9VM_GC_ENABLE_DOUBLE_MAP)
3933
virtual void doDoubleMappedObjectSlot(J9Object *objectPtr, struct J9PortVmemIdentifier *identifier) {
3934
MM_EnvironmentVLHGC *env = MM_EnvironmentVLHGC::getEnvironment(_env);
3935
env->_copyForwardStats._doubleMappedArrayletsCandidates += 1;
3936
if (!_copyForwardScheme->isLiveObject(objectPtr)) {
3937
Assert_MM_true(_copyForwardScheme->isObjectInEvacuateMemory(objectPtr));
3938
MM_ForwardedHeader forwardedHeader(objectPtr, _extensions->compressObjectReferences());
3939
objectPtr = forwardedHeader.getForwardedObject();
3940
if (NULL == objectPtr) {
3941
Assert_MM_mustBeClass(_extensions->objectModel.getPreservedClass(&forwardedHeader));
3942
env->_copyForwardStats._doubleMappedArrayletsCleared += 1;
3943
OMRPORT_ACCESS_FROM_OMRVM(_omrVM);
3944
omrvmem_release_double_mapped_region(identifier->address, identifier->size, identifier);
3945
}
3946
}
3947
}
3948
#endif /* J9VM_GC_ENABLE_DOUBLE_MAP */
3949
3950
/**
3951
* Clear the string table cache slot if the object is not marked
3952
*/
3953
virtual void doStringCacheTableSlot(J9Object **slotPtr) {
3954
J9Object *objectPtr = *slotPtr;
3955
if(!_copyForwardScheme->isLiveObject(objectPtr)) {
3956
Assert_MM_true(_copyForwardScheme->isObjectInEvacuateMemory(objectPtr));
3957
MM_ForwardedHeader forwardedHeader(objectPtr, _extensions->compressObjectReferences());
3958
*slotPtr = forwardedHeader.getForwardedObject();
3959
}
3960
}
3961
3962
#if defined(J9VM_OPT_JVMTI)
3963
virtual void doJVMTIObjectTagSlot(J9Object **slotPtr, GC_JVMTIObjectTagTableIterator *objectTagTableIterator)
3964
{
3965
J9Object *objectPtr = *slotPtr;
3966
if(!_copyForwardScheme->isLiveObject(objectPtr)) {
3967
Assert_MM_true(_copyForwardScheme->isObjectInEvacuateMemory(objectPtr));
3968
MM_ForwardedHeader forwardedHeader(objectPtr, _extensions->compressObjectReferences());
3969
*slotPtr = forwardedHeader.getForwardedObject();
3970
}
3971
}
3972
#endif /* J9VM_OPT_JVMTI */
3973
3974
#if defined(J9VM_GC_FINALIZATION)
3975
virtual void doFinalizableObject(j9object_t object) {
3976
Assert_MM_unreachable();
3977
}
3978
#endif /* J9VM_GC_FINALIZATION */
3979
3980
public:
3981
MM_CopyForwardSchemeRootClearer(MM_EnvironmentVLHGC *env, MM_CopyForwardScheme *copyForwardScheme) :
3982
MM_RootScanner(env),
3983
_copyForwardScheme(copyForwardScheme)
3984
{
3985
_typeId = __FUNCTION__;
3986
};
3987
};
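/*
 * Illustrative sketch (not part of the original scheme): the clearable slot handlers above
 * (doMonitorReference, doJNIWeakGlobalReference, doStringTableSlot, doStringCacheTableSlot and
 * doJVMTIObjectTagSlot) all follow the same pattern for a slot whose referent is not known to be
 * live: consult the forwarded header and either update the slot to the copied object or treat the
 * referent as dead and clear/remove the slot. The hypothetical helper below only summarizes that
 * shared pattern; its name and signature are illustrative, not part of this file.
 */
#if 0 /* documentation-only sketch */
static bool
exampleResolveClearableSlot(MM_CopyForwardScheme *scheme, MM_GCExtensions *extensions, J9Object **slotPtr)
{
	J9Object *objectPtr = *slotPtr;
	if (scheme->isLiveObject(objectPtr)) {
		return true; /* already live (copied or outside evacuate space): nothing to do */
	}
	MM_ForwardedHeader forwardedHeader(objectPtr, extensions->compressObjectReferences());
	J9Object *forwardedObject = forwardedHeader.getForwardedObject();
	if (NULL != forwardedObject) {
		*slotPtr = forwardedObject; /* the referent survived: point the slot at its new location */
		return true;
	}
	return false; /* the referent did not survive: the caller clears or removes the slot */
}
#endif /* 0 */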
3988
3989
void
3990
MM_CopyForwardScheme::clearMarkMapForPartialCollect(MM_EnvironmentVLHGC *env)
3991
{
3992
Assert_MM_true(MM_CycleState::CT_PARTIAL_GARBAGE_COLLECTION == env->_cycleState->_collectionType);
3993
3994
/* Walk the collection set to determine what ranges of the mark map should be cleared */
3995
MM_HeapRegionDescriptorVLHGC *region = NULL;
3996
GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);
3997
while(NULL != (region = regionIterator.nextRegion())) {
3998
if (region->_copyForwardData._evacuateSet) {
3999
if (J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {
4000
/* we start with the assumption that an abort will occur, so we set _previousMarkMapCleared to false.
4001
* If no abort occurs, the region will be recycled, at which point the flag is set back to true.
4002
*/
4003
if (region->_previousMarkMapCleared) {
4004
region->_previousMarkMapCleared = false;
4005
if (_extensions->tarokEnableExpensiveAssertions) {
4006
Assert_MM_true(_markMap->checkBitsForRegion(env, region));
4007
}
4008
/* TODO: need to handle region->hasValidMarkMap() case for optimum performance */
4009
/* consider remembering where the last allocated object is, to minimize clearing for regions with low occupancy (indeed, regions with low occupancy are rather good candidates for evacuation). */
4010
// } else if (region->hasValidMarkMap()) {
4011
} else {
4012
_markMap->setBitsForRegion(env, region, true);
4013
}
4014
}
4015
}
4016
}
4017
}
4018
4019
void
4020
MM_CopyForwardScheme::clearCardTableForPartialCollect(MM_EnvironmentVLHGC *env)
4021
{
4022
Assert_MM_true(MM_CycleState::CT_PARTIAL_GARBAGE_COLLECTION == env->_cycleState->_collectionType);
4023
bool gmpIsRunning = (NULL != env->_cycleState->_externalCycleState);
4024
4025
if (gmpIsRunning) {
4026
/* Walk the collection set to determine what ranges of the mark map should be cleared */
4027
MM_HeapRegionDescriptorVLHGC *region = NULL;
4028
GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);
4029
MM_CardTable *cardTable = _extensions->cardTable;
4030
while(NULL != (region = regionIterator.nextRegion())) {
4031
if (region->_copyForwardData._evacuateSet && !region->_markData._noEvacuation) {
4032
if(J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {
4033
void *low = region->getLowAddress();
4034
void *high = region->getHighAddress();
4035
Card *lowCard = cardTable->heapAddrToCardAddr(env, low);
4036
Card *highCard = cardTable->heapAddrToCardAddr(env, high);
4037
UDATA cardRangeSize = (UDATA)highCard - (UDATA)lowCard;
4038
memset(lowCard, CARD_CLEAN, cardRangeSize);
4039
}
4040
}
4041
}
4042
}
4043
}
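/*
 * Illustrative arithmetic (an assumption, not taken from this file): each card is one byte in the
 * card table and covers a fixed-size chunk of heap (512 bytes on typical J9 configurations). Under
 * that assumption the memset() above touches (regionSize / bytesPerCard) bytes of card table per
 * region, e.g. a hypothetical 2MB region would clear 2MB / 512 = 4096 card-table bytes.
 */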
4044
4045
void
4046
MM_CopyForwardScheme::workThreadGarbageCollect(MM_EnvironmentVLHGC *env)
4047
{
4048
/* GC init (set up per-invocation values) */
4049
workerSetupForCopyForward(env);
4050
4051
env->_workStack.prepareForWork(env, env->_cycleState->_workPackets);
4052
4053
/* pre-populate the _reservedRegionList with the flushed regions */
4054
/* this is a simple operation, so do it in one GC thread */
4055
if (J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {
4056
GC_HeapRegionIteratorVLHGC regionIterator(_regionManager, MM_HeapRegionDescriptor::MANAGED);
4057
MM_HeapRegionDescriptorVLHGC *region = NULL;
4058
while (NULL != (region = regionIterator.nextRegion())) {
4059
if (region->containsObjects()) {
4060
UDATA compactGroup = MM_CompactGroupManager::getCompactGroupNumber(env, region);
4061
if (region->_markData._shouldMark) {
4062
_reservedRegionList[compactGroup]._evacuateRegionCount += 1;
4063
} else {
4064
Assert_MM_true(MM_HeapRegionDescriptor::ADDRESS_ORDERED_MARKED == region->getRegionType());
4065
MM_MemoryPool *pool = region->getMemoryPool();
4066
/* only add regions with pools which could possibly satisfy a TLH allocation */
4067
if ((pool->getActualFreeMemorySize() >= pool->getMinimumFreeEntrySize()) &&
4068
((pool->getActualFreeMemorySize()/pool->getActualFreeEntryCount()) >= _extensions->freeSizeThresholdForSurvivor)
4069
) {
4070
Assert_MM_true(pool->getActualFreeMemorySize() < region->getSize());
4071
Assert_MM_false(region->isSurvivorRegion());
4072
insertFreeMemoryCandidate(env, &_reservedRegionList[compactGroup], region);
4073
}
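/*
 * Worked example (hypothetical numbers, not from this file): the candidate test above keeps a
 * region only if its pool holds at least one minimum-sized free entry and the pool's average free
 * entry size, getActualFreeMemorySize() divided by getActualFreeEntryCount(), is at least
 * freeSizeThresholdForSurvivor. For instance, 256KB of free memory spread over 8 free entries
 * averages 32KB per entry, which would be rejected if the threshold were 64KB but accepted if it
 * were 16KB.
 */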
4074
}
4075
}
4076
}
4077
4078
/* initialize the maximum number of sublists for each compact group; ensure that we try to produce fewer survivor regions than evacuate regions */
4079
for(UDATA index = 0; index < _compactGroupMaxCount; index++) {
4080
UDATA evacuateCount = _reservedRegionList[index]._evacuateRegionCount;
4081
/* Arbitrarily set the max to half the evacuate count. This means that, if it's possible, we'll use no more than half as many survivor regions as there were evacuate regions */
4082
UDATA maxSublistCount = evacuateCount / 2;
4083
maxSublistCount = OMR_MAX(maxSublistCount, 1);
4084
maxSublistCount = OMR_MIN(maxSublistCount, MM_ReservedRegionListHeader::MAX_SUBLISTS);
4085
_reservedRegionList[index]._maxSublistCount = maxSublistCount;
4086
}
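/*
 * Worked example (hypothetical count, not from this file): with 9 evacuate regions in a compact
 * group, maxSublistCount = OMR_MAX(9 / 2, 1) = 4, then clamped by OMR_MIN() to
 * MM_ReservedRegionListHeader::MAX_SUBLISTS; a group with 0 or 1 evacuate regions still gets 1 sublist.
 */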
4087
}
4088
4089
/* another thread clears the class loader remembered set */
4090
if (_extensions->tarokEnableIncrementalClassGC) {
4091
if (J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {
4092
MM_ClassLoaderRememberedSet *classLoaderRememberedSet = _extensions->classLoaderRememberedSet;
4093
classLoaderRememberedSet->resetRegionsToClear(env);
4094
MM_HeapRegionDescriptorVLHGC *region = NULL;
4095
GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);
4096
while(NULL != (region = regionIterator.nextRegion())) {
4097
if (region->_markData._shouldMark) {
4098
classLoaderRememberedSet->prepareToClearRememberedSetForRegion(env, region);
4099
}
4100
}
4101
classLoaderRememberedSet->clearRememberedSets(env);
4102
}
4103
}
4104
4105
4106
/* We want to clear all out-going references from the nursery set since those regions
4107
* will be walked and their precise out-going references will be used to reconstruct the RS
4108
*/
4109
_interRegionRememberedSet->clearFromRegionReferencesForCopyForward(env);
4110
4111
clearMarkMapForPartialCollect(env);
4112
4113
if (NULL != env->_cycleState->_externalCycleState) {
4114
rememberReferenceListsFromExternalCycle(env);
4115
}
4116
((MM_CopyForwardSchemeTask*)env->_currentTask)->synchronizeGCThreadsForInterRegionRememberedSet(env, UNIQUE_ID);
4117
4118
/* Enable hot-field depth copying when dynamicBreadthFirstScanOrdering is enabled */
4119
env->enableHotFieldDepthCopy();
4120
4121
/* scan roots before cleaning the card table since the roots give us more concrete NUMA recommendations */
4122
scanRoots(env);
4123
4124
cleanCardTable(env);
4125
4126
completeScan(env);
4127
4128
/* TODO: check if abort happened during root scanning/cardTable clearing (and optimize in any other way) */
4129
if(abortFlagRaised()) {
4130
Assert_MM_true(_abortInProgress);
4131
/* rescan to fix up root slots, and also to complete scanning of roots that we missed marking/pushing during the original root scan */
4132
scanRoots(env);
4133
4134
cleanCardTable(env);
4135
4136
completeScan(env);
4137
}
4138
/* Disable hot-field depth copying after root scanning and the main phase of the PGC cycle */
4139
env->disableHotFieldDepthCopy();
4140
4141
/* ensure that all buffers have been flushed before we start reference processing */
4142
env->getGCEnvironment()->_referenceObjectBuffer->flush(env);
4143
4144
UDATA preservedGcReadBarrierType = 0;
4145
if(env->_currentTask->synchronizeGCThreadsAndReleaseSingleThread(env, UNIQUE_ID)) {
4146
_clearableProcessingStarted = true;
4147
4148
/* During clearable pass, GC threads can access clearable slots other than the one they are directly processing.
4149
* Such other slots could still point to forwarded objects, and the forwarded pointer needs to be
4150
* resolved (at least in a thread-local sense) before the object can be accessed.
4151
* An example of that is string comparator, that may be used when removing
4152
* an entry from the string table, as part of AVL rebalancing.
4153
* String comparator happens to be used also in the context of mutator thread when adding new elements,
4154
* and it already uses Read Barrier (to support concurrent evacuating GCs).
4155
* That read barrier does exactly what we need for our clearable pass (in fact it does more:
4156
* it not only resolves the forwarded pointer locally but also fixes the slot, which is still correct for this pass). We just need
4157
* to enable the read barrier, if it is not already enabled.
4158
*/
4159
preservedGcReadBarrierType = _javaVM->gcReadBarrierType;
4160
_javaVM->gcReadBarrierType = J9_GC_READ_BARRIER_TYPE_ALWAYS;
4161
4162
/* Soft and weak references resurrected by finalization need to be cleared immediately since weak and soft processing has already completed.
4163
* This has to be set before unfinalized (and phantom) processing, because that processing can copy objects to a non-fresh region, in which case we do
4164
* not want to put GMP refs into the REMEMBERED state (we want to have a chance to put them back into the INITIAL state).
4165
*/
4166
env->_cycleState->_referenceObjectOptions |= MM_CycleState::references_clear_soft;
4167
env->_cycleState->_referenceObjectOptions |= MM_CycleState::references_clear_weak;
4168
/* since we need a sync point here anyway, use this opportunity to determine which regions contain weak and soft references or unfinalized objects */
4169
/* (we can't do phantom references yet because unfinalized processing may find more of them) */
4170
MM_HeapRegionDescriptorVLHGC *region = NULL;
4171
GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);
4172
while(NULL != (region = regionIterator.nextRegion())) {
4173
if (region->isSurvivorRegion() || region->_copyForwardData._evacuateSet) {
4174
region->getReferenceObjectList()->startSoftReferenceProcessing();
4175
region->getReferenceObjectList()->startWeakReferenceProcessing();
4176
}
4177
}
4178
env->_currentTask->releaseSynchronizedGCThreads(env);
4179
}
4180
4181
MM_CopyForwardSchemeRootClearer rootClearer(env, this);
4182
rootClearer.setStringTableAsRoot(!isCollectStringConstantsEnabled());
4183
rootClearer.scanClearable(env);
4184
4185
/* Clearable must not uncover any new work */
4186
Assert_MM_true(NULL == env->_workStack.popNoWait(env));
4187
4188
if(env->_currentTask->synchronizeGCThreadsAndReleaseSingleThread(env, UNIQUE_ID)) {
4189
_javaVM->gcReadBarrierType = preservedGcReadBarrierType;
4190
env->_currentTask->releaseSynchronizedGCThreads(env);
4191
}
4192
4193
if(!abortFlagRaised()) {
4194
clearCardTableForPartialCollect(env);
4195
}
4196
4197
/* make sure that we aren't leaving any stale scan work behind */
4198
Assert_MM_false(isAnyScanCacheWorkAvailable());
4199
4200
if(NULL != env->_cycleState->_externalCycleState) {
4201
updateOrDeleteObjectsFromExternalCycle(env);
4202
}
4203
4204
env->_workStack.flush(env);
4205
/* flush the buffer after clearable phase --- cmvc 198798 */
4206
/* flush the ownable synchronizer object buffer after rebuilding the ownableSynchronizerObjectList during the main scan phase */
4207
env->getGCEnvironment()->_ownableSynchronizerObjectBuffer->flush(env);
4208
4209
abandonTLHRemainders(env);
4210
4211
/* No matter what happens, always sum up the gc stats */
4212
mergeGCStats(env);
4213
4214
env->_copyForwardCompactGroups = NULL;
4215
4216
return ;
4217
}
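/*
 * Summary of the per-worker flow above (descriptive only): set up, seed the reserved region lists,
 * clear the mark map and remembered sets for the collection set, scan roots, clean the card table and
 * drain scan work; if an abort was raised, rescan roots and drain again; then run the clearable phase
 * under a temporarily-forced read barrier, clear the card table (when no abort occurred), flush the
 * remaining buffers and merge the per-thread GC statistics.
 */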
4218
4219
void
4220
MM_CopyForwardScheme::scanRoots(MM_EnvironmentVLHGC* env)
4221
{
4222
MM_CopyForwardSchemeRootScanner rootScanner(env, this);
4223
rootScanner.setStringTableAsRoot(!isCollectStringConstantsEnabled());
4224
rootScanner.setClassDataAsRoots(!isDynamicClassUnloadingEnabled());
4225
rootScanner.setIncludeStackFrameClassReferences(isDynamicClassUnloadingEnabled());
4226
4227
rootScanner.scanRoots(env);
4228
4229
/* Mark root set classes */
4230
#if defined(J9VM_GC_DYNAMIC_CLASS_UNLOADING)
4231
if(isDynamicClassUnloadingEnabled()) {
4232
/* A single thread processes all class loaders, marking any loader which has instances outside of the collection set. */
4233
if (J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {
4234
bool foundSystemClassLoader = false;
4235
bool foundApplicationClassLoader = false;
4236
bool foundAnonymousClassLoader = false;
4237
4238
MM_ClassLoaderRememberedSet *classLoaderRememberedSet = _extensions->classLoaderRememberedSet;
4239
GC_ClassLoaderIterator classLoaderIterator(_javaVM->classLoaderBlocks);
4240
J9ClassLoader *classLoader = NULL;
4241
4242
while (NULL != (classLoader = classLoaderIterator.nextSlot())) {
4243
if (0 == (classLoader->gcFlags & J9_GC_CLASS_LOADER_DEAD)) {
4244
if(J9_ARE_ANY_BITS_SET(classLoader->flags, J9CLASSLOADER_ANON_CLASS_LOADER)) {
4245
foundAnonymousClassLoader = true;
4246
/* The anonymous classloader should always be scanned at the level of its individual classes */
4247
GC_ClassLoaderSegmentIterator segmentIterator(classLoader, MEMORY_TYPE_RAM_CLASS);
4248
J9MemorySegment *segment = NULL;
4249
while(NULL != (segment = segmentIterator.nextSegment())) {
4250
GC_ClassHeapIterator classHeapIterator(_javaVM, segment);
4251
J9Class *clazz = NULL;
4252
while(NULL != (clazz = classHeapIterator.nextClass())) {
4253
if (classLoaderRememberedSet->isClassRemembered(env, clazz)) {
4254
MM_AllocationContextTarok *reservingContext = getContextForHeapAddress(clazz->classObject);
4255
copyAndForward(env, reservingContext, &clazz->classObject);
4256
}
4257
}
4258
}
4259
} else {
4260
if (classLoaderRememberedSet->isRemembered(env, classLoader)) {
4261
foundSystemClassLoader = foundSystemClassLoader || (classLoader == _javaVM->systemClassLoader);
4262
foundApplicationClassLoader = foundApplicationClassLoader || (classLoader == _javaVM->applicationClassLoader);
4263
if (NULL != classLoader->classLoaderObject) {
4264
/* until we decide if class loaders should be common, just relocate this object back into its existing node */
4265
MM_AllocationContextTarok *reservingContext = getContextForHeapAddress(classLoader->classLoaderObject);
4266
copyAndForward(env, reservingContext, &classLoader->classLoaderObject);
4267
} else {
4268
/* Only system/app classloaders can have a null classloader object (only during early bootstrap) */
4269
Assert_MM_true((classLoader == _javaVM->systemClassLoader) || (classLoader == _javaVM->applicationClassLoader));
4270
4271
/* We will never find the object for this class loader during scanning, so scan its class table immediately */
4272
GC_ClassLoaderClassesIterator iterator(_extensions, classLoader);
4273
J9Class *clazz = NULL;
4274
bool success = true;
4275
4276
while (success && (NULL != (clazz = iterator.nextClass()))) {
4277
Assert_MM_true(NULL != clazz->classObject);
4278
MM_AllocationContextTarok *clazzContext = getContextForHeapAddress(clazz->classObject);
4279
/* Copy/Forward the slot reference*/
4280
success = copyAndForward(env, clazzContext, (J9Object **)&(clazz->classObject));
4281
}
4282
4283
if (NULL != classLoader->moduleHashTable) {
4284
J9HashTableState walkState;
4285
J9Module **modulePtr = (J9Module **)hashTableStartDo(classLoader->moduleHashTable, &walkState);
4286
while (success && (NULL != modulePtr)) {
4287
J9Module * const module = *modulePtr;
4288
success = copyAndForward(env, getContextForHeapAddress(module->moduleObject), (J9Object **)&(module->moduleObject));
4289
if (success) {
4290
if (NULL != module->moduleName) {
4291
success = copyAndForward(env, getContextForHeapAddress(module->moduleName), (J9Object **)&(module->moduleName));
4292
}
4293
}
4294
if (success) {
4295
if (NULL != module->version) {
4296
success = copyAndForward(env, getContextForHeapAddress(module->version), (J9Object **)&(module->version));
4297
}
4298
}
4299
modulePtr = (J9Module**)hashTableNextDo(&walkState);
4300
}
4301
4302
if (success && (classLoader == _javaVM->systemClassLoader)) {
4303
success = copyAndForward(env, getContextForHeapAddress(_javaVM->unamedModuleForSystemLoader->moduleObject), (J9Object **)&(_javaVM->unamedModuleForSystemLoader->moduleObject));
4304
}
4305
}
4306
}
4307
}
4308
}
4309
}
4310
}
4311
4312
/* verify that we found the permanent class loaders in the above loop */
4313
Assert_MM_true(NULL != _javaVM->systemClassLoader);
4314
Assert_MM_true(foundSystemClassLoader);
4315
Assert_MM_true( (NULL == _javaVM->applicationClassLoader) || foundApplicationClassLoader );
4316
Assert_MM_true(NULL != _javaVM->anonClassLoader);
4317
Assert_MM_true(foundAnonymousClassLoader);
4318
}
4319
}
4320
#endif /* J9VM_GC_DYNAMIC_CLASS_UNLOADING */
4321
}
4322
4323
void
4324
MM_CopyForwardScheme::verifyDumpObjectDetails(MM_EnvironmentVLHGC *env, const char *title, J9Object *object)
4325
{
4326
PORT_ACCESS_FROM_ENVIRONMENT(env);
4327
4328
j9tty_printf(PORTLIB, "%s: %p\n", title, object);
4329
4330
if(NULL != object) {
4331
MM_HeapRegionDescriptorVLHGC *region = (MM_HeapRegionDescriptorVLHGC *)_regionManager->tableDescriptorForAddress(object);
4332
4333
j9tty_printf(PORTLIB, "\tregion:%p base:%p top:%p regionProperties:%u\n",
4334
region,
4335
region->getLowAddress(),
4336
region->getHighAddress(),
4337
region->getRegionProperties()
4338
);
4339
4340
j9tty_printf(PORTLIB, "\t\tbitSet:%c externalBitSet:%c shouldMark:%c initialLiveSet:%c survivorSet:%c freshSurvivorSet:%c age:%zu\n",
4341
_markMap->isBitSet(object) ? 'Y' : 'N',
4342
(NULL == env->_cycleState->_externalCycleState) ? 'N' : (env->_cycleState->_externalCycleState->_markMap->isBitSet(object) ? 'Y' : 'N'),
4343
region->_markData._shouldMark ? 'Y' : 'N',
4344
region->_copyForwardData._initialLiveSet ? 'Y' : 'N',
4345
region->isSurvivorRegion() ? 'Y' : 'N',
4346
region->isFreshSurvivorRegion() ? 'Y' : 'N',
4347
region->getLogicalAge()
4348
);
4349
}
4350
}
4351
4352
class MM_CopyForwardVerifyScanner : public MM_RootScanner
4353
{
4354
public:
4355
protected:
4356
private:
4357
MM_CopyForwardScheme *_copyForwardScheme; /**< Local reference back to the copy forward scheme driving the collection */
4358
4359
private:
4360
void verifyObject(J9Object **slotPtr)
4361
{
4362
MM_EnvironmentVLHGC *env = MM_EnvironmentVLHGC::getEnvironment(_env);
4363
4364
J9Object *objectPtr = *slotPtr;
4365
if(!_copyForwardScheme->_abortInProgress && !_copyForwardScheme->isObjectInNoEvacuationRegions(env, objectPtr) && _copyForwardScheme->verifyIsPointerInEvacute(env, objectPtr)) {
4366
PORT_ACCESS_FROM_ENVIRONMENT(env);
4367
j9tty_printf(PORTLIB, "Root slot points into evacuate! Slot %p dstObj %p. RootScannerEntity=%zu\n", slotPtr, objectPtr, (UDATA)_scanningEntity);
4368
Assert_MM_unreachable();
4369
}
4370
}
4371
4372
virtual void doSlot(J9Object **slotPtr) {
4373
verifyObject(slotPtr);
4374
}
4375
4376
virtual void doStackSlot(J9Object **slotPtr, void *walkState, const void* stackLocation) {
4377
if (_copyForwardScheme->isHeapObject(*slotPtr)) {
4378
/* heap object - validate and mark */
4379
Assert_MM_validStackSlot(MM_StackSlotValidator(MM_StackSlotValidator::COULD_BE_FORWARDED, *slotPtr, stackLocation, walkState).validate(_env));
4380
verifyObject(slotPtr);
4381
Assert_MM_mustBeClass(J9GC_J9OBJECT_CLAZZ_THREAD(*slotPtr, ((J9StackWalkState*)walkState)->walkThread));
4382
} else if (NULL != *slotPtr) {
4383
/* stack object - just validate */
4384
Assert_MM_validStackSlot(MM_StackSlotValidator(MM_StackSlotValidator::NOT_ON_HEAP, *slotPtr, stackLocation, walkState).validate(_env));
4385
}
4386
}
4387
4388
virtual void doVMThreadSlot(J9Object **slotPtr, GC_VMThreadIterator *vmThreadIterator) {
4389
if (_copyForwardScheme->isHeapObject(*slotPtr)) {
4390
verifyObject(slotPtr);
4391
Assert_MM_mustBeClass(J9GC_J9OBJECT_CLAZZ_THREAD(*slotPtr, vmThreadIterator->getVMThread()));
4392
} else if (NULL != *slotPtr) {
4393
Assert_MM_true(vmthreaditerator_state_monitor_records == vmThreadIterator->getState());
4394
Assert_MM_mustBeClass(J9GC_J9OBJECT_CLAZZ_THREAD(*slotPtr, vmThreadIterator->getVMThread()));
4395
}
4396
}
4397
4398
virtual void doClass(J9Class *clazz) {
4399
J9Object *classObject = (J9Object *)clazz->classObject;
4400
if(NULL != classObject) {
4401
if (_copyForwardScheme->isDynamicClassUnloadingEnabled() && !_copyForwardScheme->isLiveObject(classObject)) {
4402
/* don't verify garbage collected classes */
4403
} else {
4404
_copyForwardScheme->verifyClassObjectSlots(MM_EnvironmentVLHGC::getEnvironment(_env), classObject);
4405
}
4406
}
4407
}
4408
4409
virtual void doClassLoader(J9ClassLoader *classLoader) {
4410
J9Object *classLoaderObject = J9GC_J9CLASSLOADER_CLASSLOADEROBJECT(classLoader);
4411
if(NULL != classLoaderObject) {
4412
if (_copyForwardScheme->isDynamicClassUnloadingEnabled() && !_copyForwardScheme->isLiveObject(classLoaderObject)) {
4413
/* don't verify garbage collected class loaders */
4414
} else {
4415
verifyObject(J9GC_J9CLASSLOADER_CLASSLOADEROBJECT_EA(classLoader));
4416
}
4417
}
4418
}
4419
4420
#if defined(J9VM_GC_FINALIZATION)
4421
virtual void doUnfinalizedObject(J9Object *objectPtr, MM_UnfinalizedObjectList *list) {
4422
MM_EnvironmentVLHGC *env = MM_EnvironmentVLHGC::getEnvironment(_env);
4423
4424
if(!_copyForwardScheme->_abortInProgress && !_copyForwardScheme->isObjectInNoEvacuationRegions(env, objectPtr) && _copyForwardScheme->verifyIsPointerInEvacute(env, objectPtr)) {
4425
PORT_ACCESS_FROM_ENVIRONMENT(env);
4426
j9tty_printf(PORTLIB, "Unfinalized object list points into evacuate! list %p object %p\n", list, objectPtr);
4427
Assert_MM_unreachable();
4428
}
4429
}
4430
#endif /* J9VM_GC_FINALIZATION */
4431
4432
#if defined(J9VM_GC_FINALIZATION)
4433
virtual void doFinalizableObject(j9object_t objectPtr) {
4434
MM_EnvironmentVLHGC *env = MM_EnvironmentVLHGC::getEnvironment(_env);
4435
4436
if(!_copyForwardScheme->_abortInProgress && !_copyForwardScheme->isObjectInNoEvacuationRegions(env, objectPtr) && _copyForwardScheme->verifyIsPointerInEvacute(env, objectPtr)) {
4437
PORT_ACCESS_FROM_ENVIRONMENT(env);
4438
j9tty_printf(PORTLIB, "Finalizable object in evacuate! object %p\n", objectPtr);
4439
Assert_MM_unreachable();
4440
}
4441
}
4442
#endif /* J9VM_GC_FINALIZATION */
4443
4444
virtual void doOwnableSynchronizerObject(J9Object *objectPtr, MM_OwnableSynchronizerObjectList *list) {
4445
MM_EnvironmentVLHGC *env = MM_EnvironmentVLHGC::getEnvironment(_env);
4446
4447
if(!_copyForwardScheme->_abortInProgress && !_copyForwardScheme->isObjectInNoEvacuationRegions(env, objectPtr) && _copyForwardScheme->verifyIsPointerInEvacute(env, objectPtr)) {
4448
PORT_ACCESS_FROM_ENVIRONMENT(env);
4449
j9tty_printf(PORTLIB, "OwnableSynchronizer object list points into evacuate! list %p object %p\n", list, objectPtr);
4450
Assert_MM_unreachable();
4451
}
4452
}
4453
4454
public:
4455
MM_CopyForwardVerifyScanner(MM_EnvironmentVLHGC *env, MM_CopyForwardScheme *copyForwardScheme) :
4456
MM_RootScanner(env, true),
4457
_copyForwardScheme(copyForwardScheme)
4458
{
4459
_typeId = __FUNCTION__;
4460
};
4461
4462
protected:
4463
private:
4464
4465
};
4466
4467
void
4468
MM_CopyForwardScheme::verifyCopyForwardResult(MM_EnvironmentVLHGC *env)
4469
{
4470
/* Destination regions verifying their integrity */
4471
GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);
4472
MM_HeapRegionDescriptorVLHGC *region = NULL;
4473
4474
while(NULL != (region = regionIterator.nextRegion())) {
4475
if(region->isArrayletLeaf()) {
4476
J9Object *spineObject = (J9Object *)region->_allocateData.getSpine();
4477
Assert_MM_true(NULL != spineObject);
4478
/* the spine must be marked if it was copied as a live object or if we aborted the copy-forward */
4479
/* otherwise, it must not be forwarded (since that would imply that the spine survived but the pointer wasn't updated) */
4480
if(!_markMap->isBitSet(spineObject)) {
4481
MM_ForwardedHeader forwardedSpine(spineObject, _extensions->compressObjectReferences());
4482
if (forwardedSpine.isForwardedPointer()) {
4483
PORT_ACCESS_FROM_ENVIRONMENT(env);
4484
j9tty_printf(PORTLIB, "Spine pointer is not marked and is forwarded (leaf region's pointer to spine not updated)! Region %p Spine %p (should be %p)\n", region, spineObject, forwardedSpine.getForwardedObject());
4485
verifyDumpObjectDetails(env, "spineObject", spineObject);
4486
Assert_MM_unreachable();
4487
}
4488
}
4489
} else {
4490
if(region->containsObjects()) {
4491
if(region->isSurvivorRegion()) {
4492
if (region->isFreshSurvivorRegion()) {
4493
verifyChunkSlotsAndMapSlotsInRange(env, (UDATA *)region->getLowAddress(), (UDATA *)region->getHighAddress());
4494
} else {
4495
/* iterating from isCompressedSurvivor */
4496
GC_SurvivorMemoryIterator survivorIterator(env, region, _compressedSurvivorTable);
4497
while (survivorIterator.next()) {
4498
verifyChunkSlotsAndMapSlotsInRange(env, (UDATA *)survivorIterator.getCurrentLow(), (UDATA *)survivorIterator.getCurrentHigh());
4499
}
4500
}
4501
}
4502
4503
if(region->_copyForwardData._initialLiveSet) {
4504
/* iterating from isNotCompressedSurvivor */
4505
GC_SurvivorMemoryIterator survivorIterator(env, region, _compressedSurvivorTable, false);
4506
while (survivorIterator.next()) {
4507
verifyObjectsInRange(env, (UDATA *)survivorIterator.getCurrentLow(), (UDATA *)survivorIterator.getCurrentHigh());
4508
}
4509
}
4510
}
4511
}
4512
}
4513
4514
MM_CopyForwardVerifyScanner scanner(env, this);
4515
scanner.scanAllSlots(env);
4516
4517
if(NULL != env->_cycleState->_externalCycleState) {
4518
verifyExternalState(env);
4519
}
4520
}
4521
4522
void
4523
MM_CopyForwardScheme::verifyObject(MM_EnvironmentVLHGC *env, J9Object *objectPtr)
4524
{
4525
J9Class* clazz = J9GC_J9OBJECT_CLAZZ(objectPtr, env);
4526
Assert_MM_mustBeClass(clazz);
4527
switch(_extensions->objectModel.getScanType(clazz)) {
4528
case GC_ObjectModel::SCAN_MIXED_OBJECT_LINKED:
4529
case GC_ObjectModel::SCAN_ATOMIC_MARKABLE_REFERENCE_OBJECT:
4530
case GC_ObjectModel::SCAN_MIXED_OBJECT:
4531
case GC_ObjectModel::SCAN_OWNABLESYNCHRONIZER_OBJECT:
4532
verifyMixedObjectSlots(env, objectPtr);
4533
break;
4534
case GC_ObjectModel::SCAN_CLASS_OBJECT:
4535
verifyClassObjectSlots(env, objectPtr);
4536
break;
4537
case GC_ObjectModel::SCAN_CLASSLOADER_OBJECT:
4538
verifyClassLoaderObjectSlots(env, objectPtr);
4539
break;
4540
case GC_ObjectModel::SCAN_POINTER_ARRAY_OBJECT:
4541
verifyPointerArrayObjectSlots(env, objectPtr);
4542
break;
4543
case GC_ObjectModel::SCAN_REFERENCE_MIXED_OBJECT:
4544
verifyReferenceObjectSlots(env, objectPtr);
4545
break;
4546
case GC_ObjectModel::SCAN_PRIMITIVE_ARRAY_OBJECT:
4547
/* nothing to do */
4548
break;
4549
default:
4550
Assert_MM_unreachable();
4551
}
4552
}
4553
4554
void
4555
MM_CopyForwardScheme::verifyMixedObjectSlots(MM_EnvironmentVLHGC *env, J9Object *objectPtr)
4556
{
4557
GC_MixedObjectIterator mixedObjectIterator(_javaVM->omrVM, objectPtr);
4558
GC_SlotObject *slotObject = NULL;
4559
4560
while (NULL != (slotObject = mixedObjectIterator.nextSlot())) {
4561
J9Object *dstObject = slotObject->readReferenceFromSlot();
4562
if(!_abortInProgress && !isObjectInNoEvacuationRegions(env, dstObject) && verifyIsPointerInEvacute(env, dstObject)) {
4563
PORT_ACCESS_FROM_ENVIRONMENT(env);
4564
j9tty_printf(PORTLIB, "Mixed object slot points to evacuate! srcObj %p slot %p dstObj %p\n", objectPtr, slotObject->readAddressFromSlot(), dstObject);
4565
verifyDumpObjectDetails(env, "srcObj", objectPtr);
4566
verifyDumpObjectDetails(env, "dstObj", dstObject);
4567
Assert_MM_unreachable();
4568
}
4569
if((NULL != dstObject) && !_markMap->isBitSet(dstObject)) {
4570
PORT_ACCESS_FROM_ENVIRONMENT(env);
4571
j9tty_printf(PORTLIB, "Mixed object slot points to unmarked object! srcObj %p slot %p dstObj %p\n", objectPtr, slotObject->readAddressFromSlot(), dstObject);
4572
verifyDumpObjectDetails(env, "srcObj", objectPtr);
4573
verifyDumpObjectDetails(env, "dstObj", dstObject);
4574
Assert_MM_unreachable();
4575
}
4576
}
4577
}
4578
4579
void
4580
MM_CopyForwardScheme::verifyReferenceObjectSlots(MM_EnvironmentVLHGC *env, J9Object *objectPtr)
4581
{
4582
fj9object_t referentToken = J9GC_J9VMJAVALANGREFERENCE_REFERENT(env, objectPtr);
4583
J9Object* referentPtr = _extensions->accessBarrier->convertPointerFromToken(referentToken);
4584
if(!_abortInProgress && !isObjectInNoEvacuationRegions(env, referentPtr) && verifyIsPointerInEvacute(env, referentPtr)) {
4585
PORT_ACCESS_FROM_ENVIRONMENT(env);
4586
j9tty_printf(PORTLIB, "RefMixed referent slot points to evacuate! srcObj %p dstObj %p\n", objectPtr, referentPtr);
4587
Assert_MM_unreachable();
4588
}
4589
if((NULL != referentPtr) && !_markMap->isBitSet(referentPtr)) {
4590
PORT_ACCESS_FROM_ENVIRONMENT(env);
4591
j9tty_printf(PORTLIB, "RefMixed referent slot points to unmarked object! srcObj %p dstObj %p\n", objectPtr, referentPtr);
4592
verifyDumpObjectDetails(env, "srcObj", objectPtr);
4593
verifyDumpObjectDetails(env, "referentPtr", referentPtr);
4594
Assert_MM_unreachable();
4595
}
4596
4597
GC_MixedObjectIterator mixedObjectIterator(_javaVM->omrVM, objectPtr);
4598
GC_SlotObject *slotObject = NULL;
4599
4600
while (NULL != (slotObject = mixedObjectIterator.nextSlot())) {
4601
J9Object *dstObject = slotObject->readReferenceFromSlot();
4602
if(!_abortInProgress && !isObjectInNoEvacuationRegions(env, dstObject) && verifyIsPointerInEvacute(env, dstObject)) {
4603
PORT_ACCESS_FROM_ENVIRONMENT(env);
4604
j9tty_printf(PORTLIB, "RefMixed object slot points to evacuate! srcObj %p slot %p dstObj %p\n", objectPtr, slotObject->readAddressFromSlot(), dstObject);
4605
Assert_MM_unreachable();
4606
}
4607
if((NULL != dstObject) && !_markMap->isBitSet(dstObject)) {
4608
PORT_ACCESS_FROM_ENVIRONMENT(env);
4609
j9tty_printf(PORTLIB, "RefMixed object slot points to unmarked object! srcObj %p slot %p dstObj %p\n", objectPtr, slotObject->readAddressFromSlot(), dstObject);
4610
verifyDumpObjectDetails(env, "srcObj", objectPtr);
4611
verifyDumpObjectDetails(env, "dstPtr", dstObject);
4612
Assert_MM_unreachable();
4613
}
4614
}
4615
}
4616
4617
void
4618
MM_CopyForwardScheme::verifyPointerArrayObjectSlots(MM_EnvironmentVLHGC *env, J9Object *objectPtr)
4619
{
4620
GC_PointerArrayIterator pointerArrayIterator(_javaVM, objectPtr);
4621
GC_SlotObject *slotObject = NULL;
4622
4623
while((slotObject = pointerArrayIterator.nextSlot()) != NULL) {
4624
J9Object *dstObject = slotObject->readReferenceFromSlot();
4625
if(!_abortInProgress && !isObjectInNoEvacuationRegions(env, dstObject) && verifyIsPointerInEvacute(env, dstObject)) {
4626
PORT_ACCESS_FROM_ENVIRONMENT(env);
4627
j9tty_printf(PORTLIB, "Pointer array slot points to evacuate! srcObj %p slot %p dstObj %p\n", objectPtr, slotObject->readAddressFromSlot(), dstObject);
4628
Assert_MM_unreachable();
4629
}
4630
if((NULL != dstObject) && !_markMap->isBitSet(dstObject)) {
4631
PORT_ACCESS_FROM_ENVIRONMENT(env);
4632
j9tty_printf(PORTLIB, "Pointer array slot points to unmarked object! srcObj %p slot %p dstObj %p\n", objectPtr, slotObject->readAddressFromSlot(), dstObject);
4633
verifyDumpObjectDetails(env, "srcObj", objectPtr);
4634
verifyDumpObjectDetails(env, "dstObj", dstObject);
4635
Assert_MM_unreachable();
4636
}
4637
}
4638
}
4639
4640
void
4641
MM_CopyForwardScheme::verifyClassObjectSlots(MM_EnvironmentVLHGC *env, J9Object *classObject)
4642
{
4643
verifyMixedObjectSlots(env, classObject);
4644
4645
J9Class *classPtr = J9VM_J9CLASS_FROM_HEAPCLASS((J9VMThread*)env->getLanguageVMThread(), classObject);
4646
4647
if (NULL != classPtr) {
4648
volatile j9object_t * slotPtr = NULL;
4649
4650
do {
4651
/*
4652
* scan static fields
4653
*/
4654
GC_ClassStaticsIterator classStaticsIterator(env, classPtr);
4655
while(NULL != (slotPtr = classStaticsIterator.nextSlot())) {
4656
J9Object *dstObject = *slotPtr;
4657
if(!_abortInProgress && !isObjectInNoEvacuationRegions(env, dstObject) && verifyIsPointerInEvacute(env, dstObject)) {
4658
PORT_ACCESS_FROM_ENVIRONMENT(env);
4659
j9tty_printf(PORTLIB, "Class static slot points to evacuate! srcObj %p J9Class %p slot %p dstObj %p\n", classObject, classPtr, slotPtr, dstObject);
4660
Assert_MM_unreachable();
4661
}
4662
if((NULL != dstObject) && !_markMap->isBitSet(dstObject)) {
4663
PORT_ACCESS_FROM_ENVIRONMENT(env);
4664
j9tty_printf(PORTLIB, "Class static slot points to unmarked object! srcObj %p J9Class %p slot %p dstObj %p\n", classObject, classPtr, slotPtr, dstObject);
4665
verifyDumpObjectDetails(env, "classObject", classObject);
4666
verifyDumpObjectDetails(env, "dstObj", dstObject);
4667
Assert_MM_unreachable();
4668
}
4669
}
4670
4671
/*
4672
* scan call sites
4673
*/
4674
GC_CallSitesIterator callSitesIterator(classPtr);
4675
while(NULL != (slotPtr = callSitesIterator.nextSlot())) {
4676
J9Object *dstObject = *slotPtr;
4677
if(!_abortInProgress && !isObjectInNoEvacuationRegions(env, dstObject) && verifyIsPointerInEvacute(env, dstObject)) {
4678
PORT_ACCESS_FROM_ENVIRONMENT(env);
4679
j9tty_printf(PORTLIB, "Class call site slot points to evacuate! srcObj %p J9Class %p slot %p dstObj %p\n", classObject, classPtr, slotPtr, dstObject);
4680
Assert_MM_unreachable();
4681
}
4682
if((NULL != dstObject) && !_markMap->isBitSet(dstObject)) {
4683
PORT_ACCESS_FROM_ENVIRONMENT(env);
4684
j9tty_printf(PORTLIB, "Class call site slot points to unmarked object! srcObj %p J9Class %p slot %p dstObj %p\n", classObject, classPtr, slotPtr, dstObject);
4685
verifyDumpObjectDetails(env, "classObject", classObject);
4686
verifyDumpObjectDetails(env, "dstObj", dstObject);
4687
Assert_MM_unreachable();
4688
}
4689
}
4690
4691
/*
4692
* scan MethodTypes
4693
*/
4694
#if defined(J9VM_OPT_OPENJDK_METHODHANDLE)
4695
GC_MethodTypesIterator methodTypesIterator(classPtr->romClass->invokeCacheCount, classPtr->invokeCache);
4696
#else /* defined(J9VM_OPT_OPENJDK_METHODHANDLE) */
4697
GC_MethodTypesIterator methodTypesIterator(classPtr->romClass->methodTypeCount, classPtr->methodTypes);
4698
#endif /* defined(J9VM_OPT_OPENJDK_METHODHANDLE) */
4699
4700
while(NULL != (slotPtr = methodTypesIterator.nextSlot())) {
4701
J9Object *dstObject = *slotPtr;
4702
if(!_abortInProgress && !isObjectInNoEvacuationRegions(env, dstObject) && verifyIsPointerInEvacute(env, dstObject)) {
4703
PORT_ACCESS_FROM_ENVIRONMENT(env);
4704
j9tty_printf(PORTLIB, "Class MethodType slot points to evacuate! srcObj %p J9Class %p slot %p dstObj %p\n", classObject, classPtr, slotPtr, dstObject);
4705
Assert_MM_unreachable();
4706
}
4707
if((NULL != dstObject) && !_markMap->isBitSet(dstObject)) {
4708
PORT_ACCESS_FROM_ENVIRONMENT(env);
4709
j9tty_printf(PORTLIB, "Class MethodType slot points to unmarked object! srcObj %p J9Class %p slot %p dstObj %p\n", classObject, classPtr, slotPtr, dstObject);
4710
verifyDumpObjectDetails(env, "classObject", classObject);
4711
verifyDumpObjectDetails(env, "dstObj", dstObject);
4712
Assert_MM_unreachable();
4713
}
4714
}
4715
4716
/*
4717
* scan VarHandle MethodTypes
4718
*/
4719
#if defined(J9VM_OPT_METHOD_HANDLE)
4720
GC_MethodTypesIterator varHandleMethodTypesIterator(classPtr->romClass->varHandleMethodTypeCount, classPtr->varHandleMethodTypes);
4721
while(NULL != (slotPtr = varHandleMethodTypesIterator.nextSlot())) {
4722
J9Object *dstObject = *slotPtr;
4723
if(!_abortInProgress && !isObjectInNoEvacuationRegions(env, dstObject) && verifyIsPointerInEvacute(env, dstObject)) {
4724
PORT_ACCESS_FROM_ENVIRONMENT(env);
4725
j9tty_printf(PORTLIB, "Class MethodType slot points to evacuate! srcObj %p J9Class %p slot %p dstObj %p\n", classObject, classPtr, slotPtr, dstObject);
4726
Assert_MM_unreachable();
4727
}
4728
if((NULL != dstObject) && !_markMap->isBitSet(dstObject)) {
4729
PORT_ACCESS_FROM_ENVIRONMENT(env);
4730
j9tty_printf(PORTLIB, "Class MethodType slot points to unmarked object! srcObj %p J9Class %p slot %p dstObj %p\n", classObject, classPtr, slotPtr, dstObject);
4731
verifyDumpObjectDetails(env, "classObject", classObject);
4732
verifyDumpObjectDetails(env, "dstObj", dstObject);
4733
Assert_MM_unreachable();
4734
}
4735
}
4736
#endif /* defined(J9VM_OPT_METHOD_HANDLE) */
4737
4738
/*
4739
* scan constant pool objects
4740
*/
4741
/* we can safely ignore any classes referenced by the constant pool, since
4742
* these are guaranteed to be referenced by our class loader
4743
*/
4744
GC_ConstantPoolObjectSlotIterator constantPoolIterator(_javaVM, classPtr);
4745
while(NULL != (slotPtr = constantPoolIterator.nextSlot())) {
4746
J9Object *dstObject = *slotPtr;
4747
if(!_abortInProgress && !isObjectInNoEvacuationRegions(env, dstObject) && verifyIsPointerInEvacute(env, dstObject)) {
4748
PORT_ACCESS_FROM_ENVIRONMENT(env);
4749
j9tty_printf(PORTLIB, "Class CP slot points to evacuate! srcObj %p J9Class %p slot %p dstObj %p\n", classObject, classPtr, slotPtr, dstObject);
4750
Assert_MM_unreachable();
4751
}
4752
if((NULL != dstObject) && !_markMap->isBitSet(dstObject)) {
4753
PORT_ACCESS_FROM_ENVIRONMENT(env);
4754
j9tty_printf(PORTLIB, "Class CP slot points to unmarked object! srcObj %p J9Class %p slot %p dstObj %p\n", classObject, classPtr, slotPtr, dstObject);
4755
verifyDumpObjectDetails(env, "classObject", classObject);
4756
verifyDumpObjectDetails(env, "dstObj", dstObject);
4757
Assert_MM_unreachable();
4758
}
4759
}
4760
classPtr = classPtr->replacedClass;
4761
} while (NULL != classPtr);
4762
}
4763
}
4764
4765
void
4766
MM_CopyForwardScheme::verifyClassLoaderObjectSlots(MM_EnvironmentVLHGC *env, J9Object *classLoaderObject)
4767
{
4768
verifyMixedObjectSlots(env, classLoaderObject);
4769
4770
J9ClassLoader *classLoader = J9VMJAVALANGCLASSLOADER_VMREF((J9VMThread*)env->getLanguageVMThread(), classLoaderObject);
4771
if ((NULL != classLoader) && (0 == (classLoader->flags & J9CLASSLOADER_ANON_CLASS_LOADER))) {
4772
/* No lock is required because this only runs under exclusive access */
4773
/* (NULL == classLoader->classHashTable) is true ONLY for DEAD class loaders */
4774
Assert_MM_true(NULL != classLoader->classHashTable);
4775
GC_ClassLoaderClassesIterator iterator(_extensions, classLoader);
4776
J9Class *clazz = NULL;
4777
while (NULL != (clazz = iterator.nextClass())) {
4778
if (!_abortInProgress && !isObjectInNoEvacuationRegions(env, (J9Object *)clazz->classObject) && verifyIsPointerInEvacute(env, (J9Object *)clazz->classObject)) {
4779
PORT_ACCESS_FROM_ENVIRONMENT(env);
4780
j9tty_printf(PORTLIB, "Class loader table class object points to evacuate! srcObj %p clazz %p clazzObj %p\n", classLoaderObject, clazz, clazz->classObject);
4781
Assert_MM_unreachable();
4782
}
4783
if ((NULL != clazz->classObject) && !_markMap->isBitSet((J9Object *)clazz->classObject)) {
4784
PORT_ACCESS_FROM_ENVIRONMENT(env);
4785
j9tty_printf(PORTLIB, "Class loader table class object points to unmarked object! srcObj %p clazz %p clazzObj %p\n", classLoaderObject, clazz, clazz->classObject);
4786
verifyDumpObjectDetails(env, "classLoaderObject", classLoaderObject);
4787
verifyDumpObjectDetails(env, "classObject", (J9Object *)clazz->classObject);
4788
Assert_MM_unreachable();
4789
}
4790
}
4791
}
4792
}
4793
4794
void
4795
MM_CopyForwardScheme::verifyExternalState(MM_EnvironmentVLHGC *env)
4796
{
4797
/* this function has knowledge of the collection set, which is only valid during a PGC */
4798
Assert_MM_true(NULL != env->_cycleState->_externalCycleState);
4799
4800
MM_MarkMap *externalMarkMap = env->_cycleState->_externalCycleState->_markMap;
4801
Assert_MM_true(externalMarkMap != _markMap);
4802
4803
MM_HeapRegionDescriptorVLHGC *region = NULL;
4804
GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);
4805
while(NULL != (region = regionIterator.nextRegion())) {
4806
if(region->containsObjects()) {
4807
if(region->_markData._shouldMark) {
4808
Assert_MM_true(region->_copyForwardData._initialLiveSet);
4809
4810
if(_abortInProgress || region->_markData._noEvacuation) {
4811
MM_HeapMapIterator mapIterator(_extensions, externalMarkMap, (UDATA *)region->getLowAddress(), (UDATA *)region->getHighAddress(), false);
4812
J9Object *objectPtr = NULL;
4813
4814
while(NULL != (objectPtr = mapIterator.nextObject())) {
4815
Assert_MM_true(_markMap->isBitSet(objectPtr));
4816
}
4817
} else {
4818
/* Evacuate space - make sure the GMP mark map is clear */
4819
UDATA lowIndex = externalMarkMap->getSlotIndex((J9Object *)region->getLowAddress());
4820
UDATA highIndex = externalMarkMap->getSlotIndex((J9Object *)region->getHighAddress());
4821
4822
for(UDATA slotIndex = lowIndex; slotIndex < highIndex; slotIndex++) {
4823
Assert_MM_true(0 == externalMarkMap->getSlot(slotIndex));
4824
}
4825
}
4826
} else if (region->isSurvivorRegion()) {
4827
/* Survivor space - check that anything marked in the GMP map is also marked in the PGC map */
4828
if (region->isFreshSurvivorRegion()) {
4829
checkConsistencyGMPMapAndPGCMap(env, region, (UDATA *)region->getLowAddress(), (UDATA *)region->getHighAddress());
4830
} else {
4831
/* iterating from isCompressedSurvivor */
4832
GC_SurvivorMemoryIterator survivorIterator(env, region, _compressedSurvivorTable);
4833
while (survivorIterator.next()) {
4834
checkConsistencyGMPMapAndPGCMap(env, region, (UDATA *)survivorIterator.getCurrentLow(), (UDATA *)survivorIterator.getCurrentHigh());
4835
}
4836
}
4837
}
4838
}
4839
}
4840
4841
/* Check that no object in the work packets appears in the evacuate space.
4842
* If it appears in survivor, verify that both map bits are set.
4843
*/
4844
MM_WorkPacketsIterator packetIterator(env, env->_cycleState->_externalCycleState->_workPackets);
4845
MM_Packet *packet = NULL;
4846
while (NULL != (packet = packetIterator.nextPacket(env))) {
4847
if (!packet->isEmpty()) {
4848
/* there is data in this packet so use it */
4849
MM_PacketSlotIterator slotIterator(packet);
4850
J9Object **slot = NULL;
4851
while (NULL != (slot = slotIterator.nextSlot())) {
4852
J9Object *object = *slot;
4853
Assert_MM_true(NULL != object);
4854
if (PACKET_INVALID_OBJECT != (UDATA)object) {
4855
Assert_MM_false(!_abortInProgress && !isObjectInNoEvacuationRegions(env, object) && verifyIsPointerInEvacute(env, object));
4856
Assert_MM_true(!verifyIsPointerInSurvivor(env, object) || (_markMap->isBitSet(object) && externalMarkMap->isBitSet(object)));
4857
}
4858
}
4859
}
4860
}
4861
}
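/*
 * Descriptive note on the checks above: for collection-set regions the GMP (external) mark map must
 * either be a subset of the PGC map (abort or no-evacuation case) or completely clear; for survivor
 * regions every GMP-marked object must also be PGC-marked; and no work-packet entry may point into
 * evacuate space (unless an abort occurred or the region is marked no-evacuation), while entries in
 * survivor space must be marked in both maps.
 */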
4862
4863
bool
4864
MM_CopyForwardScheme::verifyIsPointerInSurvivor(MM_EnvironmentVLHGC *env, J9Object *object)
4865
{
4866
if(NULL == object) {
4867
return false;
4868
}
4869
MM_HeapRegionDescriptorVLHGC *region = (MM_HeapRegionDescriptorVLHGC *)_regionManager->physicalTableDescriptorForAddress(object);
4870
bool result = region->isFreshSurvivorRegion();
4871
if (!result && region->isSurvivorRegion()) {
4872
result = isCompressedSurvivor((void*)object);
4873
}
4874
return result;
4875
}
4876
4877
bool
4878
MM_CopyForwardScheme::verifyIsPointerInEvacute(MM_EnvironmentVLHGC *env, J9Object *object)
4879
{
4880
if(NULL == object) {
4881
return false;
4882
}
4883
4884
MM_HeapRegionDescriptorVLHGC *region = NULL;
4885
region = (MM_HeapRegionDescriptorVLHGC *)_regionManager->physicalTableDescriptorForAddress(object);
4886
return region->_markData._shouldMark;
4887
}
4888
4889
void
4890
MM_CopyForwardScheme::verifyObjectsInRange(MM_EnvironmentVLHGC *env, UDATA *lowAddress, UDATA *highAddress)
4891
{
4892
MM_HeapMapIterator iterator(_extensions, _markMap, lowAddress, highAddress, false);
4893
J9Object *objectPtr = NULL;
4894
while (NULL != (objectPtr = (iterator.nextObject()))) {
4895
verifyObject(env, objectPtr);
4896
}
4897
}
4898
4899
void
4900
MM_CopyForwardScheme::verifyChunkSlotsAndMapSlotsInRange(MM_EnvironmentVLHGC *env, UDATA *lowAddress, UDATA *highAddress)
4901
{
4902
MM_HeapMapIterator mapIterator(_extensions, _markMap, lowAddress, highAddress, false);
4903
GC_ObjectHeapIteratorAddressOrderedList heapChunkIterator(_extensions, (J9Object *)lowAddress, (J9Object *)highAddress, false);
4904
J9Object *objectPtr = NULL;
4905
4906
while(NULL != (objectPtr = heapChunkIterator.nextObject())) {
4907
J9Object *mapObjectPtr = mapIterator.nextObject();
4908
4909
if(objectPtr != mapObjectPtr) {
4910
PORT_ACCESS_FROM_ENVIRONMENT(env);
4911
j9tty_printf(PORTLIB, "ChunkIterator and mapIterator did not match up during walk of survivor space! ChunkSlot %p MapSlot %p\n", objectPtr, mapObjectPtr);
4912
Assert_MM_unreachable();
4913
break;
4914
}
4915
verifyObject(env, objectPtr);
4916
}
4917
if(NULL != mapIterator.nextObject()) {
4918
PORT_ACCESS_FROM_ENVIRONMENT(env);
4919
j9tty_printf(PORTLIB, "Survivor space mapIterator did not end when the chunkIterator did!\n");
4920
Assert_MM_unreachable();
4921
}
4922
}
4923
4924
void
4925
MM_CopyForwardScheme::cleanOverflowInRange(MM_EnvironmentVLHGC *env, UDATA *lowAddress, UDATA *highAddress)
4926
{
4927
/* At this point, no copying should happen, so the reservingContext is irrelevant */
4928
MM_AllocationContextTarok *reservingContext = _commonContext;
4929
MM_HeapMapIterator objectIterator = MM_HeapMapIterator(MM_GCExtensions::getExtensions(env), env->_cycleState->_markMap, lowAddress, highAddress);
4930
4931
J9Object *object = NULL;
4932
while (NULL != (object = objectIterator.nextObject())) {
4933
scanObject(env, reservingContext, object, SCAN_REASON_OVERFLOWED_REGION);
4934
}
4935
}
4936
4937
void
4938
MM_CopyForwardScheme::checkConsistencyGMPMapAndPGCMap(MM_EnvironmentVLHGC *env, MM_HeapRegionDescriptorVLHGC *region, UDATA *lowAddress, UDATA *highAddress)
4939
{
4940
MM_MarkMap *externalMarkMap = env->_cycleState->_externalCycleState->_markMap;
4941
MM_HeapMapIterator mapIterator(_extensions, externalMarkMap, lowAddress, highAddress, false);
4942
J9Object *objectPtr = NULL;
4943
4944
while(NULL != (objectPtr = mapIterator.nextObject())) {
4945
Assert_MM_true(_markMap->isBitSet(objectPtr));
4946
Assert_MM_true(objectPtr >= region->getLowAddress());
4947
Assert_MM_true(objectPtr < region->getHighAddress());
4948
}
4949
}
4950
4951
void
MM_CopyForwardScheme::scanWeakReferenceObjects(MM_EnvironmentVLHGC *env)
{
	Assert_MM_true(env->getGCEnvironment()->_referenceObjectBuffer->isEmpty());

	MM_HeapRegionDescriptorVLHGC *region = NULL;
	GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);
	while(NULL != (region = regionIterator.nextRegion())) {
		if ((region->isSurvivorRegion() || region->_copyForwardData._evacuateSet) && !region->getReferenceObjectList()->wasWeakListEmpty()) {
			if(J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {
				processReferenceList(env, region, region->getReferenceObjectList()->getPriorWeakList(), &env->_copyForwardStats._weakReferenceStats);
			}
		}
	}

	/* processReferenceList() may have pushed remembered references back onto the buffer if a GMP is active */
	env->getGCEnvironment()->_referenceObjectBuffer->flush(env);
}

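/**
 * Process the prior soft reference lists of all survivor and evacuate regions, splitting the regions among
 * the GC threads as work units, then flush this thread's reference object buffer.
 */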
void
MM_CopyForwardScheme::scanSoftReferenceObjects(MM_EnvironmentVLHGC *env)
{
	Assert_MM_true(env->getGCEnvironment()->_referenceObjectBuffer->isEmpty());

	MM_HeapRegionDescriptorVLHGC *region = NULL;
	GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);
	while(NULL != (region = regionIterator.nextRegion())) {
		if ((region->isSurvivorRegion() || region->_copyForwardData._evacuateSet) && !region->getReferenceObjectList()->wasSoftListEmpty()) {
			if(J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {
				processReferenceList(env, region, region->getReferenceObjectList()->getPriorSoftList(), &env->_copyForwardStats._softReferenceStats);
			}
		}
	}

	/* processReferenceList() may have pushed remembered references back onto the buffer if a GMP is active */
	env->getGCEnvironment()->_referenceObjectBuffer->flush(env);
}

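/**
 * Process phantom reference lists. A single thread first tags the survivor/evacuate regions whose phantom
 * lists are non-empty and records their count; all threads then process those regions' prior phantom lists
 * as work units and flush their reference object buffers.
 */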
void
MM_CopyForwardScheme::scanPhantomReferenceObjects(MM_EnvironmentVLHGC *env)
{
	/* unfinalized processing may discover more phantom reference objects */
	env->getGCEnvironment()->_referenceObjectBuffer->flush(env);

	if (env->_currentTask->synchronizeGCThreadsAndReleaseSingleThread(env, UNIQUE_ID)) {
		Assert_MM_true(0 == _phantomReferenceRegionsToProcess);
		env->_cycleState->_referenceObjectOptions |= MM_CycleState::references_clear_phantom;
		MM_HeapRegionDescriptorVLHGC *region = NULL;
		GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);
		while(NULL != (region = regionIterator.nextRegion())) {
			Assert_MM_true(region->getReferenceObjectList()->wasPhantomListEmpty());
			Assert_MM_false(region->_copyForwardData._requiresPhantomReferenceProcessing);
			if (region->isSurvivorRegion() || region->_copyForwardData._evacuateSet) {
				region->getReferenceObjectList()->startPhantomReferenceProcessing();
				if (!region->getReferenceObjectList()->wasPhantomListEmpty()) {
					region->_copyForwardData._requiresPhantomReferenceProcessing = true;
					_phantomReferenceRegionsToProcess += 1;
				}
			}
		}
		env->_currentTask->releaseSynchronizedGCThreads(env);
	}

	UDATA phantomReferenceRegionsProcessed = 0;
	MM_HeapRegionDescriptorVLHGC *region = NULL;
	GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);
	while(NULL != (region = regionIterator.nextRegion())) {
		if (region->_copyForwardData._requiresPhantomReferenceProcessing) {
			Assert_MM_true(region->isSurvivorRegion() || region->_copyForwardData._evacuateSet);
			Assert_MM_false(region->getReferenceObjectList()->wasPhantomListEmpty());
			phantomReferenceRegionsProcessed += 1;
			if(J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {
				processReferenceList(env, region, region->getReferenceObjectList()->getPriorPhantomList(), &env->_copyForwardStats._phantomReferenceStats);
			}
		}
	}

	Assert_MM_true(_phantomReferenceRegionsToProcess == phantomReferenceRegionsProcessed);

	/* processReferenceList() may have pushed remembered references back onto the buffer if a GMP is active */
	env->getGCEnvironment()->_referenceObjectBuffer->flush(env);
}

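/**
 * Walk a list of soft/weak/phantom reference objects from the given region: update referents that have been
 * forwarded, age soft references with live referents, clear references whose referents did not survive
 * (enqueueing them for finalization if they have a queue), and restore or remember the reference state for
 * an active GMP as required.
 * @param headOfList the first reference object in the linked list (may be NULL)
 * @param referenceStats statistics (candidates/cleared/enqueued) updated while walking the list
 */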
void
MM_CopyForwardScheme::processReferenceList(MM_EnvironmentVLHGC *env, MM_HeapRegionDescriptorVLHGC* region, J9Object* headOfList, MM_ReferenceStats *referenceStats)
{
	/* no list can possibly contain more reference objects than there are bytes in a region. */
	const UDATA maxObjects = _regionManager->getRegionSize();
	UDATA objectsVisited = 0;
	GC_FinalizableReferenceBuffer buffer(_extensions);
	bool const compressed = env->compressObjectReferences();

	J9Object* referenceObj = headOfList;
	while (NULL != referenceObj) {
		Assert_MM_true(isLiveObject(referenceObj));

		objectsVisited += 1;
		referenceStats->_candidates += 1;

		Assert_MM_true(region->isAddressInRegion(referenceObj));
		Assert_MM_true(objectsVisited < maxObjects);

		J9Object* nextReferenceObj = _extensions->accessBarrier->getReferenceLink(referenceObj);

		GC_SlotObject referentSlotObject(_extensions->getOmrVM(), J9GC_J9VMJAVALANGREFERENCE_REFERENT_ADDRESS(env, referenceObj));
		J9Object *referent = referentSlotObject.readReferenceFromSlot();
		if (NULL != referent) {
			UDATA referenceObjectType = J9CLASS_FLAGS(J9GC_J9OBJECT_CLAZZ(referenceObj, env)) & J9AccClassReferenceMask;

			/* update the referent if it's been forwarded */
			MM_ForwardedHeader forwardedReferent(referent, compressed);
			if (forwardedReferent.isForwardedPointer()) {
				referent = forwardedReferent.getForwardedObject();
				referentSlotObject.writeReferenceToSlot(referent);
			} else {
				Assert_MM_mustBeClass(_extensions->objectModel.getPreservedClass(&forwardedReferent));
			}

			if (isLiveObject(referent)) {
				if (J9AccClassReferenceSoft == referenceObjectType) {
					U_32 age = J9GC_J9VMJAVALANGSOFTREFERENCE_AGE(env, referenceObj);
					if (age < _extensions->getMaxSoftReferenceAge()) {
						/* Soft reference hasn't aged sufficiently yet - increment the age */
						J9GC_J9VMJAVALANGSOFTREFERENCE_AGE(env, referenceObj) = age + 1;
					}
				}
				_interRegionRememberedSet->rememberReferenceForMark(env, referenceObj, referent);
			} else {
				Assert_MM_true(isObjectInEvacuateMemory(referent));
				/* transition the state to cleared */
				I_32 previousState = J9GC_J9VMJAVALANGREFERENCE_STATE(env, referenceObj);
				Assert_MM_true((GC_ObjectModel::REF_STATE_INITIAL == previousState) || (GC_ObjectModel::REF_STATE_REMEMBERED == previousState));

				referenceStats->_cleared += 1;
				J9GC_J9VMJAVALANGREFERENCE_STATE(env, referenceObj) = GC_ObjectModel::REF_STATE_CLEARED;

				/* Phantom references keep their referent alive in Java 8 but not in Java 9 and later */
				if ((J9AccClassReferencePhantom == referenceObjectType) && ((J2SE_VERSION(_javaVM) & J2SE_VERSION_MASK) <= J2SE_18)) {
					/* Scanning will be done after the enqueuing */
					copyAndForward(env, region->_allocateData._owningContext, referenceObj, &referentSlotObject);
					if (GC_ObjectModel::REF_STATE_REMEMBERED == previousState) {
						Assert_MM_true(NULL != env->_cycleState->_externalCycleState);
						/* We changed the state from REMEMBERED to CLEARED, so this will not be enqueued back to region's reference queue.
						 * However, GMP has to revisit this reference to mark the referent in its own mark map.
						 */
						_extensions->cardTable->dirtyCardWithValue(env, referenceObj, CARD_GMP_MUST_SCAN);
					}
				} else {
					referentSlotObject.writeReferenceToSlot(NULL);
				}

				/* Check if the reference has a queue */
				if (0 != J9GC_J9VMJAVALANGREFERENCE_QUEUE(env, referenceObj)) {
					/* Reference object can be enqueued onto the finalizable list */
					referenceStats->_enqueued += 1;
					buffer.add(env, referenceObj);
					env->_cycleState->_finalizationRequired = true;
				}
			}
		}

		switch (J9GC_J9VMJAVALANGREFERENCE_STATE(env, referenceObj)) {
		case GC_ObjectModel::REF_STATE_REMEMBERED:
			Assert_MM_true(NULL != env->_cycleState->_externalCycleState);
			/* This reference object was on a list of GMP reference objects at the start of the cycle. Restore it to its original condition. */
			J9GC_J9VMJAVALANGREFERENCE_STATE(env, referenceObj) = GC_ObjectModel::REF_STATE_INITIAL;
			env->getGCEnvironment()->_referenceObjectBuffer->add(env, referenceObj);
			break;
		case GC_ObjectModel::REF_STATE_CLEARED:
			break;
		case GC_ObjectModel::REF_STATE_INITIAL:
			/* if the object isn't in nursery space it should have been REMEMBERED */
			Assert_MM_true(isObjectInNurseryMemory(referenceObj));
			break;
		case GC_ObjectModel::REF_STATE_ENQUEUED:
			/* this object shouldn't have been on the list */
			Assert_MM_unreachable();
			break;
		default:
			Assert_MM_unreachable();
			break;
		}

		referenceObj = nextReferenceObj;
	}
	buffer.flush(env);
}

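/**
 * Walk a reference list discovered on behalf of an active GMP and mark each reference object as
 * REF_STATE_REMEMBERED so the list can be rebuilt later; references outside evacuate memory are pushed onto
 * the thread's reference object buffer. Cleared and already-enqueued references are left untouched.
 */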
void
MM_CopyForwardScheme::rememberReferenceList(MM_EnvironmentVLHGC *env, J9Object* headOfList)
{
	Assert_MM_true((NULL == headOfList) || (NULL != env->_cycleState->_externalCycleState));
	/* If phantom reference processing has already started this list will never be processed */
	Assert_MM_true(0 == _phantomReferenceRegionsToProcess);

	J9Object* referenceObj = headOfList;
	while (NULL != referenceObj) {
		J9Object* next = _extensions->accessBarrier->getReferenceLink(referenceObj);
		I_32 referenceState = J9GC_J9VMJAVALANGREFERENCE_STATE(env, referenceObj);
		switch (referenceState) {
		case GC_ObjectModel::REF_STATE_INITIAL:
			/* The reference object was on a list of GMP reference objects at the start of the cycle. Remember this */
			J9GC_J9VMJAVALANGREFERENCE_STATE(env, referenceObj) = GC_ObjectModel::REF_STATE_REMEMBERED;
			if (!isObjectInEvacuateMemory(referenceObj)) {
				Assert_MM_true(_markMap->isBitSet(referenceObj));
				Assert_MM_true(!isObjectInNurseryMemory(referenceObj));
				env->getGCEnvironment()->_referenceObjectBuffer->add(env, referenceObj);
			}
			break;
		case GC_ObjectModel::REF_STATE_CLEARED:
			/* The reference object was cleared (probably by an explicit call to the clear() Java API).
			 * No need to remember it, since it's already in its terminal state.
			 */
			break;
		case GC_ObjectModel::REF_STATE_ENQUEUED:
			/* The reference object was enqueued. This could have happened either
			 * 1) during a previous GC (+ finalization), in which case it has been removed from the list at GC time, or
			 * 2) in Java through an explicit enqueue(), in which case it may still be in the list.
			 * Explicit enqueue() will clear the reference queue field. So, if we still see it in the list, the queue must be null.
			 * This GC will rebuild the list, after which the reference must not be on the list anymore.
			 */
			Assert_MM_true(0 == J9GC_J9VMJAVALANGREFERENCE_QUEUE(env, referenceObj));
			break;
		case GC_ObjectModel::REF_STATE_REMEMBERED:
			/* The reference object must not already be remembered */
		default:
			PORT_ACCESS_FROM_ENVIRONMENT(env);
			j9tty_printf(PORTLIB, "rememberReferenceList referenceObj=%p, referenceState=%zu\n", referenceObj, referenceState);
			Assert_MM_unreachable();
		}
		referenceObj = next;
	}
}

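/**
 * Remember and reset the reference lists of every region that is flagged to be marked for the external
 * cycle, splitting the regions among the GC threads as work units.
 */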
void
MM_CopyForwardScheme::rememberReferenceListsFromExternalCycle(MM_EnvironmentVLHGC *env)
{
	GC_HeapRegionIteratorVLHGC regionIterator(_regionManager);
	MM_HeapRegionDescriptorVLHGC *region = NULL;
	while(NULL != (region = regionIterator.nextRegion())) {
		if (region->_markData._shouldMark) {
			if (J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {
				rememberAndResetReferenceLists(env, region);
			}
		}
	}
}

void
MM_CopyForwardScheme::rememberAndResetReferenceLists(MM_EnvironmentVLHGC *env, MM_HeapRegionDescriptorVLHGC *region)
{
	MM_ReferenceObjectList *referenceObjectList = region->getReferenceObjectList();
	UDATA referenceObjectOptions = env->_cycleState->_referenceObjectOptions;

	if (0 == (referenceObjectOptions & MM_CycleState::references_clear_weak)) {
		referenceObjectList->startWeakReferenceProcessing();
		J9Object* headOfList = referenceObjectList->getPriorWeakList();
		if (NULL != headOfList) {
			Trc_MM_CopyForwardScheme_rememberAndResetReferenceLists_rememberWeak(env->getLanguageVMThread(), region, headOfList);
			rememberReferenceList(env, headOfList);
		}
	}

	if (0 == (referenceObjectOptions & MM_CycleState::references_clear_soft)) {
		referenceObjectList->startSoftReferenceProcessing();
		J9Object* headOfList = referenceObjectList->getPriorSoftList();
		if (NULL != headOfList) {
			Trc_MM_CopyForwardScheme_rememberAndResetReferenceLists_rememberSoft(env->getLanguageVMThread(), region, headOfList);
			rememberReferenceList(env, headOfList);
		}
	}

	if (0 == (referenceObjectOptions & MM_CycleState::references_clear_phantom)) {
		referenceObjectList->startPhantomReferenceProcessing();
		J9Object* headOfList = referenceObjectList->getPriorPhantomList();
		if (NULL != headOfList) {
			Trc_MM_CopyForwardScheme_rememberAndResetReferenceLists_rememberPhantom(env->getLanguageVMThread(), region, headOfList);
			rememberReferenceList(env, headOfList);
		}
	}

	referenceObjectList->resetPriorLists();
}

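/**
 * Walk the finalize list manager's system, default and reference object lists, copying or preserving each
 * entry so the rebuilt lists refer to the forwarded versions of objects. Outside of an abort this must run
 * single-threaded since objects may be copied here.
 */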
#if defined(J9VM_GC_FINALIZATION)
void
MM_CopyForwardScheme::scanFinalizableObjects(MM_EnvironmentVLHGC *env)
{
	GC_FinalizeListManager * finalizeListManager = _extensions->finalizeListManager;

	/* If we're copying objects this code must be run single-threaded and we should only be here if work is actually required */
	/* This function is also used during abort; these assertions aren't applicable to that case because objects can't be copied during abort */
	Assert_MM_true(_abortInProgress || env->_currentTask->isSynchronized());
	Assert_MM_true(_abortInProgress || _shouldScanFinalizableObjects);
	Assert_MM_true(_abortInProgress || finalizeListManager->isFinalizableObjectProcessingRequired());

	/* walk finalizable objects loaded by the system class loader */
	j9object_t systemObject = finalizeListManager->resetSystemFinalizableObjects();
	if (NULL != systemObject) {
		scanFinalizableList(env, systemObject);
	}

	/* walk finalizable objects loaded by all other class loaders */
	j9object_t defaultObject = finalizeListManager->resetDefaultFinalizableObjects();
	if (NULL != defaultObject) {
		scanFinalizableList(env, defaultObject);
	}

	{
		/* walk reference objects */
		GC_FinalizableReferenceBuffer referenceBuffer(_extensions);
		j9object_t referenceObject = finalizeListManager->resetReferenceObjects();
		while (NULL != referenceObject) {
			j9object_t next = NULL;
			if(!isLiveObject(referenceObject)) {
				Assert_MM_true(isObjectInEvacuateMemory(referenceObject));
				MM_ForwardedHeader forwardedHeader(referenceObject, _extensions->compressObjectReferences());
				if (!forwardedHeader.isForwardedPointer()) {
					Assert_MM_mustBeClass(_extensions->objectModel.getPreservedClass(&forwardedHeader));
					next = _extensions->accessBarrier->getReferenceLink(referenceObject);

					MM_AllocationContextTarok *reservingContext = getContextForHeapAddress(referenceObject);
					J9Object* copyObject = copy(env, reservingContext, &forwardedHeader);
					if ( (NULL == copyObject) || (referenceObject == copyObject) ) {
						referenceBuffer.add(env, referenceObject);
					} else {
						/* It's only safe to copy objects on the finalizable list if we're in single threaded mode */
						Assert_MM_true(!_abortInProgress);
						referenceBuffer.add(env, copyObject);
					}
				} else {
					J9Object *forwardedPtr = forwardedHeader.getForwardedObject();
					Assert_MM_true(NULL != forwardedPtr);
					next = _extensions->accessBarrier->getReferenceLink(forwardedPtr);
					referenceBuffer.add(env, forwardedPtr);
				}
			} else {
				next = _extensions->accessBarrier->getReferenceLink(referenceObject);
				referenceBuffer.add(env, referenceObject);
			}

			referenceObject = next;
		}
		referenceBuffer.flush(env);
	}
}

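/**
 * Walk one finalizable object list: live objects are re-added as-is, forwarded objects are re-added via
 * their forwarded pointer, and unforwarded evacuate objects are copied (only legal when single-threaded)
 * before being re-added to the buffer, which is flushed at the end.
 */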
void
MM_CopyForwardScheme::scanFinalizableList(MM_EnvironmentVLHGC *env, j9object_t headObject)
{
	GC_FinalizableObjectBuffer objectBuffer(_extensions);

	while (NULL != headObject) {
		j9object_t next = NULL;

		if(!isLiveObject(headObject)) {
			Assert_MM_true(isObjectInEvacuateMemory(headObject));
			MM_ForwardedHeader forwardedHeader(headObject, _extensions->compressObjectReferences());
			if (!forwardedHeader.isForwardedPointer()) {
				Assert_MM_mustBeClass(_extensions->objectModel.getPreservedClass(&forwardedHeader));
				next = _extensions->accessBarrier->getFinalizeLink(headObject);

				MM_AllocationContextTarok *reservingContext = getContextForHeapAddress(headObject);
				J9Object* copyObject = copy(env, reservingContext, &forwardedHeader);
				if ( (NULL == copyObject) || (headObject == copyObject) ) {
					objectBuffer.add(env, headObject);
				} else {
					/* It's only safe to copy objects on the finalizable list if we're in single threaded mode */
					Assert_MM_true(!_abortInProgress);
					objectBuffer.add(env, copyObject);
				}
			} else {
				J9Object *forwardedPtr = forwardedHeader.getForwardedObject();
				Assert_MM_true(NULL != forwardedPtr);
				next = _extensions->accessBarrier->getFinalizeLink(forwardedPtr);
				objectBuffer.add(env, forwardedPtr);
			}
		} else {
			next = _extensions->accessBarrier->getFinalizeLink(headObject);
			objectBuffer.add(env, headObject);
		}

		headObject = next;
	}

	objectBuffer.flush(env);
}
#endif /* J9VM_GC_FINALIZATION */

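/**
 * Unlink the given region from the doubly-linked free memory candidate list headed by the region list and
 * decrement the candidate count.
 */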
void
MM_CopyForwardScheme::removeFreeMemoryCandidate(MM_EnvironmentVLHGC* env, MM_ReservedRegionListHeader* regionList, MM_HeapRegionDescriptorVLHGC *region)
{
	Assert_MM_true(NULL != regionList->_freeMemoryCandidates);
	Assert_MM_true(0 < regionList->_freeMemoryCandidateCount);

	regionList->_freeMemoryCandidateCount -= 1;

	MM_HeapRegionDescriptorVLHGC *next = region->_copyForwardData._nextRegion;
	MM_HeapRegionDescriptorVLHGC *previous = region->_copyForwardData._previousRegion;
	if (NULL != next) {
		next->_copyForwardData._previousRegion = previous;
	}
	if (NULL != previous) {
		previous->_copyForwardData._nextRegion = next;
		Assert_MM_true(previous != previous->_copyForwardData._nextRegion);
	} else {
		Assert_MM_true(region == regionList->_freeMemoryCandidates);
		regionList->_freeMemoryCandidates = next;
	}
}

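/**
 * Push the given region onto the front of the region list's free memory candidate list and increment the
 * candidate count.
 */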
void
MM_CopyForwardScheme::insertFreeMemoryCandidate(MM_EnvironmentVLHGC* env, MM_ReservedRegionListHeader* regionList, MM_HeapRegionDescriptorVLHGC *region)
{
	region->_copyForwardData._nextRegion = regionList->_freeMemoryCandidates;
	region->_copyForwardData._previousRegion = NULL;
	if(NULL != regionList->_freeMemoryCandidates) {
		regionList->_freeMemoryCandidates->_copyForwardData._previousRegion = region;
	}
	regionList->_freeMemoryCandidates = region;
	regionList->_freeMemoryCandidateCount += 1;
}

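/**
 * Convert an address-ordered-marked free memory candidate into a (non-fresh) survivor region and remember
 * and reset its reference lists.
 */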
void
MM_CopyForwardScheme::convertFreeMemoryCandidateToSurvivorRegion(MM_EnvironmentVLHGC* env, MM_HeapRegionDescriptorVLHGC *region)
{
	Trc_MM_CopyForwardScheme_convertFreeMemoryCandidateToSurvivorRegion_Entry(env->getLanguageVMThread(), region);
	Assert_MM_true(NULL != region);
	Assert_MM_true(MM_HeapRegionDescriptor::ADDRESS_ORDERED_MARKED == region->getRegionType());
	Assert_MM_false(region->isSurvivorRegion());
	Assert_MM_false(region->isFreshSurvivorRegion());

	setRegionAsSurvivor(env, region, false);

	/* TODO: Remembering does not really have to be done under a lock, but dual (prev, current) list implementation indirectly forces us to do it this way. */
	rememberAndResetReferenceLists(env, region);

	Trc_MM_CopyForwardScheme_convertFreeMemoryCandidateToSurvivorRegion_Exit(env->getLanguageVMThread());
}

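/**
 * Flag the region as a survivor (optionally a fresh survivor) and convert its allocation age into a
 * (usedBytes * age) product that can be merged as caches are allocated from the region and converted back
 * to a pure age at the end of the GC.
 */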
void
MM_CopyForwardScheme::setRegionAsSurvivor(MM_EnvironmentVLHGC* env, MM_HeapRegionDescriptorVLHGC *region, bool freshSurvivor)
{
	UDATA usedBytes = region->getSize() - region->getMemoryPool()->getFreeMemoryAndDarkMatterBytes();

	/* convert allocation age into a (usedBytes * age) product. It will be converted back to a pure age at the end of GC.
	 * In the meantime, as caches are allocated from the region, the age will be merged.
	 */
	double allocationAgeSizeProduct = (double)usedBytes * (double)region->getAllocationAge();

	Trc_MM_CopyForwardScheme_setRegionAsSurvivor(env->getLanguageVMThread(), _regionManager->mapDescriptorToRegionTableIndex(region), MM_CompactGroupManager::getCompactGroupNumber(env, region),
			(double)region->getAllocationAge() / (1024 * 1024), (double)usedBytes / (1024 * 1024), allocationAgeSizeProduct / (1024 * 1024) / (1024 * 1024));

	Assert_MM_true(0.0 == region->getAllocationAgeSizeProduct());
	region->setAllocationAgeSizeProduct(allocationAgeSizeProduct);
	if (freshSurvivor) {
		region->resetAgeBounds();
	}

	/* update the pool so it only knows about the free memory occurring before survivor base. We will add whatever we don't use at the end of the copy-forward */
	Assert_MM_false(region->_copyForwardData._requiresPhantomReferenceProcessing);
	region->_copyForwardData._survivor = true;
	region->_copyForwardData._freshSurvivor = freshSurvivor;
}

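/**
 * Convert the region's accumulated (usedBytes * age) product back into an allocation age, derive the logical
 * age for its compact group, and reset the product for future use.
 */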
void
MM_CopyForwardScheme::setAllocationAgeForMergedRegion(MM_EnvironmentVLHGC* env, MM_HeapRegionDescriptorVLHGC *region)
{
	UDATA compactGroup = MM_CompactGroupManager::getCompactGroupNumber(env, region);
	UDATA usedBytes = region->getSize() - region->getMemoryPool()->getFreeMemoryAndDarkMatterBytes();

	Assert_MM_true(0 != usedBytes);

	/* convert allocation age product (usedBytes * age) back to pure age */
	U_64 newAllocationAge = (U_64)(region->getAllocationAgeSizeProduct() / (double)usedBytes);

	Trc_MM_CopyForwardScheme_setAllocationAgeForMergedRegion(env->getLanguageVMThread(), _regionManager->mapDescriptorToRegionTableIndex(region), compactGroup,
			region->getAllocationAgeSizeProduct() / (1024 * 1024) / (1024 * 1024), (double)usedBytes / (1024 * 1024), (double)newAllocationAge / (1024 * 1024),
			(double)region->getLowerAgeBound() / (1024 * 1024), (double)region->getUpperAgeBound() / (1024 * 1024));

	if (_extensions->tarokAllocationAgeEnabled) {
		Assert_MM_true(newAllocationAge < _extensions->compactGroupPersistentStats[compactGroup]._maxAllocationAge);
		Assert_MM_true((MM_CompactGroupManager::getRegionAgeFromGroup(env, compactGroup) == 0) || (newAllocationAge >= _extensions->compactGroupPersistentStats[compactGroup - 1]._maxAllocationAge));
	}

	UDATA logicalAge = 0;
	if (_extensions->tarokAllocationAgeEnabled) {
		logicalAge = MM_CompactGroupManager::calculateLogicalAgeForRegion(env, newAllocationAge);
	} else {
		logicalAge = MM_CompactGroupManager::getRegionAgeFromGroup(env, compactGroup);
	}

	region->setAge(newAllocationAge, logicalAge);
	/* reset aging auxiliary data for future usage */
	region->setAllocationAgeSizeProduct(0.0);
}

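/**
 * @return true if objectPtr lies in a region flagged as non-evacuable; false for a NULL pointer or when no
 * such regions exist in this copy-forward.
 */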
bool
MM_CopyForwardScheme::isObjectInNoEvacuationRegions(MM_EnvironmentVLHGC *env, J9Object *objectPtr)
{
	if ((NULL == objectPtr) || (0 == _regionCountCannotBeEvacuated)) {
		return false;
	}
	MM_HeapRegionDescriptorVLHGC *region = (MM_HeapRegionDescriptorVLHGC *)_regionManager->tableDescriptorForAddress(objectPtr);
	return region->_markData._noEvacuation;
}

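/**
 * @param ratio the percentage chance, valid in the range 1..100
 * @return true with probability ratio/100 (always false when ratio is outside that range)
 */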
bool
MM_CopyForwardScheme::randomDecideForceNonEvacuatedRegion(UDATA ratio)
{
	bool ret = false;
	if ((0 < ratio) && (ratio <= 100)) {
		ret = ((UDATA)(rand() % 100) <= (UDATA)(ratio - 1));
	}
	return ret;
}

MMINLINE bool
MM_CopyForwardScheme::isCompressedSurvivor(void *heapAddr)
{
	UDATA compressedCardOffset = ((UDATA)heapAddr - (UDATA)_heapBase) / CARD_SIZE;
	UDATA compressedCardIndex = compressedCardOffset / COMPRESSED_CARDS_PER_WORD;
	UDATA compressedSurvivorWord = _compressedSurvivorTable[compressedCardIndex];
	bool isSurvivor = false;

	if (AllCompressedCardsInWordClean != compressedSurvivorWord) {
		UDATA bit = compressedCardOffset % COMPRESSED_CARDS_PER_WORD;
		isSurvivor = (CompressedCardSurvivor == ((compressedSurvivorWord >> bit) & 1));
	}
	return isSurvivor;
}

/**
 * The compressed survivor table keeps one bit per card of the card table; it identifies whether a live object
 * is in survivor memory for the current PGC.
 * setCompressedSurvivorCards() is called when acquiring free memory from a region and when preparing preserved TLHRemainders.
 */
MMINLINE void
MM_CopyForwardScheme::setCompressedSurvivorCards(MM_EnvironmentVLHGC *env, void *startHeapAddress, void *endHeapAddress)
{
	UDATA compressedCardStartOffset = ((UDATA)startHeapAddress - (UDATA)_heapBase) / CARD_SIZE;
	UDATA compressedCardStartIndex = compressedCardStartOffset / COMPRESSED_CARDS_PER_WORD;
	UDATA compressedCardEndOffset = (((UDATA)endHeapAddress - (UDATA)_heapBase) + (CARD_SIZE - 1)) / CARD_SIZE;
	UDATA compressedCardEndIndex = compressedCardEndOffset / COMPRESSED_CARDS_PER_WORD;
	UDATA mask = 1;
	UDATA endOfWord = ((UDATA)1) << (COMPRESSED_CARDS_PER_WORD - 1);
	UDATA compressedSurvivorWord = AllCompressedCardsInWordClean;

	UDATA *compressedSurvivor = &_compressedSurvivorTable[compressedCardStartIndex];

	UDATA shiftStart = compressedCardStartOffset % COMPRESSED_CARDS_PER_WORD;
	mask = mask << shiftStart;
	UDATA offset = compressedCardStartOffset;
	UDATA idx = compressedCardStartIndex;
	if (idx == compressedCardEndIndex) {
		endOfWord = ((UDATA)1) << ((compressedCardEndOffset - 1) % COMPRESSED_CARDS_PER_WORD);
	}
	while (offset < compressedCardEndOffset) {
		/* invert bit */
		compressedSurvivorWord ^= mask;

		if (mask == endOfWord) {
			/* only the first and last words need an atomic update; intermediate ones are written non-atomically */
			if ((idx != compressedCardStartIndex) && (idx != compressedCardEndIndex)) {
				*compressedSurvivor |= compressedSurvivorWord;
			} else {
				/* atomic update */
				volatile UDATA *localAddr = compressedSurvivor;
				UDATA oldValue = *localAddr;
				UDATA newValue = oldValue | compressedSurvivorWord;
				if (newValue != oldValue) {
					while ((MM_AtomicOperations::lockCompareExchange(localAddr, oldValue, newValue)) != oldValue) {
						oldValue = *localAddr;
						newValue = oldValue | compressedSurvivorWord;
					}
				}
			}
			compressedSurvivor++;
			compressedSurvivorWord = AllCompressedCardsInWordClean;
			idx += 1;
			if (idx == compressedCardEndIndex) {
				endOfWord = ((UDATA)1) << ((compressedCardEndOffset - 1) % COMPRESSED_CARDS_PER_WORD);
			}
			mask = 1;
		} else {
			/* mask for next bit to handle */
			mask = mask << 1;
		}
		offset += 1;
	}
}

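/**
 * Reset the compressed survivor table to the clean state for the whole physical heap range.
 */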
MMINLINE void
MM_CopyForwardScheme::cleanCompressedSurvivorCardTable(MM_EnvironmentVLHGC *env)
{
	UDATA compressedSurvivorTableSize = _extensions->heap->getMaximumPhysicalRange() / (CARD_SIZE * BITS_PER_BYTE);
	memset((void*)_compressedSurvivorTable, AllCompressedCardsInByteClean, compressedSurvivorTableSize);
}

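/**
 * For every compact group, either recycle this thread's TLH remainder (when remainder recycling is enabled,
 * the group is beyond the nursery age and the remainder is large enough to be reused for survivor space) or
 * discard it.
 */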
void
MM_CopyForwardScheme::abandonTLHRemainders(MM_EnvironmentVLHGC *env)
{
	for (UDATA compactGroup = 0; compactGroup < _compactGroupMaxCount; compactGroup++) {
		MM_CopyForwardCompactGroup *copyForwardCompactGroup = &env->_copyForwardCompactGroups[compactGroup];
		if (_extensions->recycleRemainders) {
			if ((MM_CompactGroupManager::getRegionAgeFromGroup(env, compactGroup) >= _extensions->tarokNurseryMaxAge._valueSpecified) &&
					(copyForwardCompactGroup->getTLHRemainderSize() >= _extensions->minimumFreeSizeForSurvivor)) {
				copyForwardCompactGroup->recycleTLHRemainder(env);
			} else {
				copyForwardCompactGroup->discardTLHRemainder(env);
			}
		} else {
			copyForwardCompactGroup->discardTLHRemainder(env);
		}
	}
}
