Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/openj9
Path: blob/master/runtime/gc_vlhgc/ConfigurationIncrementalGenerational.cpp
5986 views
1
2
/*******************************************************************************
3
* Copyright (c) 1991, 2021 IBM Corp. and others
4
*
5
* This program and the accompanying materials are made available under
6
* the terms of the Eclipse Public License 2.0 which accompanies this
7
* distribution and is available at https://www.eclipse.org/legal/epl-2.0/
8
* or the Apache License, Version 2.0 which accompanies this distribution and
9
* is available at https://www.apache.org/licenses/LICENSE-2.0.
10
*
11
* This Source Code may also be made available under the following
12
* Secondary Licenses when the conditions for such availability set
13
* forth in the Eclipse Public License, v. 2.0 are satisfied: GNU
14
* General Public License, version 2 with the GNU Classpath
15
* Exception [1] and GNU General Public License, version 2 with the
16
* OpenJDK Assembly Exception [2].
17
*
18
* [1] https://www.gnu.org/software/classpath/license.html
19
* [2] http://openjdk.java.net/legal/assembly-exception.html
20
*
21
* SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 OR GPL-2.0 WITH Classpath-exception-2.0 OR LicenseRef-GPL-2.0 WITH Assembly-exception
22
*******************************************************************************/
23
24
/**
25
* @file
26
* @ingroup GC_Modron_Standard
27
*/
28
29
#include "j9.h"
30
#include "j9cfg.h"
31
32
#include "ConfigurationIncrementalGenerational.hpp"
33
34
#include "ClassLoaderRememberedSet.hpp"
35
#include "CompressedCardTable.hpp"
36
#include "EnvironmentVLHGC.hpp"
37
#include "GCExtensions.hpp"
38
#include "GlobalAllocationManagerTarok.hpp"
39
#include "GlobalCollector.hpp"
40
#include "HeapRegionDescriptorVLHGC.hpp"
41
#include "HeapRegionManager.hpp"
42
#include "HeapRegionManagerVLHGC.hpp"
43
#if defined(OMR_GC_VLHGC_CONCURRENT_COPY_FORWARD)
44
#include "HeapRegionStateTable.hpp"
45
#endif /* defined(OMR_GC_VLHGC_CONCURRENT_COPY_FORWARD) */
46
#include "HeapVirtualMemory.hpp"
47
#include "MemorySpace.hpp"
48
#include "MemorySubSpaceTarok.hpp"
49
#include "IncrementalCardTable.hpp"
50
#include "IncrementalGenerationalGC.hpp"
51
#include "InterRegionRememberedSet.hpp"
52
#include "PhysicalArenaRegionBased.hpp"
53
#include "PhysicalSubArenaRegionBased.hpp"
54
#include "SweepPoolManagerAddressOrderedList.hpp"
55
#include "SweepPoolManagerVLHGC.hpp"
56
57
#define TAROK_MINIMUM_REGION_SIZE_BYTES (512 * 1024)
58
59
MM_Configuration *
MM_ConfigurationIncrementalGenerational::newInstance(MM_EnvironmentBase *env)
{
	/* Allocate raw storage from the GC forge, then construct the configuration in place. */
	void *allocation = env->getForge()->allocate(sizeof(MM_ConfigurationIncrementalGenerational), MM_AllocationCategory::FIXED, J9_GET_CALLSITE());
	if (NULL == allocation) {
		return NULL;
	}

	MM_ConfigurationIncrementalGenerational *instance = new(allocation) MM_ConfigurationIncrementalGenerational(env);
	if (instance->initialize(env)) {
		return instance;
	}

	/* Initialization failed - release the partially constructed instance. */
	instance->kill(env);
	return NULL;
}
73
74
/**
75
* Create the global collector for a Tarok configuration
76
*/
77
MM_GlobalCollector *
78
MM_ConfigurationIncrementalGenerational::createGlobalCollector(MM_EnvironmentBase *envBase)
79
{
80
MM_GCExtensionsBase *extensions = envBase->getExtensions();
81
MM_EnvironmentVLHGC *env = MM_EnvironmentVLHGC::getEnvironment(envBase);
82
MM_HeapRegionManager *heapRegionManager = extensions->heapRegionManager;
83
84
return MM_IncrementalGenerationalGC::newInstance(env, heapRegionManager);
85
}
86
87
/**
88
* Create the heap for a region based configuration
89
*/
90
MM_Heap *
91
MM_ConfigurationIncrementalGenerational::createHeapWithManager(MM_EnvironmentBase *env, UDATA heapBytesRequested, MM_HeapRegionManager *regionManager)
92
{
93
MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);
94
95
MM_Heap *heap = MM_HeapVirtualMemory::newInstance(env, extensions->heapAlignment, heapBytesRequested, regionManager);
96
if (NULL == heap) {
97
return NULL;
98
}
99
100
#if defined(J9VM_GC_ENABLE_DOUBLE_MAP)
101
/* Enable double mapping if glibc version 2.27 or newer is found. For double map to
102
* work we need a file descriptor, to get one we use shm_open(3) or memfd_create(2);
103
* shm_open(3) has 2 drawbacks: [I] shared memory is used; [II] does not support
104
* anonymous huge pages. [I] shared memory in Linux systems has a limit (half of
105
* physical memory). [II] if we create a file descriptor using shm_open(3) and then
106
* try to mmap with huge pages with the respective file descriptor the mmap call
107
* fails. It would only succeed if MAP_ANON flag was provided, but doing so ignores
108
* the file descriptor which is the opposite of what we want. In a newer glibc
109
* version (glibc 2.27 onwards) there’s a new function that does exactly what we
110
* want, and that's memfd_create(2); however that's only supported in glibc 2.27. We
111
* also need to check if region size is a bigger or equal to multiple of page size.
112
*
113
*/
114
if (extensions->isArrayletDoubleMapRequested && extensions->isArrayletDoubleMapAvailable) {
115
uintptr_t pagesize = heap->getPageSize();
116
if (!extensions->memoryManager->isLargePage(env, pagesize) || (pagesize <= extensions->getOmrVM()->_arrayletLeafSize)) {
117
extensions->indexableObjectModel.setEnableDoubleMapping(true);
118
}
119
}
120
#endif /* J9VM_GC_ENABLE_DOUBLE_MAP */
121
122
/* when we try to attach this heap to a region manager, we will need the card table since it needs to be NUMA-affinitized using the same logic as the heap so initialize it here */
123
extensions->cardTable = MM_IncrementalCardTable::newInstance(MM_EnvironmentVLHGC::getEnvironment(env), heap);
124
if (NULL == extensions->cardTable) {
125
heap->kill(env);
126
return NULL;
127
}
128
129
if (extensions->tarokEnableCompressedCardTable) {
130
extensions->compressedCardTable = MM_CompressedCardTable::newInstance(MM_EnvironmentVLHGC::getEnvironment(env), heap);
131
if (NULL == extensions->compressedCardTable) {
132
extensions->cardTable->kill(env);
133
extensions->cardTable = NULL;
134
heap->kill(env);
135
return NULL;
136
}
137
}
138
139
#if defined(OMR_GC_VLHGC_CONCURRENT_COPY_FORWARD)
140
if (extensions->isConcurrentCopyForwardEnabled()) {
141
uintptr_t heapBase = (uintptr_t) heap->getHeapBase();
142
uintptr_t regionShift = regionManager->getRegionShift();
143
uintptr_t regionCount = heap->getMaximumPhysicalRange() >> regionShift;
144
145
extensions->heapRegionStateTable = OMR::GC::HeapRegionStateTable::newInstance(env->getForge(), heapBase, regionShift, regionCount);
146
if (NULL == extensions->heapRegionStateTable) {
147
extensions->compressedCardTable->kill(env);
148
extensions->compressedCardTable = NULL;
149
extensions->cardTable->kill(env);
150
extensions->cardTable = NULL;
151
heap->kill(env);
152
return NULL;
153
}
154
}
155
#endif /* defined(OMR_GC_VLHGC_CONCURRENT_COPY_FORWARD) */
156
157
return heap;
158
}
159
160
MM_EnvironmentBase *
MM_ConfigurationIncrementalGenerational::allocateNewEnvironment(MM_GCExtensionsBase *extensions, OMR_VMThread *omrVMThread)
{
	/* VLHGC threads use the VLHGC-specific environment subclass. */
	MM_EnvironmentBase *newEnvironment = MM_EnvironmentVLHGC::newInstance(extensions, omrVMThread);
	return newEnvironment;
}
165
166
J9Pool *
MM_ConfigurationIncrementalGenerational::createEnvironmentPool(MM_EnvironmentBase *env)
{
	PORT_ACCESS_FROM_ENVIRONMENT(env);

	uintptr_t initialCount = getConfigurationDelegate()->getInitialNumberOfPooledEnvironments(env);

	/* pool flags = 0 selects the default pool configuration
	 * (at least 1 element, puddle size rounded to OS page size) */
	return pool_new(sizeof(MM_EnvironmentVLHGC), initialCount, sizeof(U_64), 0, J9_GET_CALLSITE(), OMRMEM_CATEGORY_MM, POOL_FOR_PORT(PORTLIB));
}
175
176
bool
177
MM_ConfigurationIncrementalGenerational::initializeEnvironment(MM_EnvironmentBase *env)
178
{
179
MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);
180
J9VMThread *vmThread = (J9VMThread *)env->getLanguageVMThread();
181
OMR_VM *omrVM = env->getOmrVM();
182
183
if (!MM_Configuration::initializeEnvironment(env)) {
184
return false;
185
}
186
187
/* acquire an ACT for this env */
188
if (!extensions->globalAllocationManager->acquireAllocationContext(env)) {
189
return false;
190
}
191
192
vmThread->cardTableVirtualStart = (U_8 *)j9gc_incrementalUpdate_getCardTableVirtualStart(omrVM);
193
vmThread->cardTableShiftSize = j9gc_incrementalUpdate_getCardTableShiftValue(omrVM);
194
195
return true;
196
}
197
198
/**
 * Create the default (flat) memory space for the incremental-generational configuration.
 *
 * Builds, in order: the VLHGC sweep pool manager, the remembered-set card bucket
 * pool, the Tarok global allocation manager (GAM), the physical sub-arena, the
 * Tarok memory subspace (plus its allocation contexts), and finally the memory
 * space itself. Several of the created structures are stored into extensions and
 * are released later in tearDown() (sweep pool manager, bucket pool).
 *
 * @param env the calling thread's environment
 * @param heap the heap previously created by createHeapWithManager()
 * @param parameters sizing parameters prepared by prepareParameters()
 * @return the new memory space, or NULL on failure
 */
MM_MemorySpace *
MM_ConfigurationIncrementalGenerational::createDefaultMemorySpace(MM_EnvironmentBase *env, MM_Heap *heap, MM_InitializationParameters *parameters)
{
	MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);

	MM_HeapRegionManager *regionManager = extensions->heapRegionManager;
	Assert_MM_true(NULL != regionManager);

	/* Create Sweep Pool Manager for VLHGC */
	extensions->sweepPoolManagerAddressOrderedList = (MM_SweepPoolManagerAddressOrderedList *) MM_SweepPoolManagerVLHGC::newInstance(env);
	if (NULL == extensions->sweepPoolManagerAddressOrderedList) {
		return NULL;
	}

	/* allocate size: (region count) X (max GC thread count) X (size of Bucket) */
	UDATA allocateSize = sizeof(MM_RememberedSetCardBucket);
	allocateSize *= extensions->getHeap()->getHeapRegionManager()->getTableRegionCount();
	allocateSize *= extensions->gcThreadCount;

	extensions->rememberedSetCardBucketPool = (MM_RememberedSetCardBucket *)extensions->getForge()->allocate(allocateSize, MM_AllocationCategory::REMEMBERED_SET, J9_GET_CALLSITE());
	if (NULL == extensions->rememberedSetCardBucketPool) {
		return NULL;
	}

	/* this is as good a place as any to create the global allocation manager */
	MM_GlobalAllocationManagerTarok *gamt = MM_GlobalAllocationManagerTarok::newInstance(env);
	if (NULL == gamt) {
		return NULL;
	}
	extensions->globalAllocationManager = gamt;

	MM_PhysicalSubArenaRegionBased *physicalSubArena = MM_PhysicalSubArenaRegionBased::newInstance(env, heap);
	if(NULL == physicalSubArena) {
		return NULL;
	}

	/* the subspace takes the whole old-space range; VLHGC always runs with a global collector */
	bool usesGlobalCollector = true;
	/* NOTE(review): if this newInstance fails, physicalSubArena is not explicitly killed here -
	 * presumably the subspace takes ownership only on success and the arena is reclaimed
	 * elsewhere; confirm before relying on this path. */
	MM_MemorySubSpaceTarok *memorySubspaceTarok = MM_MemorySubSpaceTarok::newInstance(env, physicalSubArena, gamt, usesGlobalCollector, parameters->_minimumSpaceSize, parameters->_initialOldSpaceSize, parameters->_maximumSpaceSize, MEMORY_TYPE_OLD, 0);
	if(NULL == memorySubspaceTarok) {
		return NULL;
	}
	/* the subspace exists so now we can request that the allocation contexts are created (since they require the subspace) */
	if (!gamt->initializeAllocationContexts(env, memorySubspaceTarok)) {
		memorySubspaceTarok->kill(env);
		return NULL;
	}
	/* now, configure the collector with this subspace */
	((MM_IncrementalGenerationalGC *)extensions->getGlobalCollector())->setConfiguredSubspace(env, memorySubspaceTarok);


	MM_PhysicalArenaRegionBased *physicalArena = MM_PhysicalArenaRegionBased::newInstance(env, heap);
	if(NULL == physicalArena) {
		memorySubspaceTarok->kill(env);
		return NULL;
	}

	return MM_MemorySpace::newInstance(env, heap, physicalArena, memorySubspaceTarok, parameters, MEMORY_SPACE_NAME_FLAT, MEMORY_SPACE_DESCRIPTION_FLAT);
}
256
257
258
void
259
MM_ConfigurationIncrementalGenerational::defaultMemorySpaceAllocated(MM_GCExtensionsBase *extensions, void* defaultMemorySpace)
260
{
261
MM_EnvironmentVLHGC env((J9JavaVM *)extensions->getOmrVM()->_language_vm);
262
MM_Configuration::defaultMemorySpaceAllocated(extensions, defaultMemorySpace);
263
/* initialize TaxationThreshold and RememberedSetCardBucketPool before first gc occurs and all of dependance has been set. */
264
((MM_IncrementalGenerationalGC *)extensions->getGlobalCollector())->initializeTaxationThreshold(&env);
265
extensions->interRegionRememberedSet->initializeRememberedSetCardBucketPool(&env);
266
}
267
268
/**
 * Initialize the incremental-generational (balanced/Tarok) configuration.
 *
 * Runs base configuration initialization, then fills in defaults for every
 * Tarok tuning knob that the user did not specify on the command line.
 *
 * NOTE(review): the default-filling below runs even when the base
 * MM_Configuration::initialize() failed; the failure is still propagated
 * through the return value, so this appears harmless - confirm.
 *
 * @param env the calling thread's environment
 * @return true on success, false if base initialization failed
 */
bool
MM_ConfigurationIncrementalGenerational::initialize(MM_EnvironmentBase *env)
{
	MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);

	bool result = MM_Configuration::initialize(env);

	/* By default disable hot field depth copying */
	env->disableHotFieldDepthCopy();

	if (result) {
		/* if no scan ordering was chosen, default to dynamic breadth-first */
		if (MM_GCExtensions::OMR_GC_SCAVENGER_SCANORDERING_NONE == extensions->scavengerScanOrdering) {
			extensions->scavengerScanOrdering = MM_GCExtensions::OMR_GC_SCAVENGER_SCANORDERING_DYNAMIC_BREADTH_FIRST;
		}
		extensions->setVLHGC(true);
	}

/* default region age limits; note these macros remain defined past this function */
#define DEFAULT_MAX_AGE_FOR_PGC_COUNT_BASED 24
#define DEFAULT_MAX_NURSERY_AGE 1
#define DEFAULT_MAX_AGE_FOR_ALLOCATION_BASED 5

	/* set default region maximum age if it is not specified yet; the default depends
	 * on whether region aging is allocation-based or PGC-count-based */
	if (0 == extensions->tarokRegionMaxAge) {
		if (extensions->tarokAllocationAgeEnabled) {
			extensions->tarokRegionMaxAge = DEFAULT_MAX_AGE_FOR_ALLOCATION_BASED;
		} else {
			extensions->tarokRegionMaxAge = DEFAULT_MAX_AGE_FOR_PGC_COUNT_BASED;
		}
	}

	if (!extensions->tarokNurseryMaxAge._wasSpecified) {
		/* set default nursery age */
		extensions->tarokNurseryMaxAge._valueSpecified = DEFAULT_MAX_NURSERY_AGE;
	} else {
		/* specified nursery age is out of range (must stay below the region max age
		 * chosen above) - correct it to default */
		if (extensions->tarokNurseryMaxAge._valueSpecified >= extensions->tarokRegionMaxAge) {
			extensions->tarokNurseryMaxAge._valueSpecified = DEFAULT_MAX_NURSERY_AGE;
		}
	}

	if (!extensions->tarokMinimumGMPWorkTargetBytes._wasSpecified) {
		/* default to a region size. No real reason for choosing this number other than that it is sized relative to the heap */
		extensions->tarokMinimumGMPWorkTargetBytes._valueSpecified = extensions->regionSize;
	}

	/* defaults for the dynamic-new-space-sizing expected ratio band */
	if (!extensions->dnssExpectedRatioMaximum._wasSpecified) {
		extensions->dnssExpectedRatioMaximum._valueSpecified = 0.05;
	}

	if (!extensions->dnssExpectedRatioMinimum._wasSpecified) {
		extensions->dnssExpectedRatioMinimum._valueSpecified = 0.02;
	}

	/* defaults for heap resize GC-ratio thresholds (expand at 5, contract at 2) */
	if (!extensions->heapExpansionGCRatioThreshold._wasSpecified) {
		extensions->heapExpansionGCRatioThreshold._valueSpecified = 5;
	}

	if (!extensions->heapContractionGCRatioThreshold._wasSpecified) {
		extensions->heapContractionGCRatioThreshold._valueSpecified = 2;
	}

	return result;
}
331
332
/**
 * Tear down the structures created by this configuration.
 *
 * Releases, in order: the sweep pool manager, the incremental card table, the
 * compressed card table, and (when compiled in) the heap region state table,
 * then runs base tearDown. The remembered-set card bucket pool is freed last,
 * after MM_Configuration::tearDown() - see the inline comment.
 *
 * @param env the calling thread's environment
 */
void
MM_ConfigurationIncrementalGenerational::tearDown(MM_EnvironmentBase *env)
{
	MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);

	if (NULL != extensions->sweepPoolManagerAddressOrderedList) {
		extensions->sweepPoolManagerAddressOrderedList->kill(env);
		extensions->sweepPoolManagerAddressOrderedList = NULL;
	}

	/* NOTE(review): the card table is killed with the VLHGC environment while the
	 * compressed card table below is killed with the base env - presumably each
	 * kill() takes what its class declares; confirm this asymmetry is intended. */
	if (NULL != extensions->cardTable) {
		extensions->cardTable->kill(MM_EnvironmentVLHGC::getEnvironment(env));
		extensions->cardTable = NULL;
	}

	if (NULL != extensions->compressedCardTable) {
		extensions->compressedCardTable->kill(env);
		extensions->compressedCardTable = NULL;
	}

#if defined(OMR_GC_VLHGC_CONCURRENT_COPY_FORWARD)
	/* the region state table was allocated directly from the forge, so it is killed through it */
	if (NULL != extensions->heapRegionStateTable) {
		extensions->heapRegionStateTable->kill(env->getForge());
		extensions->heapRegionStateTable = NULL;
	}
#endif /* defined(OMR_GC_VLHGC_CONCURRENT_COPY_FORWARD) */

	MM_Configuration::tearDown(env);

	// cleanup after extensions->heapRegionManager
	if (NULL != extensions->rememberedSetCardBucketPool) {
		extensions->getForge()->free(extensions->rememberedSetCardBucketPool);
		extensions->rememberedSetCardBucketPool = NULL;
	}
}
367
368
MM_HeapRegionManager *
MM_ConfigurationIncrementalGenerational::createHeapRegionManager(MM_EnvironmentBase *env)
{
	/* Region table entries are VLHGC descriptors, with the descriptor's own
	 * initializer/destructor hooks installed for per-region setup/teardown. */
	return MM_HeapRegionManagerVLHGC::newInstance(
		env,
		env->getExtensions()->regionSize,
		sizeof(MM_HeapRegionDescriptorVLHGC),
		MM_HeapRegionDescriptorVLHGC::initializer,
		MM_HeapRegionDescriptorVLHGC::destructor);
}
374
375
void
376
MM_ConfigurationIncrementalGenerational::cleanUpClassLoader(MM_EnvironmentBase *env, J9ClassLoader* classLoader)
377
{
378
MM_GCExtensionsBase *extensions = env->getExtensions();
379
MM_ClassLoaderRememberedSet *classLoaderRememberedSet = extensions->classLoaderRememberedSet;
380
if (MM_CycleState::CT_PARTIAL_GARBAGE_COLLECTION == env->_cycleState->_collectionType) {
381
/* during PGCs we should never unload a class loader which is remembered because it could have instances */
382
Assert_MM_false(classLoaderRememberedSet->isRemembered(env, classLoader));
383
}
384
classLoaderRememberedSet->killRememberedSet(env, classLoader);
385
}
386
387
388
void
389
MM_ConfigurationIncrementalGenerational::prepareParameters(OMR_VM *omrVM, UDATA minimumSpaceSize, UDATA minimumNewSpaceSize,
390
UDATA initialNewSpaceSize, UDATA maximumNewSpaceSize, UDATA minimumTenureSpaceSize, UDATA initialTenureSpaceSize,
391
UDATA maximumTenureSpaceSize, UDATA memoryMax, UDATA tenureFlags, MM_InitializationParameters *parameters)
392
{
393
MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(omrVM);
394
UDATA contextCount = MM_GlobalAllocationManagerTarok::calculateIdealManagedContextCount(extensions);
395
396
/* Each AC needs a region, so we adjust the calculated values as per generic formulas.
397
* Alternatively, we could leave min/initial size as is, but make early initialize fail (and thus fail to start VM),
398
* if specified sizes are lower than VLHGC specific values (regionSize*ACcount) */
399
minimumSpaceSize = OMR_MAX(minimumSpaceSize, extensions->regionSize * contextCount);
400
initialTenureSpaceSize = OMR_MAX(initialTenureSpaceSize, extensions->regionSize * contextCount);
401
402
MM_Configuration::prepareParameters(omrVM, minimumSpaceSize, minimumNewSpaceSize, initialNewSpaceSize, maximumNewSpaceSize,
403
minimumTenureSpaceSize, initialTenureSpaceSize, maximumTenureSpaceSize,
404
memoryMax, tenureFlags, parameters);
405
}
406
407
bool
408
MM_ConfigurationIncrementalGenerational::verifyRegionSize(MM_EnvironmentBase *env, UDATA regionSize)
409
{
410
return regionSize >= TAROK_MINIMUM_REGION_SIZE_BYTES;
411
}
412
413
bool
414
MM_ConfigurationIncrementalGenerational::initializeNUMAManager(MM_EnvironmentBase *env)
415
{
416
MM_GCExtensionsBase *extensions = env->getExtensions();
417
bool disabledByForce = extensions->numaForced && !extensions->_numaManager.isPhysicalNUMAEnabled();
418
if (!disabledByForce) {
419
extensions->_numaManager.shouldEnablePhysicalNUMA(true);
420
}
421
bool result = MM_Configuration::initializeNUMAManager(env);
422
423
if (result && !disabledByForce) {
424
/* shut off NUMA (even if enabled by force) if we are using too small a heap */
425
UDATA affinityLeaderCount = 0;
426
extensions->_numaManager.getAffinityLeaders(&affinityLeaderCount);
427
if ((1 + affinityLeaderCount) != MM_GlobalAllocationManagerTarok::calculateIdealManagedContextCount(extensions)) {
428
extensions->_numaManager.shouldEnablePhysicalNUMA(false);
429
result = extensions->_numaManager.recacheNUMASupport(static_cast<MM_EnvironmentBase *>(env));
430
/* startup can't fail if NUMAManager disabled */
431
Assert_MM_true(result);
432
}
433
}
434
435
return result;
436
}
437
438