Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/openj9
Path: blob/master/runtime/gc_base/IndexableObjectAllocationModel.cpp
5986 views
1
/*******************************************************************************
 * Copyright (c) 1991, 2021 IBM Corp. and others
 *
 * This program and the accompanying materials are made available under
 * the terms of the Eclipse Public License 2.0 which accompanies this
 * distribution and is available at https://www.eclipse.org/legal/epl-2.0/
 * or the Apache License, Version 2.0 which accompanies this distribution and
 * is available at https://www.apache.org/licenses/LICENSE-2.0.
 *
 * This Source Code may also be made available under the following
 * Secondary Licenses when the conditions for such availability set
 * forth in the Eclipse Public License, v. 2.0 are satisfied: GNU
 * General Public License, version 2 with the GNU Classpath
 * Exception [1] and GNU General Public License, version 2 with the
 * OpenJDK Assembly Exception [2].
 *
 * [1] https://www.gnu.org/software/classpath/license.html
 * [2] http://openjdk.java.net/legal/assembly-exception.html
 *
 * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 OR GPL-2.0 WITH Classpath-exception-2.0 OR LicenseRef-GPL-2.0 WITH Assembly-exception
 *******************************************************************************/
22
23
#include "ArrayletObjectModel.hpp"
24
#include "GCExtensions.hpp"
25
#include "IndexableObjectAllocationModel.hpp"
26
#include "Math.hpp"
27
#include "MemorySpace.hpp"
28
#if defined(J9VM_GC_ENABLE_DOUBLE_MAP)
29
#include "ArrayletLeafIterator.hpp"
30
#include "HeapRegionManager.hpp"
31
#include "HeapRegionDescriptorVLHGC.hpp"
32
#include "Heap.hpp"
33
#endif /* J9VM_GC_ENABLE_DOUBLE_MAP */
34
35
/**
 * Allocation description and layout initialization. This is called before OMR allocates
 * (and possibly zeroes) the raw bytes for the arraylet spine.
 *
 * Computes the spine size (optionally reserving an extra slot for the hashcode when the
 * object is pre-hashed), determines any additional layout bytes needed outside the spine
 * for the chosen arraylet layout, and finalizes the allocation description. The object
 * is marked allocatable only for layouts that can proceed on the current path.
 *
 * @param env the calling thread's environment
 * @return true if the description was finalized and allocation may proceed, false otherwise
 */
bool
MM_IndexableObjectAllocationModel::initializeAllocateDescription(MM_EnvironmentBase *env)
{
	/* prerequisite base class initialization of description */
	if (!isAllocatable()) {
		return false;
	}

	/* continue, with reservations -- allocatable is re-asserted per layout below */
	setAllocatable(false);
	MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);
	uintptr_t spineBytes = extensions->indexableObjectModel.getSpineSize(_class, _layout, _numberOfArraylets, _dataSize, _alignSpineDataSection);
#if defined (J9VM_GC_MODRON_COMPACTION) || defined (J9VM_GC_GENERATIONAL)
	if (_allocateDescription.getPreHashFlag()) {
		/* only grow the spine when the hashcode slot would land exactly at its end */
		if (spineBytes == extensions->indexableObjectModel.getHashcodeOffset(_class, _layout, _numberOfIndexedFields)) {
			/* Add extra uintptr_t for hash */
			spineBytes += sizeof(uintptr_t);
		}
	}
#endif /* defined (J9VM_GC_MODRON_COMPACTION) || defined (J9VM_GC_GENERATIONAL) */
	spineBytes = extensions->objectModel.adjustSizeInBytes(spineBytes);

	/* determine size of layout overhead (additional to spine bytes) and finalize allocation description */
	uintptr_t layoutSizeInBytes = 0;
	switch (_layout) {
	case GC_ArrayletObjectModel::Illegal:
		/* invalid layout - not allocatable */
		Assert_MM_unreachable();
		break;

	case GC_ArrayletObjectModel::InlineContiguous:
		/* all good -- no bytes outside the spine */
		setAllocatable(true);
		break;

	case GC_ArrayletObjectModel::Discontiguous:
		/* non-empty discontiguous arrays require slow-path allocate */
		if (isGCAllowed() || (0 == _numberOfIndexedFields)) {
			/* _numberOfArraylets discontiguous leaves, all contains leaf size bytes */
			layoutSizeInBytes = _dataSize;
			_allocateDescription.setChunkedArray(true);
			Trc_MM_allocateAndConnectNonContiguousArraylet_Entry(env->getLanguageVMThread(),
					_numberOfIndexedFields, spineBytes, _numberOfArraylets);
			setAllocatable(true);
		}
		break;

	case GC_ArrayletObjectModel::Hybrid:
		Assert_MM_true(0 < _numberOfArraylets);
		/* hybrid arrays always require slow-path allocate */
		if (isGCAllowed()) {
			/* (_dataSize % leaf size) bytes in spine, ((n-1) * leaf size) bytes in (_numberOfArraylets - 1) leaves */
			layoutSizeInBytes = env->getOmrVM()->_arrayletLeafSize * (_numberOfArraylets - 1);
			_allocateDescription.setChunkedArray(true);
			Trc_MM_allocateAndConnectNonContiguousArraylet_Entry(env->getLanguageVMThread(),
					_numberOfIndexedFields, spineBytes, _numberOfArraylets);
			setAllocatable(true);
		}
		break;

	default:
		Assert_MM_unreachable();
		break;
	}

	if (isAllocatable()) {
		/* set total request size and layout metadata to finalize the description */
		_allocateDescription.setBytesRequested(spineBytes + layoutSizeInBytes);
		_allocateDescription.setNumArraylets(_numberOfArraylets);
		_allocateDescription.setSpineBytes(spineBytes);
		return true;
	}
	return false;
}
113
114
/**
 * Initializer. This is called after OMR has allocated raw (possibly zeroed) bytes for the spine.
 *
 * Writes the object header and array size into the spine, records the spine in the
 * allocation description, lays out arraylet/arrayoid pointers for discontiguous and
 * hybrid layouts, and initializes the hashcode slot when pre-hashing was requested.
 *
 * @param env the calling thread's environment
 * @param allocatedBytes raw (possibly zeroed) spine memory returned by OMR
 * @return pointer to the initialized indexable object, or NULL on failure
 */
omrobjectptr_t
MM_IndexableObjectAllocationModel::initializeIndexableObject(MM_EnvironmentBase *env, void *allocatedBytes)
{
	/* Set array object header and size (in elements) and set description spine pointer */
	MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);
	GC_ArrayObjectModel *indexableObjectModel = &extensions->indexableObjectModel;
	J9IndexableObject *spine = (J9IndexableObject*)initializeJavaObject(env, allocatedBytes);
	_allocateDescription.setSpine(spine);
	if (NULL != spine) {
		/* Set the array size */
		if (getAllocateDescription()->isChunkedArray()) {
			indexableObjectModel->setSizeInElementsForDiscontiguous(spine, _numberOfIndexedFields);
#if defined(J9VM_ENV_DATA64)
			/* dataAddr is not meaningful for a discontiguous spine -- cleared explicitly */
			indexableObjectModel->setDataAddrForDiscontiguous(spine, NULL);
#endif /* J9VM_ENV_DATA64 */
		} else {
			indexableObjectModel->setSizeInElementsForContiguous(spine, _numberOfIndexedFields);
#if defined(J9VM_ENV_DATA64)
			indexableObjectModel->setDataAddrForContiguous(spine);
#endif /* J9VM_ENV_DATA64 */
		}
	}


	/* Lay out arraylet and arrayoid pointers */
	switch (_layout) {
	case GC_ArrayletObjectModel::InlineContiguous:
		/* all data already lives in the spine -- nothing further to lay out */
		Assert_MM_true(1 == _numberOfArraylets);
		break;

	case GC_ArrayletObjectModel::Discontiguous:
	case GC_ArrayletObjectModel::Hybrid:
		if (NULL != spine) {
			if(0 == _numberOfIndexedFields) {
				/* Don't try to initialize the arrayoid for an empty NUA */
				Trc_MM_allocateAndConnectNonContiguousArraylet_Exit(env->getLanguageVMThread(), spine);
				break;
			}
			Trc_MM_allocateAndConnectNonContiguousArraylet_Summary(env->getLanguageVMThread(),
					_numberOfIndexedFields, getAllocateDescription()->getContiguousBytes(), _numberOfArraylets);
			/* may return NULL (leaf allocation failure) or a moved spine (GC during leaf allocation) */
			spine = layoutDiscontiguousArraylet(env, spine);
			Trc_MM_allocateAndConnectNonContiguousArraylet_Exit(env->getLanguageVMThread(), spine);
		} else {
			Trc_MM_allocateAndConnectNonContiguousArraylet_spineFailure(env->getLanguageVMThread());
		}
		break;

	default:
		Assert_MM_unreachable();
		break;
	}

	if (NULL != spine) {
		/* Initialize hashcode slot */
		if (getAllocateDescription()->getPreHashFlag()) {
			env->getExtensions()->objectModel.initializeHashSlot((J9JavaVM*)env->getLanguageVM(), (omrobjectptr_t)spine);
		}
		Assert_MM_true(env->getExtensions()->objectModel.isIndexable((omrobjectptr_t)spine));
	}

	/* description and returned pointer must agree (layoutDiscontiguousArraylet keeps them in sync) */
	Assert_MM_true(spine == _allocateDescription.getSpine());
	return (omrobjectptr_t)spine;
}
180
181
/**
182
* For contiguous arraylet all data is in the spine but arrayoid pointers must still be laid down.
183
*
184
* @return initialized arraylet spine with its arraylet pointers initialized.
185
*/
186
MMINLINE J9IndexableObject *
187
MM_IndexableObjectAllocationModel::layoutContiguousArraylet(MM_EnvironmentBase *env, J9IndexableObject *spine)
188
{
189
Assert_MM_true(_numberOfArraylets == _allocateDescription.getNumArraylets());
190
MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);
191
bool const compressed = env->compressObjectReferences();
192
193
/* set arraylet pointers in the spine. these all point into the data part of the spine */
194
fj9object_t *arrayoidPtr = extensions->indexableObjectModel.getArrayoidPointer(spine);
195
uintptr_t leafOffset = (uintptr_t)GC_SlotObject::addToSlotAddress(arrayoidPtr, _numberOfArraylets, compressed);
196
if (_alignSpineDataSection) {
197
leafOffset = MM_Math::roundToCeiling(sizeof(uint64_t), leafOffset);
198
}
199
uintptr_t arrayletLeafSize = env->getOmrVM()->_arrayletLeafSize;
200
for (uintptr_t i = 0; i < _numberOfArraylets; i++) {
201
GC_SlotObject slotObject(env->getOmrVM(), arrayoidPtr);
202
slotObject.writeReferenceToSlot((omrobjectptr_t)leafOffset);
203
leafOffset += arrayletLeafSize;
204
arrayoidPtr = GC_SlotObject::addToSlotAddress(arrayoidPtr, 1, compressed);
205
}
206
207
return spine;
208
}
209
210
/**
 * For discontiguous or hybrid arraylet spine is allocated first and leaves are sequentially
 * allocated and attached to the spine. The allocation description saves and restores the
 * spine pointer in case a GC occurs while allocating the leaves.
 *
 * If a leaf allocation fails the spine and preceding arraylets are abandoned as floating
 * garbage and NULL is returned.
 *
 * @param env the calling thread's environment
 * @param spine the freshly allocated spine to attach leaves to
 * @return initialized arraylet spine with attached arraylet leaves, or NULL
 */
MMINLINE J9IndexableObject *
MM_IndexableObjectAllocationModel::layoutDiscontiguousArraylet(MM_EnvironmentBase *env, J9IndexableObject *spine)
{
	Assert_MM_true(_numberOfArraylets == _allocateDescription.getNumArraylets());

	MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);
	GC_ArrayObjectModel *indexableObjectModel = &extensions->indexableObjectModel;
	bool const compressed = env->compressObjectReferences();

	/* determine how many bytes to allocate outside of the spine (in arraylet leaves) */
	const uintptr_t arrayletLeafSize = env->getOmrVM()->_arrayletLeafSize;
	Assert_MM_true(_allocateDescription.getBytesRequested() >= _allocateDescription.getContiguousBytes());
	uintptr_t bytesRemaining = _allocateDescription.getBytesRequested() - _allocateDescription.getContiguousBytes();
	Assert_MM_true((0 == (bytesRemaining % arrayletLeafSize)) || (GC_ArrayletObjectModel::Hybrid != _layout));
	/* hybrid arraylets store _dataSize % arrayletLeafSize bytes in the spine, remainder in _numberOfArraylets-1 leaves */

	/* allocate leaf for each arraylet and attach it to its leaf pointer in the spine */
	uintptr_t arrayoidIndex = 0;
	fj9object_t *arrayoidPtr = indexableObjectModel->getArrayoidPointer(spine);
	while (0 < bytesRemaining) {
		/* allocate the next arraylet leaf (may trigger a GC) */
		void *leaf = env->_objectAllocationInterface->allocateArrayletLeaf(env, &_allocateDescription,
				_allocateDescription.getMemorySpace(), true);

		/* if leaf allocation failed set the result to NULL and return */
		if (NULL == leaf) {
			/* spine and preceding arraylets are now floating garbage */
			Trc_MM_allocateAndConnectNonContiguousArraylet_leafFailure(env->getLanguageVMThread());
			_allocateDescription.setSpine(NULL);
			spine = NULL;
			break;
		}

		/* refresh the spine -- it might move if we GC while allocating the leaf */
		spine = _allocateDescription.getSpine();
		arrayoidPtr = indexableObjectModel->getArrayoidPointer(spine);

		/* set the arrayoid pointer in the spine to point to the new leaf */
		GC_SlotObject slotObject(env->getOmrVM(), GC_SlotObject::addToSlotAddress(arrayoidPtr, arrayoidIndex, compressed));
		slotObject.writeReferenceToSlot((omrobjectptr_t)leaf);

		/* OMR_MIN guards the final (possibly partial) hybrid leaf from underflowing bytesRemaining */
		bytesRemaining -= OMR_MIN(bytesRemaining, arrayletLeafSize);
		arrayoidIndex += 1;
	}

	if (NULL != spine) {
		switch (_layout) {
		case GC_ArrayletObjectModel::Discontiguous:
			indexableObjectModel->AssertArrayletIsDiscontiguous(spine);
			Assert_MM_true(arrayoidIndex == _numberOfArraylets);
#if defined(J9VM_GC_ENABLE_DOUBLE_MAP)
			if (indexableObjectModel->isDoubleMappingEnabled()) {
				/*
				 * There are some special cases where double mapping an arraylet is
				 * not necessary; isArrayletDataDiscontiguous() details those cases.
				 */
				if (indexableObjectModel->isArrayletDataDiscontiguous(spine)) {
					/* failure here is tolerated -- callers fall back to copying (see doubleMapArraylets) */
					doubleMapArraylets(env, (J9Object *)spine, NULL);
				}
			}
#endif /* J9VM_GC_ENABLE_DOUBLE_MAP */
			break;

		case GC_ArrayletObjectModel::Hybrid:
#if defined(J9VM_GC_ENABLE_DOUBLE_MAP)
			/* Unreachable if double map is enabled */
			if (indexableObjectModel->isDoubleMappingEnabled()) {
				Assert_MM_double_map_unreachable();
			}
#endif /* J9VM_GC_ENABLE_DOUBLE_MAP */
			/* last arrayoid points to end of arrayoid array in spine header (object-aligned if
			 * required). (data size % leaf size) bytes of data are stored here (may be empty).
			 */
			Assert_MM_true(arrayoidIndex == (_numberOfArraylets - 1));
			{
				uintptr_t leafOffset = (uintptr_t)GC_SlotObject::addToSlotAddress(arrayoidPtr, _numberOfArraylets, compressed);
				if (_alignSpineDataSection) {
					leafOffset = MM_Math::roundToCeiling(env->getObjectAlignmentInBytes(), leafOffset);
				}
				/* set the last arrayoid pointer to point to remainder data */
				GC_SlotObject slotObject(env->getOmrVM(), GC_SlotObject::addToSlotAddress(arrayoidPtr, arrayoidIndex, compressed));
				slotObject.writeReferenceToSlot((omrobjectptr_t)leafOffset);
			}
			break;

		default:
			Assert_MM_unreachable();
			break;
		}
	}

	return spine;
}
313
314
#if defined(J9VM_GC_ENABLE_DOUBLE_MAP)
315
#if !((defined(LINUX) || defined(OSX)) && defined(J9VM_ENV_DATA64))
316
/* Double map is only supported on LINUX 64 bit Systems for now */
317
#error "Platform not supported by Double Map API"
318
#endif /* !((defined(LINUX) || defined(OSX)) && defined(J9VM_ENV_DATA64)) */
319
void *
320
MM_IndexableObjectAllocationModel::doubleMapArraylets(MM_EnvironmentBase *env, J9Object *objectPtr, void *preferredAddress)
321
{
322
MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);
323
J9JavaVM *javaVM = extensions->getJavaVM();
324
325
GC_ArrayletLeafIterator arrayletLeafIterator(javaVM, (J9IndexableObject *)objectPtr);
326
MM_Heap *heap = extensions->getHeap();
327
UDATA arrayletLeafSize = env->getOmrVM()->_arrayletLeafSize;
328
UDATA arrayletLeafCount = MM_Math::roundToCeiling(arrayletLeafSize, _dataSize) / arrayletLeafSize;
329
Trc_MM_double_map_Entry(env->getLanguageVMThread(), (void *)objectPtr, arrayletLeafSize, arrayletLeafCount);
330
331
void *result = NULL;
332
333
#define ARRAYLET_ALLOC_THRESHOLD 64
334
void *leaves[ARRAYLET_ALLOC_THRESHOLD];
335
void **arrayletLeaveAddrs = leaves;
336
if (arrayletLeafCount > ARRAYLET_ALLOC_THRESHOLD) {
337
arrayletLeaveAddrs = (void **)env->getForge()->allocate(arrayletLeafCount * sizeof(uintptr_t), MM_AllocationCategory::GC_HEAP, J9_GET_CALLSITE());
338
}
339
340
if (NULL == arrayletLeaveAddrs) {
341
return NULL;
342
}
343
344
GC_SlotObject *slotObject = NULL;
345
uintptr_t count = 0;
346
347
while (NULL != (slotObject = arrayletLeafIterator.nextLeafPointer())) {
348
void *currentLeaf = slotObject->readReferenceFromSlot();
349
arrayletLeaveAddrs[count] = currentLeaf;
350
count++;
351
}
352
353
/* Number of arraylet leaves in the iterator must match the number of leaves calculated */
354
Assert_MM_true(arrayletLeafCount == count);
355
356
GC_SlotObject objectSlot(env->getOmrVM(), extensions->indexableObjectModel.getArrayoidPointer((J9IndexableObject *)objectPtr));
357
J9Object *firstLeafSlot = objectSlot.readReferenceFromSlot();
358
359
MM_HeapRegionDescriptorVLHGC *firstLeafRegionDescriptor = (MM_HeapRegionDescriptorVLHGC *)heap->getHeapRegionManager()->tableDescriptorForAddress(firstLeafSlot);
360
361
/* Retrieve actual page size */
362
UDATA pageSize = heap->getPageSize();
363
364
/* For now we double map the entire region of all arraylet leaves. This might change in the future if hybrid regions are introduced. */
365
uintptr_t byteAmount = arrayletLeafSize * arrayletLeafCount;
366
367
/* Get heap and from there call an OMR API that will doble map everything */
368
result = heap->doubleMapRegions(env, arrayletLeaveAddrs, count, arrayletLeafSize, byteAmount,
369
&firstLeafRegionDescriptor->_arrayletDoublemapID,
370
pageSize,
371
preferredAddress);
372
373
if (arrayletLeafCount > ARRAYLET_ALLOC_THRESHOLD) {
374
env->getForge()->free((void *)arrayletLeaveAddrs);
375
}
376
377
/*
378
* Double map failed.
379
* If doublemap fails the caller must handle it appropriately. The only case being
380
* JNI critical, where it will fall back to copying each element of the array to
381
* a temporary array (logic handled by JNI Critical). It might hurt performance
382
* but execution won't halt.
383
*/
384
if (NULL == firstLeafRegionDescriptor->_arrayletDoublemapID.address) {
385
Trc_MM_double_map_Failed(env->getLanguageVMThread());
386
result = NULL;
387
}
388
389
Trc_MM_double_map_Exit(env->getLanguageVMThread(), result);
390
return result;
391
}
392
#endif /* J9VM_GC_ENABLE_DOUBLE_MAP */
393
394
395