GitHub Repository: PojavLauncherTeam/openj9
Path: blob/master/runtime/gc_modron_startup/mgcalloc.cpp
/*******************************************************************************
 * Copyright (c) 1991, 2021 IBM Corp. and others
 *
 * This program and the accompanying materials are made available under
 * the terms of the Eclipse Public License 2.0 which accompanies this
 * distribution and is available at https://www.eclipse.org/legal/epl-2.0/
 * or the Apache License, Version 2.0 which accompanies this distribution and
 * is available at https://www.apache.org/licenses/LICENSE-2.0.
 *
 * This Source Code may also be made available under the following
 * Secondary Licenses when the conditions for such availability set
 * forth in the Eclipse Public License, v. 2.0 are satisfied: GNU
 * General Public License, version 2 with the GNU Classpath
 * Exception [1] and GNU General Public License, version 2 with the
 * OpenJDK Assembly Exception [2].
 *
 * [1] https://www.gnu.org/software/classpath/license.html
 * [2] http://openjdk.java.net/legal/assembly-exception.html
 *
 * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 OR GPL-2.0 WITH Classpath-exception-2.0 OR LicenseRef-GPL-2.0 WITH Assembly-exception
 *******************************************************************************/

/**
 * @file
 * @ingroup GC_Modron_Startup
 */

#include <string.h>

#include "j9.h"
#include "j9cfg.h"
#include "j9consts.h"
#include "mmhook_internal.h"
#include "ModronAssertions.h"
#include "modronopt.h"
#include "omrgc.h"
#include "rommeth.h"

#include "AllocateDescription.hpp"
#include "AtomicOperations.hpp"
#include "EnvironmentBase.hpp"
#include "GlobalCollector.hpp"
#include "IndexableObjectAllocationModel.hpp"
#include "MemorySpace.hpp"
#include "MemorySubSpace.hpp"
#include "MixedObjectAllocationModel.hpp"
#include "modronapi.hpp"
#include "ObjectAccessBarrier.hpp"
#include "ObjectAccessBarrierAPI.hpp"
#include "ObjectAllocationInterface.hpp"
#include "ObjectModel.hpp"
#include "ObjectMonitor.hpp"
#if defined(J9VM_GC_REALTIME)
#include "Scheduler.hpp"
#endif /* J9VM_GC_REALTIME */
#include "VMAccess.hpp"

extern "C" {

static uintptr_t stackIterator(J9VMThread *currentThread, J9StackWalkState *walkState);
static void dumpStackFrames(J9VMThread *currentThread);
static void traceAllocateIndexableObject(J9VMThread *vmThread, J9Class* clazz, uintptr_t objSize, uintptr_t numberOfIndexedFields);
static J9Object * traceAllocateObject(J9VMThread *vmThread, J9Object * object, J9Class* clazz, uintptr_t objSize, uintptr_t numberOfIndexedFields = 0);
static bool traceObjectCheck(J9VMThread *vmThread, bool *shouldTriggerAllocationSampling = NULL);

#define STACK_FRAMES_TO_DUMP 8

/**
 * High level fast path allocate routine (used by VM and JIT) to allocate a single object. This method does not need to be called with
 * a resolve frame as it cannot cause a GC. If the attempt at allocation fails, the method will return NULL and it is the caller's
 * responsibility to call through to the "slow path" J9AllocateObject function after setting up a resolve frame.
 * NOTE: This function can only be called for instrumentable allocates!
 *
 * @param vmThread The thread requesting the allocation
 * @param clazz The class of the object to be allocated
 * @param allocateFlags a bitfield of flags from the following:
 *	OMR_GC_ALLOCATE_OBJECT_TENURED forces Old space allocation even on a Generational Heap
 *	OMR_GC_ALLOCATE_OBJECT_INSTRUMENTABLE set if this allocate was an instrumentable allocate
 *	OMR_GC_ALLOCATE_OBJECT_HASHED set if this allocation initializes the hash slot
 *	OMR_GC_ALLOCATE_OBJECT_NO_GC NOTE: this will be set unconditionally for this call
 * @return Pointer to the object header, or NULL
 */
J9Object *
J9AllocateObjectNoGC(J9VMThread *vmThread, J9Class *clazz, uintptr_t allocateFlags)
{
	MM_EnvironmentBase *env = MM_EnvironmentBase::getEnvironment(vmThread->omrVMThread);

#if defined(J9VM_GC_THREAD_LOCAL_HEAP)
	MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);
	if (extensions->instrumentableAllocateHookEnabled || !env->isInlineTLHAllocateEnabled()) {
		/* This function is restricted to only being used for instrumentable allocates, so we only need to check that one allocation hook.
		 * Note that we can't handle hooked allocates since we might be called without a JIT resolve frame, and that is required for us to
		 * report the allocation event.
		 */
		return NULL;
	}
#endif /* J9VM_GC_THREAD_LOCAL_HEAP */

	Assert_MM_true(allocateFlags & OMR_GC_ALLOCATE_OBJECT_INSTRUMENTABLE);
	// TODO: respect or reject tenured flag?
	Assert_MM_false(allocateFlags & OMR_GC_ALLOCATE_OBJECT_TENURED);
	Assert_MM_false(allocateFlags & OMR_GC_ALLOCATE_OBJECT_NON_ZERO_TLH);

	J9Object *objectPtr = NULL;

	if (!traceObjectCheck(vmThread)) {
		allocateFlags |= OMR_GC_ALLOCATE_OBJECT_NO_GC;
		if (J9CLASS_IS_ENSUREHASHED(clazz)) {
			allocateFlags |= OMR_GC_ALLOCATE_OBJECT_HASHED;
		}
		MM_MixedObjectAllocationModel mixedOAM(env, clazz, allocateFlags);
		if (mixedOAM.initializeAllocateDescription(env)) {
			env->_isInNoGCAllocationCall = true;
			objectPtr = OMR_GC_AllocateObject(vmThread->omrVMThread, &mixedOAM);
			if (NULL != objectPtr) {
				uintptr_t allocatedBytes = env->getExtensions()->objectModel.getConsumedSizeInBytesWithHeader(objectPtr);
				Assert_MM_true(allocatedBytes == mixedOAM.getAllocateDescription()->getContiguousBytes());
				if (LN_HAS_LOCKWORD(vmThread, objectPtr)) {
					j9objectmonitor_t initialLockword = VM_ObjectMonitor::getInitialLockword(vmThread->javaVM, clazz);
					if (0 != initialLockword) {
						j9objectmonitor_t *lockEA = J9OBJECT_MONITOR_EA(vmThread, objectPtr);
						J9_STORE_LOCKWORD(vmThread, lockEA, initialLockword);
					}
				}
			}
			env->_isInNoGCAllocationCall = false;
		}
	}

	if ((NULL != objectPtr) && J9_ARE_ALL_BITS_SET(clazz->classFlags, J9ClassContainsUnflattenedFlattenables)) {
		vmThread->javaVM->internalVMFunctions->defaultValueWithUnflattenedFlattenables(vmThread, clazz, objectPtr);
	}

	return objectPtr;
}
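
/* An illustrative caller pattern: a minimal sketch of how a fast path is expected to use
 * J9AllocateObjectNoGC, falling back to the "slow path" only once a resolve frame exists.
 * buildResolveFrame() and releaseResolveFrame() are hypothetical placeholders for the
 * VM's actual frame-management code, not functions defined in this codebase.
 */
#if 0
static J9Object *
allocateWithFallback(J9VMThread *vmThread, J9Class *clazz)
{
	/* Fast path: cannot GC, so it returns NULL instead of collecting. */
	J9Object *object = J9AllocateObjectNoGC(vmThread, clazz, OMR_GC_ALLOCATE_OBJECT_INSTRUMENTABLE);
	if (NULL == object) {
		buildResolveFrame(vmThread); /* hypothetical: the slow path may GC, so a frame is required */
		object = J9AllocateObject(vmThread, clazz, OMR_GC_ALLOCATE_OBJECT_INSTRUMENTABLE);
		releaseResolveFrame(vmThread); /* hypothetical */
	}
	return object;
}
#endif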

static uintptr_t
stackIterator(J9VMThread *currentThread, J9StackWalkState *walkState)
{
	if (NULL != walkState) {
		J9Method *method = walkState->method;
		const char *mc = "Missing_class";
		const char *mm = "Missing_method";
		const char *ms = "(Missing_signature)";
		U_16 mc_size = (U_16)strlen(mc);
		U_16 mm_size = (U_16)strlen(mm);
		U_16 ms_size = (U_16)strlen(ms);

#ifdef J9VM_INTERP_NATIVE_SUPPORT
		void *jit = walkState->jitInfo;
#else
		void *jit = NULL;
#endif

		if (NULL != method) {
			J9Class *methodClass = J9_CLASS_FROM_METHOD(method);
			J9ROMMethod *romMethod = J9_ROM_METHOD_FROM_RAM_METHOD(method);

			if (NULL != methodClass) {
				J9UTF8 *className = J9ROMCLASS_CLASSNAME(methodClass->romClass);

				if (NULL != className) {
					mc_size = J9UTF8_LENGTH(className);
					mc = (char *)J9UTF8_DATA(className);
				}
			}

			if (NULL != romMethod) {
				J9UTF8 *methodName = J9ROMMETHOD_NAME(romMethod);
				J9UTF8 *methodSignature = J9ROMMETHOD_SIGNATURE(romMethod);

				if (NULL != methodName) {
					mm_size = J9UTF8_LENGTH(methodName);
					mm = (char *)J9UTF8_DATA(methodName);
				}

				if (NULL != methodSignature) {
					ms_size = J9UTF8_LENGTH(methodSignature);
					ms = (char *)J9UTF8_DATA(methodSignature);
				}
			}
		}
		Trc_MM_MethodSampleContinue(currentThread, method, mc_size, mc, mm_size, mm, ms_size, ms, jit, walkState->pc);
	}
	return J9_STACKWALK_KEEP_ITERATING;
}

static void
dumpStackFrames(J9VMThread *currentThread)
{
	if (TrcEnabled_Trc_MM_MethodSampleContinue) {

		if (NULL != currentThread) {
			J9StackWalkState walkState;

			walkState.skipCount = 0;
			walkState.maxFrames = STACK_FRAMES_TO_DUMP;
			walkState.frameWalkFunction = stackIterator;
			walkState.walkThread = currentThread;
			walkState.flags = J9_STACKWALK_ITERATE_FRAMES |
					J9_STACKWALK_VISIBLE_ONLY |
					J9_STACKWALK_INCLUDE_NATIVES;
			currentThread->javaVM->walkStackFrames(currentThread, &walkState);
		}
	}
}

static void
traceAllocateIndexableObject(J9VMThread *vmThread, J9Class* clazz, uintptr_t objSize, uintptr_t numberOfIndexedFields)
{
	J9ArrayClass* arrayClass = (J9ArrayClass*)clazz;
	uintptr_t arity = arrayClass->arity;
	J9UTF8* utf;
	/* Max arity is 255, so define a bracket string of size 256*2 ("[]" repeated 256 times) */
	static const char *brackets =
		"[][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][]"
		"[][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][]"
		"[][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][]"
		"[][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][]";

	utf = J9ROMCLASS_CLASSNAME(arrayClass->leafComponentType->romClass);

	Trc_MM_J9AllocateIndexableObject_outOfLineObjectAllocation(vmThread, clazz, J9UTF8_LENGTH(utf), J9UTF8_DATA(utf), arity * 2, brackets, objSize, numberOfIndexedFields);
	return;
}
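
/* A worked example of the bracket-string trick above, with a concrete class assumed purely
 * for illustration: the arity is rendered by passing (arity * 2) as the length of the static
 * string, so no per-call formatting is needed. For a three-dimensional String array the
 * trace point receives length 6 and prints "[][][]" alongside "java/lang/String".
 */
#if 0
	/* arity == 3: the length passed is 3 * 2 == 6, so only "[][][]" of the 512-char string is used */
	Trc_MM_J9AllocateIndexableObject_outOfLineObjectAllocation(vmThread, clazz, 16, "java/lang/String", 6, brackets, objSize, numberOfIndexedFields);
#endif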

static J9Object *
traceAllocateObject(J9VMThread *vmThread, J9Object * object, J9Class* clazz, uintptr_t objSize, uintptr_t numberOfIndexedFields)
{
	bool shouldTriggerObjectAllocationSampling = false;
	uintptr_t byteGranularity = 0;

	if (traceObjectCheck(vmThread, &shouldTriggerObjectAllocationSampling)) {
		MM_EnvironmentBase *env = MM_EnvironmentBase::getEnvironment(vmThread->omrVMThread);
		MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);
		J9ROMClass *romClass = clazz->romClass;
		byteGranularity = extensions->oolObjectSamplingBytesGranularity;

		if (J9ROMCLASS_IS_ARRAY(romClass)) {
			traceAllocateIndexableObject(vmThread, clazz, objSize, numberOfIndexedFields);
		} else {
			Trc_MM_J9AllocateObject_outOfLineObjectAllocation(
				vmThread, clazz, J9UTF8_LENGTH(J9ROMCLASS_CLASSNAME(romClass)), J9UTF8_DATA(J9ROMCLASS_CLASSNAME(romClass)), objSize);
		}

		/* Keep the remainder; we want this to happen so that we don't miss objects
		 * after seeing large objects
		 */
		env->_oolTraceAllocationBytes = (env->_oolTraceAllocationBytes) % byteGranularity;
	}

	if (shouldTriggerObjectAllocationSampling) {
		PORT_ACCESS_FROM_VMC(vmThread);
		MM_EnvironmentBase *env = MM_EnvironmentBase::getEnvironment(vmThread->omrVMThread);
		MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);

		byteGranularity = extensions->objectSamplingBytesGranularity;
		/* Keep the remainder; we want this to happen so that we don't miss objects
		 * after seeing large objects
		 */
		uintptr_t allocSizeInsideTLH = env->getAllocatedSizeInsideTLH();
		uintptr_t remainder = (env->_traceAllocationBytes + allocSizeInsideTLH) % byteGranularity;
		env->_traceAllocationBytesCurrentTLH = allocSizeInsideTLH + (env->_traceAllocationBytes % byteGranularity) - remainder;
		env->_traceAllocationBytes = (env->_traceAllocationBytes) % byteGranularity;

		if (!extensions->needDisableInlineAllocation()) {
			env->setTLHSamplingTop(byteGranularity - remainder);
		}

		TRIGGER_J9HOOK_MM_OBJECT_ALLOCATION_SAMPLING(
			extensions->hookInterface,
			vmThread,
			j9time_hires_clock(),
			J9HOOK_MM_OBJECT_ALLOCATION_SAMPLING,
			object,
			clazz,
			objSize);
	}
	return object;
}

/* Required to check whether we're going to trace or not: a Java stack trace needs
 * stack frames built up, so we can't be in the NoGC version of the allocates.
 *
 * Returns true if we should trace the object.
 */
static bool
traceObjectCheck(J9VMThread *vmThread, bool *shouldTriggerAllocationSampling)
{
	MM_EnvironmentBase *env = MM_EnvironmentBase::getEnvironment(vmThread->omrVMThread);
	MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);
	uintptr_t byteGranularity = 0;

	if (NULL != shouldTriggerAllocationSampling) {
		byteGranularity = extensions->objectSamplingBytesGranularity;
		*shouldTriggerAllocationSampling = (env->_traceAllocationBytes + env->getAllocatedSizeInsideTLH() - env->_traceAllocationBytesCurrentTLH) >= byteGranularity;
	}

	if (extensions->doOutOfLineAllocationTrace) {
		byteGranularity = extensions->oolObjectSamplingBytesGranularity;
		if (env->_oolTraceAllocationBytes >= byteGranularity) {
			return true;
		}
	}
	return false;
}
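
/* A worked numeric example of the out-of-line check above, with the granularity assumed
 * purely for illustration. Once the running byte count crosses the granularity the object
 * is traced, and traceAllocateObject() then keeps only the overshoot so a large object
 * neither shortens nor lengthens the next sampling window.
 */
#if 0
	uintptr_t byteGranularity = 16 * 1024 * 1024; /* assumed oolObjectSamplingBytesGranularity */
	uintptr_t oolBytes = 16777300;                /* a full 16 MB window plus 84 bytes of overshoot */
	bool traced = (oolBytes >= byteGranularity);  /* true: this allocation is traced */
	oolBytes %= byteGranularity;                  /* 84: the overshoot seeds the next window */
#endif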

/**
 * High level fast path allocate routine (used by VM and JIT) to allocate an indexable object. This method does not need to be called with
 * a resolve frame as it cannot cause a GC. If the attempt at allocation fails, the method will return NULL and it is the caller's
 * responsibility to call through to the "slow path" J9AllocateIndexableObject function after setting up a resolve frame.
 * NOTE: This function can only be called for instrumentable allocates!
 *
 * @param vmThread The thread requesting the allocation
 * @param clazz The class of the object to be allocated
 * @param numberOfIndexedFields The number of indexable fields required in the allocated object
 * @param allocateFlags a bitfield of flags from the following:
 *	OMR_GC_ALLOCATE_OBJECT_TENURED forces Old space allocation even on a Generational Heap
 *	OMR_GC_ALLOCATE_OBJECT_INSTRUMENTABLE set if this allocate was an instrumentable allocate
 *	OMR_GC_ALLOCATE_OBJECT_HASHED set if this allocation initializes the hash slot
 *	OMR_GC_ALLOCATE_OBJECT_NO_GC NOTE: this will be set unconditionally for this call
 * @return Pointer to the object header, or NULL
 */
J9Object *
J9AllocateIndexableObjectNoGC(J9VMThread *vmThread, J9Class *clazz, uint32_t numberOfIndexedFields, uintptr_t allocateFlags)
{
	MM_EnvironmentBase *env = MM_EnvironmentBase::getEnvironment(vmThread->omrVMThread);

#if defined(J9VM_GC_THREAD_LOCAL_HEAP)
	MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);
	if (extensions->instrumentableAllocateHookEnabled || !env->isInlineTLHAllocateEnabled()) {
		/* This function is restricted to only being used for instrumentable allocates, so we only need to check that one allocation hook.
		 * Note that we can't handle hooked allocates since we might be called without a JIT resolve frame, and that is required for us to
		 * report the allocation event.
		 */
		return NULL;
	}
#endif /* J9VM_GC_THREAD_LOCAL_HEAP */

	Assert_MM_true(allocateFlags & OMR_GC_ALLOCATE_OBJECT_INSTRUMENTABLE);
	// TODO: respect or reject tenured flag?
	Assert_MM_false(allocateFlags & OMR_GC_ALLOCATE_OBJECT_TENURED);
	if (OMR_GC_ALLOCATE_OBJECT_NON_ZERO_TLH == (allocateFlags & OMR_GC_ALLOCATE_OBJECT_NON_ZERO_TLH)) {
		Assert_MM_true(GC_ObjectModel::SCAN_PRIMITIVE_ARRAY_OBJECT == extensions->objectModel.getScanType(clazz));
	}

	J9Object *objectPtr = NULL;
	if (!traceObjectCheck(vmThread)) {
		allocateFlags |= OMR_GC_ALLOCATE_OBJECT_NO_GC;
		MM_IndexableObjectAllocationModel indexableOAM(env, clazz, numberOfIndexedFields, allocateFlags);
		if (indexableOAM.initializeAllocateDescription(env)) {
			env->_isInNoGCAllocationCall = true;
			objectPtr = OMR_GC_AllocateObject(vmThread->omrVMThread, &indexableOAM);
			if (NULL != objectPtr) {
				uintptr_t allocatedBytes = env->getExtensions()->objectModel.getConsumedSizeInBytesWithHeader(objectPtr);
				Assert_MM_true(allocatedBytes == indexableOAM.getAllocateDescription()->getContiguousBytes());
			}
			env->_isInNoGCAllocationCall = false;
		}
	}
	/* TODO: Need to implement a more optimal path for cases where barriers are not required or where a batch barrier can be used. */
	if ((NULL != objectPtr) && J9_ARE_ALL_BITS_SET(clazz->classFlags, J9ClassContainsUnflattenedFlattenables)) {
		MM_ObjectAccessBarrierAPI objectAccessBarrier(vmThread);
		J9Class * elementClass = ((J9ArrayClass *) clazz)->componentType;
		j9object_t defaultValue = elementClass->flattenedClassCache->defaultValue;
		for (UDATA index = 0; index < numberOfIndexedFields; index++) {
			objectAccessBarrier.inlineIndexableObjectStoreObject(vmThread, objectPtr, index, defaultValue);
		}
	}

	return objectPtr;
}
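
/* The same illustrative fallback pattern as sketched above for mixed objects, applied to
 * indexable objects: try the NoGC fast path first and only build a resolve frame for the
 * "slow path" when it returns NULL. buildResolveFrame()/releaseResolveFrame() remain
 * hypothetical placeholders for the VM's actual frame-management code.
 */
#if 0
static J9Object *
allocateArrayWithFallback(J9VMThread *vmThread, J9Class *arrayClazz, uint32_t numberOfIndexedFields)
{
	J9Object *array = J9AllocateIndexableObjectNoGC(vmThread, arrayClazz, numberOfIndexedFields, OMR_GC_ALLOCATE_OBJECT_INSTRUMENTABLE);
	if (NULL == array) {
		buildResolveFrame(vmThread); /* hypothetical */
		array = J9AllocateIndexableObject(vmThread, arrayClazz, numberOfIndexedFields, OMR_GC_ALLOCATE_OBJECT_INSTRUMENTABLE);
		releaseResolveFrame(vmThread); /* hypothetical */
	}
	return array;
}
#endif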

/**
 * High level allocate routine (used by VM) to allocate a single object.
 * @param clazz the class of the object to be allocated
 * @param allocateFlags a bitfield of flags from the following:
 *	OMR_GC_ALLOCATE_OBJECT_TENURED forces Old space allocation even on a Generational Heap
 *	OMR_GC_ALLOCATE_OBJECT_INSTRUMENTABLE set if this allocate was an instrumentable allocate
 *	OMR_GC_ALLOCATE_OBJECT_HASHED set if this allocation initializes the hash slot
 * @return pointer to the object header, or NULL
 */
J9Object *
J9AllocateObject(J9VMThread *vmThread, J9Class *clazz, uintptr_t allocateFlags)
{
	MM_EnvironmentBase *env = MM_EnvironmentBase::getEnvironment(vmThread->omrVMThread);

#if defined(J9VM_GC_THREAD_LOCAL_HEAP)
	if (!env->isInlineTLHAllocateEnabled()) {
		/* For the duration of this call, restore the TLH allocate fields;
		 * we will hide the real heapAlloc again on exit to fool the JIT/Interpreter
		 * into thinking the TLH is full if needed.
		 */
		env->enableInlineTLHAllocate();
	}
#endif /* J9VM_GC_THREAD_LOCAL_HEAP */

	Assert_MM_false(allocateFlags & OMR_GC_ALLOCATE_OBJECT_NON_ZERO_TLH);
	Assert_MM_false(allocateFlags & OMR_GC_ALLOCATE_OBJECT_NO_GC);

	J9Object *objectPtr = NULL;
	/* Replaced classes have poisoned the totalInstanceSize such that they are not allocatable,
	 * so inline allocate and NoGC allocate have already failed. If this allocator is reached
	 * with a replaced class, update to the current version and allocate that.
	 */
	clazz = J9_CURRENT_CLASS(clazz);
	if (J9CLASS_IS_ENSUREHASHED(clazz)) {
		allocateFlags |= OMR_GC_ALLOCATE_OBJECT_HASHED;
	}
	MM_MixedObjectAllocationModel mixedOAM(env, clazz, allocateFlags);
	if (mixedOAM.initializeAllocateDescription(env)) {
		objectPtr = OMR_GC_AllocateObject(vmThread->omrVMThread, &mixedOAM);
		if (NULL != objectPtr) {
			uintptr_t allocatedBytes = env->getExtensions()->objectModel.getConsumedSizeInBytesWithHeader(objectPtr);
			Assert_MM_true(allocatedBytes == mixedOAM.getAllocateDescription()->getContiguousBytes());
			if (LN_HAS_LOCKWORD(vmThread, objectPtr)) {
				j9objectmonitor_t initialLockword = VM_ObjectMonitor::getInitialLockword(vmThread->javaVM, clazz);
				if (0 != initialLockword) {
					j9objectmonitor_t *lockEA = J9OBJECT_MONITOR_EA(vmThread, objectPtr);
					J9_STORE_LOCKWORD(vmThread, lockEA, initialLockword);
				}
			}
		}
	}

	MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);
	if (env->_failAllocOnExcessiveGC && (NULL != objectPtr)) {
		/* If we have garbage collected too much, return NULL as if we had failed to allocate the object (effectively triggering an OOM).
		 * TODO: The ordering of this call w.r.t. the allocation really needs to change - this is just a temporary solution until
		 * we reorganize the rest of the code base.
		 */
		objectPtr = NULL;
		/* We stop failing subsequent allocations, to give the Java program some room
		 * to recover (release some resources) after the OutOfMemoryError until the next GC occurs.
		 */
		env->_failAllocOnExcessiveGC = false;
		extensions->excessiveGCLevel = excessive_gc_fatal_consumed;
		/* Sync storage since we have memset the object to NULL and
		 * filled in the header information even though we will be returning NULL.
		 */
		MM_AtomicOperations::writeBarrier();
		Trc_MM_ObjectAllocationFailedDueToExcessiveGC(vmThread);
	}

	uintptr_t sizeInBytesRequired = mixedOAM.getAllocateDescription()->getBytesRequested();
	if (NULL != objectPtr) {
		/* The hook could release access and so the object address could change (the value is preserved). */
		if (OMR_GC_ALLOCATE_OBJECT_INSTRUMENTABLE == (OMR_GC_ALLOCATE_OBJECT_INSTRUMENTABLE & allocateFlags)) {
			TRIGGER_J9HOOK_VM_OBJECT_ALLOCATE_INSTRUMENTABLE(
				vmThread->javaVM->hookInterface,
				vmThread,
				objectPtr,
				sizeInBytesRequired);
		} else {
			TRIGGER_J9HOOK_VM_OBJECT_ALLOCATE(
				vmThread->javaVM->hookInterface,
				vmThread,
				objectPtr,
				sizeInBytesRequired);
		}

		if (!mixedOAM.getAllocateDescription()->isCompletedFromTlh()) {
			TRIGGER_J9HOOK_MM_PRIVATE_NON_TLH_ALLOCATION(
				extensions->privateHookInterface,
				vmThread->omrVMThread,
				objectPtr);
		}

		uintptr_t lowThreshold = extensions->lowAllocationThreshold;
		uintptr_t highThreshold = extensions->highAllocationThreshold;
		if ((sizeInBytesRequired >= lowThreshold) && (sizeInBytesRequired <= highThreshold)) {
			Trc_MM_AllocationThreshold_triggerAllocationThresholdEvent(vmThread, sizeInBytesRequired, lowThreshold, highThreshold);
			TRIGGER_J9HOOK_VM_OBJECT_ALLOCATE_WITHIN_THRESHOLD(
				vmThread->javaVM->hookInterface,
				vmThread,
				objectPtr,
				sizeInBytesRequired,
				lowThreshold,
				highThreshold);
		}
	}

	if (NULL == objectPtr) {
		MM_MemorySpace *memorySpace = mixedOAM.getAllocateDescription()->getMemorySpace();
		PORT_ACCESS_FROM_ENVIRONMENT(env);
		/* We're going to return NULL; trace this. */
		Trc_MM_ObjectAllocationFailed(vmThread, sizeInBytesRequired, clazz, memorySpace->getName(), memorySpace);
		dumpStackFrames(vmThread);
		TRIGGER_J9HOOK_MM_PRIVATE_OUT_OF_MEMORY(extensions->privateHookInterface, vmThread->omrVMThread, j9time_hires_clock(), J9HOOK_MM_PRIVATE_OUT_OF_MEMORY, memorySpace, memorySpace->getName());
	} else {
		objectPtr = traceAllocateObject(vmThread, objectPtr, clazz, sizeInBytesRequired);
		if (extensions->isStandardGC()) {
			if (OMR_GC_ALLOCATE_OBJECT_TENURED == (allocateFlags & OMR_GC_ALLOCATE_OBJECT_TENURED)) {
				/* The object must be allocated in Tenure if that was requested */
				Assert_MM_true(extensions->isOld(objectPtr));
			}
#if defined(J9VM_GC_REALTIME)
		} else if (extensions->isMetronomeGC()) {
			if (env->saveObjects((omrobjectptr_t)objectPtr)) {
				j9gc_startGCIfTimeExpired(vmThread->omrVMThread);
				env->restoreObjects((omrobjectptr_t*)&objectPtr);
			}
#endif /* defined(J9VM_GC_REALTIME) */
		}
	}

	if ((NULL != objectPtr) && J9_ARE_ALL_BITS_SET(clazz->classFlags, J9ClassContainsUnflattenedFlattenables)) {
		vmThread->javaVM->internalVMFunctions->defaultValueWithUnflattenedFlattenables(vmThread, clazz, objectPtr);
	}

#if defined(J9VM_GC_THREAD_LOCAL_HEAP)
	if (extensions->needDisableInlineAllocation()) {
		env->disableInlineTLHAllocate();
	}
#endif /* J9VM_GC_THREAD_LOCAL_HEAP */

	return objectPtr;
}
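
/* A worked example of the allocation-threshold window tested above, with bounds assumed
 * purely for illustration (the real values come from the GC extensions configuration).
 * The J9HOOK_VM_OBJECT_ALLOCATE_WITHIN_THRESHOLD hook only fires for sizes inside the
 * inclusive [low, high] window.
 */
#if 0
	uintptr_t lowThreshold = 1024;   /* assumed low bound */
	uintptr_t highThreshold = 65536; /* assumed high bound */
	bool fires2KB = ((2048 >= lowThreshold) && (2048 <= highThreshold));       /* true: hook fires */
	bool fires512B = ((512 >= lowThreshold) && (512 <= highThreshold));        /* false: below the window */
	bool fires1MB = ((1048576 >= lowThreshold) && (1048576 <= highThreshold)); /* false: above the window */
#endif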

/**
 * High level allocate routine (used by VM) to allocate an indexable object (array).
 * @param allocateFlags a bitfield of flags from the following:
 *	OMR_GC_ALLOCATE_OBJECT_TENURED forces Old space allocation even on a Generational Heap
 *	OMR_GC_ALLOCATE_OBJECT_INSTRUMENTABLE set if this allocate was an instrumentable allocate
 *	OMR_GC_ALLOCATE_OBJECT_HASHED set if this allocation initializes the hash slot
 * @return pointer to the object header, or NULL
 */
J9Object *
J9AllocateIndexableObject(J9VMThread *vmThread, J9Class *clazz, uint32_t numberOfIndexedFields, uintptr_t allocateFlags)
{
	MM_EnvironmentBase *env = MM_EnvironmentBase::getEnvironment(vmThread->omrVMThread);
	MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);

	Assert_MM_false(allocateFlags & OMR_GC_ALLOCATE_OBJECT_NO_GC);
	if (OMR_GC_ALLOCATE_OBJECT_NON_ZERO_TLH == (allocateFlags & OMR_GC_ALLOCATE_OBJECT_NON_ZERO_TLH)) {
		Assert_MM_true(GC_ObjectModel::SCAN_PRIMITIVE_ARRAY_OBJECT == extensions->objectModel.getScanType(clazz));
	}

#if defined(J9VM_GC_THREAD_LOCAL_HEAP)
	if (!env->isInlineTLHAllocateEnabled()) {
		/* For the duration of this call, restore the TLH allocate fields;
		 * we will hide the real heapAlloc again on exit to fool the JIT/Interpreter
		 * into thinking the TLH is full if needed.
		 */
		env->enableInlineTLHAllocate();
	}
#endif /* J9VM_GC_THREAD_LOCAL_HEAP */

	J9Object *objectPtr = NULL;
	uintptr_t sizeInBytesRequired = 0;
	MM_IndexableObjectAllocationModel indexableOAM(env, clazz, numberOfIndexedFields, allocateFlags);
	if (indexableOAM.initializeAllocateDescription(env)) {
		objectPtr = OMR_GC_AllocateObject(vmThread->omrVMThread, &indexableOAM);
		if (NULL != objectPtr) {
			uintptr_t allocatedBytes = env->getExtensions()->objectModel.getConsumedSizeInBytesWithHeader(objectPtr);
			Assert_MM_true(allocatedBytes == indexableOAM.getAllocateDescription()->getContiguousBytes());
		}
	}

	if (env->_failAllocOnExcessiveGC && (NULL != objectPtr)) {
		/* If we have garbage collected too much, return NULL as if we had failed to allocate the object (effectively triggering an OOM).
		 * TODO: The ordering of this call w.r.t. the allocation really needs to change - this is just a temporary solution until
		 * we reorganize the rest of the code base.
		 */
		objectPtr = NULL;
		/* We stop failing subsequent allocations, to give the Java program some room
		 * to recover (release some resources) after the OutOfMemoryError until the next GC occurs.
		 */
		env->_failAllocOnExcessiveGC = false;
		extensions->excessiveGCLevel = excessive_gc_fatal_consumed;
		/* Sync storage since we have memset the object to NULL and
		 * filled in the header information even though we will be returning NULL.
		 */
		MM_AtomicOperations::storeSync();
		Trc_MM_ArrayObjectAllocationFailedDueToExcessiveGC(vmThread);
	}

	sizeInBytesRequired = indexableOAM.getAllocateDescription()->getBytesRequested();
	if (NULL != objectPtr) {
		/* The hook could release access and so the object address could change (the value is preserved). Since this
		 * means the hook could write back a different value to the variable, it must be a valid lvalue (i.e. not cast).
		 */
		if (OMR_GC_ALLOCATE_OBJECT_INSTRUMENTABLE == (OMR_GC_ALLOCATE_OBJECT_INSTRUMENTABLE & allocateFlags)) {
			TRIGGER_J9HOOK_VM_OBJECT_ALLOCATE_INSTRUMENTABLE(
				vmThread->javaVM->hookInterface,
				vmThread,
				objectPtr,
				sizeInBytesRequired);
		} else {
			TRIGGER_J9HOOK_VM_OBJECT_ALLOCATE(
				vmThread->javaVM->hookInterface,
				vmThread,
				objectPtr,
				sizeInBytesRequired);
		}

		/* If this was a non-TLH allocation, trigger the hook */
		if (!indexableOAM.getAllocateDescription()->isCompletedFromTlh()) {
			TRIGGER_J9HOOK_MM_PRIVATE_NON_TLH_ALLOCATION(
				extensions->privateHookInterface,
				vmThread->omrVMThread,
				objectPtr);
		}

		uintptr_t lowThreshold = extensions->lowAllocationThreshold;
		uintptr_t highThreshold = extensions->highAllocationThreshold;
		if ((sizeInBytesRequired >= lowThreshold) && (sizeInBytesRequired <= highThreshold)) {
			Trc_MM_AllocationThreshold_triggerAllocationThresholdEventIndexable(vmThread, sizeInBytesRequired, lowThreshold, highThreshold);
			TRIGGER_J9HOOK_VM_OBJECT_ALLOCATE_WITHIN_THRESHOLD(
				vmThread->javaVM->hookInterface,
				vmThread,
				objectPtr,
				sizeInBytesRequired,
				lowThreshold,
				highThreshold);
		}

		objectPtr = traceAllocateObject(vmThread, objectPtr, clazz, sizeInBytesRequired, (uintptr_t)numberOfIndexedFields);
		if (extensions->isStandardGC()) {
			if (OMR_GC_ALLOCATE_OBJECT_TENURED == (allocateFlags & OMR_GC_ALLOCATE_OBJECT_TENURED)) {
				/* The object must be allocated in Tenure if that was requested */
				Assert_MM_true(extensions->isOld(objectPtr));
			}
#if defined(J9VM_GC_REALTIME)
		} else if (extensions->isMetronomeGC()) {
			if (env->saveObjects((omrobjectptr_t)objectPtr)) {
				j9gc_startGCIfTimeExpired(vmThread->omrVMThread);
				env->restoreObjects((omrobjectptr_t*)&objectPtr);
			}
#endif /* defined(J9VM_GC_REALTIME) */
		}
	} else {
		/* We're going to return NULL; trace this. */
		PORT_ACCESS_FROM_ENVIRONMENT(env);
		MM_MemorySpace *memorySpace = indexableOAM.getAllocateDescription()->getMemorySpace();
		Trc_MM_ArrayObjectAllocationFailed(vmThread, sizeInBytesRequired, clazz, memorySpace->getName(), memorySpace);
		dumpStackFrames(vmThread);
		TRIGGER_J9HOOK_MM_PRIVATE_OUT_OF_MEMORY(extensions->privateHookInterface, vmThread->omrVMThread, j9time_hires_clock(), J9HOOK_MM_PRIVATE_OUT_OF_MEMORY, memorySpace, memorySpace->getName());
	}
	/* TODO: Need to implement a more optimal path for cases where barriers are not required or where a batch barrier can be used. */
	if ((NULL != objectPtr) && J9_ARE_ALL_BITS_SET(clazz->classFlags, J9ClassContainsUnflattenedFlattenables)) {
		MM_ObjectAccessBarrierAPI objectAccessBarrier(vmThread);
		J9Class * elementClass = ((J9ArrayClass *) clazz)->componentType;
		j9object_t defaultValue = elementClass->flattenedClassCache->defaultValue;
		for (UDATA index = 0; index < numberOfIndexedFields; index++) {
			objectAccessBarrier.inlineIndexableObjectStoreObject(vmThread, objectPtr, index, defaultValue);
		}
	}

#if defined(J9VM_GC_THREAD_LOCAL_HEAP)
	if (extensions->needDisableInlineAllocation()) {
		env->disableInlineTLHAllocate();
	}
#endif /* J9VM_GC_THREAD_LOCAL_HEAP */

	return objectPtr;
}

/**
 * Async message callback routine called whenever J9HOOK_VM_OBJECT_ALLOCATE_INSTRUMENTABLE
 * or J9HOOK_VM_OBJECT_ALLOCATE_WITHIN_THRESHOLD is registered or unregistered. Each time it is
 * called we check whether at least one user is still registered. If so, we disable inline
 * TLH allocation to force the JIT/Interpreter to go out of line (i.e. call J9AllocateObject et al.)
 * for allocates, so that the required calls to the hook routine(s) can be made.
 *
 * @param vmThread - thread whose inline allocates need enabling/disabling
 */
void
memoryManagerTLHAsyncCallbackHandler(J9VMThread *vmThread, IDATA handlerKey, void *userData)
{
	J9JavaVM * vm = (J9JavaVM*)userData;
	MM_EnvironmentBase *env = MM_EnvironmentBase::getEnvironment(vmThread->omrVMThread);
	MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);
	MM_ObjectAllocationInterface* allocationInterface = env->_objectAllocationInterface;

	extensions->instrumentableAllocateHookEnabled = (0 != J9_EVENT_IS_HOOKED(vm->hookInterface, J9HOOK_VM_OBJECT_ALLOCATE_INSTRUMENTABLE));

	if (J9_EVENT_IS_HOOKED(vm->hookInterface, J9HOOK_VM_OBJECT_ALLOCATE_WITHIN_THRESHOLD)) {
		Trc_MM_memoryManagerTLHAsyncCallbackHandler_eventIsHooked(vmThread);
		if (extensions->isStandardGC() || extensions->isVLHGC()) {
#if defined(J9VM_GC_THREAD_LOCAL_HEAP)
			extensions->disableInlineCacheForAllocationThreshold = (extensions->lowAllocationThreshold < (extensions->tlhMaximumSize + extensions->tlhMinimumSize));
#endif /* defined(J9VM_GC_THREAD_LOCAL_HEAP) */
		} else if (extensions->isSegregatedHeap()) {
#if defined(J9VM_GC_SEGREGATED_HEAP)
			extensions->disableInlineCacheForAllocationThreshold = (extensions->lowAllocationThreshold <= J9VMGC_SIZECLASSES_MAX_SMALL_SIZE_BYTES);
#endif /* defined(J9VM_GC_SEGREGATED_HEAP) */
		}
	} else {
		Trc_MM_memoryManagerTLHAsyncCallbackHandler_eventNotHooked(vmThread);
		extensions->disableInlineCacheForAllocationThreshold = false;
	}

	if (extensions->isStandardGC() || extensions->isVLHGC()) {
#if defined(J9VM_GC_THREAD_LOCAL_HEAP)
		if (extensions->needDisableInlineAllocation()) {
			Trc_MM_memoryManagerTLHAsyncCallbackHandler_disableInlineTLHAllocates(vmThread, extensions->lowAllocationThreshold, extensions->highAllocationThreshold, extensions->tlhMinimumSize, extensions->tlhMaximumSize);
			if (allocationInterface->cachedAllocationsEnabled(env)) {
				/* BEN TODO: Collapse the env->enable/disableInlineTLHAllocate with these enable/disableCachedAllocations */
				env->disableInlineTLHAllocate();
				allocationInterface->disableCachedAllocations(env);
			}
		} else {
			Trc_MM_memoryManagerTLHAsyncCallbackHandler_enableInlineTLHAllocates(vmThread, extensions->lowAllocationThreshold, extensions->highAllocationThreshold, extensions->tlhMinimumSize, extensions->tlhMaximumSize);
			if (!allocationInterface->cachedAllocationsEnabled(env)) {
				/* BEN TODO: Collapse the env->enable/disableInlineTLHAllocate with these enable/disableCachedAllocations */
				env->enableInlineTLHAllocate();
				allocationInterface->enableCachedAllocations(env);
			}
		}

		if (allocationInterface->cachedAllocationsEnabled(env)) {
			uintptr_t samplingBytesGranularity = extensions->objectSamplingBytesGranularity;
			if (UDATA_MAX != extensions->objectSamplingBytesGranularity) {
				env->_traceAllocationBytes = 0;
				env->_traceAllocationBytesCurrentTLH = 0;
				env->setTLHSamplingTop(samplingBytesGranularity);
			} else if (!env->isInlineTLHAllocateEnabled()) {
				env->resetTLHSamplingTop();
			}
		}

#endif /* defined(J9VM_GC_THREAD_LOCAL_HEAP) */
	} else if (extensions->isSegregatedHeap()) {
#if defined(J9VM_GC_SEGREGATED_HEAP)
		if (extensions->needDisableInlineAllocation()) {
			Trc_MM_memoryManagerTLHAsyncCallbackHandler_disableAllocationCache(vmThread, extensions->lowAllocationThreshold, extensions->highAllocationThreshold);
			if (allocationInterface->cachedAllocationsEnabled(env)) {
				allocationInterface->disableCachedAllocations(env);
			}
		} else {
			Trc_MM_memoryManagerTLHAsyncCallbackHandler_enableAllocationCache(vmThread, extensions->lowAllocationThreshold, extensions->highAllocationThreshold);
			if (!allocationInterface->cachedAllocationsEnabled(env)) {
				allocationInterface->enableCachedAllocations(env);
			}
		}
#endif /* defined(J9VM_GC_SEGREGATED_HEAP) */
	}
}
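
/* A worked example of the inline-cache gate computed above, with sizes assumed purely
 * for illustration. An object smaller than a freshly refreshed TLH could be satisfied
 * inline and never reach the out-of-line path that reports the threshold event, so the
 * inline cache is disabled whenever the low threshold falls below
 * tlhMaximumSize + tlhMinimumSize.
 */
#if 0
	uintptr_t tlhMinimumSize = 2 * 1024;          /* assumed */
	uintptr_t tlhMaximumSize = 128 * 1024;        /* assumed */
	uintptr_t lowAllocationThreshold = 64 * 1024; /* assumed */
	/* 64 KB < 130 KB, so inline allocation caching must be disabled to observe the event */
	bool disableInlineCache = (lowAllocationThreshold < (tlhMaximumSize + tlhMinimumSize));
#endif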
} /* extern "C" */