// Path: blob/master/runtime/gc_modron_startup/mgcalloc.cpp
/*******************************************************************************1* Copyright (c) 1991, 2021 IBM Corp. and others2*3* This program and the accompanying materials are made available under4* the terms of the Eclipse Public License 2.0 which accompanies this5* distribution and is available at https://www.eclipse.org/legal/epl-2.0/6* or the Apache License, Version 2.0 which accompanies this distribution and7* is available at https://www.apache.org/licenses/LICENSE-2.0.8*9* This Source Code may also be made available under the following10* Secondary Licenses when the conditions for such availability set11* forth in the Eclipse Public License, v. 2.0 are satisfied: GNU12* General Public License, version 2 with the GNU Classpath13* Exception [1] and GNU General Public License, version 2 with the14* OpenJDK Assembly Exception [2].15*16* [1] https://www.gnu.org/software/classpath/license.html17* [2] http://openjdk.java.net/legal/assembly-exception.html18*19* SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 OR GPL-2.0 WITH Classpath-exception-2.0 OR LicenseRef-GPL-2.0 WITH Assembly-exception20*******************************************************************************/2122/**23* @file24* @ingroup GC_Modron_Startup25*/2627#include <string.h>2829#include "j9.h"30#include "j9cfg.h"31#include "j9consts.h"32#include "mmhook_internal.h"33#include "ModronAssertions.h"34#include "modronopt.h"35#include "omrgc.h"36#include "rommeth.h"3738#include "AllocateDescription.hpp"39#include "AtomicOperations.hpp"40#include "EnvironmentBase.hpp"41#include "GlobalCollector.hpp"42#include "IndexableObjectAllocationModel.hpp"43#include "MemorySpace.hpp"44#include "MemorySubSpace.hpp"45#include "MixedObjectAllocationModel.hpp"46#include "modronapi.hpp"47#include "ObjectAccessBarrier.hpp"48#include "ObjectAccessBarrierAPI.hpp"49#include "ObjectAllocationInterface.hpp"50#include "ObjectModel.hpp"51#include "ObjectMonitor.hpp"52#if defined (J9VM_GC_REALTIME)53#include 
"Scheduler.hpp"54#endif /* J9VM_GC_REALTIME */55#include "VMAccess.hpp"5657extern "C" {5859static uintptr_t stackIterator(J9VMThread *currentThread, J9StackWalkState *walkState);60static void dumpStackFrames(J9VMThread *currentThread);61static void traceAllocateIndexableObject(J9VMThread *vmThread, J9Class* clazz, uintptr_t objSize, uintptr_t numberOfIndexedFields);62static J9Object * traceAllocateObject(J9VMThread *vmThread, J9Object * object, J9Class* clazz, uintptr_t objSize, uintptr_t numberOfIndexedFields=0);63static bool traceObjectCheck(J9VMThread *vmThread, bool *shouldTriggerAllocationSampling = NULL);6465#define STACK_FRAMES_TO_DUMP 86667/**68* High level fast path allocate routine (used by VM and JIT) to allocate a single object. This method does not need to be called with69* a resolve frame as it cannot cause a GC. If the attempt at allocation fails, the method will return null and it is the caller's70* responsibility to call through to the "slow path" J9AllocateObject function after setting up a resolve frame.71* NOTE: This function can only be called for instrumentable allocates!72*73* @param vmThread The thread requesting the allocation74* @param clazz The class of the object to be allocated75* @param allocateFlags a bitfield of flags from the following76* OMR_GC_ALLOCATE_OBJECT_TENURED forced Old space allocation even if Generational Heap77* OMR_GC_ALLOCATE_OBJECT_INSTRUMENTABLE set if this allocate was an instrumentable allocate78* OMR_GC_ALLOCATE_OBJECT_HASHED set if this allocation initializes hash slot79* OMR_GC_ALLOCATE_OBJECT_NO_GC NOTE: this will be set unconditionally for this call80* @return Pointer to the object header, or NULL81*/82J9Object *83J9AllocateObjectNoGC(J9VMThread *vmThread, J9Class *clazz, uintptr_t allocateFlags)84{85MM_EnvironmentBase *env = MM_EnvironmentBase::getEnvironment(vmThread->omrVMThread);8687#if defined(J9VM_GC_THREAD_LOCAL_HEAP)88MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);89if 
(extensions->instrumentableAllocateHookEnabled || !env->isInlineTLHAllocateEnabled()) {90/* This function is restricted to only being used for instrumentable allocates so we only need to check that one allocation hook.91* Note that we can't handle hooked allocates since we might be called without a JIT resolve frame and that is required for us to92* report the allocation event.93*/94return NULL;95}96#endif /* J9VM_GC_THREAD_LOCAL_HEAP */9798Assert_MM_true(allocateFlags & OMR_GC_ALLOCATE_OBJECT_INSTRUMENTABLE);99// TODO: respect or reject tenured flag?100Assert_MM_false(allocateFlags & OMR_GC_ALLOCATE_OBJECT_TENURED);101Assert_MM_false(allocateFlags & OMR_GC_ALLOCATE_OBJECT_NON_ZERO_TLH);102103J9Object *objectPtr = NULL;104105if(!traceObjectCheck(vmThread)){106allocateFlags |= OMR_GC_ALLOCATE_OBJECT_NO_GC;107if (J9CLASS_IS_ENSUREHASHED(clazz)) {108allocateFlags |= OMR_GC_ALLOCATE_OBJECT_HASHED;109}110MM_MixedObjectAllocationModel mixedOAM(env, clazz, allocateFlags);111if (mixedOAM.initializeAllocateDescription(env)) {112env->_isInNoGCAllocationCall = true;113objectPtr = OMR_GC_AllocateObject(vmThread->omrVMThread, &mixedOAM);114if (NULL != objectPtr) {115uintptr_t allocatedBytes = env->getExtensions()->objectModel.getConsumedSizeInBytesWithHeader(objectPtr);116Assert_MM_true(allocatedBytes == mixedOAM.getAllocateDescription()->getContiguousBytes());117if (LN_HAS_LOCKWORD(vmThread, objectPtr)) {118j9objectmonitor_t initialLockword = VM_ObjectMonitor::getInitialLockword(vmThread->javaVM, clazz);119if (0 != initialLockword) {120j9objectmonitor_t *lockEA = J9OBJECT_MONITOR_EA(vmThread, objectPtr);121J9_STORE_LOCKWORD(vmThread, lockEA, initialLockword);122}123}124}125env->_isInNoGCAllocationCall = false;126}127}128129if ((NULL != objectPtr) && J9_ARE_ALL_BITS_SET(clazz->classFlags, J9ClassContainsUnflattenedFlattenables)) {130vmThread->javaVM->internalVMFunctions->defaultValueWithUnflattenedFlattenables(vmThread, clazz, objectPtr);131}132133return 
objectPtr;134}135136static uintptr_t137stackIterator(J9VMThread *currentThread, J9StackWalkState *walkState)138{139if (NULL != walkState) {140J9Method *method = walkState->method;141const char *mc = "Missing_class";142const char *mm = "Missing_method";143const char *ms = "(Missing_signature)";144U_16 mc_size = (U_16)strlen(mc);145U_16 mm_size = (U_16)strlen(mm);146U_16 ms_size = (U_16)strlen(ms);147148#ifdef J9VM_INTERP_NATIVE_SUPPORT149void *jit = walkState->jitInfo;150#else151void *jit = NULL;152#endif153154if (NULL != method) {155J9Class *methodClass = J9_CLASS_FROM_METHOD(method);156J9ROMMethod *romMethod = J9_ROM_METHOD_FROM_RAM_METHOD(method);157158if (NULL != methodClass) {159J9UTF8 *className = J9ROMCLASS_CLASSNAME(methodClass->romClass);160161if (NULL != className) {162mc_size = J9UTF8_LENGTH(className);163mc = (char *)J9UTF8_DATA(className);164}165}166167if (NULL != romMethod) {168J9UTF8 *methodName = J9ROMMETHOD_NAME(romMethod);169J9UTF8 *methodSignature = J9ROMMETHOD_SIGNATURE(romMethod);170171if (NULL != methodName) {172mm_size = J9UTF8_LENGTH(methodName);173mm = (char *)J9UTF8_DATA(methodName);174}175176if (NULL != methodSignature) {177ms_size = J9UTF8_LENGTH(methodSignature);178ms = (char *)J9UTF8_DATA(methodSignature);179}180}181}182Trc_MM_MethodSampleContinue(currentThread, method, mc_size, mc, mm_size, mm, ms_size, ms, jit, walkState->pc);183}184return J9_STACKWALK_KEEP_ITERATING;185}186187static void188dumpStackFrames(J9VMThread *currentThread)189{190if (TrcEnabled_Trc_MM_MethodSampleContinue) {191192if (NULL != currentThread) {193J9StackWalkState walkState;194195walkState.skipCount = 0;196walkState.maxFrames = STACK_FRAMES_TO_DUMP;197walkState.frameWalkFunction = stackIterator;198walkState.walkThread = currentThread;199walkState.flags = J9_STACKWALK_ITERATE_FRAMES |200J9_STACKWALK_VISIBLE_ONLY |201J9_STACKWALK_INCLUDE_NATIVES;202currentThread->javaVM->walkStackFrames(currentThread, &walkState);203}204}205}206207static 
void208traceAllocateIndexableObject(J9VMThread *vmThread, J9Class* clazz, uintptr_t objSize, uintptr_t numberOfIndexedFields)209{210J9ArrayClass* arrayClass = (J9ArrayClass*) clazz;211uintptr_t arity = arrayClass->arity;212J9UTF8* utf;213/* Max arity is 255, so define a bracket array of size 256*2 */214static const char * brackets =215"[][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][]"216"[][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][]"217"[][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][]"218"[][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][]";219220221utf = J9ROMCLASS_CLASSNAME(arrayClass->leafComponentType->romClass);222223Trc_MM_J9AllocateIndexableObject_outOfLineObjectAllocation(vmThread, clazz, J9UTF8_LENGTH(utf), J9UTF8_DATA(utf), arity*2, brackets, objSize, numberOfIndexedFields);224return;225}226227static J9Object *228traceAllocateObject(J9VMThread *vmThread, J9Object * object, J9Class* clazz, uintptr_t objSize, uintptr_t numberOfIndexedFields)229{230bool shouldTrigggerObjectAllocationSampling = false;231uintptr_t byteGranularity = 0;232233if (traceObjectCheck(vmThread, &shouldTrigggerObjectAllocationSampling)){234MM_EnvironmentBase *env = MM_EnvironmentBase::getEnvironment(vmThread->omrVMThread);235MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);236J9ROMClass *romClass = clazz->romClass;237byteGranularity = extensions->oolObjectSamplingBytesGranularity;238239if (J9ROMCLASS_IS_ARRAY(romClass)){240traceAllocateIndexableObject(vmThread, clazz, objSize, numberOfIndexedFields);241}else{242Trc_MM_J9AllocateObject_outOfLineObjectAllocation(243vmThread, clazz, J9UTF8_LENGTH(J9ROMCLASS_CLASSNAME(romClass)), 
J9UTF8_DATA(J9ROMCLASS_CLASSNAME(romClass)), objSize);244}245246/* Keep the remainder, want this to happen so that we don't miss objects247* after seeing large objects248*/249env->_oolTraceAllocationBytes = (env->_oolTraceAllocationBytes) % byteGranularity;250}251252if (shouldTrigggerObjectAllocationSampling) {253PORT_ACCESS_FROM_VMC(vmThread);254MM_EnvironmentBase *env = MM_EnvironmentBase::getEnvironment(vmThread->omrVMThread);255MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);256257byteGranularity = extensions->objectSamplingBytesGranularity;258/* Keep the remainder, want this to happen so that we don't miss objects259* after seeing large objects260*/261uintptr_t allocSizeInsideTLH = env->getAllocatedSizeInsideTLH();262uintptr_t remainder = (env->_traceAllocationBytes + allocSizeInsideTLH) % byteGranularity;263env->_traceAllocationBytesCurrentTLH = allocSizeInsideTLH + (env->_traceAllocationBytes % byteGranularity) - remainder;264env->_traceAllocationBytes = (env->_traceAllocationBytes) % byteGranularity;265266if (!extensions->needDisableInlineAllocation()) {267268env->setTLHSamplingTop(byteGranularity - remainder);269}270271TRIGGER_J9HOOK_MM_OBJECT_ALLOCATION_SAMPLING(272extensions->hookInterface,273vmThread,274j9time_hires_clock(),275J9HOOK_MM_OBJECT_ALLOCATION_SAMPLING,276object,277clazz,278objSize);279}280return object;281}282283/* Required to check if we're going to trace or not since a java stack trace needs284* stack frames built up; therefore we can't be in the noGC version of allocates285*286* Returns true if we should trace the object287* */288static bool289traceObjectCheck(J9VMThread *vmThread, bool *shouldTriggerAllocationSampling)290{291MM_EnvironmentBase *env = MM_EnvironmentBase::getEnvironment(vmThread->omrVMThread);292MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);293uintptr_t byteGranularity = 0;294295if (NULL != shouldTriggerAllocationSampling) {296byteGranularity = 
extensions->objectSamplingBytesGranularity;297*shouldTriggerAllocationSampling = (env->_traceAllocationBytes + env->getAllocatedSizeInsideTLH() - env->_traceAllocationBytesCurrentTLH) >= byteGranularity;298}299300if (extensions->doOutOfLineAllocationTrace){301byteGranularity = extensions->oolObjectSamplingBytesGranularity;302if(env->_oolTraceAllocationBytes >= byteGranularity){303return true;304}305}306return false;307}308309/**310* High level fast path allocate routine (used by VM and JIT) to allocate an indexable object. This method does not need to be called with311* a resolve frame as it cannot cause a GC. If the attempt at allocation fails, the method will return null and it is the caller's312* responsibility to call through to the "slow path" J9AllocateIndexableObject function after setting up a resolve frame.313* NOTE: This function can only be called for instrumentable allocates!314*315* @param vmThread The thread requesting the allocation316* @param clazz The class of the object to be allocated317* @param numberOfIndexedFields The number of indexable fields required in the allocated object318* @param allocateFlags a bitfield of flags from the following319* OMR_GC_ALLOCATE_OBJECT_TENURED forced Old space allocation even if Generational Heap320* OMR_GC_ALLOCATE_OBJECT_INSTRUMENTABLE set if this allocate was an instrumentable allocate321* OMR_GC_ALLOCATE_OBJECT_HASHED set if this allocation initializes hash slot322* OMR_GC_ALLOCATE_OBJECT_NO_GC NOTE: this will be set unconditionally for this call323* @return Pointer to the object header, or NULL324*/325J9Object *326J9AllocateIndexableObjectNoGC(J9VMThread *vmThread, J9Class *clazz, uint32_t numberOfIndexedFields, uintptr_t allocateFlags)327{328MM_EnvironmentBase *env = MM_EnvironmentBase::getEnvironment(vmThread->omrVMThread);329330#if defined(J9VM_GC_THREAD_LOCAL_HEAP)331MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);332if (extensions->instrumentableAllocateHookEnabled || 
!env->isInlineTLHAllocateEnabled()) {333/* This function is restricted to only being used for instrumentable allocates so we only need to check that one allocation hook.334* Note that we can't handle hooked allocates since we might be called without a JIT resolve frame and that is required for us to335* report the allocation event.336*/337return NULL;338}339#endif /* J9VM_GC_THREAD_LOCAL_HEAP */340341Assert_MM_true(allocateFlags & OMR_GC_ALLOCATE_OBJECT_INSTRUMENTABLE);342// TODO: respect or reject tenured flag?343Assert_MM_false(allocateFlags & OMR_GC_ALLOCATE_OBJECT_TENURED);344if (OMR_GC_ALLOCATE_OBJECT_NON_ZERO_TLH == (allocateFlags & OMR_GC_ALLOCATE_OBJECT_NON_ZERO_TLH)) {345Assert_MM_true(GC_ObjectModel::SCAN_PRIMITIVE_ARRAY_OBJECT == extensions->objectModel.getScanType(clazz));346}347348J9Object *objectPtr = NULL;349if(!traceObjectCheck(vmThread)){350allocateFlags |= OMR_GC_ALLOCATE_OBJECT_NO_GC;351MM_IndexableObjectAllocationModel indexableOAM(env, clazz, numberOfIndexedFields, allocateFlags);352if (indexableOAM.initializeAllocateDescription(env)) {353env->_isInNoGCAllocationCall = true;354objectPtr = OMR_GC_AllocateObject(vmThread->omrVMThread, &indexableOAM);355if (NULL != objectPtr) {356uintptr_t allocatedBytes = env->getExtensions()->objectModel.getConsumedSizeInBytesWithHeader(objectPtr);357Assert_MM_true(allocatedBytes == indexableOAM.getAllocateDescription()->getContiguousBytes());358}359env->_isInNoGCAllocationCall = false;360}361}362/* TODO: Need to implement a more optimal path for cases where barriers are not required or where a batch barrier can be used. 
*/363if ((NULL != objectPtr) && J9_ARE_ALL_BITS_SET(clazz->classFlags, J9ClassContainsUnflattenedFlattenables)) {364MM_ObjectAccessBarrierAPI objectAccessBarrier(vmThread);365J9Class * elementClass = ((J9ArrayClass *) clazz)->componentType;366j9object_t defaultValue = elementClass->flattenedClassCache->defaultValue;367for (UDATA index = 0; index < numberOfIndexedFields; index++) {368objectAccessBarrier.inlineIndexableObjectStoreObject(vmThread, objectPtr, index, defaultValue);369}370}371372return objectPtr;373}374375/**376* High level allocate routine (used by VM) to allocate a single object377* @param clazz the class of the object to be allocated378* @param allocateFlags a bitfield of flags from the following379* OMR_GC_ALLOCATE_OBJECT_TENURED forced Old space allocation even if Generational Heap380* OMR_GC_ALLOCATE_OBJECT_INSTRUMENTABLE set if this allocate was an instrumentable allocate381* OMR_GC_ALLOCATE_OBJECT_HASHED set if this allocation initializes hash slot382* @return pointer to the object header, or NULL383*/384J9Object *385J9AllocateObject(J9VMThread *vmThread, J9Class *clazz, uintptr_t allocateFlags)386{387MM_EnvironmentBase *env = MM_EnvironmentBase::getEnvironment(vmThread->omrVMThread);388389#if defined(J9VM_GC_THREAD_LOCAL_HEAP)390if (!env->isInlineTLHAllocateEnabled()) {391/* For duration of call restore TLH allocate fields;392* we will hide real heapAlloc again on exit to fool JIT/Interpreter393* into thinking TLH is full if needed394*/395env->enableInlineTLHAllocate();396}397#endif /* J9VM_GC_THREAD_LOCAL_HEAP */398399Assert_MM_false(allocateFlags & OMR_GC_ALLOCATE_OBJECT_NON_ZERO_TLH);400Assert_MM_false(allocateFlags & OMR_GC_ALLOCATE_OBJECT_NO_GC);401402J9Object *objectPtr = NULL;403/* Replaced classes have poisoned the totalInstanceSize such that they are not allocatable,404* so inline allocate and NoGC allocate have already failed. 
If this allocator is reached405* with a replaced class, update to the current version and allocate that.406*/407clazz = J9_CURRENT_CLASS(clazz);408if (J9CLASS_IS_ENSUREHASHED(clazz)) {409allocateFlags |= OMR_GC_ALLOCATE_OBJECT_HASHED;410}411MM_MixedObjectAllocationModel mixedOAM(env, clazz, allocateFlags);412if (mixedOAM.initializeAllocateDescription(env)) {413objectPtr = OMR_GC_AllocateObject(vmThread->omrVMThread, &mixedOAM);414if (NULL != objectPtr) {415uintptr_t allocatedBytes = env->getExtensions()->objectModel.getConsumedSizeInBytesWithHeader(objectPtr);416Assert_MM_true(allocatedBytes == mixedOAM.getAllocateDescription()->getContiguousBytes());417if (LN_HAS_LOCKWORD(vmThread, objectPtr)) {418j9objectmonitor_t initialLockword = VM_ObjectMonitor::getInitialLockword(vmThread->javaVM, clazz);419if (0 != initialLockword) {420j9objectmonitor_t *lockEA = J9OBJECT_MONITOR_EA(vmThread, objectPtr);421J9_STORE_LOCKWORD(vmThread, lockEA, initialLockword);422}423}424}425}426427MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);428if (env->_failAllocOnExcessiveGC && (NULL != objectPtr)) {429/* If we have garbage collected too much, return NULL as if we had failed to allocate the object (effectively triggering an OOM).430* TODO: The ordering of this call wrt/ allocation really needs to change - this is just a temporary solution until431* we reorganize the rest of the code base.432*/433objectPtr = NULL;434/* we stop failing subsequent allocations, to give some room for the Java program435* to recover (release some resources) after OutOfMemoryError until next GC occurs436*/437env->_failAllocOnExcessiveGC = false;438extensions->excessiveGCLevel = excessive_gc_fatal_consumed;439/* sync storage since we have memset the object to NULL and440* filled in the header information even though we will be returning NULL441*/442MM_AtomicOperations::writeBarrier();443Trc_MM_ObjectAllocationFailedDueToExcessiveGC(vmThread);444}445446uintptr_t sizeInBytesRequired = 
mixedOAM.getAllocateDescription()->getBytesRequested();447if (NULL != objectPtr) {448/* The hook could release access and so the object address could change (the value is preserved). */449if (OMR_GC_ALLOCATE_OBJECT_INSTRUMENTABLE == (OMR_GC_ALLOCATE_OBJECT_INSTRUMENTABLE & allocateFlags)) {450TRIGGER_J9HOOK_VM_OBJECT_ALLOCATE_INSTRUMENTABLE(451vmThread->javaVM->hookInterface,452vmThread,453objectPtr,454sizeInBytesRequired);455} else {456TRIGGER_J9HOOK_VM_OBJECT_ALLOCATE(457vmThread->javaVM->hookInterface,458vmThread,459objectPtr,460sizeInBytesRequired);461}462463if( !mixedOAM.getAllocateDescription()->isCompletedFromTlh()) {464TRIGGER_J9HOOK_MM_PRIVATE_NON_TLH_ALLOCATION(465extensions->privateHookInterface,466vmThread->omrVMThread,467objectPtr);468}469470uintptr_t lowThreshold = extensions->lowAllocationThreshold;471uintptr_t highThreshold = extensions->highAllocationThreshold;472if ( (sizeInBytesRequired >= lowThreshold) && (sizeInBytesRequired <= highThreshold) ) {473Trc_MM_AllocationThreshold_triggerAllocationThresholdEvent(vmThread,sizeInBytesRequired,lowThreshold,highThreshold);474TRIGGER_J9HOOK_VM_OBJECT_ALLOCATE_WITHIN_THRESHOLD(475vmThread->javaVM->hookInterface,476vmThread,477objectPtr,478sizeInBytesRequired,479lowThreshold,480highThreshold);481}482}483484if(NULL == objectPtr) {485MM_MemorySpace *memorySpace = mixedOAM.getAllocateDescription()->getMemorySpace();486PORT_ACCESS_FROM_ENVIRONMENT(env);487/* we're going to return NULL, trace this */488Trc_MM_ObjectAllocationFailed(vmThread, sizeInBytesRequired, clazz, memorySpace->getName(), memorySpace);489dumpStackFrames(vmThread);490TRIGGER_J9HOOK_MM_PRIVATE_OUT_OF_MEMORY(extensions->privateHookInterface, vmThread->omrVMThread, j9time_hires_clock(), J9HOOK_MM_PRIVATE_OUT_OF_MEMORY, memorySpace, memorySpace->getName());491} else {492objectPtr = traceAllocateObject(vmThread, objectPtr, clazz, sizeInBytesRequired);493if (extensions->isStandardGC()) {494if (OMR_GC_ALLOCATE_OBJECT_TENURED == (allocateFlags & 
OMR_GC_ALLOCATE_OBJECT_TENURED)) {495/* Object must be allocated in Tenure if it is requested */496Assert_MM_true(extensions->isOld(objectPtr));497}498#if defined(J9VM_GC_REALTIME)499} else if (extensions->isMetronomeGC()) {500if (env->saveObjects((omrobjectptr_t)objectPtr)) {501j9gc_startGCIfTimeExpired(vmThread->omrVMThread);502env->restoreObjects((omrobjectptr_t*)&objectPtr);503}504#endif /* defined(J9VM_GC_REALTIME) */505}506}507508if ((NULL != objectPtr) && J9_ARE_ALL_BITS_SET(clazz->classFlags, J9ClassContainsUnflattenedFlattenables)) {509vmThread->javaVM->internalVMFunctions->defaultValueWithUnflattenedFlattenables(vmThread, clazz, objectPtr);510}511512#if defined(J9VM_GC_THREAD_LOCAL_HEAP)513if (extensions->needDisableInlineAllocation()) {514env->disableInlineTLHAllocate();515}516#endif /* J9VM_GC_THREAD_LOCAL_HEAP */517518return objectPtr;519}520521/**522* High level allocate routine (used by VM) to allocate an object array523* @param allocateFlags a bitfield of flags from the following524* OMR_GC_ALLOCATE_OBJECT_TENURED forced Old space allocation even if Generational Heap525* OMR_GC_ALLOCATE_OBJECT_INSTRUMENTABLE set if this allocate was an instrumentable allocate526* OMR_GC_ALLOCATE_OBJECT_HASHED set if this allocation initializes hash slot527* @return pointer to the object header, or NULL *528*/529J9Object *530J9AllocateIndexableObject(J9VMThread *vmThread, J9Class *clazz, uint32_t numberOfIndexedFields, uintptr_t allocateFlags)531{532MM_EnvironmentBase *env = MM_EnvironmentBase::getEnvironment(vmThread->omrVMThread);533MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);534535Assert_MM_false(allocateFlags & OMR_GC_ALLOCATE_OBJECT_NO_GC);536if (OMR_GC_ALLOCATE_OBJECT_NON_ZERO_TLH == (allocateFlags & OMR_GC_ALLOCATE_OBJECT_NON_ZERO_TLH)) {537Assert_MM_true(GC_ObjectModel::SCAN_PRIMITIVE_ARRAY_OBJECT == extensions->objectModel.getScanType(clazz));538}539540#if defined(J9VM_GC_THREAD_LOCAL_HEAP)541if (!env->isInlineTLHAllocateEnabled()) 
{542/* For duration of call restore TLH allocate fields;543* we will hide real heapAlloc again on exit to fool JIT/Interpreter544* into thinking TLH is full if needed545*/546env->enableInlineTLHAllocate();547}548#endif /* J9VM_GC_THREAD_LOCAL_HEAP */549550J9Object *objectPtr = NULL;551uintptr_t sizeInBytesRequired = 0;552MM_IndexableObjectAllocationModel indexableOAM(env, clazz, numberOfIndexedFields, allocateFlags);553if (indexableOAM.initializeAllocateDescription(env)) {554objectPtr = OMR_GC_AllocateObject(vmThread->omrVMThread, &indexableOAM);555if (NULL != objectPtr) {556uintptr_t allocatedBytes = env->getExtensions()->objectModel.getConsumedSizeInBytesWithHeader(objectPtr);557Assert_MM_true(allocatedBytes == indexableOAM.getAllocateDescription()->getContiguousBytes());558}559}560561if (env->_failAllocOnExcessiveGC && (NULL != objectPtr)) {562/* If we have garbage collected too much, return NULL as if we had failed to allocate the object (effectively triggering an OOM).563* TODO: The ordering of this call wrt/ allocation really needs to change - this is just a temporary solution until564* we reorganize the rest of the code base.565*/566objectPtr = NULL;567/* we stop failing subsequent allocations, to give some room for the Java program568* to recover (release some resources) after OutOfMemoryError until next GC occurs569*/570env->_failAllocOnExcessiveGC = false;571extensions->excessiveGCLevel = excessive_gc_fatal_consumed;572/* sync storage since we have memset the object to NULL and573* filled in the header information even though we will be returning NULL574*/575MM_AtomicOperations::storeSync();576Trc_MM_ArrayObjectAllocationFailedDueToExcessiveGC(vmThread);577}578579sizeInBytesRequired = indexableOAM.getAllocateDescription()->getBytesRequested();580if (NULL != objectPtr) {581/* The hook could release access and so the object address could change (the value is preserved). 
Since this582* means the hook could write back a different value to the variable, it must be a valid lvalue (ie: not cast).583*/584if (OMR_GC_ALLOCATE_OBJECT_INSTRUMENTABLE == (OMR_GC_ALLOCATE_OBJECT_INSTRUMENTABLE & allocateFlags)) {585TRIGGER_J9HOOK_VM_OBJECT_ALLOCATE_INSTRUMENTABLE(586vmThread->javaVM->hookInterface,587vmThread,588objectPtr,589sizeInBytesRequired);590} else {591TRIGGER_J9HOOK_VM_OBJECT_ALLOCATE(592vmThread->javaVM->hookInterface,593vmThread,594objectPtr,595sizeInBytesRequired);596}597598/* If this was a non-TLH allocation, trigger the hook */599if( !indexableOAM.getAllocateDescription()->isCompletedFromTlh()) {600TRIGGER_J9HOOK_MM_PRIVATE_NON_TLH_ALLOCATION(601extensions->privateHookInterface,602vmThread->omrVMThread,603objectPtr);604}605606uintptr_t lowThreshold = extensions->lowAllocationThreshold;607uintptr_t highThreshold = extensions->highAllocationThreshold;608if ( (sizeInBytesRequired >= lowThreshold) && (sizeInBytesRequired <= highThreshold) ) {609Trc_MM_AllocationThreshold_triggerAllocationThresholdEventIndexable(vmThread,sizeInBytesRequired,lowThreshold,highThreshold);610TRIGGER_J9HOOK_VM_OBJECT_ALLOCATE_WITHIN_THRESHOLD(611vmThread->javaVM->hookInterface,612vmThread,613objectPtr,614sizeInBytesRequired,615lowThreshold,616highThreshold);617}618619objectPtr = traceAllocateObject(vmThread, objectPtr, clazz, sizeInBytesRequired, (uintptr_t)numberOfIndexedFields);620if (extensions->isStandardGC()) {621if (OMR_GC_ALLOCATE_OBJECT_TENURED == (allocateFlags & OMR_GC_ALLOCATE_OBJECT_TENURED)) {622/* Object must be allocated in Tenure if it is requested */623Assert_MM_true(extensions->isOld(objectPtr));624}625#if defined(J9VM_GC_REALTIME)626} else if (extensions->isMetronomeGC()) {627if (env->saveObjects((omrobjectptr_t)objectPtr)) {628j9gc_startGCIfTimeExpired(vmThread->omrVMThread);629env->restoreObjects((omrobjectptr_t*)&objectPtr);630}631#endif /* defined(J9VM_GC_REALTIME) */632}633} else {634/* we're going to return NULL, trace this 
*/635PORT_ACCESS_FROM_ENVIRONMENT(env);636MM_MemorySpace *memorySpace = indexableOAM.getAllocateDescription()->getMemorySpace();637Trc_MM_ArrayObjectAllocationFailed(vmThread, sizeInBytesRequired, clazz, memorySpace->getName(), memorySpace);638dumpStackFrames(vmThread);639TRIGGER_J9HOOK_MM_PRIVATE_OUT_OF_MEMORY(extensions->privateHookInterface, vmThread->omrVMThread, j9time_hires_clock(), J9HOOK_MM_PRIVATE_OUT_OF_MEMORY, memorySpace, memorySpace->getName());640}641/* TODO: Need to implement a more optimal path for cases where barriers are not required or where a batch barrier can be used. */642if ((NULL != objectPtr) && J9_ARE_ALL_BITS_SET(clazz->classFlags, J9ClassContainsUnflattenedFlattenables)) {643MM_ObjectAccessBarrierAPI objectAccessBarrier(vmThread);644J9Class * elementClass = ((J9ArrayClass *) clazz)->componentType;645j9object_t defaultValue = elementClass->flattenedClassCache->defaultValue;646for (UDATA index = 0; index < numberOfIndexedFields; index++) {647objectAccessBarrier.inlineIndexableObjectStoreObject(vmThread, objectPtr, index, defaultValue);648}649}650651#if defined(J9VM_GC_THREAD_LOCAL_HEAP)652if (extensions->needDisableInlineAllocation()) {653env->disableInlineTLHAllocate();654}655#endif /* J9VM_GC_THREAD_LOCAL_HEAP */656657return objectPtr;658}659660/**661* Async message callback routine called whenever J9HOOK_VM_OBJECT_ALLOCATE_INSTRUMENTABLE662* or J9HOOK_VM_OBJECT_ALLOCATE_WITHIN_THRESHOLD is registered or unregistered. Each time called663* we check to see if at least one user is still registered. 
If so we disable inline664* TLH allocate to force JIT/Interpreter to go out of line, ie call J9AllocateObject et al,665* for allocates so that the calls to the required calls to the hook routine(s) can be made.666*667* @param vmThread - thread whose inline allocates need enabling/disabling668*/669void670memoryManagerTLHAsyncCallbackHandler(J9VMThread *vmThread, IDATA handlerKey, void *userData)671{672J9JavaVM * vm = (J9JavaVM*)userData;673MM_EnvironmentBase *env = MM_EnvironmentBase::getEnvironment(vmThread->omrVMThread);674MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(env);675MM_ObjectAllocationInterface* allocationInterface = env->_objectAllocationInterface;676677extensions->instrumentableAllocateHookEnabled = (0 != J9_EVENT_IS_HOOKED(vm->hookInterface,J9HOOK_VM_OBJECT_ALLOCATE_INSTRUMENTABLE));678679if ( J9_EVENT_IS_HOOKED(vm->hookInterface,J9HOOK_VM_OBJECT_ALLOCATE_WITHIN_THRESHOLD) ) {680Trc_MM_memoryManagerTLHAsyncCallbackHandler_eventIsHooked(vmThread);681if (extensions->isStandardGC() || extensions->isVLHGC()) {682#if defined(J9VM_GC_THREAD_LOCAL_HEAP)683extensions->disableInlineCacheForAllocationThreshold = (extensions->lowAllocationThreshold < (extensions->tlhMaximumSize + extensions->tlhMinimumSize));684#endif /* defined(J9VM_GC_THREAD_LOCAL_HEAP) */685} else if (extensions->isSegregatedHeap()) {686#if defined(J9VM_GC_SEGREGATED_HEAP)687extensions->disableInlineCacheForAllocationThreshold = (extensions->lowAllocationThreshold <= J9VMGC_SIZECLASSES_MAX_SMALL_SIZE_BYTES);688#endif /* defined(J9VM_GC_SEGREGATED_HEAP) */689}690} else {691Trc_MM_memoryManagerTLHAsyncCallbackHandler_eventNotHooked(vmThread);692extensions->disableInlineCacheForAllocationThreshold = false;693}694695if (extensions->isStandardGC() || extensions->isVLHGC()) {696#if defined(J9VM_GC_THREAD_LOCAL_HEAP)697if (extensions->needDisableInlineAllocation()) 
{698Trc_MM_memoryManagerTLHAsyncCallbackHandler_disableInlineTLHAllocates(vmThread,extensions->lowAllocationThreshold,extensions->highAllocationThreshold,extensions->tlhMinimumSize,extensions->tlhMaximumSize);699if (allocationInterface->cachedAllocationsEnabled(env)) {700/* BEN TODO: Collapse the env->enable/disableInlineTLHAllocate with these enable/disableCachedAllocations */701env->disableInlineTLHAllocate();702allocationInterface->disableCachedAllocations(env);703}704} else {705Trc_MM_memoryManagerTLHAsyncCallbackHandler_enableInlineTLHAllocates(vmThread,extensions->lowAllocationThreshold,extensions->highAllocationThreshold,extensions->tlhMinimumSize,extensions->tlhMaximumSize);706if (!allocationInterface->cachedAllocationsEnabled(env)) {707/* BEN TODO: Collapse the env->enable/disableInlineTLHAllocate with these enable/disableCachedAllocations */708env->enableInlineTLHAllocate();709allocationInterface->enableCachedAllocations(env);710}711}712713if (allocationInterface->cachedAllocationsEnabled(env)) {714uintptr_t samplingBytesGranularity = extensions->objectSamplingBytesGranularity;715if (UDATA_MAX != extensions->objectSamplingBytesGranularity) {716env->_traceAllocationBytes = 0;717env->_traceAllocationBytesCurrentTLH = 0;718env->setTLHSamplingTop(samplingBytesGranularity);719} else if (!env->isInlineTLHAllocateEnabled()) {720env->resetTLHSamplingTop();721}722}723724#endif /* defined(J9VM_GC_THREAD_LOCAL_HEAP) */725} else if (extensions->isSegregatedHeap()) {726#if defined(J9VM_GC_SEGREGATED_HEAP)727if (extensions->needDisableInlineAllocation()) {728Trc_MM_memoryManagerTLHAsyncCallbackHandler_disableAllocationCache(vmThread,extensions->lowAllocationThreshold,extensions->highAllocationThreshold);729if (allocationInterface->cachedAllocationsEnabled(env)) {730allocationInterface->disableCachedAllocations(env);731}732} else 
{733Trc_MM_memoryManagerTLHAsyncCallbackHandler_enableAllocationCache(vmThread,extensions->lowAllocationThreshold,extensions->highAllocationThreshold);734if (!allocationInterface->cachedAllocationsEnabled(env)) {735allocationInterface->enableCachedAllocations(env);736}737}738#endif /* defined(J9VM_GC_SEGREGATED_HEAP) */739}740}741} /* extern "C" */742743744745