PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/openj9
Path: blob/master/runtime/gc_modron_standard/StandardAccessBarrier.cpp
/*******************************************************************************
 * Copyright (c) 1991, 2021 IBM Corp. and others
 *
 * This program and the accompanying materials are made available under
 * the terms of the Eclipse Public License 2.0 which accompanies this
 * distribution and is available at https://www.eclipse.org/legal/epl-2.0/
 * or the Apache License, Version 2.0 which accompanies this distribution and
 * is available at https://www.apache.org/licenses/LICENSE-2.0.
 *
 * This Source Code may also be made available under the following
 * Secondary Licenses when the conditions for such availability set
 * forth in the Eclipse Public License, v. 2.0 are satisfied: GNU
 * General Public License, version 2 with the GNU Classpath
 * Exception [1] and GNU General Public License, version 2 with the
 * OpenJDK Assembly Exception [2].
 *
 * [1] https://www.gnu.org/software/classpath/license.html
 * [2] http://openjdk.java.net/legal/assembly-exception.html
 *
 * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 OR GPL-2.0 WITH Classpath-exception-2.0 OR LicenseRef-GPL-2.0 WITH Assembly-exception
 *******************************************************************************/

/**
 * @file
 * @ingroup GC_Modron_Base
 */

#include "j9.h"
#include "j9cfg.h"
#include "j9consts.h"
#include "j9protos.h"
#include "ModronAssertions.h"

#include "StandardAccessBarrier.hpp"
#include "AtomicOperations.hpp"
#if defined(OMR_GC_MODRON_CONCURRENT_MARK)
#include "ConcurrentGC.hpp"
#endif /* OMR_GC_MODRON_CONCURRENT_MARK */
#include "Debug.hpp"
#include "EnvironmentStandard.hpp"
#include "mmhook_internal.h"
#include "GCExtensions.hpp"
#include "HeapRegionManager.hpp"
#include "JNICriticalRegion.hpp"
#include "ObjectModel.hpp"
#include "Scavenger.hpp"
#include "SublistFragment.hpp"

MM_StandardAccessBarrier *
MM_StandardAccessBarrier::newInstance(MM_EnvironmentBase *env, MM_MarkingScheme *markingScheme)
{
	MM_StandardAccessBarrier *barrier;

	barrier = (MM_StandardAccessBarrier *)env->getForge()->allocate(sizeof(MM_StandardAccessBarrier), MM_AllocationCategory::FIXED, J9_GET_CALLSITE());
	if (barrier) {
		new(barrier) MM_StandardAccessBarrier(env, markingScheme);
		if (!barrier->initialize(env)) {
			barrier->kill(env);
			barrier = NULL;
		}
	}
	return barrier;
}

void
MM_StandardAccessBarrier::initializeForNewThread(MM_EnvironmentBase* env)
{
#if defined(OMR_GC_REALTIME)
	if (_extensions->usingSATBBarrier()) {
		_extensions->sATBBarrierRememberedSet->initializeFragment(env, &(((J9VMThread *)env->getLanguageVMThread())->sATBBarrierRememberedSetFragment));
	}
#endif /* OMR_GC_REALTIME */
}

bool
MM_StandardAccessBarrier::initialize(MM_EnvironmentBase *env)
{
#if defined(J9VM_GC_GENERATIONAL)
	if (!_generationalAccessBarrierComponent.initialize(env)) {
		return false;
	}
#endif /* J9VM_GC_GENERATIONAL */

	return MM_ObjectAccessBarrier::initialize(env);
}

void
MM_StandardAccessBarrier::kill(MM_EnvironmentBase *env)
{
	tearDown(env);
	env->getForge()->free(this);
}

void
MM_StandardAccessBarrier::tearDown(MM_EnvironmentBase *env)
{
#if defined(J9VM_GC_GENERATIONAL)
	_generationalAccessBarrierComponent.tearDown(env);
#endif /* J9VM_GC_GENERATIONAL */

	MM_ObjectAccessBarrier::tearDown(env);
}

/**
 * An unmarked heap reference that is about to be deleted (or overwritten) while
 * marking is in progress must be remembered for later marking and scanning.
 */
void
MM_StandardAccessBarrier::rememberObjectToRescan(MM_EnvironmentBase *env, J9Object *object)
{
	if (_markingScheme->markObject(env, object, true)) {
		rememberObjectImpl(env, object);
	}
}

/**
 * An unmarked heap reference that is about to be deleted (or overwritten) while
 * marking is in progress must be remembered for later marking and scanning.
 * This method is called by MM_StandardAccessBarrier::rememberObject()
 */
void
MM_StandardAccessBarrier::rememberObjectImpl(MM_EnvironmentBase *env, J9Object* object)
{
#if defined(OMR_GC_REALTIME)
	J9VMThread *vmThread = (J9VMThread *)env->getLanguageVMThread();
	_extensions->sATBBarrierRememberedSet->storeInFragment(env, &vmThread->sATBBarrierRememberedSetFragment, (UDATA *)object);
#endif /* OMR_GC_REALTIME */
}

bool
MM_StandardAccessBarrier::preObjectStoreImpl(J9VMThread *vmThread, J9Object *destObject, fj9object_t *destAddress, J9Object *value, bool isVolatile)
{
	MM_EnvironmentBase* env = MM_EnvironmentBase::getEnvironment(vmThread->omrVMThread);

	if (_extensions->isSATBBarrierActive()) {
		if (NULL != destObject) {
			J9Object *oldObject = NULL;
			protectIfVolatileBefore(vmThread, isVolatile, true, false);
			GC_SlotObject slotObject(vmThread->javaVM->omrVM, destAddress);
			oldObject = slotObject.readReferenceFromSlot();
			protectIfVolatileAfter(vmThread, isVolatile, true, false);
			rememberObjectToRescan(env, oldObject);
		}
	}

	return true;
}

bool
MM_StandardAccessBarrier::preObjectStoreImpl(J9VMThread *vmThread, J9Object **destAddress, J9Object *value, bool isVolatile)
{
	MM_EnvironmentBase* env = MM_EnvironmentBase::getEnvironment(vmThread->omrVMThread);

	if (_extensions->isSATBBarrierActive()) {
		J9Object* oldObject = NULL;
		protectIfVolatileBefore(vmThread, isVolatile, true, false);
		oldObject = *destAddress;
		protectIfVolatileAfter(vmThread, isVolatile, true, false);
		rememberObjectToRescan(env, oldObject);
	}

	return true;
}
/**
 * @copydoc MM_ObjectAccessBarrier::preObjectStore()
 *
 * Metronome uses a snapshot-at-the-beginning algorithm, but with a fuzzy snapshot in the
 * sense that threads are allowed to run during the root scan. This requires a "double
 * barrier." The barrier is active from the start of root scanning through the end of
 * tracing. For an unscanned thread performing a store, the new value is remembered by
 * the collector. For any thread performing a store (whether scanned or not), the old
 * value is remembered by the collector before being overwritten (thus this barrier must be
 * positioned as a pre-store barrier). For the latter ("Yuasa barrier") aspect of the
 * double barrier, only the first overwritten value needs to be remembered (remembering
 * others is harmless but not needed), and so we omit synchronization on the reading of the
 * old value.
 **/
bool
MM_StandardAccessBarrier::preObjectStore(J9VMThread *vmThread, J9Object *destObject, fj9object_t *destAddress, J9Object *value, bool isVolatile)
{
	return preObjectStoreImpl(vmThread, destObject, destAddress, value, isVolatile);
}

/**
 * @copydoc MM_MetronomeAccessBarrier::preObjectStore()
 *
 * Used for stores into classes
 */
bool
MM_StandardAccessBarrier::preObjectStore(J9VMThread *vmThread, J9Object *destClass, J9Object **destAddress, J9Object *value, bool isVolatile)
{
	return preObjectStoreImpl(vmThread, destAddress, value, isVolatile);
}

/**
 * @copydoc MM_MetronomeAccessBarrier::preObjectStore()
 *
 * Used for stores into internal structures
 */
bool
MM_StandardAccessBarrier::preObjectStore(J9VMThread *vmThread, J9Object **destAddress, J9Object *value, bool isVolatile)
{
	return preObjectStoreImpl(vmThread, destAddress, value, isVolatile);
}
/**
 * Called after an object is stored into another object.
 */
void
MM_StandardAccessBarrier::postObjectStore(J9VMThread *vmThread, J9Object *destObject, fj9object_t *destAddress, J9Object *value, bool isVolatile)
{
	postObjectStoreImpl(vmThread, destObject, value);

}

/**
 * Called after an object is stored into a class.
 */
void
MM_StandardAccessBarrier::postObjectStore(J9VMThread *vmThread, J9Class *destClass, J9Object **destAddress, J9Object *value, bool isVolatile)
{
	j9object_t destObject = J9VM_J9CLASS_TO_HEAPCLASS(destClass);

	/* destObject is guaranteed to be in old space, so the common code path will remember objects appropriately here */
	postObjectStoreImpl(vmThread, destObject, value);
}

bool
MM_StandardAccessBarrier::postBatchObjectStore(J9VMThread *vmThread, J9Object *destObject, bool isVolatile)
{
	postBatchObjectStoreImpl(vmThread, destObject);

	return true;
}

bool
MM_StandardAccessBarrier::postBatchObjectStore(J9VMThread *vmThread, J9Class *destClass, bool isVolatile)
{
	j9object_t destObject = J9VM_J9CLASS_TO_HEAPCLASS(destClass);

	postBatchObjectStoreImpl(vmThread, destObject);

	return true;
}

/**
 * Generational write barrier call when a single object is stored into another.
 * The remembered set system consists of a physical list of objects in the OLD area that
 * may contain references to the new area. The mutator is responsible for adding these old
 * area objects to the remembered set; the collectors are responsible for removing these objects
 * from the list when they no longer contain references. Objects that are to be remembered have their
 * REMEMBERED bit set in the flags field. For performance reasons, sublists are used to maintain the
 * remembered set.
 *
 * @param vmThread The current thread that has performed the store.
 * @param dstObject The object which is being stored into.
 * @param srcObject The object being stored.
 *
 * @note The write barrier can be called with minimal, all, or no validation checking.
 * @note Any object that contains a new reference MUST have its REMEMBERED bit set.
 */
void
MM_StandardAccessBarrier::postObjectStoreImpl(J9VMThread *vmThread, J9Object *dstObject, J9Object *srcObject)
{
	/* If the source object is NULL, there is no need for a write barrier. */
	if(NULL != srcObject) {
		if (_extensions->isConcurrentScavengerEnabled() && !_extensions->isScavengerBackOutFlagRaised()) {
			Assert_MM_false(_extensions->scavenger->isObjectInEvacuateMemory(dstObject));
			Assert_MM_false(_extensions->scavenger->isObjectInEvacuateMemory(srcObject));
		}

#if defined(OMR_GC_MODRON_CONCURRENT_MARK)
		/* Call the concurrent write barrier if required */
		if(isIncrementalUpdateBarrierActive(vmThread) && _extensions->isOld(dstObject)) {
			concurrentPostWriteBarrierStore(vmThread->omrVMThread, dstObject, srcObject);
		}
#endif /* OMR_GC_MODRON_CONCURRENT_MARK */

#if defined(J9VM_GC_GENERATIONAL)
		_generationalAccessBarrierComponent.postObjectStore(vmThread, dstObject, srcObject);
#endif /* J9VM_GC_GENERATIONAL */
	}
}

/**
 * Generational write barrier call when a group of objects are stored into a single object.
 * The remembered set system consists of a physical list of objects in the OLD area that
 * may contain references to the new area. The mutator is responsible for adding these old
 * area objects to the remembered set; the collectors are responsible for removing these objects
 * from the list when they no longer contain references. Objects that are to be remembered have their
 * REMEMBERED bit set in the flags field. For performance reasons, sublists are used to maintain the
 * remembered set.
 *
 * @param vmThread The current thread that has performed the store.
 * @param dstObject The object which is being stored into.
 *
 * @note The write barrier can be called with minimal, all, or no validation checking.
 * @note Any object that contains a new reference MUST have its REMEMBERED bit set.
 * @note This call is typically used by array copies, when it may be more efficient
 * to optimistically add an object to the remembered set without checking too hard.
 */
void
MM_StandardAccessBarrier::postBatchObjectStoreImpl(J9VMThread *vmThread, J9Object *dstObject)
{
#if defined(OMR_GC_MODRON_CONCURRENT_MARK)
	Assert_MM_true(!_extensions->usingSATBBarrier());
	/* Call the concurrent write barrier if required */
	if(_extensions->concurrentMark &&
		(vmThread->privateFlags & J9_PRIVATE_FLAGS_CONCURRENT_MARK_ACTIVE) &&
		_extensions->isOld(dstObject)) {
		concurrentPostWriteBarrierBatchStore(vmThread->omrVMThread, dstObject);
	}
#endif /* OMR_GC_MODRON_CONCURRENT_MARK */

#if defined(J9VM_GC_GENERATIONAL)
	_generationalAccessBarrierComponent.postBatchObjectStore(vmThread, dstObject);
#endif /* J9VM_GC_GENERATIONAL */
}

/**
 * VMDESIGN 2048
 * Special barrier for auto-remembering stack-referenced objects. This must be called
 * in two cases:
 * 1) an object which was allocated directly into old space.
 * 2) an object which is being constructed via JNI
 *
 * @param vmThread[in] the current thread
 * @param object[in] the object to be remembered
 */
void
MM_StandardAccessBarrier::recentlyAllocatedObject(J9VMThread *vmThread, J9Object *dstObject)
{
#if defined(J9VM_GC_GENERATIONAL)
	MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(vmThread);

	if(extensions->scavengerEnabled && !extensions->isConcurrentScavengerEnabled() && extensions->isOld(dstObject) && !extensions->objectModel.isPrimitiveArray(dstObject)) {

		Trc_MM_StandardAccessBarrier_treatObjectAsRecentlyAllocated(vmThread, dstObject);

		if(extensions->objectModel.atomicSwitchReferencedState(dstObject, OMR_TENURED_STACK_OBJECT_CURRENTLY_REFERENCED)) {
			/* Successfully set the remembered bit in the object. Now allocate an entry from the
			 * remembered set fragment of the current thread and store the destination object into
			 * the remembered set. */
			MM_EnvironmentStandard *env = MM_EnvironmentStandard::getEnvironment(vmThread->omrVMThread);
			MM_SublistFragment fragment((J9VMGC_SublistFragment*)&vmThread->gcRememberedSet);
			if (!fragment.add(env, (UDATA)dstObject )) {
				/* No slot was available from any fragment. Set the remembered set overflow flag.
				 * The REMEMBERED bit is kept in the object for optimization purposes (only scan objects
				 * whose REMEMBERED bit is set in an overflow scan)
				 */
				extensions->setRememberedSetOverflowState();
#if defined(J9VM_GC_MODRON_EVENTS)
				reportRememberedSetOverflow(vmThread);
#endif /* J9VM_GC_MODRON_EVENTS */
			}
		}
	}
#endif /* J9VM_GC_GENERATIONAL */
}

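/**
 * Get direct access to the contents of a primitive array for a JNI critical section.
 * If the VM is configured to always copy JNI critical data, the array contents are
 * copied into a native buffer and the copy is returned; otherwise a JNI critical
 * region is entered and a direct pointer to the contiguous array data is returned.
 */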
void*
MM_StandardAccessBarrier::jniGetPrimitiveArrayCritical(J9VMThread* vmThread, jarray array, jboolean *isCopy)
{
	void *data = NULL;
	J9JavaVM *javaVM = vmThread->javaVM;
	J9InternalVMFunctions *functions = javaVM->internalVMFunctions;

	bool shouldCopy = false;
	bool alwaysCopyInCritical = (javaVM->runtimeFlags & J9_RUNTIME_ALWAYS_COPY_JNI_CRITICAL) == J9_RUNTIME_ALWAYS_COPY_JNI_CRITICAL;
	if (alwaysCopyInCritical) {
		shouldCopy = true;
	}

	if(shouldCopy) {
		VM_VMAccess::inlineEnterVMFromJNI(vmThread);
		J9IndexableObject *arrayObject = (J9IndexableObject*)J9_JNI_UNWRAP_REFERENCE(array);
		GC_ArrayObjectModel* indexableObjectModel = &_extensions->indexableObjectModel;
		I_32 sizeInElements = (I_32)indexableObjectModel->getSizeInElements(arrayObject);
		UDATA sizeInBytes = indexableObjectModel->getDataSizeInBytes(arrayObject);
		data = functions->jniArrayAllocateMemoryFromThread(vmThread, sizeInBytes);
		if(NULL == data) {
			functions->setNativeOutOfMemoryError(vmThread, 0, 0); // better error message here?
		} else {
			indexableObjectModel->memcpyFromArray(data, arrayObject, 0, sizeInElements);
			if(NULL != isCopy) {
				*isCopy = JNI_TRUE;
			}
		}
		vmThread->jniCriticalCopyCount += 1;
		VM_VMAccess::inlineExitVMToJNI(vmThread);
	} else {
		// acquire access and return a direct pointer
		MM_JNICriticalRegion::enterCriticalRegion(vmThread, false);
		J9IndexableObject *arrayObject = (J9IndexableObject*)J9_JNI_UNWRAP_REFERENCE(array);
		data = (void *)_extensions->indexableObjectModel.getDataPointerForContiguous(arrayObject);
		if(NULL != isCopy) {
			*isCopy = JNI_FALSE;
		}
	}
	return data;
}

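/**
 * Release access to a primitive array previously acquired by jniGetPrimitiveArrayCritical.
 * For a copied array the data is written back to the array (unless mode is JNI_ABORT) and
 * the native buffer is freed (unless mode is JNI_COMMIT); for a direct pointer the JNI
 * critical region is simply exited.
 */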
void
MM_StandardAccessBarrier::jniReleasePrimitiveArrayCritical(J9VMThread* vmThread, jarray array, void * elems, jint mode)
{
	J9JavaVM *javaVM = vmThread->javaVM;
	J9InternalVMFunctions *functions = javaVM->internalVMFunctions;

	bool shouldCopy = false;
	bool alwaysCopyInCritical = (javaVM->runtimeFlags & J9_RUNTIME_ALWAYS_COPY_JNI_CRITICAL) == J9_RUNTIME_ALWAYS_COPY_JNI_CRITICAL;
	if (alwaysCopyInCritical) {
		shouldCopy = true;
	}

	if(shouldCopy) {
		VM_VMAccess::inlineEnterVMFromJNI(vmThread);
		if(JNI_ABORT != mode) {
			J9IndexableObject *arrayObject = (J9IndexableObject*)J9_JNI_UNWRAP_REFERENCE(array);
			GC_ArrayObjectModel* indexableObjectModel = &_extensions->indexableObjectModel;
			I_32 sizeInElements = (I_32)indexableObjectModel->getSizeInElements(arrayObject);
			_extensions->indexableObjectModel.memcpyToArray(arrayObject, 0, sizeInElements, elems);
		}

		// Commit means copy the data but do not free the buffer.
		// All other modes free the buffer.
		if(JNI_COMMIT != mode) {
			functions->jniArrayFreeMemoryFromThread(vmThread, elems);
		}

		if(vmThread->jniCriticalCopyCount > 0) {
			vmThread->jniCriticalCopyCount -= 1;
		} else {
			Assert_MM_invalidJNICall();
		}

		VM_VMAccess::inlineExitVMToJNI(vmThread);
	} else {
		/*
		 * Objects cannot be moved while a JNI critical section is active.
		 * This trace point is generated if the object has been moved or the passed value of elems is corrupted.
		 */
		J9IndexableObject *arrayObject = (J9IndexableObject*)J9_JNI_UNWRAP_REFERENCE(array);
		void *data = (void *)_extensions->indexableObjectModel.getDataPointerForContiguous(arrayObject);
		if(elems != data) {
			Trc_MM_JNIReleasePrimitiveArrayCritical_invalid(vmThread, arrayObject, elems, data);
		}

		MM_JNICriticalRegion::exitCriticalRegion(vmThread, false);
	}
}

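/**
 * Get direct access to the character data of a java.lang.String for a JNI critical section.
 * A copy is made when the VM always copies JNI critical data or when the String is stored
 * in compressed form (the bytes must be inflated to jchars); otherwise a direct pointer to
 * the backing character array is returned under a JNI critical region.
 */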
const jchar*
MM_StandardAccessBarrier::jniGetStringCritical(J9VMThread* vmThread, jstring str, jboolean *isCopy)
{
	jchar *data = NULL;
	J9JavaVM *javaVM = vmThread->javaVM;
	J9InternalVMFunctions *functions = javaVM->internalVMFunctions;
	bool isCompressed = false;
	bool shouldCopy = false;
	bool hasVMAccess = false;

	if ((javaVM->runtimeFlags & J9_RUNTIME_ALWAYS_COPY_JNI_CRITICAL) == J9_RUNTIME_ALWAYS_COPY_JNI_CRITICAL) {
		VM_VMAccess::inlineEnterVMFromJNI(vmThread);
		hasVMAccess = true;
		shouldCopy = true;
	} else if (IS_STRING_COMPRESSION_ENABLED_VM(javaVM)) {
		/* If the string bytes are in compressed UNICODE, then we need to copy to decompress */
		VM_VMAccess::inlineEnterVMFromJNI(vmThread);
		hasVMAccess = true;
		J9Object *stringObject = (J9Object*)J9_JNI_UNWRAP_REFERENCE(str);
		if (IS_STRING_COMPRESSED(vmThread,stringObject)) {
			isCompressed = true;
			shouldCopy = true;
		}
	} else if (_extensions->isConcurrentScavengerEnabled()) {
		/* reading value field from String object may trigger object movement */
		VM_VMAccess::inlineEnterVMFromJNI(vmThread);
		hasVMAccess = true;
	}

	if (shouldCopy) {
		J9Object *stringObject = (J9Object*)J9_JNI_UNWRAP_REFERENCE(str);
		J9IndexableObject *valueObject = (J9IndexableObject*)J9VMJAVALANGSTRING_VALUE(vmThread, stringObject);
		jint length = J9VMJAVALANGSTRING_LENGTH(vmThread, stringObject);
		UDATA sizeInBytes = length * sizeof(jchar);

		if (IS_STRING_COMPRESSED(vmThread, stringObject)) {
			isCompressed = true;
		}

		data = (jchar*)functions->jniArrayAllocateMemoryFromThread(vmThread, sizeInBytes);
		if (NULL == data) {
			functions->setNativeOutOfMemoryError(vmThread, 0, 0); // better error message here?
		} else {
			GC_ArrayObjectModel* indexableObjectModel = &_extensions->indexableObjectModel;
			if (isCompressed) {
				jint i;

				for (i = 0; i < length; i++) {
					data[i] = (jchar)(U_8)J9JAVAARRAYOFBYTE_LOAD(vmThread, (j9object_t)valueObject, i);
				}
			} else {
				if (J9_ARE_ANY_BITS_SET(javaVM->runtimeFlags, J9_RUNTIME_STRING_BYTE_ARRAY)) {
					// This API determines the stride based on the type of valueObject, so in the [B case we must pass in the length in bytes
					indexableObjectModel->memcpyFromArray(data, valueObject, 0, (I_32)sizeInBytes);
				} else {
					indexableObjectModel->memcpyFromArray(data, valueObject, 0, length);
				}
			}
			if (NULL != isCopy) {
				*isCopy = JNI_TRUE;
			}
		}
		vmThread->jniCriticalCopyCount += 1;
	} else {
		// acquire access and return a direct pointer
		MM_JNICriticalRegion::enterCriticalRegion(vmThread, hasVMAccess);
		J9Object *stringObject = (J9Object*)J9_JNI_UNWRAP_REFERENCE(str);
		J9IndexableObject *valueObject = (J9IndexableObject*)J9VMJAVALANGSTRING_VALUE(vmThread, stringObject);

		data = (jchar*)_extensions->indexableObjectModel.getDataPointerForContiguous(valueObject);

		if (NULL != isCopy) {
			*isCopy = JNI_FALSE;
		}
	}
	if (hasVMAccess) {
		VM_VMAccess::inlineExitVMToJNI(vmThread);
	}
	return data;
}

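/**
 * Release access to String data previously acquired by jniGetStringCritical. String data
 * is never copied back: a copied buffer is simply freed, while a direct pointer just exits
 * the JNI critical region.
 */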
void
MM_StandardAccessBarrier::jniReleaseStringCritical(J9VMThread* vmThread, jstring str, const jchar* elems)
{
	J9JavaVM *javaVM = vmThread->javaVM;
	J9InternalVMFunctions *functions = javaVM->internalVMFunctions;
	bool hasVMAccess = false;
	bool shouldCopy = false;

	if ((javaVM->runtimeFlags & J9_RUNTIME_ALWAYS_COPY_JNI_CRITICAL) == J9_RUNTIME_ALWAYS_COPY_JNI_CRITICAL) {
		shouldCopy = true;
	} else if (IS_STRING_COMPRESSION_ENABLED_VM(javaVM)) {
		VM_VMAccess::inlineEnterVMFromJNI(vmThread);
		hasVMAccess = true;
		J9Object *stringObject = (J9Object*)J9_JNI_UNWRAP_REFERENCE(str);
		if (IS_STRING_COMPRESSED(vmThread, stringObject)) {
			shouldCopy = true;
		}
	}

	if (shouldCopy) {
		// String data is not copied back
		functions->jniArrayFreeMemoryFromThread(vmThread, (void*)elems);

		if(vmThread->jniCriticalCopyCount > 0) {
			vmThread->jniCriticalCopyCount -= 1;
		} else {
			Assert_MM_invalidJNICall();
		}
	} else {
		/*
		 * We do not assert here that elems is valid for str (the way jniReleasePrimitiveArrayCritical does)
		 * because of the complexity of the required code
		 */
		// direct pointer, just drop access
		MM_JNICriticalRegion::exitCriticalRegion(vmThread, hasVMAccess);
	}

	if (hasVMAccess) {
		VM_VMAccess::inlineExitVMToJNI(vmThread);
	}
}

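/**
 * Sum jniCriticalDirectCount over all VM threads to determine how many JNI critical
 * regions are currently held as direct (uncopied) pointers into the heap.
 */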
UDATA
MM_StandardAccessBarrier::getJNICriticalRegionCount(MM_GCExtensions *extensions)
{
	/* TODO kmt : This is probably the slowest way to get this information */
	GC_VMThreadListIterator threadIterator(((J9JavaVM *)extensions->getOmrVM()->_language_vm));
	J9VMThread *walkThread;
	UDATA activeCriticals = 0;

	// TODO kmt : Should get public flags mutex here -- worst case is a false positive
	while((walkThread = threadIterator.nextVMThread()) != NULL) {
		activeCriticals += walkThread->jniCriticalDirectCount;
	}
	return activeCriticals;
}

#if defined(OMR_GC_CONCURRENT_SCAVENGER)
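/**
 * Copy a contiguous range of reference array slots from srcObject to destObject, walking
 * from the highest index down so that overlapping ranges within the same array are handled
 * correctly. Each source slot is passed through the concurrent scavenger read barrier
 * (preObjectRead) before its value is copied.
 */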
I_32
MM_StandardAccessBarrier::doCopyContiguousBackwardWithReadBarrier(J9VMThread *vmThread, J9IndexableObject *srcObject, J9IndexableObject *destObject, I_32 srcIndex, I_32 destIndex, I_32 lengthInSlots)
{
	srcIndex += lengthInSlots;
	destIndex += lengthInSlots;

	if (J9VMTHREAD_COMPRESS_OBJECT_REFERENCES(vmThread)) {
		uint32_t *srcSlot = (uint32_t *)indexableEffectiveAddress(vmThread, srcObject, srcIndex, sizeof(uint32_t));
		uint32_t *destSlot = (uint32_t *)indexableEffectiveAddress(vmThread, destObject, destIndex, sizeof(uint32_t));
		uint32_t *srcEndSlot = srcSlot - lengthInSlots;

		while (srcSlot-- > srcEndSlot) {
			preObjectRead(vmThread, (J9Object *)srcObject, (fj9object_t*)srcSlot);

			*--destSlot = *srcSlot;
		}
	} else {
		uintptr_t *srcSlot = (uintptr_t *)indexableEffectiveAddress(vmThread, srcObject, srcIndex, sizeof(uintptr_t));
		uintptr_t *destSlot = (uintptr_t *)indexableEffectiveAddress(vmThread, destObject, destIndex, sizeof(uintptr_t));
		uintptr_t *srcEndSlot = srcSlot - lengthInSlots;

		while (srcSlot-- > srcEndSlot) {
			preObjectRead(vmThread, (J9Object *)srcObject, (fj9object_t*)srcSlot);

			*--destSlot = *srcSlot;
		}
	}

	return ARRAY_COPY_SUCCESSFUL;
}

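/**
 * Copy a contiguous range of reference array slots from srcObject to destObject in
 * ascending index order, passing each source slot through the concurrent scavenger read
 * barrier (preObjectRead) before its value is copied.
 */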
I_32
MM_StandardAccessBarrier::doCopyContiguousForwardWithReadBarrier(J9VMThread *vmThread, J9IndexableObject *srcObject, J9IndexableObject *destObject, I_32 srcIndex, I_32 destIndex, I_32 lengthInSlots)
{
	if (J9VMTHREAD_COMPRESS_OBJECT_REFERENCES(vmThread)) {
		uint32_t *srcSlot = (uint32_t *)indexableEffectiveAddress(vmThread, srcObject, srcIndex, sizeof(uint32_t));
		uint32_t *destSlot = (uint32_t *)indexableEffectiveAddress(vmThread, destObject, destIndex, sizeof(uint32_t));
		uint32_t *srcEndSlot = srcSlot + lengthInSlots;

		while (srcSlot < srcEndSlot) {
			preObjectRead(vmThread, (J9Object *)srcObject, (fj9object_t*)srcSlot);

			*destSlot++ = *srcSlot++;
		}
	} else {
		uintptr_t *srcSlot = (uintptr_t *)indexableEffectiveAddress(vmThread, srcObject, srcIndex, sizeof(uintptr_t));
		uintptr_t *destSlot = (uintptr_t *)indexableEffectiveAddress(vmThread, destObject, destIndex, sizeof(uintptr_t));
		uintptr_t *srcEndSlot = srcSlot + lengthInSlots;

		while (srcSlot < srcEndSlot) {
			preObjectRead(vmThread, (J9Object *)srcObject, (fj9object_t*)srcSlot);

			*destSlot++ = *srcSlot++;
		}
	}

	return ARRAY_COPY_SUCCESSFUL;
}
#endif /* OMR_GC_CONCURRENT_SCAVENGER */

/**
 * Finds opportunities for doing the copy without executing the write barrier, or with
 * only a partial write barrier.
 * @return ARRAY_COPY_SUCCESSFUL if the copy was successful, ARRAY_COPY_NOT_DONE if no copy was done
 */
I_32
MM_StandardAccessBarrier::backwardReferenceArrayCopyIndex(J9VMThread *vmThread, J9IndexableObject *srcObject, J9IndexableObject *destObject, I_32 srcIndex, I_32 destIndex, I_32 lengthInSlots)
{
	I_32 retValue = ARRAY_COPY_NOT_DONE;
	/* TODO SATB re-enable opt? */
	if (_extensions->usingSATBBarrier()) {
		return retValue;
	}

	if(0 == lengthInSlots) {
		retValue = ARRAY_COPY_SUCCESSFUL;
	} else {
		/* a high level caller ensured destObject == srcObject */
		Assert_MM_true(destObject == srcObject);
		Assert_MM_true(_extensions->indexableObjectModel.isInlineContiguousArraylet(destObject));

#if defined(OMR_GC_CONCURRENT_SCAVENGER)
		if (_extensions->isConcurrentScavengerInProgress()) {
			/* During an active CS cycle, we need a read barrier for every slot being copied.
			 * For the write barrier the same rules apply - we just need the final batch barrier.
			 */
			retValue = doCopyContiguousBackwardWithReadBarrier(vmThread, srcObject, destObject, srcIndex, destIndex, lengthInSlots);
		} else
#endif /* OMR_GC_CONCURRENT_SCAVENGER */
		{
			retValue = doCopyContiguousBackward(vmThread, srcObject, destObject, srcIndex, destIndex, lengthInSlots);
		}
		Assert_MM_true(retValue == ARRAY_COPY_SUCCESSFUL);

		postBatchObjectStoreImpl(vmThread, (J9Object *)destObject);
	}
	return retValue;
}

/**
 * Finds opportunities for doing the copy without executing the write barrier, or with
 * only a partial write barrier.
 * @return ARRAY_COPY_SUCCESSFUL if the copy was successful, ARRAY_COPY_NOT_DONE if no copy was done
 */
I_32
MM_StandardAccessBarrier::forwardReferenceArrayCopyIndex(J9VMThread *vmThread, J9IndexableObject *srcObject, J9IndexableObject *destObject, I_32 srcIndex, I_32 destIndex, I_32 lengthInSlots)
{
	/* TODO SATB re-enable opt */
	I_32 retValue = ARRAY_COPY_NOT_DONE;

	if (_extensions->usingSATBBarrier()) {
		return retValue;
	}

	if(0 == lengthInSlots) {
		retValue = ARRAY_COPY_SUCCESSFUL;
	} else {
		Assert_MM_true(_extensions->indexableObjectModel.isInlineContiguousArraylet(destObject));
		Assert_MM_true(_extensions->indexableObjectModel.isInlineContiguousArraylet(srcObject));

#if defined(OMR_GC_CONCURRENT_SCAVENGER)
		if (_extensions->isConcurrentScavengerInProgress()) {
			/* During an active CS cycle, we need a read barrier for every slot being copied.
			 * For the write barrier the same rules apply - we just need the final batch barrier.
			 */
			retValue = doCopyContiguousForwardWithReadBarrier(vmThread, srcObject, destObject, srcIndex, destIndex, lengthInSlots);
		} else
#endif /* OMR_GC_CONCURRENT_SCAVENGER */
		{
			retValue = doCopyContiguousForward(vmThread, srcObject, destObject, srcIndex, destIndex, lengthInSlots);
		}

		Assert_MM_true(retValue == ARRAY_COPY_SUCCESSFUL);
		postBatchObjectStoreImpl(vmThread, (J9Object *)destObject);
	}
	return retValue;
}

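/**
 * Return a version of toConvert that is suitable for storing in a constant pool. If the
 * object is not already in old space, a new tenured (non-instrumentable) object of the
 * same class is allocated and the original is cloned into it; otherwise the original
 * object is returned unchanged.
 */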
J9Object*
MM_StandardAccessBarrier::asConstantPoolObject(J9VMThread *vmThread, J9Object* toConvert, UDATA allocationFlags)
{
	j9object_t cpObject = toConvert;

	Assert_MM_true(allocationFlags & (J9_GC_ALLOCATE_OBJECT_TENURED | J9_GC_ALLOCATE_OBJECT_NON_INSTRUMENTABLE));

	if (NULL != toConvert) {
		Assert_MM_false(_extensions->objectModel.isIndexable(toConvert));
		if (!_extensions->isOld(toConvert)) {
			MM_EnvironmentBase *env = MM_EnvironmentBase::getEnvironment(vmThread->omrVMThread);
			if (!env->saveObjects(toConvert)) {
				Assert_MM_unreachable();
			}
			J9Class *j9class = J9GC_J9OBJECT_CLAZZ_THREAD(toConvert, vmThread);
			cpObject = J9AllocateObject(vmThread, j9class, allocationFlags);
			env->restoreObjects(&toConvert);
			if (cpObject != NULL) {
				cloneObject(vmThread, toConvert, cpObject);
			}
		}
	}
	return cpObject;
}

#if defined(OMR_GC_CONCURRENT_SCAVENGER)
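/**
 * Read barrier for a weak root slot (e.g. a monitor table entry) while a concurrent
 * scavenge is in progress. If the referenced object has already been forwarded out of
 * evacuate space, the slot is updated to the new location; if it has not been copied it
 * is left alone, since the object may be dead (the slot is reached without a real
 * reference to the object).
 */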
bool
MM_StandardAccessBarrier::preWeakRootSlotRead(J9VMThread *vmThread, j9object_t *srcAddress)
{
	omrobjectptr_t object = (omrobjectptr_t)*srcAddress;
	bool const compressed = compressObjectReferences();

	if ((NULL != _extensions->scavenger) && _extensions->scavenger->isObjectInEvacuateMemory(object)) {
		MM_EnvironmentStandard *env = MM_EnvironmentStandard::getEnvironment(vmThread->omrVMThread);
		Assert_MM_true(_extensions->scavenger->isConcurrentCycleInProgress());
		Assert_MM_true(_extensions->scavenger->isMutatorThreadInSyncWithCycle(env));

		MM_ForwardedHeader forwardHeader(object, compressed);
		omrobjectptr_t forwardPtr = forwardHeader.getForwardedObject();

		if (NULL != forwardPtr) {
			/* Object has been or is being copied - ensure the object is fully copied before exposing it, update the slot and return.
			 * The slot update needs to be atomic only if a mutator thread races with a write operation to this same slot.
			 * The barrier is typically used to update Monitor table entries, which should never be reinitialized by any mutator.
			 * Updates can occur, but only by GC threads during the STW clearing phase, so there is no race with this barrier. */
			forwardHeader.copyOrWait(forwardPtr);
			*srcAddress = forwardPtr;
		}
		/* Do nothing if the object has not been copied already, since it might be dead.
		 * This object is found without a real reference to it,
		 * for example by iterating the monitor table to dump info about all monitors */
	}

	return true;
}

bool
MM_StandardAccessBarrier::preWeakRootSlotRead(J9JavaVM *vm, j9object_t *srcAddress)
{
	omrobjectptr_t object = (omrobjectptr_t)*srcAddress;
	bool const compressed = compressObjectReferences();

	if ((NULL != _extensions->scavenger) && _extensions->scavenger->isObjectInEvacuateMemory(object)) {
		Assert_MM_true(_extensions->scavenger->isConcurrentCycleInProgress());

		MM_ForwardedHeader forwardHeader(object, compressed);
		omrobjectptr_t forwardPtr = forwardHeader.getForwardedObject();

		if (NULL != forwardPtr) {
			/* Object has been or is being copied - ensure the object is fully copied before exposing it, update the slot and return */
			forwardHeader.copyOrWait(forwardPtr);
			*srcAddress = forwardPtr;
		}
		/* Do nothing if the object has not been copied already, since it might be dead.
		 * This object is found unintentionally (without a real reference to it),
		 * for example by iterating colliding entries in the monitor hash table */
	}

	return true;
}

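/**
 * Read barrier for a reference slot in a class (e.g. a static field) while a concurrent
 * scavenge is in progress. If the referenced object is still in evacuate space it is
 * copied (or waited on until fully copied) and the slot is updated atomically to the new
 * location.
 */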
bool
MM_StandardAccessBarrier::preObjectRead(J9VMThread *vmThread, J9Class *srcClass, j9object_t *srcAddress)
{
	omrobjectptr_t object = *(volatile omrobjectptr_t *)srcAddress;
	bool const compressed = compressObjectReferences();

	if ((NULL != _extensions->scavenger) && _extensions->scavenger->isObjectInEvacuateMemory(object)) {
		MM_EnvironmentStandard *env = MM_EnvironmentStandard::getEnvironment(vmThread->omrVMThread);
		Assert_MM_true(_extensions->scavenger->isConcurrentCycleInProgress());
		Assert_MM_true(_extensions->scavenger->isMutatorThreadInSyncWithCycle(env));

		MM_ForwardedHeader forwardHeader(object, compressed);
		omrobjectptr_t forwardPtr = forwardHeader.getForwardedObject();

		if (NULL != forwardPtr) {
			/* Object has been strictly (remotely) forwarded. Ensure the object is fully copied before exposing it, update the slot and return. */
			forwardHeader.copyOrWait(forwardPtr);
			MM_AtomicOperations::lockCompareExchange((uintptr_t *)srcAddress, (uintptr_t)object, (uintptr_t)forwardPtr);
		} else {
			omrobjectptr_t destinationObjectPtr = _extensions->scavenger->copyObject(env, &forwardHeader);
			if (NULL == destinationObjectPtr) {
				/* Failure - the scavenger must back out the work it has done. Attempt to return the original object. */
				forwardPtr = forwardHeader.setSelfForwardedObject();
				if (forwardPtr != object) {
					/* Another thread successfully copied the object. Re-fetch forwarding pointer,
					 * and ensure the object is fully copied before exposing it. */
					MM_ForwardedHeader(object, compressed).copyOrWait(forwardPtr);
					MM_AtomicOperations::lockCompareExchange((uintptr_t *)srcAddress, (uintptr_t)object, (uintptr_t)forwardPtr);
				}
			} else {
				/* Update the slot. copyObject() ensures that the object is fully copied. */
				MM_AtomicOperations::lockCompareExchange((uintptr_t *)srcAddress, (uintptr_t)object, (uintptr_t)destinationObjectPtr);
			}
		}
	}

	return true;
}

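/* Per-thread read barrier statistics are flushed to the global scavenger statistics in
 * batches of this size, to limit atomic updates of the shared counters. */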
#define GLOBAL_READ_BARRIR_STATS_UPDATE_THRESHOLD 32

bool
MM_StandardAccessBarrier::preObjectRead(J9VMThread *vmThread, J9Object *srcObject, fj9object_t *srcAddress)
{
	/* with volatile cast, ensure that we are really getting a snapshot (instead of the slot being re-read at later points with possibly different values) */
	bool const compressed = compressObjectReferences();
	fomrobject_t objectToken = (fomrobject_t)(compressed ? (uintptr_t)*(volatile uint32_t *)srcAddress: *(volatile uintptr_t *)srcAddress);
	omrobjectptr_t object = convertPointerFromToken(objectToken);

	if (NULL != _extensions->scavenger) {
		MM_EnvironmentStandard *env = MM_EnvironmentStandard::getEnvironment(vmThread->omrVMThread);
		Assert_GC_true_with_message(env, !_extensions->scavenger->isObjectInEvacuateMemory((omrobjectptr_t)srcAddress) || _extensions->isScavengerBackOutFlagRaised(), "readObject %llx in Evacuate\n", srcAddress);
		if (_extensions->scavenger->isObjectInEvacuateMemory(object)) {
			Assert_GC_true_with_message2(env, _extensions->scavenger->isConcurrentCycleInProgress(),
				"CS not in progress, found an object in Survivor: slot %llx object %llx\n", srcAddress, object);
			Assert_MM_true(_extensions->scavenger->isMutatorThreadInSyncWithCycle(env));
			/* since the object is still in evacuate, srcObject has not been scanned yet => we cannot assert
			 * whether srcObject should (already) be remembered (even if it's old)
			 */

			env->_scavengerStats._readObjectBarrierUpdate += 1;
			if (GLOBAL_READ_BARRIR_STATS_UPDATE_THRESHOLD == env->_scavengerStats._readObjectBarrierUpdate) {
				MM_AtomicOperations::addU64(&_extensions->scavengerStats._readObjectBarrierUpdate, GLOBAL_READ_BARRIR_STATS_UPDATE_THRESHOLD);
				env->_scavengerStats._readObjectBarrierUpdate = 0;
			}

			GC_SlotObject slotObject(env->getOmrVM(), srcAddress);
			MM_ForwardedHeader forwardHeader(object, compressed);
			omrobjectptr_t forwardPtr = forwardHeader.getForwardedObject();
			if (NULL != forwardPtr) {
				/* Object has been strictly (remotely) forwarded. Ensure the object is fully copied before exposing it, update the slot and return. */
				forwardHeader.copyOrWait(forwardPtr);
				slotObject.atomicWriteReferenceToSlot(object, forwardPtr);
			} else {
				omrobjectptr_t destinationObjectPtr = _extensions->scavenger->copyObject(env, &forwardHeader);
				if (NULL == destinationObjectPtr) {
					/* We have no place to copy (or, less likely, we lost to another thread forwarding it).
					 * We are forced to return the original location of the object.
					 * But we must prevent any other thread from making a copy of this object,
					 * so we will attempt to atomically self forward it. */
					forwardPtr = forwardHeader.setSelfForwardedObject();
					if (forwardPtr != object) {
						/* Some other thread successfully copied this object. Re-fetch the forwarding pointer,
						 * and ensure the object is fully copied before exposing it. */
						MM_ForwardedHeader(object, compressed).copyOrWait(forwardPtr);
						slotObject.atomicWriteReferenceToSlot(object, forwardPtr);
					}
					/* ... else it's self-forwarded -> no need to update the src slot */
				} else {
					/* Successfully copied (or copied by another thread). copyObject() ensures that the object is fully copied. */
					slotObject.atomicWriteReferenceToSlot(object, destinationObjectPtr);

					env->_scavengerStats._readObjectBarrierCopy += 1;
					if (GLOBAL_READ_BARRIR_STATS_UPDATE_THRESHOLD == env->_scavengerStats._readObjectBarrierCopy) {
						MM_AtomicOperations::addU64(&_extensions->scavengerStats._readObjectBarrierCopy, GLOBAL_READ_BARRIR_STATS_UPDATE_THRESHOLD);
						env->_scavengerStats._readObjectBarrierCopy = 0;
					}
				}
			}
		}
	}
	return true;
}
#endif

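/**
 * Called when an object is forced onto the finalizable list. While the SATB barrier is
 * active the object is remembered for rescan so that the collector will trace it.
 */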
void
MM_StandardAccessBarrier::forcedToFinalizableObject(J9VMThread* vmThread, J9Object* object)
{
	MM_EnvironmentBase* env = MM_EnvironmentBase::getEnvironment(vmThread->omrVMThread);
	if (_extensions->isSATBBarrierActive()) {
		rememberObjectToRescan(env, object);
	}
}

/**
 * Override of referenceGet. When the collector is tracing, it makes any gotten object "grey" to ensure
 * that it is eventually traced.
 *
 * @param refObject the SoftReference or WeakReference object on which get() is being called.
 * This barrier must not be called for PhantomReferences. The parameter must not be NULL.
 */
J9Object *
MM_StandardAccessBarrier::referenceGet(J9VMThread *vmThread, J9Object *refObject)
{
	J9Object *referent = J9VMJAVALANGREFREFERENCE_REFERENT_VM(vmThread->javaVM, refObject);

	/* SATB - Throughout tracing, we must turn any gotten reference into a root, because the
	 * thread doing the getting may already have been scanned. However, since we are
	 * running on a mutator thread and not a gc thread we do this indirectly by putting
	 * the object in the barrier buffer.
	 *
	 * Do nothing exceptional for NULL or marked referents
	 */
	if ((_extensions->isSATBBarrierActive()) && (NULL != referent) && (!_markingScheme->isMarked(referent))) {
		MM_EnvironmentBase *env = MM_EnvironmentBase::getEnvironment(vmThread->omrVMThread);
		rememberObjectToRescan(env, referent);
	}

	/* We must return the external reference */
	return referent;
}

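/**
 * Reprocess a java.lang.ref.Reference object. When the SATB barrier is in use the referent
 * is remembered exactly as in referenceGet(); otherwise a batch store barrier is applied to
 * the reference object.
 */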
void
MM_StandardAccessBarrier::referenceReprocess(J9VMThread *vmThread, J9Object *refObject)
{
	if (_extensions->usingSATBBarrier()) {
		referenceGet(vmThread, refObject);
	} else {
		postBatchObjectStore(vmThread, refObject);
	}
}

void
MM_StandardAccessBarrier::jniDeleteGlobalReference(J9VMThread *vmThread, J9Object *reference)
{
	if (_extensions->isSATBBarrierActive()) {
		MM_EnvironmentBase *env = MM_EnvironmentBase::getEnvironment(vmThread->omrVMThread);
		rememberObjectToRescan(env, reference);
	}
}

void
MM_StandardAccessBarrier::stringConstantEscaped(J9VMThread *vmThread, J9Object *stringConst)
{
	MM_EnvironmentBase *env = MM_EnvironmentBase::getEnvironment(vmThread->omrVMThread);

	if (_extensions->isSATBBarrierActive()) {
		rememberObjectToRescan(env, stringConst);
	}
}

bool
MM_StandardAccessBarrier::checkStringConstantsLive(J9JavaVM *javaVM, j9object_t stringOne, j9object_t stringTwo)
{
	if (_extensions->isSATBBarrierActive()) {
		J9VMThread* vmThread = javaVM->internalVMFunctions->currentVMThread(javaVM);
		stringConstantEscaped(vmThread, (J9Object *)stringOne);
		stringConstantEscaped(vmThread, (J9Object *)stringTwo);
	}

	return true;
}

/**
 * Equivalent to checkStringConstantsLive but for a single string constant
 */
bool
MM_StandardAccessBarrier::checkStringConstantLive(J9JavaVM *javaVM, j9object_t string)
{
	if (_extensions->isSATBBarrierActive()) {
		J9VMThread* vmThread = javaVM->internalVMFunctions->currentVMThread(javaVM);
		stringConstantEscaped(vmThread, (J9Object *)string);
	}

	return true;
}

#if defined(J9VM_GC_DYNAMIC_CLASS_UNLOADING)
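/**
 * Determine whether a class should be treated as live under the SATB barrier. A class that
 * has not yet been discovered dead has its defining class loader's object remembered for
 * rescan (while the barrier is active) so that the class survives the current marking cycle.
 */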
bool
MM_StandardAccessBarrier::checkClassLive(J9JavaVM *javaVM, J9Class *classPtr)
{
	bool result = true;

	if (_extensions->usingSATBBarrier()) {
		J9ClassLoader *classLoader = classPtr->classLoader;
		result = false;

		if ((0 == (classLoader->gcFlags & J9_GC_CLASS_LOADER_DEAD)) && (0 == (J9CLASS_FLAGS(classPtr) & J9AccClassDying))) {
			result = true;
			/* this class has not been discovered dead yet, so mark it if necessary to force it to stay alive */
			J9Object *classLoaderObject = classLoader->classLoaderObject;

			if (NULL != classLoaderObject) {
				/* If marking is active but not completed yet, force this class to be marked so it survives this GC */
				J9VMThread* vmThread = javaVM->internalVMFunctions->currentVMThread(javaVM);
				MM_EnvironmentBase* env = MM_EnvironmentBase::getEnvironment(vmThread->omrVMThread);
				if (_extensions->isSATBBarrierActive()) {
					rememberObjectToRescan(env, classLoaderObject);
				}
			}
			/* else this class loader is probably still being initialized and its class loader object has not been attached yet */
		}
	}

	return result;
}
#endif /* defined(J9VM_GC_DYNAMIC_CLASS_UNLOADING) */