/*******************************************************************************
 * Copyright (c) 2000, 2022 IBM Corp. and others
 *
 * This program and the accompanying materials are made available under
 * the terms of the Eclipse Public License 2.0 which accompanies this
 * distribution and is available at https://www.eclipse.org/legal/epl-2.0/
 * or the Apache License, Version 2.0 which accompanies this distribution and
 * is available at https://www.apache.org/licenses/LICENSE-2.0.
 *
 * This Source Code may also be made available under the following
 * Secondary Licenses when the conditions for such availability set
 * forth in the Eclipse Public License, v. 2.0 are satisfied: GNU
 * General Public License, version 2 with the GNU Classpath
 * Exception [1] and GNU General Public License, version 2 with the
 * OpenJDK Assembly Exception [2].
 *
 * [1] https://www.gnu.org/software/classpath/license.html
 * [2] http://openjdk.java.net/legal/assembly-exception.html
 *
 * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 OR GPL-2.0 WITH Classpath-exception-2.0 OR LicenseRef-GPL-2.0 WITH Assembly-exception
 *******************************************************************************/

#if defined(J9ZOS390)
#pragma csect(CODE,"TRJ9CompBase#C")
#pragma csect(STATIC,"TRJ9CompBase#S")
#pragma csect(TEST,"TRJ9CompBase#T")
#endif

#include "compile/J9Compilation.hpp"

#include <stdint.h>
#include "codegen/CodeGenerator.hpp"
#include "codegen/Instruction.hpp"
#include "compile/Compilation.hpp"
#include "compile/Compilation_inlines.hpp"
#include "compile/CompilationTypes.hpp"
#include "compile/ResolvedMethod.hpp"
#include "control/OptimizationPlan.hpp"
#include "control/Options.hpp"
#include "control/Options_inlines.hpp"
#include "control/Recompilation.hpp"
#include "control/RecompilationInfo.hpp"
#include "env/j9method.h"
#include "env/TRMemory.hpp"
#include "env/VMJ9.h"
#include "env/VMAccessCriticalSection.hpp"
#include "env/KnownObjectTable.hpp"
#include "env/VerboseLog.hpp"
#include "il/Node.hpp"
#include "il/Node_inlines.hpp"
#include "ilgen/IlGenRequest.hpp"
#include "infra/List.hpp"
#include "optimizer/Inliner.hpp"
#include "optimizer/OptimizationManager.hpp"
#include "optimizer/Optimizer.hpp"
#include "optimizer/TransformUtil.hpp"
#include "runtime/RuntimeAssumptions.hpp"
#include "runtime/J9Profiler.hpp"
#include "OMR/Bytes.hpp"
#include "il/ParameterSymbol.hpp"
#include "j9.h"
#include "j9cfg.h"

/*
 * There should be no allocations that use the global operator new, since
 * all allocations should go through the JitMemory allocation routines.
 * To catch cases that we miss, we define global operator new and delete here.
 * (xlC won't link statically with the -noe flag when we override these.)
 */
bool firstCompileStarted = false;

// JITSERVER_TODO: disabled to allow for JITServer
#if !defined(J9VM_OPT_JITSERVER)
void *operator new(size_t size)
   {
#if defined(DEBUG)
#if LINUX
   // glibc allocates something at dl_init; check whether a method is being
   // compiled to avoid triggering asserts during _dl_init
   if (firstCompileStarted)
#endif
      {
      printf("\n*** ERROR *** Invalid use of global operator new\n");
      TR_ASSERT(0, "Invalid use of global operator new");
      }
#endif
   return malloc(size);
   }

// Avoid -Wimplicit-exception-spec-mismatch error on platforms that specify the global delete operator with throw()
#ifndef _NOEXCEPT
#define _NOEXCEPT
#endif

/**
 * Since we are using arena allocation, heap deletion must be a no-op and must
 * not be used by JIT code, so we inject an assertion here.
 */
void operator delete(void *) _NOEXCEPT
   {
   TR_ASSERT(0, "Invalid use of global operator delete");
   }
#endif /* !defined(J9VM_OPT_JITSERVER) */




uint64_t J9::Compilation::_maxYieldIntervalS = 0;

TR_CallingContext J9::Compilation::_sourceContextForMaxYieldIntervalS = NO_CONTEXT;

TR_CallingContext J9::Compilation::_destinationContextForMaxYieldIntervalS = NO_CONTEXT;

TR_Stats** J9::Compilation::_compYieldStatsMatrix = NULL;


const char * callingContextNames[] = {
   "FBVA_INITIALIZE_CONTEXT",
   "FBVA_ANALYZE_CONTEXT",
   "BBVA_INITIALIZE_CONTEXT",
   "BBVA_ANALYZE_CONTEXT",
   "GRA_ASSIGN_CONTEXT",
   "PRE_ANALYZE_CONTEXT",
   "AFTER_INSTRUCTION_SELECTION_CONTEXT",
   "AFTER_REGISTER_ASSIGNMENT_CONTEXT",
   "AFTER_POST_RA_SCHEDULING_CONTEXT",
   "BEFORE_PROCESS_STRUCTURE_CONTEXT",
   "GRA_FIND_LOOPS_AND_CORRESPONDING_AUTOS_BLOCK_CONTEXT",
   "GRA_AFTER_FIND_LOOP_AUTO_CONTEXT",
   "ESC_CHECK_DEFSUSES_CONTEXT",
   "LAST_CONTEXT"
   };

#if defined(J9VM_OPT_JITSERVER)
bool J9::Compilation::_outOfProcessCompilation = false;
#endif /* defined(J9VM_OPT_JITSERVER) */

J9::Compilation::Compilation(int32_t id,
      J9VMThread *j9vmThread,
      TR_FrontEnd *fe,
      TR_ResolvedMethod *compilee,
      TR::IlGenRequest &ilGenRequest,
      TR::Options &options,
      TR::Region &heapMemoryRegion,
      TR_Memory *m,
      TR_OptimizationPlan *optimizationPlan,
      TR_RelocationRuntime *reloRuntime,
      TR::Environment *target)
   : OMR::CompilationConnector(
        id,
        j9vmThread->omrVMThread,
        (firstCompileStarted = true, fe),
        compilee,
        ilGenRequest,
        options,
        heapMemoryRegion,
        m,
        optimizationPlan,
        target),
     _updateCompYieldStats(
        options.getOption(TR_EnableCompYieldStats) ||
        options.getVerboseOption(TR_VerboseCompYieldStats) ||
        TR::Options::_compYieldStatsHeartbeatPeriod > 0),
     _maxYieldInterval(0),
     _previousCallingContext(NO_CONTEXT),
     _sourceContextForMaxYieldInterval(NO_CONTEXT),
     _destinationContextForMaxYieldInterval(NO_CONTEXT),
     _needsClassLookahead(true),
     _reservedDataCache(NULL),
     _totalNeededDataCacheSpace(0),
     _aotMethodDataStart(NULL),
     _curMethodMetadata(NULL),
     _getImplInlineable(false),
     _vpInfoManager(NULL),
     _bpInfoManager(NULL),
     _methodBranchInfoList(getTypedAllocator<TR_MethodBranchProfileInfo*>(self()->allocator())),
     _externalVPInfoList(getTypedAllocator<TR_ExternalValueProfileInfo*>(self()->allocator())),
     _doneHWProfile(false),
     _hwpInstructions(m),
     _hwpBCMap(m),
     _sideEffectGuardPatchSites(getTypedAllocator<TR_VirtualGuardSite*>(self()->allocator())),
     _j9VMThread(j9vmThread),
     _monitorAutos(m),
     _monitorAutoSymRefsInCompiledMethod(getTypedAllocator<TR::SymbolReference*>(self()->allocator())),
     _classForOSRRedefinition(m),
     _classForStaticFinalFieldModification(m),
     _profileInfo(NULL),
     _skippedJProfilingBlock(false),
     _reloRuntime(reloRuntime),
#if defined(J9VM_OPT_JITSERVER)
     _remoteCompilation(false),
     _serializedRuntimeAssumptions(getTypedAllocator<SerializedRuntimeAssumption *>(self()->allocator())),
     _clientData(NULL),
     _stream(NULL),
     _globalMemory(*::trPersistentMemory, heapMemoryRegion),
     _perClientMemory(_trMemory),
     _methodsRequiringTrampolines(getTypedAllocator<TR_OpaqueMethodBlock *>(self()->allocator())),
     _deserializedAOTMethod(false),
     _deserializedAOTMethodUsingSVM(false),
     _aotCacheStore(false),
     _serializationRecords(decltype(_serializationRecords)::allocator_type(heapMemoryRegion)),
#endif /* defined(J9VM_OPT_JITSERVER) */
     _osrProhibitedOverRangeOfTrees(false)
   {
   _symbolValidationManager = new (self()->region()) TR::SymbolValidationManager(self()->region(), compilee);

   _aotClassClassPointer = NULL;
   _aotClassClassPointerInitialized = false;

   _aotGuardPatchSites = new (m->trHeapMemory()) TR::list<TR_AOTGuardSite*>(getTypedAllocator<TR_AOTGuardSite*>(self()->allocator()));

   _aotClassInfo = new (m->trHeapMemory()) TR::list<TR::AOTClassInfo*>(getTypedAllocator<TR::AOTClassInfo*>(self()->allocator()));

   if (_updateCompYieldStats)
      _hiresTimeForPreviousCallingContext = TR::Compiler->vm.getHighResClock(self());

   _profileInfo = new (m->trHeapMemory()) TR_AccessedProfileInfo(heapMemoryRegion);

   for (int i = 0; i < CACHED_CLASS_POINTER_COUNT; i++)
      _cachedClassPointers[i] = NULL;


   // Add a known-object index to parm 0 so that other optimizations can be unlocked.
   // It is safe to do so because the method and method symbols of an archetype specimen
   // are not shared with other methods.
   //
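   // (Informal note, not from the original comment: for a custom MethodHandle
   // thunk the receiver handle is a compile-time known object, so tagging parm 0
   // with its known-object index presumably lets later passes such as inlining
   // and value propagation fold loads through the handle.)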
   TR::KnownObjectTable *knot = self()->getOrCreateKnownObjectTable();
   TR::IlGeneratorMethodDetails & details = ilGenRequest.details();
   if (knot && details.isMethodHandleThunk())
      {
      J9::MethodHandleThunkDetails & thunkDetails = static_cast<J9::MethodHandleThunkDetails &>(details);
      if (thunkDetails.isCustom())
         {
         TR::KnownObjectTable::Index index = knot->getOrCreateIndexAt(thunkDetails.getHandleRef());
         ListIterator<TR::ParameterSymbol> parms(&_methodSymbol->getParameterList());
         TR::ParameterSymbol* parm0 = parms.getFirst();
         parm0->setKnownObjectIndex(index);
         }
      }
   }

J9::Compilation::~Compilation()
   {
   _profileInfo->~TR_AccessedProfileInfo();
   }

TR_J9VMBase *
J9::Compilation::fej9()
   {
   return (TR_J9VMBase *)self()->fe();
   }

TR_J9VM *
J9::Compilation::fej9vm()
   {
   return (TR_J9VM *)self()->fe();
   }

void
J9::Compilation::updateCompYieldStatistics(TR_CallingContext callingContext)
   {
   // get time of this call
   //
   uint64_t crtTime = TR::Compiler->vm.getHighResClock(self());

   // compute the difference between 2 consecutive calls
   //
   static uint64_t hiresClockResolution = TR::Compiler->vm.getHighResClockResolution();
   uint64_t ticks = crtTime - _hiresTimeForPreviousCallingContext;
   uint64_t diffTime;

   if (hiresClockResolution < 1000000)
      diffTime = (ticks * 1000000) / hiresClockResolution;
   else
      diffTime = ticks / (hiresClockResolution / 1000000);
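   // Informal note (not from the original source): diffTime is intended to be in
   // microseconds, assuming getHighResClockResolution() reports ticks per second.
   // For example, with a 10 MHz clock (10,000,000 ticks/s), 25,000 elapsed ticks
   // become 25,000 / (10,000,000 / 1,000,000) = 2,500 usec. The value is then
   // accumulated in _compYieldStatsMatrix[previousContext][currentContext] below.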

   // update stats for the corresponding cell in the matrix
   // May lead to problems in the future when we add multiple compilation threads
   //
   if (self()->getOption(TR_EnableCompYieldStats))
      _compYieldStatsMatrix[(int32_t)_previousCallingContext][(int32_t)callingContext].update((double)diffTime);

   if (self()->getOptions()->getVerboseOption(TR_VerboseCompYieldStats))
      {
      if (diffTime > _maxYieldInterval)
         {
         _maxYieldInterval = diffTime;
         _sourceContextForMaxYieldInterval = _previousCallingContext;
         _destinationContextForMaxYieldInterval = callingContext;
         }
      }

   if (TR::Options::_compYieldStatsHeartbeatPeriod > 0)
      {
      if (diffTime > _maxYieldIntervalS)
         {
         _maxYieldIntervalS = diffTime;
         _sourceContextForMaxYieldIntervalS = _previousCallingContext;
         _destinationContextForMaxYieldIntervalS = callingContext;
         }
      }

   // prepare for next call
   //
   _hiresTimeForPreviousCallingContext = crtTime;
   _previousCallingContext = callingContext;
   }


void
J9::Compilation::allocateCompYieldStatsMatrix()
   {
   // need to use persistent memory
   _compYieldStatsMatrix = (TR_Stats**)TR::Compilation::jitPersistentAlloc(sizeof(TR_Stats *)*(int32_t)LAST_CONTEXT);

   for (int32_t i=0; i < (int32_t)LAST_CONTEXT; i++)
      {
      _compYieldStatsMatrix[i] = (TR_Stats *)TR::Compilation::jitPersistentAlloc(sizeof(TR_Stats)*(int32_t)LAST_CONTEXT);
      for (int32_t j=0; j < (int32_t)LAST_CONTEXT; j++)
         {
         char buffer[128];
         sprintf(buffer, "%d-%d", i, j);
         _compYieldStatsMatrix[i][j].setName(buffer);
         }
      }
   }

void
J9::Compilation::printCompYieldStats()
   {
   TR_VerboseLog::writeLine(
      TR_Vlog_PERF,
      "Max yield-to-yield time of %u usec for %s -- %s",
      static_cast<uint32_t>(_maxYieldInterval),
      J9::Compilation::getContextName(_sourceContextForMaxYieldInterval),
      J9::Compilation::getContextName(_destinationContextForMaxYieldInterval));
   }

const char *
J9::Compilation::getContextName(TR_CallingContext context)
   {
   if (context == OMR::endOpts || context == TR_CallingContext::NO_CONTEXT)
      return "NO CONTEXT";
   else if (context < OMR::numOpts)
      return TR::Optimizer::getOptimizationName((OMR::Optimizations)context);
   else
      return callingContextNames[context - OMR::numOpts];
   }

void
J9::Compilation::printEntryName(int32_t i, int32_t j)
   {
   fprintf(stderr, "\n%s -", J9::Compilation::getContextName((TR_CallingContext) i));
   fprintf(stderr, "- %s\n", J9::Compilation::getContextName((TR_CallingContext) j));
   }


void
J9::Compilation::printCompYieldStatsMatrix()
   {
   if (!_compYieldStatsMatrix)
      return; // the matrix may not have been allocated (for instance when we give a bad command line option)

   for (int32_t i=0; i < (int32_t)LAST_CONTEXT; i++)
      {
      for (int32_t j=0; j < (int32_t)LAST_CONTEXT; j++)
         {
         TR_Stats *stats = &_compYieldStatsMatrix[i][j];
         if (stats->samples() > 0 && stats->maxVal() > TR::Options::_compYieldStatsThreshold)
            {
            TR::Compilation::printEntryName(i, j);
            stats->report(stderr);
            }
         }
      }
   }

TR_AOTMethodHeader *
J9::Compilation::getAotMethodHeaderEntry()
   {
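   // Informal note (not from the original source): the AOT method data region
   // presumably starts with a J9JITDataCacheHeader immediately followed by the
   // TR_AOTMethodHeader, which is why the entry is obtained with pointer
   // arithmetic (aotMethodHeader + 1) below.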
   J9JITDataCacheHeader *aotMethodHeader = (J9JITDataCacheHeader *)self()->getAotMethodDataStart();
   TR_AOTMethodHeader *aotMethodHeaderEntry = (TR_AOTMethodHeader *)(aotMethodHeader + 1);
   return aotMethodHeaderEntry;
   }

TR::Node *
J9::Compilation::findNullChkInfo(TR::Node *node)
   {
   TR_ASSERT((node->getOpCodeValue() == TR::checkcastAndNULLCHK), "should call this only for checkcastAndNullChk\n");
   TR::Node * newNode = NULL;
   for (auto pair = self()->getCheckcastNullChkInfo().begin(); pair != self()->getCheckcastNullChkInfo().end(); ++pair)
      {
      if ((*pair)->getKey()->getByteCodeIndex() == node->getByteCodeIndex() &&
          (*pair)->getKey()->getCallerIndex() == node->getInlinedSiteIndex())
         {
         newNode = (*pair)->getValue();
         //dumpOptDetails("found bytecodeinfo for node %p as %x [%p]\n", node, newNode->getByteCodeIndex(), newNode);
         break;
         }
      }
   TR_ASSERT(newNode, "checkcastAndNullChk node doesn't have a corresponding null chk bytecodeinfo\n");
   return newNode;
   }


/**
 * Sometimes we start the compilation with an optLevel, but later on,
 * after we get more information, we decide to change it to something else.
 * This method is used to change the optLevel. Note that the optLevel
 * is cached in various data structures and it needs to be kept in sync.
 */
void
J9::Compilation::changeOptLevel(TR_Hotness newOptLevel)
   {
   self()->getOptions()->setOptLevel(newOptLevel);
   self()->getOptimizationPlan()->setOptLevel(newOptLevel);
   if (self()->getRecompilationInfo())
      {
      TR_PersistentJittedBodyInfo *bodyInfo = self()->getRecompilationInfo()->getJittedBodyInfo();
      if (bodyInfo)
         bodyInfo->setHotness(newOptLevel);
      }
   }


bool
J9::Compilation::isConverterMethod(TR::RecognizedMethod rm)
   {
   switch (rm)
      {
      case TR::sun_nio_cs_ISO_8859_1_Encoder_encodeISOArray:
      case TR::java_lang_StringCoding_implEncodeISOArray:
      case TR::java_lang_String_decodeUTF8_UTF16:
      case TR::sun_nio_cs_ISO_8859_1_Decoder_decodeISO8859_1:
      case TR::sun_nio_cs_US_ASCII_Encoder_encodeASCII:
      case TR::sun_nio_cs_US_ASCII_Decoder_decodeASCII:
      case TR::sun_nio_cs_ext_SBCS_Encoder_encodeSBCS:
      case TR::sun_nio_cs_ext_SBCS_Decoder_decodeSBCS:
      case TR::sun_nio_cs_UTF_8_Encoder_encodeUTF_8:
      case TR::sun_nio_cs_UTF_8_Decoder_decodeUTF_8:
      case TR::sun_nio_cs_UTF_16_Encoder_encodeUTF16Big:
      case TR::sun_nio_cs_UTF_16_Encoder_encodeUTF16Little:
         return true;
      default:
         return false;
      }

   return false;
   }


// This implicitly checks that the method is a recognized converter method.
bool
J9::Compilation::canTransformConverterMethod(TR::RecognizedMethod rm)
   {
   TR_ASSERT(self()->isConverterMethod(rm), "not a converter method\n");

   if (self()->getOption(TR_DisableConverterReducer))
      return false;

   bool aot = self()->compileRelocatableCode();
   bool genSIMD = self()->cg()->getSupportsVectorRegisters() && !self()->getOption(TR_DisableSIMDArrayTranslate);
   bool genTRxx = !aot && self()->cg()->getSupportsArrayTranslateTRxx();

   switch (rm)
      {
      case TR::sun_nio_cs_ISO_8859_1_Encoder_encodeISOArray:
      case TR::java_lang_StringCoding_implEncodeISOArray:
         return genTRxx || self()->cg()->getSupportsArrayTranslateTRTO255() || self()->cg()->getSupportsArrayTranslateTRTO() || genSIMD;

      case TR::sun_nio_cs_ISO_8859_1_Decoder_decodeISO8859_1:
         return genTRxx || self()->cg()->getSupportsArrayTranslateTROTNoBreak() || genSIMD;

      case TR::sun_nio_cs_US_ASCII_Encoder_encodeASCII:
      case TR::sun_nio_cs_UTF_8_Encoder_encodeUTF_8:
         return genTRxx || self()->cg()->getSupportsArrayTranslateTRTO() || genSIMD;

      case TR::sun_nio_cs_US_ASCII_Decoder_decodeASCII:
      case TR::sun_nio_cs_UTF_8_Decoder_decodeUTF_8:
         return genTRxx || self()->cg()->getSupportsArrayTranslateTROT() || genSIMD;

      case TR::sun_nio_cs_ext_SBCS_Encoder_encodeSBCS:
         return genTRxx && self()->cg()->getSupportsTestCharComparisonControl();

      case TR::sun_nio_cs_ext_SBCS_Decoder_decodeSBCS:
         return genTRxx;

      // devinmp: I'm not sure whether these could be transformed in AOT, but
      // they haven't been so far.
      case TR::sun_nio_cs_UTF_16_Encoder_encodeUTF16Little:
         return !aot && self()->cg()->getSupportsEncodeUtf16LittleWithSurrogateTest();

      case TR::sun_nio_cs_UTF_16_Encoder_encodeUTF16Big:
         return !aot && self()->cg()->getSupportsEncodeUtf16BigWithSurrogateTest();

      default:
         return false;
      }
   }


bool
J9::Compilation::useCompressedPointers()
   {
   // FIXME: probably have to query the GC as well
   return (self()->target().is64Bit() && TR::Options::useCompressedPointers());
   }


bool
J9::Compilation::useAnchors()
   {
   return (self()->useCompressedPointers());
   }


bool
J9::Compilation::hasBlockFrequencyInfo()
   {
   return TR_BlockFrequencyInfo::get(self()) != NULL;
   }

bool
J9::Compilation::isShortRunningMethod(int32_t callerIndex)
   {
      {
      const char *sig = NULL;
      if (callerIndex > -1)
         {
         // this should be more reliable, but needs verification as equivalent
         sig = self()->getInlinedResolvedMethod(callerIndex)->signature(self()->trMemory());
         }
      else
         sig = self()->signature();

      if (sig &&
          ((strncmp("java/lang/String.", sig, 17) == 0) ||
           (strncmp("java/util/HashMap.", sig, 18) == 0) ||
           (strncmp("java/util/TreeMap.", sig, 18) == 0) ||
           (strncmp("java/math/DivisionLong.", sig, 23) == 0) ||
           (strncmp("com/ibm/xml/xlxp2/scan/util/XMLString.", sig, 38) == 0) ||
           (strncmp("com/ibm/xml/xlxp2/scan/util/SymbolMap.", sig, 38) == 0) ||
           (strncmp("java/util/Random.next(I)I", sig, 25) == 0) ||
           (strncmp("java/util/zip/ZipFile.safeToUseModifiedUTF8", sig, 43) == 0) ||
           (strncmp("java/util/HashMap$HashIterator.", sig, 31) == 0) ||
           (strncmp("sun/misc/FloatingDecimal.readJavaFormatString", sig, 45) == 0)
          )
         )
         {
         return true;
         }
      }
   return false;
   }

bool
J9::Compilation::isRecompilationEnabled()
   {
   if (!self()->cg()->getSupportsRecompilation())
      {
      return false;
      }

   if (self()->isDLT())
      {
      return false;
      }

   // Don't do recompilation on JNI virtual thunk methods
   //
   if (self()->getCurrentMethod()->isJNINative())
      return false;

   return self()->allowRecompilation();
   }

bool
J9::Compilation::isJProfilingCompilation()
   {
   return self()->getRecompilationInfo() ? self()->getRecompilationInfo()->getJittedBodyInfo()->getUsesJProfiling() : false;
   }

// See if it is OK to remove this allocation node to e.g. merge it with others
// or allocate it locally on a stack frame.
// If so, return the allocation size if the size is constant, or zero if the
// size is variable.
// If not, return -1.
//
int32_t
J9::Compilation::canAllocateInlineOnStack(TR::Node* node, TR_OpaqueClassBlock* &classInfo)
   {
   if (self()->compileRelocatableCode())
      return -1;

   if (node->getOpCodeValue() == TR::New)
      {
      J9Class* clazz = self()->fej9vm()->getClassForAllocationInlining(self(), node->getFirstChild()->getSymbolReference());

      if (clazz == NULL)
         return -1;

      // Cannot inline the allocation on the stack if the class is special
      if (TR::Compiler->cls.isClassSpecialForStackAllocation((TR_OpaqueClassBlock *)clazz))
         return -1;
      }
   return self()->canAllocateInline(node, classInfo);
   }


bool
J9::Compilation::canAllocateInlineClass(TR_OpaqueClassBlock *block)
   {
   if (block == NULL)
      return false;

   return self()->fej9()->canAllocateInlineClass(block);
   }


// This code was previously in canAllocateInlineOnStack. However, it is required by code gen to
// inline heap allocations. The only difference, for now, is that inlined heap allocations
// are being enabled for AOT, but stack allocations are not (yet).
//
int32_t
J9::Compilation::canAllocateInline(TR::Node* node, TR_OpaqueClassBlock* &classInfo)
   {
   // Can't skip the allocation if we are generating JVMPI hooks, since
   // JVMPI needs to know about the allocation.
   //
   if (self()->suppressAllocationInlining() || !self()->fej9vm()->supportAllocationInlining(self(), node))
      return -1;

   // Inline allocation support for variable new is still pending on all platforms
   //
   if (node->getOpCodeValue() == TR::variableNew || node->getOpCodeValue() == TR::variableNewArray)
      return -1;

   int32_t size;
   TR::Node * classRef;
   TR::SymbolReference * classSymRef;
   TR::StaticSymbol * classSym;
   J9Class * clazz;

   bool isRealTimeGC = self()->getOptions()->realTimeGC();

   bool generateArraylets = self()->generateArraylets();

   const bool areValueTypesEnabled = TR::Compiler->om.areValueTypesEnabled();

   if (node->getOpCodeValue() == TR::New)
      {
      classRef = node->getFirstChild();
      classSymRef = classRef->getSymbolReference();

      classSym = classSymRef->getSymbol()->getStaticSymbol();

      // Check whether the class is eligible for inlined allocation.
      // The class has to be resolved, initialized, concrete, etc.
      clazz = self()->fej9vm()->getClassForAllocationInlining(self(), classSymRef);
      if (!self()->canAllocateInlineClass(reinterpret_cast<TR_OpaqueClassBlock*> (clazz)))
         return -1;

      classInfo = self()->fej9vm()->getClassOffsetForAllocationInlining(clazz);

      return self()->fej9()->getAllocationSize(classSym, reinterpret_cast<TR_OpaqueClassBlock*> (clazz));
      }

   int32_t elementSize;
   if (node->getOpCodeValue() == TR::newarray)
      {
      TR_ASSERT(node->getSecondChild()->getOpCode().isLoadConst(), "Expecting const child \n");

      int32_t arrayClassIndex = node->getSecondChild()->getInt();
      clazz = (J9Class *) self()->fej9()->getClassFromNewArrayTypeNonNull(arrayClassIndex);

      if (node->getFirstChild()->getOpCodeValue() != TR::iconst)
         {
         classInfo = self()->fej9vm()->getPrimitiveArrayAllocationClass(clazz);
         return 0;
         }

      // Make sure the constant number of elements requested is within reasonable bounds
      //
      TR_ASSERT(node->getFirstChild()->getOpCode().isLoadConst(), "Expecting const child \n");
      size = node->getFirstChild()->getInt();
      if (size < 0 || size > 0x000FFFFF)
         return -1;

      classInfo = self()->fej9vm()->getPrimitiveArrayAllocationClass(clazz);

      elementSize = TR::Compiler->om.getSizeOfArrayElement(node);
      }
   else if (node->getOpCodeValue() == TR::anewarray)
      {
      classRef = node->getSecondChild();

      // In the case of dynamic array allocation, return 0 indicating variable dynamic array allocation,
      // unless value types are enabled, in which case return -1 to prevent inline allocation
      if (classRef->getOpCodeValue() != TR::loadaddr)
         {
         classInfo = NULL;
         if (areValueTypesEnabled)
            {
            if (self()->getOption(TR_TraceCG))
               {
               traceMsg(self(), "cannot inline array allocation @ node %p because value types are enabled\n", node);
               }
            const char *signature = self()->signature();

            TR::DebugCounter::incStaticDebugCounter(self(), TR::DebugCounter::debugCounterName(self(), "inlineAllocation/dynamicArray/failed/valueTypes/(%s)", signature));
            return -1;
            }
         else
            {
            return 0;
            }
         }

      classSymRef = classRef->getSymbolReference();
      // Can't skip the allocation if the class is unresolved
      //
      clazz = self()->fej9vm()->getClassForAllocationInlining(self(), classSymRef);
      if (clazz == NULL)
         return -1;

      // Arrays of value type classes must have all their elements initialized with the
      // default value of the component type. For now, prevent inline allocation of them.
      //
      if (areValueTypesEnabled && TR::Compiler->cls.isValueTypeClass(reinterpret_cast<TR_OpaqueClassBlock*>(clazz)))
         {
         return -1;
         }

      auto classOffset = self()->fej9()->getArrayClassFromComponentClass(TR::Compiler->cls.convertClassPtrToClassOffset(clazz));
      clazz = TR::Compiler->cls.convertClassOffsetToClassPtr(classOffset);

      if (!clazz)
         return -1;

      if (node->getFirstChild()->getOpCodeValue() != TR::iconst)
         {
         classInfo = self()->fej9vm()->getClassOffsetForAllocationInlining(clazz);
         return 0;
         }

      // Make sure the number of elements requested is in reasonable bounds
      //
      TR_ASSERT(node->getFirstChild()->getOpCode().isLoadConst(), "Expecting const child \n");
      size = node->getFirstChild()->getInt();
      if (size < 0 || size > 0x000FFFFF)
         return -1;

      classInfo = self()->fej9vm()->getClassOffsetForAllocationInlining(clazz);

      if (self()->useCompressedPointers())
         elementSize = TR::Compiler->om.sizeofReferenceField();
      else
         elementSize = (int32_t)(TR::Compiler->om.sizeofReferenceAddress());
      }


   TR_ASSERT(node->getOpCodeValue() == TR::newarray ||
             node->getOpCodeValue() == TR::anewarray, "unexpected allocation node");

   size *= elementSize;
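   // Informal worked example (not from the original source): for a newarray of
   // 10 ints, size is 10 * 4 = 40 bytes at this point; a contiguous or arraylet
   // header is added below and the result is aligned up to the reference size,
   // e.g. 40 + 16 = 56 bytes with an assumed 16-byte contiguous header. The
   // actual header sizes depend on the object model configuration.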

   if (TR::Compiler->om.useHybridArraylets() && TR::Compiler->om.isDiscontiguousArray(size))
      {
      if (self()->getOption(TR_TraceCG))
         traceMsg(self(), "cannot inline array allocation @ node %p because size %d is discontiguous\n", node, size);
      return -1;
      }
   else if (!isRealTimeGC && size == 0)
      {
#if (defined(TR_HOST_S390) && defined(TR_TARGET_S390)) || (defined(TR_TARGET_X86) && defined(TR_HOST_X86)) || (defined(TR_TARGET_POWER) && defined(TR_HOST_POWER)) || (defined(TR_TARGET_ARM64) && defined(TR_HOST_ARM64))
      size = TR::Compiler->om.discontiguousArrayHeaderSizeInBytes();
      if (self()->getOption(TR_TraceCG))
         traceMsg(self(), "inline array allocation @ node %p for size 0\n", node);
#else
      if (self()->getOption(TR_TraceCG))
         traceMsg(self(), "cannot inline array allocation @ node %p because size 0 is discontiguous\n", node);
      return -1;
#endif
      }
   else if (generateArraylets)
      {
      size += self()->fej9()->getArrayletFirstElementOffset(elementSize, self());
      }
   else
      {
      size += TR::Compiler->om.contiguousArrayHeaderSizeInBytes();
      }

   if (node->getOpCodeValue() == TR::newarray || self()->useCompressedPointers())
      {
      size = (int32_t)OMR::align(size, TR::Compiler->om.sizeofReferenceAddress());
      }

   if (isRealTimeGC &&
       ((size < 0) || (size > self()->fej9()->getMaxObjectSizeForSizeClass())))
      return -1;

   TR_ASSERT(size != -1, "unexpected array size");

   return size >= J9_GC_MINIMUM_OBJECT_SIZE ? size : J9_GC_MINIMUM_OBJECT_SIZE;
   }


TR::KnownObjectTable *
J9::Compilation::getOrCreateKnownObjectTable()
   {
   if (!_knownObjectTable && !self()->getOption(TR_DisableKnownObjectTable))
      {
      _knownObjectTable = new (self()->trHeapMemory()) TR::KnownObjectTable(self());
      }

   return _knownObjectTable;
   }


void
J9::Compilation::freeKnownObjectTable()
   {
   if (_knownObjectTable)
      {
#if defined(J9VM_OPT_JITSERVER)
      if (!isOutOfProcessCompilation())
#endif /* defined(J9VM_OPT_JITSERVER) */
         {
         TR::VMAccessCriticalSection freeKnownObjectTable(self()->fej9());

         J9VMThread *thread = self()->fej9()->vmThread();
         TR_ASSERT(thread, "assertion failure");

         TR_ArrayIterator<uintptr_t> i(&_knownObjectTable->_references);
         for (uintptr_t *ref = i.getFirst(); !i.pastEnd(); ref = i.getNext())
            thread->javaVM->internalVMFunctions->j9jni_deleteLocalRef((JNIEnv*)thread, (jobject)ref);
         }
      }

   _knownObjectTable = NULL;
   }


bool
J9::Compilation::compileRelocatableCode()
   {
   return self()->fej9()->isAOT_DEPRECATED_DO_NOT_USE();
   }

bool
J9::Compilation::compilePortableCode()
   {
   return self()->fej9()->inSnapshotMode();
   }


int32_t
J9::Compilation::maxInternalPointers()
   {
   if (self()->getOption(TR_DisableInternalPointers))
      return 0;
   else
      return 128;
   }


void
J9::Compilation::addHWPInstruction(TR::Instruction *instruction,
                                   TR_HWPInstructionInfo::type instructionType,
                                   void *data)
   {
   if (!self()->getPersistentInfo()->isRuntimeInstrumentationEnabled())
      return;

   TR::Node *node = instruction->getNode();

   switch (instructionType)
      {
      case TR_HWPInstructionInfo::callInstructions:
      case TR_HWPInstructionInfo::indirectCallInstructions:
         TR_ASSERT(node->getOpCode().isCall(), "Unknown instruction for HW profiling");
         break;
      case TR_HWPInstructionInfo::returnInstructions:
      case TR_HWPInstructionInfo::valueProfileInstructions:
         break;
      default:
         TR_ASSERT(false, "Unknown instruction for HW profiling");
      }

   TR_HWPInstructionInfo hwpInstructionInfo = {(void*)instruction,
                                               data,
                                               instructionType};

   _hwpInstructions.add(hwpInstructionInfo);
   }


void
J9::Compilation::addHWPCallInstruction(TR::Instruction *instruction, bool indirectCall, TR::Instruction *prev)
   {
   if (indirectCall)
      self()->addHWPInstruction(instruction, TR_HWPInstructionInfo::indirectCallInstructions, (void*)prev);
   else
      self()->addHWPInstruction(instruction, TR_HWPInstructionInfo::callInstructions);
   }


void
J9::Compilation::addHWPReturnInstruction(TR::Instruction *instruction)
   {
   self()->addHWPInstruction(instruction, TR_HWPInstructionInfo::returnInstructions);
   }


void
J9::Compilation::addHWPValueProfileInstruction(TR::Instruction *instruction)
   {
   self()->addHWPInstruction(instruction, TR_HWPInstructionInfo::valueProfileInstructions);
   }


void
J9::Compilation::verifyCompressedRefsAnchors()
   {
   vcount_t visitCount = self()->incVisitCount();

   TR::TreeTop *tt;
   for (tt = self()->getStartTree(); tt; tt = tt->getNextTreeTop())
      {
      TR::Node *node = tt->getNode();
      self()->verifyCompressedRefsAnchors(NULL, node, tt, visitCount);
      }
   }

void
J9::Compilation::verifyCompressedRefsAnchors(TR::Node *parent, TR::Node *node,
                                             TR::TreeTop *tt, vcount_t visitCount)
   {
   if (node->getVisitCount() == visitCount)
      return;

   node->setVisitCount(visitCount);

   // check stores
   //
   if (node->getOpCode().isLoadIndirect() ||
       (node->getOpCode().isStoreIndirect() &&
        !node->getOpCode().isWrtBar()))
      {
      if (node->getSymbolReference()->getSymbol()->getDataType() == TR::Address &&
          node->getOpCode().isRef())
         TR_ASSERT(0, "indirect store %p not lowered!\n", node);
      }

   // check children for loads/stores
   //
   for (int32_t i = node->getNumChildren()-1; i >= 0; i--)
      {
      TR::Node *child = node->getChild(i);
      self()->verifyCompressedRefsAnchors(node, child, tt, visitCount);
      }
   }

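// Informal summary (not from the original source): when anchorize is true, any
// address-typed indirect load/store (or reference arrayset) found without an
// enclosing compressedRefs anchor gets a new anchor treetop of the shape
//    compressedRefs
//      <the un-anchored load/store>
// placed next to the treetop that uses it, so that later lowering can insert
// the compress/decompress sequences; see the implementation below for the
// exact placement rules.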
bool
J9::Compilation::verifyCompressedRefsAnchors(bool anchorize)
   {
   bool status = true;

   vcount_t visitCount = self()->incVisitCount();
   TR::list<TR_Pair<TR::Node, TR::TreeTop> *> nodesList(getTypedAllocator<TR_Pair<TR::Node, TR::TreeTop> *>(self()->allocator()));
   TR::TreeTop *tt;
   for (tt = self()->getStartTree(); tt; tt = tt->getNextTreeTop())
      {
      TR::Node *n = tt->getNode();
      self()->verifyCompressedRefsAnchors(NULL, n, tt, visitCount, nodesList);
      }

   // create anchors if required
   if (anchorize)
      {
      TR_Pair<TR::Node, TR::TreeTop> *info;
      // all non-null tt fields indicate some loads/stores were found
      // with no corresponding anchors
      //
      for (auto info = nodesList.begin(); info != nodesList.end(); ++info)
         {
         TR::TreeTop *tt = (*info)->getValue();
         if (tt)
            {
            TR::Node *n = (*info)->getKey();
            dumpOptDetails(self(), "No anchor found for load/store [%p]\n", n);
            if (TR::TransformUtil::fieldShouldBeCompressed(n, self()))
               {
               status = false;
               dumpOptDetails(self(), "placing anchor at [%p]\n", tt->getNode());
               TR::TreeTop *newTT = TR::TreeTop::create(self(),
                                                        TR::Node::createCompressedRefsAnchor(n),
                                                        NULL, NULL);
#if 0 ///#ifdef DEBUG
               TR_ASSERT(0, "No anchor found for load/store [%p]", n);
#else
               // For the child of a null check or resolve check, the side effect doesn't rely on the
               // value of the child, thus the anchor needs to be placed after tt. For other nodes,
               // place the anchor before tt.
               //
               TR::TreeTop *next = tt->getNextTreeTop();
               if ((tt->getNode()->getOpCode().isNullCheck()
                    || tt->getNode()->getOpCode().isResolveCheck())
                   && n == tt->getNode()->getFirstChild())
                  {
                  tt->join(newTT);
                  newTT->join(next);
                  }
               else
                  {
                  TR::TreeTop *prev = tt->getPrevTreeTop();
                  prev->join(newTT);
                  // Previously, the below path only applied to store nodes (hence
                  // the isTreeTop() check). However, it's now been made to apply to
                  // void-type nodes as well. This is to account for nodes such as
                  // TR::arrayset. Specifically, in the case where the child to be set
                  // in an arrayset node is an indirect reference (e.g. static String),
                  // we need to treat the arrayset node as an indirect store (and compress
                  // the reference accordingly)
                  if (n->getOpCode().isTreeTop() || n->getOpCode().isVoid())
                     {
                     newTT->join(next);

                     // In the case where the void node's (e.g. TR::arrayset) parent is
                     // not itself (e.g. it's a TR::treetop), we anchor the arrayset node and its children
                     // under a compressedRefs node and remove the original arrayset tree
                     // found under TR::treetop. The reference count of the arrayset node is
                     // incremented when we create the compressedRefs anchor, but not when
                     // we 'remove' the TR::treetop node. Hence we must recursively decrement
                     // here.
                     if (n != tt->getNode())
                        {
                        for (int i = 0; i < tt->getNode()->getNumChildren(); i++)
                           tt->getNode()->getChild(i)->recursivelyDecReferenceCount();
                        }
                     }
                  else
                     newTT->join(tt);
                  }
               status = true;
#endif
               }
            else
               dumpOptDetails(self(), "field at [%p] need not be compressed\n", n);
            }
         else
            dumpOptDetails(self(), "Anchor found for load/store [%p]\n", (*info)->getKey());
         }
      }
   return status;
   }


static TR_Pair<TR::Node, TR::TreeTop> *findCPtrsInfo(TR::list<TR_Pair<TR::Node, TR::TreeTop> *> &haystack,
                                                     TR::Node *needle)
   {
   for (auto info = haystack.begin(); info != haystack.end(); ++info)
      {
      if ((*info)->getKey() == needle)
         return *info;
      }
   return NULL;
   }


void
J9::Compilation::verifyCompressedRefsAnchors(TR::Node *parent, TR::Node *node,
                                             TR::TreeTop *tt, vcount_t visitCount,
                                             TR::list<TR_Pair<TR::Node, TR::TreeTop> *> &nodesList)
   {
   if (node->getVisitCount() == visitCount)
      return;

   // process loads/stores that are references
   //
   if (((node->getOpCode().isLoadIndirect() || node->getOpCode().isStoreIndirect()) &&
        node->getSymbolReference()->getSymbol()->getDataType() == TR::Address) ||
       (node->getOpCodeValue() == TR::arrayset && node->getSecondChild()->getDataType() == TR::Address))
      {
      TR_Pair<TR::Node, TR::TreeTop> *info = findCPtrsInfo(nodesList, node);

      // check if the load/store is already under an anchor;
      // if so, this load/store will be lowered correctly
      //
      if (parent && parent->getOpCodeValue() == TR::compressedRefs)
         {
         // set tt value to null to indicate success
         //
         if (info)
            info->setValue(NULL);

         // do not process this node again
         //
         node->setVisitCount(visitCount);
         }
      else
         {
         // either encountered the load/store for the first time, in which
         // case record it,
         // -or-
         // it's referenced multiple times, in which case do nothing until
         // an anchor is found
         //
         if (!info)
            {
            // add node, tt to the nodesList
            TR_Pair<TR::Node, TR::TreeTop> *newVal = new (self()->trStackMemory()) TR_Pair<TR::Node, TR::TreeTop> (node, tt);
            nodesList.push_front(newVal);
            }
         }
      }
   else
      node->setVisitCount(visitCount);

   // process the children
   //
   for (int32_t i = node->getNumChildren()-1; i >= 0; i--)
      {
      TR::Node *child = node->getChild(i);
      self()->verifyCompressedRefsAnchors(node, child, tt, visitCount, nodesList);
      }
   }


TR_VirtualGuardSite *
J9::Compilation::addSideEffectNOPSite()
   {
   TR_VirtualGuardSite *site = new /* (PERSISTENT_NEW)*/ (self()->trHeapMemory()) TR_VirtualGuardSite;
   _sideEffectGuardPatchSites.push_front(site);
   return site;
   }


TR_AOTGuardSite *
J9::Compilation::addAOTNOPSite()
   {
   TR_AOTGuardSite *site = new /* (PERSISTENT_NEW)*/ (self()->trHeapMemory()) TR_AOTGuardSite;
   _aotGuardPatchSites->push_front(site);
   return site;
   }

bool
J9::Compilation::incInlineDepth(TR::ResolvedMethodSymbol * method, TR_ByteCodeInfo & bcInfo, int32_t cpIndex, TR::SymbolReference *callSymRef, bool directCall, TR_PrexArgInfo *argInfo)
   {
   TR_ASSERT_FATAL(callSymRef == NULL, "Should not be calling this API for non-NULL symref!\n");
   return OMR::CompilationConnector::incInlineDepth(method, bcInfo, cpIndex, callSymRef, directCall, argInfo);
   }

bool
J9::Compilation::isGeneratedReflectionMethod(TR_ResolvedMethod * method)
   {
   if (!method) return false;

   if (strstr(method->signature(self()->trMemory()), "sun/reflect/GeneratedMethodAccessor"))
      return true;

   return false;
   }


TR_ExternalRelocationTargetKind
J9::Compilation::getReloTypeForMethodToBeInlined(TR_VirtualGuardSelection *guard, TR::Node *callNode, TR_OpaqueClassBlock *receiverClass)
   {
   TR_ExternalRelocationTargetKind reloKind = OMR::Compilation::getReloTypeForMethodToBeInlined(guard, callNode, receiverClass);

   if (callNode && self()->compileRelocatableCode())
      {
      if (guard && guard->_kind == TR_ProfiledGuard)
         {
         if (guard->_type == TR_MethodTest)
            reloKind = TR_ProfiledMethodGuardRelocation;
         else if (guard->_type == TR_VftTest)
            reloKind = TR_ProfiledClassGuardRelocation;
         }
      else
         {
         TR::MethodSymbol *methodSymbol = callNode->getSymbolReference()->getSymbol()->castToMethodSymbol();

         if (methodSymbol->isSpecial())
            {
            reloKind = TR_InlinedSpecialMethod;
            }
         else if (methodSymbol->isStatic())
            {
            reloKind = TR_InlinedStaticMethod;
            }
         else if (receiverClass
                  && TR::Compiler->cls.isAbstractClass(self(), receiverClass)
                  && methodSymbol->getResolvedMethodSymbol()->getResolvedMethod()->isAbstract())
            {
            reloKind = TR_InlinedAbstractMethod;
            }
         else if (methodSymbol->isVirtual())
            {
            reloKind = TR_InlinedVirtualMethod;
            }
         else if (methodSymbol->isInterface())
            {
            reloKind = TR_InlinedInterfaceMethod;
            }
         }

      if (reloKind == TR_NoRelocation)
         {
         TR_InlinedCallSite *site = self()->getCurrentInlinedCallSite();
         TR_OpaqueMethodBlock *caller;
         if (site)
            {
            caller = site->_methodInfo;
            }
         else
            {
            caller = self()->getMethodBeingCompiled()->getNonPersistentIdentifier();
            }

         TR_ASSERT_FATAL(false, "Can't find relo kind for Caller %p Callee %p TR_ByteCodeInfo %p\n",
                         caller,
                         callNode->getSymbol()->castToResolvedMethodSymbol()->getResolvedMethod()->getNonPersistentIdentifier(),
                         callNode->getByteCodeInfo());
         }
      }

   return reloKind;
   }

bool
J9::Compilation::compilationShouldBeInterrupted(TR_CallingContext callingContext)
   {
   return self()->fej9()->compilationShouldBeInterrupted(self(), callingContext);
   }

void
J9::Compilation::enterHeuristicRegion()
   {
   if (self()->getOption(TR_UseSymbolValidationManager)
       && self()->compileRelocatableCode())
      {
      self()->getSymbolValidationManager()->enterHeuristicRegion();
      }
   }

void
J9::Compilation::exitHeuristicRegion()
   {
   if (self()->getOption(TR_UseSymbolValidationManager)
       && self()->compileRelocatableCode())
      {
      self()->getSymbolValidationManager()->exitHeuristicRegion();
      }
   }

bool
J9::Compilation::validateTargetToBeInlined(TR_ResolvedMethod *implementer)
   {
   if (self()->getOption(TR_UseSymbolValidationManager)
       && self()->compileRelocatableCode())
      {
      return self()->getSymbolValidationManager()->addMethodFromClassRecord(implementer->getPersistentIdentifier(),
                                                                            implementer->classOfMethod(),
                                                                            -1);
      }
   return true;
   }


void
J9::Compilation::reportILGeneratorPhase()
   {
   self()->fej9()->reportILGeneratorPhase();
   }


void
J9::Compilation::reportAnalysisPhase(uint8_t id)
   {
   self()->fej9()->reportAnalysisPhase(id);
   }


void
J9::Compilation::reportOptimizationPhase(OMR::Optimizations opts)
   {
   self()->fej9()->reportOptimizationPhase(opts);
   }


void
J9::Compilation::reportOptimizationPhaseForSnap(OMR::Optimizations opts)
   {
   self()->fej9()->reportOptimizationPhaseForSnap(opts, self());
   }


TR::Compilation::CompilationPhase
J9::Compilation::saveCompilationPhase()
   {
   return self()->fej9()->saveCompilationPhase();
   }


void
J9::Compilation::restoreCompilationPhase(TR::Compilation::CompilationPhase phase)
   {
   self()->fej9()->restoreCompilationPhase(phase);
   }

void
J9::Compilation::addMonitorAuto(TR::RegisterMappedSymbol * a, int32_t callerIndex)
   {
   TR_Array<List<TR::RegisterMappedSymbol> *> & monitorAutos = self()->getMonitorAutos();
   List<TR::RegisterMappedSymbol> * autos = monitorAutos[callerIndex + 1];
   if (!autos)
      monitorAutos[callerIndex + 1] = autos = new (self()->trHeapMemory()) List<TR::RegisterMappedSymbol>(self()->trMemory());

   autos->add(a);
   }

void
J9::Compilation::addAsMonitorAuto(TR::SymbolReference* symRef, bool dontAddIfDLT)
   {
   symRef->getSymbol()->setHoldsMonitoredObject();
   int32_t siteIndex = self()->getCurrentInlinedSiteIndex();
   if (!self()->isPeekingMethod())
      {
      self()->addMonitorAuto(symRef->getSymbol()->castToRegisterMappedSymbol(), siteIndex);
      if (!dontAddIfDLT)
         {
         if (siteIndex == -1)
            self()->getMonitorAutoSymRefsInCompiledMethod()->push_front(symRef);
         }
      else
         {
         // only add the symref into the list for initialization when not in DLT and not peeking.
         // in DLT, we already use the corresponding slot to store the locked object from the interpreter,
         // so initializing the symRef later in the block can overwrite the first store.
         if (!self()->isDLT() && siteIndex == -1)
            self()->getMonitorAutoSymRefsInCompiledMethod()->push_front(symRef);
         }
      }
   }

TR_OpaqueClassBlock *
J9::Compilation::getClassClassPointer(bool isVettedForAOT)
   {
   if (!isVettedForAOT || self()->getOption(TR_UseSymbolValidationManager))
      {
      TR_OpaqueClassBlock *jlObject = self()->getObjectClassPointer();
      return jlObject ? self()->fe()->getClassClassPointer(jlObject) : 0;
      }

   if (_aotClassClassPointerInitialized)
      return _aotClassClassPointer;

   _aotClassClassPointerInitialized = true;

   bool jlObjectVettedForAOT = true;
   TR_OpaqueClassBlock *jlObject = self()->fej9()->getClassFromSignature(
      "Ljava/lang/Object;",
      18,
      self()->getCurrentMethod(),
      jlObjectVettedForAOT);

   if (jlObject == NULL)
      return NULL;

   TR_OpaqueClassBlock *jlClass = self()->fe()->getClassClassPointer(jlObject);
   if (jlClass == NULL)
      return NULL;

   TR_ResolvedJ9Method *method = (TR_ResolvedJ9Method*)self()->getCurrentMethod();
   if (!method->validateArbitraryClass(self(), (J9Class*)jlClass))
      return NULL;

   _aotClassClassPointer = jlClass;
   return jlClass;
   }

TR_OpaqueClassBlock *
J9::Compilation::getObjectClassPointer()
   {
   return self()->getCachedClassPointer(OBJECT_CLASS_POINTER);
   }

TR_OpaqueClassBlock *
J9::Compilation::getRunnableClassPointer()
   {
   return self()->getCachedClassPointer(RUNNABLE_CLASS_POINTER);
   }

TR_OpaqueClassBlock *
J9::Compilation::getStringClassPointer()
   {
   return self()->getCachedClassPointer(STRING_CLASS_POINTER);
   }

TR_OpaqueClassBlock *
J9::Compilation::getSystemClassPointer()
   {
   return self()->getCachedClassPointer(SYSTEM_CLASS_POINTER);
   }

TR_OpaqueClassBlock *
J9::Compilation::getReferenceClassPointer()
   {
   return self()->getCachedClassPointer(REFERENCE_CLASS_POINTER);
   }

TR_OpaqueClassBlock *
J9::Compilation::getJITHelpersClassPointer()
   {
   return self()->getCachedClassPointer(JITHELPERS_CLASS_POINTER);
   }

TR_OpaqueClassBlock *
J9::Compilation::getCachedClassPointer(CachedClassPointerId which)
   {
   TR_OpaqueClassBlock *clazz = _cachedClassPointers[which];
   if (clazz != NULL)
      return clazz;

   if (self()->compileRelocatableCode()
       && !self()->getOption(TR_UseSymbolValidationManager))
      return NULL;

   static const char * const names[] =
      {
      "Ljava/lang/Object;",
      "Ljava/lang/Runnable;",
      "Ljava/lang/String;",
      "Ljava/lang/System;",
      "Ljava/lang/ref/Reference;",
      "Lcom/ibm/jit/JITHelpers;",
      };

   static_assert(
      sizeof (names) / sizeof (names[0]) == CACHED_CLASS_POINTER_COUNT,
      "wrong number of entries in J9::Compilation cached class names array");

   const char *name = names[which];
   clazz = self()->fej9()->getClassFromSignature(
      name,
      strlen(name),
      self()->getCurrentMethod());

   _cachedClassPointers[which] = clazz;
   return clazz;
   }

/*
 * Adds the provided TR_OpaqueClassBlock to the set of those that trigger OSR guard patching
 * on a redefinition.
 * A cheaper implementation would be a set, not an array.
 */
void
J9::Compilation::addClassForOSRRedefinition(TR_OpaqueClassBlock *clazz)
   {
   for (uint32_t i = 0; i < _classForOSRRedefinition.size(); ++i)
      if (_classForOSRRedefinition[i] == clazz)
         return;

   _classForOSRRedefinition.add(clazz);
   }

/*
 * Adds the provided TR_OpaqueClassBlock to the set of those that trigger OSR guard patching
 * on a static final field modification.
 */
void
J9::Compilation::addClassForStaticFinalFieldModification(TR_OpaqueClassBlock *clazz)
   {
   // Class redefinition can also modify static final fields
   self()->addClassForOSRRedefinition(clazz);

   for (uint32_t i = 0; i < _classForStaticFinalFieldModification.size(); ++i)
      if (_classForStaticFinalFieldModification[i] == clazz)
         return;

   _classForStaticFinalFieldModification.add(clazz);
   }

/*
 * Controls whether pending push liveness is stashed during IlGen to reduce OSRLiveRange
 * overhead.
 */
bool
J9::Compilation::pendingPushLivenessDuringIlgen()
   {
   static bool enabled = (feGetEnv("TR_DisablePendingPushLivenessDuringIlGen") == NULL);
   if (self()->getOSRMode() == TR::involuntaryOSR)
      return false;
   else
      return enabled;
   }

bool
J9::Compilation::supportsQuadOptimization()
   {
   if (self()->isDLT() || self()->getOption(TR_FullSpeedDebug))
      return false;
   return true;
   }


bool
J9::Compilation::notYetRunMeansCold()
   {
   if (self()->getOptimizer() && !(self()->getOptimizer()->isIlGenOpt()))
      return false;

   TR_ResolvedMethod *currentMethod = self()->getJittedMethodSymbol()->getResolvedMethod();

   intptr_t initialCount = currentMethod->hasBackwardBranches() ?
                              self()->getOptions()->getInitialBCount() :
                              self()->getOptions()->getInitialCount();

   switch (currentMethod->getRecognizedMethod())
      {
      case TR::com_ibm_jit_DecimalFormatHelper_formatAsDouble:
      case TR::com_ibm_jit_DecimalFormatHelper_formatAsFloat:
         initialCount = 0;
         break;
      default:
         break;
      }

   if (currentMethod->containingClass() == self()->getStringClassPointer())
      {
      if (currentMethod->isConstructor())
         {
         char *sig = currentMethod->signatureChars();
         if (!strncmp(sig, "([CIIII)", 8) ||
             !strncmp(sig, "([CIICII)", 9) ||
             !strncmp(sig, "(II[C)", 6))
            initialCount = 0;
         }
      else
         {
         const char *sig = "isRepeatedCharCacheHit";
         if (strncmp(currentMethod->nameChars(), sig, strlen(sig)) == 0)
            initialCount = 0;
         }
      }

   if (
       self()->isDLT()
       || (initialCount < TR_UNRESOLVED_IMPLIES_COLD_COUNT)
       || ((self()->getOption(TR_UnresolvedAreNotColdAtCold) && self()->getMethodHotness() == cold) || self()->getMethodHotness() < cold)
       || currentMethod->convertToMethod()->isArchetypeSpecimen()
       || (self()->getCurrentMethod()
           && self()->getCurrentMethod()->convertToMethod()->isArchetypeSpecimen())
      )
      return false;
   else
      return true;
   }

bool
J9::Compilation::incompleteOptimizerSupportForReadWriteBarriers()
   {
   return self()->getOption(TR_EnableFieldWatch);
   }

#if defined(J9VM_OPT_JITSERVER)
void
J9::Compilation::addSerializationRecord(const AOTCacheRecord *record, uintptr_t reloDataOffset)
   {
   TR_ASSERT_FATAL(_aotCacheStore, "Trying to add serialization record for compilation that is not an AOT cache store");
   if (record)
      _serializationRecords.push_back({ record, reloDataOffset });
   else
      _aotCacheStore = false; // Serialization failed; the method won't be stored in the AOT cache
   }
#endif /* defined(J9VM_OPT_JITSERVER) */