GitHub Repository: PojavLauncherTeam/openj9
Path: blob/master/runtime/compiler/optimizer/J9Optimizer.cpp
/*******************************************************************************
 * Copyright (c) 2000, 2022 IBM Corp. and others
 *
 * This program and the accompanying materials are made available under
 * the terms of the Eclipse Public License 2.0 which accompanies this
 * distribution and is available at https://www.eclipse.org/legal/epl-2.0/
 * or the Apache License, Version 2.0 which accompanies this distribution and
 * is available at https://www.apache.org/licenses/LICENSE-2.0.
 *
 * This Source Code may also be made available under the following
 * Secondary Licenses when the conditions for such availability set
 * forth in the Eclipse Public License, v. 2.0 are satisfied: GNU
 * General Public License, version 2 with the GNU Classpath
 * Exception [1] and GNU General Public License, version 2 with the
 * OpenJDK Assembly Exception [2].
 *
 * [1] https://www.gnu.org/software/classpath/license.html
 * [2] http://openjdk.java.net/legal/assembly-exception.html
 *
 * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 OR GPL-2.0 WITH Classpath-exception-2.0 OR LicenseRef-GPL-2.0 WITH Assembly-exception
 *******************************************************************************/

#if defined(J9ZOS390)
// On z/OS the XLC linker can't handle multiple files with the same name at link
// time, so this pragma workaround is needed. It essentially gives a different
// name to the code section (csect) for this file, so it doesn't conflict with
// another file of the same name.
#pragma csect(CODE,"J9Optimizer#C")
#pragma csect(STATIC,"J9Optimizer#S")
#pragma csect(TEST,"J9Optimizer#T")
#endif

#include "optimizer/Optimizer.hpp"

#include <stddef.h>
#include <stdint.h>
#include "compile/Compilation.hpp"
#include "compile/Method.hpp"
#include "control/Options.hpp"
#include "control/Options_inlines.hpp"
#include "control/Recompilation.hpp"
#include "control/RecompilationInfo.hpp"
#include "il/ResolvedMethodSymbol.hpp"
#include "optimizer/AllocationSinking.hpp"
#include "optimizer/IdiomRecognition.hpp"
#include "optimizer/Inliner.hpp"
#include "optimizer/J9Inliner.hpp"
#include "optimizer/JitProfiler.hpp"
#include "optimizer/LiveVariablesForGC.hpp"
#include "optimizer/OptimizationManager.hpp"
#include "optimizer/OptimizationStrategies.hpp"
#include "optimizer/Optimizations.hpp"
#include "optimizer/PartialRedundancy.hpp"
#include "optimizer/ProfileGenerator.hpp"
#include "optimizer/SequentialStoreSimplifier.hpp"
#include "optimizer/SignExtendLoads.hpp"
#include "optimizer/StringBuilderTransformer.hpp"
#include "optimizer/SwitchAnalyzer.hpp"
#include "optimizer/DynamicLiteralPool.hpp"
#include "optimizer/EscapeAnalysis.hpp"
#include "optimizer/PreEscapeAnalysis.hpp"
#include "optimizer/PostEscapeAnalysis.hpp"
#include "optimizer/DataAccessAccelerator.hpp"
#include "optimizer/HotFieldMarking.hpp"
#include "optimizer/IsolatedStoreElimination.hpp"
#include "optimizer/LoopAliasRefiner.hpp"
#include "optimizer/MonitorElimination.hpp"
#include "optimizer/NewInitialization.hpp"
#include "optimizer/SinkStores.hpp"
#include "optimizer/SPMDParallelizer.hpp"
#include "optimizer/StringPeepholes.hpp"
#include "optimizer/StripMiner.hpp"
#include "optimizer/ValuePropagation.hpp"
#include "optimizer/TrivialDeadBlockRemover.hpp"
#include "optimizer/OSRGuardInsertion.hpp"
#include "optimizer/OSRGuardRemoval.hpp"
#include "optimizer/JProfilingBlock.hpp"
#include "optimizer/JProfilingValue.hpp"
#include "optimizer/JProfilingRecompLoopTest.hpp"
#include "runtime/J9Profiler.hpp"
#include "optimizer/UnsafeFastPath.hpp"
#include "optimizer/TreeLowering.hpp"
#include "optimizer/VarHandleTransformer.hpp"
#include "optimizer/StaticFinalFieldFolding.hpp"
#include "optimizer/HandleRecompilationOps.hpp"
#include "optimizer/MethodHandleTransformer.hpp"
#include "optimizer/VectorAPIExpansion.hpp"

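// Each OptimizationStrategy below is a list of { optimization-or-group, flags } entries;
// the optional second member (e.g. OMR::IfEnabled, OMR::IfLoops, OMR::MustBeDone,
// OMR::MarkLastRun) appears to control when and how that pass is run, and each list is
// terminated by OMR::endGroup or OMR::endOpts. See OptimizationStrategies.hpp for the
// entry and flag definitions.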
static const OptimizationStrategy J9EarlyGlobalOpts[] =
   {
   { OMR::stringBuilderTransformer },
   { OMR::stringPeepholes }, // need stringpeepholes to catch bigdecimal patterns
   { OMR::inlining },
   { OMR::methodHandleInvokeInliningGroup, OMR::IfEnabled },
   { OMR::staticFinalFieldFolding, },
   { OMR::osrGuardInsertion, OMR::MustBeDone },
   { OMR::osrExceptionEdgeRemoval }, // most inlining is done by now
   { OMR::jProfilingBlock },
   { OMR::stringBuilderTransformer },
   { OMR::stringPeepholes, },
   //{ basicBlockOrdering, IfLoops }, // early ordering with no extension
   { OMR::treeSimplification, OMR::IfEnabled },
   { OMR::compactNullChecks }, // cleans up after inlining; MUST be done before PRE
   { OMR::virtualGuardTailSplitter }, // merge virtual guards
   { OMR::treeSimplification },
   { OMR::CFGSimplification },
   { OMR::endGroup }
   };

static const OptimizationStrategy J9EarlyLocalOpts[] =
   {
   { OMR::localValuePropagation },
   //{ localValuePropagationGroup },
   { OMR::localReordering },
   { OMR::switchAnalyzer },
   { OMR::treeSimplification, OMR::IfEnabled }, // simplify any exprs created by LCP/LCSE
   { OMR::catchBlockRemoval }, // if all possible exceptions in a try were removed by inlining/LCP/LCSE
   { OMR::deadTreesElimination }, // remove any anchored dead loads
   { OMR::profiledNodeVersioning },
   { OMR::endGroup }
   };

static const OptimizationStrategy signExtendLoadsOpts[] =
   {
   { OMR::signExtendLoads },
   { OMR::endGroup }
   };

// **************************************************************************
//
// Strategy that is used by full speed debug for methods that do share slots (the old FSD strategy before OSR)
//
// **************************************************************************
static const OptimizationStrategy fsdStrategyOptsForMethodsWithSlotSharing[] =
   {
   { OMR::trivialInlining, OMR::IfNotFullInliningUnderOSRDebug }, //added for fsd inlining
   { OMR::inlining, OMR::IfFullInliningUnderOSRDebug }, //added for fsd inlining
   { OMR::basicBlockExtension },
   { OMR::treeSimplification }, //added for fsd inlining
   { OMR::localCSE },
   { OMR::treeSimplification },
   { OMR::cheapTacticalGlobalRegisterAllocatorGroup }, // added for fsd gra
   { OMR::treeLowering, OMR::MustBeDone },
   { OMR::globalLiveVariablesForGC },
   { OMR::regDepCopyRemoval },
   { OMR::endOpts },
   };


// **************************************************************************
//
// Strategy that is used by full speed debug for methods that do not share slots
//
// **************************************************************************
static const OptimizationStrategy fsdStrategyOptsForMethodsWithoutSlotSharing[] =
   {
   { OMR::coldBlockOutlining },
   { OMR::trivialInlining, OMR::IfNotFullInliningUnderOSRDebug }, //added for fsd inlining
   { OMR::inlining, OMR::IfFullInliningUnderOSRDebug }, //added for fsd inlining
   { OMR::virtualGuardTailSplitter }, // merge virtual guards
   { OMR::treeSimplification },

   { OMR::CFGSimplification, OMR::IfOptServer }, // for WAS trace folding
   { OMR::treeSimplification, OMR::IfOptServer }, // for WAS trace folding
   { OMR::localCSE, OMR::IfEnabledAndOptServer }, // for WAS trace folding
   { OMR::treeSimplification, OMR::IfEnabledAndOptServer }, // for WAS trace folding
   { OMR::globalValuePropagation, },
   { OMR::treeSimplification, OMR::IfEnabled },
   { OMR::cheapObjectAllocationGroup, },
   { OMR::globalValuePropagation, OMR::IfEnabled }, // if inlined a call or an object
   { OMR::treeSimplification, OMR::IfEnabled },
   { OMR::catchBlockRemoval, OMR::IfEnabled }, // if checks were removed
   { OMR::globalValuePropagation, OMR::IfEnabledMarkLastRun }, // mark monitors requiring sync
   { OMR::virtualGuardTailSplitter, OMR::IfEnabled }, // merge virtual guards
   { OMR::CFGSimplification },
   { OMR::globalCopyPropagation, },
   { OMR::lastLoopVersionerGroup, OMR::IfLoops },
   { OMR::globalDeadStoreElimination, OMR::IfLoops },
   { OMR::deadTreesElimination, },
   { OMR::basicBlockOrdering, OMR::IfLoops }, // required for loop reduction
   { OMR::treeSimplification },
   { OMR::loopReduction },
   { OMR::blockShuffling }, // to stress idiom recognition
   { OMR::idiomRecognition, OMR::IfLoops },
   { OMR::blockSplitter },
   { OMR::treeSimplification },
   { OMR::inductionVariableAnalysis, OMR::IfLoopsAndNotProfiling },
   { OMR::generalLoopUnroller, OMR::IfLoopsAndNotProfiling },
   { OMR::samplingJProfiling },
   { OMR::basicBlockExtension, OMR::MarkLastRun }, // extend blocks; move trees around if reqd
   { OMR::treeSimplification }, // revisit; not really required ?
   { OMR::localValuePropagationGroup, },
   { OMR::arraycopyTransformation },
   { OMR::treeSimplification, OMR::IfEnabled },
   { OMR::localDeadStoreElimination, }, // after latest copy propagation
   { OMR::deadTreesElimination, }, // remove dead anchors created by check/store removal
   { OMR::treeSimplification, OMR::IfEnabled },
   { OMR::localCSE },
   { OMR::treeSimplification, OMR::MarkLastRun },
   { OMR::andSimplification, }, //clean up after versioner
   { OMR::compactNullChecks, }, // cleanup at the end
   { OMR::deadTreesElimination, OMR::IfEnabled }, // cleanup at the end
   { OMR::treesCleansing, OMR::IfEnabled },
   { OMR::deadTreesElimination, OMR::IfEnabled }, // cleanup at the end
   { OMR::localCSE, OMR::IfEnabled }, // common up expressions for sunk stores
   { OMR::treeSimplification, OMR::IfEnabledMarkLastRun }, // cleanup the trees after sunk store and localCSE
   { OMR::dynamicLiteralPool, },
   { OMR::localDeadStoreElimination, OMR::IfEnabled }, //remove the astore if no literal pool is required
   { OMR::localCSE, OMR::IfEnabled }, //common up lit pool refs in the same block
   { OMR::deadTreesElimination, OMR::IfEnabled }, // cleanup at the end
   { OMR::treeSimplification, OMR::IfEnabledMarkLastRun }, // Simplify non-normalized address computations introduced by prefetch insertion
   { OMR::trivialDeadTreeRemoval, OMR::IfEnabled }, // final cleanup before opcode expansion
   { OMR::globalDeadStoreElimination, },
   { OMR::cheapTacticalGlobalRegisterAllocatorGroup, },
   { OMR::treeLowering, OMR::MustBeDone },
   { OMR::globalDeadStoreGroup, },
   { OMR::rematerialization, },
   { OMR::compactNullChecks, }, // cleanup at the end
   { OMR::deadTreesElimination, OMR::IfEnabled }, // remove dead anchors created by check/store removal
   { OMR::deadTreesElimination, OMR::IfEnabled }, // remove dead RegStores produced by previous deadTrees pass
   { OMR::globalLiveVariablesForGC },
   { OMR::regDepCopyRemoval },
   { OMR::endOpts },
   };


static const OptimizationStrategy *fsdStrategies[] =
   {
   fsdStrategyOptsForMethodsWithSlotSharing,
   fsdStrategyOptsForMethodsWithoutSlotSharing
   };
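// fsdStrategies[0] is used for FSD methods that share stack slots and fsdStrategies[1]
// for those that do not; the selection happens in J9::Optimizer::optimizationStrategy()
// below when TR_MimicInterpreterFrameShape is set.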


// **********************************************************
//
// NO-OPT STRATEGY
//
// **********************************************************
static const OptimizationStrategy noOptStrategyOpts[] =
   {
   { OMR::trivialDeadTreeRemoval, OMR::IfEnabled },
   { OMR::treeSimplification },
   { OMR::recompilationModifier, OMR::IfEnabled },
   { OMR::treeLowering, OMR::MustBeDone },
   { OMR::globalLiveVariablesForGC, OMR::IfAggressiveLiveness },
   { OMR::endOpts }
   };


// ***************************************************************************
//
// Strategy for cold methods. This is an early compile for methods known to have
// loops so it should have a light optimization load.
//
// ***************************************************************************

static const OptimizationStrategy coldStrategyOpts[] =
   {
   { OMR::trivialDeadTreeRemoval, OMR::IfEnabled },
   { OMR::coldBlockOutlining },
   { OMR::stringBuilderTransformer, OMR::IfNotQuickStart },
   { OMR::stringPeepholes, OMR::IfNotQuickStart }, // need stringpeepholes to catch bigdecimal patterns
   { OMR::trivialInlining },
   { OMR::jProfilingBlock },
   { OMR::virtualGuardTailSplitter },
   { OMR::recompilationModifier, OMR::IfEnabled },
   { OMR::samplingJProfiling },
   { OMR::treeSimplification }, // cleanup before basicBlockExtension
#if defined(J9VM_OPT_OPENJDK_METHODHANDLE)
   { OMR::recognizedCallTransformer, OMR::MarkLastRun },
#endif
   { OMR::basicBlockExtension },
   { OMR::localValuePropagationGroup },
   { OMR::deadTreesElimination },
   { OMR::localCSE, OMR::IfEnabled },
   { OMR::treeSimplification },
   { OMR::arraycopyTransformation },
   { OMR::sequentialLoadAndStoreColdGroup, OMR::IfEnabled }, // disabled by default, enabled by -Xjit:enableSequentialLoadStoreCold
   { OMR::localCSE, OMR::IfEnabled },
   { OMR::treeSimplification, },
   { OMR::localDeadStoreElimination, OMR::IfEnabled },
   { OMR::deadTreesElimination, OMR::IfEnabled },
   { OMR::localCSE, OMR::IfEnabled },
   { OMR::treeSimplification },
   { OMR::dynamicLiteralPool, OMR::IfNotProfiling },
   { OMR::localCSE, OMR::IfEnabled },
   { OMR::treeSimplification, OMR::MarkLastRun },
   { OMR::rematerialization },
   { OMR::compactNullChecks, OMR::IfEnabled },
   { OMR::signExtendLoadsGroup, OMR::IfEnabled },
   { OMR::jProfilingRecompLoopTest, OMR::IfLoops },
   { OMR::trivialDeadTreeRemoval, },
   { OMR::cheapTacticalGlobalRegisterAllocatorGroup, OMR::IfAOTAndEnabled },
   { OMR::jProfilingValue, OMR::MustBeDone },
   { OMR::treeLowering, OMR::MustBeDone },
   { OMR::globalLiveVariablesForGC, OMR::IfAggressiveLiveness },
   { OMR::profilingGroup, OMR::IfProfiling },
   { OMR::regDepCopyRemoval },
   { OMR::hotFieldMarking },
   { OMR::endOpts }
   };


// ***************************************************************************
//
// Strategy for warm methods. An initial number of invocations of the method
// have already happened, but this is the first compile of the method.
//
// ***************************************************************************
//
static const OptimizationStrategy warmStrategyOpts[] =
   {
   { OMR::trivialDeadTreeRemoval, OMR::IfEnabled },
   { OMR::coldBlockOutlining },
   { OMR::stringBuilderTransformer },
   { OMR::stringPeepholes }, // need stringpeepholes to catch bigdecimal patterns
   { OMR::inlining },
   { OMR::methodHandleInvokeInliningGroup, OMR::IfEnabled },
   { OMR::staticFinalFieldFolding, },
   { OMR::osrGuardInsertion, OMR::MustBeDone },
   { OMR::osrExceptionEdgeRemoval }, // most inlining is done by now
   { OMR::jProfilingBlock },
   { OMR::virtualGuardTailSplitter }, // merge virtual guards
   { OMR::treeSimplification },
   { OMR::sequentialLoadAndStoreWarmGroup, OMR::IfEnabled }, // disabled by default, enabled by -Xjit:enableSequentialLoadStoreWarm
   { OMR::cheapGlobalValuePropagationGroup },
   { OMR::localCSE, OMR::IfVectorAPI },
   { OMR::dataAccessAccelerator }, // globalValuePropagation and inlining might expose opportunities for dataAccessAccelerator
   { OMR::globalCopyPropagation, OMR::IfVoluntaryOSR },
   { OMR::lastLoopVersionerGroup, OMR::IfLoops },
   { OMR::globalDeadStoreElimination, OMR::IfEnabledAndLoops },
   { OMR::deadTreesElimination },
   { OMR::recompilationModifier, OMR::IfEnabledAndNotProfiling },
   { OMR::localReordering, OMR::IfNoLoopsOREnabledAndLoops }, // if required or if not done earlier
   { OMR::basicBlockOrdering, OMR::IfLoops }, // required for loop reduction
   { OMR::treeSimplification },
   { OMR::loopReduction },
   { OMR::blockShuffling }, // to stress idiom recognition
   { OMR::idiomRecognition, OMR::IfLoopsAndNotProfiling },
   { OMR::blockSplitter },
   { OMR::treeSimplification },
   { OMR::inductionVariableAnalysis, OMR::IfLoopsAndNotProfiling },
   { OMR::generalLoopUnroller, OMR::IfLoopsAndNotProfiling },
   { OMR::virtualGuardHeadMerger },
#if defined(J9VM_OPT_OPENJDK_METHODHANDLE)
   { OMR::recognizedCallTransformer, OMR::MarkLastRun },
#endif
   { OMR::basicBlockExtension, OMR::MarkLastRun }, // extend blocks; move trees around if reqd
   { OMR::treeSimplification }, // revisit; not really required ?
   { OMR::localValuePropagationGroup },
   { OMR::arraycopyTransformation },
   { OMR::treeSimplification, OMR::IfEnabled },
   { OMR::redundantAsyncCheckRemoval, OMR::IfNotJitProfiling },
   { OMR::localDeadStoreElimination }, // after latest copy propagation
   { OMR::deadTreesElimination }, // remove dead anchors created by check/store removal
   { OMR::treeSimplification, OMR::IfEnabled },
   { OMR::localCSE },
   { OMR::treeSimplification, OMR::MarkLastRun },
   { OMR::andSimplification, OMR::IfEnabled }, //clean up after versioner
   { OMR::compactNullChecks }, // cleanup at the end
   { OMR::deadTreesElimination, OMR::IfEnabled }, // cleanup at the end
   { OMR::globalCopyPropagation, OMR::IfMethodHandleInvokes }, // Does a lot of good after methodHandleInvokeInliningGroup
   { OMR::generalStoreSinking },
   { OMR::treesCleansing, OMR::IfEnabled },
   { OMR::deadTreesElimination, OMR::IfEnabled }, // cleanup at the end
   { OMR::localCSE, OMR::IfEnabled }, // common up expressions for sunk stores
   { OMR::treeSimplification, OMR::IfEnabledMarkLastRun }, // cleanup the trees after sunk store and localCSE
   { OMR::dynamicLiteralPool, OMR::IfNotProfiling },
   { OMR::samplingJProfiling },
   { OMR::trivialBlockExtension },
   { OMR::localDeadStoreElimination, OMR::IfEnabled }, //remove the astore if no literal pool is required
   { OMR::localCSE, OMR::IfEnabled }, //common up lit pool refs in the same block
   { OMR::deadTreesElimination, OMR::IfEnabled }, // cleanup at the end
   { OMR::signExtendLoadsGroup, OMR::IfEnabled }, // last opt before GRA
   { OMR::treeSimplification, OMR::IfEnabledMarkLastRun }, // Simplify non-normalized address computations introduced by prefetch insertion
   { OMR::trivialDeadTreeRemoval, OMR::IfEnabled }, // final cleanup before opcode expansion
   { OMR::globalDeadStoreElimination, OMR::IfVoluntaryOSR },
   { OMR::arraysetStoreElimination },
   { OMR::checkcastAndProfiledGuardCoalescer },
   { OMR::jProfilingRecompLoopTest, OMR::IfLoops },
   { OMR::globalDeadStoreElimination, OMR::IfVectorAPI }, // global dead store removal
   { OMR::deadTreesElimination, OMR::IfVectorAPI }, // cleanup after dead store removal
   { OMR::vectorAPIExpansion, OMR::IfVectorAPI },
   { OMR::cheapTacticalGlobalRegisterAllocatorGroup, OMR::IfEnabled },
   { OMR::jProfilingValue, OMR::MustBeDone },
   { OMR::treeLowering, OMR::MustBeDone },
   { OMR::globalDeadStoreGroup, },
   { OMR::rematerialization },
   { OMR::compactNullChecks, OMR::IfEnabled }, // cleanup at the end
   { OMR::deadTreesElimination, OMR::IfEnabled }, // remove dead anchors created by check/store removal
   { OMR::deadTreesElimination, OMR::IfEnabled }, // remove dead RegStores produced by previous deadTrees pass
   { OMR::compactLocals, OMR::IfNotJitProfiling }, // analysis results are invalidated by profilingGroup
   { OMR::globalLiveVariablesForGC },
   { OMR::profilingGroup, OMR::IfProfiling },
   { OMR::regDepCopyRemoval },
   { OMR::hotFieldMarking },
   { OMR::endOpts }
   };


// ***************************************************************************
// A (possibly temporary) strategy for partially optimizing W-Code
// ***************************************************************************
//
static const OptimizationStrategy reducedWarmStrategyOpts[] =
   {
   { OMR::inlining },
   { OMR::staticFinalFieldFolding, },
   { OMR::osrGuardInsertion, OMR::MustBeDone },
   { OMR::osrExceptionEdgeRemoval }, // most inlining is done by now
   { OMR::jProfilingBlock },
   { OMR::dataAccessAccelerator }, // immediate does unconditional dataAccessAccelerator after inlining
   { OMR::treeSimplification },
   { OMR::deadTreesElimination },
   { OMR::treeSimplification },
#if defined(J9VM_OPT_OPENJDK_METHODHANDLE)
   { OMR::recognizedCallTransformer, OMR::MarkLastRun },
#endif
   { OMR::basicBlockExtension }, // extend blocks; move trees around if reqd
   { OMR::treeSimplification }, // revisit; not really required ?
   { OMR::localCSE },
   { OMR::treeSimplification, OMR::MarkLastRun },
   { OMR::deadTreesElimination, OMR::IfEnabled }, // cleanup at the end
   { OMR::jProfilingRecompLoopTest, OMR::IfLoops },
   { OMR::globalDeadStoreElimination, OMR::IfVectorAPI }, // global dead store removal
   { OMR::deadTreesElimination, OMR::IfVectorAPI }, // cleanup after dead store removal
   { OMR::vectorAPIExpansion, OMR::IfVectorAPI },
   { OMR::cheapTacticalGlobalRegisterAllocatorGroup, OMR::IfEnabled },
   { OMR::treeLowering, OMR::MustBeDone },
   { OMR::jProfilingValue, OMR::MustBeDone },
   { OMR::hotFieldMarking },
   { OMR::endOpts }
   };


// ***************************************************************************
//
// Strategy for hot methods. The method has been compiled before and sampling
// has discovered that it is hot.
//
// ***************************************************************************
const OptimizationStrategy hotStrategyOpts[] =
   {
   { OMR::trivialDeadTreeRemoval, OMR::IfEnabled },
   { OMR::coldBlockOutlining },
   { OMR::earlyGlobalGroup },
   { OMR::earlyLocalGroup },
   { OMR::stripMiningGroup, OMR::IfLoops }, // strip mining in loops
   { OMR::loopReplicator, OMR::IfLoops }, // tail-duplication in loops
   { OMR::blockSplitter, OMR::IfNews }, // treeSimplification + blockSplitter + VP => opportunity for EA
   { OMR::expensiveGlobalValuePropagationGroup },
   { OMR::localCSE, OMR::IfVectorAPI },
   { OMR::loopCanonicalization, OMR::IfVectorAPI },
   { OMR::partialRedundancyEliminationGroup, OMR::IfVectorAPI },
   { OMR::globalDeadStoreElimination, OMR::IfVectorAPI }, // global dead store removal
   { OMR::deadTreesElimination, OMR::IfVectorAPI }, // cleanup after dead store removal
   { OMR::vectorAPIExpansion, OMR::IfVectorAPI },
   { OMR::dataAccessAccelerator },
   { OMR::osrGuardRemoval, OMR::IfEnabled }, // run after calls/monents/asyncchecks have been removed
   { OMR::globalDeadStoreGroup, },
   { OMR::idiomRecognition, OMR::IfLoopsAndNotProfiling }, // Early pass of idiomRecognition - Loop Canonicalizer transformations break certain idioms (i.e. arrayTranslateAndTest)
   { OMR::globalCopyPropagation, OMR::IfNoLoops },
   { OMR::loopCanonicalizationGroup, OMR::IfLoops }, // canonicalize loops (improve fall throughs)
   { OMR::inductionVariableAnalysis, OMR::IfLoops },
   { OMR::redundantInductionVarElimination, OMR::IfLoops },
   { OMR::loopAliasRefinerGroup, OMR::IfLoops },
   { OMR::recompilationModifier, OMR::IfEnabledAndNotProfiling },
   { OMR::partialRedundancyEliminationGroup },
   { OMR::globalDeadStoreElimination, OMR::IfLoopsAndNotProfiling },
   { OMR::inductionVariableAnalysis, OMR::IfLoopsAndNotProfiling },
   { OMR::loopSpecializerGroup, OMR::IfLoopsAndNotProfiling },
   { OMR::inductionVariableAnalysis, OMR::IfLoopsAndNotProfiling },
   { OMR::generalLoopUnroller, OMR::IfLoopsAndNotProfiling }, // unroll Loops
#if defined(J9VM_OPT_OPENJDK_METHODHANDLE)
   { OMR::recognizedCallTransformer, OMR::MarkLastRun },
#endif
   { OMR::blockManipulationGroup },
   { OMR::lateLocalGroup },
   { OMR::sequentialStoreSimplificationGroup, }, // reduce sequential stores into an arrayset
   { OMR::redundantAsyncCheckRemoval, OMR::IfNotJitProfiling }, // optimize async check placement
   { OMR::recompilationModifier, OMR::IfProfiling }, // do before GRA to avoid commoning of longs afterwards
   { OMR::globalCopyPropagation, OMR::IfMoreThanOneBlock }, // Can produce opportunities for store sinking
   { OMR::generalStoreSinking },

   { OMR::localCSE, OMR::IfEnabled }, //common up lit pool refs in the same block
   { OMR::treeSimplification, OMR::IfEnabled }, // cleanup the trees after sunk store and localCSE
   { OMR::dynamicLiteralPool, OMR::IfNotProfiling },
   { OMR::trivialBlockExtension },
   { OMR::localDeadStoreElimination, OMR::IfEnabled }, //remove the astore if no literal pool is required
   { OMR::localCSE, OMR::IfEnabled }, //common up lit pool refs in the same block
   { OMR::deadTreesElimination, OMR::IfEnabled }, // cleanup at the end
   { OMR::signExtendLoadsGroup, OMR::IfEnabled }, // last opt before GRA
   { OMR::trivialDeadTreeRemoval, OMR::IfEnabled }, // final cleanup before opcode expansion
   { OMR::arraysetStoreElimination },
   { OMR::localValuePropagation, OMR::MarkLastRun },
   { OMR::arraycopyTransformation },
   { OMR::checkcastAndProfiledGuardCoalescer },
   { OMR::jProfilingRecompLoopTest, OMR::IfLoops },
   { OMR::tacticalGlobalRegisterAllocatorGroup, OMR::IfEnabled },
   { OMR::jProfilingValue, OMR::MustBeDone },
   { OMR::treeLowering, OMR::MustBeDone },
   { OMR::globalDeadStoreElimination, OMR::IfMoreThanOneBlock }, // global dead store removal
   { OMR::deadTreesElimination }, // cleanup after dead store removal
   { OMR::compactNullChecks }, // cleanup at the end
   { OMR::finalGlobalGroup }, // done just before codegen
   { OMR::profilingGroup, OMR::IfProfiling },
   { OMR::regDepCopyRemoval },
   { OMR::hotFieldMarking },
   { OMR::endOpts }
   };

// ***************************************************************************
//
// Strategy for very hot methods. This is not currently used, same as hot.
//
// ***************************************************************************
const OptimizationStrategy veryHotStrategyOpts[] =
   {
   { OMR::hotStrategy },
   { OMR::endOpts }
   };

// ***************************************************************************
//
// Strategy for scorching hot methods. This is the last time the method will
// be compiled, so throw everything (within reason) at it.
//
// ***************************************************************************
const OptimizationStrategy scorchingStrategyOpts[] =
   {
#if 0
   { OMR::hotStrategy },
   { OMR::endOpts }
#else
   { OMR::coldBlockOutlining },
   { OMR::earlyGlobalGroup },
   { OMR::earlyLocalGroup },
   { OMR::andSimplification }, // needs commoning across blocks to work well; must be done after versioning
   { OMR::stripMiningGroup, OMR::IfLoops }, // strip mining in loops
   { OMR::loopReplicator, OMR::IfLoops }, // tail-duplication in loops
   { OMR::blockSplitter, OMR::IfNews }, // treeSimplification + blockSplitter + VP => opportunity for EA
   { OMR::arrayPrivatizationGroup, OMR::IfNews }, // must precede escape analysis
   { OMR::veryExpensiveGlobalValuePropagationGroup },
   { OMR::dataAccessAccelerator }, //always run after GVP
   { OMR::osrGuardRemoval, OMR::IfEnabled }, // run after calls/monents/asyncchecks have been removed
   { OMR::globalDeadStoreGroup, },
   { OMR::idiomRecognition, OMR::IfLoopsAndNotProfiling }, // Early pass of idiomRecognition - Loop Canonicalizer transformations break certain idioms (i.e. arrayTranslateAndTest)
   { OMR::globalCopyPropagation, OMR::IfNoLoops },
   { OMR::localCSE, OMR::IfVectorAPI },
   { OMR::loopCanonicalization, OMR::IfVectorAPI },
   { OMR::partialRedundancyEliminationGroup, OMR::IfVectorAPI },
   { OMR::globalDeadStoreElimination, OMR::IfVectorAPI }, // global dead store removal
   { OMR::deadTreesElimination, OMR::IfVectorAPI }, // cleanup after dead store removal
   { OMR::vectorAPIExpansion, OMR::IfVectorAPI },
   { OMR::loopCanonicalizationGroup, OMR::IfLoops }, // canonicalize loops (improve fall throughs)
   { OMR::inductionVariableAnalysis, OMR::IfLoops },
   { OMR::redundantInductionVarElimination, OMR::IfLoops },
   { OMR::loopAliasRefinerGroup, OMR::IfLoops }, // version loops to improve aliasing (after versioned to reduce code growth)
   { OMR::expressionsSimplification, OMR::IfLoops },
   { OMR::recompilationModifier, OMR::IfEnabled },
   { OMR::partialRedundancyEliminationGroup },
   { OMR::globalDeadStoreElimination, OMR::IfLoops },
   { OMR::inductionVariableAnalysis, OMR::IfLoops },
   { OMR::loopSpecializerGroup, OMR::IfLoops },
   { OMR::inductionVariableAnalysis, OMR::IfLoops },
   { OMR::generalLoopUnroller, OMR::IfLoops }, // unroll Loops
   { OMR::blockSplitter, OMR::MarkLastRun },
#if defined(J9VM_OPT_OPENJDK_METHODHANDLE)
   { OMR::recognizedCallTransformer, OMR::MarkLastRun },
#endif
   { OMR::blockManipulationGroup },
   { OMR::lateLocalGroup },
   { OMR::sequentialStoreSimplificationGroup }, // reduce sequential stores into an arrayset
   { OMR::redundantAsyncCheckRemoval, OMR::IfNotJitProfiling }, // optimize async check placement
   { OMR::recompilationModifier, OMR::IfProfiling }, // do before GRA to avoid commoning of longs afterwards
   { OMR::globalCopyPropagation, OMR::IfMoreThanOneBlock }, // Can produce opportunities for store sinking
   { OMR::generalStoreSinking },
   { OMR::localCSE, OMR::IfEnabled }, //common up lit pool refs in the same block
   { OMR::treeSimplification, OMR::IfEnabled }, // cleanup the trees after sunk store and localCSE
   { OMR::dynamicLiteralPool, OMR::IfNotProfiling },
   { OMR::trivialBlockExtension },
   { OMR::localDeadStoreElimination, OMR::IfEnabled }, //remove the astore if no literal pool is required
   { OMR::localCSE, OMR::IfEnabled }, //common up lit pool refs in the same block
   { OMR::signExtendLoadsGroup, OMR::IfEnabled }, // last opt before GRA
   { OMR::arraysetStoreElimination },
   { OMR::localValuePropagation, OMR::MarkLastRun },
   { OMR::arraycopyTransformation },
   { OMR::checkcastAndProfiledGuardCoalescer },
   { OMR::tacticalGlobalRegisterAllocatorGroup, OMR::IfEnabled },
   { OMR::jProfilingValue, OMR::MustBeDone },
   { OMR::treeLowering, OMR::MustBeDone },
   { OMR::globalDeadStoreElimination, OMR::IfMoreThanOneBlock }, // global dead store removal
   { OMR::deadTreesElimination }, // cleanup after dead store removal
   { OMR::compactNullChecks }, // cleanup at the end
   { OMR::finalGlobalGroup }, // done just before codegen
   { OMR::profilingGroup, OMR::IfProfiling },
   { OMR::regDepCopyRemoval },
   { OMR::hotFieldMarking },
   { OMR::endOpts }
#endif
   };

const OptimizationStrategy sequentialLoadAndStoreColdOpts[] =
   {
   { OMR::localDeadStoreElimination },
   { OMR::deadTreesElimination },
   { OMR::expensiveGlobalValuePropagationGroup },
   { OMR::sequentialStoreSimplificationGroup },
   { OMR::endGroup }
   };

const OptimizationStrategy sequentialLoadAndStoreWarmOpts[] =
   {
   { OMR::localValuePropagationGroup },
   { OMR::localDeadStoreElimination },
   { OMR::deadTreesElimination },
   { OMR::expensiveGlobalValuePropagationGroup },
   { OMR::sequentialStoreSimplificationGroup },
   { OMR::endGroup }
   };

const OptimizationStrategy sequentialStoreSimplificationOpts[] =
   {
   { OMR::treeSimplification },
   { OMR::sequentialStoreSimplification },
   { OMR::treeSimplification }, // might fold expressions created by versioning/induction variables
   { OMR::endGroup }
   };


// **********************************************************
//
// AHEAD-OF-TIME-COMPILATION STRATEGY
//
// **********************************************************
static const OptimizationStrategy AOTStrategyOpts[] =
   {
   { OMR::earlyGlobalGroup },
   { OMR::earlyLocalGroup },
   { OMR::stripMiningGroup, OMR::IfLoops }, // strip mining in loops
   { OMR::loopReplicator, OMR::IfLoops }, // tail-duplication in loops
   { OMR::expensiveGlobalValuePropagationGroup },
   { OMR::localCSE, OMR::IfVectorAPI },
   { OMR::loopCanonicalization, OMR::IfVectorAPI },
   { OMR::partialRedundancyEliminationGroup, OMR::IfVectorAPI },
   { OMR::globalDeadStoreElimination, OMR::IfVectorAPI }, // global dead store removal
   { OMR::deadTreesElimination, OMR::IfVectorAPI }, // cleanup after dead store removal
   { OMR::vectorAPIExpansion, OMR::IfVectorAPI },
   { OMR::globalDeadStoreGroup, },
   { OMR::globalCopyPropagation, OMR::IfNoLoops },
   { OMR::loopCanonicalizationGroup, OMR::IfLoops }, // canonicalize loops (improve fall throughs) and versioning
   { OMR::partialRedundancyEliminationGroup },
   { OMR::globalDeadStoreElimination, OMR::IfLoops },
   { OMR::generalLoopUnroller, OMR::IfLoops }, // unroll Loops
   { OMR::blockManipulationGroup },
   { OMR::lateLocalGroup },
   { OMR::sequentialStoreSimplificationGroup }, // reduce sequential stores into an arrayset
   { OMR::redundantAsyncCheckRemoval, OMR::IfNotJitProfiling }, // optimize async check placement
   { OMR::dynamicLiteralPool, OMR::IfNotProfiling },
   { OMR::localDeadStoreElimination, OMR::IfEnabled }, //remove the astore if no literal pool is required
   { OMR::localCSE, OMR::IfEnabled }, //common up lit pool refs in the same block
   { OMR::signExtendLoadsGroup, OMR::IfEnabled }, // last opt before GRA
   { OMR::arraysetStoreElimination },
   { OMR::tacticalGlobalRegisterAllocatorGroup, OMR::IfEnabled },
   { OMR::treeLowering, OMR::MustBeDone },
   { OMR::globalCopyPropagation, OMR::IfMoreThanOneBlock }, // global copy propagation
   { OMR::globalDeadStoreElimination, OMR::IfMoreThanOneBlock }, // global dead store removal
   { OMR::deadTreesElimination }, // cleanup after dead store removal
   { OMR::compactNullChecks }, // cleanup at the end
   { OMR::finalGlobalGroup }, // done just before codegen
   { OMR::regDepCopyRemoval },
   { OMR::endOpts }
   };


static const OptimizationStrategy *j9CompilationStrategies[] =
   {
   noOptStrategyOpts,
   coldStrategyOpts,
   warmStrategyOpts,
   hotStrategyOpts,
   veryHotStrategyOpts,
   scorchingStrategyOpts,
   AOTStrategyOpts,
   reducedWarmStrategyOpts
   };
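// This table is indexed by the method's hotness level: J9::Optimizer::optimizationStrategy()
// below effectively returns j9CompilationStrategies[c->getMethodHotness()], except that warm
// methods are normally diverted to cheapWarmStrategyOpts. The entry order is assumed here to
// mirror the TR_Hotness enumeration (noOpt, cold, warm, hot, veryHot, scorching, ...).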


// ***************************************************************************
//
// Cheaper strategy for warm methods.
//
// ***************************************************************************
//
static const OptimizationStrategy cheapWarmStrategyOpts[] =
   {
   { OMR::trivialDeadTreeRemoval, OMR::IfEnabled },
   { OMR::coldBlockOutlining },
   { OMR::stringBuilderTransformer },
   { OMR::stringPeepholes }, // need stringpeepholes to catch bigdecimal patterns
   { OMR::inlining },
   { OMR::methodHandleInvokeInliningGroup, OMR::IfEnabled },
   { OMR::staticFinalFieldFolding, },
   { OMR::osrGuardInsertion, OMR::MustBeDone },
   { OMR::osrExceptionEdgeRemoval }, // most inlining is done by now
   { OMR::jProfilingBlock },
   { OMR::virtualGuardTailSplitter }, // merge virtual guards
   { OMR::treeSimplification },
#ifdef TR_HOST_S390
   { OMR::sequentialLoadAndStoreWarmGroup, OMR::IfEnabled },
#endif
   { OMR::cheapGlobalValuePropagationGroup },
   { OMR::localCSE, OMR::IfVectorAPI },
   { OMR::dataAccessAccelerator },
#ifdef TR_HOST_S390
   { OMR::globalCopyPropagation, OMR::IfVoluntaryOSR },
#endif
   { OMR::lastLoopVersionerGroup, OMR::IfLoops },
#ifdef TR_HOST_S390
   { OMR::globalDeadStoreElimination, OMR::IfEnabledAndLoops },
   { OMR::deadTreesElimination },
   { OMR::recompilationModifier, OMR::IfEnabledAndNotProfiling },
   { OMR::localReordering, OMR::IfNoLoopsOREnabledAndLoops },
   { OMR::basicBlockOrdering, OMR::IfLoops },
   { OMR::treeSimplification },
   { OMR::loopReduction },
   { OMR::blockShuffling },
#endif
   { OMR::localCSE, OMR::IfLoopsAndNotProfiling },
   { OMR::idiomRecognition, OMR::IfLoopsAndNotProfiling },
   { OMR::blockSplitter },
   { OMR::treeSimplification }, // revisit; not really required ?
   { OMR::virtualGuardHeadMerger },
#if defined(J9VM_OPT_OPENJDK_METHODHANDLE)
   { OMR::recognizedCallTransformer, OMR::MarkLastRun },
#endif
   { OMR::basicBlockExtension, OMR::MarkLastRun }, // extend blocks; move trees around if reqd
   { OMR::localValuePropagationGroup },
   { OMR::explicitNewInitialization, OMR::IfNews },
   { OMR::arraycopyTransformation },
   { OMR::treeSimplification, OMR::IfEnabled },
   { OMR::asyncCheckInsertion, OMR::IfNotJitProfiling },
   { OMR::localCSE },
   { OMR::treeSimplification, OMR::MarkLastRun },
   { OMR::andSimplification, OMR::IfEnabled }, //clean up after versioner
   { OMR::compactNullChecks }, // cleanup at the end
   { OMR::deadTreesElimination, OMR::IfEnabled }, // cleanup at the end
   { OMR::globalCopyPropagation, OMR::IfMethodHandleInvokes }, // Does a lot of good after methodHandleInvokeInliningGroup
   { OMR::treesCleansing, OMR::IfEnabled },
   { OMR::deadTreesElimination, OMR::IfEnabled }, // cleanup at the end
   { OMR::localCSE, OMR::IfEnabled }, // common up expressions for sunk stores
   { OMR::treeSimplification, OMR::IfEnabledMarkLastRun }, // cleanup the trees after sunk store and localCSE

   /** \brief
    * This optimization is performance critical on z Systems. On z Systems a literal pool register is blocked off
    * by default at the start of the compilation since materializing this address could be expensive depending on
    * the architecture level we are executing on. This optimization pass validates support for dynamically
    * materializing the literal pool address and frees up the literal pool register for register allocation.
    */
   { OMR::dynamicLiteralPool, OMR::IfNotProfiling },
   { OMR::samplingJProfiling },
   { OMR::trivialBlockExtension },
   { OMR::localCSE, OMR::IfEnabled }, //common up lit pool refs in the same block
   { OMR::deadTreesElimination, OMR::IfEnabled }, // cleanup at the end
   { OMR::treeSimplification, OMR::IfEnabledMarkLastRun }, // Simplify non-normalized address computations introduced by prefetch insertion
   { OMR::trivialDeadTreeRemoval, OMR::IfEnabled }, // final cleanup before opcode expansion
   { OMR::jProfilingRecompLoopTest, OMR::IfLoops },
   { OMR::globalDeadStoreElimination, OMR::IfVectorAPI }, // global dead store removal
   { OMR::deadTreesElimination, OMR::IfVectorAPI }, // cleanup after dead store removal
   { OMR::vectorAPIExpansion, OMR::IfVectorAPI },
   { OMR::cheapTacticalGlobalRegisterAllocatorGroup, OMR::IfEnabled },
   { OMR::jProfilingValue, OMR::MustBeDone },
   { OMR::treeLowering, OMR::MustBeDone },
   { OMR::globalDeadStoreGroup, },
   { OMR::compactNullChecks, OMR::IfEnabled }, // cleanup at the end
   { OMR::deadTreesElimination, OMR::IfEnabled }, // remove dead anchors created by check/store removal
   { OMR::deadTreesElimination, OMR::IfEnabled }, // remove dead RegStores produced by previous deadTrees pass
   { OMR::redundantGotoElimination, OMR::IfEnabledAndNotJitProfiling }, // dead store and dead tree elimination may have left empty blocks
   { OMR::compactLocals, OMR::IfNotJitProfiling }, // analysis results are invalidated by profilingGroup
   { OMR::globalLiveVariablesForGC },
   { OMR::profilingGroup, OMR::IfProfiling },
   { OMR::regDepCopyRemoval },
   { OMR::hotFieldMarking },
   { OMR::endOpts }
   };


static const OptimizationStrategy profilingOpts[] =
   {
   { OMR::profileGenerator, OMR::MustBeDone },
   { OMR::deadTreesElimination, OMR::IfEnabled },
   { OMR::endGroup }
   };

static const OptimizationStrategy cheapTacticalGlobalRegisterAllocatorOpts[] =
   {
   { OMR::redundantGotoElimination, OMR::IfNotJitProfiling }, // needs to be run before the global register allocator
   { OMR::tacticalGlobalRegisterAllocator, OMR::IfEnabled },
   { OMR::endGroup }
   };
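// The constructor below maps each optimization and optimization group referenced by the
// strategies above to a TR::OptimizationManager with its create function (or, for groups,
// its strategy table). Group tables not defined in this file (e.g. loopAliasRefinerOpts,
// cheapObjectAllocationOpts) are presumably defined in the shared OMR optimizer sources.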


J9::Optimizer::Optimizer(TR::Compilation *comp, TR::ResolvedMethodSymbol *methodSymbol, bool isIlGen,
      const OptimizationStrategy *strategy, uint16_t VNType)
   : OMR::Optimizer(comp, methodSymbol, isIlGen, strategy, VNType)
   {
   // initialize additional J9 optimizations

   _opts[OMR::inlining] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_Inliner::create, OMR::inlining);
   _opts[OMR::targetedInlining] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_Inliner::create, OMR::targetedInlining);
   _opts[OMR::targetedInlining]->setOptPolicy(new (comp->allocator()) TR_J9JSR292InlinerPolicy(comp));

   _opts[OMR::trivialInlining] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_TrivialInliner::create, OMR::trivialInlining);

   _opts[OMR::dynamicLiteralPool] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_DynamicLiteralPool::create, OMR::dynamicLiteralPool);
   _opts[OMR::arraycopyTransformation] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR::ArraycopyTransformation::create, OMR::arraycopyTransformation);
   _opts[OMR::signExtendLoads] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_SignExtendLoads::create, OMR::signExtendLoads);
   _opts[OMR::sequentialStoreSimplification] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_SequentialStoreSimplifier::create, OMR::sequentialStoreSimplification);
   _opts[OMR::explicitNewInitialization] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_LocalNewInitialization::create, OMR::explicitNewInitialization);
   _opts[OMR::redundantMonitorElimination] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR::MonitorElimination::create, OMR::redundantMonitorElimination);
   _opts[OMR::preEscapeAnalysis] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_PreEscapeAnalysis::create, OMR::preEscapeAnalysis);
   _opts[OMR::escapeAnalysis] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_EscapeAnalysis::create, OMR::escapeAnalysis);
   _opts[OMR::postEscapeAnalysis] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_PostEscapeAnalysis::create, OMR::postEscapeAnalysis);
   _opts[OMR::isolatedStoreElimination] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_IsolatedStoreElimination::create, OMR::isolatedStoreElimination);
   _opts[OMR::localLiveVariablesForGC] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_LocalLiveVariablesForGC::create, OMR::localLiveVariablesForGC);
   _opts[OMR::globalLiveVariablesForGC] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_GlobalLiveVariablesForGC::create, OMR::globalLiveVariablesForGC);
   _opts[OMR::recompilationModifier] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_RecompilationModifier::create, OMR::recompilationModifier);
   _opts[OMR::profileGenerator] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_ProfileGenerator::create, OMR::profileGenerator);
   _opts[OMR::dataAccessAccelerator] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_DataAccessAccelerator::create, OMR::dataAccessAccelerator);
   _opts[OMR::stringBuilderTransformer] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_StringBuilderTransformer::create, OMR::stringBuilderTransformer);
   _opts[OMR::stringPeepholes] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_StringPeepholes::create, OMR::stringPeepholes);
   _opts[OMR::switchAnalyzer] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR::SwitchAnalyzer::create, OMR::switchAnalyzer);
   _opts[OMR::treeLowering] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR::TreeLowering::create, OMR::treeLowering);
   _opts[OMR::varHandleTransformer] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_VarHandleTransformer::create, OMR::varHandleTransformer);
   _opts[OMR::methodHandleTransformer] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_MethodHandleTransformer::create, OMR::methodHandleTransformer);
   _opts[OMR::unsafeFastPath] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_UnsafeFastPath::create, OMR::unsafeFastPath);
   _opts[OMR::idiomRecognition] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_CISCTransformer::create, OMR::idiomRecognition);
   _opts[OMR::loopAliasRefiner] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_LoopAliasRefiner::create, OMR::loopAliasRefiner);
   _opts[OMR::allocationSinking] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_AllocationSinking::create, OMR::allocationSinking);
   _opts[OMR::samplingJProfiling] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_JitProfiler::create, OMR::samplingJProfiling);
   _opts[OMR::SPMDKernelParallelization] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_SPMDKernelParallelizer::create, OMR::SPMDKernelParallelization);
   _opts[OMR::trivialDeadBlockRemover] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_TrivialDeadBlockRemover::create, OMR::trivialDeadBlockRemover);
   _opts[OMR::osrGuardInsertion] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_OSRGuardInsertion::create, OMR::osrGuardInsertion);
   _opts[OMR::osrGuardRemoval] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_OSRGuardRemoval::create, OMR::osrGuardRemoval);
   _opts[OMR::jProfilingBlock] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_JProfilingBlock::create, OMR::jProfilingBlock);
   _opts[OMR::jProfilingRecompLoopTest] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_JProfilingRecompLoopTest::create, OMR::jProfilingRecompLoopTest);
   _opts[OMR::jProfilingValue] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_JProfilingValue::create, OMR::jProfilingValue);
   _opts[OMR::staticFinalFieldFolding] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_StaticFinalFieldFolding::create, OMR::staticFinalFieldFolding);
   _opts[OMR::handleRecompilationOps] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_HandleRecompilationOps::create, OMR::handleRecompilationOps);
   _opts[OMR::hotFieldMarking] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_HotFieldMarking::create, OMR::hotFieldMarking);
   _opts[OMR::vectorAPIExpansion] =
      new (comp->allocator()) TR::OptimizationManager(self(), TR_VectorAPIExpansion::create, OMR::vectorAPIExpansion);
   // NOTE: Please add new J9 optimizations here!

   // initialize additional J9 optimization groups

   _opts[OMR::loopAliasRefinerGroup] =
      new (comp->allocator()) TR::OptimizationManager(self(), NULL, OMR::loopAliasRefinerGroup, loopAliasRefinerOpts);
   _opts[OMR::cheapObjectAllocationGroup] =
      new (comp->allocator()) TR::OptimizationManager(self(), NULL, OMR::cheapObjectAllocationGroup, cheapObjectAllocationOpts);
   _opts[OMR::expensiveObjectAllocationGroup] =
      new (comp->allocator()) TR::OptimizationManager(self(), NULL, OMR::expensiveObjectAllocationGroup, expensiveObjectAllocationOpts);
   _opts[OMR::eachEscapeAnalysisPassGroup] =
      new (comp->allocator()) TR::OptimizationManager(self(), NULL, OMR::eachEscapeAnalysisPassGroup, eachEscapeAnalysisPassOpts);
   _opts[OMR::cheapGlobalValuePropagationGroup] =
      new (comp->allocator()) TR::OptimizationManager(self(), NULL, OMR::cheapGlobalValuePropagationGroup, cheapGlobalValuePropagationOpts);
   _opts[OMR::expensiveGlobalValuePropagationGroup] =
      new (comp->allocator()) TR::OptimizationManager(self(), NULL, OMR::expensiveGlobalValuePropagationGroup, expensiveGlobalValuePropagationOpts);
   _opts[OMR::earlyGlobalGroup] =
      new (comp->allocator()) TR::OptimizationManager(self(), NULL, OMR::earlyGlobalGroup, J9EarlyGlobalOpts);
   _opts[OMR::earlyLocalGroup] =
      new (comp->allocator()) TR::OptimizationManager(self(), NULL, OMR::earlyLocalGroup, J9EarlyLocalOpts);
   _opts[OMR::isolatedStoreGroup] =
      new (comp->allocator()) TR::OptimizationManager(self(), NULL, OMR::isolatedStoreGroup, isolatedStoreOpts);
   _opts[OMR::cheapTacticalGlobalRegisterAllocatorGroup] =
      new (comp->allocator()) TR::OptimizationManager(self(), NULL, OMR::cheapTacticalGlobalRegisterAllocatorGroup, cheapTacticalGlobalRegisterAllocatorOpts);
   _opts[OMR::sequentialStoreSimplificationGroup] =
      new (comp->allocator()) TR::OptimizationManager(self(), NULL, OMR::sequentialStoreSimplificationGroup, sequentialStoreSimplificationOpts);
   _opts[OMR::signExtendLoadsGroup] =
      new (comp->allocator()) TR::OptimizationManager(self(), NULL, OMR::signExtendLoadsGroup, signExtendLoadsOpts);
   _opts[OMR::loopSpecializerGroup] =
      new (comp->allocator()) TR::OptimizationManager(self(), NULL, OMR::loopSpecializerGroup, loopSpecializerOpts);
   _opts[OMR::profilingGroup] =
      new (comp->allocator()) TR::OptimizationManager(self(), NULL, OMR::profilingGroup, profilingOpts);
   _opts[OMR::sequentialLoadAndStoreColdGroup] =
      new (comp->allocator()) TR::OptimizationManager(self(), NULL, OMR::sequentialLoadAndStoreColdGroup, sequentialLoadAndStoreColdOpts);
   _opts[OMR::sequentialLoadAndStoreWarmGroup] =
      new (comp->allocator()) TR::OptimizationManager(self(), NULL, OMR::sequentialLoadAndStoreWarmGroup, sequentialLoadAndStoreWarmOpts);

   _opts[OMR::noOptStrategy] =
      new (comp->allocator()) TR::OptimizationManager(self(), NULL, OMR::noOptStrategy, noOptStrategyOpts);
   _opts[OMR::coldStrategy] =
      new (comp->allocator()) TR::OptimizationManager(self(), NULL, OMR::coldStrategy, coldStrategyOpts);
   _opts[OMR::warmStrategy] =
      new (comp->allocator()) TR::OptimizationManager(self(), NULL, OMR::warmStrategy, warmStrategyOpts);
   _opts[OMR::hotStrategy] =
      new (comp->allocator()) TR::OptimizationManager(self(), NULL, OMR::hotStrategy, hotStrategyOpts);
   _opts[OMR::veryHotStrategy] =
      new (comp->allocator()) TR::OptimizationManager(self(), NULL, OMR::veryHotStrategy, veryHotStrategyOpts);
   _opts[OMR::scorchingStrategy] =
      new (comp->allocator()) TR::OptimizationManager(self(), NULL, OMR::scorchingStrategy, scorchingStrategyOpts);

   // NOTE: Please add new J9 optimization groups here!

   // turn requested on for optimizations/groups
   self()->setRequestOptimization(OMR::eachExpensiveGlobalValuePropagationGroup, true);

   self()->setRequestOptimization(OMR::cheapTacticalGlobalRegisterAllocatorGroup, true);
   self()->setRequestOptimization(OMR::tacticalGlobalRegisterAllocatorGroup, true);

   self()->setRequestOptimization(OMR::tacticalGlobalRegisterAllocator, true);

   if (shouldEnableSEL(comp))
      self()->setRequestOptimization(OMR::signExtendLoadsGroup, true);
   if (comp->getOption(TR_EnableSequentialLoadStoreWarm))
      self()->setRequestOptimization(OMR::sequentialLoadAndStoreWarmGroup, true);
   if (comp->getOption(TR_EnableSequentialLoadStoreCold))
      self()->setRequestOptimization(OMR::sequentialLoadAndStoreColdGroup, true);
   }

inline
TR::Optimizer *J9::Optimizer::self()
   {
   return (static_cast<TR::Optimizer *>(this));
   }

OMR_InlinerPolicy *J9::Optimizer::getInlinerPolicy()
   {
   return new (comp()->allocator()) TR_J9InlinerPolicy(comp());
   }


OMR_InlinerUtil *J9::Optimizer::getInlinerUtil()
   {
   return new (comp()->allocator()) TR_J9InlinerUtil(comp());
   }

bool
J9::Optimizer::switchToProfiling(uint32_t f, uint32_t c)
   {
   TR::Recompilation *recomp = comp()->getRecompilationInfo();
   if (!recomp) return false;
   if (!recomp->shouldBeCompiledAgain()) return false; // do not profile if we do not intend to compile again
   if (!recomp->switchToProfiling(f, c)) return false;
   setRequestOptimization(OMR::recompilationModifier, true);
   setRequestOptimization(OMR::profileGenerator, true);
   return true;
   }


bool
J9::Optimizer::switchToProfiling()
   {
   return self()->switchToProfiling(DEFAULT_PROFILING_FREQUENCY, DEFAULT_PROFILING_COUNT);
   }


const OptimizationStrategy *
J9::Optimizer::optimizationStrategy(TR::Compilation *c)
   {
   if (c->getOption(TR_MimicInterpreterFrameShape))
      {
      if (c->getJittedMethodSymbol()->sharesStackSlots(c))
         return fsdStrategies[0]; // 0 is fsdStrategyOptsForMethodsWithSlotSharing
      else
         return fsdStrategies[1]; // 1 is fsdStrategyOptsForMethodsWithoutSlotSharing
      }

   TR_Hotness strategy = c->getMethodHotness();
   if (strategy == warm && !c->getOption(TR_DisableCheapWarmOpts))
      {
      return cheapWarmStrategyOpts;
      }
   else
      {
      return j9CompilationStrategies[strategy];
      }
   }


ValueNumberInfoBuildType
J9::Optimizer::valueNumberInfoBuildType()
   {
   return PrePartitionVN;
   }