Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/openj9
Path: blob/master/runtime/compiler/control/CompilationController.cpp
6000 views
1
/*******************************************************************************
2
* Copyright (c) 2000, 2022 IBM Corp. and others
3
*
4
* This program and the accompanying materials are made available under
5
* the terms of the Eclipse Public License 2.0 which accompanies this
6
* distribution and is available at https://www.eclipse.org/legal/epl-2.0/
7
* or the Apache License, Version 2.0 which accompanies this distribution and
8
* is available at https://www.apache.org/licenses/LICENSE-2.0.
9
*
10
* This Source Code may also be made available under the following
11
* Secondary Licenses when the conditions for such availability set
12
* forth in the Eclipse Public License, v. 2.0 are satisfied: GNU
13
* General Public License, version 2 with the GNU Classpath
14
* Exception [1] and GNU General Public License, version 2 with the
15
* OpenJDK Assembly Exception [2].
16
*
17
* [1] https://www.gnu.org/software/classpath/license.html
18
* [2] http://openjdk.java.net/legal/assembly-exception.html
19
*
20
* SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 OR GPL-2.0 WITH Classpath-exception-2.0 OR LicenseRef-GPL-2.0 WITH Assembly-exception
21
*******************************************************************************/
22
23
#include "control/CompilationController.hpp"
24
25
#include "codegen/PrivateLinkage.hpp"
26
#include "compile/Compilation.hpp"
27
#include "compile/CompilationTypes.hpp"
28
#include "control/MethodToBeCompiled.hpp"
29
#include "control/OptimizationPlan.hpp"
30
#include "control/Recompilation.hpp"
31
#include "control/RecompilationInfo.hpp"
32
#include "env/IO.hpp"
33
#include "env/TRMemory.hpp"
34
#include "env/VerboseLog.hpp"
35
#include "ilgen/IlGeneratorMethodDetails_inlines.hpp"
36
#include "infra/Monitor.hpp"
37
#include "runtime/CodeCacheManager.hpp"
38
#include "control/CompilationRuntime.hpp"
39
#include "env/ut_j9jit.h"
40
#include "env/CompilerEnv.hpp"
41
42
// NOTE: TR::CompilationController is actually defined in control/OptimizationPlan.hpp
43
44
TR::CompilationStrategy *TR::CompilationController::_compilationStrategy = NULL;
45
TR::CompilationInfo * TR::CompilationController::_compInfo = 0;
46
int32_t TR::CompilationController::_verbose = 0;
47
bool TR::CompilationController::_useController = false;
48
bool TR::CompilationController::_tlsCompObjCreated = false;
49
50
51
//------------------------------------ init -----------------------------------
52
// Initializes the compilationController.
53
// Return false if it fails
54
//-----------------------------------------------------------------------------
55
bool TR::CompilationController::init(TR::CompilationInfo *compInfo)
56
{
57
_useController = false; // Default to failure
58
_compilationStrategy = 0; // Default to failure
59
TR::Options *options = TR::Options::getCmdLineOptions();
60
char *strategyName = options->getCompilationStrategyName();
61
62
if (strategyName && strcmp(strategyName, "none"))
63
{
64
_compInfo = compInfo;
65
if (strcmp(strategyName, "default") == 0)
66
_compilationStrategy = new (PERSISTENT_NEW) TR::DefaultCompilationStrategy();
67
else if (strcmp(strategyName, "threshold") == 0)
68
_compilationStrategy = new (PERSISTENT_NEW) TR::ThresholdCompilationStrategy();
69
else // if no match, use default
70
{
71
_compilationStrategy = new (PERSISTENT_NEW) TR::DefaultCompilationStrategy();
72
}
73
74
if (_compilationStrategy)
75
{
76
TR_OptimizationPlan::_optimizationPlanMonitor = TR::Monitor::create("OptimizationPlanMonitor");
77
_useController = (TR_OptimizationPlan::_optimizationPlanMonitor != 0);
78
if (_useController)
79
{
80
static char *verboseController = feGetEnv("TR_VerboseController");
81
if (verboseController)
82
setVerbose(atoi(verboseController));
83
if (verbose() >= LEVEL1)
84
fprintf(stderr, "Using %s comp strategy\n", strategyName);
85
}
86
}
87
}
88
//TR_ASSERT(_useController, "Must use compilation controller");
89
//#ifdef COMP_YIELD_ANALYSIS
90
if (options->getOption(TR_EnableCompYieldStats))
91
TR::Compilation::allocateCompYieldStatsMatrix();
92
tlsAlloc(OMR::compilation);
93
_tlsCompObjCreated = true;
94
return _useController;
95
}
96
97
98
//-------------------------------- shutdown ---------------------------------
99
// Called at shutdown time after compilation thread has been stopped
100
// --------------------------------------------------------------------------
101
void TR::CompilationController::shutdown()
102
{
103
if (_tlsCompObjCreated)
104
tlsFree(OMR::compilation);
105
if (!_useController)
106
return;
107
// would like to free all entries in the pool of compilation plans
108
int32_t remainingPlans = TR_OptimizationPlan::freeEntirePool();
109
// print some stats
110
if (verbose() >= LEVEL1)
111
{
112
fprintf(stderr, "Remaining optimizations plans in the system: %d\n", remainingPlans);
113
}
114
_compilationStrategy->shutdown();
115
}
116
117
118
//======================== DefaultCompilationStrategy ==========================
119
120
121
122
// Constructor: zero the per-event-type statistics counters that
// processEvent() increments and shutdown() reports.
TR::DefaultCompilationStrategy::DefaultCompilationStrategy()
   {
   for (int32_t eventType = 0; eventType < TR_MethodEvent::NumEvents; ++eventType)
      _statEventType[eventType] = 0;
   }
128
129
130
void TR::DefaultCompilationStrategy::shutdown()
131
{
132
// printing stats
133
if (TR::CompilationController::verbose() >= TR::CompilationController::LEVEL1)
134
{
135
fprintf(stderr, "Stats for type of events:\n");
136
for (int32_t i=0; i < TR_MethodEvent::NumEvents; i++)
137
fprintf(stderr, "EventType:%d cases:%u\n", i, _statEventType[i]);
138
}
139
}
140
141
142
// Picks the first compilation level for a method. Methods containing
// backwards branches (loops) may warrant a different initial hotness
// than straight-line code; the decision is delegated to TR::Options.
TR_Hotness TR::DefaultCompilationStrategy::getInitialOptLevel(J9Method *j9method)
   {
   J9ROMMethod *romMethod = J9_ROM_METHOD_FROM_RAM_METHOD(j9method);
   bool hasBackwardBranches = J9ROMMETHOD_HAS_BACKWARDS_BRANCHES(romMethod) ? true : false;
   return TR::Options::getInitialHotnessLevel(hasBackwardBranches);
   }
147
148
149
//------------------------------- processEvent ------------------------
150
// If the function returns NULL, then the value of *newPlanCreated is
151
// undefined and should not be tested
152
//---------------------------------------------------------------------
153
TR_OptimizationPlan *TR::DefaultCompilationStrategy::processEvent(TR_MethodEvent *event, bool *newPlanCreated)
154
{
155
TR_OptimizationPlan *plan = NULL, *attachedPlan = NULL;
156
TR_Hotness hotnessLevel;
157
TR_PersistentJittedBodyInfo *bodyInfo;
158
TR_PersistentMethodInfo *methodInfo;
159
TR::CompilationInfo *compInfo = TR::CompilationController::getCompilationInfo();
160
161
if (TR::CompilationController::verbose() >= TR::CompilationController::LEVEL3)
162
fprintf(stderr, "Event %d\n", event->_eventType);
163
164
// first decode the event type
165
switch (event->_eventType)
166
{
167
case TR_MethodEvent::JittedMethodSample:
168
compInfo->_stats._sampleMessagesReceived++;
169
plan = processJittedSample(event);
170
*newPlanCreated = true;
171
break;
172
case TR_MethodEvent::InterpretedMethodSample:
173
compInfo->_stats._sampleMessagesReceived++;
174
plan = processInterpreterSample(event);
175
*newPlanCreated = true;
176
break;
177
case TR_MethodEvent::InterpreterCounterTripped:
178
TR_ASSERT(event->_oldStartPC == 0, "oldStartPC should be 0 for an interpreted method");
179
compInfo->_stats._methodsCompiledOnCount++;
180
// most likely we need to compile the method, unless it's already being compiled
181
// even if the method is already queued for compilation we must still invoke
182
// compilemethod because we may need to do a async compilation and the thread
183
// needs to block
184
185
// use the counts to determine the first level of compilation
186
// the level of compilation can be changed later on if option subsets are present
187
hotnessLevel = TR::DefaultCompilationStrategy::getInitialOptLevel(event->_j9method);
188
if (hotnessLevel == veryHot && // we probably want to profile
189
!TR::Options::getCmdLineOptions()->getOption(TR_DisableProfiling) &&
190
TR::Recompilation::countingSupported() &&
191
!TR::CodeCacheManager::instance()->almostOutOfCodeCache())
192
plan = TR_OptimizationPlan::alloc(hotnessLevel, true, false);
193
else
194
plan = TR_OptimizationPlan::alloc(hotnessLevel);
195
*newPlanCreated = true;
196
// the optimization plan needs to include opt level and if we do profiling
197
// these may change
198
break;
199
case TR_MethodEvent::JitCompilationInducedByDLT:
200
hotnessLevel = TR::DefaultCompilationStrategy::getInitialOptLevel(event->_j9method);
201
plan = TR_OptimizationPlan::alloc(hotnessLevel);
202
if (plan)
203
plan->setInducedByDLT(true);
204
*newPlanCreated = true;
205
break;
206
case TR_MethodEvent::OtherRecompilationTrigger: // sync recompilation through fixMethodCode or recomp triggered from jitted code (like counting recompilation)
207
// For sync re-compilation we have attached a plan to the persistentBodyInfo
208
bodyInfo = TR::Recompilation::getJittedBodyInfoFromPC(event->_oldStartPC);
209
methodInfo = bodyInfo->getMethodInfo();
210
211
if (methodInfo->getReasonForRecompilation() == TR_PersistentMethodInfo::RecompDueToInlinedMethodRedefinition ||
212
(methodInfo->getReasonForRecompilation() == TR_PersistentMethodInfo::RecompDueToJProfiling && !bodyInfo->getIsProfilingBody())) // if the recompilation is triggered from a JProfiling block but not in a profiled compilation keep the current compilation level unchanged
213
{
214
hotnessLevel = bodyInfo->getHotness();
215
plan = TR_OptimizationPlan::alloc(hotnessLevel);
216
*newPlanCreated = true;
217
}
218
else
219
{
220
hotnessLevel = TR::Recompilation::getNextCompileLevel(event->_oldStartPC);
221
plan = TR_OptimizationPlan::alloc(hotnessLevel);
222
*newPlanCreated = true;
223
}
224
225
TR_OptimizationPlan::_optimizationPlanMonitor->enter();
226
attachedPlan = methodInfo->_optimizationPlan;
227
if (attachedPlan)
228
{
229
TR_ASSERT(!TR::CompilationController::getCompilationInfo()->asynchronousCompilation(),
230
"This case should happen only for sync recompilation");
231
plan->clone(attachedPlan); // override
232
}
233
TR_OptimizationPlan::_optimizationPlanMonitor->exit();
234
break;
235
case TR_MethodEvent::NewInstanceImpl:
236
// use the counts to determine the first level of compilation
237
// the level of compilation can be changed later on if option subsets are present
238
hotnessLevel = TR::Options::getInitialHotnessLevel(false);
239
plan = TR_OptimizationPlan::alloc(hotnessLevel);
240
*newPlanCreated = true;
241
break;
242
case TR_MethodEvent::ShareableMethodHandleThunk:
243
case TR_MethodEvent::CustomMethodHandleThunk:
244
// TODO: methodInfo->setWasNeverInterpreted()
245
hotnessLevel = TR::DefaultCompilationStrategy::getInitialOptLevel(event->_j9method);
246
if (hotnessLevel < warm && event->_eventType == TR_MethodEvent::CustomMethodHandleThunk)
247
hotnessLevel = warm; // Custom thunks benefit a LOT from warm opts like preexistence and repeated inlining passes
248
plan = TR_OptimizationPlan::alloc(hotnessLevel);
249
// plan->setIsForcedCompilation(); // TODO:JSR292: Seems reasonable, but somehow it crashes
250
plan->setUseSampling(false); // We don't yet support sampling-based recompilation of MH thunks
251
*newPlanCreated = true;
252
break;
253
case TR_MethodEvent::MethodBodyInvalidated:
254
// keep the same optimization level
255
bodyInfo = TR::Recompilation::getJittedBodyInfoFromPC(event->_oldStartPC);
256
TR_ASSERT(bodyInfo, "A recompilable method should have jittedBodyInfo");
257
hotnessLevel = bodyInfo->getHotness();
258
plan = TR_OptimizationPlan::alloc(hotnessLevel);
259
*newPlanCreated = true;
260
bodyInfo->getMethodInfo()->incrementNumberOfInvalidations();
261
262
// the following is just for compatibility with older implementation
263
//bodyInfo->getMethodInfo()->setNextCompileLevel(hotnessLevel, false); // no profiling
264
break;
265
case TR_MethodEvent::HWPRecompilationTrigger:
266
{
267
plan = processHWPSample(event);
268
}
269
break;
270
default:
271
TR_ASSERT(0, "Bad event type %d", event->_eventType);
272
}
273
274
_statEventType[event->_eventType]++; // statistics
275
276
if (TR::CompilationController::verbose() >= TR::CompilationController::LEVEL2)
277
fprintf(stderr, "Event %d created plan %p\n", event->_eventType, plan);
278
279
return plan;
280
}
281
282
283
284
//--------------------- processInterpreterSample ----------------------
285
TR_OptimizationPlan *
286
TR::DefaultCompilationStrategy::processInterpreterSample(TR_MethodEvent *event)
287
{
288
// Sampling an interpreted method. The method could have been already
289
// compiled (but we got a sample in the old interpreted body).
290
//
291
TR_OptimizationPlan *plan = 0;
292
TR::Options * cmdLineOptions = TR::Options::getCmdLineOptions();
293
J9Method *j9method = event->_j9method;
294
J9JITConfig *jitConfig = event->_vmThread->javaVM->jitConfig;
295
TR::CompilationInfo *compInfo = 0;
296
if (jitConfig)
297
compInfo = TR::CompilationInfo::get(jitConfig);
298
TR_J9VMBase *fe = TR_J9VMBase::get(jitConfig, event->_vmThread);
299
300
int32_t totalSampleCount = TR::Recompilation::globalSampleCount;
301
char msg[350]; // size should be big enough to hold the whole one-line msg
302
msg[0] = 0;
303
char *curMsg = msg;
304
bool logSampling = fe->isLogSamplingSet() || TrcEnabled_Trc_JIT_Sampling_Detail;
305
#define SIG_SZ 150
306
char sig[SIG_SZ]; // hopefully the size is good for most cases
307
308
J9ROMMethod * romMethod = J9_ROM_METHOD_FROM_RAM_METHOD(j9method);
309
bool loopy = J9ROMMETHOD_HAS_BACKWARDS_BRANCHES(romMethod) ? true : false;
310
311
if (logSampling || TrcEnabled_Trc_JIT_Sampling)
312
{
313
fe->printTruncatedSignature(sig, SIG_SZ, (TR_OpaqueMethodBlock*)j9method);
314
315
if (logSampling)
316
curMsg += sprintf(curMsg, "(%d)\tInterpreted %s\t", totalSampleCount, sig);
317
if (TrcEnabled_Trc_JIT_Sampling && ((totalSampleCount % 4) == 0))
318
Trc_JIT_Sampling(getJ9VMThreadFromTR_VM(fe), "Interpreted", sig, 0);
319
}
320
321
compInfo->_stats._interpretedMethodSamples++;
322
323
if (!TR::CompilationInfo::isCompiled(j9method))
324
{
325
int32_t count = TR::CompilationInfo::getInvocationCount(j9method);
326
// the count will be -1 for JNI or if extra is negative
327
if (!cmdLineOptions->getOption(TR_DisableInterpreterSampling))
328
{
329
// If the method is an interpreted non-JNI method, the last slot in
330
// the RAM method is an invocation count. See if it is reasonable
331
// to reduce the invocation count since this method has been sampled.
332
//
333
if (count > 0)
334
{
335
int32_t threshold, divisor;
336
/* Modify thresholds for JSR292 methods */
337
bool isJSR292Method = _J9ROMMETHOD_J9MODIFIER_IS_SET((J9_ROM_METHOD_FROM_RAM_METHOD(j9method)), J9AccMethodHasMethodHandleInvokes );
338
if (jitConfig->javaVM->phase != J9VM_PHASE_NOT_STARTUP)
339
{
340
threshold = isJSR292Method ? TR::Options::_interpreterSamplingThresholdInJSR292 : TR::Options::_interpreterSamplingThresholdInStartupMode;
341
divisor = TR::Options::_interpreterSamplingDivisorInStartupMode;
342
}
343
else
344
{
345
threshold = isJSR292Method ? TR::Options::_interpreterSamplingThresholdInJSR292 : TR::Options::_interpreterSamplingThreshold;
346
divisor = TR::Options::_interpreterSamplingDivisor;
347
}
348
int32_t activeThreadsThreshold = TR::Options::_activeThreadsThreshold;
349
if (activeThreadsThreshold == -1) // -1 means we want to determine this dynamically
350
activeThreadsThreshold = compInfo->getNumAppThreadsActive();
351
352
if (count <= threshold && count > activeThreadsThreshold)
353
{
354
// This is an interpreted method that can be compiled.
355
// Reduce the invocation count.
356
//
357
int32_t newCount = count / divisor;
358
// Don't decrement more than the number of active threads
359
if (newCount < activeThreadsThreshold)
360
newCount = activeThreadsThreshold;
361
if (TR::CompilationInfo::setInvocationCount(j9method, count, newCount))
362
{
363
if (logSampling)
364
curMsg += sprintf(curMsg, " reducing count %d --> %d", count, newCount);
365
if (cmdLineOptions->getOption(TR_UseSamplingJProfilingForInterpSampledMethods))
366
compInfo->getInterpSamplTrackingInfo()->addOrUpdate(j9method, count - newCount);
367
}
368
else
369
{
370
if (logSampling)
371
curMsg += sprintf(curMsg, " count = %d, already changed", count);
372
}
373
374
// If the method is ready to be compiled and we are using a separate
375
// compilation thread, get a head start by scheduling the compilation
376
// now
377
//
378
if (newCount == 0 && fe->isAsyncCompilation())
379
{
380
if (TR::Options::_compilationDelayTime <= 0 ||
381
compInfo->getPersistentInfo()->getElapsedTime() >= 1000 * TR::Options::_compilationDelayTime)
382
plan = TR_OptimizationPlan::alloc(getInitialOptLevel(j9method));
383
}
384
}
385
else if (returnIprofilerState() == IPROFILING_STATE_OFF)
386
{
387
int32_t newCount = 0;
388
if (cmdLineOptions->getOption(TR_SubtractMethodCountsWhenIprofilerIsOff))
389
newCount = count - TR::Options::_IprofilerOffSubtractionFactor;
390
else
391
newCount = count / TR::Options::_IprofilerOffDivisionFactor;
392
393
if (newCount < 0)
394
newCount = 0;
395
396
if (TR::CompilationInfo::setInvocationCount(j9method, count, newCount))
397
{
398
if (logSampling)
399
curMsg += sprintf(curMsg, " reducing count %d --> %d", count, newCount);
400
if (cmdLineOptions->getOption(TR_UseSamplingJProfilingForInterpSampledMethods))
401
compInfo->getInterpSamplTrackingInfo()->addOrUpdate(j9method, count - newCount);
402
}
403
else
404
{
405
if (logSampling)
406
curMsg += sprintf(curMsg, " count = %d, already changed", count);
407
}
408
}
409
else if (loopy && count > activeThreadsThreshold)
410
{
411
int32_t newCount = 0;
412
if (cmdLineOptions->getOption(TR_SubtractLoopyMethodCounts))
413
newCount = count - TR::Options::_LoopyMethodSubtractionFactor;
414
else
415
newCount = count / TR::Options::_LoopyMethodDivisionFactor;
416
417
if (newCount < 0)
418
newCount = 0;
419
if (newCount < activeThreadsThreshold)
420
newCount = activeThreadsThreshold;
421
if (TR::CompilationInfo::setInvocationCount(j9method, count, newCount))
422
{
423
if (logSampling)
424
curMsg += sprintf(curMsg, " reducing count %d --> %d", count, newCount);
425
if (cmdLineOptions->getOption(TR_UseSamplingJProfilingForInterpSampledMethods))
426
compInfo->getInterpSamplTrackingInfo()->addOrUpdate(j9method, count - newCount);
427
}
428
else
429
{
430
if (logSampling)
431
curMsg += sprintf(curMsg, " count = %d, already changed", count);
432
}
433
}
434
else
435
{
436
if (logSampling)
437
curMsg += sprintf(curMsg, " count = %d / %d", count, threshold);
438
}
439
}
440
else if (count == 0)
441
{
442
// Possible scenario: a long activation method receives a MIL count of 1.
443
// The method gets invoked and the count becomes 0 (but the compilation is not
444
// triggered now, only when the counter would become negative).
445
// The method receives a sample while still being interpreted. We should probably
446
// schedule a compilation
447
if (logSampling)
448
curMsg += sprintf(curMsg, " count = 0 (long running?)");
449
if (fe->isAsyncCompilation())
450
{
451
if (TR::Options::_compilationDelayTime <= 0 ||
452
compInfo->getPersistentInfo()->getElapsedTime() >= 1000 * TR::Options::_compilationDelayTime)
453
plan = TR_OptimizationPlan::alloc(getInitialOptLevel(j9method));
454
}
455
}
456
else // count==-1
457
{
458
if (TR::CompilationInfo::getJ9MethodVMExtra(j9method) == J9_JIT_QUEUED_FOR_COMPILATION)
459
{
460
if (logSampling)
461
curMsg += sprintf(curMsg, " already queued");
462
if (compInfo &&
463
(compInfo->compBudgetSupport() || compInfo->dynamicThreadPriority()))
464
{
465
fe->acquireCompilationLock();
466
int32_t n = compInfo->promoteMethodInAsyncQueue(j9method, 0);
467
fe->releaseCompilationLock();
468
if (logSampling)
469
{
470
if (n > 0)
471
curMsg += sprintf(curMsg, " promoted from %d", n);
472
else if (n == 0)
473
curMsg += sprintf(curMsg, " comp in progress");
474
else
475
curMsg += sprintf(curMsg, " already in the right place %d", n);
476
}
477
}
478
}
479
else
480
{
481
if (logSampling)
482
curMsg += sprintf(curMsg, " cannot be compiled, extra field is %" OMR_PRIdPTR, TR::CompilationInfo::getJ9MethodExtra(j9method));
483
}
484
}
485
TR::Recompilation::globalSampleCount++;
486
}
487
else if (logSampling)
488
{
489
if (count >= 0)
490
curMsg += sprintf(curMsg, " %d invocations before compiling", count);
491
else
492
curMsg += sprintf(curMsg, " cannot be compiled");
493
}
494
}
495
else // sampling interpreted body, but method was compiled
496
{
497
// Unlikely scenario, unless the method has long running activations.
498
// Create an activation length record for this method
499
//
500
//if(TR::Options::getCmdLineOptions()->getFixedOptLevel() == -1)
501
// compInfo->getPersistentInfo()->getActivationTable()->insert(j9method, totalSampleCount, fe);
502
503
TR_PersistentJittedBodyInfo *bodyInfo = TR::Recompilation::getJittedBodyInfoFromPC(j9method->extra);
504
if (bodyInfo)
505
bodyInfo->_longRunningInterpreted = true;
506
507
if (logSampling)
508
curMsg += sprintf(curMsg, " counter = XX (long running?)");
509
// Note that we do not increment globalSampleCount here
510
}
511
if (fe->isLogSamplingSet())
512
{
513
TR_VerboseLog::writeLineLocked(TR_Vlog_SAMPLING,"%s", msg);
514
}
515
Trc_JIT_Sampling_Detail(getJ9VMThreadFromTR_VM(fe), msg);
516
return plan;
517
}
518
519
520
TR_OptimizationPlan *
521
TR::DefaultCompilationStrategy::processJittedSample(TR_MethodEvent *event)
522
{
523
TR_OptimizationPlan *plan = 0;
524
TR::Options * cmdLineOptions = TR::Options::getCmdLineOptions();
525
J9Method *j9method = event->_j9method;
526
J9JITConfig *jitConfig = event->_vmThread->javaVM->jitConfig;
527
TR::CompilationInfo *compInfo = 0;
528
if (jitConfig)
529
compInfo = TR::CompilationInfo::get(jitConfig);
530
531
TR_J9VMBase * fe = TR_J9VMBase::get(jitConfig, event->_vmThread);
532
int32_t totalSampleCount = ++ TR::Recompilation::globalSampleCount;
533
uint64_t crtTime = compInfo->getPersistentInfo()->getElapsedTime();
534
535
#define MSG_SZ 450
536
char msg[MSG_SZ]; // size should be big enough to hold the whole one-line msg
537
msg[0] = 0;
538
char *curMsg = msg;
539
void *startPC = event->_oldStartPC;
540
bool logSampling = fe->isLogSamplingSet() || TrcEnabled_Trc_JIT_Sampling_Detail;
541
if (logSampling || TrcEnabled_Trc_JIT_Sampling)
542
{
543
#define SIG_SZ 150
544
char sig[SIG_SZ]; // hopefully the size is good for most cases
545
fe->printTruncatedSignature(sig, SIG_SZ, (TR_OpaqueMethodBlock*)j9method);
546
int32_t pcOffset = (uint8_t *)(event->_samplePC) - (uint8_t *)startPC;
547
if (logSampling)
548
curMsg += sprintf(curMsg, "(%d)\tCompiled %s\tPC=" POINTER_PRINTF_FORMAT "\t%+d\t", totalSampleCount, sig, startPC, pcOffset);
549
if (TrcEnabled_Trc_JIT_Sampling && ((totalSampleCount % 4) == 0))
550
Trc_JIT_Sampling(getJ9VMThreadFromTR_VM(fe), "Compiled", sig, 0); // TODO put good pcOffset
551
}
552
553
TR::Recompilation::jitGlobalSampleCount++;
554
555
// Insert an yield point if compilation queue size is too big and CPU utilization is close to 100%
556
// QueueSize changes all the time, so threads may experience cache misses
557
// trying to access it. It's better to have a variable defined in compInfo
558
// which says by how much we need to delay application threads. This variable
559
// will be changed by the sampling thread, every 0.5 seconds
560
if (TR::Options::getCmdLineOptions()->getOption(TR_EnableAppThreadYield))
561
{
562
int32_t sleepNano = compInfo->getAppSleepNano(); // determine how much I need to sleep
563
if (sleepNano != 0) // If I need to sleep at all
564
{
565
if (sleepNano == 1000000)
566
{
567
j9thread_sleep(1); // param in ms
568
}
569
else
570
{
571
if (fe->shouldSleep()) // sleep every other sample point
572
j9thread_sleep(1); // param in ms
573
}
574
}
575
}
576
J9::PrivateLinkage::LinkageInfo *linkageInfo = J9::PrivateLinkage::LinkageInfo::get(startPC);
577
TR_PersistentJittedBodyInfo *bodyInfo = NULL;
578
579
compInfo->_stats._compiledMethodSamples++;
580
581
if (linkageInfo->hasFailedRecompilation())
582
{
583
compInfo->_stats._compiledMethodSamplesIgnored++;
584
if (logSampling)
585
curMsg += sprintf(curMsg, " has already failed a recompilation attempt");
586
}
587
else if (!linkageInfo->isSamplingMethodBody())
588
{
589
compInfo->_stats._compiledMethodSamplesIgnored++;
590
if (logSampling)
591
curMsg += sprintf(curMsg, " does not use sampling");
592
}
593
else if (debug("disableSamplingRecompilation"))
594
{
595
compInfo->_stats._compiledMethodSamplesIgnored++;
596
if (logSampling)
597
curMsg += sprintf(curMsg, " sampling disabled");
598
}
599
else
600
bodyInfo = TR::Recompilation::getJittedBodyInfoFromPC(startPC);
601
602
if (bodyInfo && bodyInfo->getDisableSampling())
603
{
604
compInfo->_stats._compiledMethodSamplesIgnored++;
605
if (logSampling)
606
curMsg += sprintf(curMsg, " uses sampling but sampling disabled (last comp. with prex)");
607
bodyInfo = NULL;
608
}
609
610
if (bodyInfo)
611
{
612
bool getOut = false;
613
TR_PersistentMethodInfo *methodInfo = bodyInfo->getMethodInfo();
614
fe->acquireCompilationLock();
615
bool isAlreadyBeingCompiled;
616
TR_OpaqueMethodBlock *j9m = methodInfo->getMethodInfo();
617
void *currentStartPC = TR::CompilationInfo::getPCIfCompiled((J9Method*)j9m);
618
619
// See if the method has already been compiled but we get a sample in the old body
620
if (currentStartPC != startPC) // rare case
621
getOut = true;
622
else if (TR::Options::getCmdLineOptions()->getFixedOptLevel() != -1
623
|| TR::Options::getAOTCmdLineOptions()->getFixedOptLevel() != -1) // prevent recompilation when opt level is specified
624
{
625
getOut = true;
626
}
627
else
628
{
629
isAlreadyBeingCompiled = TR::Recompilation::isAlreadyBeingCompiled(methodInfo->getMethodInfo(), startPC, fe);
630
// If we already decided to recompile this body, and we haven't yet
631
// queued the method don't bother continuing. Very small window of time.
632
//
633
if (bodyInfo->getSamplingRecomp() && // flag needs to be tested after getting compilationMonitor
634
!isAlreadyBeingCompiled)
635
{
636
if (logSampling)
637
curMsg += sprintf(curMsg, " uses sampling but a recomp decision has already been taken");
638
getOut = true;
639
}
640
}
641
if (getOut)
642
{
643
fe->releaseCompilationLock();
644
// and do nothing
645
}
646
else
647
{
648
bool recompile = false;
649
TR_Hotness nextOptLevel;
650
bool useProfiling = false;
651
652
// Profiling compilations that precede scorching ones are quite taxing
653
// on large multicore machines. Thus, we want to observe the hotness of a
654
// method for longer, rather than rushing into a profiling very-hot compilation.
655
// We can afford to do so because scorching methods accumulate samples at a
656
// higher rate than hot ones.
657
// The goal here is to implement a rather short decision window (sampling interval)
658
// for decisions to upgrade to hot, but a larger decision window for decisions
659
// to go to scorching. This is based on density of samples observed in the JVM:
660
// the larger the density of samples, the larger the scorching decision window.
661
// scorchingSampleInterval will be a multiple of hotSampleInterval
662
// When a hotSampleInterval ends, if the method looks scorching we postpone any
663
// recompilation decision until a scorchingSampleInterval finishes. If the method
664
// only looks hot, then we decide to recompile at hot at the end of the hotSampleInterval
665
666
uint32_t intervalIncreaseFactor = compInfo->getJitSampleInfoRef().getIncreaseFactor();
667
// possibly larger sample interval for scorching compilations
668
int32_t scorchingSampleInterval = TR::Options::_sampleInterval * intervalIncreaseFactor;
669
670
// Hot recompilation decisions use the regular sized sampling interval
671
uint8_t hotSampleInterval = TR::Options::_sampleInterval;
672
int32_t hotSampleThreshold = TR::Options::_sampleThreshold;
673
674
int32_t count = bodyInfo->decCounter();
675
uint8_t crtSampleIntervalCount = bodyInfo->incSampleIntervalCount(scorchingSampleInterval);
676
bool hotSamplingWindowComplete = (crtSampleIntervalCount % hotSampleInterval) == 0;
677
bool scorchingSamplingWindowComplete = (crtSampleIntervalCount == 0);
678
679
int32_t startSampleCount = bodyInfo->getStartCount();
680
int32_t globalSamples = totalSampleCount - startSampleCount;
681
int32_t globalSamplesInHotWindow = globalSamples - bodyInfo->getHotStartCountDelta();
682
683
int32_t scaledScorchingThreshold = 0, scaledHotThreshold = 0;
684
685
if (logSampling)
686
curMsg += sprintf(curMsg, " cnt=%d ncl=%d glblSmplCnt=%d startCnt=%d[-%u,+%u] samples=[%d %d] windows=[%d %u] crtSmplIntrvlCnt=%u",
687
count, methodInfo->getNextCompileLevel(), totalSampleCount, startSampleCount,
688
bodyInfo->getOldStartCountDelta(), bodyInfo->getHotStartCountDelta(),
689
globalSamples, globalSamplesInHotWindow,
690
scorchingSampleInterval, hotSampleInterval, crtSampleIntervalCount);
691
692
bool dontSwitchToProfiling = false;
693
if (count <= 0)
694
{
695
if (!isAlreadyBeingCompiled)
696
{
697
// do not allow scorching compiles based on count reaching 0
698
if (methodInfo->getNextCompileLevel() > hot)
699
{
700
// replenish the counter with a multiple of sampleInterval
701
bodyInfo->setCounter(hotSampleInterval);
702
// even if the count reached 0, we still need to check if we can
703
// promote this method through sample thresholds
704
}
705
else // allow transition to HOT through exhaustion of count
706
{
707
recompile = true;
708
TR::Recompilation::limitMethodsCompiled++;
709
// Currently the counter can be decremented because (1) the method was
710
// sampled; (2) EDO; (3) PIC miss; (4) megamorphic interface call profile.
711
// EDO will have its own recompilation snippet, but in cases (3) and (4)
712
// the counter just reaches 0, and only the next sample will trigger
713
// recompilation. These cases can be identified by the negative counter
714
// (we decrement the counter above in sampleMethod()). In contrast, if the
715
// counter is decremented through sampling, only the first thread that sees
716
// the counter 0 will recompile the method, and all the others will be
717
// prevented from reaching this point due to isAlreadyBeingCompiled
718
719
if (count < 0 && !methodInfo->disableMiscSamplingCounterDecrementation())
720
{
721
// recompile at same level
722
nextOptLevel = bodyInfo->getHotness();
723
724
// mark this special situation
725
methodInfo->setDisableMiscSamplingCounterDecrementation();
726
// write a message in the vlog to know the reason of recompilation
727
if (logSampling)
728
curMsg += sprintf(curMsg, " PICrecomp");
729
methodInfo->setReasonForRecompilation(TR_PersistentMethodInfo::RecompDueToMegamorphicCallProfile);
730
}
731
else
732
{
733
nextOptLevel = methodInfo->getNextCompileLevel();
734
methodInfo->setReasonForRecompilation(bodyInfo->getIsPushedForRecompilation() ?
735
TR_PersistentMethodInfo::RecompDueToRecompilationPushing : TR_PersistentMethodInfo::RecompDueToCounterZero);
736
// It's possible that a thread decrements the counter to 0 and another
737
// thread decrements it further to -1 which will trigger a compilation
738
// at same level. The following line will prevent that.
739
methodInfo->setDisableMiscSamplingCounterDecrementation();
740
}
741
}
742
}
743
744
if (recompile) // recompilation due to count reaching 0
745
{
746
bodyInfo->setOldStartCountDelta(totalSampleCount - startSampleCount);// Should we handle overflow?
747
bodyInfo->setHotStartCountDelta(0);
748
bodyInfo->setStartCount(totalSampleCount);
749
}
750
}
751
752
bool postponeDecision = false;
753
if (!recompile && hotSamplingWindowComplete && totalSampleCount > startSampleCount)
754
{
755
compInfo->_stats._methodsReachingSampleInterval++;
756
757
// Goal: based on codeSize, scale the original Threshold by no more than +/-x%
758
// 'x' will be called sampleThresholdVariationAllowance
759
// When codeSize==avgCodeSize, we want the scaling factor to be 1.0
760
// The scaling of the threshold can be turned off by having
761
// the sampleThresholdVariationAllowance equal to 0
762
J9JITExceptionTable *metaData = jitConfig->jitGetExceptionTableFromPC(event->_vmThread, (UDATA)startPC);
763
int32_t codeSize = 0; // TODO eliminate the overhead; we already have metadata
764
if (metaData)
765
codeSize = compInfo->calculateCodeSize(metaData);
766
767
// Scale the recompilation thresholds based on method size
768
int32_t avgCodeSize = (TR::Compiler->target.cpu.isI386() || TR::Compiler->target.cpu.isPower()) ? 1500 : 3000; // experimentally determined
769
770
TR_ASSERT(codeSize != 0, "startPC!=0 ==> codeSize!=0");
771
772
float scalingFactor = 0.01*((100 - TR::Options::_sampleThresholdVariationAllowance) +
773
(avgCodeSize << 1)*TR::Options::_sampleThresholdVariationAllowance /
774
(float)(avgCodeSize + codeSize));
775
curMsg += sprintf(curMsg, " SizeScaling=%.1f", scalingFactor);
776
scaledHotThreshold = (int32_t)(hotSampleThreshold*scalingFactor);
777
778
// Do not use aggressive recompilations for big applications like websphere.
779
// WebSphere loads more than 14000 classes, typical small apps more like 1000-2000 classes.
780
// ==> use a reasonable value like 5000 to determine if the application is big
781
bool useAggressiveRecompilations = !cmdLineOptions->getOption(TR_DisableAggressiveRecompilations) &&
782
(bodyInfo->decAggressiveRecompilationChances() > 0 ||
783
compInfo->getPersistentInfo()->getNumLoadedClasses() < TR::Options::_bigAppThreshold);
784
785
bool conservativeCase = TR::Options::getCmdLineOptions()->getOption(TR_ConservativeCompilation) &&
786
compInfo->getPersistentInfo()->getNumLoadedClasses() >= TR::Options::_bigAppThreshold;
787
788
if (conservativeCase)
789
{
790
scaledHotThreshold >>= 1; // halve the threshold for a more conservative comp decision
791
useAggressiveRecompilations = true; // force it, to allow recomp at original threshold,
792
// but double the sample interval (60 samples)
793
}
794
// For low number of processors become more conservative during startup
795
if (jitConfig->javaVM->phase != J9VM_PHASE_NOT_STARTUP &&
796
TR::Compiler->target.numberOfProcessors() <= 2)
797
scaledHotThreshold >>= 2;
798
799
// Used to make recompilations less aggressive during WebSphere startup,
800
// avoiding costly hot, and very hot compilation
801
bool isBigAppStartup = (jitConfig->javaVM->phase != J9VM_PHASE_NOT_STARTUP
802
&& TR::Options::sharedClassCache()
803
&& compInfo->getPersistentInfo()->getNumLoadedClasses() >= TR::Options::_bigAppThreshold
804
&& TR::Options::_bigAppSampleThresholdAdjust > 0);
805
if (isBigAppStartup)
806
{
807
scaledHotThreshold >>= TR::Options::_bigAppSampleThresholdAdjust; //adjust to avoid hot recomps
808
useAggressiveRecompilations = false; //also to avoid potential hot recomps, this could have been set
809
}
810
811
// We allow hot compilations at a lower CPU, but for a longer period of time (scorching window)
812
bool secondCriteriaHot = false;
813
// Check for non first hot interval
814
if (useAggressiveRecompilations)
815
{
816
int32_t samplesInSelf = scorchingSamplingWindowComplete ? scorchingSampleInterval : crtSampleIntervalCount;
817
// Alternative: Here we may want to do something only if a scorchingSampleWindow is complete
818
if (samplesInSelf > hotSampleInterval)
819
{
820
// 0.5*targetCPU < crtCPU
821
if (((globalSamples*hotSampleInterval) >> 1) < (scaledHotThreshold * samplesInSelf))
822
secondCriteriaHot = true;
823
}
824
}
825
826
// TODO: if the scorching window is complete, should we look at CPU over the larger window?
827
if (globalSamplesInHotWindow <= scaledHotThreshold || secondCriteriaHot)
828
{
829
// The method is hot, but is it actually scorching?
830
//
831
// If the scorching interval is done, perform normal scorching test
832
// If the scorching interval is not done, performs a sniff test for a shorter interval
833
// 1. If the method looks scorching during this small interval, do not
834
// do anything; just wait for the scorching interval to finish
835
// 2. If the method does not look scorching, perform a hot compilation
836
//
837
// First let's do some scaling based on size, startup, bigApp, numProc, etc
838
scaledScorchingThreshold = (int32_t)(TR::Options::_scorchingSampleThreshold * scalingFactor);
839
if (conservativeCase)
840
{
841
scaledScorchingThreshold >>= 1; // halve the threshold for a more conservative comp decision
842
if (TR::Compiler->target.numberOfProcessors() != 1)
843
useAggressiveRecompilations = true; // to allow recomp at original threshold,
844
else // but double the sample interval (60 samples)
845
useAggressiveRecompilations = false;
846
}
847
848
if (isBigAppStartup)
849
{
850
scaledScorchingThreshold >>= TR::Options::_bigAppSampleThresholdAdjust; //adjust to avoid scorching recomps
851
useAggressiveRecompilations = false; //this could have been set, so disable to avoid
852
}
853
854
if (!scorchingSamplingWindowComplete)
855
{
856
// Perform scorching recompilation sniff test using a shorter sample interval
857
// TODO: relax the thresholds a bit, maybe we can go directly to scorching next time
858
if (globalSamplesInHotWindow <= scaledScorchingThreshold)
859
postponeDecision = true;
860
}
861
else // scorching sample interval is done
862
{
863
// Adjust the scorchingSampleThreshold because the sample window is larger
864
scaledScorchingThreshold = scaledScorchingThreshold * intervalIncreaseFactor;
865
866
// Make the scorching compilation less likely as time goes by
867
// The bigger the number of scorching intervals, the smaller scaledScorchingThreshold
868
if (bodyInfo->getNumScorchingIntervals() > 3)
869
scaledScorchingThreshold >>= 1;
870
871
// secondCriteria looks at hotness over a period of time that is double
872
// than normal (60 samples). This is why we have to increase scaledScorchingThreshold
873
// by a factor of 2. If we want to become twice as aggressive we need to double
874
// scaledScorchingThreshold yet again
875
//
876
bool secondCriteriaScorching = useAggressiveRecompilations &&
877
(totalSampleCount - bodyInfo->getOldStartCount() <= (scaledScorchingThreshold << 2));
878
// Scorching test
879
if ((globalSamples <= scaledScorchingThreshold) || secondCriteriaScorching)
880
{
881
// Determine whether or not the method is to be profiled before
882
// being compiled as scorching hot.
883
// For profiling the platform must support counting recompilation.
884
//
885
if (!TR::Options::getCmdLineOptions()->getOption(TR_DisableProfiling) &&
886
TR::Recompilation::countingSupported() && !TR::CodeCacheManager::instance()->almostOutOfCodeCache() &&
887
!(methodInfo->profilingDisabled()))
888
{
889
nextOptLevel = veryHot;
890
useProfiling = true;
891
}
892
else
893
{
894
nextOptLevel = scorching;
895
}
896
recompile = true;
897
compInfo->_stats._methodsSelectedToRecompile++;
898
TR::Recompilation::scorchingThresholdMethodsCompiled++;
899
}
900
}
901
// Should we proceed with the hot compilation?
902
if (!recompile && !postponeDecision && bodyInfo->getHotness() <= warm)
903
{
904
nextOptLevel = hot;
905
// Decide whether to deny optimizer to switch to profiling on the fly
906
if (globalSamplesInHotWindow > TR::Options::_sampleDontSwitchToProfilingThreshold &&
907
!TR::Options::getCmdLineOptions()->getOption(TR_AggressiveSwitchingToProfiling))
908
dontSwitchToProfiling = true;
909
recompile = true;
910
compInfo->_stats._methodsSelectedToRecompile++;
911
TR::Recompilation::hotThresholdMethodsCompiled++;
912
}
913
}
914
// If the method is truly cold, replenish the counter to avoid
915
// recompilation through counter decrementation
916
else if (globalSamplesInHotWindow >= TR::Options::_resetCountThreshold)
917
{
918
compInfo->_stats._methodsSampleWindowReset++;
919
bodyInfo->setCounter(count + hotSampleInterval);
920
if (logSampling)
921
curMsg += sprintf(curMsg, " is cold, reset cnt to %d", bodyInfo->getCounter());
922
}
923
// The hot sample interval is done. Prepare for next interval.
924
if (scorchingSamplingWindowComplete)
925
{
926
// scorching sample interval is done
927
bodyInfo->setStartCount(totalSampleCount);
928
bodyInfo->setOldStartCountDelta(totalSampleCount - startSampleCount);
929
bodyInfo->setHotStartCountDelta(0);
930
}
931
else
932
{
933
int32_t hotStartCountDelta = totalSampleCount - startSampleCount;
934
TR_ASSERT(hotStartCountDelta >= 0, "hotStartCountDelta should not be negative\n");
935
if (hotStartCountDelta > 0xffff)
936
hotStartCountDelta = 0xffff;
937
bodyInfo->setHotStartCountDelta(hotStartCountDelta);
938
}
939
940
if (recompile)
941
{
942
// One more test
943
if (!isAlreadyBeingCompiled)
944
{
945
methodInfo->setReasonForRecompilation(TR_PersistentMethodInfo::RecompDueToThreshold);
946
}
947
else // the method is already being compiled; maybe we need to update the opt level
948
{
949
recompile = false; // do not need to recompile the method
950
if ((int32_t)nextOptLevel > (int32_t)methodInfo->getNextCompileLevel())
951
{
952
// search the queue to update the optimization plan.
953
//
954
TR::IlGeneratorMethodDetails details(j9method);
955
TR_MethodToBeCompiled *entry =
956
compInfo->adjustCompilationEntryAndRequeue(details, methodInfo, nextOptLevel,
957
useProfiling,
958
CP_ASYNC_NORMAL, fe);
959
if (entry)
960
{
961
if (logSampling)
962
curMsg += sprintf(curMsg, " adj opt lvl to %d", (int32_t)(entry->_optimizationPlan->getOptLevel()));
963
int32_t measuredCpuUtil = crtSampleIntervalCount == 0 ? // scorching interval done?
964
scorchingSampleInterval * 1000 / globalSamples :
965
hotSampleInterval * 1000 / globalSamplesInHotWindow;
966
entry->_optimizationPlan->setPerceivedCPUUtil(measuredCpuUtil);
967
}
968
}
969
}
970
}
971
}
972
973
// try to upgrade some of the less optimized compilations
974
bool willUpgrade = false;
975
if (!recompile)
976
{
977
if (bodyInfo->getFastRecompilation() && !isAlreadyBeingCompiled)
978
{
979
// Allow profiling even if we are about to exhaust the code cache
980
// because this case is used for diagnostic only
981
if (bodyInfo->getFastScorchingRecompilation())
982
{
983
if (!TR::Options::getCmdLineOptions()->getOption(TR_DisableProfiling) &&
984
TR::Recompilation::countingSupported() &&
985
!(methodInfo->profilingDisabled()))
986
{
987
nextOptLevel = veryHot;
988
useProfiling = true;
989
}
990
else
991
{
992
nextOptLevel = scorching;
993
}
994
}
995
else
996
{
997
nextOptLevel = hot;
998
}
999
recompile = true;
1000
methodInfo->setReasonForRecompilation(TR_PersistentMethodInfo::RecompDueToThreshold);//lie
1001
}
1002
else if (!postponeDecision &&
1003
!TR::Options::getCmdLineOptions()->getOption(TR_DisableUpgrades) &&
1004
// case (1) methods downgraded to cold
1005
((bodyInfo->getHotness() < warm &&
1006
(methodInfo->isOptLevelDowngraded() || cmdLineOptions->getOption(TR_EnableUpgradingAllColdCompilations))) ||
1007
// case (2) methods taken from shared cache
1008
bodyInfo->getIsAotedBody()))
1009
// case (3) cold compilations for bootstrap methods, even if not downgraded
1010
{
1011
// test other conditions for upgrading
1012
1013
uint32_t threshold = TR::Options::_coldUpgradeSampleThreshold;
1014
// Pick a threshold based on method size (higher thresholds for bigger methods)
1015
if (jitConfig->javaVM->phase != J9VM_PHASE_NOT_STARTUP &&
1016
compInfo->getPersistentInfo()->getNumLoadedClasses() >= TR::Options::_bigAppThreshold)
1017
{
1018
threshold += (uint32_t)(TR::CompilationInfo::getMethodBytecodeSize(j9method) >> 8);
1019
// sampleIntervalCount goes from 0 to _sampleInterval-1
1020
// Very big methods (bigger than 6K bytecodes) will have a threshold bigger than this
1021
// and never be upgraded, which is what we want
1022
}
1023
if ((uint32_t)crtSampleIntervalCount >= threshold &&
1024
compInfo->getMethodQueueSize() <= TR::CompilationInfo::SMALL_QUEUE &&
1025
!compInfo->getPersistentInfo()->isClassLoadingPhase() &&
1026
!isAlreadyBeingCompiled &&
1027
!cmdLineOptions->getOption(TR_DisableUpgradingColdCompilations))
1028
{
1029
recompile = true;
1030
if (!bodyInfo->getIsAotedBody())
1031
{
1032
// cold-nonaot compilations can only be upgraded to warm
1033
nextOptLevel = warm;
1034
}
1035
else // AOT bodies
1036
{
1037
if (!TR::Options::isQuickstartDetected())
1038
{
1039
// AOT upgrades are performed at warm
1040
// We may want to look at how expensive the method is though
1041
nextOptLevel = warm;
1042
}
1043
else // -Xquickstart (and AOT)
1044
{
1045
nextOptLevel = cold;
1046
// Exception: bootstrap class methods that are cheap should be upgraded directly at warm
1047
if (cmdLineOptions->getOption(TR_UpgradeBootstrapAtWarm) && fe->isClassLibraryMethod((TR_OpaqueMethodBlock *)j9method))
1048
{
1049
TR_J9SharedCache *sc = TR_J9VMBase::get(jitConfig, event->_vmThread, TR_J9VMBase::AOT_VM)->sharedCache();
1050
bool expensiveComp = sc->isHint(j9method, TR_HintLargeMemoryMethodW);
1051
if (!expensiveComp)
1052
nextOptLevel = warm;
1053
}
1054
}
1055
}
1056
methodInfo->setReasonForRecompilation(TR_PersistentMethodInfo::RecompDueToOptLevelUpgrade);
1057
// reset the flag to avoid upgrading repeatedly
1058
methodInfo->setOptLevelDowngraded(false);
1059
willUpgrade = true;
1060
}
1061
}
1062
}
1063
1064
// if we don't take any recompilation decision, let's see if we can
1065
// schedule a compilation from the low priority queue
1066
if (!recompile && compInfo && compInfo->getLowPriorityCompQueue().hasLowPriorityRequest() &&
1067
compInfo->canProcessLowPriorityRequest())
1068
{
1069
// wake up the compilation thread
1070
compInfo->getCompilationMonitor()->notifyAll();
1071
}
1072
if (recompile)
1073
{
1074
// Method is being recompiled because it is truly hot;
1075
bodyInfo->setSamplingRecomp();
1076
}
1077
fe->releaseCompilationLock();
1078
if (recompile)
1079
{
1080
//induceRecompilation(fe, startPC);
1081
bool useSampling = (nextOptLevel != scorching && !useProfiling);
1082
plan = TR_OptimizationPlan::alloc(nextOptLevel, useProfiling, useSampling);
1083
if (plan)
1084
{
1085
int32_t measuredCpuUtil = crtSampleIntervalCount == 0 ? // scorching interval done?
1086
(globalSamples != 0 ? scorchingSampleInterval * 1000 / globalSamples : 0) :
1087
(globalSamplesInHotWindow != 0 ? hotSampleInterval * 1000 / globalSamplesInHotWindow : 0);
1088
plan->setPerceivedCPUUtil(measuredCpuUtil);
1089
plan->setIsUpgradeRecompilation(willUpgrade);
1090
plan->setDoNotSwitchToProfiling(dontSwitchToProfiling);
1091
if (crtSampleIntervalCount == 0 && // scorching compilation decision can be taken
1092
globalSamples <= TR::Options::_relaxedCompilationLimitsSampleThreshold) // FIXME: needs scaling
1093
plan->setRelaxedCompilationLimits(true);
1094
if (logSampling)
1095
{
1096
float cpu = measuredCpuUtil / 10.0;
1097
if (useProfiling)
1098
curMsg += sprintf(curMsg, " --> recompile at level %d, profiled CPU=%.1f%%", nextOptLevel, cpu);
1099
else
1100
curMsg += sprintf(curMsg, " --> recompile at level %d CPU=%.1f%%", nextOptLevel, cpu);
1101
1102
if (methodInfo->getReasonForRecompilation() == TR_PersistentMethodInfo::RecompDueToThreshold)
1103
{
1104
curMsg += sprintf(curMsg, " scaledThresholds=[%d %d]", scaledScorchingThreshold, scaledHotThreshold);
1105
}
1106
}
1107
}
1108
else // OOM
1109
{
1110
if (logSampling)
1111
curMsg += sprintf(curMsg, " --> not recompiled: OOM");
1112
}
1113
}
1114
else if (logSampling)
1115
{
1116
if (isAlreadyBeingCompiled)
1117
curMsg += sprintf(curMsg, " - is already being recompiled");
1118
else if (!hotSamplingWindowComplete)
1119
curMsg += sprintf(curMsg, " not recompiled, smpl interval not done");
1120
else
1121
{
1122
float measuredCpuUtil = 0.0;
1123
if (crtSampleIntervalCount == 0) // scorching interval done
1124
{
1125
if (globalSamples)
1126
measuredCpuUtil = scorchingSampleInterval * 100.0 / globalSamples;
1127
}
1128
else
1129
{
1130
if (globalSamplesInHotWindow)
1131
measuredCpuUtil = hotSampleInterval * 100.0 / globalSamplesInHotWindow;
1132
}
1133
curMsg += sprintf(curMsg, " not recompiled, CPU=%.1f%% %s scaledThresholds=[%d %d]",
1134
measuredCpuUtil, postponeDecision ? " postpone decision" : "",
1135
scaledScorchingThreshold, scaledHotThreshold);
1136
}
1137
}
1138
}
1139
} // endif (bodyInfo)
1140
1141
if (logSampling)
1142
{
1143
bool bufferOverflow = ((curMsg - msg) >= MSG_SZ); // check for overflow at runtime
1144
if (fe->isLogSamplingSet())
1145
{
1146
TR_VerboseLog::CriticalSection vlogLock;
1147
TR_VerboseLog::writeLine(TR_Vlog_SAMPLING,"%s", msg);
1148
if (bufferOverflow)
1149
TR_VerboseLog::writeLine(TR_Vlog_SAMPLING,"Sampling line is too big: %d characters", curMsg-msg);
1150
}
1151
Trc_JIT_Sampling_Detail(getJ9VMThreadFromTR_VM(fe), msg);
1152
if (bufferOverflow)
1153
Trc_JIT_Sampling_Detail(getJ9VMThreadFromTR_VM(fe), "Sampling line will cause buffer overflow");
1154
// check for buffer overflow and write a message
1155
}
1156
return plan;
1157
}
1158
1159
TR_OptimizationPlan *
TR::DefaultCompilationStrategy::processHWPSample(TR_MethodEvent *event)
   {
   // Handle a hardware-profiler (runtime instrumentation) sample for a jitted
   // body. Returns a freshly allocated optimization plan when the sample
   // justifies an upgrade recompilation, or NULL when nothing should be done.
   TR_PersistentJittedBodyInfo *bodyInfo = TR::Recompilation::getJittedBodyInfoFromPC(event->_oldStartPC);
   TR_ASSERT(bodyInfo, "bodyInfo should not be NULL!\n");
   if (!bodyInfo)
      return NULL;

   TR_PersistentMethodInfo *methodInfo = bodyInfo->getMethodInfo();
   TR_Hotness currentLevel = bodyInfo->getHotness();
   if (bodyInfo->getIsProfilingBody() && !bodyInfo->getUsesJProfiling())
      {
      // We rely on a count-based recompilation for profiled methods.
      return NULL;
      }

   TR_OptimizationPlan *plan = NULL;
   TR_Hotness proposedLevel = event->_nextOptLevel;
   // Act when the sample proposes a higher opt level, or when an AOT body may
   // be upgraded (unless that upgrade has been disabled on the command line).
   bool aotUpgradePossible = bodyInfo->getIsAotedBody() &&
                             !TR::Options::getCmdLineOptions()->getOption(TR_DontRIUpgradeAOTWarmMethods);
   if (proposedLevel > currentLevel || aotUpgradePossible)
      {
      J9JITConfig *jitConfig = event->_vmThread->javaVM->jitConfig;
      TR_J9VMBase *fe = TR_J9VMBase::get(jitConfig, event->_vmThread);

      // Query the compilation queue under the compilation lock.
      fe->acquireCompilationLock();
      bool alreadyQueued = TR::Recompilation::isAlreadyBeingCompiled((TR_OpaqueMethodBlock *) event->_j9method, event->_oldStartPC, fe);
      fe->releaseCompilationLock();

      if (!alreadyQueued)
         {
         // For a scorching proposal, prefer an instrumented veryHot
         // compilation when profiling is supported and allowed for
         // this method; otherwise go straight to the proposed level.
         bool profileFirst = proposedLevel == scorching &&
                             !TR::Options::getCmdLineOptions()->getOption(TR_DisableProfiling) &&
                             TR::Recompilation::countingSupported() &&
                             !bodyInfo->_methodInfo->profilingDisabled();
         if (profileFirst)
            {
            plan = TR_OptimizationPlan::alloc(veryHot, true, false);
            }
         else
            {
            plan = TR_OptimizationPlan::alloc(proposedLevel, false, true);
            }

         if (plan)
            methodInfo->setReasonForRecompilation(TR_PersistentMethodInfo::RecompDueToRI);
         }
      }
   return plan;
   }
1210
1211
//-------------------------- adjustOptimizationPlan ---------------------------
1212
// Input: structure with information about the method to be compiled
1213
// Output: returns true if the optimization plan has been changed. In that case
1214
// the optimization level will be changed and also 2 flags in the
1215
// optimization plan may be changed (OptLevelDowngraded, AddToUpgradeQueue)
1216
//----------------------------------------------------------------------------
1217
bool TR::DefaultCompilationStrategy::adjustOptimizationPlan(TR_MethodToBeCompiled *entry, int32_t optLevelAdjustment)
1218
{
1219
// Run SmoothCompilation to see if we need to change the opt level and/or priority
1220
bool shouldAddToUpgradeQueue = false;
1221
TR::CompilationInfo *compInfo = TR::CompilationController::getCompilationInfo();
1222
if (optLevelAdjustment == 0) // unchanged opt level (default)
1223
{
1224
shouldAddToUpgradeQueue = compInfo->SmoothCompilation(entry, &optLevelAdjustment);
1225
}
1226
1227
// Recompilations are treated differently
1228
if (entry->_oldStartPC != 0)
1229
{
1230
// Downgrade the optimization level of invalidation requests
1231
// if too many invalidations are present into the compilation queue
1232
// Here we access _numInvRequestsInCompQueue outside the protection of compilation queue monitor
1233
// This is fine because it's just a heuristic
1234
if (entry->_entryIsCountedAsInvRequest &&
1235
compInfo->getNumInvRequestsInCompQueue() >= TR::Options::_numQueuedInvReqToDowngradeOptLevel &&
1236
entry->_optimizationPlan->getOptLevel() > cold &&
1237
!TR::Options::getCmdLineOptions()->getOption(TR_DontDowngradeToCold))
1238
{
1239
entry->_optimizationPlan->setOptLevel(cold);
1240
// Keep the optLevel in sync between the optPlan and methodInfo
1241
TR_PersistentMethodInfo* methodInfo = TR::Recompilation::getMethodInfoFromPC(entry->_oldStartPC);
1242
TR_ASSERT(methodInfo, "methodInfo must exist because we recompile");
1243
methodInfo->setNextCompileLevel(entry->_optimizationPlan->getOptLevel(), entry->_optimizationPlan->insertInstrumentation());
1244
// DO NOT mark this as optLevelDowngraded
1245
// entry->_optimizationPlan->setOptLevelDowngraded(true);
1246
return true;
1247
}
1248
return false;
1249
}
1250
1251
if (optLevelAdjustment == 0)
1252
return false;
1253
1254
// Must check if we really downgrade this method (for fixed opt level we do not do it)
1255
TR_Hotness hotnessLevel = entry->_optimizationPlan->getOptLevel();
1256
bool optLevelDowngraded = false;
1257
1258
if (true)
1259
{
1260
if (TR::Options::getCmdLineOptions()->allowRecompilation()) // don't do it for fixed level
1261
{
1262
if (optLevelAdjustment > 0) // would like to increase the opt level
1263
{
1264
if (hotnessLevel == warm || hotnessLevel == cold || hotnessLevel == noOpt)
1265
hotnessLevel = (TR_Hotness)((int)hotnessLevel + 1);
1266
}
1267
else // would like to decrease the opt level
1268
{
1269
if (optLevelAdjustment < -1)
1270
{
1271
hotnessLevel = noOpt;
1272
optLevelDowngraded = true;
1273
}
1274
else if (hotnessLevel == warm || hotnessLevel == hot)
1275
{
1276
hotnessLevel = (TR_Hotness)((int)hotnessLevel - 1);
1277
optLevelDowngraded = true;
1278
}
1279
}
1280
}
1281
}
1282
1283
// If change in hotness level
1284
if (entry->_optimizationPlan->getOptLevel() != hotnessLevel)
1285
{
1286
entry->_optimizationPlan->setOptLevel(hotnessLevel);
1287
entry->_optimizationPlan->setOptLevelDowngraded(optLevelDowngraded);
1288
// Set the flag to add to the upgrade queue
1289
if (optLevelDowngraded && shouldAddToUpgradeQueue)
1290
entry->_optimizationPlan->setAddToUpgradeQueue();
1291
return true;
1292
}
1293
else
1294
{
1295
return false;
1296
}
1297
}
1298
1299
1300
void TR::DefaultCompilationStrategy::beforeCodeGen(TR_OptimizationPlan *plan, TR::Recompilation *recomp)
   {
   // Set up the opt level and counter for the next compilation. This also
   // decides whether there will be a next compilation at all; if not, any
   // counters inserted into the code are removed and sampling is disabled.
   // Ideally, we should have a single step after the compilation.
   if (recomp->_doNotCompileAgain)
      return;

   // do not test plan->insertInstrumentation() because we might have switched to profiling
   TR_Hotness currentHotness = recomp->_compilation->getMethodHotness();
   bool profilingBelowScorching = recomp->isProfilingCompilation() && currentHotness < scorching;

   int32_t nextLevel;
   int32_t nextCount;
   if (profilingBelowScorching)
      {
      // The next compilation runs one level above the current profiling
      // compilation, after a fixed number of profiled invocations.
      nextLevel = currentHotness + 1;
      nextCount = PROFILING_INVOCATION_COUNT - 1; // defined in Profiler.hpp
      }
   else
      {
      // Figure out the next opt level and the next count
      TR::Compilation *comp = recomp->_compilation;
      TR::Options *options = comp->getOptions();
      bool mayHaveLoops = comp->mayHaveLoops();
      if (recomp->_bodyInfo->getUsesGCR())
         {
         nextLevel = warm; // GCR recompilations should be performed at warm
         if (options->getGCRCount() > 0)
            {
            // If a GCR count was specified, use that
            nextCount = options->getGCRCount();
            }
         else
            {
            // Find the count corresponding to the warm level (or next available)
            nextCount = options->getCountValue(mayHaveLoops, (TR_Hotness) nextLevel);
            if (nextCount < 0)
               {
               // Last resort: use some sensible values
               nextCount = mayHaveLoops ? options->getInitialBCount() : options->getInitialCount();
               }
            }
         }
      else
         {
         nextLevel = options->getNextHotnessLevel(mayHaveLoops, plan->getOptLevel());
         nextCount = options->getCountValue(mayHaveLoops, (TR_Hotness) nextLevel);
         }
      }

   bool anotherCompPossible = (nextCount > 0) ||
                              profilingBelowScorching ||
                              plan->isOptLevelDowngraded() ||
                              recomp->_bodyInfo->getUsesGCR();
   if (anotherCompPossible)
      {
      recomp->_nextLevel = (TR_Hotness)nextLevel; // There may be another compilation
      }
   else
      {
      // There will not be another compilation - remove any counters that
      // have been inserted into the code, and also turn off sampling for
      // this body.
      recomp->preventRecompilation();
      //recomp->_useSampling = false; // wrong, because the codegen will generate a counting body
      recomp->_bodyInfo->setDisableSampling(true);
      }
   recomp->_nextCounter = nextCount;
   }
1369
1370
void TR::DefaultCompilationStrategy::postCompilation(TR_OptimizationPlan *plan, TR::Recompilation *recomp)
   {
   // For synchronous compilations the optimization plan was attached to the
   // persistent method info; detach it now that the compilation is finished.
   // Asynchronous compilations manage the plan elsewhere, so nothing to do.
   if (TR::CompilationController::getCompilationInfo()->asynchronousCompilation())
      return;

   TR_OptimizationPlan::_optimizationPlanMonitor->enter();
   recomp->getMethodInfo()->_optimizationPlan = NULL;
   TR_OptimizationPlan::_optimizationPlanMonitor->exit();
   }
1379
1380
1381
1382
1383
1384
//============================= ThresholdCompilationStrategy ====================
1385
1386
1387
TR::ThresholdCompilationStrategy::ThresholdCompilationStrategy()
   {
   // To be safe, clear everything out before setting anything
   for (int32_t lvl = noOpt; lvl <= numHotnessLevels; lvl++)
      {
      _nextLevel[lvl] = unknownHotness;
      _samplesNeededToMoveTo[lvl] = -1;
      _performInstrumentation[lvl] = false;
      }

   // Now, initialize our threshold based strategy.
   //
   // These could easily be set from command line options or any other
   // way (maybe from the existing options string?)
   //
   // The current strategy uses only noOpt -> warm -> scorching
   // (and veryHot if instrumentation-based profiling is used).
   const int32_t scorchingThresh = 20;
   _samplesNeededToMoveTo[noOpt] = 1;
   _samplesNeededToMoveTo[warm] = 6;
   _samplesNeededToMoveTo[scorching] = scorchingThresh;

   // If we are doing instrumentation-based profiling
   if (!TR::Options::getCmdLineOptions()->getOption(TR_DisableProfiling))
      {
      // Insert instrumentation in veryHot
      _samplesNeededToMoveTo[veryHot] = scorchingThresh;
      _performInstrumentation[veryHot] = true; // Yes, perform profiling at this level

      // Sampling is disabled during instrumentation-based profiling,
      // so this is really just a place holder
      _samplesNeededToMoveTo[scorching] = scorchingThresh + 1;
      }

   // Use the information above to set up the "next" pointers: walk the
   // levels backwards and, for each active level, record where to jump next.
   int32_t prevActiveLevel = unknownHotness;
   for (int32_t curLevel = numHotnessLevels;
        curLevel >= noOpt; // should be "> minHotness" if it existed
        curLevel--)
      {
      if (_samplesNeededToMoveTo[curLevel] > 0)
         {
         // curLevel is an active level
         _nextLevel[curLevel] = (TR_Hotness) prevActiveLevel;
         prevActiveLevel = curLevel;
         }
      }
   // Finally, check unknownHotness (which represents the method still being interpreted) last
   _nextLevel[unknownHotness] = (TR_Hotness) prevActiveLevel;
   }
1437
1438
1439
1440
TR_Hotness TR::ThresholdCompilationStrategy::getInitialOptLevel()
   {
   // Under this strategy every method starts at the lowest optimization level
   // and climbs via the sample thresholds configured in the constructor.
   return noOpt;
   }
1444
1445
1446
TR_OptimizationPlan *TR::ThresholdCompilationStrategy::processEvent(TR_MethodEvent *event, bool *newPlanCreated)
   {
   // Decode the method event and, when appropriate, produce the optimization
   // plan for the compilation it should trigger. *newPlanCreated tells the
   // caller whether the returned plan was freshly allocated here.
   TR_OptimizationPlan *plan = NULL;
   TR_PersistentJittedBodyInfo *bodyInfo = NULL;
   TR_PersistentMethodInfo *methodInfo = NULL;
   TR_Hotness levelToUse;
   *newPlanCreated = false;

   if (TR::CompilationController::verbose() >= TR::CompilationController::LEVEL3)
      fprintf(stderr, "Received event %d\n", event->_eventType);

   // Dispatch on the event type
   switch (event->_eventType)
      {
      case TR_MethodEvent::InterpretedMethodSample:
         // do nothing
         break;

      case TR_MethodEvent::InterpreterCounterTripped:
         TR_ASSERT(event->_oldStartPC == 0, "oldStartPC should be 0 for an interpreted method");
         // Use the counts to determine the first level of compilation;
         // the level can be changed later on if option subsets are present.
         levelToUse = TR::ThresholdCompilationStrategy::getInitialOptLevel();
         plan = TR_OptimizationPlan::alloc(levelToUse);
         *newPlanCreated = true;
         break;

      case TR_MethodEvent::OtherRecompilationTrigger: // sync recompilation through fixMethodCode
         // For sync re-compilation we attach the plan to the persistentBodyInfo
         bodyInfo = TR::Recompilation::getJittedBodyInfoFromPC(event->_oldStartPC);
         methodInfo = bodyInfo->getMethodInfo();

         if (methodInfo->getReasonForRecompilation() == TR_PersistentMethodInfo::RecompDueToInlinedMethodRedefinition)
            {
            // Recompile at the same level after an inlined method redefinition
            methodInfo->incrementNumberOfInlinedMethodRedefinition();
            levelToUse = bodyInfo->getHotness();
            plan = TR_OptimizationPlan::alloc(levelToUse);
            *newPlanCreated = true;
            }
         else if (methodInfo->getOptimizationPlan())
            {
            TR_ASSERT(!TR::CompilationController::getCompilationInfo()->asynchronousCompilation(), "This case should happen only for sync recompilation");
            plan = methodInfo->getOptimizationPlan();
            }
         else
            {
            //levelToUse = TR::Recompilation::getNextCompileLevel(event->_oldStartPC);
            levelToUse = getNextOptLevel(bodyInfo->getHotness());
            plan = TR_OptimizationPlan::alloc(levelToUse);
            *newPlanCreated = true;
            }
         break;

      case TR_MethodEvent::NewInstanceImpl:
         levelToUse = getInitialOptLevel();
         plan = TR_OptimizationPlan::alloc(levelToUse);
         *newPlanCreated = true;
         break;

      case TR_MethodEvent::MethodBodyInvalidated:
         // keep the same optimization level
         bodyInfo = TR::Recompilation::getJittedBodyInfoFromPC(event->_oldStartPC);
         TR_ASSERT(bodyInfo, "A recompilable method should have jittedBodyInfo");
         levelToUse = bodyInfo->getHotness();
         plan = TR_OptimizationPlan::alloc(levelToUse);
         *newPlanCreated = true;
         // the following is just for compatibility with older implementation
         bodyInfo->getMethodInfo()->setNextCompileLevel(levelToUse, false); // no profiling
         break;

      case TR_MethodEvent::JittedMethodSample:
         // NOTE(review): *newPlanCreated is set even when processJittedSample
         // returns NULL — looks intentional (matches original), but confirm
         // against the caller's ownership conventions.
         plan = processJittedSample(event);
         *newPlanCreated = true;
         break;

      default:
         TR_ASSERT(0, "Bad event type %d", event->_eventType);
      }

   if (TR::CompilationController::verbose() >= TR::CompilationController::LEVEL2)
      fprintf(stderr, "Event %d created plan %p\n", event->_eventType, plan);

   return plan;
   }
1525
1526
1527
TR_OptimizationPlan *
TR::ThresholdCompilationStrategy::processJittedSample(TR_MethodEvent *event)
   {
   // Decide whether a sample taken in a jitted method body should trigger a
   // recompilation at the next optimization level of the threshold strategy.
   //
   // Returns a newly allocated TR_OptimizationPlan when this sample makes the
   // per-method counter reach exactly the threshold for the next level, or
   // NULL when no recompilation should be scheduled.
   TR_OptimizationPlan *plan = NULL;
   J9JITConfig *jitConfig = event->_vmThread->javaVM->jitConfig;
   TR_J9VMBase *fe = TR_J9VMBase::get(jitConfig, event->_vmThread);
   void *startPC = event->_oldStartPC;

   J9::PrivateLinkage::LinkageInfo *linkageInfo = J9::PrivateLinkage::LinkageInfo::get(startPC);
   TR_PersistentJittedBodyInfo *bodyInfo = NULL;

   // Only consider bodies that were compiled for sampling, have not already
   // failed a recompilation attempt, and for which sampling-based
   // recompilation has not been disabled via the debug option.
   if (!linkageInfo->hasFailedRecompilation()
       && linkageInfo->isSamplingMethodBody()
       && !debug("disableSamplingRecompilation"))
      {
      bodyInfo = TR::Recompilation::getJittedBodyInfoFromPC(startPC);
      // The body may use sampling yet have sampling turned off
      // (e.g. the last compilation was done with prex)
      if (bodyInfo && bodyInfo->getDisableSampling())
         bodyInfo = NULL;
      }

   if (!bodyInfo)
      return NULL;

   TR_PersistentMethodInfo *methodInfo = bodyInfo->getMethodInfo();

   // Read the method's current startPC and bump the sample counter under the
   // compilation lock so we do not race with a concurrent (re)compilation.
   fe->acquireCompilationLock();
   void *currentStartPC = (void *)TR::Compiler->mtd.startPC((TR_OpaqueMethodBlock *)methodInfo->getMethodInfo());

   if (currentStartPC != startPC)
      {
      // Rare case: we sampled an old body; ignore the sample.
      fe->releaseCompilationLock();
      return NULL;
      }

   if (TR::Options::getCmdLineOptions()->getFixedOptLevel() != -1
       || TR::Options::getAOTCmdLineOptions()->getFixedOptLevel() != -1)
      {
      // A fixed optimization level was requested on the command line;
      // never upgrade through recompilation.
      fe->releaseCompilationLock();
      return NULL;
      }

   // Count this sample; the increment must happen while the lock is held
   int32_t sampleCount = methodInfo->cpoIncCounter();
   fe->releaseCompilationLock();

   TR_Hotness curOptLevel = bodyInfo->getHotness();
   TR_Hotness nextOptLevel = getNextOptLevel(curOptLevel);

   // Recompile only when this very sample reaches the threshold (==, not >=,
   // so at most one plan is created per level transition)
   if (nextOptLevel != unknownHotness && sampleCount == getSamplesNeeded(nextOptLevel))
      {
      // Keep sampling the new body only if there is yet another level to reach
      bool useSampling = (getNextOptLevel(nextOptLevel) != unknownHotness);
      plan = TR_OptimizationPlan::alloc(nextOptLevel,
                                        getPerformInstrumentation(nextOptLevel),
                                        useSampling);
      }
   return plan;
   }