Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/jdk17u
Path: blob/master/src/hotspot/share/compiler/compilerDefinitions.cpp
64440 views
1
/*
2
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
3
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
*
5
* This code is free software; you can redistribute it and/or modify it
6
* under the terms of the GNU General Public License version 2 only, as
7
* published by the Free Software Foundation.
8
*
9
* This code is distributed in the hope that it will be useful, but WITHOUT
10
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12
* version 2 for more details (a copy is included in the LICENSE file that
13
* accompanied this code).
14
*
15
* You should have received a copy of the GNU General Public License version
16
* 2 along with this work; if not, write to the Free Software Foundation,
17
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
*
19
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
* or visit www.oracle.com if you need additional information or have any
21
* questions.
22
*
23
*/
24
25
#include "precompiled.hpp"
26
#include "code/codeCache.hpp"
27
#include "runtime/arguments.hpp"
28
#include "runtime/flags/jvmFlag.hpp"
29
#include "runtime/flags/jvmFlagAccess.hpp"
30
#include "runtime/flags/jvmFlagLimit.hpp"
31
#include "runtime/globals.hpp"
32
#include "runtime/globals_extension.hpp"
33
#include "compiler/compilerDefinitions.hpp"
34
#include "gc/shared/gcConfig.hpp"
35
#include "utilities/defaultStream.hpp"
36
37
// Human-readable names for the compiler types, indexed by the CompilerType
// enum (the first entry, the empty string, corresponds to "no compiler").
// The number and order of entries must stay in sync with the enum that
// defines compiler_number_of_types (declared in the header).
const char* compilertype2name_tab[compiler_number_of_types] = {
  "",
  "c1",
  "c2",
  "jvmci"
};
43
44
// Current global compilation mode. Starts at NORMAL and is (re)assigned by
// CompilationModeFlag::initialize() while VM arguments are processed.
CompilationModeFlag::Mode CompilationModeFlag::_mode = CompilationModeFlag::Mode::NORMAL;
45
46
static void print_mode_unavailable(const char* mode_name, const char* reason) {
47
warning("%s compilation mode unavailable because %s.", mode_name, reason);
48
}
49
50
// Parse the -XX:CompilationMode flag value and set _mode accordingly.
// Returns false (after printing the list of supported modes) only when the
// value is unrecognized. A recognized mode that is unavailable in this build
// merely produces a warning and leaves _mode at NORMAL. Afterwards, NORMAL is
// narrowed to a more specific mode if the available compilers only support a
// subset of the tiered pipeline.
bool CompilationModeFlag::initialize() {
  _mode = Mode::NORMAL;
  // During parsing we want to be very careful not to use any methods of CompilerConfig that depend on
  // CompilationModeFlag.
  if (CompilationMode != NULL) {
    if (strcmp(CompilationMode, "default") == 0 || strcmp(CompilationMode, "normal") == 0) {
      assert(_mode == Mode::NORMAL, "Precondition");
    } else if (strcmp(CompilationMode, "quick-only") == 0) {
      if (!CompilerConfig::has_c1()) {
        print_mode_unavailable("quick-only", "there is no c1 present");
      } else {
        _mode = Mode::QUICK_ONLY;
      }
    } else if (strcmp(CompilationMode, "high-only") == 0) {
      if (!CompilerConfig::has_c2() && !CompilerConfig::is_jvmci_compiler()) {
        print_mode_unavailable("high-only", "there is no c2 or jvmci compiler present");
      } else {
        _mode = Mode::HIGH_ONLY;
      }
    } else if (strcmp(CompilationMode, "high-only-quick-internal") == 0) {
      // This mode needs both c1 (for the quick internal compiles) and jvmci.
      if (!CompilerConfig::has_c1() || !CompilerConfig::is_jvmci_compiler()) {
        print_mode_unavailable("high-only-quick-internal", "there is no c1 and jvmci compiler present");
      } else {
        _mode = Mode::HIGH_ONLY_QUICK_INTERNAL;
      }
    } else {
      print_error();
      return false;
    }
  }

  // Now that the flag is parsed, we can use any methods of CompilerConfig.
  if (normal()) {
    if (CompilerConfig::is_c1_simple_only()) {
      _mode = Mode::QUICK_ONLY;
    } else if (CompilerConfig::is_c2_or_jvmci_compiler_only()) {
      _mode = Mode::HIGH_ONLY;
    } else if (CompilerConfig::is_jvmci_compiler_enabled() && CompilerConfig::is_c1_enabled() && !TieredCompilation) {
      warning("Disabling tiered compilation with non-native JVMCI compiler is not recommended, "
              "disabling intermediate compilation levels instead. ");
      _mode = Mode::HIGH_ONLY_QUICK_INTERNAL;
    }
  }
  return true;
}
95
96
// Print an error for an unsupported -XX:CompilationMode value to the error
// stream, listing only the modes this build can actually offer (based on
// which compilers are compiled in).
void CompilationModeFlag::print_error() {
  jio_fprintf(defaultStream::error_stream(), "Unsupported compilation mode '%s', available modes are:", CompilationMode);
  bool comma = false;  // becomes true once the first mode has been printed
  if (CompilerConfig::has_c1()) {
    jio_fprintf(defaultStream::error_stream(), "%s quick-only", comma ? "," : "");
    comma = true;
  }
  if (CompilerConfig::has_c2() || CompilerConfig::has_jvmci()) {
    jio_fprintf(defaultStream::error_stream(), "%s high-only", comma ? "," : "");
    comma = true;
  }
  if (CompilerConfig::has_c1() && CompilerConfig::has_jvmci()) {
    jio_fprintf(defaultStream::error_stream(), "%s high-only-quick-internal", comma ? "," : "");
    comma = true;
  }
  jio_fprintf(defaultStream::error_stream(), "\n");
}
113
114
// Returns threshold scaled with CompileThresholdScaling
115
// Returns threshold scaled with the global CompileThresholdScaling flag.
intx CompilerConfig::scaled_compile_threshold(intx threshold) {
  const double scale = CompileThresholdScaling;
  return scaled_compile_threshold(threshold, scale);
}
118
119
// Returns freq_log scaled with CompileThresholdScaling
120
// Returns freq_log scaled with the global CompileThresholdScaling flag.
intx CompilerConfig::scaled_freq_log(intx freq_log) {
  const double scale = CompileThresholdScaling;
  return scaled_freq_log(freq_log, scale);
}
123
124
// Returns threshold scaled with the value of scale.
125
// If scale < 0.0, threshold is returned without scaling.
126
// Returns threshold scaled with the value of scale, clamped to max_intx.
// A scale of 1.0 is the identity; a negative scale disables scaling and
// returns the threshold unchanged.
intx CompilerConfig::scaled_compile_threshold(intx threshold, double scale) {
  assert(threshold >= 0, "must be");
  if (scale < 0.0 || scale == 1.0) {
    return threshold;
  }
  const double scaled = threshold * scale;
  assert(scaled >= 0, "must be");
  // Saturate at the largest representable intx instead of overflowing.
  if (scaled > max_intx) {
    return max_intx;
  }
  return (intx)(scaled);
}
140
141
// Returns freq_log scaled with the value of scale.
142
// Returned values are in the range of [0, InvocationCounter::number_of_count_bits + 1].
143
// If scale < 0.0, freq_log is returned without scaling.
144
// Returns freq_log scaled with the value of scale, in the range
// [0, InvocationCounter::number_of_count_bits + 1].
// If scale < 0.0, freq_log is returned without scaling.
intx CompilerConfig::scaled_freq_log(intx freq_log, double scale) {
  // Identity scale or explicit "no scaling" request: leave the value alone.
  if (scale == 1.0 || scale < 0.0) {
    return freq_log;
  }
  // Guard against taking log2 of 0 further down.
  if (freq_log == 0 || scale == 0.0) {
    return 0;
  }
  // Determine the maximum notification frequency value currently supported:
  // the interpreter/C1 mask is at most InvocationCounter::number_of_count_bits
  // long, and a mask is one bit shorter than its notification frequency, so
  // the largest supported frequency log is number_of_count_bits + 1.
  int max_freq_bits = InvocationCounter::number_of_count_bits + 1;
  // Scale the actual frequency (2^freq_log), then convert back to a log.
  intx scaled_freq = scaled_compile_threshold((intx)1 << freq_log, scale);
  if (scaled_freq == 0) {
    // Return 0 right away to avoid calculating log2 of 0.
    return 0;
  }
  return MIN2(log2i(scaled_freq), max_freq_bits);
}
168
169
// Configure the VM to emulate the legacy "client VM" (C1-only) behavior:
// quick-only compilation, no interpreter profiling, JVMCI off, a single
// compiler thread, and a much smaller code cache. Sizing flags are only
// overridden when still at their defaults; the mode, ProfileInterpreter and
// JVMCI flags are forced unconditionally.
void CompilerConfig::set_client_emulation_mode_flags() {
  assert(has_c1(), "Must have C1 compiler present");
  CompilationModeFlag::set_quick_only();

  FLAG_SET_ERGO(ProfileInterpreter, false);
#if INCLUDE_JVMCI
  FLAG_SET_ERGO(EnableJVMCI, false);
  FLAG_SET_ERGO(UseJVMCICompiler, false);
#endif
  if (FLAG_IS_DEFAULT(NeverActAsServerClassMachine)) {
    FLAG_SET_ERGO(NeverActAsServerClassMachine, true);
  }
  if (FLAG_IS_DEFAULT(InitialCodeCacheSize)) {
    FLAG_SET_ERGO(InitialCodeCacheSize, 160*K);
  }
  if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
    FLAG_SET_ERGO(ReservedCodeCacheSize, 32*M);
  }
  if (FLAG_IS_DEFAULT(NonProfiledCodeHeapSize)) {
    FLAG_SET_ERGO(NonProfiledCodeHeapSize, 27*M);
  }
  if (FLAG_IS_DEFAULT(ProfiledCodeHeapSize)) {
    // Quick-only mode produces no profiled code, so no profiled heap needed.
    FLAG_SET_ERGO(ProfiledCodeHeapSize, 0);
  }
  if (FLAG_IS_DEFAULT(NonNMethodCodeHeapSize)) {
    FLAG_SET_ERGO(NonNMethodCodeHeapSize, 5*M);
  }
  if (FLAG_IS_DEFAULT(CodeCacheExpansionSize)) {
    FLAG_SET_ERGO(CodeCacheExpansionSize, 32*K);
  }
  if (FLAG_IS_DEFAULT(MaxRAM)) {
    // Do not use FLAG_SET_ERGO to update MaxRAM, as this will impact
    // heap setting done based on available phys_mem (see Arguments::set_heap_size).
    FLAG_SET_DEFAULT(MaxRAM, 1ULL*G);
  }
  if (FLAG_IS_DEFAULT(CICompilerCount)) {
    FLAG_SET_ERGO(CICompilerCount, 1);
  }
}
208
209
// Returns true if the user explicitly selected a compilation mode through
// any of the mode-controlling flags on the command line. The JVMCI flags are
// spliced into the expression via JVMCI_ONLY, which expands to nothing in
// builds without JVMCI.
bool CompilerConfig::is_compilation_mode_selected() {
  return !FLAG_IS_DEFAULT(TieredCompilation) ||
         !FLAG_IS_DEFAULT(TieredStopAtLevel) ||
         !FLAG_IS_DEFAULT(CompilationMode)
         JVMCI_ONLY(|| !FLAG_IS_DEFAULT(EnableJVMCI)
                    || !FLAG_IS_DEFAULT(UseJVMCICompiler));
}
216
217
bool CompilerConfig::is_interpreter_only() {
218
return Arguments::is_interpreter_only() || TieredStopAtLevel == CompLevel_none;
219
}
220
221
static bool check_legacy_flags() {
222
JVMFlag* compile_threshold_flag = JVMFlag::flag_from_enum(FLAG_MEMBER_ENUM(CompileThreshold));
223
if (JVMFlagAccess::check_constraint(compile_threshold_flag, JVMFlagLimit::get_constraint(compile_threshold_flag)->constraint_func(), false) != JVMFlag::SUCCESS) {
224
return false;
225
}
226
JVMFlag* on_stack_replace_percentage_flag = JVMFlag::flag_from_enum(FLAG_MEMBER_ENUM(OnStackReplacePercentage));
227
if (JVMFlagAccess::check_constraint(on_stack_replace_percentage_flag, JVMFlagLimit::get_constraint(on_stack_replace_percentage_flag)->constraint_func(), false) != JVMFlag::SUCCESS) {
228
return false;
229
}
230
JVMFlag* interpreter_profile_percentage_flag = JVMFlag::flag_from_enum(FLAG_MEMBER_ENUM(InterpreterProfilePercentage));
231
if (JVMFlagAccess::check_range(interpreter_profile_percentage_flag, false) != JVMFlag::SUCCESS) {
232
return false;
233
}
234
return true;
235
}
236
237
// Translate the legacy (pre-tiered) flags -- CompileThreshold,
// OnStackReplacePercentage and InterpreterProfilePercentage -- into their
// tiered-policy equivalents when running a single-tier configuration
// (c1-only, or c2/jvmci-only). In normal tiered mode any legacy flag
// settings are deliberately ignored. Finally, CompileThreshold itself is
// scaled by CompileThresholdScaling if that was set explicitly.
void CompilerConfig::set_legacy_emulation_flags() {
  // Any legacy flags set?
  if (!FLAG_IS_DEFAULT(CompileThreshold) ||
      !FLAG_IS_DEFAULT(OnStackReplacePercentage) ||
      !FLAG_IS_DEFAULT(InterpreterProfilePercentage)) {
    if (CompilerConfig::is_c1_only() || CompilerConfig::is_c2_or_jvmci_compiler_only()) {
      // This function is called before these flags are validated. In order to not confuse the user with extraneous
      // error messages, we check the validity of these flags here and bail out if any of them are invalid.
      if (!check_legacy_flags()) {
        return;
      }
      // Note, we do not scale CompileThreshold before this because the tiered flags are
      // all going to be scaled further in set_compilation_policy_flags().
      const intx threshold = CompileThreshold;
      const intx profile_threshold = threshold * InterpreterProfilePercentage / 100;
      const intx osr_threshold = threshold * OnStackReplacePercentage / 100;
      const intx osr_profile_threshold = osr_threshold * InterpreterProfilePercentage / 100;

      // For c1-only the (osr) threshold gates compilation directly; for
      // c2/jvmci-only it is the profiling threshold that applies.
      const intx threshold_log = log2i_graceful(CompilerConfig::is_c1_only() ? threshold : profile_threshold);
      const intx osr_threshold_log = log2i_graceful(CompilerConfig::is_c1_only() ? osr_threshold : osr_profile_threshold);

      if (Tier0InvokeNotifyFreqLog > threshold_log) {
        FLAG_SET_ERGO(Tier0InvokeNotifyFreqLog, MAX2<intx>(0, threshold_log));
      }

      // Note: Emulation oddity. The legacy policy limited the amount of callbacks from the
      // interpreter for backedge events to once every 1024 counter increments.
      // We simulate this behavior by limiting the backedge notification frequency to be
      // at least 2^10.
      if (Tier0BackedgeNotifyFreqLog > osr_threshold_log) {
        FLAG_SET_ERGO(Tier0BackedgeNotifyFreqLog, MAX2<intx>(10, osr_threshold_log));
      }
      // Adjust the tiered policy flags to approximate the legacy behavior.
      FLAG_SET_ERGO(Tier3InvocationThreshold, threshold);
      FLAG_SET_ERGO(Tier3MinInvocationThreshold, threshold);
      FLAG_SET_ERGO(Tier3CompileThreshold, threshold);
      FLAG_SET_ERGO(Tier3BackEdgeThreshold, osr_threshold);
      if (CompilerConfig::is_c2_or_jvmci_compiler_only()) {
        FLAG_SET_ERGO(Tier4InvocationThreshold, threshold);
        FLAG_SET_ERGO(Tier4MinInvocationThreshold, threshold);
        FLAG_SET_ERGO(Tier4CompileThreshold, threshold);
        FLAG_SET_ERGO(Tier4BackEdgeThreshold, osr_threshold);
        FLAG_SET_ERGO(Tier0ProfilingStartPercentage, InterpreterProfilePercentage);
      }
    } else {
      // Normal tiered mode, ignore legacy flags
    }
  }
  // Scale CompileThreshold
  // CompileThresholdScaling == 0.0 is equivalent to -Xint and leaves CompileThreshold unchanged.
  if (!FLAG_IS_DEFAULT(CompileThresholdScaling) && CompileThresholdScaling > 0.0 && CompileThreshold > 0) {
    FLAG_SET_ERGO(CompileThreshold, scaled_compile_threshold(CompileThreshold));
  }
}
291
292
293
void CompilerConfig::set_compilation_policy_flags() {
294
if (is_tiered()) {
295
// Increase the code cache size - tiered compiles a lot more.
296
if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
297
FLAG_SET_ERGO(ReservedCodeCacheSize,
298
MIN2(CODE_CACHE_DEFAULT_LIMIT, (size_t)ReservedCodeCacheSize * 5));
299
}
300
// Enable SegmentedCodeCache if tiered compilation is enabled, ReservedCodeCacheSize >= 240M
301
// and the code cache contains at least 8 pages (segmentation disables advantage of huge pages).
302
if (FLAG_IS_DEFAULT(SegmentedCodeCache) && ReservedCodeCacheSize >= 240*M &&
303
8 * CodeCache::page_size() <= ReservedCodeCacheSize) {
304
FLAG_SET_ERGO(SegmentedCodeCache, true);
305
}
306
if (Arguments::is_compiler_only()) { // -Xcomp
307
// Be much more aggressive in tiered mode with -Xcomp and exercise C2 more.
308
// We will first compile a level 3 version (C1 with full profiling), then do one invocation of it and
309
// compile a level 4 (C2) and then continue executing it.
310
if (FLAG_IS_DEFAULT(Tier3InvokeNotifyFreqLog)) {
311
FLAG_SET_CMDLINE(Tier3InvokeNotifyFreqLog, 0);
312
}
313
if (FLAG_IS_DEFAULT(Tier4InvocationThreshold)) {
314
FLAG_SET_CMDLINE(Tier4InvocationThreshold, 0);
315
}
316
}
317
}
318
319
320
if (CompileThresholdScaling < 0) {
321
vm_exit_during_initialization("Negative value specified for CompileThresholdScaling", NULL);
322
}
323
324
if (CompilationModeFlag::disable_intermediate()) {
325
if (FLAG_IS_DEFAULT(Tier0ProfilingStartPercentage)) {
326
FLAG_SET_DEFAULT(Tier0ProfilingStartPercentage, 33);
327
}
328
329
if (FLAG_IS_DEFAULT(Tier4InvocationThreshold)) {
330
FLAG_SET_DEFAULT(Tier4InvocationThreshold, 5000);
331
}
332
if (FLAG_IS_DEFAULT(Tier4MinInvocationThreshold)) {
333
FLAG_SET_DEFAULT(Tier4MinInvocationThreshold, 600);
334
}
335
if (FLAG_IS_DEFAULT(Tier4CompileThreshold)) {
336
FLAG_SET_DEFAULT(Tier4CompileThreshold, 10000);
337
}
338
if (FLAG_IS_DEFAULT(Tier4BackEdgeThreshold)) {
339
FLAG_SET_DEFAULT(Tier4BackEdgeThreshold, 15000);
340
}
341
342
if (FLAG_IS_DEFAULT(Tier3InvocationThreshold)) {
343
FLAG_SET_DEFAULT(Tier3InvocationThreshold, Tier4InvocationThreshold);
344
}
345
if (FLAG_IS_DEFAULT(Tier3MinInvocationThreshold)) {
346
FLAG_SET_DEFAULT(Tier3MinInvocationThreshold, Tier4MinInvocationThreshold);
347
}
348
if (FLAG_IS_DEFAULT(Tier3CompileThreshold)) {
349
FLAG_SET_DEFAULT(Tier3CompileThreshold, Tier4CompileThreshold);
350
}
351
if (FLAG_IS_DEFAULT(Tier3BackEdgeThreshold)) {
352
FLAG_SET_DEFAULT(Tier3BackEdgeThreshold, Tier4BackEdgeThreshold);
353
}
354
355
}
356
357
// Scale tiered compilation thresholds.
358
// CompileThresholdScaling == 0.0 is equivalent to -Xint and leaves compilation thresholds unchanged.
359
if (!FLAG_IS_DEFAULT(CompileThresholdScaling) && CompileThresholdScaling > 0.0) {
360
FLAG_SET_ERGO(Tier0InvokeNotifyFreqLog, scaled_freq_log(Tier0InvokeNotifyFreqLog));
361
FLAG_SET_ERGO(Tier0BackedgeNotifyFreqLog, scaled_freq_log(Tier0BackedgeNotifyFreqLog));
362
363
FLAG_SET_ERGO(Tier3InvocationThreshold, scaled_compile_threshold(Tier3InvocationThreshold));
364
FLAG_SET_ERGO(Tier3MinInvocationThreshold, scaled_compile_threshold(Tier3MinInvocationThreshold));
365
FLAG_SET_ERGO(Tier3CompileThreshold, scaled_compile_threshold(Tier3CompileThreshold));
366
FLAG_SET_ERGO(Tier3BackEdgeThreshold, scaled_compile_threshold(Tier3BackEdgeThreshold));
367
368
// Tier2{Invocation,MinInvocation,Compile,Backedge}Threshold should be scaled here
369
// once these thresholds become supported.
370
371
FLAG_SET_ERGO(Tier2InvokeNotifyFreqLog, scaled_freq_log(Tier2InvokeNotifyFreqLog));
372
FLAG_SET_ERGO(Tier2BackedgeNotifyFreqLog, scaled_freq_log(Tier2BackedgeNotifyFreqLog));
373
374
FLAG_SET_ERGO(Tier3InvokeNotifyFreqLog, scaled_freq_log(Tier3InvokeNotifyFreqLog));
375
FLAG_SET_ERGO(Tier3BackedgeNotifyFreqLog, scaled_freq_log(Tier3BackedgeNotifyFreqLog));
376
377
FLAG_SET_ERGO(Tier23InlineeNotifyFreqLog, scaled_freq_log(Tier23InlineeNotifyFreqLog));
378
379
FLAG_SET_ERGO(Tier4InvocationThreshold, scaled_compile_threshold(Tier4InvocationThreshold));
380
FLAG_SET_ERGO(Tier4MinInvocationThreshold, scaled_compile_threshold(Tier4MinInvocationThreshold));
381
FLAG_SET_ERGO(Tier4CompileThreshold, scaled_compile_threshold(Tier4CompileThreshold));
382
FLAG_SET_ERGO(Tier4BackEdgeThreshold, scaled_compile_threshold(Tier4BackEdgeThreshold));
383
}
384
385
#ifdef COMPILER1
386
// Reduce stack usage due to inlining of methods which require much stack.
387
// (High tier compiler can inline better based on profiling information.)
388
if (FLAG_IS_DEFAULT(C1InlineStackLimit) &&
389
TieredStopAtLevel == CompLevel_full_optimization && !CompilerConfig::is_c1_only()) {
390
FLAG_SET_DEFAULT(C1InlineStackLimit, 5);
391
}
392
#endif
393
394
if (CompilerConfig::is_tiered() && CompilerConfig::is_c2_enabled()) {
395
#ifdef COMPILER2
396
// Some inlining tuning
397
#ifdef X86
398
if (FLAG_IS_DEFAULT(InlineSmallCode)) {
399
FLAG_SET_DEFAULT(InlineSmallCode, 2500);
400
}
401
#endif
402
403
#if defined AARCH64
404
if (FLAG_IS_DEFAULT(InlineSmallCode)) {
405
FLAG_SET_DEFAULT(InlineSmallCode, 2500);
406
}
407
#endif
408
#endif // COMPILER2
409
}
410
411
}
412
413
#if INCLUDE_JVMCI
414
void CompilerConfig::set_jvmci_specific_flags() {
415
if (UseJVMCICompiler) {
416
if (FLAG_IS_DEFAULT(TypeProfileWidth)) {
417
FLAG_SET_DEFAULT(TypeProfileWidth, 8);
418
}
419
if (FLAG_IS_DEFAULT(TypeProfileLevel)) {
420
FLAG_SET_DEFAULT(TypeProfileLevel, 0);
421
}
422
423
if (UseJVMCINativeLibrary) {
424
// SVM compiled code requires more stack space
425
if (FLAG_IS_DEFAULT(CompilerThreadStackSize)) {
426
// Duplicate logic in the implementations of os::create_thread
427
// so that we can then double the computed stack size. Once
428
// the stack size requirements of SVM are better understood,
429
// this logic can be pushed down into os::create_thread.
430
int stack_size = CompilerThreadStackSize;
431
if (stack_size == 0) {
432
stack_size = VMThreadStackSize;
433
}
434
if (stack_size != 0) {
435
FLAG_SET_DEFAULT(CompilerThreadStackSize, stack_size * 2);
436
}
437
}
438
} else {
439
// JVMCI needs values not less than defaults
440
if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
441
FLAG_SET_DEFAULT(ReservedCodeCacheSize, MAX2(64*M, ReservedCodeCacheSize));
442
}
443
if (FLAG_IS_DEFAULT(InitialCodeCacheSize)) {
444
FLAG_SET_DEFAULT(InitialCodeCacheSize, MAX2(16*M, InitialCodeCacheSize));
445
}
446
if (FLAG_IS_DEFAULT(NewSizeThreadIncrease)) {
447
FLAG_SET_DEFAULT(NewSizeThreadIncrease, MAX2(4*K, NewSizeThreadIncrease));
448
}
449
if (FLAG_IS_DEFAULT(Tier3DelayOn)) {
450
// This effectively prevents the compile broker scheduling tier 2
451
// (i.e., limited C1 profiling) compilations instead of tier 3
452
// (i.e., full C1 profiling) compilations when the tier 4 queue
453
// backs up (which is quite likely when using a non-AOT compiled JVMCI
454
// compiler). The observation based on jargraal is that the downside
455
// of skipping full profiling is much worse for performance than the
456
// queue backing up.
457
FLAG_SET_DEFAULT(Tier3DelayOn, 100000);
458
}
459
} // !UseJVMCINativeLibrary
460
} // UseJVMCICompiler
461
}
462
#endif // INCLUDE_JVMCI
463
464
// Validate the mutual consistency of the compiler-related flags, printing
// errors or warnings as appropriate. 'status' carries the result of earlier
// argument checks; the combined result is returned (false means the VM
// should refuse to start).
bool CompilerConfig::check_args_consistency(bool status) {
  // Check lower bounds of the code cache
  // Template Interpreter code is approximately 3X larger in debug builds.
  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
  if (ReservedCodeCacheSize < InitialCodeCacheSize) {
    jio_fprintf(defaultStream::error_stream(),
                "Invalid ReservedCodeCacheSize: %dK. Must be at least InitialCodeCacheSize=%dK.\n",
                ReservedCodeCacheSize/K, InitialCodeCacheSize/K);
    status = false;
  } else if (ReservedCodeCacheSize < min_code_cache_size) {
    jio_fprintf(defaultStream::error_stream(),
                "Invalid ReservedCodeCacheSize=%dK. Must be at least %uK.\n", ReservedCodeCacheSize/K,
                min_code_cache_size/K);
    status = false;
  } else if (ReservedCodeCacheSize > CODE_CACHE_SIZE_LIMIT) {
    // Code cache size larger than CODE_CACHE_SIZE_LIMIT is not supported.
    jio_fprintf(defaultStream::error_stream(),
                "Invalid ReservedCodeCacheSize=%dM. Must be at most %uM.\n", ReservedCodeCacheSize/M,
                CODE_CACHE_SIZE_LIMIT/M);
    status = false;
  } else if (NonNMethodCodeHeapSize < min_code_cache_size) {
    jio_fprintf(defaultStream::error_stream(),
                "Invalid NonNMethodCodeHeapSize=%dK. Must be at least %uK.\n", NonNMethodCodeHeapSize/K,
                min_code_cache_size/K);
    status = false;
  }

#ifdef _LP64
  if (!FLAG_IS_DEFAULT(CICompilerCount) && !FLAG_IS_DEFAULT(CICompilerCountPerCPU) && CICompilerCountPerCPU) {
    warning("The VM option CICompilerCountPerCPU overrides CICompilerCount.");
  }
#endif

  if (BackgroundCompilation && ReplayCompiles) {
    // Compilation replay needs compiles to happen in the foreground.
    if (!FLAG_IS_DEFAULT(BackgroundCompilation)) {
      warning("BackgroundCompilation disabled due to ReplayCompiles option.");
    }
    FLAG_SET_CMDLINE(BackgroundCompilation, false);
  }

#ifdef COMPILER2
  if (PostLoopMultiversioning && !RangeCheckElimination) {
    if (!FLAG_IS_DEFAULT(PostLoopMultiversioning)) {
      warning("PostLoopMultiversioning disabled because RangeCheckElimination is disabled.");
    }
    FLAG_SET_CMDLINE(PostLoopMultiversioning, false);
  }
#endif // COMPILER2

  if (CompilerConfig::is_interpreter_only()) {
    // -Xint (or TieredStopAtLevel=0): force off everything that would
    // trigger JIT compilation, warning only if the user asked for it.
    if (UseCompiler) {
      if (!FLAG_IS_DEFAULT(UseCompiler)) {
        warning("UseCompiler disabled due to -Xint.");
      }
      FLAG_SET_CMDLINE(UseCompiler, false);
    }
    if (ProfileInterpreter) {
      if (!FLAG_IS_DEFAULT(ProfileInterpreter)) {
        warning("ProfileInterpreter disabled due to -Xint.");
      }
      FLAG_SET_CMDLINE(ProfileInterpreter, false);
    }
    if (TieredCompilation) {
      if (!FLAG_IS_DEFAULT(TieredCompilation)) {
        warning("TieredCompilation disabled due to -Xint.");
      }
      FLAG_SET_CMDLINE(TieredCompilation, false);
    }
#if INCLUDE_JVMCI
    if (EnableJVMCI) {
      if (!FLAG_IS_DEFAULT(EnableJVMCI) || !FLAG_IS_DEFAULT(UseJVMCICompiler)) {
        warning("JVMCI Compiler disabled due to -Xint.");
      }
      FLAG_SET_CMDLINE(EnableJVMCI, false);
      FLAG_SET_CMDLINE(UseJVMCICompiler, false);
    }
#endif
  } else {
#if INCLUDE_JVMCI
    status = status && JVMCIGlobals::check_jvmci_flags_are_consistent();
#endif
  }

  return status;
}
549
550
// Main entry point for compiler-flag ergonomics, called during argument
// processing (after GCConfig::initialize()). Chooses client emulation where
// appropriate, translates legacy flags, sets the tiered policy flags,
// applies JVMCI settings, and does final sweeper/OSR/C2 tuning.
void CompilerConfig::ergo_initialize() {
#if !COMPILER1_OR_COMPILER2
  // Interpreter-only build: nothing to configure.
  return;
#endif

  if (has_c1()) {
    if (!is_compilation_mode_selected()) {
#if defined(_WINDOWS) && !defined(_LP64)
      // 32-bit Windows historically defaults to the client VM.
      if (FLAG_IS_DEFAULT(NeverActAsServerClassMachine)) {
        FLAG_SET_ERGO(NeverActAsServerClassMachine, true);
      }
#endif
      if (NeverActAsServerClassMachine) {
        set_client_emulation_mode_flags();
      }
    } else if (!has_c2() && !is_jvmci_compiler()) {
      // C1 is the only compiler in this build.
      set_client_emulation_mode_flags();
    }
  }

  set_legacy_emulation_flags();
  set_compilation_policy_flags();

#if INCLUDE_JVMCI
  // Check that JVMCI supports selected GC.
  // Should be done after GCConfig::initialize() was called.
  JVMCIGlobals::check_jvmci_supported_gc();

  // Do JVMCI specific settings
  set_jvmci_specific_flags();
#endif

  if (FLAG_IS_DEFAULT(SweeperThreshold)) {
    if ((SweeperThreshold * ReservedCodeCacheSize / 100) > (1.2 * M)) {
      // Cap default SweeperThreshold value to an equivalent of 1.2 Mb
      FLAG_SET_ERGO(SweeperThreshold, (1.2 * M * 100) / ReservedCodeCacheSize);
    }
  }

  if (UseOnStackReplacement && !UseLoopCounter) {
    warning("On-stack-replacement requires loop counters; enabling loop counters");
    FLAG_SET_DEFAULT(UseLoopCounter, true);
  }

  if (ProfileInterpreter && CompilerConfig::is_c1_simple_only()) {
    // Simple C1 cannot consume interpreter profiles.
    if (!FLAG_IS_DEFAULT(ProfileInterpreter)) {
      warning("ProfileInterpreter disabled due to client emulation mode");
    }
    FLAG_SET_CMDLINE(ProfileInterpreter, false);
  }

#ifdef COMPILER2
  if (!EliminateLocks) {
    // Nested lock elimination depends on general lock elimination.
    EliminateNestedLocks = false;
  }
  if (!Inline || !IncrementalInline) {
    IncrementalInline = false;
    IncrementalInlineMH = false;
    IncrementalInlineVirtual = false;
  }
#ifndef PRODUCT
  if (!IncrementalInline) {
    AlwaysIncrementalInline = false;
  }
  if (FLAG_IS_CMDLINE(PrintIdealGraph) && !PrintIdealGraph) {
    // -XX:-PrintIdealGraph on the command line also suppresses the level.
    FLAG_SET_ERGO(PrintIdealGraphLevel, -1);
  }
#endif
  if (!UseTypeSpeculation && FLAG_IS_DEFAULT(TypeProfileLevel)) {
    // nothing to use the profiling, turn if off
    FLAG_SET_DEFAULT(TypeProfileLevel, 0);
  }
  if (!FLAG_IS_DEFAULT(OptoLoopAlignment) && FLAG_IS_DEFAULT(MaxLoopPad)) {
    FLAG_SET_DEFAULT(MaxLoopPad, OptoLoopAlignment-1);
  }
  if (FLAG_IS_DEFAULT(LoopStripMiningIterShortLoop)) {
    // blind guess
    LoopStripMiningIterShortLoop = LoopStripMiningIter / 10;
  }
#endif // COMPILER2
}
631
632
633