GitHub Repository: PojavLauncherTeam/jdk17u
Path: blob/master/src/hotspot/share/opto/compile.hpp
/*
 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_COMPILE_HPP
#define SHARE_OPTO_COMPILE_HPP

#include "asm/codeBuffer.hpp"
#include "ci/compilerInterface.hpp"
#include "code/debugInfoRec.hpp"
#include "compiler/compiler_globals.hpp"
#include "compiler/compilerOracle.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerEvent.hpp"
#include "libadt/dict.hpp"
#include "libadt/vectset.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/phasetype.hpp"
#include "opto/phase.hpp"
#include "opto/regmask.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/ticks.hpp"

class AbstractLockNode;
class AddPNode;
class Block;
class Bundle;
class CallGenerator;
class CloneMap;
class ConnectionGraph;
class IdealGraphPrinter;
class InlineTree;
class Int_Array;
class Matcher;
class MachConstantNode;
class MachConstantBaseNode;
class MachNode;
class MachOper;
class MachSafePointNode;
class Node;
class Node_Array;
class Node_List;
class Node_Notes;
class NodeCloneInfo;
class OptoReg;
class PhaseCFG;
class PhaseGVN;
class PhaseIterGVN;
class PhaseRegAlloc;
class PhaseCCP;
class PhaseOutput;
class RootNode;
class relocInfo;
class Scope;
class StartNode;
class SafePointNode;
class JVMState;
class Type;
class TypeData;
class TypeInt;
class TypeInteger;
class TypePtr;
class TypeOopPtr;
class TypeFunc;
class TypeVect;
class Unique_Node_List;
class nmethod;
class Node_Stack;
struct Final_Reshape_Counts;

enum LoopOptsMode {
  LoopOptsDefault,
  LoopOptsNone,
  LoopOptsMaxUnroll,
  LoopOptsShenandoahExpand,
  LoopOptsShenandoahPostExpand,
  LoopOptsSkipSplitIf,
  LoopOptsVerify
};

typedef unsigned int node_idx_t;
class NodeCloneInfo {
 private:
  uint64_t _idx_clone_orig;
 public:

  void set_idx(node_idx_t idx) {
    _idx_clone_orig = (_idx_clone_orig & CONST64(0xFFFFFFFF00000000)) | idx;
  }
  node_idx_t idx() const { return (node_idx_t)(_idx_clone_orig & 0xFFFFFFFF); }

  void set_gen(int generation) {
    uint64_t g = (uint64_t)generation << 32;
    _idx_clone_orig = (_idx_clone_orig & 0xFFFFFFFF) | g;
  }
  int gen() const { return (int)(_idx_clone_orig >> 32); }

  void set(uint64_t x) { _idx_clone_orig = x; }
  void set(node_idx_t x, int g) { set_idx(x); set_gen(g); }
  uint64_t get() const { return _idx_clone_orig; }

  NodeCloneInfo(uint64_t idx_clone_orig) : _idx_clone_orig(idx_clone_orig) {}
  NodeCloneInfo(node_idx_t x, int g) : _idx_clone_orig(0) { set(x, g); }

  void dump() const;
};
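
// Editor's sketch (illustrative, not part of the upstream source):
// NodeCloneInfo packs a 32-bit node index into the low word and a cloning
// generation into the high word of a single uint64_t, so one Dict value
// records a node's clone history. The packing round-trips losslessly:
//
//   NodeCloneInfo info(/*idx=*/42, /*gen=*/3);
//   assert(info.idx() == 42 && info.gen() == 3, "packing is lossless");
//   assert(info.get() == (((uint64_t)3 << 32) | 42), "layout: [gen:32][idx:32]");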

class CloneMap {
  friend class Compile;
 private:
  bool _debug;
  Dict* _dict;
  int _clone_idx; // current cloning iteration/generation in loop unroll
 public:
  void* _2p(node_idx_t key) const { return (void*)(intptr_t)key; } // 2 conversion functions to make gcc happy
  node_idx_t _2_node_idx_t(const void* k) const { return (node_idx_t)(intptr_t)k; }
  Dict* dict() const { return _dict; }
  void insert(node_idx_t key, uint64_t val) { assert(_dict->operator[](_2p(key)) == NULL, "key existed"); _dict->Insert(_2p(key), (void*)val); }
  void insert(node_idx_t key, NodeCloneInfo& ci) { insert(key, ci.get()); }
  void remove(node_idx_t key) { _dict->Delete(_2p(key)); }
  uint64_t value(node_idx_t key) const { return (uint64_t)_dict->operator[](_2p(key)); }
  node_idx_t idx(node_idx_t key) const { return NodeCloneInfo(value(key)).idx(); }
  int gen(node_idx_t key) const { return NodeCloneInfo(value(key)).gen(); }
  int gen(const void* k) const { return gen(_2_node_idx_t(k)); }
  int max_gen() const;
  void clone(Node* old, Node* nnn, int gen);
  void verify_insert_and_clone(Node* old, Node* nnn, int gen);
  void dump(node_idx_t key) const;

  int clone_idx() const { return _clone_idx; }
  void set_clone_idx(int x) { _clone_idx = x; }
  bool is_debug() const { return _debug; }
  void set_debug(bool debug) { _debug = debug; }
  static const char* debug_option_name;

  bool same_idx(node_idx_t k1, node_idx_t k2) const { return idx(k1) == idx(k2); }
  bool same_gen(node_idx_t k1, node_idx_t k2) const { return gen(k1) == gen(k2); }
};
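
// Editor's sketch (illustrative, not part of the upstream source): during
// loop unrolling each cloned node is registered here so that its original
// index and cloning generation can be recovered later. Roughly:
//
//   CloneMap& cm = C->clone_map();
//   cm.set_clone_idx(cm.clone_idx() + 1);                 // open a new generation
//   cm.verify_insert_and_clone(old_node, new_node, cm.clone_idx());
//   assert(cm.gen(new_node->_idx) == cm.clone_idx(), "generation recorded");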

//------------------------------Compile----------------------------------------
// This class defines a top-level Compiler invocation.

class Compile : public Phase {
  friend class VMStructs;

 public:
  // Fixed alias indexes. (See also MergeMemNode.)
  enum {
    AliasIdxTop = 1,  // pseudo-index, aliases to nothing (used as sentinel value)
    AliasIdxBot = 2,  // pseudo-index, aliases to everything
    AliasIdxRaw = 3   // hard-wired index for TypeRawPtr::BOTTOM
  };
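
  // Editor's sketch (illustrative, not part of the upstream source): an alias
  // index names one memory slice; it is derived from an address type and used
  // to select the matching input of a MergeMemNode, e.g.:
  //
  //   int alias_idx = C->get_alias_index(adr_type); // AliasIdxRaw for TypeRawPtr::BOTTOM
  //   Node* mem = merged_memory->memory_at(alias_idx);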

  // Variant of TraceTime(NULL, &_t_accumulator, CITime);
  // Integrated with logging. If logging is turned on, and CITimeVerbose is true,
  // then brackets are put into the log, with time stamps and node counts.
  // (The time collection itself is always conditionalized on CITime.)
  class TracePhase : public TraceTime {
   private:
    Compile*    C;
    CompileLog* _log;
    const char* _phase_name;
    bool        _dolog;
   public:
    TracePhase(const char* name, elapsedTimer* accumulator);
    ~TracePhase();
  };
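
  // Editor's sketch (illustrative, not part of the upstream source): a
  // TracePhase typically scopes one compiler phase; the constructor starts the
  // timer (and opens a log bracket), the destructor stops and closes it:
  //
  //   { TracePhase tp("optimizer", &timers[_t_optimizer]);
  //     Optimize();
  //   } // time charged to _t_optimizer; log bracket emitted if enabled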

  // Information per category of alias (memory slice)
  class AliasType {
   private:
    friend class Compile;

    int _index;                   // unique index, used with MergeMemNode
    const TypePtr* _adr_type;     // normalized address type
    ciField* _field;              // relevant instance field, or null if none
    const Type* _element;         // relevant array element type, or null if none
    bool _is_rewritable;          // false if the memory is write-once only
    int _general_index;           // if this type is an instance, the general
                                  // type that this is an instance of

    void Init(int i, const TypePtr* at);

   public:
    int index() const { return _index; }
    const TypePtr* adr_type() const { return _adr_type; }
    ciField* field() const { return _field; }
    const Type* element() const { return _element; }
    bool is_rewritable() const { return _is_rewritable; }
    bool is_volatile() const { return (_field ? _field->is_volatile() : false); }
    int general_index() const { return (_general_index != 0) ? _general_index : _index; }

    void set_rewritable(bool z) { _is_rewritable = z; }
    void set_field(ciField* f) {
      assert(!_field, "");
      _field = f;
      if (f->is_final() || f->is_stable()) {
        // In the case of @Stable, multiple writes are possible but may be assumed to be no-ops.
        _is_rewritable = false;
      }
    }
    void set_element(const Type* e) {
      assert(_element == NULL, "");
      _element = e;
    }

    BasicType basic_type() const;

    void print_on(outputStream* st) PRODUCT_RETURN;
  };

  enum {
    logAliasCacheSize = 6,
    AliasCacheSize = (1<<logAliasCacheSize)
  };
  struct AliasCacheEntry { const TypePtr* _adr_type; int _index; }; // simple duple type
  enum {
    trapHistLength = MethodData::_trap_hist_limit
  };
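
  // Editor's sketch (illustrative, not part of the upstream source):
  // _alias_cache (declared below) is a small direct-mapped cache in front of
  // the alias-type table; a lookup hashes the address-type pointer into one of
  // the AliasCacheSize (64) entries, roughly:
  //
  //   AliasCacheEntry* ace = &_alias_cache[((uintptr_t)adr_type >> 3) & (AliasCacheSize - 1)];
  //   if (ace->_adr_type == adr_type) return ace->_index; // hit: no table walk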

 private:
  // Fixed parameters to this compilation.
  const int _compile_id;
  const bool _subsume_loads;       // Load can be matched as part of a larger op.
  const bool _do_escape_analysis;  // Do escape analysis.
  const bool _install_code;       // Install the code that was compiled
  const bool _eliminate_boxing;   // Do boxing elimination.
  const bool _do_locks_coarsening; // Do locks coarsening
  ciMethod* _method;              // The method being compiled.
  int _entry_bci;                 // entry bci for osr methods.
  const TypeFunc* _tf;            // My kind of signature
  InlineTree* _ilt;               // Ditto (temporary).
  address _stub_function;         // VM entry for stub being compiled, or NULL
  const char* _stub_name;         // Name of stub or adapter being compiled, or NULL
  address _stub_entry_point;      // Compile code entry for generated stub, or NULL

  // Control of this compilation.
  int _max_inline_size;           // Max inline size for this compilation
  int _freq_inline_size;          // Max hot method inline size for this compilation
  int _fixed_slots;               // count of frame slots not allocated by the register
                                  // allocator i.e. locks, original deopt pc, etc.
  uintx _max_node_limit;          // Max unique node count during a single compilation.

  bool _post_loop_opts_phase;     // Loop opts are finished.

  int _major_progress;            // Count of something big happening
  bool _inlining_progress;        // progress doing incremental inlining?
  bool _inlining_incrementally;   // Are we doing incremental inlining (post parse)
  bool _do_cleanup;               // Cleanup is needed before proceeding with incremental inlining
  bool _has_loops;                // True if the method _may_ have some loops
  bool _has_split_ifs;            // True if the method _may_ have some split-if
  bool _has_unsafe_access;        // True if the method _may_ produce faults in unsafe loads or stores.
  bool _has_stringbuilder;        // True if StringBuffers or StringBuilders are allocated
  bool _has_boxed_value;          // True if a boxed object is allocated
  bool _has_reserved_stack_access; // True if the method or an inlined method is annotated with ReservedStackAccess
  uint _max_vector_size;          // Maximum size of generated vectors
  bool _clear_upper_avx;          // Clear upper bits of ymm registers using vzeroupper
  uint _trap_hist[trapHistLength]; // Cumulative traps
  bool _trap_can_recompile;       // Have we emitted a recompiling trap?
  uint _decompile_count;          // Cumulative decompilation counts.
  bool _do_inlining;              // True if we intend to do inlining
  bool _do_scheduling;            // True if we intend to do scheduling
  bool _do_freq_based_layout;     // True if we intend to do frequency based block layout
  bool _do_vector_loop;           // True if allowed to execute loop in parallel iterations
  bool _use_cmove;                // True if CMove should be used without profitability analysis
  bool _age_code;                 // True if we need to profile code age (decrement the aging counter)
  int _AliasLevel;                // Locally-adjusted version of AliasLevel flag.
  bool _print_assembly;           // True if we should dump assembly code for this compilation
  bool _print_inlining;           // True if we should print inlining for this compilation
  bool _print_intrinsics;         // True if we should print intrinsics for this compilation
#ifndef PRODUCT
  uint _igv_idx;                  // Counter for IGV node identifiers
  bool _trace_opto_output;
  bool _print_ideal;
  bool _parsed_irreducible_loop;  // True if ciTypeFlow detected irreducible loops during parsing
#endif
  bool _has_irreducible_loop;     // Found irreducible loops
  // JSR 292
  bool _has_method_handle_invokes; // True if this method has MethodHandle invokes.
  RTMState _rtm_state;            // State of Restricted Transactional Memory usage
  int _loop_opts_cnt;             // loop opts round
  bool _clinit_barrier_on_entry;  // True if clinit barrier is needed on nmethod entry
  uint _stress_seed;              // Seed for stress testing

  // Compilation environment.
  Arena _comp_arena;              // Arena with lifetime equivalent to Compile
  void* _barrier_set_state;       // Potential GC barrier state for Compile
  ciEnv* _env;                    // CI interface
  DirectiveSet* _directive;       // Compiler directive
  CompileLog* _log;               // from CompilerThread
  const char* _failure_reason;    // for record_failure/failing pattern
  GrowableArray<CallGenerator*> _intrinsics; // List of intrinsics.
  GrowableArray<Node*> _macro_nodes; // List of nodes which need to be expanded before matching.
  GrowableArray<Node*> _predicate_opaqs; // List of Opaque1 nodes for the loop predicates.
  GrowableArray<Node*> _skeleton_predicate_opaqs; // List of Opaque4 nodes for the loop skeleton predicates.
  GrowableArray<Node*> _expensive_nodes; // List of nodes that are expensive to compute and that we'd better not let the GVN freely common
  GrowableArray<Node*> _for_post_loop_igvn; // List of nodes for IGVN after loop opts are over
  GrowableArray<Node_List*> _coarsened_locks; // List of coarsened Lock and Unlock nodes
  ConnectionGraph* _congraph;
#ifndef PRODUCT
  IdealGraphPrinter* _printer;
  static IdealGraphPrinter* _debug_file_printer;
  static IdealGraphPrinter* _debug_network_printer;
#endif


  // Node management
  uint _unique;                   // Counter for unique Node indices
  VectorSet _dead_node_list;      // Set of dead nodes
  uint _dead_node_count;          // Number of dead nodes; VectorSet::Size() is O(N).
                                  // So use this to keep count and make the call O(1).
  DEBUG_ONLY(Unique_Node_List* _modified_nodes;) // List of nodes whose inputs were modified
  DEBUG_ONLY(bool _phase_optimize_finished;)     // Used for live node verification while creating new nodes

  debug_only(static int _debug_idx;) // Monotonic counter (not reset), use -XX:BreakAtNode=<idx>
  Arena _node_arena;              // Arena for new-space Nodes
  Arena _old_arena;               // Arena for old-space Nodes, lifetime during xform
  RootNode* _root;                // Unique root of compilation, or NULL after bail-out.
  Node* _top;                     // Unique top node. (Reset by various phases.)

  Node* _immutable_memory;        // Initial memory state

  Node* _recent_alloc_obj;
  Node* _recent_alloc_ctl;

  // Constant table
  MachConstantBaseNode* _mach_constant_base_node; // Constant table base node singleton.


  // Blocked array of debugging and profiling information,
  // tracked per node.
  enum { _log2_node_notes_block_size = 8,
         _node_notes_block_size = (1<<_log2_node_notes_block_size)
  };
  GrowableArray<Node_Notes*>* _node_note_array;
  Node_Notes* _default_node_notes; // default notes for new nodes
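  // Editor's sketch (illustrative, not part of the upstream source): notes
  // live in 256-entry blocks, so locating a node's notes is a lazily-grown
  // two-level lookup keyed by the node index:
  //
  //   Node_Notes* block = _node_note_array->at(idx >> _log2_node_notes_block_size);
  //   Node_Notes* nn = block + (idx & (_node_notes_block_size - 1));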

  // After parsing and every bulk phase we hang onto the Root instruction.
  // The RootNode instruction is where the whole program begins. It produces
  // the initial Control and BOTTOM for everybody else.

  // Type management
  Arena _Compile_types;           // Arena for all types
  Arena* _type_arena;             // Alias for _Compile_types except in Initialize_shared()
  Dict* _type_dict;               // Intern table
  CloneMap _clone_map;            // used for recording history of cloned nodes
  size_t _type_last_size;         // Last allocation size (see Type::operator new/delete)
  ciMethod* _last_tf_m;           // Cache for
  const TypeFunc* _last_tf;       //  TypeFunc::make
  AliasType** _alias_types;       // List of alias types seen so far.
  int _num_alias_types;           // Logical length of _alias_types
  int _max_alias_types;           // Physical length of _alias_types
  AliasCacheEntry _alias_cache[AliasCacheSize]; // Gets aliases w/o data structure walking

  // Parsing, optimization
  PhaseGVN* _initial_gvn;         // Results of parse-time PhaseGVN
  Unique_Node_List* _for_igvn;    // Initial work-list for next round of Iterative GVN

  GrowableArray<CallGenerator*> _late_inlines;        // List of CallGenerators to be revisited after main parsing has finished.
  GrowableArray<CallGenerator*> _string_late_inlines; // same but for string operations
  GrowableArray<CallGenerator*> _boxing_late_inlines; // same but for boxing operations

  GrowableArray<CallGenerator*> _vector_reboxing_late_inlines; // same but for vector reboxing operations

  int _late_inlines_pos;          // Where in the queue should the next late inlining candidate go (emulate depth first inlining)
  uint _number_of_mh_late_inlines; // number of method handle late inlining still pending

  GrowableArray<RuntimeStub*> _native_invokers;

  // Inlining may not happen in parse order which would make
  // PrintInlining output confusing. Keep track of PrintInlining
  // pieces in order.
  class PrintInliningBuffer : public CHeapObj<mtCompiler> {
   private:
    CallGenerator* _cg;
    stringStream _ss;
    static const size_t default_stream_buffer_size = 128;

   public:
    PrintInliningBuffer()
      : _cg(NULL), _ss(default_stream_buffer_size) {}

    stringStream* ss() { return &_ss; }
    CallGenerator* cg() { return _cg; }
    void set_cg(CallGenerator* cg) { _cg = cg; }
  };

  stringStream* _print_inlining_stream;
  GrowableArray<PrintInliningBuffer*>* _print_inlining_list;
  int _print_inlining_idx;
  char* _print_inlining_output;

  // Only keep nodes in the expensive node list that need to be optimized
  void cleanup_expensive_nodes(PhaseIterGVN &igvn);
  // Use for sorting expensive nodes to bring similar nodes together
  static int cmp_expensive_nodes(Node** n1, Node** n2);
  // Expensive nodes list already sorted?
  bool expensive_nodes_sorted() const;
  // Remove the speculative part of types and clean up the graph
  void remove_speculative_types(PhaseIterGVN &igvn);

  void* _replay_inline_data;      // Pointer to data loaded from file

  void print_inlining_stream_free();
  void print_inlining_init();
  void print_inlining_reinit();
  void print_inlining_commit();
  void print_inlining_push();
  PrintInliningBuffer* print_inlining_current();

  void log_late_inline_failure(CallGenerator* cg, const char* msg);
  DEBUG_ONLY(bool _exception_backedge;)

 public:

  void* barrier_set_state() const { return _barrier_set_state; }

  outputStream* print_inlining_stream() const {
    assert(print_inlining() || print_intrinsics(), "PrintInlining off?");
    return _print_inlining_stream;
  }

  void print_inlining_update(CallGenerator* cg);
  void print_inlining_update_delayed(CallGenerator* cg);
  void print_inlining_move_to(CallGenerator* cg);
  void print_inlining_assert_ready();
  void print_inlining_reset();

  void print_inlining(ciMethod* method, int inline_level, int bci, const char* msg = NULL) {
    stringStream ss;
    CompileTask::print_inlining_inner(&ss, method, inline_level, bci, msg);
    print_inlining_stream()->print("%s", ss.as_string());
  }

#ifndef PRODUCT
  IdealGraphPrinter* printer() { return _printer; }
#endif

  void log_late_inline(CallGenerator* cg);
  void log_inline_id(CallGenerator* cg);
  void log_inline_failure(const char* msg);

  void* replay_inline_data() const { return _replay_inline_data; }

  // Dump inlining replay data to the stream.
  void dump_inline_data(outputStream* out);

 private:
  // Matching, CFG layout, allocation, code generation
  PhaseCFG* _cfg;                 // Results of CFG finding
  int _java_calls;                // Number of java calls in the method
  int _inner_loops;               // Number of inner loops in the method
  Matcher* _matcher;              // Engine to map ideal to machine instructions
  PhaseRegAlloc* _regalloc;       // Results of register allocation.
  RegMask _FIRST_STACK_mask;      // All stack slots usable for spills (depends on frame layout)
  Arena* _indexSet_arena;         // control IndexSet allocation within PhaseChaitin
  void* _indexSet_free_block_list; // free list of IndexSet bit blocks
  int _interpreter_frame_size;

  PhaseOutput* _output;

 public:
  // Accessors

  // The Compile instance currently active in this (compiler) thread.
  static Compile* current() {
    return (Compile*) ciEnv::current()->compiler_data();
  }

  int interpreter_frame_size() const { return _interpreter_frame_size; }

  PhaseOutput* output() const { return _output; }
  void set_output(PhaseOutput* o) { _output = o; }

  // ID for this compilation. Useful for setting breakpoints in the debugger.
  int compile_id() const { return _compile_id; }
  DirectiveSet* directive() const { return _directive; }

  // Does this compilation allow instructions to subsume loads? User
  // instructions that subsume a load may result in an unschedulable
  // instruction sequence.
  bool subsume_loads() const { return _subsume_loads; }
  /** Do escape analysis. */
  bool do_escape_analysis() const { return _do_escape_analysis; }
  /** Do boxing elimination. */
  bool eliminate_boxing() const { return _eliminate_boxing; }
  /** Do aggressive boxing elimination. */
  bool aggressive_unboxing() const { return _eliminate_boxing && AggressiveUnboxing; }
  bool should_install_code() const { return _install_code; }
  /** Do locks coarsening. */
  bool do_locks_coarsening() const { return _do_locks_coarsening; }

  // Other fixed compilation parameters.
  ciMethod* method() const { return _method; }
  int entry_bci() const { return _entry_bci; }
  bool is_osr_compilation() const { return _entry_bci != InvocationEntryBci; }
  bool is_method_compilation() const { return (_method != NULL && !_method->flags().is_native()); }
  const TypeFunc* tf() const { assert(_tf!=NULL, ""); return _tf; }
  void init_tf(const TypeFunc* tf) { assert(_tf==NULL, ""); _tf = tf; }
  InlineTree* ilt() const { return _ilt; }
  address stub_function() const { return _stub_function; }
  const char* stub_name() const { return _stub_name; }
  address stub_entry_point() const { return _stub_entry_point; }
  void set_stub_entry_point(address z) { _stub_entry_point = z; }

  // Control of this compilation.
  int fixed_slots() const { assert(_fixed_slots >= 0, ""); return _fixed_slots; }
  void set_fixed_slots(int n) { _fixed_slots = n; }
  int major_progress() const { return _major_progress; }
  void set_inlining_progress(bool z) { _inlining_progress = z; }
  int inlining_progress() const { return _inlining_progress; }
  void set_inlining_incrementally(bool z) { _inlining_incrementally = z; }
  int inlining_incrementally() const { return _inlining_incrementally; }
  void set_do_cleanup(bool z) { _do_cleanup = z; }
  int do_cleanup() const { return _do_cleanup; }
  void set_major_progress() { _major_progress++; }
  void restore_major_progress(int progress) { _major_progress += progress; }
  void clear_major_progress() { _major_progress = 0; }
  int max_inline_size() const { return _max_inline_size; }
  void set_freq_inline_size(int n) { _freq_inline_size = n; }
  int freq_inline_size() const { return _freq_inline_size; }
  void set_max_inline_size(int n) { _max_inline_size = n; }
  bool has_loops() const { return _has_loops; }
  void set_has_loops(bool z) { _has_loops = z; }
  bool has_split_ifs() const { return _has_split_ifs; }
  void set_has_split_ifs(bool z) { _has_split_ifs = z; }
  bool has_unsafe_access() const { return _has_unsafe_access; }
  void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
  bool has_stringbuilder() const { return _has_stringbuilder; }
  void set_has_stringbuilder(bool z) { _has_stringbuilder = z; }
  bool has_boxed_value() const { return _has_boxed_value; }
  void set_has_boxed_value(bool z) { _has_boxed_value = z; }
  bool has_reserved_stack_access() const { return _has_reserved_stack_access; }
  void set_has_reserved_stack_access(bool z) { _has_reserved_stack_access = z; }
  uint max_vector_size() const { return _max_vector_size; }
  void set_max_vector_size(uint s) { _max_vector_size = s; }
  bool clear_upper_avx() const { return _clear_upper_avx; }
  void set_clear_upper_avx(bool s) { _clear_upper_avx = s; }
  void set_trap_count(uint r, uint c) { assert(r < trapHistLength, "oob"); _trap_hist[r] = c; }
  uint trap_count(uint r) const { assert(r < trapHistLength, "oob"); return _trap_hist[r]; }
  bool trap_can_recompile() const { return _trap_can_recompile; }
  void set_trap_can_recompile(bool z) { _trap_can_recompile = z; }
  uint decompile_count() const { return _decompile_count; }
  void set_decompile_count(uint c) { _decompile_count = c; }
  bool allow_range_check_smearing() const;
  bool do_inlining() const { return _do_inlining; }
  void set_do_inlining(bool z) { _do_inlining = z; }
  bool do_scheduling() const { return _do_scheduling; }
  void set_do_scheduling(bool z) { _do_scheduling = z; }
  bool do_freq_based_layout() const { return _do_freq_based_layout; }
  void set_do_freq_based_layout(bool z) { _do_freq_based_layout = z; }
  bool do_vector_loop() const { return _do_vector_loop; }
  void set_do_vector_loop(bool z) { _do_vector_loop = z; }
  bool use_cmove() const { return _use_cmove; }
  void set_use_cmove(bool z) { _use_cmove = z; }
  bool age_code() const { return _age_code; }
  void set_age_code(bool z) { _age_code = z; }
  int AliasLevel() const { return _AliasLevel; }
  bool print_assembly() const { return _print_assembly; }
  void set_print_assembly(bool z) { _print_assembly = z; }
  bool print_inlining() const { return _print_inlining; }
  void set_print_inlining(bool z) { _print_inlining = z; }
  bool print_intrinsics() const { return _print_intrinsics; }
  void set_print_intrinsics(bool z) { _print_intrinsics = z; }
  RTMState rtm_state() const { return _rtm_state; }
  void set_rtm_state(RTMState s) { _rtm_state = s; }
  bool use_rtm() const { return (_rtm_state & NoRTM) == 0; }
  bool profile_rtm() const { return _rtm_state == ProfileRTM; }
  uint max_node_limit() const { return (uint)_max_node_limit; }
  void set_max_node_limit(uint n) { _max_node_limit = n; }
  bool clinit_barrier_on_entry() { return _clinit_barrier_on_entry; }
  void set_clinit_barrier_on_entry(bool z) { _clinit_barrier_on_entry = z; }

  // check the CompilerOracle for special behaviours for this compile
  bool method_has_option(enum CompileCommand option) {
    return method() != NULL && method()->has_option(option);
  }

#ifndef PRODUCT
  uint next_igv_idx() { return _igv_idx++; }
  bool trace_opto_output() const { return _trace_opto_output; }
  bool print_ideal() const { return _print_ideal; }
  bool parsed_irreducible_loop() const { return _parsed_irreducible_loop; }
  void set_parsed_irreducible_loop(bool z) { _parsed_irreducible_loop = z; }
  int _in_dump_cnt;  // Required for dumping ir nodes.
#endif
  bool has_irreducible_loop() const { return _has_irreducible_loop; }
  void set_has_irreducible_loop(bool z) { _has_irreducible_loop = z; }

  // JSR 292
  bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
  void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }

  Ticks _latest_stage_start_counter;

  void begin_method(int level = 1) {
#ifndef PRODUCT
    if (_method != NULL && should_print(level)) {
      _printer->begin_method();
    }
#endif
    C->_latest_stage_start_counter.stamp();
  }

  bool should_print(int level = 1) {
#ifndef PRODUCT
    if (PrintIdealGraphLevel < 0) { // disabled by the user
      return false;
    }

    bool need = directive()->IGVPrintLevelOption >= level;
    if (need && !_printer) {
      _printer = IdealGraphPrinter::printer();
      assert(_printer != NULL, "_printer is NULL when we need it!");
      _printer->set_compile(this);
    }
    return need;
#else
    return false;
#endif
  }

  void print_method(CompilerPhaseType cpt, const char *name, int level = 1);
  void print_method(CompilerPhaseType cpt, int level = 1, int idx = 0);
  void print_method(CompilerPhaseType cpt, Node* n, int level = 3);

#ifndef PRODUCT
  void igv_print_method_to_file(const char* phase_name = "Debug", bool append = false);
  void igv_print_method_to_network(const char* phase_name = "Debug");
  static IdealGraphPrinter* debug_file_printer() { return _debug_file_printer; }
  static IdealGraphPrinter* debug_network_printer() { return _debug_network_printer; }
#endif

  void end_method(int level = 1);

  int macro_count() const { return _macro_nodes.length(); }
  int predicate_count() const { return _predicate_opaqs.length(); }
  int skeleton_predicate_count() const { return _skeleton_predicate_opaqs.length(); }
  int expensive_count() const { return _expensive_nodes.length(); }
  int coarsened_count() const { return _coarsened_locks.length(); }

  Node* macro_node(int idx) const { return _macro_nodes.at(idx); }
  Node* predicate_opaque1_node(int idx) const { return _predicate_opaqs.at(idx); }
  Node* skeleton_predicate_opaque4_node(int idx) const { return _skeleton_predicate_opaqs.at(idx); }
  Node* expensive_node(int idx) const { return _expensive_nodes.at(idx); }

  ConnectionGraph* congraph() { return _congraph; }
  void set_congraph(ConnectionGraph* congraph) { _congraph = congraph; }
  void add_macro_node(Node* n) {
    //assert(n->is_macro(), "must be a macro node");
    assert(!_macro_nodes.contains(n), "duplicate entry in expand list");
    _macro_nodes.append(n);
  }
  void remove_macro_node(Node* n) {
    // This function may be called twice for a node, so we can only remove it
    // if it still exists.
    _macro_nodes.remove_if_existing(n);
    // remove from _predicate_opaqs list also if it is there
    if (predicate_count() > 0) {
      _predicate_opaqs.remove_if_existing(n);
    }
    // Remove from coarsened locks list if present
    if (coarsened_count() > 0) {
      remove_coarsened_lock(n);
    }
  }
  void add_expensive_node(Node* n);
  void remove_expensive_node(Node* n) {
    _expensive_nodes.remove_if_existing(n);
  }
  void add_predicate_opaq(Node* n) {
    assert(!_predicate_opaqs.contains(n), "duplicate entry in predicate opaque1");
    assert(_macro_nodes.contains(n), "should have already been in macro list");
    _predicate_opaqs.append(n);
  }
  void add_skeleton_predicate_opaq(Node* n) {
    assert(!_skeleton_predicate_opaqs.contains(n), "duplicate entry in skeleton predicate opaque4 list");
    _skeleton_predicate_opaqs.append(n);
  }
  void remove_skeleton_predicate_opaq(Node* n) {
    if (skeleton_predicate_count() > 0) {
      _skeleton_predicate_opaqs.remove_if_existing(n);
    }
  }
  void add_coarsened_locks(GrowableArray<AbstractLockNode*>& locks);
  void remove_coarsened_lock(Node* n);
  bool coarsened_locks_consistent();

  bool post_loop_opts_phase() { return _post_loop_opts_phase; }
  void set_post_loop_opts_phase() { _post_loop_opts_phase = true; }
  void reset_post_loop_opts_phase() { _post_loop_opts_phase = false; }

  void record_for_post_loop_opts_igvn(Node* n);
  void remove_from_post_loop_opts_igvn(Node* n);
  void process_for_post_loop_opts_igvn(PhaseIterGVN& igvn);

  void sort_macro_nodes();

  // Remove the opaque nodes that protect the predicates so that the unused
  // checks and uncommon traps will be eliminated from the graph.
  void cleanup_loop_predicates(PhaseIterGVN &igvn);
  bool is_predicate_opaq(Node* n) {
    return _predicate_opaqs.contains(n);
  }

  // Are there candidate expensive nodes for optimization?
  bool should_optimize_expensive_nodes(PhaseIterGVN &igvn);
  // Check whether n1 and n2 are similar
  static int cmp_expensive_nodes(Node* n1, Node* n2);
  // Sort expensive nodes to locate similar expensive nodes
  void sort_expensive_nodes();

  // Compilation environment.
  Arena* comp_arena() { return &_comp_arena; }
  ciEnv* env() const { return _env; }
  CompileLog* log() const { return _log; }
  bool failing() const { return _env->failing() || _failure_reason != NULL; }
  const char* failure_reason() const { return (_env->failing()) ? _env->failure_reason() : _failure_reason; }

  bool failure_reason_is(const char* r) const {
    return (r == _failure_reason) || (r != NULL && _failure_reason != NULL && strcmp(r, _failure_reason) == 0);
  }

  void record_failure(const char* reason);
  void record_method_not_compilable(const char* reason) {
    env()->record_method_not_compilable(reason);
    // Record failure reason.
    record_failure(reason);
  }
  bool check_node_count(uint margin, const char* reason) {
    if (live_nodes() + margin > max_node_limit()) {
      record_method_not_compilable(reason);
      return true;
    } else {
      return false;
    }
  }

  // Node management
  uint unique() const { return _unique; }
  uint next_unique() { return _unique++; }
  void set_unique(uint i) { _unique = i; }
  static int debug_idx() { return debug_only(_debug_idx)+0; }
  static void set_debug_idx(int i) { debug_only(_debug_idx = i); }
  Arena* node_arena() { return &_node_arena; }
  Arena* old_arena() { return &_old_arena; }
  RootNode* root() const { return _root; }
  void set_root(RootNode* r) { _root = r; }
  StartNode* start() const; // (Derived from root.)
  void init_start(StartNode* s);
  Node* immutable_memory();

  Node* recent_alloc_ctl() const { return _recent_alloc_ctl; }
  Node* recent_alloc_obj() const { return _recent_alloc_obj; }
  void set_recent_alloc(Node* ctl, Node* obj) {
    _recent_alloc_ctl = ctl;
    _recent_alloc_obj = obj;
  }
  void record_dead_node(uint idx) {
    if (_dead_node_list.test_set(idx)) return;
    _dead_node_count++;
  }
  void reset_dead_node_list() {
    _dead_node_list.reset();
    _dead_node_count = 0;
  }
  uint live_nodes() const {
    int val = _unique - _dead_node_count;
    assert(val >= 0, "number of tracked dead nodes %d more than created nodes %d", _dead_node_count, _unique);
    return (uint) val;
  }
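  // Editor's note (illustrative, not part of the upstream source): keeping
  // _dead_node_count next to the VectorSet makes this O(1); after creating 100
  // nodes (unique() == 100) and recording 10 dead ones, live_nodes() returns
  // 90 without walking _dead_node_list.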
#ifdef ASSERT
  void set_phase_optimize_finished() { _phase_optimize_finished = true; }
  bool phase_optimize_finished() const { return _phase_optimize_finished; }
  uint count_live_nodes_by_graph_walk();
  void print_missing_nodes();
#endif

  // Record modified nodes to check that they are put on IGVN worklist
  void record_modified_node(Node* n) NOT_DEBUG_RETURN;
  void remove_modified_node(Node* n) NOT_DEBUG_RETURN;
  DEBUG_ONLY( Unique_Node_List* modified_nodes() const { return _modified_nodes; } )

  MachConstantBaseNode* mach_constant_base_node();
  bool has_mach_constant_base_node() const { return _mach_constant_base_node != NULL; }
  // Generated by adlc, true if CallNode requires MachConstantBase.
  bool needs_deep_clone_jvms();

  // Handy undefined Node
  Node* top() const { return _top; }

  // These are used by callers that need to know about creation and transformation of top:
  Node* cached_top_node() { return _top; }
  void set_cached_top_node(Node* tn);

  GrowableArray<Node_Notes*>* node_note_array() const { return _node_note_array; }
  void set_node_note_array(GrowableArray<Node_Notes*>* arr) { _node_note_array = arr; }
  Node_Notes* default_node_notes() const { return _default_node_notes; }
  void set_default_node_notes(Node_Notes* n) { _default_node_notes = n; }

  Node_Notes* node_notes_at(int idx) {
    return locate_node_notes(_node_note_array, idx, false);
  }
  inline bool set_node_notes_at(int idx, Node_Notes* value);

  // Copy notes from source to dest, if they exist.
  // Overwrite dest only if source provides something.
  // Return true if information was moved.
  bool copy_node_notes_to(Node* dest, Node* source);

  // Workhorse function to sort out the blocked Node_Notes array:
  inline Node_Notes* locate_node_notes(GrowableArray<Node_Notes*>* arr,
                                       int idx, bool can_grow = false);

  void grow_node_notes(GrowableArray<Node_Notes*>* arr, int grow_by);

  // Type management
  Arena* type_arena() { return _type_arena; }
  Dict* type_dict() { return _type_dict; }
  size_t type_last_size() { return _type_last_size; }
  int num_alias_types() { return _num_alias_types; }

  void init_type_arena() { _type_arena = &_Compile_types; }
  void set_type_arena(Arena* a) { _type_arena = a; }
  void set_type_dict(Dict* d) { _type_dict = d; }
  void set_type_last_size(size_t sz) { _type_last_size = sz; }

  const TypeFunc* last_tf(ciMethod* m) {
    return (m == _last_tf_m) ? _last_tf : NULL;
  }
  void set_last_tf(ciMethod* m, const TypeFunc* tf) {
    assert(m != NULL || tf == NULL, "");
    _last_tf_m = m;
    _last_tf = tf;
  }

  AliasType* alias_type(int idx) { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; }
  AliasType* alias_type(const TypePtr* adr_type, ciField* field = NULL) { return find_alias_type(adr_type, false, field); }
  bool have_alias_type(const TypePtr* adr_type);
  AliasType* alias_type(ciField* field);

  int get_alias_index(const TypePtr* at) { return alias_type(at)->index(); }
  const TypePtr* get_adr_type(uint aidx) { return alias_type(aidx)->adr_type(); }
  int get_general_index(uint aidx) { return alias_type(aidx)->general_index(); }

  // Building nodes
  void rethrow_exceptions(JVMState* jvms);
  void return_values(JVMState* jvms);
  JVMState* build_start_state(StartNode* start, const TypeFunc* tf);

  // Decide how to build a call.
  // The profile factor is a discount to apply to this site's interp. profile.
  CallGenerator* call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch,
                                JVMState* jvms, bool allow_inline, float profile_factor, ciKlass* speculative_receiver_type = NULL,
                                bool allow_intrinsics = true);
  bool should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
    return should_delay_string_inlining(call_method, jvms) ||
           should_delay_boxing_inlining(call_method, jvms) ||
           should_delay_vector_inlining(call_method, jvms);
  }
  bool should_delay_string_inlining(ciMethod* call_method, JVMState* jvms);
  bool should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms);
  bool should_delay_vector_inlining(ciMethod* call_method, JVMState* jvms);
  bool should_delay_vector_reboxing_inlining(ciMethod* call_method, JVMState* jvms);

  // Helper functions to identify inlining potential at call-site
  ciMethod* optimize_virtual_call(ciMethod* caller, ciInstanceKlass* klass,
                                  ciKlass* holder, ciMethod* callee,
                                  const TypeOopPtr* receiver_type, bool is_virtual,
                                  bool &call_does_dispatch, int &vtable_index,
                                  bool check_access = true);
  ciMethod* optimize_inlining(ciMethod* caller, ciInstanceKlass* klass, ciKlass* holder,
                              ciMethod* callee, const TypeOopPtr* receiver_type,
                              bool check_access = true);

  // Report if there were too many traps at a current method and bci.
  // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
  // If there is no MDO at all, report no trap unless told to assume it.
  bool too_many_traps(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
  // This version, unspecific to a particular bci, asks if
  // PerMethodTrapLimit was exceeded for all inlined methods seen so far.
  bool too_many_traps(Deoptimization::DeoptReason reason,
                      // Privately used parameter for logging:
                      ciMethodData* logmd = NULL);
  // Report if there were too many recompiles at a method and bci.
  bool too_many_recompiles(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
  // Report if there were too many traps or recompiles at a method and bci.
  bool too_many_traps_or_recompiles(ciMethod* method, int bci, Deoptimization::DeoptReason reason) {
    return too_many_traps(method, bci, reason) ||
           too_many_recompiles(method, bci, reason);
  }
  // Return a bitset with the reasons where deoptimization is allowed,
  // i.e., where there were not too many uncommon traps.
  int _allowed_reasons;
  int allowed_deopt_reasons() { return _allowed_reasons; }
  void set_allowed_deopt_reasons();

  // Parsing, optimization
  PhaseGVN* initial_gvn() { return _initial_gvn; }
  Unique_Node_List* for_igvn() { return _for_igvn; }
  inline void record_for_igvn(Node* n); // Body is after class Unique_Node_List.
  void set_initial_gvn(PhaseGVN *gvn) { _initial_gvn = gvn; }
  void set_for_igvn(Unique_Node_List *for_igvn) { _for_igvn = for_igvn; }

  // Replace n by nn using initial_gvn, calling hash_delete and
  // record_for_igvn as needed.
  void gvn_replace_by(Node* n, Node* nn);

  void identify_useful_nodes(Unique_Node_List &useful);
  void update_dead_node_list(Unique_Node_List &useful);
  void remove_useless_nodes(Unique_Node_List &useful);

  void remove_useless_node(Node* dead);

  // Record this CallGenerator for inlining at the end of parsing.
  void add_late_inline(CallGenerator* cg) {
    _late_inlines.insert_before(_late_inlines_pos, cg);
    _late_inlines_pos++;
  }

  void prepend_late_inline(CallGenerator* cg) {
    _late_inlines.insert_before(0, cg);
  }

  void add_string_late_inline(CallGenerator* cg) {
    _string_late_inlines.push(cg);
  }

  void add_boxing_late_inline(CallGenerator* cg) {
    _boxing_late_inlines.push(cg);
  }

  void add_vector_reboxing_late_inline(CallGenerator* cg) {
    _vector_reboxing_late_inlines.push(cg);
  }

  void add_native_invoker(RuntimeStub* stub);

  const GrowableArray<RuntimeStub*> native_invokers() const { return _native_invokers; }

  void remove_useless_nodes(GrowableArray<Node*>& node_list, Unique_Node_List &useful);

  void remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List &useful);
  void remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Node* dead);

  void remove_useless_coarsened_locks(Unique_Node_List& useful);

  void process_print_inlining();
  void dump_print_inlining();

  bool over_inlining_cutoff() const {
    if (!inlining_incrementally()) {
      return unique() > (uint)NodeCountInliningCutoff;
    } else {
      // Give some room for incremental inlining algorithm to "breathe"
      // and avoid thrashing when live node count is close to the limit.
      // Keep in mind that live_nodes() isn't accurate during inlining until
      // dead node elimination step happens (see Compile::inline_incrementally).
      return live_nodes() > (uint)LiveNodeCountInliningCutoff * 11 / 10;
    }
  }
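  // Editor's note (illustrative, not part of the upstream source): the 11/10
  // factor gives incremental inlining roughly 10% headroom; with the default
  // LiveNodeCountInliningCutoff of 40000 the effective limit is 44000 live nodes.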

  void inc_number_of_mh_late_inlines() { _number_of_mh_late_inlines++; }
  void dec_number_of_mh_late_inlines() { assert(_number_of_mh_late_inlines > 0, "_number_of_mh_late_inlines < 0 !"); _number_of_mh_late_inlines--; }
  bool has_mh_late_inlines() const { return _number_of_mh_late_inlines > 0; }

  bool inline_incrementally_one();
  void inline_incrementally_cleanup(PhaseIterGVN& igvn);
  void inline_incrementally(PhaseIterGVN& igvn);
  void inline_string_calls(bool parse_time);
  void inline_boxing_calls(PhaseIterGVN& igvn);
  bool optimize_loops(PhaseIterGVN& igvn, LoopOptsMode mode);
  void remove_root_to_sfpts_edges(PhaseIterGVN& igvn);

  void inline_vector_reboxing_calls();
  bool has_vbox_nodes();

  void process_late_inline_calls_no_inline(PhaseIterGVN& igvn);

  // Matching, CFG layout, allocation, code generation
  PhaseCFG* cfg() { return _cfg; }
  bool has_java_calls() const { return _java_calls > 0; }
  int java_calls() const { return _java_calls; }
  int inner_loops() const { return _inner_loops; }
  Matcher* matcher() { return _matcher; }
  PhaseRegAlloc* regalloc() { return _regalloc; }
  RegMask& FIRST_STACK_mask() { return _FIRST_STACK_mask; }
  Arena* indexSet_arena() { return _indexSet_arena; }
  void* indexSet_free_block_list() { return _indexSet_free_block_list; }
  DebugInformationRecorder* debug_info() { return env()->debug_info(); }

  void update_interpreter_frame_size(int size) {
    if (_interpreter_frame_size < size) {
      _interpreter_frame_size = size;
    }
  }

  void set_matcher(Matcher* m) { _matcher = m; }
  //void set_regalloc(PhaseRegAlloc* ra) { _regalloc = ra; }
  void set_indexSet_arena(Arena* a) { _indexSet_arena = a; }
  void set_indexSet_free_block_list(void* p) { _indexSet_free_block_list = p; }

  void set_java_calls(int z) { _java_calls = z; }
  void set_inner_loops(int z) { _inner_loops = z; }

  Dependencies* dependencies() { return env()->dependencies(); }

  // Major entry point. Given a Scope, compile the associated method.
  // For normal compilations, entry_bci is InvocationEntryBci. For on stack
  // replacement, entry_bci indicates the bytecode for which to compile a
  // continuation.
  Compile(ciEnv* ci_env, ciMethod* target,
          int entry_bci, bool subsume_loads, bool do_escape_analysis,
          bool eliminate_boxing, bool do_locks_coarsening,
          bool install_code, DirectiveSet* directive);

  // Second major entry point. From the TypeFunc signature, generate code
  // to pass arguments from the Java calling convention to the C calling
  // convention.
  Compile(ciEnv* ci_env, const TypeFunc *(*gen)(),
          address stub_function, const char *stub_name,
          int is_fancy_jump, bool pass_tls,
          bool return_pc, DirectiveSet* directive);

  // Are we compiling a method?
  bool has_method() { return method() != NULL; }

  // Maybe print some information about this compile.
  void print_compile_messages();

  // Final graph reshaping, a post-pass after the regular optimizer is done.
  bool final_graph_reshaping();

  // returns true if adr is completely contained in the given alias category
  bool must_alias(const TypePtr* adr, int alias_idx);

  // returns true if adr overlaps with the given alias category
  bool can_alias(const TypePtr* adr, int alias_idx);

  // Stack slots that may be unused by the calling convention but must
  // otherwise be preserved. On Intel this includes the return address.
  // On PowerPC it includes the 4 words holding the old TOC & LR glue.
  uint in_preserve_stack_slots() {
    return SharedRuntime::in_preserve_stack_slots();
  }

  // "Top of Stack" slots that may be unused by the calling convention but must
  // otherwise be preserved.
  // On Intel these are not necessary and the value can be zero.
  static uint out_preserve_stack_slots() {
    return SharedRuntime::out_preserve_stack_slots();
  }

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C. Supports the var-args backing area for register parms.
  uint varargs_C_out_slots_killed() const;

  // Number of Stack Slots consumed by a synchronization entry
  int sync_stack_slots() const;

  // Compute the name of old_SP. See <arch>.ad for frame layout.
  OptoReg::Name compute_old_SP();

 private:
  // Phase control:
  void Init(int aliaslevel); // Prepare for a single compilation
  int Inline_Warm();         // Find more inlining work.
  void Finish_Warm();        // Give up on further inlines.
  void Optimize();           // Given a graph, optimize it
  void Code_Gen();           // Generate code from a graph

  // Management of the AliasType table.
  void grow_alias_types();
  AliasCacheEntry* probe_alias_cache(const TypePtr* adr_type);
  const TypePtr *flatten_alias_type(const TypePtr* adr_type) const;
  AliasType* find_alias_type(const TypePtr* adr_type, bool no_create, ciField* field);

  void verify_top(Node*) const PRODUCT_RETURN;

  // Intrinsic setup.
  CallGenerator* make_vm_intrinsic(ciMethod* m, bool is_virtual); // constructor
  int intrinsic_insertion_index(ciMethod* m, bool is_virtual, bool& found); // helper
  CallGenerator* find_intrinsic(ciMethod* m, bool is_virtual); // query fn
  void register_intrinsic(CallGenerator* cg); // update fn

#ifndef PRODUCT
  static juint _intrinsic_hist_count[];
  static jubyte _intrinsic_hist_flags[];
#endif
  // Function calls made by the public function final_graph_reshaping.
  // No need to be made public as they are not called elsewhere.
  void final_graph_reshaping_impl(Node *n, Final_Reshape_Counts &frc);
  void final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& frc, uint nop);
  void final_graph_reshaping_walk(Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc);
  void eliminate_redundant_card_marks(Node* n);

  // Logic cone optimization.
  void optimize_logic_cones(PhaseIterGVN &igvn);
  void collect_logic_cone_roots(Unique_Node_List& list);
  void process_logic_cone_root(PhaseIterGVN &igvn, Node* n, VectorSet& visited);
  bool compute_logic_cone(Node* n, Unique_Node_List& partition, Unique_Node_List& inputs);
  uint compute_truth_table(Unique_Node_List& partition, Unique_Node_List& inputs);
  uint eval_macro_logic_op(uint func, uint op1, uint op2, uint op3);
  Node* xform_to_MacroLogicV(PhaseIterGVN &igvn, const TypeVect* vt, Unique_Node_List& partitions, Unique_Node_List& inputs);
  void check_no_dead_use() const NOT_DEBUG_RETURN;

 public:

  // Note: Histogram array size is about 1 Kb.
  enum {                      // flag bits:
    _intrinsic_worked = 1,    // succeeded at least once
    _intrinsic_failed = 2,    // tried it but it failed
    _intrinsic_disabled = 4,  // was requested but disabled (e.g., -XX:-InlineUnsafeOps)
    _intrinsic_virtual = 8,   // was seen in the virtual form (rare)
    _intrinsic_both = 16      // was seen in the non-virtual form (usual)
  };
  // Update histogram. Return boolean if this is a first-time occurrence.
  static bool gather_intrinsic_statistics(vmIntrinsics::ID id,
                                          bool is_virtual, int flags) PRODUCT_RETURN0;
  static void print_intrinsic_statistics() PRODUCT_RETURN;

  // Graph verification code
  // Walk the node list, verifying that there is a one-to-one
  // correspondence between Use-Def edges and Def-Use edges
  // The option no_dead_code enables stronger checks that the
  // graph is strongly connected from root in both directions.
  void verify_graph_edges(bool no_dead_code = false) PRODUCT_RETURN;

  // End-of-run dumps.
  static void print_statistics() PRODUCT_RETURN;

  // Verify ADLC assumptions during startup
  static void adlc_verification() PRODUCT_RETURN;

  // Definitions of pd methods
  static void pd_compiler2_init();

  // Static parse-time type checking logic for gen_subtype_check:
  enum { SSC_always_false, SSC_always_true, SSC_easy_test, SSC_full_test };
  int static_subtype_check(ciKlass* superk, ciKlass* subk);

  static Node* conv_I2X_index(PhaseGVN* phase, Node* offset, const TypeInt* sizetype,
                              // Optional control dependency (for example, on range check)
                              Node* ctrl = NULL);

  // Convert integer value to a narrowed long type dependent on ctrl (for example, a range check)
  static Node* constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl, bool carry_dependency = false);

  // Auxiliary methods for randomized fuzzing/stressing
  int random();
  bool randomized_select(int count);

  // supporting clone_map
  CloneMap& clone_map();
  void set_clone_map(Dict* d);

  bool needs_clinit_barrier(ciField* ik, ciMethod* accessing_method);
  bool needs_clinit_barrier(ciMethod* ik, ciMethod* accessing_method);
  bool needs_clinit_barrier(ciInstanceKlass* ik, ciMethod* accessing_method);

#ifdef IA32
 private:
  bool _select_24_bit_instr; // We selected an instruction with a 24-bit result
  bool _in_24_bit_fp_mode;   // We are emitting instructions with 24-bit results

  // Remember if this compilation changes hardware mode to 24-bit precision.
  void set_24_bit_selection_and_mode(bool selection, bool mode) {
    _select_24_bit_instr = selection;
    _in_24_bit_fp_mode = mode;
  }

 public:
  bool select_24_bit_instr() const { return _select_24_bit_instr; }
  bool in_24_bit_fp_mode() const { return _in_24_bit_fp_mode; }
#endif // IA32
#ifdef ASSERT
  bool _type_verify_symmetry;
  void set_exception_backedge() { _exception_backedge = true; }
  bool has_exception_backedge() const { return _exception_backedge; }
#endif

  static bool push_thru_add(PhaseGVN* phase, Node* z, const TypeInteger* tz, const TypeInteger*& rx, const TypeInteger*& ry,
                            BasicType bt);

  static Node* narrow_value(BasicType bt, Node* value, const Type* type, PhaseGVN* phase, bool transform_res);
};

#endif // SHARE_OPTO_COMPILE_HPP