GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/opto/compile.cpp
/*
 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm_io.h"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "ci/ciReplay.hpp"
#include "classfile/javaClasses.hpp"
#include "code/exceptionHandlerTable.hpp"
#include "code/nmethod.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/resourceArea.hpp"
#include "opto/addnode.hpp"
#include "opto/block.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/compile.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/divnode.hpp"
#include "opto/escape.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/macro.hpp"
#include "opto/matcher.hpp"
#include "opto/mathexactnode.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/output.hpp"
#include "opto/parse.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/stringopts.hpp"
#include "opto/type.hpp"
#include "opto/vector.hpp"
#include "opto/vectornode.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timer.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/macros.hpp"
#include "utilities/resourceHash.hpp"

// -------------------- Compile::mach_constant_base_node -----------------------
86
// Constant table base node singleton.
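// Created lazily on first request, with the root node added as its required input.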
87
MachConstantBaseNode* Compile::mach_constant_base_node() {
88
if (_mach_constant_base_node == NULL) {
89
_mach_constant_base_node = new MachConstantBaseNode();
90
_mach_constant_base_node->add_req(C->root());
91
}
92
return _mach_constant_base_node;
93
}
94
95
96
/// Support for intrinsics.
97
98
// Return the index at which m must be inserted (or already exists).
99
// The sort order is by the address of the ciMethod, with is_virtual as minor key.
100
class IntrinsicDescPair {
101
private:
102
ciMethod* _m;
103
bool _is_virtual;
104
public:
105
IntrinsicDescPair(ciMethod* m, bool is_virtual) : _m(m), _is_virtual(is_virtual) {}
106
static int compare(IntrinsicDescPair* const& key, CallGenerator* const& elt) {
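// Comparator contract: returns <0, 0, or >0, as expected by GrowableArray::find_sorted over the sorted _intrinsics list.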
ciMethod* m = elt->method();
108
ciMethod* key_m = key->_m;
109
if (key_m < m) return -1;
110
else if (key_m > m) return 1;
111
else {
112
bool is_virtual = elt->is_virtual();
113
bool key_virtual = key->_is_virtual;
114
if (key_virtual < is_virtual) return -1;
115
else if (key_virtual > is_virtual) return 1;
116
else return 0;
117
}
118
}
119
};
120
int Compile::intrinsic_insertion_index(ciMethod* m, bool is_virtual, bool& found) {
121
#ifdef ASSERT
122
for (int i = 1; i < _intrinsics.length(); i++) {
123
CallGenerator* cg1 = _intrinsics.at(i-1);
124
CallGenerator* cg2 = _intrinsics.at(i);
125
assert(cg1->method() != cg2->method()
126
? cg1->method() < cg2->method()
127
: cg1->is_virtual() < cg2->is_virtual(),
128
"compiler intrinsics list must stay sorted");
129
}
130
#endif
131
IntrinsicDescPair pair(m, is_virtual);
132
return _intrinsics.find_sorted<IntrinsicDescPair*, IntrinsicDescPair::compare>(&pair, found);
133
}
134
135
void Compile::register_intrinsic(CallGenerator* cg) {
136
bool found = false;
137
int index = intrinsic_insertion_index(cg->method(), cg->is_virtual(), found);
138
assert(!found, "registering twice");
139
_intrinsics.insert_before(index, cg);
140
assert(find_intrinsic(cg->method(), cg->is_virtual()) == cg, "registration worked");
141
}
142
143
CallGenerator* Compile::find_intrinsic(ciMethod* m, bool is_virtual) {
144
assert(m->is_loaded(), "don't try this on unloaded methods");
145
if (_intrinsics.length() > 0) {
146
bool found = false;
147
int index = intrinsic_insertion_index(m, is_virtual, found);
148
if (found) {
149
return _intrinsics.at(index);
150
}
151
}
152
// Lazily create intrinsics for intrinsic IDs well-known in the runtime.
153
if (m->intrinsic_id() != vmIntrinsics::_none &&
154
m->intrinsic_id() <= vmIntrinsics::LAST_COMPILER_INLINE) {
155
CallGenerator* cg = make_vm_intrinsic(m, is_virtual);
156
if (cg != NULL) {
157
// Save it for next time:
158
register_intrinsic(cg);
159
return cg;
160
} else {
161
gather_intrinsic_statistics(m->intrinsic_id(), is_virtual, _intrinsic_disabled);
162
}
163
}
164
return NULL;
165
}
166
167
// Compile::make_vm_intrinsic is defined in library_call.cpp.
168
169
#ifndef PRODUCT
170
// statistics gathering...
171
172
juint Compile::_intrinsic_hist_count[vmIntrinsics::number_of_intrinsics()] = {0};
173
jubyte Compile::_intrinsic_hist_flags[vmIntrinsics::number_of_intrinsics()] = {0};
174
175
inline int as_int(vmIntrinsics::ID id) {
176
return vmIntrinsics::as_int(id);
177
}
178
179
bool Compile::gather_intrinsic_statistics(vmIntrinsics::ID id, bool is_virtual, int flags) {
180
assert(id > vmIntrinsics::_none && id < vmIntrinsics::ID_LIMIT, "oob");
181
int oflags = _intrinsic_hist_flags[as_int(id)];
182
assert(flags != 0, "what happened?");
183
if (is_virtual) {
184
flags |= _intrinsic_virtual;
185
}
186
bool changed = (flags != oflags);
187
if ((flags & _intrinsic_worked) != 0) {
188
juint count = (_intrinsic_hist_count[as_int(id)] += 1);
189
if (count == 1) {
190
changed = true; // first time
191
}
192
// increment the overall count also:
193
_intrinsic_hist_count[as_int(vmIntrinsics::_none)] += 1;
194
}
195
if (changed) {
196
if (((oflags ^ flags) & _intrinsic_virtual) != 0) {
197
// Something changed about the intrinsic's virtuality.
198
if ((flags & _intrinsic_virtual) != 0) {
199
// This is the first use of this intrinsic as a virtual call.
200
if (oflags != 0) {
201
// We already saw it as a non-virtual, so note both cases.
202
flags |= _intrinsic_both;
203
}
204
} else if ((oflags & _intrinsic_both) == 0) {
205
// This is the first use of this intrinsic as a non-virtual
206
flags |= _intrinsic_both;
207
}
208
}
209
_intrinsic_hist_flags[as_int(id)] = (jubyte) (oflags | flags);
210
}
211
// update the overall flags also:
212
_intrinsic_hist_flags[as_int(vmIntrinsics::_none)] |= (jubyte) flags;
213
return changed;
214
}
215
216
static char* format_flags(int flags, char* buf) {
217
buf[0] = 0;
218
if ((flags & Compile::_intrinsic_worked) != 0) strcat(buf, ",worked");
219
if ((flags & Compile::_intrinsic_failed) != 0) strcat(buf, ",failed");
220
if ((flags & Compile::_intrinsic_disabled) != 0) strcat(buf, ",disabled");
221
if ((flags & Compile::_intrinsic_virtual) != 0) strcat(buf, ",virtual");
222
if ((flags & Compile::_intrinsic_both) != 0) strcat(buf, ",nonvirtual");
223
if (buf[0] == 0) strcat(buf, ",");
224
assert(buf[0] == ',', "must be");
225
return &buf[1];
226
}
227
228
void Compile::print_intrinsic_statistics() {
229
char flagsbuf[100];
230
ttyLocker ttyl;
231
if (xtty != NULL) xtty->head("statistics type='intrinsic'");
232
tty->print_cr("Compiler intrinsic usage:");
233
juint total = _intrinsic_hist_count[as_int(vmIntrinsics::_none)];
234
if (total == 0) total = 1; // avoid div0 in case of no successes
235
#define PRINT_STAT_LINE(name, c, f) \
236
tty->print_cr(" %4d (%4.1f%%) %s (%s)", (int)(c), ((c) * 100.0) / total, name, f);
237
for (auto id : EnumRange<vmIntrinsicID>{}) {
238
int flags = _intrinsic_hist_flags[as_int(id)];
239
juint count = _intrinsic_hist_count[as_int(id)];
240
if ((flags | count) != 0) {
241
PRINT_STAT_LINE(vmIntrinsics::name_at(id), count, format_flags(flags, flagsbuf));
242
}
243
}
244
PRINT_STAT_LINE("total", total, format_flags(_intrinsic_hist_flags[as_int(vmIntrinsics::_none)], flagsbuf));
245
if (xtty != NULL) xtty->tail("statistics");
246
}
247
248
void Compile::print_statistics() {
249
{ ttyLocker ttyl;
250
if (xtty != NULL) xtty->head("statistics type='opto'");
251
Parse::print_statistics();
252
PhaseCCP::print_statistics();
253
PhaseRegAlloc::print_statistics();
254
PhaseOutput::print_statistics();
255
PhasePeephole::print_statistics();
256
PhaseIdealLoop::print_statistics();
257
if (xtty != NULL) xtty->tail("statistics");
258
}
259
if (_intrinsic_hist_flags[as_int(vmIntrinsics::_none)] != 0) {
260
// put this under its own <statistics> element.
261
print_intrinsic_statistics();
262
}
263
}
264
#endif //PRODUCT
265
266
void Compile::gvn_replace_by(Node* n, Node* nn) {
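// Replace every use of n with nn, keeping the initial GVN hash table consistent and recording each touched user for IGVN.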
267
for (DUIterator_Last imin, i = n->last_outs(imin); i >= imin; ) {
268
Node* use = n->last_out(i);
269
bool is_in_table = initial_gvn()->hash_delete(use);
270
uint uses_found = 0;
271
for (uint j = 0; j < use->len(); j++) {
272
if (use->in(j) == n) {
273
if (j < use->req())
274
use->set_req(j, nn);
275
else
276
use->set_prec(j, nn);
277
uses_found++;
278
}
279
}
280
if (is_in_table) {
281
// reinsert into table
282
initial_gvn()->hash_find_insert(use);
283
}
284
record_for_igvn(use);
285
i -= uses_found; // we deleted 1 or more copies of this edge
286
}
287
}
288
289
290
// Identify all nodes that are reachable from below, useful.
// Use a breadth-first pass that records state in a Unique_Node_List;
// recursive traversal is slower.
293
void Compile::identify_useful_nodes(Unique_Node_List &useful) {
294
int estimated_worklist_size = live_nodes();
295
useful.map( estimated_worklist_size, NULL ); // preallocate space
296
297
// Initialize worklist
298
if (root() != NULL) { useful.push(root()); }
299
// If 'top' is cached, declare it useful to preserve cached node
300
if( cached_top_node() ) { useful.push(cached_top_node()); }
301
// Push all useful nodes onto the list, breadth-first.
303
for( uint next = 0; next < useful.size(); ++next ) {
304
assert( next < unique(), "Unique useful nodes < total nodes");
305
Node *n = useful.at(next);
306
uint max = n->len();
307
for( uint i = 0; i < max; ++i ) {
308
Node *m = n->in(i);
309
if (not_a_node(m)) continue;
310
useful.push(m);
311
}
312
}
313
}
314
315
// Update dead_node_list with any missing dead nodes using useful
// list. Consider all non-useful nodes to be useless, i.e., dead nodes.
317
void Compile::update_dead_node_list(Unique_Node_List &useful) {
318
uint max_idx = unique();
319
VectorSet& useful_node_set = useful.member_set();
320
321
for (uint node_idx = 0; node_idx < max_idx; node_idx++) {
322
// If node with index node_idx is not in useful set,
323
// mark it as dead in dead node list.
324
if (!useful_node_set.test(node_idx)) {
325
record_dead_node(node_idx);
326
}
327
}
328
}
329
330
void Compile::remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List &useful) {
331
int shift = 0;
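// Compact in place: 'shift' counts dead entries seen so far, and each live entry slides left by that amount.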
332
for (int i = 0; i < inlines->length(); i++) {
333
CallGenerator* cg = inlines->at(i);
334
if (useful.member(cg->call_node())) {
335
if (shift > 0) {
336
inlines->at_put(i - shift, cg);
337
}
338
} else {
339
shift++; // skip over the dead element
340
}
341
}
342
if (shift > 0) {
343
inlines->trunc_to(inlines->length() - shift); // remove last elements from compacted array
344
}
345
}
346
347
void Compile::remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Node* dead) {
348
assert(dead != NULL && dead->is_Call(), "sanity");
349
int found = 0;
350
for (int i = 0; i < inlines->length(); i++) {
351
if (inlines->at(i)->call_node() == dead) {
352
inlines->remove_at(i);
353
found++;
354
NOT_DEBUG( break; ) // elements are unique, so exit early
355
}
356
}
357
assert(found <= 1, "not unique");
358
}
359
360
void Compile::remove_useless_nodes(GrowableArray<Node*>& node_list, Unique_Node_List& useful) {
361
for (int i = node_list.length() - 1; i >= 0; i--) {
362
Node* n = node_list.at(i);
363
if (!useful.member(n)) {
364
node_list.delete_at(i); // replaces i-th with last element which is known to be useful (already processed)
365
}
366
}
367
}
368
369
void Compile::remove_useless_node(Node* dead) {
370
remove_modified_node(dead);
371
// A constant node that has no out-edges and only one in-edge from
// root is usually dead. However, the reshaping walk sometimes makes
// it reachable again by adding use edges. So we will NOT count Con nodes
// as dead, to be conservative about the dead node count at any
// given time.
377
if (!dead->is_Con()) {
378
record_dead_node(dead->_idx);
379
}
380
if (dead->is_macro()) {
381
remove_macro_node(dead);
382
}
383
if (dead->is_expensive()) {
384
remove_expensive_node(dead);
385
}
386
if (dead->Opcode() == Op_Opaque4) {
387
remove_skeleton_predicate_opaq(dead);
388
}
389
if (dead->for_post_loop_opts_igvn()) {
390
remove_from_post_loop_opts_igvn(dead);
391
}
392
if (dead->is_Call()) {
393
remove_useless_late_inlines( &_late_inlines, dead);
394
remove_useless_late_inlines( &_string_late_inlines, dead);
395
remove_useless_late_inlines( &_boxing_late_inlines, dead);
396
remove_useless_late_inlines(&_vector_reboxing_late_inlines, dead);
397
}
398
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
399
bs->unregister_potential_barrier_node(dead);
400
}
401
402
// Disconnect all useless nodes by disconnecting those at the boundary.
403
void Compile::remove_useless_nodes(Unique_Node_List &useful) {
404
uint next = 0;
405
while (next < useful.size()) {
406
Node *n = useful.at(next++);
407
if (n->is_SafePoint()) {
408
// We're done with a parsing phase. Replaced nodes are not valid
409
// beyond that point.
410
n->as_SafePoint()->delete_replaced_nodes();
411
}
412
// Use raw traversal of out edges since this code removes out edges
413
int max = n->outcnt();
414
for (int j = 0; j < max; ++j) {
415
Node* child = n->raw_out(j);
416
if (!useful.member(child)) {
417
assert(!child->is_top() || child != top(),
418
"If top is cached in Compile object it is in useful list");
419
// Only need to remove this out-edge to the useless node
420
n->raw_del_out(j);
421
--j;
422
--max;
423
}
424
}
425
if (n->outcnt() == 1 && n->has_special_unique_user()) {
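// The lone remaining "special" user may now be simplifiable, so revisit it during IGVN.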
426
record_for_igvn(n->unique_out());
427
}
428
}
429
430
remove_useless_nodes(_macro_nodes, useful); // remove useless macro nodes
431
remove_useless_nodes(_predicate_opaqs, useful); // remove useless predicate opaque nodes
432
remove_useless_nodes(_skeleton_predicate_opaqs, useful);
433
remove_useless_nodes(_expensive_nodes, useful); // remove useless expensive nodes
434
remove_useless_nodes(_for_post_loop_igvn, useful); // remove useless node recorded for post loop opts IGVN pass
435
436
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
437
bs->eliminate_useless_gc_barriers(useful, this);
438
// clean up the late inline lists
439
remove_useless_late_inlines( &_late_inlines, useful);
440
remove_useless_late_inlines( &_string_late_inlines, useful);
441
remove_useless_late_inlines( &_boxing_late_inlines, useful);
442
remove_useless_late_inlines(&_vector_reboxing_late_inlines, useful);
443
debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
444
}
445
446
// ============================================================================
447
//------------------------------CompileWrapper---------------------------------
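// Scoped (RAII) helper: the constructor registers this Compile as the active compilation in the ciEnv and sets up per-compile state; the destructor tears it back down.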
448
class CompileWrapper : public StackObj {
449
Compile *const _compile;
450
public:
451
CompileWrapper(Compile* compile);
452
453
~CompileWrapper();
454
};
455
456
CompileWrapper::CompileWrapper(Compile* compile) : _compile(compile) {
457
// the Compile* pointer is stored in the current ciEnv:
458
ciEnv* env = compile->env();
459
assert(env == ciEnv::current(), "must already be a ciEnv active");
460
assert(env->compiler_data() == NULL, "compile already active?");
461
env->set_compiler_data(compile);
462
assert(compile == Compile::current(), "sanity");
463
464
compile->set_type_dict(NULL);
465
compile->set_clone_map(new Dict(cmpkey, hashkey, _compile->comp_arena()));
466
compile->clone_map().set_clone_idx(0);
467
compile->set_type_last_size(0);
468
compile->set_last_tf(NULL, NULL);
469
compile->set_indexSet_arena(NULL);
470
compile->set_indexSet_free_block_list(NULL);
471
compile->init_type_arena();
472
Type::Initialize(compile);
473
_compile->begin_method();
474
_compile->clone_map().set_debug(_compile->has_method() && _compile->directive()->CloneMapDebugOption);
475
}
476
CompileWrapper::~CompileWrapper() {
477
_compile->end_method();
478
_compile->env()->set_compiler_data(NULL);
479
}
480
481
482
//----------------------------print_compile_messages---------------------------
483
void Compile::print_compile_messages() {
484
#ifndef PRODUCT
485
// Check if recompiling
486
if (_subsume_loads == false && PrintOpto) {
487
// Recompiling without allowing machine instructions to subsume loads
488
tty->print_cr("*********************************************************");
489
tty->print_cr("** Bailout: Recompile without subsuming loads **");
490
tty->print_cr("*********************************************************");
491
}
492
if (_do_escape_analysis != DoEscapeAnalysis && PrintOpto) {
493
// Recompiling without escape analysis
494
tty->print_cr("*********************************************************");
495
tty->print_cr("** Bailout: Recompile without escape analysis **");
496
tty->print_cr("*********************************************************");
497
}
498
if (_eliminate_boxing != EliminateAutoBox && PrintOpto) {
499
// Recompiling without boxing elimination
500
tty->print_cr("*********************************************************");
501
tty->print_cr("** Bailout: Recompile without boxing elimination **");
502
tty->print_cr("*********************************************************");
503
}
504
if (env()->break_at_compile()) {
505
// Open the debugger when compiling this method.
506
tty->print("### Breaking when compiling: ");
507
method()->print_short_name();
508
tty->cr();
509
BREAKPOINT;
510
}
511
512
if( PrintOpto ) {
513
if (is_osr_compilation()) {
514
tty->print("[OSR]%3d", _compile_id);
515
} else {
516
tty->print("%3d", _compile_id);
517
}
518
}
519
#endif
520
}
521
522
// ============================================================================
523
//------------------------------Compile standard-------------------------------
524
debug_only( int Compile::_debug_idx = 100000; )
525
// Compile a method. entry_bci is -1 for a normal compilation; for an
// on-stack-replacement compilation it is the continuation bci.
528
529
530
Compile::Compile( ciEnv* ci_env, ciMethod* target, int osr_bci,
531
bool subsume_loads, bool do_escape_analysis, bool eliminate_boxing, bool install_code, DirectiveSet* directive)
532
: Phase(Compiler),
533
_compile_id(ci_env->compile_id()),
534
_subsume_loads(subsume_loads),
535
_do_escape_analysis(do_escape_analysis),
536
_install_code(install_code),
537
_eliminate_boxing(eliminate_boxing),
538
_method(target),
539
_entry_bci(osr_bci),
540
_stub_function(NULL),
541
_stub_name(NULL),
542
_stub_entry_point(NULL),
543
_max_node_limit(MaxNodeLimit),
544
_post_loop_opts_phase(false),
545
_inlining_progress(false),
546
_inlining_incrementally(false),
547
_do_cleanup(false),
548
_has_reserved_stack_access(target->has_reserved_stack_access()),
549
#ifndef PRODUCT
550
_igv_idx(0),
551
_trace_opto_output(directive->TraceOptoOutputOption),
552
_print_ideal(directive->PrintIdealOption),
553
#endif
554
_has_method_handle_invokes(false),
555
_clinit_barrier_on_entry(false),
556
_stress_seed(0),
557
_comp_arena(mtCompiler),
558
_barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
559
_env(ci_env),
560
_directive(directive),
561
_log(ci_env->log()),
562
_failure_reason(NULL),
563
_intrinsics (comp_arena(), 0, 0, NULL),
564
_macro_nodes (comp_arena(), 8, 0, NULL),
565
_predicate_opaqs (comp_arena(), 8, 0, NULL),
566
_skeleton_predicate_opaqs (comp_arena(), 8, 0, NULL),
567
_expensive_nodes (comp_arena(), 8, 0, NULL),
568
_for_post_loop_igvn(comp_arena(), 8, 0, NULL),
569
_congraph(NULL),
570
NOT_PRODUCT(_printer(NULL) COMMA)
571
_dead_node_list(comp_arena()),
572
_dead_node_count(0),
573
_node_arena(mtCompiler),
574
_old_arena(mtCompiler),
575
_mach_constant_base_node(NULL),
576
_Compile_types(mtCompiler),
577
_initial_gvn(NULL),
578
_for_igvn(NULL),
579
_late_inlines(comp_arena(), 2, 0, NULL),
580
_string_late_inlines(comp_arena(), 2, 0, NULL),
581
_boxing_late_inlines(comp_arena(), 2, 0, NULL),
582
_vector_reboxing_late_inlines(comp_arena(), 2, 0, NULL),
583
_late_inlines_pos(0),
584
_number_of_mh_late_inlines(0),
585
_native_invokers(comp_arena(), 1, 0, NULL),
586
_print_inlining_stream(NULL),
587
_print_inlining_list(NULL),
588
_print_inlining_idx(0),
589
_print_inlining_output(NULL),
590
_replay_inline_data(NULL),
591
_java_calls(0),
592
_inner_loops(0),
593
_interpreter_frame_size(0)
594
#ifndef PRODUCT
595
, _in_dump_cnt(0)
596
#endif
597
{
598
C = this;
599
CompileWrapper cw(this);
600
601
if (CITimeVerbose) {
602
tty->print(" ");
603
target->holder()->name()->print();
604
tty->print(".");
605
target->print_short_name();
606
tty->print(" ");
607
}
608
TraceTime t1("Total compilation time", &_t_totalCompilation, CITime, CITimeVerbose);
609
TraceTime t2(NULL, &_t_methodCompilation, CITime, false);
610
611
#if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
612
bool print_opto_assembly = directive->PrintOptoAssemblyOption;
613
// We can always print a disassembly, either abstract (hex dump) or
614
// with the help of a suitable hsdis library. Thus, we should not
615
// couple print_assembly and print_opto_assembly controls.
616
// But: always print opto and regular assembly on compile command 'print'.
617
bool print_assembly = directive->PrintAssemblyOption;
618
set_print_assembly(print_opto_assembly || print_assembly);
619
#else
620
set_print_assembly(false); // must initialize.
621
#endif
622
623
#ifndef PRODUCT
624
set_parsed_irreducible_loop(false);
625
626
if (directive->ReplayInlineOption) {
627
_replay_inline_data = ciReplay::load_inline_data(method(), entry_bci(), ci_env->comp_level());
628
}
629
#endif
630
set_print_inlining(directive->PrintInliningOption || PrintOptoInlining);
631
set_print_intrinsics(directive->PrintIntrinsicsOption);
632
set_has_irreducible_loop(true); // conservative until build_loop_tree() reset it
633
634
if (ProfileTraps RTM_OPT_ONLY( || UseRTMLocking )) {
635
// Make sure the method being compiled gets its own MDO,
636
// so we can at least track the decompile_count().
637
// Need MDO to record RTM code generation state.
638
method()->ensure_method_data();
639
}
640
641
Init(::AliasLevel);
642
643
644
print_compile_messages();
645
646
_ilt = InlineTree::build_inline_tree_root();
647
648
// Even if NO memory addresses are used, MergeMem nodes must have at least 1 slice
649
assert(num_alias_types() >= AliasIdxRaw, "");
650
651
#define MINIMUM_NODE_HASH 1023
652
// Node list that Iterative GVN will start with
653
Unique_Node_List for_igvn(comp_arena());
654
set_for_igvn(&for_igvn);
655
656
// GVN that will be run immediately on new nodes
657
uint estimated_size = method()->code_size()*4+64;
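// Rough sizing heuristic: about four nodes per bytecode plus some slack, clamped below by MINIMUM_NODE_HASH.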
658
estimated_size = (estimated_size < MINIMUM_NODE_HASH ? MINIMUM_NODE_HASH : estimated_size);
659
PhaseGVN gvn(node_arena(), estimated_size);
660
set_initial_gvn(&gvn);
661
662
print_inlining_init();
663
{ // Scope for timing the parser
664
TracePhase tp("parse", &timers[_t_parser]);
665
666
// Put top into the hash table ASAP.
667
initial_gvn()->transform_no_reclaim(top());
668
669
// Set up tf(), start(), and find a CallGenerator.
670
CallGenerator* cg = NULL;
671
if (is_osr_compilation()) {
672
const TypeTuple *domain = StartOSRNode::osr_domain();
673
const TypeTuple *range = TypeTuple::make_range(method()->signature());
674
init_tf(TypeFunc::make(domain, range));
675
StartNode* s = new StartOSRNode(root(), domain);
676
initial_gvn()->set_type_bottom(s);
677
init_start(s);
678
cg = CallGenerator::for_osr(method(), entry_bci());
679
} else {
680
// Normal case.
681
init_tf(TypeFunc::make(method()));
682
StartNode* s = new StartNode(root(), tf()->domain());
683
initial_gvn()->set_type_bottom(s);
684
init_start(s);
685
if (method()->intrinsic_id() == vmIntrinsics::_Reference_get) {
// With java.lang.ref.Reference.get() we must go through the
// intrinsic - even when get() is the root
// method of the compile - so that, if necessary, the value in
// the referent field of the reference object gets recorded by
// the pre-barrier code.
691
cg = find_intrinsic(method(), false);
692
}
693
if (cg == NULL) {
694
float past_uses = method()->interpreter_invocation_count();
695
float expected_uses = past_uses;
696
cg = CallGenerator::for_inline(method(), expected_uses);
697
}
698
}
699
if (failing()) return;
700
if (cg == NULL) {
701
record_method_not_compilable("cannot parse method");
702
return;
703
}
704
JVMState* jvms = build_start_state(start(), tf());
705
if ((jvms = cg->generate(jvms)) == NULL) {
706
if (!failure_reason_is(C2Compiler::retry_class_loading_during_parsing())) {
707
record_method_not_compilable("method parse failed");
708
}
709
return;
710
}
711
GraphKit kit(jvms);
712
713
if (!kit.stopped()) {
714
// Accept return values, and transfer control we know not where.
715
// This is done by a special, unique ReturnNode bound to root.
716
return_values(kit.jvms());
717
}
718
719
if (kit.has_exceptions()) {
720
// Any exceptions that escape from this call must be rethrown
721
// to whatever caller is dynamically above us on the stack.
722
// This is done by a special, unique RethrowNode bound to root.
723
rethrow_exceptions(kit.transfer_exceptions_into_jvms());
724
}
725
726
assert(IncrementalInline || (_late_inlines.length() == 0 && !has_mh_late_inlines()), "incremental inlining is off");
727
728
if (_late_inlines.length() == 0 && !has_mh_late_inlines() && !failing() && has_stringbuilder()) {
729
inline_string_calls(true);
730
}
731
732
if (failing()) return;
733
734
print_method(PHASE_BEFORE_REMOVEUSELESS, 3);
735
736
// Remove clutter produced by parsing.
737
if (!failing()) {
738
ResourceMark rm;
739
PhaseRemoveUseless pru(initial_gvn(), &for_igvn);
740
}
741
}
742
743
// Note: Large methods are capped off in do_one_bytecode().
744
if (failing()) return;
745
746
// After parsing, node notes are no longer automagic.
747
// They must be propagated by register_new_node_with_optimizer(),
748
// clone(), or the like.
749
set_default_node_notes(NULL);
750
751
#ifndef PRODUCT
752
if (should_print(1)) {
753
_printer->print_inlining();
754
}
755
#endif
756
757
if (failing()) return;
758
NOT_PRODUCT( verify_graph_edges(); )
759
760
// If any phase is randomized for stress testing, seed random number
761
// generation and log the seed for repeatability.
762
if (StressLCM || StressGCM || StressIGVN || StressCCP) {
763
_stress_seed = FLAG_IS_DEFAULT(StressSeed) ?
764
static_cast<uint>(Ticks::now().nanoseconds()) : StressSeed;
765
if (_log != NULL) {
766
_log->elem("stress_test seed='%u'", _stress_seed);
767
}
768
}
769
770
// Now optimize
771
Optimize();
772
if (failing()) return;
773
NOT_PRODUCT( verify_graph_edges(); )
774
775
#ifndef PRODUCT
776
if (print_ideal()) {
777
ttyLocker ttyl; // keep the following output all in one block
778
// This output goes directly to the tty, not the compiler log.
779
// To enable tools to match it up with the compilation activity,
780
// be sure to tag this tty output with the compile ID.
781
if (xtty != NULL) {
782
xtty->head("ideal compile_id='%d'%s", compile_id(),
783
is_osr_compilation() ? " compile_kind='osr'" :
784
"");
785
}
786
root()->dump(9999);
787
if (xtty != NULL) {
788
xtty->tail("ideal");
789
}
790
}
791
#endif
792
793
#ifdef ASSERT
794
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
795
bs->verify_gc_barriers(this, BarrierSetC2::BeforeCodeGen);
796
#endif
797
798
// Dump compilation data to replay it.
799
if (directive->DumpReplayOption) {
800
env()->dump_replay_data(_compile_id);
801
}
802
if (directive->DumpInlineOption && (ilt() != NULL)) {
803
env()->dump_inline_data(_compile_id);
804
}
805
806
// Now that we know the size of all the monitors we can add a fixed slot
807
// for the original deopt pc.
808
int next_slot = fixed_slots() + (sizeof(address) / VMRegImpl::stack_slot_size);
809
set_fixed_slots(next_slot);
810
811
// Compute when to use implicit null checks. Used by matching trap based
812
// nodes and NullCheck optimization.
813
set_allowed_deopt_reasons();
814
815
// Now generate code
816
Code_Gen();
817
}
818
819
//------------------------------Compile----------------------------------------
820
// Compile a runtime stub
821
Compile::Compile( ciEnv* ci_env,
822
TypeFunc_generator generator,
823
address stub_function,
824
const char *stub_name,
825
int is_fancy_jump,
826
bool pass_tls,
827
bool return_pc,
828
DirectiveSet* directive)
829
: Phase(Compiler),
830
_compile_id(0),
831
_subsume_loads(true),
832
_do_escape_analysis(false),
833
_install_code(true),
834
_eliminate_boxing(false),
835
_method(NULL),
836
_entry_bci(InvocationEntryBci),
837
_stub_function(stub_function),
838
_stub_name(stub_name),
839
_stub_entry_point(NULL),
840
_max_node_limit(MaxNodeLimit),
841
_post_loop_opts_phase(false),
842
_inlining_progress(false),
843
_inlining_incrementally(false),
844
_has_reserved_stack_access(false),
845
#ifndef PRODUCT
846
_igv_idx(0),
847
_trace_opto_output(directive->TraceOptoOutputOption),
848
_print_ideal(directive->PrintIdealOption),
849
#endif
850
_has_method_handle_invokes(false),
851
_clinit_barrier_on_entry(false),
852
_stress_seed(0),
853
_comp_arena(mtCompiler),
854
_barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
855
_env(ci_env),
856
_directive(directive),
857
_log(ci_env->log()),
858
_failure_reason(NULL),
859
_congraph(NULL),
860
NOT_PRODUCT(_printer(NULL) COMMA)
861
_dead_node_list(comp_arena()),
862
_dead_node_count(0),
863
_node_arena(mtCompiler),
864
_old_arena(mtCompiler),
865
_mach_constant_base_node(NULL),
866
_Compile_types(mtCompiler),
867
_initial_gvn(NULL),
868
_for_igvn(NULL),
869
_number_of_mh_late_inlines(0),
870
_native_invokers(),
871
_print_inlining_stream(NULL),
872
_print_inlining_list(NULL),
873
_print_inlining_idx(0),
874
_print_inlining_output(NULL),
875
_replay_inline_data(NULL),
876
_java_calls(0),
877
_inner_loops(0),
878
_interpreter_frame_size(0),
879
#ifndef PRODUCT
880
_in_dump_cnt(0),
881
#endif
882
_allowed_reasons(0) {
883
C = this;
884
885
TraceTime t1(NULL, &_t_totalCompilation, CITime, false);
886
TraceTime t2(NULL, &_t_stubCompilation, CITime, false);
887
888
#ifndef PRODUCT
889
set_print_assembly(PrintFrameConverterAssembly);
890
set_parsed_irreducible_loop(false);
891
#else
892
set_print_assembly(false); // Must initialize.
893
#endif
894
set_has_irreducible_loop(false); // no loops
895
896
CompileWrapper cw(this);
897
Init(/*AliasLevel=*/ 0);
898
init_tf((*generator)());
899
900
{
901
// The following is a dummy for the sake of GraphKit::gen_stub
902
Unique_Node_List for_igvn(comp_arena());
903
set_for_igvn(&for_igvn); // not used, but some GraphKit guys push on this
904
PhaseGVN gvn(Thread::current()->resource_area(),255);
905
set_initial_gvn(&gvn); // not significant, but GraphKit guys use it pervasively
906
gvn.transform_no_reclaim(top());
907
908
GraphKit kit;
909
kit.gen_stub(stub_function, stub_name, is_fancy_jump, pass_tls, return_pc);
910
}
911
912
NOT_PRODUCT( verify_graph_edges(); )
913
914
Code_Gen();
915
}
916
917
//------------------------------Init-------------------------------------------
918
// Prepare for a single compilation
919
void Compile::Init(int aliaslevel) {
920
_unique = 0;
921
_regalloc = NULL;
922
923
_tf = NULL; // filled in later
924
_top = NULL; // cached later
925
_matcher = NULL; // filled in later
926
_cfg = NULL; // filled in later
927
928
IA32_ONLY( set_24_bit_selection_and_mode(true, false); )
929
930
_node_note_array = NULL;
931
_default_node_notes = NULL;
932
DEBUG_ONLY( _modified_nodes = NULL; ) // Used in Optimize()
933
934
_immutable_memory = NULL; // filled in at first inquiry
935
936
// Globally visible Nodes
937
// First set TOP to NULL to give safe behavior during creation of RootNode
938
set_cached_top_node(NULL);
939
set_root(new RootNode());
940
// Now that you have a Root to point to, create the real TOP
941
set_cached_top_node( new ConNode(Type::TOP) );
942
set_recent_alloc(NULL, NULL);
943
944
// Create Debug Information Recorder to record scopes, oopmaps, etc.
945
env()->set_oop_recorder(new OopRecorder(env()->arena()));
946
env()->set_debug_info(new DebugInformationRecorder(env()->oop_recorder()));
947
env()->set_dependencies(new Dependencies(env()));
948
949
_fixed_slots = 0;
950
set_has_split_ifs(false);
951
set_has_loops(false); // first approximation
952
set_has_stringbuilder(false);
953
set_has_boxed_value(false);
954
_trap_can_recompile = false; // no traps emitted yet
955
_major_progress = true; // start out assuming good things will happen
956
set_has_unsafe_access(false);
957
set_max_vector_size(0);
958
set_clear_upper_avx(false); //false as default for clear upper bits of ymm registers
959
Copy::zero_to_bytes(_trap_hist, sizeof(_trap_hist));
960
set_decompile_count(0);
961
962
set_do_freq_based_layout(_directive->BlockLayoutByFrequencyOption);
963
_loop_opts_cnt = LoopOptsCount;
964
set_do_inlining(Inline);
965
set_max_inline_size(MaxInlineSize);
966
set_freq_inline_size(FreqInlineSize);
967
set_do_scheduling(OptoScheduling);
968
969
set_do_vector_loop(false);
970
971
if (AllowVectorizeOnDemand) {
972
if (has_method() && (_directive->VectorizeOption || _directive->VectorizeDebugOption)) {
973
set_do_vector_loop(true);
974
NOT_PRODUCT(if (do_vector_loop() && Verbose) {tty->print("Compile::Init: do vectorized loops (SIMD like) for method %s\n", method()->name()->as_quoted_ascii());})
975
} else if (has_method() && method()->name() != 0 &&
976
method()->intrinsic_id() == vmIntrinsics::_forEachRemaining) {
977
set_do_vector_loop(true);
978
}
979
}
set_use_cmove(UseCMoveUnconditionally /* || do_vector_loop()*/); // TODO: consider having do_vector_loop() mandate use_cmove unconditionally
981
NOT_PRODUCT(if (use_cmove() && Verbose && has_method()) {tty->print("Compile::Init: use CMove without profitability tests for method %s\n", method()->name()->as_quoted_ascii());})
982
983
set_age_code(has_method() && method()->profile_aging());
984
set_rtm_state(NoRTM); // No RTM lock eliding by default
985
_max_node_limit = _directive->MaxNodeLimitOption;
986
987
#if INCLUDE_RTM_OPT
988
if (UseRTMLocking && has_method() && (method()->method_data_or_null() != NULL)) {
989
int rtm_state = method()->method_data()->rtm_state();
990
if (method_has_option(CompileCommand::NoRTMLockEliding) || ((rtm_state & NoRTM) != 0)) {
991
// Don't generate RTM lock eliding code.
992
set_rtm_state(NoRTM);
993
} else if (method_has_option(CompileCommand::UseRTMLockEliding) || ((rtm_state & UseRTM) != 0) || !UseRTMDeopt) {
994
// Generate RTM lock eliding code without abort ratio calculation code.
995
set_rtm_state(UseRTM);
996
} else if (UseRTMDeopt) {
997
// Generate RTM lock eliding code and include abort ratio calculation
998
// code if UseRTMDeopt is on.
999
set_rtm_state(ProfileRTM);
1000
}
1001
}
1002
#endif
1003
if (VM_Version::supports_fast_class_init_checks() && has_method() && !is_osr_compilation() && method()->needs_clinit_barrier()) {
1004
set_clinit_barrier_on_entry(true);
1005
}
1006
if (debug_info()->recording_non_safepoints()) {
1007
set_node_note_array(new(comp_arena()) GrowableArray<Node_Notes*>
1008
(comp_arena(), 8, 0, NULL));
1009
set_default_node_notes(Node_Notes::make(this));
1010
}
1011
1012
// // -- Initialize types before each compile --
1013
// // Update cached type information
1014
// if( _method && _method->constants() )
1015
// Type::update_loaded_types(_method, _method->constants());
1016
1017
// Init alias_type map.
1018
if (!_do_escape_analysis && aliaslevel == 3)
1019
aliaslevel = 2; // No unique types without escape analysis
1020
_AliasLevel = aliaslevel;
1021
const int grow_ats = 16;
1022
_max_alias_types = grow_ats;
1023
_alias_types = NEW_ARENA_ARRAY(comp_arena(), AliasType*, grow_ats);
1024
AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType, grow_ats);
1025
Copy::zero_to_bytes(ats, sizeof(AliasType)*grow_ats);
1026
{
1027
for (int i = 0; i < grow_ats; i++) _alias_types[i] = &ats[i];
1028
}
1029
// Initialize the first few types.
1030
_alias_types[AliasIdxTop]->Init(AliasIdxTop, NULL);
1031
_alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM);
1032
_alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM);
1033
_num_alias_types = AliasIdxRaw+1;
1034
// Zero out the alias type cache.
1035
Copy::zero_to_bytes(_alias_cache, sizeof(_alias_cache));
1036
// A NULL adr_type hits in the cache right away. Preload the right answer.
1037
probe_alias_cache(NULL)->_index = AliasIdxTop;
1038
1039
#ifdef ASSERT
1040
_type_verify_symmetry = true;
1041
_phase_optimize_finished = false;
1042
_exception_backedge = false;
1043
#endif
1044
}
1045
1046
//---------------------------init_start----------------------------------------
1047
// Install the StartNode on this compile object.
1048
void Compile::init_start(StartNode* s) {
1049
if (failing())
1050
return; // already failing
1051
assert(s == start(), "");
1052
}
1053
1054
/**
1055
* Return the 'StartNode'. We must not have a pending failure, since the ideal graph
1056
* can be in an inconsistent state, i.e., we can get segmentation faults when traversing
1057
* the ideal graph.
1058
*/
1059
StartNode* Compile::start() const {
1060
assert (!failing(), "Must not have pending failure. Reason is: %s", failure_reason());
1061
for (DUIterator_Fast imax, i = root()->fast_outs(imax); i < imax; i++) {
1062
Node* start = root()->fast_out(i);
1063
if (start->is_Start()) {
1064
return start->as_Start();
1065
}
1066
}
1067
fatal("Did not find Start node!");
1068
return NULL;
1069
}
1070
1071
//-------------------------------immutable_memory-------------------------------------
1072
// Access immutable memory
1073
Node* Compile::immutable_memory() {
1074
if (_immutable_memory != NULL) {
1075
return _immutable_memory;
1076
}
1077
StartNode* s = start();
1078
for (DUIterator_Fast imax, i = s->fast_outs(imax); true; i++) {
1079
Node *p = s->fast_out(i);
1080
if (p != s && p->as_Proj()->_con == TypeFunc::Memory) {
1081
_immutable_memory = p;
1082
return _immutable_memory;
1083
}
1084
}
1085
ShouldNotReachHere();
1086
return NULL;
1087
}
1088
1089
//----------------------set_cached_top_node------------------------------------
1090
// Install the cached top node, and make sure Node::is_top works correctly.
1091
void Compile::set_cached_top_node(Node* tn) {
1092
if (tn != NULL) verify_top(tn);
1093
Node* old_top = _top;
1094
_top = tn;
1095
// Calling Node::setup_is_top allows the nodes the chance to adjust
1096
// their _out arrays.
1097
if (_top != NULL) _top->setup_is_top();
1098
if (old_top != NULL) old_top->setup_is_top();
1099
assert(_top == NULL || top()->is_top(), "");
1100
}
1101
1102
#ifdef ASSERT
1103
uint Compile::count_live_nodes_by_graph_walk() {
1104
Unique_Node_List useful(comp_arena());
1105
// Get useful node list by walking the graph.
1106
identify_useful_nodes(useful);
1107
return useful.size();
1108
}
1109
1110
void Compile::print_missing_nodes() {
1111
1112
// Return if CompileLog is NULL and PrintIdealNodeCount is false.
1113
if ((_log == NULL) && (! PrintIdealNodeCount)) {
1114
return;
1115
}
1116
1117
// This is an expensive function. It is executed only when the user
// specifies the VerifyIdealNodeCount option or otherwise requests the
// additional work needed to identify reachable nodes by walking the
// flow graph and to find the missing ones using _dead_node_list.
1122
1123
Unique_Node_List useful(comp_arena());
1124
// Get useful node list by walking the graph.
1125
identify_useful_nodes(useful);
1126
1127
uint l_nodes = C->live_nodes();
1128
uint l_nodes_by_walk = useful.size();
1129
1130
if (l_nodes != l_nodes_by_walk) {
1131
if (_log != NULL) {
1132
_log->begin_head("mismatched_nodes count='%d'", abs((int) (l_nodes - l_nodes_by_walk)));
1133
_log->stamp();
1134
_log->end_head();
1135
}
1136
VectorSet& useful_member_set = useful.member_set();
1137
int last_idx = l_nodes_by_walk;
1138
for (int i = 0; i < last_idx; i++) {
1139
if (useful_member_set.test(i)) {
1140
if (_dead_node_list.test(i)) {
1141
if (_log != NULL) {
1142
_log->elem("mismatched_node_info node_idx='%d' type='both live and dead'", i);
1143
}
1144
if (PrintIdealNodeCount) {
1145
// Print the log message to tty
tty->print_cr("mismatched_node idx='%d' type='both live and dead'", i);
1147
useful.at(i)->dump();
1148
}
1149
}
1150
}
1151
else if (! _dead_node_list.test(i)) {
1152
if (_log != NULL) {
1153
_log->elem("mismatched_node_info node_idx='%d' type='neither live nor dead'", i);
1154
}
1155
if (PrintIdealNodeCount) {
1156
// Print the log message to tty
1157
tty->print_cr("mismatched_node idx='%d' type='neither live nor dead'", i);
1158
}
1159
}
1160
}
1161
if (_log != NULL) {
1162
_log->tail("mismatched_nodes");
1163
}
1164
}
1165
}
1166
void Compile::record_modified_node(Node* n) {
1167
if (_modified_nodes != NULL && !_inlining_incrementally && !n->is_Con()) {
1168
_modified_nodes->push(n);
1169
}
1170
}
1171
1172
void Compile::remove_modified_node(Node* n) {
1173
if (_modified_nodes != NULL) {
1174
_modified_nodes->remove(n);
1175
}
1176
}
1177
#endif
1178
1179
#ifndef PRODUCT
1180
void Compile::verify_top(Node* tn) const {
1181
if (tn != NULL) {
1182
assert(tn->is_Con(), "top node must be a constant");
1183
assert(((ConNode*)tn)->type() == Type::TOP, "top node must have correct type");
1184
assert(tn->in(0) != NULL, "must have live top node");
1185
}
1186
}
1187
#endif
1188
1189
1190
///-------------------Managing Per-Node Debug & Profile Info-------------------
1191
1192
void Compile::grow_node_notes(GrowableArray<Node_Notes*>* arr, int grow_by) {
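// Grow by at least the current number of blocks (so capacity at least doubles), appending zero-filled blocks of Node_Notes.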
1193
guarantee(arr != NULL, "");
1194
int num_blocks = arr->length();
1195
if (grow_by < num_blocks) grow_by = num_blocks;
1196
int num_notes = grow_by * _node_notes_block_size;
1197
Node_Notes* notes = NEW_ARENA_ARRAY(node_arena(), Node_Notes, num_notes);
1198
Copy::zero_to_bytes(notes, num_notes * sizeof(Node_Notes));
1199
while (num_notes > 0) {
1200
arr->append(notes);
1201
notes += _node_notes_block_size;
1202
num_notes -= _node_notes_block_size;
1203
}
1204
assert(num_notes == 0, "exact multiple, please");
1205
}
1206
1207
bool Compile::copy_node_notes_to(Node* dest, Node* source) {
1208
if (source == NULL || dest == NULL) return false;
1209
1210
if (dest->is_Con())
1211
return false; // Do not push debug info onto constants.
1212
1213
#ifdef ASSERT
1214
// Leave a bread crumb trail pointing to the original node:
1215
if (dest != NULL && dest != source && dest->debug_orig() == NULL) {
1216
dest->set_debug_orig(source);
1217
}
1218
#endif
1219
1220
if (node_note_array() == NULL)
1221
return false; // Not collecting any notes now.
1222
1223
// This is a copy onto a pre-existing node, which may already have notes.
1224
// If both nodes have notes, do not overwrite any pre-existing notes.
1225
Node_Notes* source_notes = node_notes_at(source->_idx);
1226
if (source_notes == NULL || source_notes->is_clear()) return false;
1227
Node_Notes* dest_notes = node_notes_at(dest->_idx);
1228
if (dest_notes == NULL || dest_notes->is_clear()) {
1229
return set_node_notes_at(dest->_idx, source_notes);
1230
}
1231
1232
Node_Notes merged_notes = (*source_notes);
1233
// The order of operations here ensures that dest notes will win...
1234
merged_notes.update_from(dest_notes);
1235
return set_node_notes_at(dest->_idx, &merged_notes);
1236
}
1237
1238
1239
//--------------------------allow_range_check_smearing-------------------------
1240
// Gating condition for coalescing similar range checks.
1241
// Sometimes we try 'speculatively' replacing a series of a range checks by a
1242
// single covering check that is at least as strong as any of them.
1243
// If the optimization succeeds, the simplified (strengthened) range check
1244
// will always succeed. If it fails, we will deopt, and then give up
1245
// on the optimization.
1246
bool Compile::allow_range_check_smearing() const {
1247
// If this method has already thrown a range-check,
1248
// assume it was because we already tried range smearing
1249
// and it failed.
1250
uint already_trapped = trap_count(Deoptimization::Reason_range_check);
1251
return !already_trapped;
1252
}
1253
1254
1255
//------------------------------flatten_alias_type-----------------------------
1256
const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
1257
int offset = tj->offset();
1258
TypePtr::PTR ptr = tj->ptr();
1259
// A known instance (scalarizable allocation) aliases only with itself.
1261
bool is_known_inst = tj->isa_oopptr() != NULL &&
1262
tj->is_oopptr()->is_known_instance();
1263
1264
// Process weird unsafe references.
1265
if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) {
1266
assert(InlineUnsafeOps, "indeterminate pointers come only from unsafe ops");
1267
assert(!is_known_inst, "scalarizable allocation should not have unsafe references");
1268
tj = TypeOopPtr::BOTTOM;
1269
ptr = tj->ptr();
1270
offset = tj->offset();
1271
}
1272
1273
// Array pointers need some flattening
1274
const TypeAryPtr *ta = tj->isa_aryptr();
1275
if (ta && ta->is_stable()) {
1276
// Erase stability property for alias analysis.
1277
tj = ta = ta->cast_to_stable(false);
1278
}
1279
if( ta && is_known_inst ) {
1280
if ( offset != Type::OffsetBot &&
1281
offset > arrayOopDesc::length_offset_in_bytes() ) {
1282
offset = Type::OffsetBot; // Flatten constant access into array body only
1283
tj = ta = TypeAryPtr::make(ptr, ta->ary(), ta->klass(), true, offset, ta->instance_id());
1284
}
1285
} else if( ta && _AliasLevel >= 2 ) {
1286
// For arrays indexed by constant indices, we flatten the alias
1287
// space to include all of the array body. Only the header, klass
1288
// and array length can be accessed un-aliased.
1289
if( offset != Type::OffsetBot ) {
1290
if( ta->const_oop() ) { // MethodData* or Method*
1291
offset = Type::OffsetBot; // Flatten constant access into array body
1292
tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),ta->ary(),ta->klass(),false,offset);
1293
} else if( offset == arrayOopDesc::length_offset_in_bytes() ) {
1294
// range is OK as-is.
1295
tj = ta = TypeAryPtr::RANGE;
1296
} else if( offset == oopDesc::klass_offset_in_bytes() ) {
1297
tj = TypeInstPtr::KLASS; // all klass loads look alike
1298
ta = TypeAryPtr::RANGE; // generic ignored junk
1299
ptr = TypePtr::BotPTR;
1300
} else if( offset == oopDesc::mark_offset_in_bytes() ) {
1301
tj = TypeInstPtr::MARK;
1302
ta = TypeAryPtr::RANGE; // generic ignored junk
1303
ptr = TypePtr::BotPTR;
1304
} else { // Random constant offset into array body
1305
offset = Type::OffsetBot; // Flatten constant access into array body
1306
tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,offset);
1307
}
1308
}
1309
// Arrays of fixed size alias with arrays of unknown size.
1310
if (ta->size() != TypeInt::POS) {
1311
const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS);
1312
tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,offset);
1313
}
1314
// Arrays of known objects become arrays of unknown objects.
1315
if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
1316
const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
1317
tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
1318
}
1319
if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
1320
const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
1321
tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
1322
}
1323
// Arrays of bytes and of booleans both use 'bastore' and 'baload' so
1324
// cannot be distinguished by bytecode alone.
1325
if (ta->elem() == TypeInt::BOOL) {
1326
const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size());
1327
ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE);
1328
tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,offset);
1329
}
1330
// During the 2nd round of IterGVN, NotNull castings are removed.
1331
// Make sure the Bottom and NotNull variants alias the same.
1332
// Also, make sure exact and non-exact variants alias the same.
1333
if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != NULL) {
1334
tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,offset);
1335
}
1336
}
1337
1338
// Oop pointers need some flattening
1339
const TypeInstPtr *to = tj->isa_instptr();
1340
if( to && _AliasLevel >= 2 && to != TypeOopPtr::BOTTOM ) {
1341
ciInstanceKlass *k = to->klass()->as_instance_klass();
1342
if( ptr == TypePtr::Constant ) {
1343
if (to->klass() != ciEnv::current()->Class_klass() ||
1344
offset < k->size_helper() * wordSize) {
1345
// No constant oop pointers (such as Strings); they alias with
1346
// unknown strings.
1347
assert(!is_known_inst, "not scalarizable allocation");
1348
tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
1349
}
1350
} else if( is_known_inst ) {
1351
tj = to; // Keep NotNull and klass_is_exact for instance type
1352
} else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
1353
// During the 2nd round of IterGVN, NotNull castings are removed.
1354
// Make sure the Bottom and NotNull variants alias the same.
1355
// Also, make sure exact and non-exact variants alias the same.
1356
tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
1357
}
1358
if (to->speculative() != NULL) {
1359
tj = to = TypeInstPtr::make(to->ptr(),to->klass(),to->klass_is_exact(),to->const_oop(),to->offset(), to->instance_id());
1360
}
1361
// Canonicalize the holder of this field
1362
if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
1363
// First handle header references such as a LoadKlassNode, even if the
1364
// object's klass is unloaded at compile time (4965979).
1365
if (!is_known_inst) { // Do it only for non-instance types
1366
tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset);
1367
}
1368
} else if (offset < 0 || offset >= k->size_helper() * wordSize) {
1369
// Static fields are in the space above the normal instance
1370
// fields in the java.lang.Class instance.
1371
if (to->klass() != ciEnv::current()->Class_klass()) {
1372
to = NULL;
1373
tj = TypeOopPtr::BOTTOM;
1374
offset = tj->offset();
1375
}
1376
} else {
1377
ciInstanceKlass *canonical_holder = k->get_canonical_holder(offset);
1378
if (!k->equals(canonical_holder) || tj->offset() != offset) {
1379
if( is_known_inst ) {
1380
tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, offset, to->instance_id());
1381
} else {
1382
tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, offset);
1383
}
1384
}
1385
}
1386
}
1387
1388
// Klass pointers to object array klasses need some flattening
1389
const TypeKlassPtr *tk = tj->isa_klassptr();
1390
if( tk ) {
1391
// If we are referencing a field within a Klass, we need
1392
// to assume the worst case of an Object. Both exact and
1393
// inexact types must flatten to the same alias class so
1394
// use NotNull as the PTR.
1395
if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) {
1396
1397
tj = tk = TypeKlassPtr::make(TypePtr::NotNull,
1398
TypeKlassPtr::OBJECT->klass(),
1399
offset);
1400
}
1401
1402
ciKlass* klass = tk->klass();
1403
if( klass->is_obj_array_klass() ) {
1404
ciKlass* k = TypeAryPtr::OOPS->klass();
1405
if( !k || !k->is_loaded() ) // Only fails for some -Xcomp runs
1406
k = TypeInstPtr::BOTTOM->klass();
1407
tj = tk = TypeKlassPtr::make( TypePtr::NotNull, k, offset );
1408
}
1409
1410
// Check for precise loads from the primary supertype array and force them
1411
// to the supertype cache alias index. Check for generic array loads from
1412
// the primary supertype array and also force them to the supertype cache
1413
// alias index. Since the same load can reach both, we need to merge
1414
// these 2 disparate memories into the same alias class. Since the
1415
// primary supertype array is read-only, there's no chance of confusion
1416
// where we bypass an array load and an array store.
1417
int primary_supers_offset = in_bytes(Klass::primary_supers_offset());
1418
if (offset == Type::OffsetBot ||
1419
(offset >= primary_supers_offset &&
1420
offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) ||
1421
offset == (int)in_bytes(Klass::secondary_super_cache_offset())) {
1422
offset = in_bytes(Klass::secondary_super_cache_offset());
1423
tj = tk = TypeKlassPtr::make( TypePtr::NotNull, tk->klass(), offset );
1424
}
1425
}
1426
1427
// Flatten all Raw pointers together.
1428
if (tj->base() == Type::RawPtr)
1429
tj = TypeRawPtr::BOTTOM;
1430
1431
if (tj->base() == Type::AnyPtr)
1432
tj = TypePtr::BOTTOM; // An error, which the caller must check for.
1433
1434
// Flatten all to bottom for now
1435
switch( _AliasLevel ) {
1436
case 0:
1437
tj = TypePtr::BOTTOM;
1438
break;
1439
case 1: // Flatten to: oop, static, field or array
1440
switch (tj->base()) {
1441
//case Type::AryPtr: tj = TypeAryPtr::RANGE; break;
1442
case Type::RawPtr: tj = TypeRawPtr::BOTTOM; break;
1443
case Type::AryPtr: // do not distinguish arrays at all
1444
case Type::InstPtr: tj = TypeInstPtr::BOTTOM; break;
1445
case Type::KlassPtr: tj = TypeKlassPtr::OBJECT; break;
1446
case Type::AnyPtr: tj = TypePtr::BOTTOM; break; // caller checks it
1447
default: ShouldNotReachHere();
1448
}
1449
break;
1450
case 2: // No collapsing at level 2; keep all splits
1451
case 3: // No collapsing at level 3; keep all splits
1452
break;
1453
default:
1454
Unimplemented();
1455
}
1456
1457
offset = tj->offset();
1458
assert( offset != Type::OffsetTop, "Offset has fallen from constant" );
1459
1460
assert( (offset != Type::OffsetBot && tj->base() != Type::AryPtr) ||
1461
(offset == Type::OffsetBot && tj->base() == Type::AryPtr) ||
1462
(offset == Type::OffsetBot && tj == TypeOopPtr::BOTTOM) ||
1463
(offset == Type::OffsetBot && tj == TypePtr::BOTTOM) ||
1464
(offset == oopDesc::mark_offset_in_bytes() && tj->base() == Type::AryPtr) ||
1465
(offset == oopDesc::klass_offset_in_bytes() && tj->base() == Type::AryPtr) ||
1466
(offset == arrayOopDesc::length_offset_in_bytes() && tj->base() == Type::AryPtr),
1467
"For oops, klasses, raw offset must be constant; for arrays the offset is never known" );
1468
assert( tj->ptr() != TypePtr::TopPTR &&
1469
tj->ptr() != TypePtr::AnyNull &&
1470
tj->ptr() != TypePtr::Null, "No imprecise addresses" );
1471
// assert( tj->ptr() != TypePtr::Constant ||
1472
// tj->base() == Type::RawPtr ||
1473
// tj->base() == Type::KlassPtr, "No constant oop addresses" );
1474
1475
return tj;
1476
}
1477
1478
void Compile::AliasType::Init(int i, const TypePtr* at) {
1479
assert(AliasIdxTop <= i && i < Compile::current()->_max_alias_types, "Invalid alias index");
1480
_index = i;
1481
_adr_type = at;
1482
_field = NULL;
1483
_element = NULL;
1484
_is_rewritable = true; // default
1485
const TypeOopPtr *atoop = (at != NULL) ? at->isa_oopptr() : NULL;
1486
if (atoop != NULL && atoop->is_known_instance()) {
1487
const TypeOopPtr *gt = atoop->cast_to_instance_id(TypeOopPtr::InstanceBot);
1488
_general_index = Compile::current()->get_alias_index(gt);
1489
} else {
1490
_general_index = 0;
1491
}
1492
}
1493
1494
BasicType Compile::AliasType::basic_type() const {
  if (element() != NULL) {
    const Type* element = adr_type()->is_aryptr()->elem();
    return element->isa_narrowoop() ? T_OBJECT : element->array_element_basic_type();
  } else if (field() != NULL) {
    return field()->layout_type();
  } else {
    return T_ILLEGAL; // unknown
  }
}
1504
1505
//---------------------------------print_on------------------------------------
1506
#ifndef PRODUCT
1507
void Compile::AliasType::print_on(outputStream* st) {
1508
if (index() < 10)
1509
st->print("@ <%d> ", index());
1510
else st->print("@ <%d>", index());
1511
st->print(is_rewritable() ? " " : " RO");
1512
int offset = adr_type()->offset();
1513
if (offset == Type::OffsetBot)
1514
st->print(" +any");
1515
else st->print(" +%-3d", offset);
1516
st->print(" in ");
1517
adr_type()->dump_on(st);
1518
const TypeOopPtr* tjp = adr_type()->isa_oopptr();
1519
if (field() != NULL && tjp) {
1520
if (tjp->klass() != field()->holder() ||
1521
tjp->offset() != field()->offset_in_bytes()) {
1522
st->print(" != ");
1523
field()->print();
1524
st->print(" ***");
1525
}
1526
}
1527
}
1528
1529
void print_alias_types() {
1530
Compile* C = Compile::current();
1531
tty->print_cr("--- Alias types, AliasIdxBot .. %d", C->num_alias_types()-1);
1532
for (int idx = Compile::AliasIdxBot; idx < C->num_alias_types(); idx++) {
1533
C->alias_type(idx)->print_on(tty);
1534
tty->cr();
1535
}
1536
}
1537
#endif
1538
1539
1540
//----------------------------probe_alias_cache--------------------------------
Compile::AliasCacheEntry* Compile::probe_alias_cache(const TypePtr* adr_type) {
  intptr_t key = (intptr_t) adr_type;
  key ^= key >> logAliasCacheSize;
  return &_alias_cache[key & right_n_bits(logAliasCacheSize)];
}
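
// Usage sketch (editorial, illustrative only): find_alias_type() and
// have_alias_type() below probe this direct-mapped cache first, and
// find_alias_type() fills the entry on a miss, e.g.
//
//   AliasCacheEntry* ace = probe_alias_cache(adr_type);
//   if (ace->_adr_type == adr_type)  return alias_type(ace->_index);   // hit
//   ...                                                                // slow lookup
//   ace->_adr_type = adr_type;                                         // remember for next time
//   ace->_index    = idx;
//
// The xor-fold of the pointer bits spreads nearby TypePtr* addresses over the
// 2^logAliasCacheSize entries.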
1547
1548
//-----------------------------grow_alias_types--------------------------------
1549
void Compile::grow_alias_types() {
1550
const int old_ats = _max_alias_types; // how many before?
1551
const int new_ats = old_ats; // how many more?
1552
const int grow_ats = old_ats+new_ats; // how many now?
1553
_max_alias_types = grow_ats;
1554
_alias_types = REALLOC_ARENA_ARRAY(comp_arena(), AliasType*, _alias_types, old_ats, grow_ats);
1555
AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType, new_ats);
1556
Copy::zero_to_bytes(ats, sizeof(AliasType)*new_ats);
1557
for (int i = 0; i < new_ats; i++) _alias_types[old_ats+i] = &ats[i];
1558
}
1559
1560
1561
//--------------------------------find_alias_type------------------------------
1562
Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field) {
1563
if (_AliasLevel == 0)
1564
return alias_type(AliasIdxBot);
1565
1566
AliasCacheEntry* ace = probe_alias_cache(adr_type);
1567
if (ace->_adr_type == adr_type) {
1568
return alias_type(ace->_index);
1569
}
1570
1571
// Handle special cases.
1572
if (adr_type == NULL) return alias_type(AliasIdxTop);
1573
if (adr_type == TypePtr::BOTTOM) return alias_type(AliasIdxBot);
1574
1575
// Do it the slow way.
1576
const TypePtr* flat = flatten_alias_type(adr_type);
1577
1578
#ifdef ASSERT
1579
{
1580
ResourceMark rm;
1581
assert(flat == flatten_alias_type(flat), "not idempotent: adr_type = %s; flat = %s => %s",
1582
Type::str(adr_type), Type::str(flat), Type::str(flatten_alias_type(flat)));
1583
assert(flat != TypePtr::BOTTOM, "cannot alias-analyze an untyped ptr: adr_type = %s",
1584
Type::str(adr_type));
1585
if (flat->isa_oopptr() && !flat->isa_klassptr()) {
1586
const TypeOopPtr* foop = flat->is_oopptr();
1587
// Scalarizable allocations have exact klass always.
1588
bool exact = !foop->klass_is_exact() || foop->is_known_instance();
1589
const TypePtr* xoop = foop->cast_to_exactness(exact)->is_ptr();
1590
assert(foop == flatten_alias_type(xoop), "exactness must not affect alias type: foop = %s; xoop = %s",
1591
Type::str(foop), Type::str(xoop));
1592
}
1593
}
1594
#endif
1595
1596
int idx = AliasIdxTop;
1597
for (int i = 0; i < num_alias_types(); i++) {
1598
if (alias_type(i)->adr_type() == flat) {
1599
idx = i;
1600
break;
1601
}
1602
}
1603
1604
if (idx == AliasIdxTop) {
1605
if (no_create) return NULL;
1606
// Grow the array if necessary.
1607
if (_num_alias_types == _max_alias_types) grow_alias_types();
1608
// Add a new alias type.
1609
idx = _num_alias_types++;
1610
_alias_types[idx]->Init(idx, flat);
1611
if (flat == TypeInstPtr::KLASS) alias_type(idx)->set_rewritable(false);
1612
if (flat == TypeAryPtr::RANGE) alias_type(idx)->set_rewritable(false);
1613
if (flat->isa_instptr()) {
1614
if (flat->offset() == java_lang_Class::klass_offset()
1615
&& flat->is_instptr()->klass() == env()->Class_klass())
1616
alias_type(idx)->set_rewritable(false);
1617
}
1618
if (flat->isa_aryptr()) {
1619
#ifdef ASSERT
1620
const int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1621
// (T_BYTE has the weakest alignment and size restrictions...)
1622
assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
1623
#endif
1624
if (flat->offset() == TypePtr::OffsetBot) {
1625
alias_type(idx)->set_element(flat->is_aryptr()->elem());
1626
}
1627
}
1628
if (flat->isa_klassptr()) {
1629
if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
1630
alias_type(idx)->set_rewritable(false);
1631
if (flat->offset() == in_bytes(Klass::modifier_flags_offset()))
1632
alias_type(idx)->set_rewritable(false);
1633
if (flat->offset() == in_bytes(Klass::access_flags_offset()))
1634
alias_type(idx)->set_rewritable(false);
1635
if (flat->offset() == in_bytes(Klass::java_mirror_offset()))
1636
alias_type(idx)->set_rewritable(false);
1637
if (flat->offset() == in_bytes(Klass::secondary_super_cache_offset()))
1638
alias_type(idx)->set_rewritable(false);
1639
}
1640
// %%% (We would like to finalize JavaThread::threadObj_offset(),
1641
// but the base pointer type is not distinctive enough to identify
1642
// references into JavaThread.)
1643
1644
// Check for final fields.
1645
const TypeInstPtr* tinst = flat->isa_instptr();
1646
if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {
1647
ciField* field;
1648
if (tinst->const_oop() != NULL &&
1649
tinst->klass() == ciEnv::current()->Class_klass() &&
1650
tinst->offset() >= (tinst->klass()->as_instance_klass()->size_helper() * wordSize)) {
1651
// static field
1652
ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
1653
field = k->get_field_by_offset(tinst->offset(), true);
1654
} else {
1655
ciInstanceKlass *k = tinst->klass()->as_instance_klass();
1656
field = k->get_field_by_offset(tinst->offset(), false);
1657
}
1658
assert(field == NULL ||
1659
original_field == NULL ||
1660
(field->holder() == original_field->holder() &&
1661
field->offset() == original_field->offset() &&
1662
field->is_static() == original_field->is_static()), "wrong field?");
1663
// Set field() and is_rewritable() attributes.
1664
if (field != NULL) alias_type(idx)->set_field(field);
1665
}
1666
}
1667
1668
// Fill the cache for next time.
1669
ace->_adr_type = adr_type;
1670
ace->_index = idx;
1671
assert(alias_type(adr_type) == alias_type(idx), "type must be installed");
1672
1673
// Might as well try to fill the cache for the flattened version, too.
1674
AliasCacheEntry* face = probe_alias_cache(flat);
1675
if (face->_adr_type == NULL) {
1676
face->_adr_type = flat;
1677
face->_index = idx;
1678
assert(alias_type(flat) == alias_type(idx), "flat type must work too");
1679
}
1680
1681
return alias_type(idx);
1682
}
1683
1684
1685
Compile::AliasType* Compile::alias_type(ciField* field) {
1686
const TypeOopPtr* t;
1687
if (field->is_static())
1688
t = TypeInstPtr::make(field->holder()->java_mirror());
1689
else
1690
t = TypeOopPtr::make_from_klass_raw(field->holder());
1691
AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field);
1692
assert((field->is_final() || field->is_stable()) == !atp->is_rewritable(), "must get the rewritable bits correct");
1693
return atp;
1694
}
1695
1696
1697
//------------------------------have_alias_type--------------------------------
1698
bool Compile::have_alias_type(const TypePtr* adr_type) {
1699
AliasCacheEntry* ace = probe_alias_cache(adr_type);
1700
if (ace->_adr_type == adr_type) {
1701
return true;
1702
}
1703
1704
// Handle special cases.
1705
if (adr_type == NULL) return true;
1706
if (adr_type == TypePtr::BOTTOM) return true;
1707
1708
return find_alias_type(adr_type, true, NULL) != NULL;
1709
}
1710
1711
//-----------------------------must_alias--------------------------------------
1712
// True if all values of the given address type are in the given alias category.
1713
bool Compile::must_alias(const TypePtr* adr_type, int alias_idx) {
1714
if (alias_idx == AliasIdxBot) return true; // the universal category
1715
if (adr_type == NULL) return true; // NULL serves as TypePtr::TOP
1716
if (alias_idx == AliasIdxTop) return false; // the empty category
1717
if (adr_type->base() == Type::AnyPtr) return false; // TypePtr::BOTTOM or its twins
1718
1719
// the only remaining possible overlap is identity
1720
int adr_idx = get_alias_index(adr_type);
1721
assert(adr_idx != AliasIdxBot && adr_idx != AliasIdxTop, "");
1722
assert(adr_idx == alias_idx ||
1723
(alias_type(alias_idx)->adr_type() != TypeOopPtr::BOTTOM
1724
&& adr_type != TypeOopPtr::BOTTOM),
1725
"should not be testing for overlap with an unsafe pointer");
1726
return adr_idx == alias_idx;
1727
}
1728
1729
//------------------------------can_alias--------------------------------------
1730
// True if any values of the given address type are in the given alias category.
1731
bool Compile::can_alias(const TypePtr* adr_type, int alias_idx) {
1732
if (alias_idx == AliasIdxTop) return false; // the empty category
1733
if (adr_type == NULL) return false; // NULL serves as TypePtr::TOP
1734
// Known instance doesn't alias with bottom memory
1735
if (alias_idx == AliasIdxBot) return !adr_type->is_known_instance(); // the universal category
1736
if (adr_type->base() == Type::AnyPtr) return !C->get_adr_type(alias_idx)->is_known_instance(); // TypePtr::BOTTOM or its twins
1737
1738
// the only remaining possible overlap is identity
1739
int adr_idx = get_alias_index(adr_type);
1740
assert(adr_idx != AliasIdxBot && adr_idx != AliasIdxTop, "");
1741
return adr_idx == alias_idx;
1742
}
1743
1744
//---------------------cleanup_loop_predicates-----------------------
1745
// Remove the opaque nodes that protect the predicates so that all unused
1746
// checks and uncommon_traps will be eliminated from the ideal graph
1747
void Compile::cleanup_loop_predicates(PhaseIterGVN &igvn) {
1748
if (predicate_count()==0) return;
1749
for (int i = predicate_count(); i > 0; i--) {
1750
Node * n = predicate_opaque1_node(i-1);
1751
assert(n->Opcode() == Op_Opaque1, "must be");
1752
igvn.replace_node(n, n->in(1));
1753
}
1754
assert(predicate_count()==0, "should be clean!");
1755
}
1756
1757
void Compile::record_for_post_loop_opts_igvn(Node* n) {
1758
if (!n->for_post_loop_opts_igvn()) {
1759
assert(!_for_post_loop_igvn.contains(n), "duplicate");
1760
n->add_flag(Node::NodeFlags::Flag_for_post_loop_opts_igvn);
1761
_for_post_loop_igvn.append(n);
1762
}
1763
}
1764
1765
void Compile::remove_from_post_loop_opts_igvn(Node* n) {
1766
n->remove_flag(Node::NodeFlags::Flag_for_post_loop_opts_igvn);
1767
_for_post_loop_igvn.remove(n);
1768
}
1769
1770
void Compile::process_for_post_loop_opts_igvn(PhaseIterGVN& igvn) {
1771
// Verify that all previous optimizations produced a valid graph
1772
// at least to this point, even if no loop optimizations were done.
1773
PhaseIdealLoop::verify(igvn);
1774
1775
C->set_post_loop_opts_phase(); // no more loop opts allowed
1776
1777
assert(!C->major_progress(), "not cleared");
1778
1779
if (_for_post_loop_igvn.length() > 0) {
1780
while (_for_post_loop_igvn.length() > 0) {
1781
Node* n = _for_post_loop_igvn.pop();
1782
n->remove_flag(Node::NodeFlags::Flag_for_post_loop_opts_igvn);
1783
igvn._worklist.push(n);
1784
}
1785
igvn.optimize();
1786
assert(_for_post_loop_igvn.length() == 0, "no more delayed nodes allowed");
1787
1788
// Sometimes IGVN sets major progress (e.g., when processing loop nodes).
1789
if (C->major_progress()) {
1790
C->clear_major_progress(); // ensure that major progress is now clear
1791
}
1792
}
1793
}
1794
1795
// StringOpts and late inlining of string methods
1796
void Compile::inline_string_calls(bool parse_time) {
1797
{
1798
// remove useless nodes to make the usage analysis simpler
1799
ResourceMark rm;
1800
PhaseRemoveUseless pru(initial_gvn(), for_igvn());
1801
}
1802
1803
{
1804
ResourceMark rm;
1805
print_method(PHASE_BEFORE_STRINGOPTS, 3);
1806
PhaseStringOpts pso(initial_gvn(), for_igvn());
1807
print_method(PHASE_AFTER_STRINGOPTS, 3);
1808
}
1809
1810
// now inline anything that we skipped the first time around
1811
if (!parse_time) {
1812
_late_inlines_pos = _late_inlines.length();
1813
}
1814
1815
while (_string_late_inlines.length() > 0) {
1816
CallGenerator* cg = _string_late_inlines.pop();
1817
cg->do_late_inline();
1818
if (failing()) return;
1819
}
1820
_string_late_inlines.trunc_to(0);
1821
}
1822
1823
// Late inlining of boxing methods
1824
void Compile::inline_boxing_calls(PhaseIterGVN& igvn) {
1825
if (_boxing_late_inlines.length() > 0) {
1826
assert(has_boxed_value(), "inconsistent");
1827
1828
PhaseGVN* gvn = initial_gvn();
1829
set_inlining_incrementally(true);
1830
1831
assert( igvn._worklist.size() == 0, "should be done with igvn" );
1832
for_igvn()->clear();
1833
gvn->replace_with(&igvn);
1834
1835
_late_inlines_pos = _late_inlines.length();
1836
1837
while (_boxing_late_inlines.length() > 0) {
1838
CallGenerator* cg = _boxing_late_inlines.pop();
1839
cg->do_late_inline();
1840
if (failing()) return;
1841
}
1842
_boxing_late_inlines.trunc_to(0);
1843
1844
inline_incrementally_cleanup(igvn);
1845
1846
set_inlining_incrementally(false);
1847
}
1848
}
1849
1850
bool Compile::inline_incrementally_one() {
1851
assert(IncrementalInline, "incremental inlining should be on");
1852
1853
TracePhase tp("incrementalInline_inline", &timers[_t_incrInline_inline]);
1854
1855
set_inlining_progress(false);
1856
set_do_cleanup(false);
1857
1858
for (int i = 0; i < _late_inlines.length(); i++) {
1859
_late_inlines_pos = i+1;
1860
CallGenerator* cg = _late_inlines.at(i);
1861
bool does_dispatch = cg->is_virtual_late_inline() || cg->is_mh_late_inline();
1862
if (inlining_incrementally() || does_dispatch) { // a call can be either inlined or strength-reduced to a direct call
1863
cg->do_late_inline();
1864
assert(_late_inlines.at(i) == cg, "no insertions before current position allowed");
1865
if (failing()) {
1866
return false;
1867
} else if (inlining_progress()) {
1868
_late_inlines_pos = i+1; // restore the position in case new elements were inserted
1869
print_method(PHASE_INCREMENTAL_INLINE_STEP, cg->call_node(), 3);
1870
break; // process one call site at a time
1871
}
1872
} else {
1873
// Ignore late inline direct calls when inlining is not allowed.
1874
// They are left in the late inline list when node budget is exhausted until the list is fully drained.
1875
}
1876
}
1877
// Remove processed elements.
1878
_late_inlines.remove_till(_late_inlines_pos);
1879
_late_inlines_pos = 0;
1880
1881
assert(inlining_progress() || _late_inlines.length() == 0, "no progress");
1882
1883
bool needs_cleanup = do_cleanup() || over_inlining_cutoff();
1884
1885
set_inlining_progress(false);
1886
set_do_cleanup(false);
1887
1888
bool force_cleanup = directive()->IncrementalInlineForceCleanupOption;
1889
return (_late_inlines.length() > 0) && !needs_cleanup && !force_cleanup;
1890
}
1891
1892
void Compile::inline_incrementally_cleanup(PhaseIterGVN& igvn) {
1893
{
1894
TracePhase tp("incrementalInline_pru", &timers[_t_incrInline_pru]);
1895
ResourceMark rm;
1896
PhaseRemoveUseless pru(initial_gvn(), for_igvn());
1897
}
1898
{
1899
TracePhase tp("incrementalInline_igvn", &timers[_t_incrInline_igvn]);
1900
igvn = PhaseIterGVN(initial_gvn());
1901
igvn.optimize();
1902
}
1903
print_method(PHASE_INCREMENTAL_INLINE_CLEANUP, 3);
1904
}
1905
1906
// Perform incremental inlining until bound on number of live nodes is reached
1907
void Compile::inline_incrementally(PhaseIterGVN& igvn) {
1908
TracePhase tp("incrementalInline", &timers[_t_incrInline]);
1909
1910
set_inlining_incrementally(true);
1911
uint low_live_nodes = 0;
1912
1913
while (_late_inlines.length() > 0) {
1914
if (live_nodes() > (uint)LiveNodeCountInliningCutoff) {
1915
if (low_live_nodes < (uint)LiveNodeCountInliningCutoff * 8 / 10) {
1916
TracePhase tp("incrementalInline_ideal", &timers[_t_incrInline_ideal]);
1917
// PhaseIdealLoop is expensive, so we only try it once we are
// out of live nodes, and we only try it again if the previous
// attempt helped get the number of nodes down significantly.
1920
PhaseIdealLoop::optimize(igvn, LoopOptsNone);
1921
if (failing()) return;
1922
low_live_nodes = live_nodes();
1923
_major_progress = true;
1924
}
1925
1926
if (live_nodes() > (uint)LiveNodeCountInliningCutoff) {
1927
bool do_print_inlining = print_inlining() || print_intrinsics();
1928
if (do_print_inlining || log() != NULL) {
1929
// Print inlining message for candidates that we couldn't inline for lack of space.
1930
for (int i = 0; i < _late_inlines.length(); i++) {
1931
CallGenerator* cg = _late_inlines.at(i);
1932
const char* msg = "live nodes > LiveNodeCountInliningCutoff";
1933
if (do_print_inlining) {
1934
cg->print_inlining_late(msg);
1935
}
1936
log_late_inline_failure(cg, msg);
1937
}
1938
}
1939
break; // finish
1940
}
1941
}
1942
1943
for_igvn()->clear();
1944
initial_gvn()->replace_with(&igvn);
1945
1946
while (inline_incrementally_one()) {
1947
assert(!failing(), "inconsistent");
1948
}
1949
if (failing()) return;
1950
1951
inline_incrementally_cleanup(igvn);
1952
1953
print_method(PHASE_INCREMENTAL_INLINE_STEP, 3);
1954
1955
if (failing()) return;
1956
1957
if (_late_inlines.length() == 0) {
1958
break; // no more progress
1959
}
1960
}
1961
assert( igvn._worklist.size() == 0, "should be done with igvn" );
1962
1963
if (_string_late_inlines.length() > 0) {
1964
assert(has_stringbuilder(), "inconsistent");
1965
for_igvn()->clear();
1966
initial_gvn()->replace_with(&igvn);
1967
1968
inline_string_calls(false);
1969
1970
if (failing()) return;
1971
1972
inline_incrementally_cleanup(igvn);
1973
}
1974
1975
set_inlining_incrementally(false);
1976
}
1977
1978
void Compile::process_late_inline_calls_no_inline(PhaseIterGVN& igvn) {
1979
// "inlining_incrementally() == false" is used to signal that no inlining is allowed
1980
// (see LateInlineVirtualCallGenerator::do_late_inline_check() for details).
1981
// Tracking and verification of modified nodes is disabled by setting "_modified_nodes == NULL"
1982
// as if "inlining_incrementally() == true" were set.
1983
assert(inlining_incrementally() == false, "not allowed");
1984
assert(_modified_nodes == NULL, "not allowed");
1985
assert(_late_inlines.length() > 0, "sanity");
1986
1987
while (_late_inlines.length() > 0) {
1988
for_igvn()->clear();
1989
initial_gvn()->replace_with(&igvn);
1990
1991
while (inline_incrementally_one()) {
1992
assert(!failing(), "inconsistent");
1993
}
1994
if (failing()) return;
1995
1996
inline_incrementally_cleanup(igvn);
1997
}
1998
}
1999
2000
bool Compile::optimize_loops(PhaseIterGVN& igvn, LoopOptsMode mode) {
2001
if (_loop_opts_cnt > 0) {
2002
debug_only( int cnt = 0; );
2003
while (major_progress() && (_loop_opts_cnt > 0)) {
2004
TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2005
assert( cnt++ < 40, "infinite cycle in loop optimization" );
2006
PhaseIdealLoop::optimize(igvn, mode);
2007
_loop_opts_cnt--;
2008
if (failing()) return false;
2009
if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
2010
}
2011
}
2012
return true;
2013
}
2014
2015
// Remove edges from "root" to each SafePoint at a backward branch.
2016
// They were inserted during parsing (see add_safepoint()) to make
2017
// infinite loops without calls or exceptions visible to root, i.e.,
2018
// useful.
2019
void Compile::remove_root_to_sfpts_edges(PhaseIterGVN& igvn) {
2020
Node *r = root();
2021
if (r != NULL) {
2022
for (uint i = r->req(); i < r->len(); ++i) {
2023
Node *n = r->in(i);
2024
if (n != NULL && n->is_SafePoint()) {
2025
r->rm_prec(i);
2026
if (n->outcnt() == 0) {
2027
igvn.remove_dead_node(n);
2028
}
2029
--i;
2030
}
2031
}
2032
// Parsing may have added top inputs to the root node (Path
2033
// leading to the Halt node proven dead). Make sure we get a
2034
// chance to clean them up.
2035
igvn._worklist.push(r);
2036
igvn.optimize();
2037
}
2038
}
2039
2040
//------------------------------Optimize---------------------------------------
2041
// Given a graph, optimize it.
2042
void Compile::Optimize() {
2043
TracePhase tp("optimizer", &timers[_t_optimizer]);
2044
2045
#ifndef PRODUCT
2046
if (env()->break_at_compile()) {
2047
BREAKPOINT;
2048
}
2049
2050
#endif
2051
2052
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
2053
#ifdef ASSERT
2054
bs->verify_gc_barriers(this, BarrierSetC2::BeforeOptimize);
2055
#endif
2056
2057
ResourceMark rm;
2058
2059
print_inlining_reinit();
2060
2061
NOT_PRODUCT( verify_graph_edges(); )
2062
2063
print_method(PHASE_AFTER_PARSING);
2064
2065
{
2066
// Iterative Global Value Numbering, including ideal transforms
2067
// Initialize IterGVN with types and values from parse-time GVN
2068
PhaseIterGVN igvn(initial_gvn());
2069
#ifdef ASSERT
2070
_modified_nodes = new (comp_arena()) Unique_Node_List(comp_arena());
2071
#endif
2072
{
2073
TracePhase tp("iterGVN", &timers[_t_iterGVN]);
2074
igvn.optimize();
2075
}
2076
2077
if (failing()) return;
2078
2079
print_method(PHASE_ITER_GVN1, 2);
2080
2081
inline_incrementally(igvn);
2082
2083
print_method(PHASE_INCREMENTAL_INLINE, 2);
2084
2085
if (failing()) return;
2086
2087
if (eliminate_boxing()) {
2088
// Inline valueOf() methods now.
2089
inline_boxing_calls(igvn);
2090
2091
if (AlwaysIncrementalInline) {
2092
inline_incrementally(igvn);
2093
}
2094
2095
print_method(PHASE_INCREMENTAL_BOXING_INLINE, 2);
2096
2097
if (failing()) return;
2098
}
2099
2100
// Now that all inlining is over, cut edge from root to loop
2101
// safepoints
2102
remove_root_to_sfpts_edges(igvn);
2103
2104
// Remove the speculative part of types and clean up the graph from
2105
// the extra CastPP nodes whose only purpose is to carry them. Do
2106
// that early so that optimizations are not disrupted by the extra
2107
// CastPP nodes.
2108
remove_speculative_types(igvn);
2109
2110
// No more new expensive nodes will be added to the list from here
2111
// so keep only the actual candidates for optimizations.
2112
cleanup_expensive_nodes(igvn);
2113
2114
assert(EnableVectorSupport || !has_vbox_nodes(), "sanity");
2115
if (EnableVectorSupport && has_vbox_nodes()) {
2116
TracePhase tp("", &timers[_t_vector]);
2117
PhaseVector pv(igvn);
2118
pv.optimize_vector_boxes();
2119
2120
print_method(PHASE_ITER_GVN_AFTER_VECTOR, 2);
2121
}
2122
assert(!has_vbox_nodes(), "sanity");
2123
2124
if (!failing() && RenumberLiveNodes && live_nodes() + NodeLimitFudgeFactor < unique()) {
2125
Compile::TracePhase tp("", &timers[_t_renumberLive]);
2126
initial_gvn()->replace_with(&igvn);
2127
for_igvn()->clear();
2128
Unique_Node_List new_worklist(C->comp_arena());
2129
{
2130
ResourceMark rm;
2131
PhaseRenumberLive prl = PhaseRenumberLive(initial_gvn(), for_igvn(), &new_worklist);
2132
}
2133
Unique_Node_List* save_for_igvn = for_igvn();
2134
set_for_igvn(&new_worklist);
2135
igvn = PhaseIterGVN(initial_gvn());
2136
igvn.optimize();
2137
set_for_igvn(save_for_igvn);
2138
}
2139
2140
// Perform escape analysis
2141
if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
2142
if (has_loops()) {
2143
// Cleanup graph (remove dead nodes).
2144
TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2145
PhaseIdealLoop::optimize(igvn, LoopOptsMaxUnroll);
2146
if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
2147
if (failing()) return;
2148
}
2149
ConnectionGraph::do_analysis(this, &igvn);
2150
2151
if (failing()) return;
2152
2153
// Optimize out fields loads from scalar replaceable allocations.
2154
igvn.optimize();
2155
print_method(PHASE_ITER_GVN_AFTER_EA, 2);
2156
2157
if (failing()) return;
2158
2159
if (congraph() != NULL && macro_count() > 0) {
2160
TracePhase tp("macroEliminate", &timers[_t_macroEliminate]);
2161
PhaseMacroExpand mexp(igvn);
2162
mexp.eliminate_macro_nodes();
2163
igvn.set_delay_transform(false);
2164
2165
igvn.optimize();
2166
print_method(PHASE_ITER_GVN_AFTER_ELIMINATION, 2);
2167
2168
if (failing()) return;
2169
}
2170
}
2171
2172
// Loop transforms on the ideal graph. Range Check Elimination,
2173
// peeling, unrolling, etc.
2174
2175
// Set loop opts counter
2176
if((_loop_opts_cnt > 0) && (has_loops() || has_split_ifs())) {
2177
{
2178
TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2179
PhaseIdealLoop::optimize(igvn, LoopOptsDefault);
2180
_loop_opts_cnt--;
2181
if (major_progress()) print_method(PHASE_PHASEIDEALLOOP1, 2);
2182
if (failing()) return;
2183
}
2184
// Loop opts pass if partial peeling occurred in previous pass
2185
if(PartialPeelLoop && major_progress() && (_loop_opts_cnt > 0)) {
2186
TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2187
PhaseIdealLoop::optimize(igvn, LoopOptsSkipSplitIf);
2188
_loop_opts_cnt--;
2189
if (major_progress()) print_method(PHASE_PHASEIDEALLOOP2, 2);
2190
if (failing()) return;
2191
}
2192
// Loop opts pass for loop-unrolling before CCP
2193
if(major_progress() && (_loop_opts_cnt > 0)) {
2194
TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2195
PhaseIdealLoop::optimize(igvn, LoopOptsSkipSplitIf);
2196
_loop_opts_cnt--;
2197
if (major_progress()) print_method(PHASE_PHASEIDEALLOOP3, 2);
2198
}
2199
if (!failing()) {
2200
// Verify that last round of loop opts produced a valid graph
2201
PhaseIdealLoop::verify(igvn);
2202
}
2203
}
2204
if (failing()) return;
2205
2206
// Conditional Constant Propagation;
2207
PhaseCCP ccp( &igvn );
2208
assert( true, "Break here to ccp.dump_nodes_and_types(_root,999,1)");
2209
{
2210
TracePhase tp("ccp", &timers[_t_ccp]);
2211
ccp.do_transform();
2212
}
2213
print_method(PHASE_CPP1, 2);
2214
2215
assert( true, "Break here to ccp.dump_old2new_map()");
2216
2217
// Iterative Global Value Numbering, including ideal transforms
2218
{
2219
TracePhase tp("iterGVN2", &timers[_t_iterGVN2]);
2220
igvn = ccp;
2221
igvn.optimize();
2222
}
2223
print_method(PHASE_ITER_GVN2, 2);
2224
2225
if (failing()) return;
2226
2227
// Loop transforms on the ideal graph. Range Check Elimination,
2228
// peeling, unrolling, etc.
2229
if (!optimize_loops(igvn, LoopOptsDefault)) {
2230
return;
2231
}
2232
2233
if (failing()) return;
2234
2235
C->clear_major_progress(); // ensure that major progress is now clear
2236
2237
process_for_post_loop_opts_igvn(igvn);
2238
2239
#ifdef ASSERT
2240
bs->verify_gc_barriers(this, BarrierSetC2::BeforeMacroExpand);
2241
#endif
2242
2243
{
2244
TracePhase tp("macroExpand", &timers[_t_macroExpand]);
2245
PhaseMacroExpand mex(igvn);
2246
if (mex.expand_macro_nodes()) {
2247
assert(failing(), "must bail out w/ explicit message");
2248
return;
2249
}
2250
print_method(PHASE_MACRO_EXPANSION, 2);
2251
}
2252
2253
{
2254
TracePhase tp("barrierExpand", &timers[_t_barrierExpand]);
2255
if (bs->expand_barriers(this, igvn)) {
2256
assert(failing(), "must bail out w/ explicit message");
2257
return;
2258
}
2259
print_method(PHASE_BARRIER_EXPANSION, 2);
2260
}
2261
2262
if (C->max_vector_size() > 0) {
2263
C->optimize_logic_cones(igvn);
2264
igvn.optimize();
2265
}
2266
2267
DEBUG_ONLY( _modified_nodes = NULL; )
2268
2269
assert(igvn._worklist.size() == 0, "not empty");
2270
2271
assert(_late_inlines.length() == 0 || IncrementalInlineMH || IncrementalInlineVirtual, "not empty");
2272
2273
if (_late_inlines.length() > 0) {
2274
// More opportunities to optimize virtual and MH calls.
2275
// Though it's maybe too late to perform inlining, strength-reducing them to direct calls is still an option.
2276
process_late_inline_calls_no_inline(igvn);
2277
}
2278
} // (End scope of igvn; run destructor if necessary for asserts.)
2279
2280
check_no_dead_use();
2281
2282
process_print_inlining();
2283
2284
// A method with only infinite loops has no edges entering loops from root
2285
{
2286
TracePhase tp("graphReshape", &timers[_t_graphReshaping]);
2287
if (final_graph_reshaping()) {
2288
assert(failing(), "must bail out w/ explicit message");
2289
return;
2290
}
2291
}
2292
2293
print_method(PHASE_OPTIMIZE_FINISHED, 2);
2294
DEBUG_ONLY(set_phase_optimize_finished();)
2295
}
2296
2297
#ifdef ASSERT
2298
void Compile::check_no_dead_use() const {
2299
ResourceMark rm;
2300
Unique_Node_List wq;
2301
wq.push(root());
2302
for (uint i = 0; i < wq.size(); ++i) {
2303
Node* n = wq.at(i);
2304
for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
2305
Node* u = n->fast_out(j);
2306
if (u->outcnt() == 0 && !u->is_Con()) {
2307
u->dump();
2308
fatal("no reachable node should have no use");
2309
}
2310
wq.push(u);
2311
}
2312
}
2313
}
2314
#endif
2315
2316
void Compile::inline_vector_reboxing_calls() {
2317
if (C->_vector_reboxing_late_inlines.length() > 0) {
2318
_late_inlines_pos = C->_late_inlines.length();
2319
while (_vector_reboxing_late_inlines.length() > 0) {
2320
CallGenerator* cg = _vector_reboxing_late_inlines.pop();
2321
cg->do_late_inline();
2322
if (failing()) return;
2323
print_method(PHASE_INLINE_VECTOR_REBOX, cg->call_node());
2324
}
2325
_vector_reboxing_late_inlines.trunc_to(0);
2326
}
2327
}
2328
2329
bool Compile::has_vbox_nodes() {
2330
if (C->_vector_reboxing_late_inlines.length() > 0) {
2331
return true;
2332
}
2333
for (int macro_idx = C->macro_count() - 1; macro_idx >= 0; macro_idx--) {
2334
Node * n = C->macro_node(macro_idx);
2335
assert(n->is_macro(), "only macro nodes expected here");
2336
if (n->Opcode() == Op_VectorUnbox || n->Opcode() == Op_VectorBox || n->Opcode() == Op_VectorBoxAllocate) {
2337
return true;
2338
}
2339
}
2340
return false;
2341
}
2342
2343
//---------------------------- Bitwise operation packing optimization ---------------------------
2344
2345
static bool is_vector_unary_bitwise_op(Node* n) {
2346
return n->Opcode() == Op_XorV &&
2347
VectorNode::is_vector_bitwise_not_pattern(n);
2348
}
2349
2350
static bool is_vector_binary_bitwise_op(Node* n) {
2351
switch (n->Opcode()) {
2352
case Op_AndV:
2353
case Op_OrV:
2354
return true;
2355
2356
case Op_XorV:
2357
return !is_vector_unary_bitwise_op(n);
2358
2359
default:
2360
return false;
2361
}
2362
}
2363
2364
static bool is_vector_ternary_bitwise_op(Node* n) {
2365
return n->Opcode() == Op_MacroLogicV;
2366
}
2367
2368
static bool is_vector_bitwise_op(Node* n) {
2369
return is_vector_unary_bitwise_op(n) ||
2370
is_vector_binary_bitwise_op(n) ||
2371
is_vector_ternary_bitwise_op(n);
2372
}
2373
2374
static bool is_vector_bitwise_cone_root(Node* n) {
2375
if (n->bottom_type()->isa_vectmask() || !is_vector_bitwise_op(n)) {
2376
return false;
2377
}
2378
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2379
if (is_vector_bitwise_op(n->fast_out(i))) {
2380
return false;
2381
}
2382
}
2383
return true;
2384
}
2385
2386
static uint collect_unique_inputs(Node* n, Unique_Node_List& partition, Unique_Node_List& inputs) {
2387
uint cnt = 0;
2388
if (is_vector_bitwise_op(n)) {
2389
if (VectorNode::is_vector_bitwise_not_pattern(n)) {
2390
for (uint i = 1; i < n->req(); i++) {
2391
Node* in = n->in(i);
2392
bool skip = VectorNode::is_all_ones_vector(in);
2393
if (!skip && !inputs.member(in)) {
2394
inputs.push(in);
2395
cnt++;
2396
}
2397
}
2398
assert(cnt <= 1, "not unary");
2399
} else {
2400
uint last_req = n->req();
2401
if (is_vector_ternary_bitwise_op(n)) {
2402
last_req = n->req() - 1; // skip last input
2403
}
2404
for (uint i = 1; i < last_req; i++) {
2405
Node* def = n->in(i);
2406
if (!inputs.member(def)) {
2407
inputs.push(def);
2408
cnt++;
2409
}
2410
}
2411
}
2412
partition.push(n);
2413
} else { // not a bitwise operation
2414
if (!inputs.member(n)) {
2415
inputs.push(n);
2416
cnt++;
2417
}
2418
}
2419
return cnt;
2420
}
2421
2422
void Compile::collect_logic_cone_roots(Unique_Node_List& list) {
2423
Unique_Node_List useful_nodes;
2424
C->identify_useful_nodes(useful_nodes);
2425
2426
for (uint i = 0; i < useful_nodes.size(); i++) {
2427
Node* n = useful_nodes.at(i);
2428
if (is_vector_bitwise_cone_root(n)) {
2429
list.push(n);
2430
}
2431
}
2432
}
2433
2434
Node* Compile::xform_to_MacroLogicV(PhaseIterGVN& igvn,
2435
const TypeVect* vt,
2436
Unique_Node_List& partition,
2437
Unique_Node_List& inputs) {
2438
assert(partition.size() == 2 || partition.size() == 3, "not supported");
2439
assert(inputs.size() == 2 || inputs.size() == 3, "not supported");
2440
assert(Matcher::match_rule_supported_vector(Op_MacroLogicV, vt->length(), vt->element_basic_type()), "not supported");
2441
2442
Node* in1 = inputs.at(0);
2443
Node* in2 = inputs.at(1);
2444
Node* in3 = (inputs.size() == 3 ? inputs.at(2) : in2);
2445
2446
uint func = compute_truth_table(partition, inputs);
2447
return igvn.transform(MacroLogicVNode::make(igvn, in3, in2, in1, func, vt));
2448
}
2449
2450
static uint extract_bit(uint func, uint pos) {
2451
return (func & (1 << pos)) >> pos;
2452
}
2453
2454
//
// A macro logic node represents a truth table. It has 4 inputs:
// the first three inputs correspond to the 3 columns of a truth table
// and the fourth input captures the logic function.
//
// eg. fn = (in1 AND in2) OR in3;
//
// MacroNode(in1,in2,in3,fn)
//
// -----------------
// in1 in2 in3  fn
// -----------------
//  0   0   0   0
//  0   0   1   1
//  0   1   0   0
//  0   1   1   1
//  1   0   0   0
//  1   0   1   1
//  1   1   0   1
//  1   1   1   1
//
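// Editorial worked example (not part of the original comment): using the
// per-column constants from compute_truth_table() below (0xF0 for the in1
// column, 0xCC for in2, 0xAA for in3), the sample function packs into a
// single byte as
//
//   fn = (in1 AND in2) OR in3   ==>   func = (0xF0 & 0xCC) | 0xAA = 0xC0 | 0xAA = 0xEA
//
// i.e. the fn column above read bottom-to-top as bits 7..0 (0b11101010 == 0xEA).
//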
2476
uint Compile::eval_macro_logic_op(uint func, uint in1 , uint in2, uint in3) {
2477
int res = 0;
2478
for (int i = 0; i < 8; i++) {
2479
int bit1 = extract_bit(in1, i);
2480
int bit2 = extract_bit(in2, i);
2481
int bit3 = extract_bit(in3, i);
2482
2483
int func_bit_pos = (bit1 << 2 | bit2 << 1 | bit3);
2484
int func_bit = extract_bit(func, func_bit_pos);
2485
2486
res |= func_bit << i;
2487
}
2488
return res;
2489
}
2490
2491
static uint eval_operand(Node* n, ResourceHashtable<Node*,uint>& eval_map) {
2492
assert(n != NULL, "");
2493
assert(eval_map.contains(n), "absent");
2494
return *(eval_map.get(n));
2495
}
2496
2497
static void eval_operands(Node* n,
2498
uint& func1, uint& func2, uint& func3,
2499
ResourceHashtable<Node*,uint>& eval_map) {
2500
assert(is_vector_bitwise_op(n), "");
2501
2502
if (is_vector_unary_bitwise_op(n)) {
2503
Node* opnd = n->in(1);
2504
if (VectorNode::is_vector_bitwise_not_pattern(n) && VectorNode::is_all_ones_vector(opnd)) {
2505
opnd = n->in(2);
2506
}
2507
func1 = eval_operand(opnd, eval_map);
2508
} else if (is_vector_binary_bitwise_op(n)) {
2509
func1 = eval_operand(n->in(1), eval_map);
2510
func2 = eval_operand(n->in(2), eval_map);
2511
} else {
2512
assert(is_vector_ternary_bitwise_op(n), "unknown operation");
2513
func1 = eval_operand(n->in(1), eval_map);
2514
func2 = eval_operand(n->in(2), eval_map);
2515
func3 = eval_operand(n->in(3), eval_map);
2516
}
2517
}
2518
2519
uint Compile::compute_truth_table(Unique_Node_List& partition, Unique_Node_List& inputs) {
2520
assert(inputs.size() <= 3, "sanity");
2521
ResourceMark rm;
2522
uint res = 0;
2523
ResourceHashtable<Node*,uint> eval_map;
2524
2525
// Populate precomputed functions for inputs.
2526
// Each input corresponds to one column of 3 input truth-table.
2527
uint input_funcs[] = { 0xAA, // (_, _, a) -> a
2528
0xCC, // (_, b, _) -> b
2529
0xF0 }; // (c, _, _) -> c
2530
for (uint i = 0; i < inputs.size(); i++) {
2531
eval_map.put(inputs.at(i), input_funcs[i]);
2532
}
2533
2534
for (uint i = 0; i < partition.size(); i++) {
2535
Node* n = partition.at(i);
2536
2537
uint func1 = 0, func2 = 0, func3 = 0;
2538
eval_operands(n, func1, func2, func3, eval_map);
2539
2540
switch (n->Opcode()) {
2541
case Op_OrV:
2542
assert(func3 == 0, "not binary");
2543
res = func1 | func2;
2544
break;
2545
case Op_AndV:
2546
assert(func3 == 0, "not binary");
2547
res = func1 & func2;
2548
break;
2549
case Op_XorV:
2550
if (VectorNode::is_vector_bitwise_not_pattern(n)) {
2551
assert(func2 == 0 && func3 == 0, "not unary");
2552
res = (~func1) & 0xFF;
2553
} else {
2554
assert(func3 == 0, "not binary");
2555
res = func1 ^ func2;
2556
}
2557
break;
2558
case Op_MacroLogicV:
2559
// The ordering of inputs may change while evaluating a sub-tree that
// contains a MacroLogic node as a child, so re-evaluating here makes
// sure the function is computed in the context of the current inputs.
2563
res = eval_macro_logic_op(n->in(4)->get_int(), func1, func2, func3);
2564
break;
2565
2566
default: assert(false, "not supported: %s", n->Name());
2567
}
2568
assert(res <= 0xFF, "invalid");
2569
eval_map.put(n, res);
2570
}
2571
return res;
2572
}
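
// Editorial worked example (illustrative sketch, not from the original sources):
// for a cone rooted at XorV(AndV(a, b), c) with three distinct, non-constant
// inputs discovered in the order a, b, c, the partition is {AndV, XorV} and the
// inputs get the column functions a=0xAA, b=0xCC, c=0xF0, so the table folds as
//
//   eval(AndV) = 0xAA & 0xCC = 0x88
//   eval(XorV) = 0x88 ^ 0xF0 = 0x78
//
// and 0x78 is the function constant passed to MacroLogicVNode::make() in
// xform_to_MacroLogicV() above.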
2573
2574
bool Compile::compute_logic_cone(Node* n, Unique_Node_List& partition, Unique_Node_List& inputs) {
2575
assert(partition.size() == 0, "not empty");
2576
assert(inputs.size() == 0, "not empty");
2577
if (is_vector_ternary_bitwise_op(n)) {
2578
return false;
2579
}
2580
2581
bool is_unary_op = is_vector_unary_bitwise_op(n);
2582
if (is_unary_op) {
2583
assert(collect_unique_inputs(n, partition, inputs) == 1, "not unary");
2584
return false; // too few inputs
2585
}
2586
2587
assert(is_vector_binary_bitwise_op(n), "not binary");
2588
Node* in1 = n->in(1);
2589
Node* in2 = n->in(2);
2590
2591
int in1_unique_inputs_cnt = collect_unique_inputs(in1, partition, inputs);
2592
int in2_unique_inputs_cnt = collect_unique_inputs(in2, partition, inputs);
2593
partition.push(n);
2594
2595
// Too many inputs?
2596
if (inputs.size() > 3) {
2597
partition.clear();
2598
inputs.clear();
2599
{ // Recompute in2 inputs
2600
Unique_Node_List not_used;
2601
in2_unique_inputs_cnt = collect_unique_inputs(in2, not_used, not_used);
2602
}
2603
// Pick the node with minimum number of inputs.
2604
if (in1_unique_inputs_cnt >= 3 && in2_unique_inputs_cnt >= 3) {
2605
return false; // still too many inputs
2606
}
2607
// Recompute partition & inputs.
2608
Node* child = (in1_unique_inputs_cnt < in2_unique_inputs_cnt ? in1 : in2);
2609
collect_unique_inputs(child, partition, inputs);
2610
2611
Node* other_input = (in1_unique_inputs_cnt < in2_unique_inputs_cnt ? in2 : in1);
2612
inputs.push(other_input);
2613
2614
partition.push(n);
2615
}
2616
2617
return (partition.size() == 2 || partition.size() == 3) &&
2618
(inputs.size() == 2 || inputs.size() == 3);
2619
}
2620
2621
2622
void Compile::process_logic_cone_root(PhaseIterGVN &igvn, Node *n, VectorSet &visited) {
2623
assert(is_vector_bitwise_op(n), "not a root");
2624
2625
visited.set(n->_idx);
2626
2627
// 1) Do a DFS walk over the logic cone.
2628
for (uint i = 1; i < n->req(); i++) {
2629
Node* in = n->in(i);
2630
if (!visited.test(in->_idx) && is_vector_bitwise_op(in)) {
2631
process_logic_cone_root(igvn, in, visited);
2632
}
2633
}
2634
2635
// 2) Bottom up traversal: Merge node[s] with
2636
// the parent to form macro logic node.
2637
Unique_Node_List partition;
2638
Unique_Node_List inputs;
2639
if (compute_logic_cone(n, partition, inputs)) {
2640
const TypeVect* vt = n->bottom_type()->is_vect();
2641
Node* macro_logic = xform_to_MacroLogicV(igvn, vt, partition, inputs);
2642
igvn.replace_node(n, macro_logic);
2643
}
2644
}
2645
2646
void Compile::optimize_logic_cones(PhaseIterGVN &igvn) {
2647
ResourceMark rm;
2648
if (Matcher::match_rule_supported(Op_MacroLogicV)) {
2649
Unique_Node_List list;
2650
collect_logic_cone_roots(list);
2651
2652
while (list.size() > 0) {
2653
Node* n = list.pop();
2654
const TypeVect* vt = n->bottom_type()->is_vect();
2655
bool supported = Matcher::match_rule_supported_vector(Op_MacroLogicV, vt->length(), vt->element_basic_type());
2656
if (supported) {
2657
VectorSet visited(comp_arena());
2658
process_logic_cone_root(igvn, n, visited);
2659
}
2660
}
2661
}
2662
}
2663
2664
//------------------------------Code_Gen---------------------------------------
2665
// Given a graph, generate code for it
2666
void Compile::Code_Gen() {
2667
if (failing()) {
2668
return;
2669
}
2670
2671
// Perform instruction selection. You might think we could reclaim Matcher
2672
// memory PDQ, but actually the Matcher is used in generating spill code.
2673
// Internals of the Matcher (including some VectorSets) must remain live
2674
// for a while - thus I cannot reclaim Matcher memory lest a VectorSet usage
2675
// set a bit in reclaimed memory.
2676
2677
// In debug mode can dump m._nodes.dump() for mapping of ideal to machine
2678
// nodes. Mapping is only valid at the root of each matched subtree.
2679
NOT_PRODUCT( verify_graph_edges(); )
2680
2681
Matcher matcher;
2682
_matcher = &matcher;
2683
{
2684
TracePhase tp("matcher", &timers[_t_matcher]);
2685
matcher.match();
2686
if (failing()) {
2687
return;
2688
}
2689
}
2690
// In debug mode can dump m._nodes.dump() for mapping of ideal to machine
2691
// nodes. Mapping is only valid at the root of each matched subtree.
2692
NOT_PRODUCT( verify_graph_edges(); )
2693
2694
// If you have too many nodes, or if matching has failed, bail out
2695
check_node_count(0, "out of nodes matching instructions");
2696
if (failing()) {
2697
return;
2698
}
2699
2700
print_method(PHASE_MATCHING, 2);
2701
2702
// Build a proper-looking CFG
2703
PhaseCFG cfg(node_arena(), root(), matcher);
2704
_cfg = &cfg;
2705
{
2706
TracePhase tp("scheduler", &timers[_t_scheduler]);
2707
bool success = cfg.do_global_code_motion();
2708
if (!success) {
2709
return;
2710
}
2711
2712
print_method(PHASE_GLOBAL_CODE_MOTION, 2);
2713
NOT_PRODUCT( verify_graph_edges(); )
2714
cfg.verify();
2715
}
2716
2717
PhaseChaitin regalloc(unique(), cfg, matcher, false);
2718
_regalloc = &regalloc;
2719
{
2720
TracePhase tp("regalloc", &timers[_t_registerAllocation]);
2721
// Perform register allocation. After Chaitin, use-def chains are
2722
// no longer accurate (at spill code) and so must be ignored.
2723
// Node->LRG->reg mappings are still accurate.
2724
_regalloc->Register_Allocate();
2725
2726
// Bail out if the allocator builds too many nodes
2727
if (failing()) {
2728
return;
2729
}
2730
}
2731
2732
// Prior to register allocation we kept empty basic blocks in case the
// allocator needed a place to spill. After register allocation we are
// not adding any new instructions. If any basic block is empty, we
// can now safely remove it.
2736
{
2737
TracePhase tp("blockOrdering", &timers[_t_blockOrdering]);
2738
cfg.remove_empty_blocks();
2739
if (do_freq_based_layout()) {
2740
PhaseBlockLayout layout(cfg);
2741
} else {
2742
cfg.set_loop_alignment();
2743
}
2744
cfg.fixup_flow();
2745
}
2746
2747
// Apply peephole optimizations
2748
if( OptoPeephole ) {
2749
TracePhase tp("peephole", &timers[_t_peephole]);
2750
PhasePeephole peep( _regalloc, cfg);
2751
peep.do_transform();
2752
}
2753
2754
// Do late expand if CPU requires this.
2755
if (Matcher::require_postalloc_expand) {
2756
TracePhase tp("postalloc_expand", &timers[_t_postalloc_expand]);
2757
cfg.postalloc_expand(_regalloc);
2758
}
2759
2760
// Convert Nodes to instruction bits in a buffer
2761
{
2762
TracePhase tp("output", &timers[_t_output]);
2763
PhaseOutput output;
2764
output.Output();
2765
if (failing()) return;
2766
output.install();
2767
}
2768
2769
print_method(PHASE_FINAL_CODE);
2770
2771
// He's dead, Jim.
2772
_cfg = (PhaseCFG*)((intptr_t)0xdeadbeef);
2773
_regalloc = (PhaseChaitin*)((intptr_t)0xdeadbeef);
2774
}
2775
2776
//------------------------------Final_Reshape_Counts---------------------------
2777
// This class defines counters to help identify when a method
2778
// may/must be executed using hardware with only 24-bit precision.
2779
struct Final_Reshape_Counts : public StackObj {
2780
int _call_count; // count non-inlined 'common' calls
2781
int _float_count; // count float ops requiring 24-bit precision
2782
int _double_count; // count double ops requiring more precision
2783
int _java_call_count; // count non-inlined 'java' calls
2784
int _inner_loop_count; // count loops which need alignment
2785
VectorSet _visited; // Visitation flags
2786
Node_List _tests; // Set of IfNodes & PCTableNodes
2787
2788
Final_Reshape_Counts() :
2789
_call_count(0), _float_count(0), _double_count(0),
2790
_java_call_count(0), _inner_loop_count(0) { }
2791
2792
void inc_call_count () { _call_count ++; }
2793
void inc_float_count () { _float_count ++; }
2794
void inc_double_count() { _double_count++; }
2795
void inc_java_call_count() { _java_call_count++; }
2796
void inc_inner_loop_count() { _inner_loop_count++; }
2797
2798
int get_call_count () const { return _call_count ; }
2799
int get_float_count () const { return _float_count ; }
2800
int get_double_count() const { return _double_count; }
2801
int get_java_call_count() const { return _java_call_count; }
2802
int get_inner_loop_count() const { return _inner_loop_count; }
2803
};
2804
2805
// Eliminate trivially redundant StoreCMs and accumulate their
2806
// precedence edges.
2807
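//
// Editorial sketch of the effect (illustrative, not from the original comment):
// for a chain of StoreCM nodes that share the same address and value, where each
// is the single user of the one below it,
//
//   before:  n(StoreCM) --mem--> StoreCM#2 --mem--> StoreCM#1 --mem--> M
//   after:   n(StoreCM) --mem--> M
//
// with the OopStore inputs of the eliminated #2 and #1 accumulated as precedence
// edges on n. Partially redundant StoreCMs are left alone, since eliminating
// them would require cloning copies down the other paths.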
void Compile::eliminate_redundant_card_marks(Node* n) {
2808
assert(n->Opcode() == Op_StoreCM, "expected StoreCM");
2809
if (n->in(MemNode::Address)->outcnt() > 1) {
2810
// There are multiple users of the same address so it might be
2811
// possible to eliminate some of the StoreCMs
2812
Node* mem = n->in(MemNode::Memory);
2813
Node* adr = n->in(MemNode::Address);
2814
Node* val = n->in(MemNode::ValueIn);
2815
Node* prev = n;
2816
bool done = false;
2817
// Walk the chain of StoreCMs eliminating ones that match. As
2818
// long as it's a chain of single users then the optimization is
2819
// safe. Eliminating partially redundant StoreCMs would require
2820
// cloning copies down the other paths.
2821
while (mem->Opcode() == Op_StoreCM && mem->outcnt() == 1 && !done) {
2822
if (adr == mem->in(MemNode::Address) &&
2823
val == mem->in(MemNode::ValueIn)) {
2824
// redundant StoreCM
2825
if (mem->req() > MemNode::OopStore) {
2826
// Hasn't been processed by this code yet.
2827
n->add_prec(mem->in(MemNode::OopStore));
2828
} else {
2829
// Already converted to precedence edge
2830
for (uint i = mem->req(); i < mem->len(); i++) {
2831
// Accumulate any precedence edges
2832
if (mem->in(i) != NULL) {
2833
n->add_prec(mem->in(i));
2834
}
2835
}
2836
// Everything above this point has been processed.
2837
done = true;
2838
}
2839
// Eliminate the previous StoreCM
2840
prev->set_req(MemNode::Memory, mem->in(MemNode::Memory));
2841
assert(mem->outcnt() == 0, "should be dead");
2842
mem->disconnect_inputs(this);
2843
} else {
2844
prev = mem;
2845
}
2846
mem = prev->in(MemNode::Memory);
2847
}
2848
}
2849
}
2850
2851
//------------------------------final_graph_reshaping_impl----------------------
2852
// Implement items 1-5 from final_graph_reshaping below.
2853
void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
2854
2855
if ( n->outcnt() == 0 ) return; // dead node
2856
uint nop = n->Opcode();
2857
2858
// Check for 2-input instruction with "last use" on right input.
2859
// Swap to left input. Implements item (2).
2860
if( n->req() == 3 && // two-input instruction
2861
n->in(1)->outcnt() > 1 && // left use is NOT a last use
2862
(!n->in(1)->is_Phi() || n->in(1)->in(2) != n) && // it is not data loop
2863
n->in(2)->outcnt() == 1 &&// right use IS a last use
2864
!n->in(2)->is_Con() ) { // right use is not a constant
2865
// Check for commutative opcode
2866
switch( nop ) {
2867
case Op_AddI: case Op_AddF: case Op_AddD: case Op_AddL:
2868
case Op_MaxI: case Op_MaxL: case Op_MaxF: case Op_MaxD:
2869
case Op_MinI: case Op_MinL: case Op_MinF: case Op_MinD:
2870
case Op_MulI: case Op_MulF: case Op_MulD: case Op_MulL:
2871
case Op_AndL: case Op_XorL: case Op_OrL:
2872
case Op_AndI: case Op_XorI: case Op_OrI: {
2873
// Move "last use" input to left by swapping inputs
2874
n->swap_edges(1, 2);
2875
break;
2876
}
2877
default:
2878
break;
2879
}
2880
}
2881
2882
#ifdef ASSERT
2883
if( n->is_Mem() ) {
2884
int alias_idx = get_alias_index(n->as_Mem()->adr_type());
2885
assert( n->in(0) != NULL || alias_idx != Compile::AliasIdxRaw ||
2886
// oop will be recorded in oop map if load crosses safepoint
2887
n->is_Load() && (n->as_Load()->bottom_type()->isa_oopptr() ||
2888
LoadNode::is_immutable_value(n->in(MemNode::Address))),
2889
"raw memory operations should have control edge");
2890
}
2891
if (n->is_MemBar()) {
2892
MemBarNode* mb = n->as_MemBar();
2893
if (mb->trailing_store() || mb->trailing_load_store()) {
2894
assert(mb->leading_membar()->trailing_membar() == mb, "bad membar pair");
2895
Node* mem = BarrierSet::barrier_set()->barrier_set_c2()->step_over_gc_barrier(mb->in(MemBarNode::Precedent));
2896
assert((mb->trailing_store() && mem->is_Store() && mem->as_Store()->is_release()) ||
2897
(mb->trailing_load_store() && mem->is_LoadStore()), "missing mem op");
2898
} else if (mb->leading()) {
2899
assert(mb->trailing_membar()->leading_membar() == mb, "bad membar pair");
2900
}
2901
}
2902
#endif
2903
// Count FPU ops and common calls, implements item (3)
2904
bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->final_graph_reshaping(this, n, nop);
2905
if (!gc_handled) {
2906
final_graph_reshaping_main_switch(n, frc, nop);
2907
}
2908
2909
// Collect CFG split points
2910
if (n->is_MultiBranch() && !n->is_RangeCheck()) {
2911
frc._tests.push(n);
2912
}
2913
}
2914
2915
void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& frc, uint nop) {
2916
switch( nop ) {
2917
// Count all float operations that may use FPU
2918
case Op_AddF:
2919
case Op_SubF:
2920
case Op_MulF:
2921
case Op_DivF:
2922
case Op_NegF:
2923
case Op_ModF:
2924
case Op_ConvI2F:
2925
case Op_ConF:
2926
case Op_CmpF:
2927
case Op_CmpF3:
2928
case Op_StoreF:
2929
case Op_LoadF:
2930
// case Op_ConvL2F: // longs are split into 32-bit halves
2931
frc.inc_float_count();
2932
break;
2933
2934
case Op_ConvF2D:
2935
case Op_ConvD2F:
2936
frc.inc_float_count();
2937
frc.inc_double_count();
2938
break;
2939
2940
// Count all double operations that may use FPU
2941
case Op_AddD:
2942
case Op_SubD:
2943
case Op_MulD:
2944
case Op_DivD:
2945
case Op_NegD:
2946
case Op_ModD:
2947
case Op_ConvI2D:
2948
case Op_ConvD2I:
2949
// case Op_ConvL2D: // handled by leaf call
2950
// case Op_ConvD2L: // handled by leaf call
2951
case Op_ConD:
2952
case Op_CmpD:
2953
case Op_CmpD3:
2954
case Op_StoreD:
2955
case Op_LoadD:
2956
case Op_LoadD_unaligned:
2957
frc.inc_double_count();
2958
break;
2959
case Op_Opaque1: // Remove Opaque Nodes before matching
2960
case Op_Opaque2: // Remove Opaque Nodes before matching
2961
case Op_Opaque3:
2962
n->subsume_by(n->in(1), this);
2963
break;
2964
case Op_CallStaticJava:
2965
case Op_CallJava:
2966
case Op_CallDynamicJava:
2967
frc.inc_java_call_count(); // Count java call site;
2968
case Op_CallRuntime:
2969
case Op_CallLeaf:
2970
case Op_CallLeafVector:
2971
case Op_CallNative:
2972
case Op_CallLeafNoFP: {
2973
assert (n->is_Call(), "");
2974
CallNode *call = n->as_Call();
2975
// Count call sites where the FP mode bit would have to be flipped.
2976
// Do not count uncommon runtime calls:
2977
// uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking,
2978
// _new_Java, _new_typeArray, _new_objArray, _rethrow_Java, ...
2979
if (!call->is_CallStaticJava() || !call->as_CallStaticJava()->_name) {
2980
frc.inc_call_count(); // Count the call site
2981
} else { // See if uncommon argument is shared
2982
Node *n = call->in(TypeFunc::Parms);
2983
int nop = n->Opcode();
2984
// Clone shared simple arguments to uncommon calls, item (1).
2985
if (n->outcnt() > 1 &&
2986
!n->is_Proj() &&
2987
nop != Op_CreateEx &&
2988
nop != Op_CheckCastPP &&
2989
nop != Op_DecodeN &&
2990
nop != Op_DecodeNKlass &&
2991
!n->is_Mem() &&
2992
!n->is_Phi()) {
2993
Node *x = n->clone();
2994
call->set_req(TypeFunc::Parms, x);
2995
}
2996
}
2997
break;
2998
}
2999
3000
case Op_StoreCM:
3001
{
3002
// Convert OopStore dependence into precedence edge
3003
Node* prec = n->in(MemNode::OopStore);
3004
n->del_req(MemNode::OopStore);
3005
n->add_prec(prec);
3006
eliminate_redundant_card_marks(n);
3007
}
3008
3009
// fall through
3010
3011
case Op_StoreB:
3012
case Op_StoreC:
3013
case Op_StorePConditional:
3014
case Op_StoreI:
3015
case Op_StoreL:
3016
case Op_StoreIConditional:
3017
case Op_StoreLConditional:
3018
case Op_CompareAndSwapB:
3019
case Op_CompareAndSwapS:
3020
case Op_CompareAndSwapI:
3021
case Op_CompareAndSwapL:
3022
case Op_CompareAndSwapP:
3023
case Op_CompareAndSwapN:
3024
case Op_WeakCompareAndSwapB:
3025
case Op_WeakCompareAndSwapS:
3026
case Op_WeakCompareAndSwapI:
3027
case Op_WeakCompareAndSwapL:
3028
case Op_WeakCompareAndSwapP:
3029
case Op_WeakCompareAndSwapN:
3030
case Op_CompareAndExchangeB:
3031
case Op_CompareAndExchangeS:
3032
case Op_CompareAndExchangeI:
3033
case Op_CompareAndExchangeL:
3034
case Op_CompareAndExchangeP:
3035
case Op_CompareAndExchangeN:
3036
case Op_GetAndAddS:
3037
case Op_GetAndAddB:
3038
case Op_GetAndAddI:
3039
case Op_GetAndAddL:
3040
case Op_GetAndSetS:
3041
case Op_GetAndSetB:
3042
case Op_GetAndSetI:
3043
case Op_GetAndSetL:
3044
case Op_GetAndSetP:
3045
case Op_GetAndSetN:
3046
case Op_StoreP:
3047
case Op_StoreN:
3048
case Op_StoreNKlass:
3049
case Op_LoadB:
3050
case Op_LoadUB:
3051
case Op_LoadUS:
3052
case Op_LoadI:
3053
case Op_LoadKlass:
3054
case Op_LoadNKlass:
3055
case Op_LoadL:
3056
case Op_LoadL_unaligned:
3057
case Op_LoadPLocked:
3058
case Op_LoadP:
3059
case Op_LoadN:
3060
case Op_LoadRange:
3061
case Op_LoadS:
3062
break;
3063
3064
case Op_AddP: { // Assert sane base pointers
3065
Node *addp = n->in(AddPNode::Address);
3066
assert( !addp->is_AddP() ||
3067
addp->in(AddPNode::Base)->is_top() || // Top OK for allocation
3068
addp->in(AddPNode::Base) == n->in(AddPNode::Base),
3069
"Base pointers must match (addp %u)", addp->_idx );
3070
#ifdef _LP64
3071
if ((UseCompressedOops || UseCompressedClassPointers) &&
3072
addp->Opcode() == Op_ConP &&
3073
addp == n->in(AddPNode::Base) &&
3074
n->in(AddPNode::Offset)->is_Con()) {
3075
// Whether the transformation of ConP to ConN+DecodeN is beneficial depends
// on the platform and on the compressed oops mode.
3077
// Use addressing with narrow klass to load with offset on x86.
3078
// Some platforms can use the constant pool to load ConP.
3079
// Do this transformation here since IGVN will convert ConN back to ConP.
3080
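// Editorial sketch (illustrative only): when a matching ConN/ConNKlass constant
// already hangs off the root node, the rewrite below turns
//
//   AddP(ConP #const, ConP #const, offset)
// into
//   AddP(DecodeN(ConN #const), DecodeN(ConN #const), offset)
//
// (one DecodeN node shared by the base and the address), so the matcher can fold
// the decode into the addressing mode, e.g. [R12 + narrow_oop_reg<<3 + offset].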
const Type* t = addp->bottom_type();
3081
bool is_oop = t->isa_oopptr() != NULL;
3082
bool is_klass = t->isa_klassptr() != NULL;
3083
3084
if ((is_oop && Matcher::const_oop_prefer_decode() ) ||
3085
(is_klass && Matcher::const_klass_prefer_decode())) {
3086
Node* nn = NULL;
3087
3088
int op = is_oop ? Op_ConN : Op_ConNKlass;
3089
3090
// Look for existing ConN node of the same exact type.
3091
Node* r = root();
3092
uint cnt = r->outcnt();
3093
for (uint i = 0; i < cnt; i++) {
3094
Node* m = r->raw_out(i);
3095
if (m!= NULL && m->Opcode() == op &&
3096
m->bottom_type()->make_ptr() == t) {
3097
nn = m;
3098
break;
3099
}
3100
}
3101
if (nn != NULL) {
3102
// Decode a narrow oop to match address
3103
// [R12 + narrow_oop_reg<<3 + offset]
3104
if (is_oop) {
3105
nn = new DecodeNNode(nn, t);
3106
} else {
3107
nn = new DecodeNKlassNode(nn, t);
3108
}
3109
// Check for succeeding AddP which uses the same Base.
3110
// Otherwise we will run into the assertion above when visiting that guy.
3111
for (uint i = 0; i < n->outcnt(); ++i) {
3112
Node *out_i = n->raw_out(i);
3113
if (out_i && out_i->is_AddP() && out_i->in(AddPNode::Base) == addp) {
3114
out_i->set_req(AddPNode::Base, nn);
3115
#ifdef ASSERT
3116
for (uint j = 0; j < out_i->outcnt(); ++j) {
3117
Node *out_j = out_i->raw_out(j);
3118
assert(out_j == NULL || !out_j->is_AddP() || out_j->in(AddPNode::Base) != addp,
3119
"more than 2 AddP nodes in a chain (out_j %u)", out_j->_idx);
3120
}
3121
#endif
3122
}
3123
}
3124
n->set_req(AddPNode::Base, nn);
3125
n->set_req(AddPNode::Address, nn);
3126
if (addp->outcnt() == 0) {
3127
addp->disconnect_inputs(this);
3128
}
3129
}
3130
}
3131
}
3132
#endif
3133
break;
3134
}
3135
3136
case Op_CastPP: {
3137
// Remove CastPP nodes to gain more freedom during scheduling but
3138
// keep the dependency they encode as control or precedence edges
3139
// (if control is set already) on memory operations. Some CastPP
3140
// nodes don't have a control (don't carry a dependency): skip
3141
// those.
3142
if (n->in(0) != NULL) {
3143
ResourceMark rm;
3144
Unique_Node_List wq;
3145
wq.push(n);
3146
for (uint next = 0; next < wq.size(); ++next) {
3147
Node *m = wq.at(next);
3148
for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
3149
Node* use = m->fast_out(i);
3150
if (use->is_Mem() || use->is_EncodeNarrowPtr()) {
3151
use->ensure_control_or_add_prec(n->in(0));
3152
} else {
3153
switch(use->Opcode()) {
3154
case Op_AddP:
3155
case Op_DecodeN:
3156
case Op_DecodeNKlass:
3157
case Op_CheckCastPP:
3158
case Op_CastPP:
3159
wq.push(use);
3160
break;
3161
}
3162
}
3163
}
3164
}
3165
}
3166
const bool is_LP64 = LP64_ONLY(true) NOT_LP64(false);
3167
if (is_LP64 && n->in(1)->is_DecodeN() && Matcher::gen_narrow_oop_implicit_null_checks()) {
3168
Node* in1 = n->in(1);
3169
const Type* t = n->bottom_type();
3170
Node* new_in1 = in1->clone();
3171
new_in1->as_DecodeN()->set_type(t);
3172
3173
if (!Matcher::narrow_oop_use_complex_address()) {
3174
//
3175
// x86, ARM and friends can handle 2 adds in addressing mode
3176
// and Matcher can fold a DecodeN node into address by using
3177
// a narrow oop directly and do implicit NULL check in address:
3178
//
3179
// [R12 + narrow_oop_reg<<3 + offset]
3180
// NullCheck narrow_oop_reg
3181
//
3182
// On other platforms (Sparc) we have to keep the new DecodeN node and
3183
// use it to do implicit NULL check in address:
3184
//
3185
// decode_not_null narrow_oop_reg, base_reg
3186
// [base_reg + offset]
3187
// NullCheck base_reg
3188
//
3189
// Pin the new DecodeN node to the non-null path on these platforms (Sparc)
3190
// to record which NULL check the new DecodeN node corresponds to, so it
// can be used as the value in implicit_null_check().
3192
//
3193
new_in1->set_req(0, n->in(0));
3194
}
3195
3196
n->subsume_by(new_in1, this);
3197
if (in1->outcnt() == 0) {
3198
in1->disconnect_inputs(this);
3199
}
3200
} else {
3201
n->subsume_by(n->in(1), this);
3202
if (n->outcnt() == 0) {
3203
n->disconnect_inputs(this);
3204
}
3205
}
3206
break;
3207
}
3208
#ifdef _LP64
3209
case Op_CmpP:
3210
// Do this transformation here to preserve CmpPNode::sub() and
3211
// other TypePtr related Ideal optimizations (for example, ptr nullness).
3212
if (n->in(1)->is_DecodeNarrowPtr() || n->in(2)->is_DecodeNarrowPtr()) {
3213
Node* in1 = n->in(1);
3214
Node* in2 = n->in(2);
3215
if (!in1->is_DecodeNarrowPtr()) {
3216
in2 = in1;
3217
in1 = n->in(2);
3218
}
3219
assert(in1->is_DecodeNarrowPtr(), "sanity");
3220
3221
Node* new_in2 = NULL;
3222
if (in2->is_DecodeNarrowPtr()) {
3223
assert(in2->Opcode() == in1->Opcode(), "must be same node type");
3224
new_in2 = in2->in(1);
3225
} else if (in2->Opcode() == Op_ConP) {
3226
const Type* t = in2->bottom_type();
3227
if (t == TypePtr::NULL_PTR) {
3228
assert(in1->is_DecodeN(), "compare klass to null?");
3229
// Don't convert CmpP null check into CmpN if compressed
3230
// oops implicit null check is not generated.
3231
// This allows generating a normal oop implicit null check.
3232
if (Matcher::gen_narrow_oop_implicit_null_checks())
3233
new_in2 = ConNode::make(TypeNarrowOop::NULL_PTR);
3234
//
3235
// This transformation together with CastPP transformation above
3236
// will generate code for implicit NULL checks for compressed oops.
3237
//
3238
// The original code after Optimize()
3239
//
3240
// LoadN memory, narrow_oop_reg
3241
// decode narrow_oop_reg, base_reg
3242
// CmpP base_reg, NULL
3243
// CastPP base_reg // NotNull
3244
// Load [base_reg + offset], val_reg
3245
//
3246
// after these transformations will be
3247
//
3248
// LoadN memory, narrow_oop_reg
3249
// CmpN narrow_oop_reg, NULL
3250
// decode_not_null narrow_oop_reg, base_reg
3251
// Load [base_reg + offset], val_reg
3252
//
3253
// and the uncommon path (== NULL) will use narrow_oop_reg directly
3254
// since narrow oops can be used in debug info now (see the code in
3255
// final_graph_reshaping_walk()).
3256
//
3257
// At the end the code will be matched to
3258
// on x86:
3259
//
3260
// Load_narrow_oop memory, narrow_oop_reg
3261
// Load [R12 + narrow_oop_reg<<3 + offset], val_reg
3262
// NullCheck narrow_oop_reg
3263
//
3264
// and on sparc:
3265
//
3266
// Load_narrow_oop memory, narrow_oop_reg
3267
// decode_not_null narrow_oop_reg, base_reg
3268
// Load [base_reg + offset], val_reg
3269
// NullCheck base_reg
3270
//
3271
} else if (t->isa_oopptr()) {
3272
new_in2 = ConNode::make(t->make_narrowoop());
3273
} else if (t->isa_klassptr()) {
3274
new_in2 = ConNode::make(t->make_narrowklass());
3275
}
3276
}
3277
if (new_in2 != NULL) {
3278
Node* cmpN = new CmpNNode(in1->in(1), new_in2);
3279
n->subsume_by(cmpN, this);
3280
if (in1->outcnt() == 0) {
3281
in1->disconnect_inputs(this);
3282
}
3283
if (in2->outcnt() == 0) {
3284
in2->disconnect_inputs(this);
3285
}
3286
}
3287
}
3288
break;
3289
3290
case Op_DecodeN:
3291
case Op_DecodeNKlass:
3292
assert(!n->in(1)->is_EncodeNarrowPtr(), "should be optimized out");
3293
// DecodeN could be pinned when it can't be folded into
3294
// an address expression, see the code for Op_CastPP above.
3295
assert(n->in(0) == NULL || (UseCompressedOops && !Matcher::narrow_oop_use_complex_address()), "no control");
3296
break;
3297
3298
case Op_EncodeP:
3299
case Op_EncodePKlass: {
3300
Node* in1 = n->in(1);
3301
if (in1->is_DecodeNarrowPtr()) {
3302
n->subsume_by(in1->in(1), this);
3303
} else if (in1->Opcode() == Op_ConP) {
3304
const Type* t = in1->bottom_type();
3305
if (t == TypePtr::NULL_PTR) {
3306
assert(t->isa_oopptr(), "null klass?");
3307
n->subsume_by(ConNode::make(TypeNarrowOop::NULL_PTR), this);
3308
} else if (t->isa_oopptr()) {
3309
n->subsume_by(ConNode::make(t->make_narrowoop()), this);
3310
} else if (t->isa_klassptr()) {
3311
n->subsume_by(ConNode::make(t->make_narrowklass()), this);
3312
}
3313
}
3314
if (in1->outcnt() == 0) {
3315
in1->disconnect_inputs(this);
3316
}
3317
break;
3318
}
3319
3320
case Op_Proj: {
3321
if (OptimizeStringConcat || IncrementalInline) {
3322
ProjNode* proj = n->as_Proj();
3323
if (proj->_is_io_use) {
3324
assert(proj->_con == TypeFunc::I_O || proj->_con == TypeFunc::Memory, "");
3325
// Separate projections were used for the exception path which
3326
// are normally removed by a late inline. If the call wasn't inlined
3327
// then they will hang around and should just be replaced with
3328
// the original one. Merge them.
3329
Node* non_io_proj = proj->in(0)->as_Multi()->proj_out_or_null(proj->_con, false /*is_io_use*/);
3330
if (non_io_proj != NULL) {
3331
proj->subsume_by(non_io_proj , this);
3332
}
3333
}
3334
}
3335
break;
3336
}
3337
3338
case Op_Phi:
3339
if (n->as_Phi()->bottom_type()->isa_narrowoop() || n->as_Phi()->bottom_type()->isa_narrowklass()) {
3340
// The EncodeP optimization may create a Phi with the same edges
// for all paths. Such a Phi is not handled well by the register allocator.
3342
Node* unique_in = n->in(1);
3343
assert(unique_in != NULL, "");
3344
uint cnt = n->req();
3345
for (uint i = 2; i < cnt; i++) {
3346
Node* m = n->in(i);
3347
assert(m != NULL, "");
3348
if (unique_in != m)
3349
unique_in = NULL;
3350
}
3351
if (unique_in != NULL) {
3352
n->subsume_by(unique_in, this);
3353
}
3354
}
3355
break;
3356
3357
#endif
3358
3359
#ifdef ASSERT
3360
case Op_CastII:
3361
// Verify that all range check dependent CastII nodes were removed.
3362
if (n->isa_CastII()->has_range_check()) {
3363
n->dump(3);
3364
assert(false, "Range check dependent CastII node was not removed");
3365
}
3366
break;
3367
#endif
3368
3369
case Op_ModI:
3370
if (UseDivMod) {
3371
// Check if a%b and a/b both exist
3372
Node* d = n->find_similar(Op_DivI);
3373
if (d) {
3374
// Replace them with a fused divmod if supported
3375
if (Matcher::has_match_rule(Op_DivModI)) {
3376
DivModINode* divmod = DivModINode::make(n);
3377
d->subsume_by(divmod->div_proj(), this);
3378
n->subsume_by(divmod->mod_proj(), this);
3379
} else {
3380
// replace a%b with a-((a/b)*b)
3381
Node* mult = new MulINode(d, d->in(2));
3382
Node* sub = new SubINode(d->in(1), mult);
3383
n->subsume_by(sub, this);
3384
}
3385
}
3386
}
3387
break;
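// Illustrative sketch (assumed values, not part of the transformation itself):
// when a/b and a%b both exist and no DivModI match rule is available, the
// remainder is recomputed from the existing quotient:
//   int q = a / b;        // the DivI node that is already present
//   int r = a - q * b;    // SubI(a, MulI(q, b)) replaces the ModI
// so only one hardware division remains; with a DivModI rule both results come
// from a single fused node instead.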
3388
3389
case Op_ModL:
3390
if (UseDivMod) {
3391
// Check if a%b and a/b both exist
3392
Node* d = n->find_similar(Op_DivL);
3393
if (d) {
3394
// Replace them with a fused divmod if supported
3395
if (Matcher::has_match_rule(Op_DivModL)) {
3396
DivModLNode* divmod = DivModLNode::make(n);
3397
d->subsume_by(divmod->div_proj(), this);
3398
n->subsume_by(divmod->mod_proj(), this);
3399
} else {
3400
// replace a%b with a-((a/b)*b)
3401
Node* mult = new MulLNode(d, d->in(2));
3402
Node* sub = new SubLNode(d->in(1), mult);
3403
n->subsume_by(sub, this);
3404
}
3405
}
3406
}
3407
break;
3408
3409
case Op_LoadVector:
3410
case Op_StoreVector:
3411
case Op_LoadVectorGather:
3412
case Op_StoreVectorScatter:
3413
case Op_VectorMaskGen:
3414
case Op_LoadVectorMasked:
3415
case Op_StoreVectorMasked:
3416
break;
3417
3418
case Op_AddReductionVI:
3419
case Op_AddReductionVL:
3420
case Op_AddReductionVF:
3421
case Op_AddReductionVD:
3422
case Op_MulReductionVI:
3423
case Op_MulReductionVL:
3424
case Op_MulReductionVF:
3425
case Op_MulReductionVD:
3426
case Op_MinReductionV:
3427
case Op_MaxReductionV:
3428
case Op_AndReductionV:
3429
case Op_OrReductionV:
3430
case Op_XorReductionV:
3431
break;
3432
3433
case Op_PackB:
3434
case Op_PackS:
3435
case Op_PackI:
3436
case Op_PackF:
3437
case Op_PackL:
3438
case Op_PackD:
3439
if (n->req()-1 > 2) {
3440
// Replace many operand PackNodes with a binary tree for matching
3441
PackNode* p = (PackNode*) n;
3442
Node* btp = p->binary_tree_pack(1, n->req());
3443
n->subsume_by(btp, this);
3444
}
3445
break;
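// Illustrative sketch (hypothetical 4-operand example): a flat PackI(a, b, c, d)
// is rebuilt as a binary tree, roughly
//   Pack(Pack(a, b), Pack(c, d))
// (the exact node kinds depend on the element type), so the matcher only ever
// sees pack nodes with at most two value inputs.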
3446
case Op_Loop:
3447
assert(!n->as_Loop()->is_transformed_long_inner_loop() || _loop_opts_cnt == 0, "should have been turned into a counted loop");
3448
case Op_CountedLoop:
3449
case Op_LongCountedLoop:
3450
case Op_OuterStripMinedLoop:
3451
if (n->as_Loop()->is_inner_loop()) {
3452
frc.inc_inner_loop_count();
3453
}
3454
n->as_Loop()->verify_strip_mined(0);
3455
break;
3456
case Op_LShiftI:
3457
case Op_RShiftI:
3458
case Op_URShiftI:
3459
case Op_LShiftL:
3460
case Op_RShiftL:
3461
case Op_URShiftL:
3462
if (Matcher::need_masked_shift_count) {
3463
// The cpu's shift instructions don't restrict the count to the
3464
// lower 5/6 bits. We need to do the masking ourselves.
3465
Node* in2 = n->in(2);
3466
juint mask = (n->bottom_type() == TypeInt::INT) ? (BitsPerInt - 1) : (BitsPerLong - 1);
3467
const TypeInt* t = in2->find_int_type();
3468
if (t != NULL && t->is_con()) {
3469
juint shift = t->get_con();
3470
if (shift > mask) { // Unsigned cmp
3471
n->set_req(2, ConNode::make(TypeInt::make(shift & mask)));
3472
}
3473
} else {
3474
if (t == NULL || t->_lo < 0 || t->_hi > (int)mask) {
3475
Node* shift = new AndINode(in2, ConNode::make(TypeInt::make(mask)));
3476
n->set_req(2, shift);
3477
}
3478
}
3479
if (in2->outcnt() == 0) { // Remove dead node
3480
in2->disconnect_inputs(this);
3481
}
3482
}
3483
break;
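// Illustrative sketch (assumed values): with a constant count the mask is folded
// at compile time, e.g. for an int shift
//   x << 37   ==>   x << (37 & 31)   ==>   x << 5
// while a non-constant count whose type may exceed the mask gets an explicit
//   AndI(count, 31)   (or AndI(count, 63) for long shifts)
// so the shift instruction always sees an already-masked count.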
3484
case Op_MemBarStoreStore:
3485
case Op_MemBarRelease:
3486
// Break the link with AllocateNode: it is no longer useful and
3487
// confuses register allocation.
3488
if (n->req() > MemBarNode::Precedent) {
3489
n->set_req(MemBarNode::Precedent, top());
3490
}
3491
break;
3492
case Op_MemBarAcquire: {
3493
if (n->as_MemBar()->trailing_load() && n->req() > MemBarNode::Precedent) {
3494
// At parse time, the trailing MemBarAcquire for a volatile load
3495
// is created with an edge to the load. After optimizations,
3496
// that input may be a chain of Phis. If those phis have no
3497
// other use, then the MemBarAcquire keeps them alive and
3498
// register allocation can be confused.
3499
ResourceMark rm;
3500
Unique_Node_List wq;
3501
wq.push(n->in(MemBarNode::Precedent));
3502
n->set_req(MemBarNode::Precedent, top());
3503
while (wq.size() > 0) {
3504
Node* m = wq.pop();
3505
if (m->outcnt() == 0) {
3506
for (uint j = 0; j < m->req(); j++) {
3507
Node* in = m->in(j);
3508
if (in != NULL) {
3509
wq.push(in);
3510
}
3511
}
3512
m->disconnect_inputs(this);
3513
}
3514
}
3515
}
3516
break;
3517
}
3518
case Op_Blackhole:
3519
break;
3520
case Op_RangeCheck: {
3521
RangeCheckNode* rc = n->as_RangeCheck();
3522
Node* iff = new IfNode(rc->in(0), rc->in(1), rc->_prob, rc->_fcnt);
3523
n->subsume_by(iff, this);
3524
frc._tests.push(iff);
3525
break;
3526
}
3527
case Op_ConvI2L: {
3528
if (!Matcher::convi2l_type_required) {
3529
// Code generation on some platforms doesn't need accurate
3530
// ConvI2L types. Widening the type can help remove redundant
3531
// address computations.
3532
n->as_Type()->set_type(TypeLong::INT);
3533
ResourceMark rm;
3534
Unique_Node_List wq;
3535
wq.push(n);
3536
for (uint next = 0; next < wq.size(); next++) {
3537
Node *m = wq.at(next);
3538
3539
for(;;) {
3540
// Loop over all nodes with the same input edges as m
3541
Node* k = m->find_similar(m->Opcode());
3542
if (k == NULL) {
3543
break;
3544
}
3545
// Push their uses so we get a chance to remove nodes made
3546
// redundant
3547
for (DUIterator_Fast imax, i = k->fast_outs(imax); i < imax; i++) {
3548
Node* u = k->fast_out(i);
3549
if (u->Opcode() == Op_LShiftL ||
3550
u->Opcode() == Op_AddL ||
3551
u->Opcode() == Op_SubL ||
3552
u->Opcode() == Op_AddP) {
3553
wq.push(u);
3554
}
3555
}
3556
// Replace all nodes that have the same input edges as m with m
3557
k->subsume_by(m, this);
3558
}
3559
}
3560
}
3561
break;
3562
}
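// Illustrative sketch (hypothetical example): two conversions of the same index,
//   ConvI2L(i, type=[0..100])   and   ConvI2L(i, type=int)
// become interchangeable once both carry the widened TypeLong::INT; after
// widening, one subsumes the other and the AddL/LShiftL/AddP chains hanging off
// them can be commoned as well.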
3563
case Op_CmpUL: {
3564
if (!Matcher::has_match_rule(Op_CmpUL)) {
3565
// No support for unsigned long comparisons
3566
ConINode* sign_pos = new ConINode(TypeInt::make(BitsPerLong - 1));
3567
Node* sign_bit_mask = new RShiftLNode(n->in(1), sign_pos);
3568
Node* orl = new OrLNode(n->in(1), sign_bit_mask);
3569
ConLNode* remove_sign_mask = new ConLNode(TypeLong::make(max_jlong));
3570
Node* andl = new AndLNode(orl, remove_sign_mask);
3571
Node* cmp = new CmpLNode(andl, n->in(2));
3572
n->subsume_by(cmp, this);
3573
}
3574
break;
3575
}
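// Rough sketch of the equivalent scalar computation (assuming, as in the usual
// range-check shape, that the second operand is a non-negative value strictly
// below max_jlong):
//   long m = a >> 63;                    // 0 if a >= 0, all ones otherwise
//   long clamped = (a | m) & max_jlong;  // a itself, or max_jlong if the sign bit was set
//   // CmpL(clamped, b) now orders a and b the way an unsigned compare would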
3576
default:
3577
assert(!n->is_Call(), "");
3578
assert(!n->is_Mem(), "");
3579
assert(nop != Op_ProfileBoolean, "should be eliminated during IGVN");
3580
break;
3581
}
3582
}
3583
3584
//------------------------------final_graph_reshaping_walk---------------------
3585
// Replacing Opaque nodes with their input in final_graph_reshaping_impl(),
3586
// requires that the walk visits a node's inputs before visiting the node.
3587
void Compile::final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc ) {
3588
Unique_Node_List sfpt;
3589
3590
frc._visited.set(root->_idx); // first, mark node as visited
3591
uint cnt = root->req();
3592
Node *n = root;
3593
uint i = 0;
3594
while (true) {
3595
if (i < cnt) {
3596
// Place all non-visited non-null inputs onto stack
3597
Node* m = n->in(i);
3598
++i;
3599
if (m != NULL && !frc._visited.test_set(m->_idx)) {
3600
if (m->is_SafePoint() && m->as_SafePoint()->jvms() != NULL) {
3601
// compute worst case interpreter size in case of a deoptimization
3602
update_interpreter_frame_size(m->as_SafePoint()->jvms()->interpreter_frame_size());
3603
3604
sfpt.push(m);
3605
}
3606
cnt = m->req();
3607
nstack.push(n, i); // put on stack parent and next input's index
3608
n = m;
3609
i = 0;
3610
}
3611
} else {
3612
// Now do post-visit work
3613
final_graph_reshaping_impl( n, frc );
3614
if (nstack.is_empty())
3615
break; // finished
3616
n = nstack.node(); // Get node from stack
3617
cnt = n->req();
3618
i = nstack.index();
3619
nstack.pop(); // Shift to the next node on stack
3620
}
3621
}
3622
3623
// Skip next transformation if compressed oops are not used.
3624
if ((UseCompressedOops && !Matcher::gen_narrow_oop_implicit_null_checks()) ||
3625
(!UseCompressedOops && !UseCompressedClassPointers))
3626
return;
3627
3628
// Go over safepoints nodes to skip DecodeN/DecodeNKlass nodes for debug edges.
3629
// It could be done for uncommon traps or any safepoints/calls
3630
// if the DecodeN/DecodeNKlass node is referenced only in a debug info.
3631
while (sfpt.size() > 0) {
3632
n = sfpt.pop();
3633
JVMState *jvms = n->as_SafePoint()->jvms();
3634
assert(jvms != NULL, "sanity");
3635
int start = jvms->debug_start();
3636
int end = n->req();
3637
bool is_uncommon = (n->is_CallStaticJava() &&
3638
n->as_CallStaticJava()->uncommon_trap_request() != 0);
3639
for (int j = start; j < end; j++) {
3640
Node* in = n->in(j);
3641
if (in->is_DecodeNarrowPtr()) {
3642
bool safe_to_skip = true;
3643
if (!is_uncommon) {
3644
// Is it safe to skip?
3645
for (uint i = 0; i < in->outcnt(); i++) {
3646
Node* u = in->raw_out(i);
3647
if (!u->is_SafePoint() ||
3648
(u->is_Call() && u->as_Call()->has_non_debug_use(n))) {
3649
safe_to_skip = false;
3650
}
3651
}
3652
}
3653
if (safe_to_skip) {
3654
n->set_req(j, in->in(1));
3655
}
3656
if (in->outcnt() == 0) {
3657
in->disconnect_inputs(this);
3658
}
3659
}
3660
}
3661
}
3662
}
3663
3664
//------------------------------final_graph_reshaping--------------------------
3665
// Final Graph Reshaping.
3666
//
3667
// (1) Clone simple inputs to uncommon calls, so they can be scheduled late
3668
// and not commoned up and forced early. Must come after regular
3669
// optimizations to avoid GVN undoing the cloning. Clone constant
3670
// inputs to Loop Phis; these will be split by the allocator anyways.
3671
// Remove Opaque nodes.
3672
// (2) Move last-uses by commutative operations to the left input to encourage
3673
// Intel update-in-place two-address operations and better register usage
3674
// on RISCs. Must come after regular optimizations to avoid GVN Ideal
3675
// calls canonicalizing them back.
3676
// (3) Count the number of double-precision FP ops, single-precision FP ops
3677
// and call sites. On Intel, we can get correct rounding either by
3678
// forcing singles to memory (requires extra stores and loads after each
3679
// FP bytecode) or we can set a rounding mode bit (requires setting and
3680
// clearing the mode bit around call sites). The mode bit is only used
3681
// if the relative frequency of single FP ops to calls is low enough.
3682
// This is a key transform for SPEC mpeg_audio.
3683
// (4) Detect infinite loops; blobs of code reachable from above but not
3684
// below. Several of the Code_Gen algorithms fail on such code shapes,
3685
// so we simply bail out. Happens a lot in ZKM.jar, but also happens
3686
// from time to time in other codes (such as -Xcomp finalizer loops, etc).
3687
// Detection is by looking for IfNodes where only 1 projection is
3688
// reachable from below or CatchNodes missing some targets.
3689
// (5) Assert for insane oop offsets in debug mode.
3690
3691
bool Compile::final_graph_reshaping() {
3692
// an infinite loop may have been eliminated by the optimizer,
3693
// in which case the graph will be empty.
3694
if (root()->req() == 1) {
3695
record_method_not_compilable("trivial infinite loop");
3696
return true;
3697
}
3698
3699
// Expensive nodes have their control input set to prevent the GVN
3700
// from freely commoning them. There's no GVN beyond this point so
3701
// no need to keep the control input. We want the expensive nodes to
3702
// be freely moved to the least frequent code path by gcm.
3703
assert(OptimizeExpensiveOps || expensive_count() == 0, "optimization off but list non empty?");
3704
for (int i = 0; i < expensive_count(); i++) {
3705
_expensive_nodes.at(i)->set_req(0, NULL);
3706
}
3707
3708
Final_Reshape_Counts frc;
3709
3710
// Visit everybody reachable!
3711
// Allocate stack of size C->live_nodes()/2 to avoid frequent realloc
3712
Node_Stack nstack(live_nodes() >> 1);
3713
final_graph_reshaping_walk(nstack, root(), frc);
3714
3715
// Check for unreachable (from below) code (i.e., infinite loops).
3716
for( uint i = 0; i < frc._tests.size(); i++ ) {
3717
MultiBranchNode *n = frc._tests[i]->as_MultiBranch();
3718
// Get number of CFG targets.
3719
// Note that PCTables include exception targets after calls.
3720
uint required_outcnt = n->required_outcnt();
3721
if (n->outcnt() != required_outcnt) {
3722
// Check for a few special cases. Rethrow Nodes never take the
3723
// 'fall-thru' path, so expected kids is 1 less.
3724
if (n->is_PCTable() && n->in(0) && n->in(0)->in(0)) {
3725
if (n->in(0)->in(0)->is_Call()) {
3726
CallNode *call = n->in(0)->in(0)->as_Call();
3727
if (call->entry_point() == OptoRuntime::rethrow_stub()) {
3728
required_outcnt--; // Rethrow always has 1 less kid
3729
} else if (call->req() > TypeFunc::Parms &&
3730
call->is_CallDynamicJava()) {
3731
// Check for a null receiver. In such a case, the optimizer has
3732
// detected that the virtual call will always result in a null
3733
// pointer exception. The fall-through projection of this CatchNode
3734
// will not be populated.
3735
Node *arg0 = call->in(TypeFunc::Parms);
3736
if (arg0->is_Type() &&
3737
arg0->as_Type()->type()->higher_equal(TypePtr::NULL_PTR)) {
3738
required_outcnt--;
3739
}
3740
} else if (call->entry_point() == OptoRuntime::new_array_Java() &&
3741
call->req() > TypeFunc::Parms+1 &&
3742
call->is_CallStaticJava()) {
3743
// Check for a negative array length. In such a case, the optimizer has
3744
// detected that the allocation attempt will always result in an
3745
// exception. There is no fall-through projection of this CatchNode.
3746
Node *arg1 = call->in(TypeFunc::Parms+1);
3747
if (arg1->is_Type() &&
3748
arg1->as_Type()->type()->join(TypeInt::POS)->empty()) {
3749
required_outcnt--;
3750
}
3751
}
3752
}
3753
}
3754
// Recheck with a better notion of 'required_outcnt'
3755
if (n->outcnt() != required_outcnt) {
3756
record_method_not_compilable("malformed control flow");
3757
return true; // Not all targets reachable!
3758
}
3759
}
3760
// Check that I actually visited all kids. Unreached kids
3761
// must be infinite loops.
3762
for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++)
3763
if (!frc._visited.test(n->fast_out(j)->_idx)) {
3764
record_method_not_compilable("infinite loop");
3765
return true; // Found unvisited kid; must be unreach
3766
}
3767
3768
// Here so that verification code in final_graph_reshaping_walk()
// always sees an OuterStripMinedLoopEnd
3770
if (n->is_OuterStripMinedLoopEnd() || n->is_LongCountedLoopEnd()) {
3771
IfNode* init_iff = n->as_If();
3772
Node* iff = new IfNode(init_iff->in(0), init_iff->in(1), init_iff->_prob, init_iff->_fcnt);
3773
n->subsume_by(iff, this);
3774
}
3775
}
3776
3777
#ifdef IA32
3778
// If original bytecodes contained a mixture of floats and doubles
3779
// check whether the optimizer has made it homogeneous, item (3).
3780
if (UseSSE == 0 &&
3781
frc.get_float_count() > 32 &&
3782
frc.get_double_count() == 0 &&
3783
(10 * frc.get_call_count() < frc.get_float_count()) ) {
3784
set_24_bit_selection_and_mode(false, true);
3785
}
3786
#endif // IA32
3787
3788
set_java_calls(frc.get_java_call_count());
3789
set_inner_loops(frc.get_inner_loop_count());
3790
3791
// No infinite loops, no reason to bail out.
3792
return false;
3793
}
3794
3795
//-----------------------------too_many_traps----------------------------------
3796
// Report if there are too many traps at the current method and bci.
3797
// Return true if there was a trap, and/or PerMethodTrapLimit is exceeded.
3798
bool Compile::too_many_traps(ciMethod* method,
3799
int bci,
3800
Deoptimization::DeoptReason reason) {
3801
ciMethodData* md = method->method_data();
3802
if (md->is_empty()) {
3803
// Assume the trap has not occurred, or that it occurred only
3804
// because of a transient condition during start-up in the interpreter.
3805
return false;
3806
}
3807
ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : NULL;
3808
if (md->has_trap_at(bci, m, reason) != 0) {
3809
// Assume PerBytecodeTrapLimit==0, for a more conservative heuristic.
3810
// Also, if there are multiple reasons, or if there is no per-BCI record,
3811
// assume the worst.
3812
if (log())
3813
log()->elem("observe trap='%s' count='%d'",
3814
Deoptimization::trap_reason_name(reason),
3815
md->trap_count(reason));
3816
return true;
3817
} else {
3818
// Ignore method/bci and see if there have been too many globally.
3819
return too_many_traps(reason, md);
3820
}
3821
}
3822
3823
// Less-accurate variant which does not require a method and bci.
3824
bool Compile::too_many_traps(Deoptimization::DeoptReason reason,
3825
ciMethodData* logmd) {
3826
if (trap_count(reason) >= Deoptimization::per_method_trap_limit(reason)) {
3827
// Too many traps globally.
3828
// Note that we use cumulative trap_count, not just md->trap_count.
3829
if (log()) {
3830
int mcount = (logmd == NULL)? -1: (int)logmd->trap_count(reason);
3831
log()->elem("observe trap='%s' count='0' mcount='%d' ccount='%d'",
3832
Deoptimization::trap_reason_name(reason),
3833
mcount, trap_count(reason));
3834
}
3835
return true;
3836
} else {
3837
// The coast is clear.
3838
return false;
3839
}
3840
}
3841
3842
//--------------------------too_many_recompiles--------------------------------
3843
// Report if there are too many recompiles at the current method and bci.
3844
// Consults PerBytecodeRecompilationCutoff and PerMethodRecompilationCutoff.
3845
// Is not eager to return true, since this will cause the compiler to use
3846
// Action_none for a trap point, to avoid too many recompilations.
3847
bool Compile::too_many_recompiles(ciMethod* method,
3848
int bci,
3849
Deoptimization::DeoptReason reason) {
3850
ciMethodData* md = method->method_data();
3851
if (md->is_empty()) {
3852
// Assume the trap has not occurred, or that it occurred only
3853
// because of a transient condition during start-up in the interpreter.
3854
return false;
3855
}
3856
// Pick a cutoff point well within PerBytecodeRecompilationCutoff.
3857
uint bc_cutoff = (uint) PerBytecodeRecompilationCutoff / 8;
3858
uint m_cutoff = (uint) PerMethodRecompilationCutoff / 2 + 1; // not zero
3859
Deoptimization::DeoptReason per_bc_reason
3860
= Deoptimization::reason_recorded_per_bytecode_if_any(reason);
3861
ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : NULL;
3862
if ((per_bc_reason == Deoptimization::Reason_none
3863
|| md->has_trap_at(bci, m, reason) != 0)
3864
// The trap frequency measure we care about is the recompile count:
3865
&& md->trap_recompiled_at(bci, m)
3866
&& md->overflow_recompile_count() >= bc_cutoff) {
3867
// Do not emit a trap here if it has already caused recompilations.
3868
// Also, if there are multiple reasons, or if there is no per-BCI record,
3869
// assume the worst.
3870
if (log())
3871
log()->elem("observe trap='%s recompiled' count='%d' recompiles2='%d'",
3872
Deoptimization::trap_reason_name(reason),
3873
md->trap_count(reason),
3874
md->overflow_recompile_count());
3875
return true;
3876
} else if (trap_count(reason) != 0
3877
&& decompile_count() >= m_cutoff) {
3878
// Too many recompiles globally, and we have seen this sort of trap.
3879
// Use cumulative decompile_count, not just md->decompile_count.
3880
if (log())
3881
log()->elem("observe trap='%s' count='%d' mcount='%d' decompiles='%d' mdecompiles='%d'",
3882
Deoptimization::trap_reason_name(reason),
3883
md->trap_count(reason), trap_count(reason),
3884
md->decompile_count(), decompile_count());
3885
return true;
3886
} else {
3887
// The coast is clear.
3888
return false;
3889
}
3890
}
3891
3892
// Compute when not to trap. Used by matching trap based nodes and
3893
// NullCheck optimization.
3894
void Compile::set_allowed_deopt_reasons() {
3895
_allowed_reasons = 0;
3896
if (is_method_compilation()) {
3897
for (int rs = (int)Deoptimization::Reason_none+1; rs < Compile::trapHistLength; rs++) {
3898
assert(rs < BitsPerInt, "recode bit map");
3899
if (!too_many_traps((Deoptimization::DeoptReason) rs)) {
3900
_allowed_reasons |= nth_bit(rs);
3901
}
3902
}
3903
}
3904
}
3905
3906
bool Compile::needs_clinit_barrier(ciMethod* method, ciMethod* accessing_method) {
3907
return method->is_static() && needs_clinit_barrier(method->holder(), accessing_method);
3908
}
3909
3910
bool Compile::needs_clinit_barrier(ciField* field, ciMethod* accessing_method) {
3911
return field->is_static() && needs_clinit_barrier(field->holder(), accessing_method);
3912
}
3913
3914
bool Compile::needs_clinit_barrier(ciInstanceKlass* holder, ciMethod* accessing_method) {
3915
if (holder->is_initialized()) {
3916
return false;
3917
}
3918
if (holder->is_being_initialized()) {
3919
if (accessing_method->holder() == holder) {
3920
// Access inside a class. The barrier can be elided when access happens in <clinit>,
3921
// <init>, or a static method. In all those cases, an initialization
// barrier for the holder klass has already been passed.
3923
if (accessing_method->is_static_initializer() ||
3924
accessing_method->is_object_initializer() ||
3925
accessing_method->is_static()) {
3926
return false;
3927
}
3928
} else if (accessing_method->holder()->is_subclass_of(holder)) {
3929
// Access from a subclass. The barrier can be elided only when access happens in <clinit>.
3930
// In case of <init> or a static method, a barrier on the subclass is not enough:
3931
// child class can become fully initialized while its parent class is still being initialized.
3932
if (accessing_method->is_static_initializer()) {
3933
return false;
3934
}
3935
}
3936
ciMethod* root = method(); // the root method of compilation
3937
if (root != accessing_method) {
3938
return needs_clinit_barrier(holder, root); // check access in the context of compilation root
3939
}
3940
}
3941
return true;
3942
}
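// Illustrative example (hypothetical classes A and B): if A is still being
// initialized and a method of an unrelated class B reads a static field of A,
// a clinit barrier is required, since another thread may be running A.<clinit>
// and B must block until it finishes. Access from A.<clinit> itself, or from
// A's <init> or static methods, needs no barrier because the initializing
// thread has already passed (or is inside) the initialization barrier for A.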
3943
3944
#ifndef PRODUCT
3945
//------------------------------verify_graph_edges---------------------------
3946
// Walk the Graph and verify that there is a one-to-one correspondence
3947
// between Use-Def edges and Def-Use edges in the graph.
3948
void Compile::verify_graph_edges(bool no_dead_code) {
3949
if (VerifyGraphEdges) {
3950
Unique_Node_List visited;
3951
// Call recursive graph walk to check edges
3952
_root->verify_edges(visited);
3953
if (no_dead_code) {
3954
// Now make sure that no visited node is used by an unvisited node.
3955
bool dead_nodes = false;
3956
Unique_Node_List checked;
3957
while (visited.size() > 0) {
3958
Node* n = visited.pop();
3959
checked.push(n);
3960
for (uint i = 0; i < n->outcnt(); i++) {
3961
Node* use = n->raw_out(i);
3962
if (checked.member(use)) continue; // already checked
3963
if (visited.member(use)) continue; // already in the graph
3964
if (use->is_Con()) continue; // a dead ConNode is OK
3965
// At this point, we have found a dead node which is DU-reachable.
3966
if (!dead_nodes) {
3967
tty->print_cr("*** Dead nodes reachable via DU edges:");
3968
dead_nodes = true;
3969
}
3970
use->dump(2);
3971
tty->print_cr("---");
3972
checked.push(use); // No repeats; pretend it is now checked.
3973
}
3974
}
3975
assert(!dead_nodes, "using nodes must be reachable from root");
3976
}
3977
}
3978
}
3979
#endif
3980
3981
// The Compile object keeps track of failure reasons separately from the ciEnv.
3982
// This is required because there is not quite a 1-1 relation between the
3983
// ciEnv and its compilation task and the Compile object. Note that one
3984
// ciEnv might use two Compile objects, if C2Compiler::compile_method decides
3985
// to backtrack and retry without subsuming loads. Other than this backtracking
3986
// behavior, the Compile's failure reason is quietly copied up to the ciEnv
3987
// by the logic in C2Compiler.
3988
void Compile::record_failure(const char* reason) {
3989
if (log() != NULL) {
3990
log()->elem("failure reason='%s' phase='compile'", reason);
3991
}
3992
if (_failure_reason == NULL) {
3993
// Record the first failure reason.
3994
_failure_reason = reason;
3995
}
3996
3997
if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
3998
C->print_method(PHASE_FAILURE);
3999
}
4000
_root = NULL; // flush the graph, too
4001
}
4002
4003
Compile::TracePhase::TracePhase(const char* name, elapsedTimer* accumulator)
4004
: TraceTime(name, accumulator, CITime, CITimeVerbose),
4005
_phase_name(name), _dolog(CITimeVerbose)
4006
{
4007
if (_dolog) {
4008
C = Compile::current();
4009
_log = C->log();
4010
} else {
4011
C = NULL;
4012
_log = NULL;
4013
}
4014
if (_log != NULL) {
4015
_log->begin_head("phase name='%s' nodes='%d' live='%d'", _phase_name, C->unique(), C->live_nodes());
4016
_log->stamp();
4017
_log->end_head();
4018
}
4019
}
4020
4021
Compile::TracePhase::~TracePhase() {
4022
4023
C = Compile::current();
4024
if (_dolog) {
4025
_log = C->log();
4026
} else {
4027
_log = NULL;
4028
}
4029
4030
#ifdef ASSERT
4031
if (PrintIdealNodeCount) {
4032
tty->print_cr("phase name='%s' nodes='%d' live='%d' live_graph_walk='%d'",
4033
_phase_name, C->unique(), C->live_nodes(), C->count_live_nodes_by_graph_walk());
4034
}
4035
4036
if (VerifyIdealNodeCount) {
4037
Compile::current()->print_missing_nodes();
4038
}
4039
#endif
4040
4041
if (_log != NULL) {
4042
_log->done("phase name='%s' nodes='%d' live='%d'", _phase_name, C->unique(), C->live_nodes());
4043
}
4044
}
4045
4046
//----------------------------static_subtype_check-----------------------------
4047
// Shortcut important common cases when superklass is exact:
4048
// (0) superklass is java.lang.Object (can occur in reflective code)
4049
// (1) subklass is already limited to a subtype of superklass => always ok
4050
// (2) subklass does not overlap with superklass => always fail
4051
// (3) superklass has NO subtypes and we can check with a simple compare.
4052
int Compile::static_subtype_check(ciKlass* superk, ciKlass* subk) {
4053
if (StressReflectiveCode) {
4054
return SSC_full_test; // Let caller generate the general case.
4055
}
4056
4057
if (superk == env()->Object_klass()) {
4058
return SSC_always_true; // (0) this test cannot fail
4059
}
4060
4061
ciType* superelem = superk;
4062
ciType* subelem = subk;
4063
if (superelem->is_array_klass()) {
4064
superelem = superelem->as_array_klass()->base_element_type();
4065
}
4066
if (subelem->is_array_klass()) {
4067
subelem = subelem->as_array_klass()->base_element_type();
4068
}
4069
4070
if (!subk->is_interface()) { // cannot trust static interface types yet
4071
if (subk->is_subtype_of(superk)) {
4072
return SSC_always_true; // (1) false path dead; no dynamic test needed
4073
}
4074
if (!(superelem->is_klass() && superelem->as_klass()->is_interface()) &&
4075
!(subelem->is_klass() && subelem->as_klass()->is_interface()) &&
4076
!superk->is_subtype_of(subk)) {
4077
return SSC_always_false; // (2) true path dead; no dynamic test needed
4078
}
4079
}
4080
4081
// If casting to an instance klass, it must have no subtypes
4082
if (superk->is_interface()) {
4083
// Cannot trust interfaces yet.
4084
// %%% S.B. superk->nof_implementors() == 1
4085
} else if (superelem->is_instance_klass()) {
4086
ciInstanceKlass* ik = superelem->as_instance_klass();
4087
if (!ik->has_subklass() && !ik->is_interface()) {
4088
if (!ik->is_final()) {
4089
// Add a dependency if there is a chance of a later subclass.
4090
dependencies()->assert_leaf_type(ik);
4091
}
4092
return SSC_easy_test; // (3) caller can do a simple ptr comparison
4093
}
4094
} else {
4095
// A primitive array type has no subtypes.
4096
return SSC_easy_test; // (3) caller can do a simple ptr comparison
4097
}
4098
4099
return SSC_full_test;
4100
}
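// Illustrative summary (assumed example classes): superklass == java.lang.Object
// answers SSC_always_true (case 0); a subklass already known to be a subtype of
// the superklass also answers SSC_always_true (case 1); disjoint types answer
// SSC_always_false (case 2); a primitive array superklass or a leaf instance
// klass such as java.lang.String allows SSC_easy_test, a single klass-pointer
// compare (case 3); anything else falls back to SSC_full_test.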
4101
4102
Node* Compile::conv_I2X_index(PhaseGVN* phase, Node* idx, const TypeInt* sizetype, Node* ctrl) {
4103
#ifdef _LP64
4104
// The scaled index operand to AddP must be a clean 64-bit value.
4105
// Java allows a 32-bit int to be incremented to a negative
4106
// value, which appears in a 64-bit register as a large
4107
// positive number. Using that large positive number as an
4108
// operand in pointer arithmetic has bad consequences.
4109
// On the other hand, 32-bit overflow is rare, and the possibility
4110
// can often be excluded, if we annotate the ConvI2L node with
4111
// a type assertion that its value is known to be a small positive
4112
// number. (The prior range check has ensured this.)
4113
// This assertion is used by ConvI2LNode::Ideal.
4114
int index_max = max_jint - 1; // array size is max_jint, index is one less
4115
if (sizetype != NULL) index_max = sizetype->_hi - 1;
4116
const TypeInt* iidxtype = TypeInt::make(0, index_max, Type::WidenMax);
4117
idx = constrained_convI2L(phase, idx, iidxtype, ctrl);
4118
#endif
4119
return idx;
4120
}
4121
4122
// Convert integer value to a narrowed long type dependent on ctrl (for example, a range check)
4123
Node* Compile::constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl, bool carry_dependency) {
4124
if (ctrl != NULL) {
4125
// Express control dependency by a CastII node with a narrow type.
4126
value = new CastIINode(value, itype, carry_dependency, true /* range check dependency */);
4127
// Make the CastII node dependent on the control input to prevent the narrowed ConvI2L
4128
// node from floating above the range check during loop optimizations. Otherwise, the
4129
// ConvI2L node may be eliminated independently of the range check, causing the data path
4130
// to become TOP while the control path is still there (although it's unreachable).
4131
value->set_req(0, ctrl);
4132
value = phase->transform(value);
4133
}
4134
const TypeLong* ltype = TypeLong::make(itype->_lo, itype->_hi, itype->_widen);
4135
return phase->transform(new ConvI2LNode(value, ltype));
4136
}
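// Illustrative note (a sketch of the typical shape, not an exhaustive
// description): conv_I2X_index() above is the usual caller; it passes an index
// type such as [0, array_size - 1], so the resulting
//   CastII(idx, [0, array_size - 1], ctrl)  -->  ConvI2L(..., [0, array_size - 1])
// both pins the index below the range check and gives ConvI2LNode::Ideal a
// provably small positive range to reason about.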
4137
4138
void Compile::print_inlining_stream_free() {
4139
if (_print_inlining_stream != NULL) {
4140
_print_inlining_stream->~stringStream();
4141
_print_inlining_stream = NULL;
4142
}
4143
}
4144
4145
// The message about the current inlining is accumulated in
4146
// _print_inlining_stream and transferred into the _print_inlining_list
// once we know whether inlining succeeds or not. For regular
// inlining, messages are appended to the buffer pointed to by
4149
// _print_inlining_idx in the _print_inlining_list. For late inlining,
4150
// a new buffer is added after _print_inlining_idx in the list. This
4151
// way we can update the inlining message for late inlining call site
4152
// when the inlining is attempted again.
4153
void Compile::print_inlining_init() {
4154
if (print_inlining() || print_intrinsics()) {
4155
// print_inlining_init is actually called several times.
4156
print_inlining_stream_free();
4157
_print_inlining_stream = new stringStream();
4158
_print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer*>(comp_arena(), 1, 1, new PrintInliningBuffer());
4159
}
4160
}
4161
4162
void Compile::print_inlining_reinit() {
4163
if (print_inlining() || print_intrinsics()) {
4164
print_inlining_stream_free();
4165
// Reallocate the buffer when we change the ResourceMark
4166
_print_inlining_stream = new stringStream();
4167
}
4168
}
4169
4170
void Compile::print_inlining_reset() {
4171
_print_inlining_stream->reset();
4172
}
4173
4174
void Compile::print_inlining_commit() {
4175
assert(print_inlining() || print_intrinsics(), "PrintInlining off?");
4176
// Transfer the message from _print_inlining_stream to the current
4177
// _print_inlining_list buffer and clear _print_inlining_stream.
4178
_print_inlining_list->at(_print_inlining_idx)->ss()->write(_print_inlining_stream->base(), _print_inlining_stream->size());
4179
print_inlining_reset();
4180
}
4181
4182
void Compile::print_inlining_push() {
4183
// Add new buffer to the _print_inlining_list at current position
4184
_print_inlining_idx++;
4185
_print_inlining_list->insert_before(_print_inlining_idx, new PrintInliningBuffer());
4186
}
4187
4188
Compile::PrintInliningBuffer* Compile::print_inlining_current() {
4189
return _print_inlining_list->at(_print_inlining_idx);
4190
}
4191
4192
void Compile::print_inlining_update(CallGenerator* cg) {
4193
if (print_inlining() || print_intrinsics()) {
4194
if (cg->is_late_inline()) {
4195
if (print_inlining_current()->cg() != cg &&
4196
(print_inlining_current()->cg() != NULL ||
4197
print_inlining_current()->ss()->size() != 0)) {
4198
print_inlining_push();
4199
}
4200
print_inlining_commit();
4201
print_inlining_current()->set_cg(cg);
4202
} else {
4203
if (print_inlining_current()->cg() != NULL) {
4204
print_inlining_push();
4205
}
4206
print_inlining_commit();
4207
}
4208
}
4209
}
4210
4211
void Compile::print_inlining_move_to(CallGenerator* cg) {
4212
// We resume inlining at a late inlining call site. Locate the
4213
// corresponding inlining buffer so that we can update it.
4214
if (print_inlining() || print_intrinsics()) {
4215
for (int i = 0; i < _print_inlining_list->length(); i++) {
4216
if (_print_inlining_list->at(i)->cg() == cg) {
4217
_print_inlining_idx = i;
4218
return;
4219
}
4220
}
4221
ShouldNotReachHere();
4222
}
4223
}
4224
4225
void Compile::print_inlining_update_delayed(CallGenerator* cg) {
4226
if (print_inlining() || print_intrinsics()) {
4227
assert(_print_inlining_stream->size() > 0, "missing inlining msg");
4228
assert(print_inlining_current()->cg() == cg, "wrong entry");
4229
// replace message with new message
4230
_print_inlining_list->at_put(_print_inlining_idx, new PrintInliningBuffer());
4231
print_inlining_commit();
4232
print_inlining_current()->set_cg(cg);
4233
}
4234
}
4235
4236
void Compile::print_inlining_assert_ready() {
4237
assert(!_print_inlining || _print_inlining_stream->size() == 0, "losing data");
4238
}
4239
4240
void Compile::process_print_inlining() {
4241
assert(_late_inlines.length() == 0, "not drained yet");
4242
if (print_inlining() || print_intrinsics()) {
4243
ResourceMark rm;
4244
stringStream ss;
4245
assert(_print_inlining_list != NULL, "process_print_inlining should be called only once.");
4246
for (int i = 0; i < _print_inlining_list->length(); i++) {
4247
PrintInliningBuffer* pib = _print_inlining_list->at(i);
4248
ss.print("%s", pib->ss()->as_string());
4249
delete pib;
4250
DEBUG_ONLY(_print_inlining_list->at_put(i, NULL));
4251
}
4252
// Reset _print_inlining_list, it only contains destructed objects.
4253
// It is on the arena, so it will be freed when the arena is reset.
4254
_print_inlining_list = NULL;
4255
// _print_inlining_stream won't be used anymore, either.
4256
print_inlining_stream_free();
4257
size_t end = ss.size();
4258
_print_inlining_output = NEW_ARENA_ARRAY(comp_arena(), char, end+1);
4259
strncpy(_print_inlining_output, ss.base(), end+1);
4260
_print_inlining_output[end] = 0;
4261
}
4262
}
4263
4264
void Compile::dump_print_inlining() {
4265
if (_print_inlining_output != NULL) {
4266
tty->print_raw(_print_inlining_output);
4267
}
4268
}
4269
4270
void Compile::log_late_inline(CallGenerator* cg) {
4271
if (log() != NULL) {
4272
log()->head("late_inline method='%d' inline_id='" JLONG_FORMAT "'", log()->identify(cg->method()),
4273
cg->unique_id());
4274
JVMState* p = cg->call_node()->jvms();
4275
while (p != NULL) {
4276
log()->elem("jvms bci='%d' method='%d'", p->bci(), log()->identify(p->method()));
4277
p = p->caller();
4278
}
4279
log()->tail("late_inline");
4280
}
4281
}
4282
4283
void Compile::log_late_inline_failure(CallGenerator* cg, const char* msg) {
4284
log_late_inline(cg);
4285
if (log() != NULL) {
4286
log()->inline_fail(msg);
4287
}
4288
}
4289
4290
void Compile::log_inline_id(CallGenerator* cg) {
4291
if (log() != NULL) {
4292
// The LogCompilation tool needs a unique way to identify late
4293
// inline call sites. This id must be unique for this call site in
4294
// this compilation. Try to have it unique across compilations as
4295
// well because it can be convenient when grepping through the log
4296
// file.
4297
// Distinguish OSR compilations from others in case CICountOSR is
4298
// on.
4299
jlong id = ((jlong)unique()) + (((jlong)compile_id()) << 33) + (CICountOSR && is_osr_compilation() ? ((jlong)1) << 32 : 0);
4300
cg->set_unique_id(id);
4301
log()->elem("inline_id id='" JLONG_FORMAT "'", id);
4302
}
4303
}
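// Rough layout of the id built above (a sketch, assuming unique() stays below
// 2^32, which it does in practice):
//   bits 33 and up : compile_id()
//   bit  32        : set for an OSR compilation when CICountOSR is on
//   bits 0..31     : unique(), the node count at the time the id is taken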
4304
4305
void Compile::log_inline_failure(const char* msg) {
4306
if (C->log() != NULL) {
4307
C->log()->inline_fail(msg);
4308
}
4309
}
4310
4311
4312
// Dump inlining replay data to the stream.
4313
// Don't change thread state and acquire any locks.
4314
void Compile::dump_inline_data(outputStream* out) {
4315
InlineTree* inl_tree = ilt();
4316
if (inl_tree != NULL) {
4317
out->print(" inline %d", inl_tree->count());
4318
inl_tree->dump_replay_data(out);
4319
}
4320
}
4321
4322
int Compile::cmp_expensive_nodes(Node* n1, Node* n2) {
4323
if (n1->Opcode() < n2->Opcode()) return -1;
4324
else if (n1->Opcode() > n2->Opcode()) return 1;
4325
4326
assert(n1->req() == n2->req(), "can't compare %s nodes: n1->req() = %d, n2->req() = %d", NodeClassNames[n1->Opcode()], n1->req(), n2->req());
4327
for (uint i = 1; i < n1->req(); i++) {
4328
if (n1->in(i) < n2->in(i)) return -1;
4329
else if (n1->in(i) > n2->in(i)) return 1;
4330
}
4331
4332
return 0;
4333
}
4334
4335
int Compile::cmp_expensive_nodes(Node** n1p, Node** n2p) {
4336
Node* n1 = *n1p;
4337
Node* n2 = *n2p;
4338
4339
return cmp_expensive_nodes(n1, n2);
4340
}
4341
4342
void Compile::sort_expensive_nodes() {
4343
if (!expensive_nodes_sorted()) {
4344
_expensive_nodes.sort(cmp_expensive_nodes);
4345
}
4346
}
4347
4348
bool Compile::expensive_nodes_sorted() const {
4349
for (int i = 1; i < _expensive_nodes.length(); i++) {
4350
if (cmp_expensive_nodes(_expensive_nodes.adr_at(i), _expensive_nodes.adr_at(i-1)) < 0) {
4351
return false;
4352
}
4353
}
4354
return true;
4355
}
4356
4357
bool Compile::should_optimize_expensive_nodes(PhaseIterGVN &igvn) {
4358
if (_expensive_nodes.length() == 0) {
4359
return false;
4360
}
4361
4362
assert(OptimizeExpensiveOps, "optimization off?");
4363
4364
// Take this opportunity to remove dead nodes from the list
4365
int j = 0;
4366
for (int i = 0; i < _expensive_nodes.length(); i++) {
4367
Node* n = _expensive_nodes.at(i);
4368
if (!n->is_unreachable(igvn)) {
4369
assert(n->is_expensive(), "should be expensive");
4370
_expensive_nodes.at_put(j, n);
4371
j++;
4372
}
4373
}
4374
_expensive_nodes.trunc_to(j);
4375
4376
// Then sort the list so that similar nodes are next to each other
4377
// and check for at least two nodes of identical kind with same data
4378
// inputs.
4379
sort_expensive_nodes();
4380
4381
for (int i = 0; i < _expensive_nodes.length()-1; i++) {
4382
if (cmp_expensive_nodes(_expensive_nodes.adr_at(i), _expensive_nodes.adr_at(i+1)) == 0) {
4383
return true;
4384
}
4385
}
4386
4387
return false;
4388
}
4389
4390
void Compile::cleanup_expensive_nodes(PhaseIterGVN &igvn) {
4391
if (_expensive_nodes.length() == 0) {
4392
return;
4393
}
4394
4395
assert(OptimizeExpensiveOps, "optimization off?");
4396
4397
// Sort to bring similar nodes next to each other and clear the
4398
// control input of nodes for which there's only a single copy.
4399
sort_expensive_nodes();
4400
4401
int j = 0;
4402
int identical = 0;
4403
int i = 0;
4404
bool modified = false;
4405
for (; i < _expensive_nodes.length()-1; i++) {
4406
assert(j <= i, "can't write beyond current index");
4407
if (_expensive_nodes.at(i)->Opcode() == _expensive_nodes.at(i+1)->Opcode()) {
4408
identical++;
4409
_expensive_nodes.at_put(j++, _expensive_nodes.at(i));
4410
continue;
4411
}
4412
if (identical > 0) {
4413
_expensive_nodes.at_put(j++, _expensive_nodes.at(i));
4414
identical = 0;
4415
} else {
4416
Node* n = _expensive_nodes.at(i);
4417
igvn.replace_input_of(n, 0, NULL);
4418
igvn.hash_insert(n);
4419
modified = true;
4420
}
4421
}
4422
if (identical > 0) {
4423
_expensive_nodes.at_put(j++, _expensive_nodes.at(i));
4424
} else if (_expensive_nodes.length() >= 1) {
4425
Node* n = _expensive_nodes.at(i);
4426
igvn.replace_input_of(n, 0, NULL);
4427
igvn.hash_insert(n);
4428
modified = true;
4429
}
4430
_expensive_nodes.trunc_to(j);
4431
if (modified) {
4432
igvn.optimize();
4433
}
4434
}
4435
4436
void Compile::add_expensive_node(Node * n) {
4437
assert(!_expensive_nodes.contains(n), "duplicate entry in expensive list");
4438
assert(n->is_expensive(), "expensive nodes with non-null control here only");
4439
assert(!n->is_CFG() && !n->is_Mem(), "no cfg or memory nodes here");
4440
if (OptimizeExpensiveOps) {
4441
_expensive_nodes.append(n);
4442
} else {
4443
// Clear control input and let IGVN optimize expensive nodes if
4444
// OptimizeExpensiveOps is off.
4445
n->set_req(0, NULL);
4446
}
4447
}
4448
4449
/**
4450
* Remove the speculative part of types and clean up the graph
4451
*/
4452
void Compile::remove_speculative_types(PhaseIterGVN &igvn) {
4453
if (UseTypeSpeculation) {
4454
Unique_Node_List worklist;
4455
worklist.push(root());
4456
int modified = 0;
4457
// Go over all type nodes that carry a speculative type, drop the
4458
// speculative part of the type and enqueue the node for an igvn
4459
// which may optimize it out.
4460
for (uint next = 0; next < worklist.size(); ++next) {
4461
Node *n = worklist.at(next);
4462
if (n->is_Type()) {
4463
TypeNode* tn = n->as_Type();
4464
const Type* t = tn->type();
4465
const Type* t_no_spec = t->remove_speculative();
4466
if (t_no_spec != t) {
4467
bool in_hash = igvn.hash_delete(n);
4468
assert(in_hash, "node should be in igvn hash table");
4469
tn->set_type(t_no_spec);
4470
igvn.hash_insert(n);
4471
igvn._worklist.push(n); // give it a chance to go away
4472
modified++;
4473
}
4474
}
4475
uint max = n->len();
4476
for( uint i = 0; i < max; ++i ) {
4477
Node *m = n->in(i);
4478
if (not_a_node(m)) continue;
4479
worklist.push(m);
4480
}
4481
}
4482
// Drop the speculative part of all types in the igvn's type table
4483
igvn.remove_speculative_types();
4484
if (modified > 0) {
4485
igvn.optimize();
4486
}
4487
#ifdef ASSERT
4488
// Verify that after the IGVN is over no speculative type has resurfaced
4489
worklist.clear();
4490
worklist.push(root());
4491
for (uint next = 0; next < worklist.size(); ++next) {
4492
Node *n = worklist.at(next);
4493
const Type* t = igvn.type_or_null(n);
4494
assert((t == NULL) || (t == t->remove_speculative()), "no more speculative types");
4495
if (n->is_Type()) {
4496
t = n->as_Type()->type();
4497
assert(t == t->remove_speculative(), "no more speculative types");
4498
}
4499
uint max = n->len();
4500
for( uint i = 0; i < max; ++i ) {
4501
Node *m = n->in(i);
4502
if (not_a_node(m)) continue;
4503
worklist.push(m);
4504
}
4505
}
4506
igvn.check_no_speculative_types();
4507
#endif
4508
}
4509
}
4510
4511
// Auxiliary methods to support randomized stressing/fuzzing.
4512
4513
int Compile::random() {
4514
_stress_seed = os::next_random(_stress_seed);
4515
return static_cast<int>(_stress_seed);
4516
}
4517
4518
// This method can be called an arbitrary number of times, with the current count
4519
// as the argument. The logic allows selecting a single candidate from the
4520
// running list of candidates as follows:
4521
// int count = 0;
4522
// Cand* selected = null;
4523
// while(cand = cand->next()) {
4524
// if (randomized_select(++count)) {
4525
// selected = cand;
4526
// }
4527
// }
4528
//
4529
// Including count equalizes the chances any candidate is "selected".
4530
// This is useful when we don't have the complete list of candidates to choose
4531
// from uniformly. In this case, we need to adjust the randomness of the
4532
// selection, or else we will end up biasing the selection towards the latter
4533
// candidates.
4534
//
4535
// A quick back-of-the-envelope calculation shows that for a list of n candidates
4536
// the equal probability for the candidate to persist as "best" can be
4537
// achieved by replacing it with "next" k-th candidate with the probability
4538
// of 1/k. It can be easily shown that by the end of the run, the
4539
// probability for any candidate converges to 1/n, thus giving the
4540
// uniform distribution among all the candidates.
4541
//
4542
// We don't care about the domain size as long as (RANDOMIZED_DOMAIN / count) is large.
4543
#define RANDOMIZED_DOMAIN_POW 29
4544
#define RANDOMIZED_DOMAIN (1 << RANDOMIZED_DOMAIN_POW)
4545
#define RANDOMIZED_DOMAIN_MASK ((1 << (RANDOMIZED_DOMAIN_POW + 1)) - 1)
4546
bool Compile::randomized_select(int count) {
4547
assert(count > 0, "only positive");
4548
return (random() & RANDOMIZED_DOMAIN_MASK) < (RANDOMIZED_DOMAIN / count);
4549
}
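// Worked example of the intended 1/k replacement scheme for n = 3 candidates
// (an idealized sketch; the actual random source only approximates it):
//   P(candidate 3 kept) = 1/3
//   P(candidate 2 kept) = 1/2 * (1 - 1/3) = 1/3
//   P(candidate 1 kept) = 1 * (1 - 1/2) * (1 - 1/3) = 1/3
// which matches the uniform 1/n distribution described above.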
4550
4551
CloneMap& Compile::clone_map() { return _clone_map; }
4552
void Compile::set_clone_map(Dict* d) { _clone_map._dict = d; }
4553
4554
void NodeCloneInfo::dump() const {
4555
tty->print(" {%d:%d} ", idx(), gen());
4556
}
4557
4558
void CloneMap::clone(Node* old, Node* nnn, int gen) {
4559
uint64_t val = value(old->_idx);
4560
NodeCloneInfo cio(val);
4561
assert(val != 0, "old node should be in the map");
4562
NodeCloneInfo cin(cio.idx(), gen + cio.gen());
4563
insert(nnn->_idx, cin.get());
4564
#ifndef PRODUCT
4565
if (is_debug()) {
4566
tty->print_cr("CloneMap::clone inserted node %d info {%d:%d} into CloneMap", nnn->_idx, cin.idx(), cin.gen());
4567
}
4568
#endif
4569
}
4570
4571
void CloneMap::verify_insert_and_clone(Node* old, Node* nnn, int gen) {
4572
NodeCloneInfo cio(value(old->_idx));
4573
if (cio.get() == 0) {
4574
cio.set(old->_idx, 0);
4575
insert(old->_idx, cio.get());
4576
#ifndef PRODUCT
4577
if (is_debug()) {
4578
tty->print_cr("CloneMap::verify_insert_and_clone inserted node %d info {%d:%d} into CloneMap", old->_idx, cio.idx(), cio.gen());
4579
}
4580
#endif
4581
}
4582
clone(old, nnn, gen);
4583
}
4584
4585
int CloneMap::max_gen() const {
4586
int g = 0;
4587
DictI di(_dict);
4588
for(; di.test(); ++di) {
4589
int t = gen(di._key);
4590
if (g < t) {
4591
g = t;
4592
#ifndef PRODUCT
4593
if (is_debug()) {
4594
tty->print_cr("CloneMap::max_gen() update max=%d from %d", g, _2_node_idx_t(di._key));
4595
}
4596
#endif
4597
}
4598
}
4599
return g;
4600
}
4601
4602
void CloneMap::dump(node_idx_t key) const {
4603
uint64_t val = value(key);
4604
if (val != 0) {
4605
NodeCloneInfo ni(val);
4606
ni.dump();
4607
}
4608
}
4609
4610
// Move Allocate nodes to the start of the list
4611
void Compile::sort_macro_nodes() {
4612
int count = macro_count();
4613
int allocates = 0;
4614
for (int i = 0; i < count; i++) {
4615
Node* n = macro_node(i);
4616
if (n->is_Allocate()) {
4617
if (i != allocates) {
4618
Node* tmp = macro_node(allocates);
4619
_macro_nodes.at_put(allocates, n);
4620
_macro_nodes.at_put(i, tmp);
4621
}
4622
allocates++;
4623
}
4624
}
4625
}
4626
4627
void Compile::print_method(CompilerPhaseType cpt, const char *name, int level) {
4628
EventCompilerPhase event;
4629
if (event.should_commit()) {
4630
CompilerEvent::PhaseEvent::post(event, C->_latest_stage_start_counter, cpt, C->_compile_id, level);
4631
}
4632
#ifndef PRODUCT
4633
if (should_print(level)) {
4634
_printer->print_method(name, level);
4635
}
4636
#endif
4637
C->_latest_stage_start_counter.stamp();
4638
}
4639
4640
void Compile::print_method(CompilerPhaseType cpt, int level, int idx) {
4641
char output[1024];
4642
#ifndef PRODUCT
4643
if (idx != 0) {
4644
jio_snprintf(output, sizeof(output), "%s:%d", CompilerPhaseTypeHelper::to_string(cpt), idx);
4645
} else {
4646
jio_snprintf(output, sizeof(output), "%s", CompilerPhaseTypeHelper::to_string(cpt));
4647
}
4648
#endif
4649
print_method(cpt, output, level);
4650
}
4651
4652
void Compile::print_method(CompilerPhaseType cpt, Node* n, int level) {
4653
ResourceMark rm;
4654
stringStream ss;
4655
ss.print_raw(CompilerPhaseTypeHelper::to_string(cpt));
4656
if (n != NULL) {
4657
ss.print(": %d %s ", n->_idx, NodeClassNames[n->Opcode()]);
4658
} else {
4659
ss.print_raw(": NULL");
4660
}
4661
C->print_method(cpt, ss.as_string(), level);
4662
}
4663
4664
void Compile::end_method(int level) {
4665
EventCompilerPhase event;
4666
if (event.should_commit()) {
4667
CompilerEvent::PhaseEvent::post(event, C->_latest_stage_start_counter, PHASE_END, C->_compile_id, level);
4668
}
4669
4670
#ifndef PRODUCT
4671
if (_method != NULL && should_print(level)) {
4672
_printer->end_method();
4673
}
4674
#endif
4675
}
4676
4677
4678
#ifndef PRODUCT
4679
IdealGraphPrinter* Compile::_debug_file_printer = NULL;
4680
IdealGraphPrinter* Compile::_debug_network_printer = NULL;
4681
4682
// Called from debugger. Prints method to the default file with the default phase name.
4683
// This works regardless of any Ideal Graph Visualizer flags set or not.
4684
void igv_print() {
4685
Compile::current()->igv_print_method_to_file();
4686
}
4687
4688
// Same as igv_print() above but with a specified phase name.
4689
void igv_print(const char* phase_name) {
4690
Compile::current()->igv_print_method_to_file(phase_name);
4691
}
4692
4693
// Called from debugger. Prints method with the default phase name to the default network or the one specified with
4694
// the network flags for the Ideal Graph Visualizer, or to the default file depending on the 'network' argument.
4695
// This works regardless of any Ideal Graph Visualizer flags set or not.
4696
void igv_print(bool network) {
4697
if (network) {
4698
Compile::current()->igv_print_method_to_network();
4699
} else {
4700
Compile::current()->igv_print_method_to_file();
4701
}
4702
}
4703
4704
// Same as igv_print(bool network) above but with a specified phase name.
4705
void igv_print(bool network, const char* phase_name) {
4706
if (network) {
4707
Compile::current()->igv_print_method_to_network(phase_name);
4708
} else {
4709
Compile::current()->igv_print_method_to_file(phase_name);
4710
}
4711
}
4712
4713
// Called from debugger. Normal write to the default _printer. Only works if Ideal Graph Visualizer printing flags are set.
4714
void igv_print_default() {
4715
Compile::current()->print_method(PHASE_DEBUG, 0);
4716
}
4717
4718
// Called from debugger, especially when replaying a trace in which the program state cannot be altered like with rr replay.
4719
// A method is appended to an existing default file with the default phase name. This means that igv_append() must follow
4720
// an earlier igv_print(*) call which sets up the file. This works regardless of any Ideal Graph Visualizer flags set or not.
4721
void igv_append() {
4722
Compile::current()->igv_print_method_to_file("Debug", true);
4723
}
4724
4725
// Same as igv_append() above but with a specified phase name.
4726
void igv_append(const char* phase_name) {
4727
Compile::current()->igv_print_method_to_file(phase_name, true);
4728
}
4729
4730
void Compile::igv_print_method_to_file(const char* phase_name, bool append) {
4731
const char* file_name = "custom_debug.xml";
4732
if (_debug_file_printer == NULL) {
4733
_debug_file_printer = new IdealGraphPrinter(C, file_name, append);
4734
} else {
4735
_debug_file_printer->update_compiled_method(C->method());
4736
}
4737
tty->print_cr("Method %s to %s", append ? "appended" : "printed", file_name);
4738
_debug_file_printer->print(phase_name, (Node*)C->root());
4739
}
4740
4741
void Compile::igv_print_method_to_network(const char* phase_name) {
4742
if (_debug_network_printer == NULL) {
4743
_debug_network_printer = new IdealGraphPrinter(C);
4744
} else {
4745
_debug_network_printer->update_compiled_method(C->method());
4746
}
4747
tty->print_cr("Method printed over network stream to IGV");
4748
_debug_network_printer->print(phase_name, (Node*)C->root());
4749
}
4750
#endif
4751
4752
void Compile::add_native_invoker(RuntimeStub* stub) {
4753
_native_invokers.append(stub);
4754
}
4755
4756
Node* Compile::narrow_value(BasicType bt, Node* value, const Type* type, PhaseGVN* phase, bool transform_res) {
4757
if (type != NULL && phase->type(value)->higher_equal(type)) {
4758
return value;
4759
}
4760
Node* result = NULL;
4761
if (bt == T_BYTE) {
4762
result = phase->transform(new LShiftINode(value, phase->intcon(24)));
4763
result = new RShiftINode(result, phase->intcon(24));
4764
} else if (bt == T_BOOLEAN) {
4765
result = new AndINode(value, phase->intcon(0xFF));
4766
} else if (bt == T_CHAR) {
4767
result = new AndINode(value,phase->intcon(0xFFFF));
4768
} else {
4769
assert(bt == T_SHORT, "unexpected narrow type");
4770
result = phase->transform(new LShiftINode(value, phase->intcon(16)));
4771
result = new RShiftINode(result, phase->intcon(16));
4772
}
4773
if (transform_res) {
4774
result = phase->transform(result);
4775
}
4776
return result;
4777
}
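// Illustrative sketch (assumed int values, not part of the code above) of the
// narrowing each branch performs:
//   T_BYTE   : (x << 24) >> 24   sign-extends the low 8 bits (e.g. 0x1FF -> -1)
//   T_BOOLEAN: x & 0xFF          zero-extends the low 8 bits
//   T_CHAR   : x & 0xFFFF        zero-extends the low 16 bits
//   T_SHORT  : (x << 16) >> 16   sign-extends the low 16 bits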