// GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
// Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/opto/gcm.cpp

/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/block.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/machnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "runtime/deoptimization.hpp"
#if defined AD_MD_HPP
# include AD_MD_HPP
#elif defined TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
#elif defined TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#elif defined TARGET_ARCH_MODEL_aarch32
# include "adfiles/ad_aarch32.hpp"
#elif defined TARGET_ARCH_MODEL_aarch64
# include "adfiles/ad_aarch64.hpp"
#elif defined TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#elif defined TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
#elif defined TARGET_ARCH_MODEL_ppc_64
# include "adfiles/ad_ppc_64.hpp"
#endif


// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

// To avoid float value underflow
#define MIN_BLOCK_FREQUENCY 1.e-35f

//----------------------------schedule_node_into_block-------------------------
65
// Insert node n into block b. Look for projections of n and make sure they
66
// are in b also.
67
void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
68
// Set basic block of n, Add n to b,
69
map_node_to_block(n, b);
70
b->add_inst(n);
71
72
// After Matching, nearly any old Node may have projections trailing it.
73
// These are usually machine-dependent flags. In any case, they might
74
// float to another block below this one. Move them up.
75
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
76
Node* use = n->fast_out(i);
77
if (use->is_Proj()) {
78
Block* buse = get_block_for_node(use);
79
if (buse != b) { // In wrong block?
80
if (buse != NULL) {
81
buse->find_remove(use); // Remove from wrong block
82
}
83
map_node_to_block(use, b);
84
b->add_inst(use);
85
}
86
}
87
}
88
}
89
90
//----------------------------replace_block_proj_ctrl-------------------------
91
// Nodes that have is_block_proj() nodes as their control need to use
92
// the appropriate Region for their actual block as their control since
93
// the projection will be in a predecessor block.
94
void PhaseCFG::replace_block_proj_ctrl( Node *n ) {
95
const Node *in0 = n->in(0);
96
assert(in0 != NULL, "Only control-dependent");
97
const Node *p = in0->is_block_proj();
98
if (p != NULL && p != n) { // Control from a block projection?
99
assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here");
100
// Find trailing Region
101
Block *pb = get_block_for_node(in0); // Block-projection already has basic block
102
uint j = 0;
103
if (pb->_num_succs != 1) { // More than 1 successor?
104
// Search for successor
105
uint max = pb->number_of_nodes();
106
assert( max > 1, "" );
107
uint start = max - pb->_num_succs;
108
// Find which output path belongs to projection
109
for (j = start; j < max; j++) {
110
if( pb->get_node(j) == in0 )
111
break;
112
}
113
assert( j < max, "must find" );
114
// Change control to match head of successor basic block
115
j -= start;
116
}
117
n->set_req(0, pb->_succs[j]->head());
118
}
119
}
120
121
static bool is_dominator(Block* d, Block* n) {
122
return d->dom_lca(n) == d;
123
}
124
125
//------------------------------schedule_pinned_nodes--------------------------
126
// Set the basic block for Nodes pinned into blocks
127
void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
128
// Allocate node stack of size C->live_nodes()+8 to avoid frequent realloc
129
GrowableArray <Node *> spstack(C->live_nodes() + 8);
130
spstack.push(_root);
131
while (spstack.is_nonempty()) {
132
Node* node = spstack.pop();
133
if (!visited.test_set(node->_idx)) { // Test node and flag it as visited
134
if (node->pinned() && !has_block(node)) { // Pinned? Nail it down!
135
assert(node->in(0), "pinned Node must have Control");
136
// Before setting block replace block_proj control edge
137
replace_block_proj_ctrl(node);
138
Node* input = node->in(0);
139
while (!input->is_block_start()) {
140
input = input->in(0);
141
}
142
Block* block = get_block_for_node(input); // Basic block of controlling input
143
schedule_node_into_block(node, block);
144
}
145
146
// If the node has precedence edges (added when CastPP nodes are
147
// removed in final_graph_reshaping), fix the control of the
148
// node to cover the precedence edges and remove the
149
// dependencies.
150
Node* n = NULL;
151
for (uint i = node->len()-1; i >= node->req(); i--) {
152
Node* m = node->in(i);
153
if (m == NULL) continue;
154
// Skip the precedence edge if the test that guarded a CastPP:
155
// - was optimized out during escape analysis
156
// (OptimizePtrCompare): the CastPP's control isn't an end of
157
// block.
158
// - is moved in the branch of a dominating If: the control of
159
// the CastPP is then a Region.
160
if (m->is_block_proj() || m->is_block_start()) {
161
node->rm_prec(i);
162
if (n == NULL) {
163
n = m;
164
} else {
165
Block* bn = get_block_for_node(n);
166
Block* bm = get_block_for_node(m);
167
assert(is_dominator(bn, bm) || is_dominator(bm, bn), "one must dominate the other");
168
n = is_dominator(bn, bm) ? m : n;
169
}
170
}
171
}
172
if (n != NULL) {
173
assert(node->in(0), "control should have been set");
174
Block* bn = get_block_for_node(n);
175
Block* bnode = get_block_for_node(node->in(0));
176
assert(is_dominator(bn, bnode) || is_dominator(bnode, bn), "one must dominate the other");
177
if (!is_dominator(bn, bnode)) {
178
node->set_req(0, n);
179
}
180
}
181
182
// process all inputs that are non NULL
183
for (int i = node->req() - 1; i >= 0; --i) {
184
if (node->in(i) != NULL) {
185
spstack.push(node->in(i));
186
}
187
}
188
}
189
}
190
}
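
// Illustrative summary (hypothetical node, not from the original source): a
// pinned node such as a Return or a Region-controlled MachNode has its in(0)
// chain walked up until a block-start node (Region/Start) is found, and is
// then appended to that block:
//
//   Node* input = node->in(0);
//   while (!input->is_block_start())  input = input->in(0);
//   schedule_node_into_block(node, get_block_for_node(input));
//
// Unpinned nodes are deliberately skipped here; schedule_early() and
// schedule_late() choose their blocks later.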
191
192
#ifdef ASSERT
193
// Assert that new input b2 is dominated by all previous inputs.
194
// Check this by seeing that it is dominated by b1, the deepest
195
// input observed until b2.
196
static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
197
if (b1 == NULL) return;
198
assert(b1->_dom_depth < b2->_dom_depth, "sanity");
199
Block* tmp = b2;
200
while (tmp != b1 && tmp != NULL) {
201
tmp = tmp->_idom;
202
}
203
if (tmp != b1) {
204
// Detected an unschedulable graph. Print some nice stuff and die.
205
tty->print_cr("!!! Unschedulable graph !!!");
206
for (uint j=0; j<n->len(); j++) { // For all inputs
207
Node* inn = n->in(j); // Get input
208
if (inn == NULL) continue; // Ignore NULL, missing inputs
209
Block* inb = cfg->get_block_for_node(inn);
210
tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
211
inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
212
inn->dump();
213
}
214
tty->print("Failing node: ");
215
n->dump();
216
assert(false, "unscheduable graph");
217
}
218
}
219
#endif
220
221
static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) {
222
// Find the last input dominated by all other inputs.
223
Block* deepb = NULL; // Deepest block so far
224
int deepb_dom_depth = 0;
225
for (uint k = 0; k < n->len(); k++) { // For all inputs
226
Node* inn = n->in(k); // Get input
227
if (inn == NULL) continue; // Ignore NULL, missing inputs
228
Block* inb = cfg->get_block_for_node(inn);
229
assert(inb != NULL, "must already have scheduled this input");
230
if (deepb_dom_depth < (int) inb->_dom_depth) {
231
// The new inb must be dominated by the previous deepb.
232
// The various inputs must be linearly ordered in the dom
233
// tree, or else there will not be a unique deepest block.
234
DEBUG_ONLY(assert_dom(deepb, inb, n, cfg));
235
deepb = inb; // Save deepest block
236
deepb_dom_depth = deepb->_dom_depth;
237
}
238
}
239
assert(deepb != NULL, "must be at least one input to n");
240
return deepb;
241
}
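
// Worked example (illustrative, made-up depths): if n has inputs already
// placed in blocks at dom depths 2, 5 and 3, the loop keeps the depth-5 block
// as 'deepb'.  The DEBUG_ONLY(assert_dom(...)) check encodes the assumption
// that all input blocks lie on one chain of the dominator tree (the depth-2
// block dominates the depth-3 block, which dominates the depth-5 block);
// inputs on unrelated branches would mean there is no unique deepest block
// and the graph is unschedulable.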
242
243
244
//------------------------------schedule_early---------------------------------
245
// Find the earliest Block any instruction can be placed in. Some instructions
246
// are pinned into Blocks. Unpinned instructions can appear in the last block in
247
// which all their inputs occur.
248
bool PhaseCFG::schedule_early(VectorSet &visited, Node_List &roots) {
249
// Allocate stack with enough space to avoid frequent realloc
250
Node_Stack nstack(roots.Size() + 8);
251
// _root will be processed among C->top() inputs
252
roots.push(C->top());
253
visited.set(C->top()->_idx);
254
255
while (roots.size() != 0) {
256
// Use local variables nstack_top_n & nstack_top_i to cache values
257
// on stack's top.
258
Node* parent_node = roots.pop();
259
uint input_index = 0;
260
261
while (true) {
262
if (input_index == 0) {
263
// Fixup some control. Constants without control get attached
264
// to root and nodes that use is_block_proj() nodes should be attached
265
// to the region that starts their block.
266
const Node* control_input = parent_node->in(0);
267
if (control_input != NULL) {
268
replace_block_proj_ctrl(parent_node);
269
} else {
270
// Is a constant with NO inputs?
271
if (parent_node->req() == 1) {
272
parent_node->set_req(0, _root);
273
}
274
}
275
}
276
277
// First, visit all inputs and force them to get a block. If an
278
// input is already in a block we quit following inputs (to avoid
279
// cycles). Instead we put that Node on a worklist to be handled
280
// later (since ITS inputs may not have a block yet).
281
282
// Assume all n's inputs will be processed
283
bool done = true;
284
285
while (input_index < parent_node->len()) {
286
Node* in = parent_node->in(input_index++);
287
if (in == NULL) {
288
continue;
289
}
290
291
int is_visited = visited.test_set(in->_idx);
292
if (!has_block(in)) {
293
if (is_visited) {
294
assert(false, "graph should be schedulable");
295
return false;
296
}
297
// Save parent node and next input's index.
298
nstack.push(parent_node, input_index);
299
// Process current input now.
300
parent_node = in;
301
input_index = 0;
302
// Not all n's inputs processed.
303
done = false;
304
break;
305
} else if (!is_visited) {
306
// Visit this guy later, using worklist
307
roots.push(in);
308
}
309
}
310
311
if (done) {
312
// All of n's inputs have been processed, complete post-processing.
313
314
// Some instructions are pinned into a block. These include Region,
315
// Phi, Start, Return, and other control-dependent instructions and
316
// any projections which depend on them.
317
if (!parent_node->pinned()) {
318
// Set earliest legal block.
319
Block* earliest_block = find_deepest_input(parent_node, this);
320
map_node_to_block(parent_node, earliest_block);
321
} else {
322
assert(get_block_for_node(parent_node) == get_block_for_node(parent_node->in(0)), "Pinned Node should be at the same block as its control edge");
323
}
324
325
if (nstack.is_empty()) {
326
// Finished all nodes on stack.
327
// Process next node on the worklist 'roots'.
328
break;
329
}
330
// Get saved parent node and next input's index.
331
parent_node = nstack.node();
332
input_index = nstack.index();
333
nstack.pop();
334
}
335
}
336
}
337
return true;
338
}
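
// Worked example (illustrative, hypothetical blocks): an unpinned node whose
// inputs were placed in B2 and B4, where B2 dominates B4, is mapped to B4 by
// find_deepest_input() -- the earliest block in which every one of its inputs
// is already available.  schedule_late() may push it further down the
// dominator tree afterwards, but never above this early block.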
339
340
//------------------------------dom_lca----------------------------------------
341
// Find least common ancestor in dominator tree
342
// LCA is a current notion of LCA, to be raised above 'this'.
343
// As a convenient boundary condition, return 'this' if LCA is NULL.
344
// Find the LCA of those two nodes.
345
Block* Block::dom_lca(Block* LCA) {
346
if (LCA == NULL || LCA == this) return this;
347
348
Block* anc = this;
349
while (anc->_dom_depth > LCA->_dom_depth)
350
anc = anc->_idom; // Walk up till anc is as high as LCA
351
352
while (LCA->_dom_depth > anc->_dom_depth)
353
LCA = LCA->_idom; // Walk up till LCA is as high as anc
354
355
while (LCA != anc) { // Walk both up till they are the same
356
LCA = LCA->_idom;
357
anc = anc->_idom;
358
}
359
360
return LCA;
361
}
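
// Worked example (illustrative): in a dominator tree where B1 is the idom of
// B2 and B3, and B3 is the idom of B5 (dom depths 1, 2, 2, 3), the call
// B5->dom_lca(B2) first lifts B5 to B3 so both sides sit at depth 2, then
// walks both pointers up together until they meet at B1.  Callers such as
// raise_LCA_above_use() accumulate an LCA with this pattern:
//
//   LCA = get_block_for_node(use)->dom_lca(LCA);   // LCA may start as NULL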
362
363
//--------------------------raise_LCA_above_use--------------------------------
364
// We are placing a definition, and have been given a def->use edge.
365
// The definition must dominate the use, so move the LCA upward in the
366
// dominator tree to dominate the use. If the use is a phi, adjust
367
// the LCA only with the phi input paths which actually use this def.
368
static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, const PhaseCFG* cfg) {
369
Block* buse = cfg->get_block_for_node(use);
370
if (buse == NULL) return LCA; // Unused killing Projs have no use block
371
if (!use->is_Phi()) return buse->dom_lca(LCA);
372
uint pmax = use->req(); // Number of Phi inputs
373
// Why doesn't this loop just break after finding the matching input to
374
// the Phi? Well...it's like this. I do not have true def-use/use-def
375
// chains. Means I cannot distinguish, from the def-use direction, which
376
// of many use-defs lead from the same use to the same def. That is, this
377
// Phi might have several uses of the same def. Each use appears in a
378
// different predecessor block. But when I enter here, I cannot distinguish
379
// which use-def edge I should find the predecessor block for. So I find
380
// them all. Means I do a little extra work if a Phi uses the same value
381
// more than once.
382
for (uint j=1; j<pmax; j++) { // For all inputs
383
if (use->in(j) == def) { // Found matching input?
384
Block* pred = cfg->get_block_for_node(buse->pred(j));
385
LCA = pred->dom_lca(LCA);
386
}
387
}
388
return LCA;
389
}
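
// Example (illustrative): if 'def' feeds inputs 1 and 3 of a Phi whose block
// has predecessors P1, P2 and P3, the loop above folds P1 and P3 into the
// LCA but ignores P2.  The definition only has to dominate the predecessor
// blocks on the paths that actually carry its value, not the Phi's own block.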
390
391
//----------------------------raise_LCA_above_marks----------------------------
392
// Return a new LCA that dominates LCA and any of its marked predecessors.
393
// Search all my parents up to 'early' (exclusive), looking for predecessors
394
// which are marked with the given index. Return the LCA (in the dom tree)
395
// of all marked blocks. If there are none marked, return the original
396
// LCA.
397
static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark, Block* early, const PhaseCFG* cfg) {
398
Block_List worklist;
399
worklist.push(LCA);
400
while (worklist.size() > 0) {
401
Block* mid = worklist.pop();
402
if (mid == early) continue; // stop searching here
403
404
// Test and set the visited bit.
405
if (mid->raise_LCA_visited() == mark) continue; // already visited
406
407
// Don't process the current LCA, otherwise the search may terminate early
408
if (mid != LCA && mid->raise_LCA_mark() == mark) {
409
// Raise the LCA.
410
LCA = mid->dom_lca(LCA);
411
if (LCA == early) break; // stop searching everywhere
412
assert(early->dominates(LCA), "early is high enough");
413
// Resume searching at that point, skipping intermediate levels.
414
worklist.push(LCA);
415
if (LCA == mid)
416
continue; // Don't mark as visited to avoid early termination.
417
} else {
418
// Keep searching through this block's predecessors.
419
for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
420
Block* mid_parent = cfg->get_block_for_node(mid->pred(j));
421
worklist.push(mid_parent);
422
}
423
}
424
mid->set_raise_LCA_visited(mark);
425
}
426
return LCA;
427
}
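
// Example (illustrative, hypothetical blocks): suppose the load's current LCA
// is B7, its early block is B2, and a store in B4 on the path between them was
// tagged with set_raise_LCA_mark(load->_idx).  The walk explores B7's
// predecessors, meets the mark on B4, raises the LCA to B4->dom_lca(B7), and
// resumes from the raised block.  The returned block dominates every marked
// block (or is 'early' itself), so the load cannot sink past the store.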
428
429
//--------------------------memory_early_block--------------------------------
430
// This is a variation of find_deepest_input, the heart of schedule_early.
431
// Find the "early" block for a load, if we considered only memory and
432
// address inputs, that is, if other data inputs were ignored.
433
//
434
// Because a subset of edges are considered, the resulting block will
435
// be earlier (at a shallower dom_depth) than the true schedule_early
436
// point of the node. We compute this earlier block as a more permissive
437
// site for anti-dependency insertion, but only if subsume_loads is enabled.
438
static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg) {
439
Node* base;
440
Node* index;
441
Node* store = load->in(MemNode::Memory);
442
load->as_Mach()->memory_inputs(base, index);
443
444
assert(base != NodeSentinel && index != NodeSentinel,
445
"unexpected base/index inputs");
446
447
Node* mem_inputs[4];
448
int mem_inputs_length = 0;
449
if (base != NULL) mem_inputs[mem_inputs_length++] = base;
450
if (index != NULL) mem_inputs[mem_inputs_length++] = index;
451
if (store != NULL) mem_inputs[mem_inputs_length++] = store;
452
453
// In the comparison below, add one to account for the control input,
454
// which may be null, but always takes up a spot in the in array.
455
if (mem_inputs_length + 1 < (int) load->req()) {
456
// This "load" has more inputs than just the memory, base and index inputs.
457
// For purposes of checking anti-dependences, we need to start
458
// from the early block of only the address portion of the instruction,
459
// and ignore other blocks that may have factored into the wider
460
// schedule_early calculation.
461
if (load->in(0) != NULL) mem_inputs[mem_inputs_length++] = load->in(0);
462
463
Block* deepb = NULL; // Deepest block so far
464
int deepb_dom_depth = 0;
465
for (int i = 0; i < mem_inputs_length; i++) {
466
Block* inb = cfg->get_block_for_node(mem_inputs[i]);
467
if (deepb_dom_depth < (int) inb->_dom_depth) {
468
// The new inb must be dominated by the previous deepb.
469
// The various inputs must be linearly ordered in the dom
470
// tree, or else there will not be a unique deepest block.
471
DEBUG_ONLY(assert_dom(deepb, inb, load, cfg));
472
deepb = inb; // Save deepest block
473
deepb_dom_depth = deepb->_dom_depth;
474
}
475
}
476
early = deepb;
477
}
478
479
return early;
480
}
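
// Example (illustrative): consider a MachNode load whose in() array holds a
// control edge, the memory edge, a base and an index, plus one extra data
// operand (hypothetical).  The extra operand makes mem_inputs_length + 1
// smaller than load->req(), so 'early' is recomputed from only the memory,
// base, index and control inputs.  Ignoring the extra operand can only move
// 'early' to a shallower dom depth, which widens the region that is searched
// for anti-dependences.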
481
482
//--------------------------insert_anti_dependences---------------------------
483
// A load may need to witness memory that nearby stores can overwrite.
484
// For each nearby store, either insert an "anti-dependence" edge
485
// from the load to the store, or else move LCA upward to force the
486
// load to (eventually) be scheduled in a block above the store.
487
//
488
// Do not add edges to stores on distinct control-flow paths;
489
// only add edges to stores which might interfere.
490
//
491
// Return the (updated) LCA. There will not be any possibly interfering
492
// store between the load's "early block" and the updated LCA.
493
// Any stores in the updated LCA will have new precedence edges
494
// back to the load. The caller is expected to schedule the load
495
// in the LCA, in which case the precedence edges will make LCM
496
// preserve anti-dependences. The caller may also hoist the load
497
// above the LCA, if it is not the early block.
498
Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
499
assert(load->needs_anti_dependence_check(), "must be a load of some sort");
500
assert(LCA != NULL, "");
501
DEBUG_ONLY(Block* LCA_orig = LCA);
502
503
// Compute the alias index. Loads and stores with different alias indices
504
// do not need anti-dependence edges.
505
uint load_alias_idx = C->get_alias_index(load->adr_type());
506
#ifdef ASSERT
507
if (load_alias_idx == Compile::AliasIdxBot && C->AliasLevel() > 0 &&
508
(PrintOpto || VerifyAliases ||
509
PrintMiscellaneous && (WizardMode || Verbose))) {
510
// Load nodes should not consume all of memory.
511
// Reporting a bottom type indicates a bug in adlc.
512
// If some particular type of node validly consumes all of memory,
513
// sharpen the preceding "if" to exclude it, so we can catch bugs here.
514
tty->print_cr("*** Possible Anti-Dependence Bug: Load consumes all of memory.");
515
load->dump(2);
516
if (VerifyAliases) assert(load_alias_idx != Compile::AliasIdxBot, "");
517
}
518
#endif
519
assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrComp),
520
"String compare is only known 'load' that does not conflict with any stores");
521
assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrEquals),
522
"String equals is a 'load' that does not conflict with any stores");
523
assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrIndexOf),
524
"String indexOf is a 'load' that does not conflict with any stores");
525
assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_AryEq),
526
"Arrays equals is a 'load' that do not conflict with any stores");
527
528
if (!C->alias_type(load_alias_idx)->is_rewritable()) {
529
// It is impossible to spoil this load by putting stores before it,
530
// because we know that the stores will never update the value
531
// which 'load' must witness.
532
return LCA;
533
}
534
535
node_idx_t load_index = load->_idx;
536
537
// Note the earliest legal placement of 'load', as determined by
538
// by the unique point in the dom tree where all memory effects
539
// and other inputs are first available. (Computed by schedule_early.)
540
// For normal loads, 'early' is the shallowest place (dom graph wise)
541
// to look for anti-deps between this load and any store.
542
Block* early = get_block_for_node(load);
543
544
// If we are subsuming loads, compute an "early" block that only considers
545
// memory or address inputs. This block may be different than the
546
// schedule_early block in that it could be at an even shallower depth in the
547
// dominator tree, and allow for a broader discovery of anti-dependences.
548
if (C->subsume_loads()) {
549
early = memory_early_block(load, early, this);
550
}
551
552
ResourceArea *area = Thread::current()->resource_area();
553
Node_List worklist_mem(area); // prior memory state to store
554
Node_List worklist_store(area); // possible-def to explore
555
Node_List worklist_visited(area); // visited mergemem nodes
556
Node_List non_early_stores(area); // all relevant stores outside of early
557
bool must_raise_LCA = false;
558
559
#ifdef TRACK_PHI_INPUTS
560
// %%% This extra checking fails because MergeMem nodes are not GVNed.
561
// Provide "phi_inputs" to check if every input to a PhiNode is from the
562
// original memory state. This indicates a PhiNode which should not
563
// prevent the load from sinking. For such a block, set_raise_LCA_mark
564
// may be overly conservative.
565
// Mechanism: count inputs seen for each Phi encountered in worklist_store.
566
DEBUG_ONLY(GrowableArray<uint> phi_inputs(area, C->unique(),0,0));
567
#endif
568
569
// 'load' uses some memory state; look for users of the same state.
570
// Recurse through MergeMem nodes to the stores that use them.
571
572
// Each of these stores is a possible definition of memory
573
// that 'load' needs to use. We need to force 'load'
574
// to occur before each such store. When the store is in
575
// the same block as 'load', we insert an anti-dependence
576
// edge load->store.
577
578
// The relevant stores "nearby" the load consist of a tree rooted
579
// at initial_mem, with internal nodes of type MergeMem.
580
// Therefore, the branches visited by the worklist are of this form:
581
// initial_mem -> (MergeMem ->)* store
582
// The anti-dependence constraints apply only to the fringe of this tree.
583
584
Node* initial_mem = load->in(MemNode::Memory);
585
worklist_store.push(initial_mem);
586
worklist_visited.push(initial_mem);
587
worklist_mem.push(NULL);
588
while (worklist_store.size() > 0) {
589
// Examine a nearby store to see if it might interfere with our load.
590
Node* mem = worklist_mem.pop();
591
Node* store = worklist_store.pop();
592
uint op = store->Opcode();
593
594
// MergeMems do not directly have anti-deps.
595
// Treat them as internal nodes in a forward tree of memory states,
596
// the leaves of which are each a 'possible-def'.
597
if (store == initial_mem // root (exclusive) of tree we are searching
598
|| op == Op_MergeMem // internal node of tree we are searching
599
) {
600
mem = store; // It's not a possibly interfering store.
601
if (store == initial_mem)
602
initial_mem = NULL; // only process initial memory once
603
604
for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
605
store = mem->fast_out(i);
606
if (store->is_MergeMem()) {
607
// Be sure we don't get into combinatorial problems.
608
// (Allow phis to be repeated; they can merge two relevant states.)
609
uint j = worklist_visited.size();
610
for (; j > 0; j--) {
611
if (worklist_visited.at(j-1) == store) break;
612
}
613
if (j > 0) continue; // already on work list; do not repeat
614
worklist_visited.push(store);
615
}
616
worklist_mem.push(mem);
617
worklist_store.push(store);
618
}
619
continue;
620
}
621
622
if (op == Op_MachProj || op == Op_Catch) continue;
623
if (store->needs_anti_dependence_check()) continue; // not really a store
624
625
// Compute the alias index. Loads and stores with different alias
626
// indices do not need anti-dependence edges. Wide MemBar's are
627
// anti-dependent on everything (except immutable memories).
628
const TypePtr* adr_type = store->adr_type();
629
if (!C->can_alias(adr_type, load_alias_idx)) continue;
630
631
// Most slow-path runtime calls do NOT modify Java memory, but
632
// they can block and so write Raw memory.
633
if (store->is_Mach()) {
634
MachNode* mstore = store->as_Mach();
635
if (load_alias_idx != Compile::AliasIdxRaw) {
636
// Check for call into the runtime using the Java calling
637
// convention (and from there into a wrapper); it has no
638
// _method. Can't do this optimization for Native calls because
639
// they CAN write to Java memory.
640
if (mstore->ideal_Opcode() == Op_CallStaticJava) {
641
assert(mstore->is_MachSafePoint(), "");
642
MachSafePointNode* ms = (MachSafePointNode*) mstore;
643
assert(ms->is_MachCallJava(), "");
644
MachCallJavaNode* mcj = (MachCallJavaNode*) ms;
645
if (mcj->_method == NULL) {
646
// These runtime calls do not write to Java visible memory
647
// (other than Raw) and so do not require anti-dependence edges.
648
continue;
649
}
650
}
651
// Same for SafePoints: they read/write Raw but only read otherwise.
652
// This is basically a workaround for SafePoints only defining control
653
// instead of control + memory.
654
if (mstore->ideal_Opcode() == Op_SafePoint)
655
continue;
656
} else {
657
// Some raw memory, such as the load of "top" at an allocation,
658
// can be control dependent on the previous safepoint. See
659
// comments in GraphKit::allocate_heap() about control input.
660
// Inserting an anti-dep between such a safepoint and a use
661
// creates a cycle, and will cause a subsequent failure in
662
// local scheduling. (BugId 4919904)
663
// (%%% How can a control input be a safepoint and not a projection??)
664
if (mstore->ideal_Opcode() == Op_SafePoint && load->in(0) == mstore)
665
continue;
666
}
667
}
668
669
// Identify a block that the current load must be above,
670
// or else observe that 'store' is all the way up in the
671
// earliest legal block for 'load'. In the latter case,
672
// immediately insert an anti-dependence edge.
673
Block* store_block = get_block_for_node(store);
674
assert(store_block != NULL, "unused killing projections skipped above");
675
676
if (store->is_Phi()) {
677
// 'load' uses memory which is one (or more) of the Phi's inputs.
678
// It must be scheduled not before the Phi, but rather before
679
// each of the relevant Phi inputs.
680
//
681
// Instead of finding the LCA of all inputs to a Phi that match 'mem',
682
// we mark each corresponding predecessor block and do a combined
683
// hoisting operation later (raise_LCA_above_marks).
684
//
685
// Do not assert(store_block != early, "Phi merging memory after access")
686
// PhiNode may be at start of block 'early' with backedge to 'early'
687
DEBUG_ONLY(bool found_match = false);
688
for (uint j = PhiNode::Input, jmax = store->req(); j < jmax; j++) {
689
if (store->in(j) == mem) { // Found matching input?
690
DEBUG_ONLY(found_match = true);
691
Block* pred_block = get_block_for_node(store_block->pred(j));
692
if (pred_block != early) {
693
// If any predecessor of the Phi matches the load's "early block",
694
// we do not need a precedence edge between the Phi and 'load'
695
// since the load will be forced into a block preceding the Phi.
696
pred_block->set_raise_LCA_mark(load_index);
697
assert(!LCA_orig->dominates(pred_block) ||
698
early->dominates(pred_block), "early is high enough");
699
must_raise_LCA = true;
700
} else {
701
// anti-dependent upon PHI pinned below 'early', no edge needed
702
LCA = early; // but can not schedule below 'early'
703
}
704
}
705
}
706
assert(found_match, "no worklist bug");
707
#ifdef TRACK_PHI_INPUTS
708
#ifdef ASSERT
709
// This assert asks about correct handling of PhiNodes, which may not
710
// have all input edges directly from 'mem'. See BugId 4621264
711
int num_mem_inputs = phi_inputs.at_grow(store->_idx,0) + 1;
712
// Increment by exactly one even if there are multiple copies of 'mem'
713
// coming into the phi, because we will run this block several times
714
// if there are several copies of 'mem'. (That's how DU iterators work.)
715
phi_inputs.at_put(store->_idx, num_mem_inputs);
716
assert(PhiNode::Input + num_mem_inputs < store->req(),
717
"Expect at least one phi input will not be from original memory state");
718
#endif //ASSERT
719
#endif //TRACK_PHI_INPUTS
720
} else if (store_block != early) {
721
// 'store' is between the current LCA and earliest possible block.
722
// Label its block, and decide later on how to raise the LCA
723
// to include the effect on LCA of this store.
724
// If this store's block gets chosen as the raised LCA, we
725
// will find him on the non_early_stores list and stick him
726
// with a precedence edge.
727
// (But, don't bother if LCA is already raised all the way.)
728
if (LCA != early) {
729
store_block->set_raise_LCA_mark(load_index);
730
must_raise_LCA = true;
731
non_early_stores.push(store);
732
}
733
} else {
734
// Found a possibly-interfering store in the load's 'early' block.
735
// This means 'load' cannot sink at all in the dominator tree.
736
// Add an anti-dep edge, and squeeze 'load' into the highest block.
737
assert(store != load->in(0), "dependence cycle found");
738
if (verify) {
739
assert(store->find_edge(load) != -1, "missing precedence edge");
740
} else {
741
store->add_prec(load);
742
}
743
LCA = early;
744
// This turns off the process of gathering non_early_stores.
745
}
746
}
747
// (Worklist is now empty; all nearby stores have been visited.)
748
749
// Finished if 'load' must be scheduled in its 'early' block.
750
// If we found any stores there, they have already been given
751
// precedence edges.
752
if (LCA == early) return LCA;
753
754
// We get here only if there are no possibly-interfering stores
755
// in the load's 'early' block. Move LCA up above all predecessors
756
// which contain stores we have noted.
757
//
758
// The raised LCA block can be a home to such interfering stores,
759
// but its predecessors must not contain any such stores.
760
//
761
// The raised LCA will be a lower bound for placing the load,
762
// preventing the load from sinking past any block containing
763
// a store that may invalidate the memory state required by 'load'.
764
if (must_raise_LCA)
765
LCA = raise_LCA_above_marks(LCA, load->_idx, early, this);
766
if (LCA == early) return LCA;
767
768
// Insert anti-dependence edges from 'load' to each store
769
// in the non-early LCA block.
770
// Mine the non_early_stores list for such stores.
771
if (LCA->raise_LCA_mark() == load_index) {
772
while (non_early_stores.size() > 0) {
773
Node* store = non_early_stores.pop();
774
Block* store_block = get_block_for_node(store);
775
if (store_block == LCA) {
776
// add anti_dependence from store to load in its own block
777
assert(store != load->in(0), "dependence cycle found");
778
if (verify) {
779
assert(store->find_edge(load) != -1, "missing precedence edge");
780
} else {
781
store->add_prec(load);
782
}
783
} else {
784
assert(store_block->raise_LCA_mark() == load_index, "block was marked");
785
// Any other stores we found must be either inside the new LCA
786
// or else outside the original LCA. In the latter case, they
787
// did not interfere with any use of 'load'.
788
assert(LCA->dominates(store_block)
789
|| !LCA_orig->dominates(store_block), "no stray stores");
790
}
791
}
792
}
793
794
// Return the highest block containing stores; any stores
795
// within that block have been given anti-dependence edges.
796
return LCA;
797
}
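
// Example (illustrative, hypothetical blocks): a LoadI with early block B2 and
// current LCA B6 reads memory that a StoreI in B4 (between B2 and B6) may
// overwrite.  B4 gets set_raise_LCA_mark(load->_idx), raise_LCA_above_marks()
// then lifts the LCA to a block dominating B4, and any marked store that ends
// up in the chosen LCA block also receives store->add_prec(load) so local
// scheduling keeps the load ahead of it.  A store found directly in B2 would
// instead pin the LCA to 'early' and get its precedence edge immediately.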
798
799
// This class is used to iterate backwards over the nodes in the graph.
800
801
class Node_Backward_Iterator {
802
803
private:
804
Node_Backward_Iterator();
805
806
public:
807
// Constructor for the iterator
808
Node_Backward_Iterator(Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg);
809
810
// Return the next node in the backward traversal, or NULL when finished
811
Node *next();
812
813
private:
814
VectorSet &_visited;
815
Node_List &_stack;
816
PhaseCFG &_cfg;
817
};
818
819
// Constructor for the Node_Backward_Iterator
820
Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg)
821
: _visited(visited), _stack(stack), _cfg(cfg) {
822
// The stack should contain exactly the root
823
stack.clear();
824
stack.push(root);
825
826
// Clear the visited bits
827
visited.Clear();
828
}
829
830
// Iterator for the Node_Backward_Iterator
831
Node *Node_Backward_Iterator::next() {
832
833
// If the _stack is empty, then just return NULL: finished.
834
if ( !_stack.size() )
835
return NULL;
836
837
// '_stack' is emulating a real _stack. The 'visit-all-users' loop has been
838
// made stateless, so I do not need to record the index 'i' on my _stack.
839
// Instead I visit all users each time, scanning for unvisited users.
840
// I visit unvisited not-anti-dependence users first, then anti-dependent
841
// children next.
842
Node *self = _stack.pop();
843
844
// I cycle here when I am entering a deeper level of recursion.
845
// The key variable 'self' was set prior to jumping here.
846
while( 1 ) {
847
848
_visited.set(self->_idx);
849
850
// Now schedule all uses as late as possible.
851
const Node* src = self->is_Proj() ? self->in(0) : self;
852
uint src_rpo = _cfg.get_block_for_node(src)->_rpo;
853
854
// Schedule all nodes in a post-order visit
855
Node *unvisited = NULL; // Unvisited anti-dependent Node, if any
856
857
// Scan for unvisited nodes
858
for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
859
// For all uses, schedule late
860
Node* n = self->fast_out(i); // Use
861
862
// Skip already visited children
863
if ( _visited.test(n->_idx) )
864
continue;
865
866
// do not traverse backward control edges
867
Node *use = n->is_Proj() ? n->in(0) : n;
868
uint use_rpo = _cfg.get_block_for_node(use)->_rpo;
869
870
if ( use_rpo < src_rpo )
871
continue;
872
873
// Phi nodes always precede uses in a basic block
874
if ( use_rpo == src_rpo && use->is_Phi() )
875
continue;
876
877
unvisited = n; // Found unvisited
878
879
// Check for possible-anti-dependent
880
if( !n->needs_anti_dependence_check() )
881
break; // Not visited, not anti-dep; schedule it NOW
882
}
883
884
// Did I find an unvisited not-anti-dependent Node?
885
if ( !unvisited )
886
break; // All done with children; post-visit 'self'
887
888
// Visit the unvisited Node. Contains the obvious push to
889
// indicate I'm entering a deeper level of recursion. I push the
890
// old state onto the _stack and set a new state and loop (recurse).
891
_stack.push(self);
892
self = unvisited;
893
} // End recursion loop
894
895
return self;
896
}
897
898
//------------------------------ComputeLatenciesBackwards----------------------
899
// Compute the latency of all the instructions.
900
void PhaseCFG::compute_latencies_backwards(VectorSet &visited, Node_List &stack) {
901
#ifndef PRODUCT
902
if (trace_opto_pipelining())
903
tty->print("\n#---- ComputeLatenciesBackwards ----\n");
904
#endif
905
906
Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
907
Node *n;
908
909
// Walk over all the nodes from last to first
910
while (n = iter.next()) {
911
// Set the latency for the definitions of this instruction
912
partial_latency_of_defs(n);
913
}
914
} // end ComputeLatenciesBackwards
915
916
//------------------------------partial_latency_of_defs------------------------
917
// Compute the latency impact of this node on all defs. This computes
918
// a number that increases as we approach the beginning of the routine.
919
void PhaseCFG::partial_latency_of_defs(Node *n) {
920
// Set the latency for this instruction
921
#ifndef PRODUCT
922
if (trace_opto_pipelining()) {
923
tty->print("# latency_to_inputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
924
dump();
925
}
926
#endif
927
928
if (n->is_Proj()) {
929
n = n->in(0);
930
}
931
932
if (n->is_Root()) {
933
return;
934
}
935
936
uint nlen = n->len();
937
uint use_latency = get_latency_for_node(n);
938
uint use_pre_order = get_block_for_node(n)->_pre_order;
939
940
for (uint j = 0; j < nlen; j++) {
941
Node *def = n->in(j);
942
943
if (!def || def == n) {
944
continue;
945
}
946
947
// Walk backwards thru projections
948
if (def->is_Proj()) {
949
def = def->in(0);
950
}
951
952
#ifndef PRODUCT
953
if (trace_opto_pipelining()) {
954
tty->print("# in(%2d): ", j);
955
def->dump();
956
}
957
#endif
958
959
// If the defining block is not known, assume it is ok
960
Block *def_block = get_block_for_node(def);
961
uint def_pre_order = def_block ? def_block->_pre_order : 0;
962
963
if ((use_pre_order < def_pre_order) || (use_pre_order == def_pre_order && n->is_Phi())) {
964
continue;
965
}
966
967
uint delta_latency = n->latency(j);
968
uint current_latency = delta_latency + use_latency;
969
970
if (get_latency_for_node(def) < current_latency) {
971
set_latency_for_node(def, current_latency);
972
}
973
974
#ifndef PRODUCT
975
if (trace_opto_pipelining()) {
976
tty->print_cr("# %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d", use_latency, j, delta_latency, current_latency, def->_idx, get_latency_for_node(def));
977
}
978
#endif
979
}
980
}
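
// Worked example (illustrative numbers): if node n currently has latency 4 and
// its input edge j contributes n->latency(j) == 2, then the def on that edge
// must be ready at least 4 + 2 = 6 "ticks" before the end of the routine, so
// its recorded latency is raised to at least 6.  Because Node_Backward_Iterator
// visits uses before their defs, each def ends up with the maximum such sum
// over all of its uses, i.e. the length of its longest latency path to the end.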
981
982
//------------------------------latency_from_use-------------------------------
983
// Compute the latency of a specific use
984
int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
985
// If self-reference, return no latency
986
if (use == n || use->is_Root()) {
987
return 0;
988
}
989
990
uint def_pre_order = get_block_for_node(def)->_pre_order;
991
uint latency = 0;
992
993
// If the use is not a projection, then it is simple...
994
if (!use->is_Proj()) {
995
#ifndef PRODUCT
996
if (trace_opto_pipelining()) {
997
tty->print("# out(): ");
998
use->dump();
999
}
1000
#endif
1001
1002
uint use_pre_order = get_block_for_node(use)->_pre_order;
1003
1004
if (use_pre_order < def_pre_order)
1005
return 0;
1006
1007
if (use_pre_order == def_pre_order && use->is_Phi())
1008
return 0;
1009
1010
uint nlen = use->len();
1011
uint nl = get_latency_for_node(use);
1012
1013
for ( uint j=0; j<nlen; j++ ) {
1014
if (use->in(j) == n) {
1015
// Change this if we want local latencies
1016
uint ul = use->latency(j);
1017
uint l = ul + nl;
1018
if (latency < l) latency = l;
1019
#ifndef PRODUCT
1020
if (trace_opto_pipelining()) {
1021
tty->print_cr("# %d + edge_latency(%d) == %d -> %d, latency = %d",
1022
nl, j, ul, l, latency);
1023
}
1024
#endif
1025
}
1026
}
1027
} else {
1028
// This is a projection, just grab the latency of the use(s)
1029
for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
1030
uint l = latency_from_use(use, def, use->fast_out(j));
1031
if (latency < l) latency = l;
1032
}
1033
}
1034
1035
return latency;
1036
}
1037
1038
//------------------------------latency_from_uses------------------------------
1039
// Compute the latency of this instruction relative to all of its uses.
1040
// This computes a number that increases as we approach the beginning of the
1041
// routine.
1042
void PhaseCFG::latency_from_uses(Node *n) {
1043
// Set the latency for this instruction
1044
#ifndef PRODUCT
1045
if (trace_opto_pipelining()) {
1046
tty->print("# latency_from_outputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
1047
dump();
1048
}
1049
#endif
1050
uint latency=0;
1051
const Node *def = n->is_Proj() ? n->in(0): n;
1052
1053
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1054
uint l = latency_from_use(n, def, n->fast_out(i));
1055
1056
if (latency < l) latency = l;
1057
}
1058
1059
set_latency_for_node(n, latency);
1060
}
1061
1062
//------------------------------hoist_to_cheaper_block-------------------------
1063
// Pick a block for node self, between early and LCA, that is a cheaper
1064
// alternative to LCA.
1065
Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
1066
const double delta = 1+PROB_UNLIKELY_MAG(4);
1067
Block* least = LCA;
1068
double least_freq = least->_freq;
1069
uint target = get_latency_for_node(self);
1070
uint start_latency = get_latency_for_node(LCA->head());
1071
uint end_latency = get_latency_for_node(LCA->get_node(LCA->end_idx()));
1072
bool in_latency = (target <= start_latency);
1073
const Block* root_block = get_block_for_node(_root);
1074
1075
// Turn off latency scheduling if scheduling is just plain off
1076
if (!C->do_scheduling())
1077
in_latency = true;
1078
1079
// Do not hoist (to cover latency) instructions which target a
1080
// single register. Hoisting stretches the live range of the
1081
// single register and may force spilling.
1082
MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
1083
if (mach && mach->out_RegMask().is_bound1() && mach->out_RegMask().is_NotEmpty())
1084
in_latency = true;
1085
1086
#ifndef PRODUCT
1087
if (trace_opto_pipelining()) {
1088
tty->print("# Find cheaper block for latency %d: ", get_latency_for_node(self));
1089
self->dump();
1090
tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
1091
LCA->_pre_order,
1092
LCA->head()->_idx,
1093
start_latency,
1094
LCA->get_node(LCA->end_idx())->_idx,
1095
end_latency,
1096
least_freq);
1097
}
1098
#endif
1099
1100
int cand_cnt = 0; // number of candidates tried
1101
1102
// Walk up the dominator tree from LCA (Lowest common ancestor) to
1103
// the earliest legal location. Capture the least execution frequency.
1104
while (LCA != early) {
1105
LCA = LCA->_idom; // Follow up the dominator tree
1106
1107
if (LCA == NULL) {
1108
// Bailout without retry
1109
assert(false, "graph should be schedulable");
1110
C->record_method_not_compilable("late schedule failed: LCA == NULL");
1111
return least;
1112
}
1113
1114
// Don't hoist machine instructions to the root basic block
1115
if (mach && LCA == root_block)
1116
break;
1117
1118
uint start_lat = get_latency_for_node(LCA->head());
1119
uint end_idx = LCA->end_idx();
1120
uint end_lat = get_latency_for_node(LCA->get_node(end_idx));
1121
double LCA_freq = LCA->_freq;
1122
#ifndef PRODUCT
1123
if (trace_opto_pipelining()) {
1124
tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
1125
LCA->_pre_order, LCA->head()->_idx, start_lat, end_idx, end_lat, LCA_freq);
1126
}
1127
#endif
1128
cand_cnt++;
1129
if (LCA_freq < least_freq || // Better Frequency
1130
(StressGCM && Compile::randomized_select(cand_cnt)) || // Should be randomly accepted in stress mode
1131
(!StressGCM && // Otherwise, choose with latency
1132
!in_latency && // No block containing latency
1133
LCA_freq < least_freq * delta && // No worse frequency
1134
target >= end_lat && // within latency range
1135
!self->is_iteratively_computed() ) // But don't hoist IV increments
1136
// because they may end up above other uses of their phi forcing
1137
// their result register to be different from their input.
1138
) {
1139
least = LCA; // Found cheaper block
1140
least_freq = LCA_freq;
1141
start_latency = start_lat;
1142
end_latency = end_lat;
1143
if (target <= start_lat)
1144
in_latency = true;
1145
}
1146
}
1147
1148
#ifndef PRODUCT
1149
if (trace_opto_pipelining()) {
1150
tty->print_cr("# Choose block B%d with start latency=%d and freq=%g",
1151
least->_pre_order, start_latency, least_freq);
1152
}
1153
#endif
1154
1155
// See if the latency needs to be updated
1156
if (target < end_latency) {
1157
#ifndef PRODUCT
1158
if (trace_opto_pipelining()) {
1159
tty->print_cr("# Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
1160
}
1161
#endif
1162
set_latency_for_node(self, end_latency);
1163
partial_latency_of_defs(self);
1164
}
1165
1166
return least;
1167
}
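
// Example (illustrative, made-up frequencies): with StressGCM off, a dominating
// candidate block with frequency 0.5 beats an LCA block with frequency 10.0 on
// the frequency clause alone.  A candidate that is not strictly cheaper can
// only win through the latency clause: the node must not already be "in
// latency", the candidate's frequency must stay within the tiny 'delta' factor
// of the best seen so far, the node's latency target must reach the block's
// end latency, and iteratively computed nodes (e.g. IV increments) never move.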
1168
1169
1170
//------------------------------schedule_late-----------------------------------
1171
// Now schedule all codes as LATE as possible. This is the LCA in the
1172
// dominator tree of all USES of a value. Pick the block with the least
1173
// loop nesting depth that is lowest in the dominator tree.
1174
extern const char must_clone[];
1175
void PhaseCFG::schedule_late(VectorSet &visited, Node_List &stack) {
1176
#ifndef PRODUCT
1177
if (trace_opto_pipelining())
1178
tty->print("\n#---- schedule_late ----\n");
1179
#endif
1180
1181
Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
1182
Node *self;
1183
1184
// Walk over all the nodes from last to first
1185
while (self = iter.next()) {
1186
Block* early = get_block_for_node(self); // Earliest legal placement
1187
1188
if (self->is_top()) {
1189
// Top node goes in bb #2 with other constants.
1190
// It must be special-cased, because it has no out edges.
1191
early->add_inst(self);
1192
continue;
1193
}
1194
1195
// No uses, just terminate
1196
if (self->outcnt() == 0) {
1197
assert(self->is_MachProj(), "sanity");
1198
continue; // Must be a dead machine projection
1199
}
1200
1201
// If node is pinned in the block, then no scheduling can be done.
1202
if( self->pinned() ) // Pinned in block?
1203
continue;
1204
1205
MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
1206
if (mach) {
1207
switch (mach->ideal_Opcode()) {
1208
case Op_CreateEx:
1209
// Don't move exception creation
1210
early->add_inst(self);
1211
continue;
1212
break;
1213
case Op_CheckCastPP:
1214
// Don't move CheckCastPP nodes away from their input, if the input
1215
// is a rawptr (5071820).
1216
Node *def = self->in(1);
1217
if (def != NULL && def->bottom_type()->base() == Type::RawPtr) {
1218
early->add_inst(self);
1219
#ifdef ASSERT
1220
_raw_oops.push(def);
1221
#endif
1222
continue;
1223
}
1224
break;
1225
}
1226
}
1227
1228
// Gather LCA of all uses
1229
Block *LCA = NULL;
1230
{
1231
for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
1232
// For all uses, find LCA
1233
Node* use = self->fast_out(i);
1234
LCA = raise_LCA_above_use(LCA, use, self, this);
1235
}
1236
} // (Hide defs of imax, i from rest of block.)
1237
1238
// Place temps in the block of their use. This isn't a
1239
// requirement for correctness but it reduces useless
1240
// interference between temps and other nodes.
1241
if (mach != NULL && mach->is_MachTemp()) {
1242
map_node_to_block(self, LCA);
1243
LCA->add_inst(self);
1244
continue;
1245
}
1246
1247
// Check if 'self' could be anti-dependent on memory
1248
if (self->needs_anti_dependence_check()) {
1249
// Hoist LCA above possible-defs and insert anti-dependences to
1250
// defs in new LCA block.
1251
LCA = insert_anti_dependences(LCA, self);
1252
}
1253
1254
if (early->_dom_depth > LCA->_dom_depth) {
1255
// Somehow the LCA has moved above the earliest legal point.
1256
// (One way this can happen is via memory_early_block.)
1257
if (C->subsume_loads() == true && !C->failing()) {
1258
// Retry with subsume_loads == false
1259
// If this is the first failure, the sentinel string will "stick"
1260
// to the Compile object, and the C2Compiler will see it and retry.
1261
C->record_failure(C2Compiler::retry_no_subsuming_loads());
1262
} else {
1263
// Bailout without retry when (early->_dom_depth > LCA->_dom_depth)
1264
assert(false, "graph should be schedulable");
1265
C->record_method_not_compilable("late schedule failed: incorrect graph");
1266
}
1267
return;
1268
}
1269
1270
// If there is no opportunity to hoist, then we're done.
1271
// In stress mode, try to hoist even the single operations.
1272
bool try_to_hoist = StressGCM || (LCA != early);
1273
1274
// Must clone guys stay next to use; no hoisting allowed.
1275
// Also cannot hoist guys that alter memory or are otherwise not
1276
// allocatable (hoisting can make a value live longer, leading to
1277
// anti and output dependency problems which are normally resolved
1278
// by the register allocator giving everyone a different register).
1279
if (mach != NULL && must_clone[mach->ideal_Opcode()])
1280
try_to_hoist = false;
1281
1282
Block* late = NULL;
1283
if (try_to_hoist) {
1284
// Now find the block with the least execution frequency.
1285
// Start at the latest schedule and work up to the earliest schedule
1286
// in the dominator tree. Thus the Node will dominate all its uses.
1287
late = hoist_to_cheaper_block(LCA, early, self);
1288
} else {
1289
// Just use the LCA of the uses.
1290
late = LCA;
1291
}
1292
1293
// Put the node into target block
1294
schedule_node_into_block(self, late);
1295
1296
#ifdef ASSERT
1297
if (self->needs_anti_dependence_check()) {
1298
// since precedence edges are only inserted when we're sure they
1299
// are needed make sure that after placement in a block we don't
1300
// need any new precedence edges.
1301
verify_anti_dependences(late, self);
1302
}
1303
#endif
1304
} // Loop until all nodes have been visited
1305
1306
} // end ScheduleLate
1307
1308
//------------------------------GlobalCodeMotion-------------------------------
1309
void PhaseCFG::global_code_motion() {
1310
ResourceMark rm;
1311
1312
#ifndef PRODUCT
1313
if (trace_opto_pipelining()) {
1314
tty->print("\n---- Start GlobalCodeMotion ----\n");
1315
}
1316
#endif
1317
1318
// Initialize the node to block mapping for things on the proj_list
1319
for (uint i = 0; i < _matcher.number_of_projections(); i++) {
1320
unmap_node_from_block(_matcher.get_projection(i));
1321
}
1322
1323
// Set the basic block for Nodes pinned into blocks
1324
Arena* arena = Thread::current()->resource_area();
1325
VectorSet visited(arena);
1326
schedule_pinned_nodes(visited);
1327
1328
// Find the earliest Block any instruction can be placed in. Some
1329
// instructions are pinned into Blocks. Unpinned instructions can
1330
// appear in last block in which all their inputs occur.
1331
visited.Clear();
1332
Node_List stack(arena);
1333
// Pre-grow the list
1334
stack.map((C->live_nodes() >> 1) + 16, NULL);
1335
if (!schedule_early(visited, stack)) {
1336
// Bailout without retry
1337
C->record_method_not_compilable("early schedule failed");
1338
return;
1339
}
1340
1341
// Build Def-Use edges.
1342
// Compute the latency information (via backwards walk) for all the
1343
// instructions in the graph
1344
_node_latency = new GrowableArray<uint>(); // resource_area allocation
1345
1346
if (C->do_scheduling()) {
1347
compute_latencies_backwards(visited, stack);
1348
}
1349
1350
// Now schedule all codes as LATE as possible. This is the LCA in the
1351
// dominator tree of all USES of a value. Pick the block with the least
1352
// loop nesting depth that is lowest in the dominator tree.
1353
// ( visited.Clear() called in schedule_late()->Node_Backward_Iterator() )
1354
schedule_late(visited, stack);
1355
if (C->failing()) {
1356
return;
1357
}
1358
1359
#ifndef PRODUCT
1360
if (trace_opto_pipelining()) {
1361
tty->print("\n---- Detect implicit null checks ----\n");
1362
}
1363
#endif
1364
1365
// Detect implicit-null-check opportunities. Basically, find NULL checks
1366
// with suitable memory ops nearby. Use the memory op to do the NULL check.
1367
// I can generate a memory op if there is not one nearby.
1368
if (C->is_method_compilation()) {
1369
// By reversing the loop direction we get a very minor gain on mpegaudio.
1370
// Feel free to revert to a forward loop for clarity.
1371
// for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) {
1372
for (int i = _matcher._null_check_tests.size() - 2; i >= 0; i -= 2) {
1373
Node* proj = _matcher._null_check_tests[i];
1374
Node* val = _matcher._null_check_tests[i + 1];
1375
Block* block = get_block_for_node(proj);
1376
implicit_null_check(block, proj, val, C->allowed_deopt_reasons());
1377
// The implicit_null_check will only perform the transformation
1378
// if the null branch is truly uncommon, *and* it leads to an
1379
// uncommon trap. Combined with the too_many_traps guards
1380
// above, this prevents SEGV storms reported in 6366351,
1381
// by recompiling offending methods without this optimization.
1382
}
1383
}
1384
1385
#ifndef PRODUCT
1386
if (trace_opto_pipelining()) {
1387
tty->print("\n---- Start Local Scheduling ----\n");
1388
}
1389
#endif
1390
1391
// Schedule locally. Right now a simple topological sort.
1392
// Later, do a real latency aware scheduler.
1393
GrowableArray<int> ready_cnt(C->unique(), C->unique(), -1);
1394
visited.Clear();
1395
for (uint i = 0; i < number_of_blocks(); i++) {
1396
Block* block = get_block(i);
1397
if (!schedule_local(block, ready_cnt, visited)) {
1398
if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
1399
C->record_method_not_compilable("local schedule failed");
1400
}
1401
return;
1402
}
1403
}
1404
1405
// If we inserted any instructions between a Call and its CatchNode,
1406
// clone the instructions on all paths below the Catch.
1407
for (uint i = 0; i < number_of_blocks(); i++) {
1408
Block* block = get_block(i);
1409
call_catch_cleanup(block);
1410
}
1411
1412
#ifndef PRODUCT
1413
if (trace_opto_pipelining()) {
1414
tty->print("\n---- After GlobalCodeMotion ----\n");
1415
for (uint i = 0; i < number_of_blocks(); i++) {
1416
Block* block = get_block(i);
1417
block->dump();
1418
}
1419
}
1420
#endif
1421
// Dead.
1422
_node_latency = (GrowableArray<uint> *)((intptr_t)0xdeadbeef);
1423
}
1424
1425
bool PhaseCFG::do_global_code_motion() {
1426
1427
build_dominator_tree();
1428
if (C->failing()) {
1429
return false;
1430
}
1431
1432
NOT_PRODUCT( C->verify_graph_edges(); )
1433
1434
estimate_block_frequency();
1435
1436
global_code_motion();
1437
1438
if (C->failing()) {
1439
return false;
1440
}
1441
1442
return true;
1443
}
1444
1445
//------------------------------Estimate_Block_Frequency-----------------------
1446
// Estimate block frequencies based on IfNode probabilities.
1447
void PhaseCFG::estimate_block_frequency() {
1448
1449
// Force conditional branches leading to uncommon traps to be unlikely,
1450
// not because we get to the uncommon_trap with less relative frequency,
1451
// but because an uncommon_trap typically causes a deopt, so we only get
1452
// there once.
1453
if (C->do_freq_based_layout()) {
1454
Block_List worklist;
1455
Block* root_blk = get_block(0);
1456
for (uint i = 1; i < root_blk->num_preds(); i++) {
1457
Block *pb = get_block_for_node(root_blk->pred(i));
1458
if (pb->has_uncommon_code()) {
1459
worklist.push(pb);
1460
}
1461
}
1462
while (worklist.size() > 0) {
1463
Block* uct = worklist.pop();
1464
if (uct == get_root_block()) {
1465
continue;
1466
}
1467
for (uint i = 1; i < uct->num_preds(); i++) {
1468
Block *pb = get_block_for_node(uct->pred(i));
1469
if (pb->_num_succs == 1) {
1470
worklist.push(pb);
1471
} else if (pb->num_fall_throughs() == 2) {
1472
pb->update_uncommon_branch(uct);
1473
}
1474
}
1475
}
1476
}

  // Create the loop tree and calculate loop depth.
  _root_loop = create_loop_tree();
  _root_loop->compute_loop_depth(0);

  // Compute block frequency of each block, relative to a single loop entry.
  _root_loop->compute_freq();

  // Adjust all frequencies to be relative to a single method entry
  _root_loop->_freq = 1.0;
  _root_loop->scale_freq();

  // Save outermost loop frequency for LRG frequency threshold
  _outer_loop_frequency = _root_loop->outer_loop_freq();

  // force paths ending at uncommon traps to be infrequent
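  // Unlike the branch-probability adjustment above (used with freq-based
  // layout), this variant pins the frequency of trap-bound blocks directly:
  // each such block gets _freq = PROB_MIN, and the walk continues through
  // single-successor predecessors that still have a larger frequency.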
  if (!C->do_freq_based_layout()) {
    Block_List worklist;
    Block* root_blk = get_block(0);
    for (uint i = 1; i < root_blk->num_preds(); i++) {
      Block *pb = get_block_for_node(root_blk->pred(i));
      if (pb->has_uncommon_code()) {
        worklist.push(pb);
      }
    }
    while (worklist.size() > 0) {
      Block* uct = worklist.pop();
      uct->_freq = PROB_MIN;
      for (uint i = 1; i < uct->num_preds(); i++) {
        Block *pb = get_block_for_node(uct->pred(i));
        if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) {
          worklist.push(pb);
        }
      }
    }
  }

#ifdef ASSERT
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* b = get_block(i);
    assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency");
  }
#endif

#ifndef PRODUCT
  if (PrintCFGBlockFreq) {
    tty->print_cr("CFG Block Frequencies");
    _root_loop->dump_tree();
    if (Verbose) {
      tty->print_cr("PhaseCFG dump");
      dump();
      tty->print_cr("Node dump");
      _root->dump(99999);
    }
  }
#endif
}

//----------------------------create_loop_tree--------------------------------
// Create a loop tree from the CFG
CFGLoop* PhaseCFG::create_loop_tree() {

#ifdef ASSERT
  assert(get_block(0) == get_root_block(), "first block should be root block");
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    // Check that the _loop fields are clear...we could clear them if not.
    assert(block->_loop == NULL, "clear _loop expected");
    // Sanity check that the RPO numbering is reflected in the _blocks array.
    // It doesn't have to be for the loop tree to be built, but if it is not,
    // then the blocks have been reordered since dom graph building...which
    // may call the RPO numbering into question.
    assert(block->_rpo == i, "unexpected reverse post order number");
  }
#endif

  int idct = 0;
  CFGLoop* root_loop = new CFGLoop(idct++);

  Block_List worklist;

  // Assign blocks to loops
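  // Blocks are visited from the highest RPO number down, so inner loop heads
  // are processed before the heads of their enclosing loops.  Each block
  // headed by a LoopNode starts a new CFGLoop; its members are collected by
  // walking predecessors backward from the back-edge tail (push_pred),
  // stopping at the loop head.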
  for(uint i = number_of_blocks() - 1; i > 0; i-- ) { // skip Root block
    Block* block = get_block(i);

    if (block->head()->is_Loop()) {
      Block* loop_head = block;
      assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
      Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
      Block* tail = get_block_for_node(tail_n);

      // Defensively filter out Loop nodes for non-single-entry loops.
      // For all reasonable loops, the head occurs before the tail in RPO.
      if (i <= tail->_rpo) {

        // The tail and (recursive) predecessors of the tail
        // are made members of a new loop.

        assert(worklist.size() == 0, "nonempty worklist");
        CFGLoop* nloop = new CFGLoop(idct++);
        assert(loop_head->_loop == NULL, "just checking");
        loop_head->_loop = nloop;
        // Add to nloop so push_pred() will skip over inner loops
        nloop->add_member(loop_head);
        nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, this);

        while (worklist.size() > 0) {
          Block* member = worklist.pop();
          if (member != loop_head) {
            for (uint j = 1; j < member->num_preds(); j++) {
              nloop->push_pred(member, j, worklist, this);
            }
          }
        }
      }
    }
  }

  // Create a member list for each loop consisting
  // of both blocks and (immediate child) loops.
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    CFGLoop* lp = block->_loop;
    if (lp == NULL) {
      // Not assigned to a loop. Add it to the method's pseudo loop.
      block->_loop = root_loop;
      lp = root_loop;
    }
    if (lp == root_loop || block != lp->head()) { // loop heads are already members
      lp->add_member(block);
    }
    if (lp != root_loop) {
      if (lp->parent() == NULL) {
        // Not a nested loop. Make it a child of the method's pseudo loop.
        root_loop->add_nested_loop(lp);
      }
      if (block == lp->head()) {
        // Add nested loop to member list of parent loop.
        lp->parent()->add_member(lp);
      }
    }
  }

  return root_loop;
}

//------------------------------push_pred--------------------------------------
void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) {
  Node* pred_n = blk->pred(i);
  Block* pred = cfg->get_block_for_node(pred_n);
  CFGLoop *pred_loop = pred->_loop;
  if (pred_loop == NULL) {
    // Filter out blocks for non-single-entry loops.
    // For all reasonable loops, the head occurs before the tail in RPO.
    if (pred->_rpo > head()->_rpo) {
      pred->_loop = this;
      worklist.push(pred);
    }
  } else if (pred_loop != this) {
    // Nested loop.
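    // The predecessor already belongs to some (inner) loop: climb its parent
    // chain until we reach a loop whose parent is either unset or this loop.
    // If it is unset, adopt that loop as a nested child and continue the
    // membership walk through the inner loop's entry edge rather than
    // through its body.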
    while (pred_loop->_parent != NULL && pred_loop->_parent != this) {
      pred_loop = pred_loop->_parent;
    }
    // Make pred's loop be a child
    if (pred_loop->_parent == NULL) {
      add_nested_loop(pred_loop);
      // Continue with loop entry predecessor.
      Block* pred_head = pred_loop->head();
      assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
      assert(pred_head != head(), "loop head in only one loop");
      push_pred(pred_head, LoopNode::EntryControl, worklist, cfg);
    } else {
      assert(pred_loop->_parent == this && _parent == NULL, "just checking");
    }
  }
}

//------------------------------add_nested_loop--------------------------------
// Make cl a child of the current loop in the loop tree.
void CFGLoop::add_nested_loop(CFGLoop* cl) {
  assert(_parent == NULL, "no parent yet");
  assert(cl != this, "not my own parent");
  cl->_parent = this;
  CFGLoop* ch = _child;
  if (ch == NULL) {
    _child = cl;
  } else {
    while (ch->_sibling != NULL) { ch = ch->_sibling; }
    ch->_sibling = cl;
  }
}

//------------------------------compute_loop_depth-----------------------------
// Store the loop depth in each CFGLoop object.
// Recursively walk the children to do the same for them.
void CFGLoop::compute_loop_depth(int depth) {
  _depth = depth;
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->compute_loop_depth(depth + 1);
    ch = ch->_sibling;
  }
}

//------------------------------compute_freq-----------------------------------
// Compute the frequency of each block and loop, relative to a single entry
// into the dominating loop head.
void CFGLoop::compute_freq() {
  // Bottom up traversal of loop tree (visit inner loops first.)
  // Set loop head frequency to 1.0, then transitively
  // compute frequency for all successors in the loop,
  // as well as for each exit edge.  Inner loops are
  // treated as single blocks with loop exit targets
  // as the successor blocks.
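  //
  // Because an inner loop is a single member here, its contribution is
  // spread over its exit targets using the exit probabilities that were
  // computed when the inner loop's own compute_freq() ran (see the _exits
  // handling in the member walk below).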

  // Nested loops first
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->compute_freq();
    ch = ch->_sibling;
  }
  assert (_members.length() > 0, "no empty loops");
  Block* hd = head();
  hd->_freq = 1.0f;
  for (int i = 0; i < _members.length(); i++) {
    CFGElement* s = _members.at(i);
    float freq = s->_freq;
    if (s->is_block()) {
      Block* b = s->as_Block();
      for (uint j = 0; j < b->_num_succs; j++) {
        Block* sb = b->_succs[j];
        update_succ_freq(sb, freq * b->succ_prob(j));
      }
    } else {
      CFGLoop* lp = s->as_CFGLoop();
      assert(lp->_parent == this, "immediate child");
      for (int k = 0; k < lp->_exits.length(); k++) {
        Block* eb = lp->_exits.at(k).get_target();
        float prob = lp->_exits.at(k).get_prob();
        update_succ_freq(eb, freq * prob);
      }
    }
  }

  // For all loops other than the outer, "method" loop,
  // sum and normalize the exit probability. The "method" loop
  // should keep the initial exit probability of 1, so that
  // inner blocks do not get erroneously scaled.
  if (_depth != 0) {
    // Total the exit probabilities for this loop.
    float exits_sum = 0.0f;
    for (int i = 0; i < _exits.length(); i++) {
      exits_sum += _exits.at(i).get_prob();
    }

    // Normalize the exit probabilities. Until now, the
    // probabilities estimate the possibility of exit within
    // a single loop iteration; afterward, they estimate
    // the probability of exit per loop entry.
    for (int i = 0; i < _exits.length(); i++) {
      Block* et = _exits.at(i).get_target();
      float new_prob = 0.0f;
      if (_exits.at(i).get_prob() > 0.0f) {
        new_prob = _exits.at(i).get_prob() / exits_sum;
      }
      BlockProbPair bpp(et, new_prob);
      _exits.at_put(i, bpp);
    }
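
    // For example, two exits with per-iteration probabilities 0.01 and 0.03
    // give exits_sum = 0.04 and normalized exit probabilities 0.25 and 0.75;
    // the saved _exit_prob of 0.04 then corresponds to an expected trip
    // count of roughly 25 iterations per loop entry.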

    // Save the total, but guard against unreasonable probability,
    // as the value is used to estimate the loop trip count.
    // An infinite trip count would blur relative block
    // frequencies.
    if (exits_sum > 1.0f) exits_sum = 1.0f;
    if (exits_sum < PROB_MIN) exits_sum = PROB_MIN;
    _exit_prob = exits_sum;
  }
}

//------------------------------succ_prob-------------------------------------
// Determine the probability of reaching successor 'i' from the receiver block.
float Block::succ_prob(uint i) {
  int eidx = end_idx();
  Node *n = get_node(eidx); // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // Can only reach here if called after lcm. The original Op_If is gone,
      // so we attempt to infer the probability from one or both of the
      // successor blocks.
      assert(_num_succs == 2, "expecting 2 successors of a null check");
      // If either successor has only one predecessor, then the
      // probability estimate can be derived using the
      // relative frequency of the successor and this block.
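      // (num_preds() == 2 means a single real predecessor, since input 0 of
      // a block head is not a CFG edge.)  For example, if this block has
      // _freq 1.0 and successor i has a single predecessor and _freq 0.95,
      // the estimate for successor i is 0.95 / 1.0 = 0.95 and the other
      // path gets the remaining 0.05.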
      if (_succs[i]->num_preds() == 2) {
        return _succs[i]->_freq / _freq;
      } else if (_succs[1-i]->num_preds() == 2) {
        return 1 - (_succs[1-i]->_freq / _freq);
      } else {
        // Estimate using both successor frequencies
        float freq = _succs[i]->_freq;
        return freq / (freq + _succs[1-i]->_freq);
      }
    }
    op = n->as_Mach()->ideal_Opcode();
  }

  // Switch on branch type
  switch( op ) {
  case Op_CountedLoopEnd:
  case Op_If: {
    assert (i < 2, "just checking");
    // Conditionals pass on only part of their frequency
    float prob = n->as_MachIf()->_prob;
    assert(prob >= 0.0 && prob <= 1.0, "out of range probability");
    // If succ[i] is the FALSE branch, invert path info
    if( get_node(i + eidx + 1)->Opcode() == Op_IfFalse ) {
      return 1.0f - prob; // not taken
    } else {
      return prob; // taken
    }
  }

  case Op_Jump:
    // Divide the frequency between all successors evenly
    return 1.0f/_num_succs;

  case Op_Catch: {
    const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
    if (ci->_con == CatchProjNode::fall_through_index) {
      // Fall-thru path gets the lion's share.
      return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
    } else {
      // Presume exceptional paths are equally unlikely
      return PROB_UNLIKELY_MAG(5);
    }
  }

  case Op_Root:
  case Op_Goto:
    // Pass frequency straight thru to target
    return 1.0f;

  case Op_NeverBranch:
    return 0.0f;

  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    // Do not push out freq to root block
    return 0.0f;

  default:
    ShouldNotReachHere();
  }

  return 0.0f;
}

//------------------------------num_fall_throughs-----------------------------
// Return the number of fall-through candidates for a block
int Block::num_fall_throughs() {
  int eidx = end_idx();
  Node *n = get_node(eidx); // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // In theory either side can fall through; for simplicity's sake,
      // say that only the false branch can for now.
      return 1;
    }
    op = n->as_Mach()->ideal_Opcode();
  }

  // Switch on branch type
  switch( op ) {
  case Op_CountedLoopEnd:
  case Op_If:
    return 2;

  case Op_Root:
  case Op_Goto:
    return 1;

  case Op_Catch: {
    for (uint i = 0; i < _num_succs; i++) {
      const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
      if (ci->_con == CatchProjNode::fall_through_index) {
        return 1;
      }
    }
    return 0;
  }

  case Op_Jump:
  case Op_NeverBranch:
  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    return 0;

  default:
    ShouldNotReachHere();
  }

  return 0;
}

//------------------------------succ_fall_through-----------------------------
// Return true if a specific successor could be a fall-through target.
bool Block::succ_fall_through(uint i) {
  int eidx = end_idx();
  Node *n = get_node(eidx); // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // In theory either side can fall through; for simplicity's sake,
      // say that only the false branch can for now.
      return get_node(i + eidx + 1)->Opcode() == Op_IfFalse;
    }
    op = n->as_Mach()->ideal_Opcode();
  }

  // Switch on branch type
  switch( op ) {
  case Op_CountedLoopEnd:
  case Op_If:
  case Op_Root:
  case Op_Goto:
    return true;

  case Op_Catch: {
    const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
    return ci->_con == CatchProjNode::fall_through_index;
  }

  case Op_Jump:
  case Op_NeverBranch:
  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    return false;

  default:
    ShouldNotReachHere();
  }

  return false;
}

//------------------------------update_uncommon_branch------------------------
// Update the probability of a two-way branch so that successor 'ub' becomes uncommon.
void Block::update_uncommon_branch(Block* ub) {
  int eidx = end_idx();
  Node *n = get_node(eidx); // Get ending Node

  int op = n->as_Mach()->ideal_Opcode();

  assert(op == Op_CountedLoopEnd || op == Op_If, "must be an If");
  assert(num_fall_throughs() == 2, "must be a two way branch block");

  // Which successor is ub?
  uint s;
  for (s = 0; s < _num_succs; s++) {
    if (_succs[s] == ub) break;
  }
  assert(s < 2, "uncommon successor must be found");

  // If ub is the true path, make the probability small; otherwise
  // ub is the false path, so make the probability large.
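  // For example, with _prob == 0.3 (probability the true projection is
  // taken) and ub reached through the false projection: invert to 0.7,
  // clamp down to PROB_MIN, invert back, leaving _prob == 1.0 - PROB_MIN so
  // the path to ub is taken with probability PROB_MIN.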
  bool invert = (get_node(s + eidx + 1)->Opcode() == Op_IfFalse);

  // Get existing probability
  float p = n->as_MachIf()->_prob;

  if (invert) p = 1.0 - p;
  if (p > PROB_MIN) {
    p = PROB_MIN;
  }
  if (invert) p = 1.0 - p;

  n->as_MachIf()->_prob = p;
}

//------------------------------update_succ_freq-------------------------------
// Update the appropriate frequency associated with block 'b', a successor of
// a block in this loop.
void CFGLoop::update_succ_freq(Block* b, float freq) {
  if (b->_loop == this) {
    if (b == head()) {
      // back branch within the loop
      // Do nothing now; the loop-carried frequency will be
      // adjusted later in scale_freq().
    } else {
      // simple branch within the loop
      b->_freq += freq;
    }
  } else if (!in_loop_nest(b)) {
    // branch is exit from this loop
    BlockProbPair bpp(b, freq);
    _exits.append(bpp);
  } else {
    // branch into nested loop
    CFGLoop* ch = b->_loop;
    ch->_freq += freq;
  }
}

//------------------------------in_loop_nest-----------------------------------
// Determine if block b is in the receiver's loop nest.
bool CFGLoop::in_loop_nest(Block* b) {
  int depth = _depth;
  CFGLoop* b_loop = b->_loop;
  int b_depth = b_loop->_depth;
  if (depth == b_depth) {
    return true;
  }
  while (b_depth > depth) {
    b_loop = b_loop->_parent;
    b_depth = b_loop->_depth;
  }
  return b_loop == this;
}

//------------------------------scale_freq-------------------------------------
// Scale frequency of loops and blocks by trip counts from outer loops
// Do a top down traversal of loop tree (visit outer loops first.)
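// For example, an inner loop whose element frequency was scaled to 0.5 by
// its parent and whose trip_count() (defined elsewhere, based on the saved
// _exit_prob) is 10 gets loop_freq = 5.0; each of its members' per-entry
// frequencies is then multiplied by 5.0, clamped below at
// MIN_BLOCK_FREQUENCY.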
void CFGLoop::scale_freq() {
  float loop_freq = _freq * trip_count();
  _freq = loop_freq;
  for (int i = 0; i < _members.length(); i++) {
    CFGElement* s = _members.at(i);
    float block_freq = s->_freq * loop_freq;
    if (g_isnan(block_freq) || block_freq < MIN_BLOCK_FREQUENCY)
      block_freq = MIN_BLOCK_FREQUENCY;
    s->_freq = block_freq;
  }
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->scale_freq();
    ch = ch->_sibling;
  }
}

// Frequency of the outer loop: the first child's frequency if there is one,
// otherwise this loop's own frequency.
float CFGLoop::outer_loop_freq() const {
  if (_child != NULL) {
    return _child->_freq;
  }
  return _freq;
}

#ifndef PRODUCT
//------------------------------dump_tree--------------------------------------
void CFGLoop::dump_tree() const {
  dump();
  if (_child != NULL)   _child->dump_tree();
  if (_sibling != NULL) _sibling->dump_tree();
}

//------------------------------dump-------------------------------------------
void CFGLoop::dump() const {
  for (int i = 0; i < _depth; i++) tty->print(" ");
  tty->print("%s: %d trip_count: %6.0f freq: %6.0f\n",
             _depth == 0 ? "Method" : "Loop", _id, trip_count(), _freq);
  for (int i = 0; i < _depth; i++) tty->print(" ");
  tty->print(" members:");
  int k = 0;
  for (int i = 0; i < _members.length(); i++) {
    if (k++ >= 6) {
      tty->print("\n ");
      for (int j = 0; j < _depth+1; j++) tty->print(" ");
      k = 0;
    }
    CFGElement *s = _members.at(i);
    if (s->is_block()) {
      Block *b = s->as_Block();
      tty->print(" B%d(%6.3f)", b->_pre_order, b->_freq);
    } else {
      CFGLoop* lp = s->as_CFGLoop();
      tty->print(" L%d(%6.3f)", lp->_id, lp->_freq);
    }
  }
  tty->print("\n");
  for (int i = 0; i < _depth; i++) tty->print(" ");
  tty->print(" exits: ");
  k = 0;
  for (int i = 0; i < _exits.length(); i++) {
    if (k++ >= 7) {
      tty->print("\n ");
      for (int j = 0; j < _depth+1; j++) tty->print(" ");
      k = 0;
    }
    Block *blk = _exits.at(i).get_target();
    float prob = _exits.at(i).get_prob();
    tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100));
  }
  tty->print("\n");
}
#endif