GitHub Repository: PojavLauncherTeam/jdk17u
Path: blob/master/src/hotspot/share/opto/gcm.cpp
1
/*
2
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
3
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
*
5
* This code is free software; you can redistribute it and/or modify it
6
* under the terms of the GNU General Public License version 2 only, as
7
* published by the Free Software Foundation.
8
*
9
* This code is distributed in the hope that it will be useful, but WITHOUT
10
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12
* version 2 for more details (a copy is included in the LICENSE file that
13
* accompanied this code).
14
*
15
* You should have received a copy of the GNU General Public License version
16
* 2 along with this work; if not, write to the Free Software Foundation,
17
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
*
19
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
* or visit www.oracle.com if you need additional information or have any
21
* questions.
22
*
23
*/
24
25
#include "precompiled.hpp"
26
#include "libadt/vectset.hpp"
27
#include "memory/allocation.inline.hpp"
28
#include "memory/resourceArea.hpp"
29
#include "opto/block.hpp"
30
#include "opto/c2compiler.hpp"
31
#include "opto/callnode.hpp"
32
#include "opto/cfgnode.hpp"
33
#include "opto/machnode.hpp"
34
#include "opto/opcodes.hpp"
35
#include "opto/phaseX.hpp"
36
#include "opto/rootnode.hpp"
37
#include "opto/runtime.hpp"
38
#include "opto/chaitin.hpp"
39
#include "runtime/deoptimization.hpp"
40
41
// Portions of code courtesy of Clifford Click
42
43
// Optimization - Graph Style
44
45
// To avoid float value underflow
46
#define MIN_BLOCK_FREQUENCY 1.e-35f
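// Illustrative note (added commentary, not part of the original sources): block
// frequencies are products of branch probabilities, so a deeply nested, rarely
// taken path can drive the product toward the smallest normal float
// (about 1.2e-38). Clamping at 1e-35 keeps later frequency comparisons and
// scaling well away from denormals.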
47
48
//----------------------------schedule_node_into_block-------------------------
49
// Insert node n into block b. Look for projections of n and make sure they
50
// are in b also.
51
void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
52
// Set basic block of n, Add n to b,
53
map_node_to_block(n, b);
54
b->add_inst(n);
55
56
// After Matching, nearly any old Node may have projections trailing it.
57
// These are usually machine-dependent flags. In any case, they might
58
// float to another block below this one. Move them up.
59
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
60
Node* use = n->fast_out(i);
61
if (use->is_Proj()) {
62
Block* buse = get_block_for_node(use);
63
if (buse != b) { // In wrong block?
64
if (buse != NULL) {
65
buse->find_remove(use); // Remove from wrong block
66
}
67
map_node_to_block(use, b);
68
b->add_inst(use);
69
}
70
}
71
}
72
}
73
74
//----------------------------replace_block_proj_ctrl-------------------------
75
// Nodes that have is_block_proj() nodes as their control need to use
76
// the appropriate Region for their actual block as their control since
77
// the projection will be in a predecessor block.
78
void PhaseCFG::replace_block_proj_ctrl( Node *n ) {
79
const Node *in0 = n->in(0);
80
assert(in0 != NULL, "Only control-dependent");
81
const Node *p = in0->is_block_proj();
82
if (p != NULL && p != n) { // Control from a block projection?
83
assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here");
84
// Find trailing Region
85
Block *pb = get_block_for_node(in0); // Block-projection already has basic block
86
uint j = 0;
87
if (pb->_num_succs != 1) { // More than 1 successor?
88
// Search for successor
89
uint max = pb->number_of_nodes();
90
assert( max > 1, "" );
91
uint start = max - pb->_num_succs;
92
// Find which output path belongs to projection
93
for (j = start; j < max; j++) {
94
if( pb->get_node(j) == in0 )
95
break;
96
}
97
assert( j < max, "must find" );
98
// Change control to match head of successor basic block
99
j -= start;
100
}
101
n->set_req(0, pb->_succs[j]->head());
102
}
103
}
104
105
bool PhaseCFG::is_dominator(Node* dom_node, Node* node) {
106
assert(is_CFG(node) && is_CFG(dom_node), "node and dom_node must be CFG nodes");
107
if (dom_node == node) {
108
return true;
109
}
110
Block* d = find_block_for_node(dom_node);
111
Block* n = find_block_for_node(node);
112
assert(n != NULL && d != NULL, "blocks must exist");
113
114
if (d == n) {
115
if (dom_node->is_block_start()) {
116
return true;
117
}
118
if (node->is_block_start()) {
119
return false;
120
}
121
if (dom_node->is_block_proj()) {
122
return false;
123
}
124
if (node->is_block_proj()) {
125
return true;
126
}
127
128
assert(is_control_proj_or_safepoint(node), "node must be control projection or safepoint");
129
assert(is_control_proj_or_safepoint(dom_node), "dom_node must be control projection or safepoint");
130
131
// Neither 'node' nor 'dom_node' is a block start or block projection.
132
// Check if 'dom_node' is above 'node' in the control graph.
133
if (is_dominating_control(dom_node, node)) {
134
return true;
135
}
136
137
#ifdef ASSERT
138
// If 'dom_node' does not dominate 'node' then 'node' has to dominate 'dom_node'
139
if (!is_dominating_control(node, dom_node)) {
140
node->dump();
141
dom_node->dump();
142
assert(false, "neither dom_node nor node dominates the other");
143
}
144
#endif
145
146
return false;
147
}
148
return d->dom_lca(n) == d;
149
}
150
151
bool PhaseCFG::is_CFG(Node* n) {
152
return n->is_block_proj() || n->is_block_start() || is_control_proj_or_safepoint(n);
153
}
154
155
bool PhaseCFG::is_control_proj_or_safepoint(Node* n) const {
156
bool result = (n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_SafePoint) || (n->is_Proj() && n->as_Proj()->bottom_type() == Type::CONTROL);
157
assert(!result || (n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_SafePoint)
158
|| (n->is_Proj() && n->as_Proj()->_con == 0), "If control projection, it must be projection 0");
159
return result;
160
}
161
162
Block* PhaseCFG::find_block_for_node(Node* n) const {
163
if (n->is_block_start() || n->is_block_proj()) {
164
return get_block_for_node(n);
165
} else {
166
// Walk the control graph up if 'n' is neither a block start nor a block projection. In this case 'n' must be
167
// an unmatched control projection or a not yet matched safepoint precedence edge in the middle of a block.
168
assert(is_control_proj_or_safepoint(n), "must be control projection or safepoint");
169
Node* ctrl = n->in(0);
170
while (!ctrl->is_block_start()) {
171
ctrl = ctrl->in(0);
172
}
173
return get_block_for_node(ctrl);
174
}
175
}
176
177
// Walk up the control graph from 'n' and check if 'dom_ctrl' is found.
178
bool PhaseCFG::is_dominating_control(Node* dom_ctrl, Node* n) {
179
Node* ctrl = n->in(0);
180
while (!ctrl->is_block_start()) {
181
if (ctrl == dom_ctrl) {
182
return true;
183
}
184
ctrl = ctrl->in(0);
185
}
186
return false;
187
}
188
189
190
//------------------------------schedule_pinned_nodes--------------------------
191
// Set the basic block for Nodes pinned into blocks
192
void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
193
// Allocate node stack of size C->live_nodes()+8 to avoid frequent realloc
194
GrowableArray <Node*> spstack(C->live_nodes() + 8);
195
spstack.push(_root);
196
while (spstack.is_nonempty()) {
197
Node* node = spstack.pop();
198
if (!visited.test_set(node->_idx)) { // Test node and flag it as visited
199
if (node->pinned() && !has_block(node)) { // Pinned? Nail it down!
200
assert(node->in(0), "pinned Node must have Control");
201
// Before setting block replace block_proj control edge
202
replace_block_proj_ctrl(node);
203
Node* input = node->in(0);
204
while (!input->is_block_start()) {
205
input = input->in(0);
206
}
207
Block* block = get_block_for_node(input); // Basic block of controlling input
208
schedule_node_into_block(node, block);
209
}
210
211
// If the node has precedence edges (added when CastPP nodes are
212
// removed in final_graph_reshaping), fix the control of the
213
// node to cover the precedence edges and remove the
214
// dependencies.
215
Node* n = NULL;
216
for (uint i = node->len()-1; i >= node->req(); i--) {
217
Node* m = node->in(i);
218
if (m == NULL) continue;
219
220
// Only process precedence edges that are CFG nodes. Safepoints and control projections can be in the middle of a block
221
if (is_CFG(m)) {
222
node->rm_prec(i);
223
if (n == NULL) {
224
n = m;
225
} else {
226
assert(is_dominator(n, m) || is_dominator(m, n), "one must dominate the other");
227
n = is_dominator(n, m) ? m : n;
228
}
229
} else {
230
assert(node->is_Mach(), "sanity");
231
assert(node->as_Mach()->ideal_Opcode() == Op_StoreCM, "must be StoreCM node");
232
}
233
}
234
if (n != NULL) {
235
assert(node->in(0), "control should have been set");
236
assert(is_dominator(n, node->in(0)) || is_dominator(node->in(0), n), "one must dominate the other");
237
if (!is_dominator(n, node->in(0))) {
238
node->set_req(0, n);
239
}
240
}
241
242
// process all inputs that are non-NULL
243
for (int i = node->req()-1; i >= 0; --i) {
244
if (node->in(i) != NULL) {
245
spstack.push(node->in(i));
246
}
247
}
248
}
249
}
250
}
251
252
#ifdef ASSERT
253
// Assert that new input b2 is dominated by all previous inputs.
254
// Check this by seeing that it is dominated by b1, the deepest
255
// input observed until b2.
256
static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
257
if (b1 == NULL) return;
258
assert(b1->_dom_depth < b2->_dom_depth, "sanity");
259
Block* tmp = b2;
260
while (tmp != b1 && tmp != NULL) {
261
tmp = tmp->_idom;
262
}
263
if (tmp != b1) {
264
// Detected an unschedulable graph. Print some nice stuff and die.
265
tty->print_cr("!!! Unschedulable graph !!!");
266
for (uint j=0; j<n->len(); j++) { // For all inputs
267
Node* inn = n->in(j); // Get input
268
if (inn == NULL) continue; // Ignore NULL, missing inputs
269
Block* inb = cfg->get_block_for_node(inn);
270
tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
271
inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
272
inn->dump();
273
}
274
tty->print("Failing node: ");
275
n->dump();
276
assert(false, "unschedulable graph");
277
}
278
}
279
#endif
280
281
static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) {
282
// Find the last input dominated by all other inputs.
283
Block* deepb = NULL; // Deepest block so far
284
int deepb_dom_depth = 0;
285
for (uint k = 0; k < n->len(); k++) { // For all inputs
286
Node* inn = n->in(k); // Get input
287
if (inn == NULL) continue; // Ignore NULL, missing inputs
288
Block* inb = cfg->get_block_for_node(inn);
289
assert(inb != NULL, "must already have scheduled this input");
290
if (deepb_dom_depth < (int) inb->_dom_depth) {
291
// The new inb must be dominated by the previous deepb.
292
// The various inputs must be linearly ordered in the dom
293
// tree, or else there will not be a unique deepest block.
294
DEBUG_ONLY(assert_dom(deepb, inb, n, cfg));
295
deepb = inb; // Save deepest block
296
deepb_dom_depth = deepb->_dom_depth;
297
}
298
}
299
assert(deepb != NULL, "must be at least one input to n");
300
return deepb;
301
}
302
303
304
//------------------------------schedule_early---------------------------------
305
// Find the earliest Block any instruction can be placed in. Some instructions
306
// are pinned into Blocks. Unpinned instructions can appear in the last block in
307
// which all their inputs occur.
308
bool PhaseCFG::schedule_early(VectorSet &visited, Node_Stack &roots) {
309
// Allocate stack with enough space to avoid frequent realloc
310
Node_Stack nstack(roots.size() + 8);
311
// _root will be processed among C->top() inputs
312
roots.push(C->top(), 0);
313
visited.set(C->top()->_idx);
314
315
while (roots.size() != 0) {
316
// Use local variables parent_node & input_index to cache values
317
// on stack's top.
318
Node* parent_node = roots.node();
319
uint input_index = 0;
320
roots.pop();
321
322
while (true) {
323
if (input_index == 0) {
324
// Fixup some control. Constants without control get attached
325
// to root and nodes that use is_block_proj() nodes should be attached
326
// to the region that starts their block.
327
const Node* control_input = parent_node->in(0);
328
if (control_input != NULL) {
329
replace_block_proj_ctrl(parent_node);
330
} else {
331
// Is a constant with NO inputs?
332
if (parent_node->req() == 1) {
333
parent_node->set_req(0, _root);
334
}
335
}
336
}
337
338
// First, visit all inputs and force them to get a block. If an
339
// input is already in a block we quit following inputs (to avoid
340
// cycles). Instead we put that Node on a worklist to be handled
341
// later (since ITS inputs may not have a block yet).
342
343
// Assume all n's inputs will be processed
344
bool done = true;
345
346
while (input_index < parent_node->len()) {
347
Node* in = parent_node->in(input_index++);
348
if (in == NULL) {
349
continue;
350
}
351
352
int is_visited = visited.test_set(in->_idx);
353
if (!has_block(in)) {
354
if (is_visited) {
355
assert(false, "graph should be schedulable");
356
return false;
357
}
358
// Save parent node and next input's index.
359
nstack.push(parent_node, input_index);
360
// Process current input now.
361
parent_node = in;
362
input_index = 0;
363
// Not all n's inputs processed.
364
done = false;
365
break;
366
} else if (!is_visited) {
367
// Visit this guy later, using worklist
368
roots.push(in, 0);
369
}
370
}
371
372
if (done) {
373
// All of n's inputs have been processed, complete post-processing.
374
375
// Some instructions are pinned into a block. These include Region,
376
// Phi, Start, Return, and other control-dependent instructions and
377
// any projections which depend on them.
378
if (!parent_node->pinned()) {
379
// Set earliest legal block.
380
Block* earliest_block = find_deepest_input(parent_node, this);
381
map_node_to_block(parent_node, earliest_block);
382
} else {
383
assert(get_block_for_node(parent_node) == get_block_for_node(parent_node->in(0)), "Pinned Node should be at the same block as its control edge");
384
}
385
386
if (nstack.is_empty()) {
387
// Finished all nodes on stack.
388
// Process next node on the worklist 'roots'.
389
break;
390
}
391
// Get saved parent node and next input's index.
392
parent_node = nstack.node();
393
input_index = nstack.index();
394
nstack.pop();
395
}
396
}
397
}
398
return true;
399
}
400
401
//------------------------------dom_lca----------------------------------------
402
// Find least common ancestor in dominator tree
403
// LCA is a current notion of LCA, to be raised above 'this'.
404
// As a convenient boundary condition, return 'this' if LCA is NULL.
405
// Find the LCA of those two nodes.
406
Block* Block::dom_lca(Block* LCA) {
407
if (LCA == NULL || LCA == this) return this;
408
409
Block* anc = this;
410
while (anc->_dom_depth > LCA->_dom_depth)
411
anc = anc->_idom; // Walk up till anc is as high as LCA
412
413
while (LCA->_dom_depth > anc->_dom_depth)
414
LCA = LCA->_idom; // Walk up till LCA is as high as anc
415
416
while (LCA != anc) { // Walk both up till they are the same
417
LCA = LCA->_idom;
418
anc = anc->_idom;
419
}
420
421
return LCA;
422
}
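// Illustrative example (made-up depths, added commentary): if 'this' sits at
// _dom_depth 7 and the incoming LCA at depth 4, 'anc' first climbs three
// _idom links to depth 4; the two pointers then climb in lockstep until they
// point at the same block, which is the least common ancestor in the dom tree.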
423
424
//--------------------------raise_LCA_above_use--------------------------------
425
// We are placing a definition, and have been given a def->use edge.
426
// The definition must dominate the use, so move the LCA upward in the
427
// dominator tree to dominate the use. If the use is a phi, adjust
428
// the LCA only with the phi input paths which actually use this def.
429
static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, const PhaseCFG* cfg) {
430
Block* buse = cfg->get_block_for_node(use);
431
if (buse == NULL) return LCA; // Unused killing Projs have no use block
432
if (!use->is_Phi()) return buse->dom_lca(LCA);
433
uint pmax = use->req(); // Number of Phi inputs
434
// Why does not this loop just break after finding the matching input to
435
// the Phi? Well...it's like this. I do not have true def-use/use-def
436
// chains. Means I cannot distinguish, from the def-use direction, which
437
// of many use-defs lead from the same use to the same def. That is, this
438
// Phi might have several uses of the same def. Each use appears in a
439
// different predecessor block. But when I enter here, I cannot distinguish
440
// which use-def edge I should find the predecessor block for. So I find
441
// them all. Means I do a little extra work if a Phi uses the same value
442
// more than once.
443
for (uint j=1; j<pmax; j++) { // For all inputs
444
if (use->in(j) == def) { // Found matching input?
445
Block* pred = cfg->get_block_for_node(buse->pred(j));
446
LCA = pred->dom_lca(LCA);
447
}
448
}
449
return LCA;
450
}
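// Illustrative example (hypothetical Phi shape, added commentary): if 'def'
// feeds Phi inputs 1 and 3, the loop above raises the LCA over the
// predecessor blocks of paths 1 and 3 only; the def must dominate the
// predecessors that actually carry its value, not the merge block itself.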
451
452
//----------------------------raise_LCA_above_marks----------------------------
453
// Return a new LCA that dominates LCA and any of its marked predecessors.
454
// Search all my parents up to 'early' (exclusive), looking for predecessors
455
// which are marked with the given index. Return the LCA (in the dom tree)
456
// of all marked blocks. If there are none marked, return the original
457
// LCA.
458
static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark, Block* early, const PhaseCFG* cfg) {
459
Block_List worklist;
460
worklist.push(LCA);
461
while (worklist.size() > 0) {
462
Block* mid = worklist.pop();
463
if (mid == early) continue; // stop searching here
464
465
// Test and set the visited bit.
466
if (mid->raise_LCA_visited() == mark) continue; // already visited
467
468
// Don't process the current LCA, otherwise the search may terminate early
469
if (mid != LCA && mid->raise_LCA_mark() == mark) {
470
// Raise the LCA.
471
LCA = mid->dom_lca(LCA);
472
if (LCA == early) break; // stop searching everywhere
473
assert(early->dominates(LCA), "early is high enough");
474
// Resume searching at that point, skipping intermediate levels.
475
worklist.push(LCA);
476
if (LCA == mid)
477
continue; // Don't mark as visited to avoid early termination.
478
} else {
479
// Keep searching through this block's predecessors.
480
for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
481
Block* mid_parent = cfg->get_block_for_node(mid->pred(j));
482
worklist.push(mid_parent);
483
}
484
}
485
mid->set_raise_LCA_visited(mark);
486
}
487
return LCA;
488
}
489
490
//--------------------------memory_early_block--------------------------------
491
// This is a variation of find_deepest_input, the heart of schedule_early.
492
// Find the "early" block for a load, if we considered only memory and
493
// address inputs, that is, if other data inputs were ignored.
494
//
495
// Because a subset of edges are considered, the resulting block will
496
// be earlier (at a shallower dom_depth) than the true schedule_early
497
// point of the node. We compute this earlier block as a more permissive
498
// site for anti-dependency insertion, but only if subsume_loads is enabled.
499
static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg) {
500
Node* base;
501
Node* index;
502
Node* store = load->in(MemNode::Memory);
503
load->as_Mach()->memory_inputs(base, index);
504
505
assert(base != NodeSentinel && index != NodeSentinel,
506
"unexpected base/index inputs");
507
508
Node* mem_inputs[4];
509
int mem_inputs_length = 0;
510
if (base != NULL) mem_inputs[mem_inputs_length++] = base;
511
if (index != NULL) mem_inputs[mem_inputs_length++] = index;
512
if (store != NULL) mem_inputs[mem_inputs_length++] = store;
513
514
// In the comparison below, add one to account for the control input,
515
// which may be null, but always takes up a spot in the in array.
516
if (mem_inputs_length + 1 < (int) load->req()) {
517
// This "load" has more inputs than just the memory, base and index inputs.
518
// For purposes of checking anti-dependences, we need to start
519
// from the early block of only the address portion of the instruction,
520
// and ignore other blocks that may have factored into the wider
521
// schedule_early calculation.
522
if (load->in(0) != NULL) mem_inputs[mem_inputs_length++] = load->in(0);
523
524
Block* deepb = NULL; // Deepest block so far
525
int deepb_dom_depth = 0;
526
for (int i = 0; i < mem_inputs_length; i++) {
527
Block* inb = cfg->get_block_for_node(mem_inputs[i]);
528
if (deepb_dom_depth < (int) inb->_dom_depth) {
529
// The new inb must be dominated by the previous deepb.
530
// The various inputs must be linearly ordered in the dom
531
// tree, or else there will not be a unique deepest block.
532
DEBUG_ONLY(assert_dom(deepb, inb, load, cfg));
533
deepb = inb; // Save deepest block
534
deepb_dom_depth = deepb->_dom_depth;
535
}
536
}
537
early = deepb;
538
}
539
540
return early;
541
}
542
543
// This function is used by insert_anti_dependences to find unrelated loads for stores in implicit null checks.
544
bool PhaseCFG::unrelated_load_in_store_null_block(Node* store, Node* load) {
545
// We expect an anti-dependence edge from 'load' to 'store', except when
546
// implicit_null_check() has hoisted 'store' above its early block to
547
// perform an implicit null check, and 'load' is placed in the null
548
// block. In this case it is safe to ignore the anti-dependence, as the
549
// null block is only reached if 'store' tries to write to a null object and
550
// 'load' reads from a non-null object (there is a preceding check for that).
551
// These objects can't be the same.
552
Block* store_block = get_block_for_node(store);
553
Block* load_block = get_block_for_node(load);
554
Node* end = store_block->end();
555
if (end->is_MachNullCheck() && (end->in(1) == store) && store_block->dominates(load_block)) {
556
Node* if_true = end->find_out_with(Op_IfTrue);
557
assert(if_true != NULL, "null check without null projection");
558
Node* null_block_region = if_true->find_out_with(Op_Region);
559
assert(null_block_region != NULL, "null check without null region");
560
return get_block_for_node(null_block_region) == load_block;
561
}
562
return false;
563
}
564
565
//--------------------------insert_anti_dependences---------------------------
566
// A load may need to witness memory that nearby stores can overwrite.
567
// For each nearby store, either insert an "anti-dependence" edge
568
// from the load to the store, or else move LCA upward to force the
569
// load to (eventually) be scheduled in a block above the store.
570
//
571
// Do not add edges to stores on distinct control-flow paths;
572
// only add edges to stores which might interfere.
573
//
574
// Return the (updated) LCA. There will not be any possibly interfering
575
// store between the load's "early block" and the updated LCA.
576
// Any stores in the updated LCA will have new precedence edges
577
// back to the load. The caller is expected to schedule the load
578
// in the LCA, in which case the precedence edges will make LCM
579
// preserve anti-dependences. The caller may also hoist the load
580
// above the LCA, if it is not the early block.
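// Illustrative scenario (hypothetical block names, added commentary): with
// early == B2, LCA == B5, and a possibly aliasing store in a block B4 between
// them, B4 is marked with the load's index and raise_LCA_above_marks() later
// lifts the LCA to B4 or above; any marked store left in the raised LCA block
// then gets an explicit load->store precedence edge, while a store already
// sitting in B2 forces LCA == early right away.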
581
Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
582
assert(load->needs_anti_dependence_check(), "must be a load of some sort");
583
assert(LCA != NULL, "");
584
DEBUG_ONLY(Block* LCA_orig = LCA);
585
586
// Compute the alias index. Loads and stores with different alias indices
587
// do not need anti-dependence edges.
588
int load_alias_idx = C->get_alias_index(load->adr_type());
589
#ifdef ASSERT
590
assert(Compile::AliasIdxTop <= load_alias_idx && load_alias_idx < C->num_alias_types(), "Invalid alias index");
591
if (load_alias_idx == Compile::AliasIdxBot && C->AliasLevel() > 0 &&
592
(PrintOpto || VerifyAliases ||
593
(PrintMiscellaneous && (WizardMode || Verbose)))) {
594
// Load nodes should not consume all of memory.
595
// Reporting a bottom type indicates a bug in adlc.
596
// If some particular type of node validly consumes all of memory,
597
// sharpen the preceding "if" to exclude it, so we can catch bugs here.
598
tty->print_cr("*** Possible Anti-Dependence Bug: Load consumes all of memory.");
599
load->dump(2);
600
if (VerifyAliases) assert(load_alias_idx != Compile::AliasIdxBot, "");
601
}
602
#endif
603
604
if (!C->alias_type(load_alias_idx)->is_rewritable()) {
605
// It is impossible to spoil this load by putting stores before it,
606
// because we know that the stores will never update the value
607
// which 'load' must witness.
608
return LCA;
609
}
610
611
node_idx_t load_index = load->_idx;
612
613
// Note the earliest legal placement of 'load', as determined by
614
// the unique point in the dom tree where all memory effects
615
// and other inputs are first available. (Computed by schedule_early.)
616
// For normal loads, 'early' is the shallowest place (dom graph wise)
617
// to look for anti-deps between this load and any store.
618
Block* early = get_block_for_node(load);
619
620
// If we are subsuming loads, compute an "early" block that only considers
621
// memory or address inputs. This block may be different than the
622
// schedule_early block in that it could be at an even shallower depth in the
623
// dominator tree, and allow for a broader discovery of anti-dependences.
624
if (C->subsume_loads()) {
625
early = memory_early_block(load, early, this);
626
}
627
628
ResourceArea *area = Thread::current()->resource_area();
629
Node_List worklist_mem(area); // prior memory state to store
630
Node_List worklist_store(area); // possible-def to explore
631
Node_List worklist_visited(area); // visited mergemem nodes
632
Node_List non_early_stores(area); // all relevant stores outside of early
633
bool must_raise_LCA = false;
634
635
// 'load' uses some memory state; look for users of the same state.
636
// Recurse through MergeMem nodes to the stores that use them.
637
638
// Each of these stores is a possible definition of memory
639
// that 'load' needs to use. We need to force 'load'
640
// to occur before each such store. When the store is in
641
// the same block as 'load', we insert an anti-dependence
642
// edge load->store.
643
644
// The relevant stores "nearby" the load consist of a tree rooted
645
// at initial_mem, with internal nodes of type MergeMem.
646
// Therefore, the branches visited by the worklist are of this form:
647
// initial_mem -> (MergeMem ->)* store
648
// The anti-dependence constraints apply only to the fringe of this tree.
649
650
Node* initial_mem = load->in(MemNode::Memory);
651
worklist_store.push(initial_mem);
652
worklist_visited.push(initial_mem);
653
worklist_mem.push(NULL);
654
while (worklist_store.size() > 0) {
655
// Examine a nearby store to see if it might interfere with our load.
656
Node* mem = worklist_mem.pop();
657
Node* store = worklist_store.pop();
658
uint op = store->Opcode();
659
660
// MergeMems do not directly have anti-deps.
661
// Treat them as internal nodes in a forward tree of memory states,
662
// the leaves of which are each a 'possible-def'.
663
if (store == initial_mem // root (exclusive) of tree we are searching
664
|| op == Op_MergeMem // internal node of tree we are searching
665
) {
666
mem = store; // It's not a possibly interfering store.
667
if (store == initial_mem)
668
initial_mem = NULL; // only process initial memory once
669
670
for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
671
store = mem->fast_out(i);
672
if (store->is_MergeMem()) {
673
// Be sure we don't get into combinatorial problems.
674
// (Allow phis to be repeated; they can merge two relevant states.)
675
uint j = worklist_visited.size();
676
for (; j > 0; j--) {
677
if (worklist_visited.at(j-1) == store) break;
678
}
679
if (j > 0) continue; // already on work list; do not repeat
680
worklist_visited.push(store);
681
}
682
worklist_mem.push(mem);
683
worklist_store.push(store);
684
}
685
continue;
686
}
687
688
if (op == Op_MachProj || op == Op_Catch) continue;
689
if (store->needs_anti_dependence_check()) continue; // not really a store
690
691
// Compute the alias index. Loads and stores with different alias
692
// indices do not need anti-dependence edges. Wide MemBar's are
693
// anti-dependent on everything (except immutable memories).
694
const TypePtr* adr_type = store->adr_type();
695
if (!C->can_alias(adr_type, load_alias_idx)) continue;
696
697
// Most slow-path runtime calls do NOT modify Java memory, but
698
// they can block and so write Raw memory.
699
if (store->is_Mach()) {
700
MachNode* mstore = store->as_Mach();
701
if (load_alias_idx != Compile::AliasIdxRaw) {
702
// Check for call into the runtime using the Java calling
703
// convention (and from there into a wrapper); it has no
704
// _method. Can't do this optimization for Native calls because
705
// they CAN write to Java memory.
706
if (mstore->ideal_Opcode() == Op_CallStaticJava) {
707
assert(mstore->is_MachSafePoint(), "");
708
MachSafePointNode* ms = (MachSafePointNode*) mstore;
709
assert(ms->is_MachCallJava(), "");
710
MachCallJavaNode* mcj = (MachCallJavaNode*) ms;
711
if (mcj->_method == NULL) {
712
// These runtime calls do not write to Java visible memory
713
// (other than Raw) and so do not require anti-dependence edges.
714
continue;
715
}
716
}
717
// Same for SafePoints: they read/write Raw but only read otherwise.
718
// This is basically a workaround for SafePoints only defining control
719
// instead of control + memory.
720
if (mstore->ideal_Opcode() == Op_SafePoint)
721
continue;
722
} else {
723
// Some raw memory, such as the load of "top" at an allocation,
724
// can be control dependent on the previous safepoint. See
725
// comments in GraphKit::allocate_heap() about control input.
726
// Inserting an anti-dep between such a safepoint and a use
727
// creates a cycle, and will cause a subsequent failure in
728
// local scheduling. (BugId 4919904)
729
// (%%% How can a control input be a safepoint and not a projection??)
730
if (mstore->ideal_Opcode() == Op_SafePoint && load->in(0) == mstore)
731
continue;
732
}
733
}
734
735
// Identify a block that the current load must be above,
736
// or else observe that 'store' is all the way up in the
737
// earliest legal block for 'load'. In the latter case,
738
// immediately insert an anti-dependence edge.
739
Block* store_block = get_block_for_node(store);
740
assert(store_block != NULL, "unused killing projections skipped above");
741
742
if (store->is_Phi()) {
743
// Loop-phis need to raise load before input. (Other phis are treated
744
// as store below.)
745
//
746
// 'load' uses memory which is one (or more) of the Phi's inputs.
747
// It must be scheduled not before the Phi, but rather before
748
// each of the relevant Phi inputs.
749
//
750
// Instead of finding the LCA of all inputs to a Phi that match 'mem',
751
// we mark each corresponding predecessor block and do a combined
752
// hoisting operation later (raise_LCA_above_marks).
753
//
754
// Do not assert(store_block != early, "Phi merging memory after access")
755
// PhiNode may be at start of block 'early' with backedge to 'early'
756
DEBUG_ONLY(bool found_match = false);
757
for (uint j = PhiNode::Input, jmax = store->req(); j < jmax; j++) {
758
if (store->in(j) == mem) { // Found matching input?
759
DEBUG_ONLY(found_match = true);
760
Block* pred_block = get_block_for_node(store_block->pred(j));
761
if (pred_block != early) {
762
// If any predecessor of the Phi matches the load's "early block",
763
// we do not need a precedence edge between the Phi and 'load'
764
// since the load will be forced into a block preceding the Phi.
765
pred_block->set_raise_LCA_mark(load_index);
766
assert(!LCA_orig->dominates(pred_block) ||
767
early->dominates(pred_block), "early is high enough");
768
must_raise_LCA = true;
769
} else {
770
// anti-dependent upon PHI pinned below 'early', no edge needed
771
LCA = early; // but can not schedule below 'early'
772
}
773
}
774
}
775
assert(found_match, "no worklist bug");
776
} else if (store_block != early) {
777
// 'store' is between the current LCA and earliest possible block.
778
// Label its block, and decide later on how to raise the LCA
779
// to include the effect on LCA of this store.
780
// If this store's block gets chosen as the raised LCA, we
781
// will find him on the non_early_stores list and stick him
782
// with a precedence edge.
783
// (But, don't bother if LCA is already raised all the way.)
784
if (LCA != early && !unrelated_load_in_store_null_block(store, load)) {
785
store_block->set_raise_LCA_mark(load_index);
786
must_raise_LCA = true;
787
non_early_stores.push(store);
788
}
789
} else {
790
// Found a possibly-interfering store in the load's 'early' block.
791
// This means 'load' cannot sink at all in the dominator tree.
792
// Add an anti-dep edge, and squeeze 'load' into the highest block.
793
assert(store != load->find_exact_control(load->in(0)), "dependence cycle found");
794
if (verify) {
795
assert(store->find_edge(load) != -1 || unrelated_load_in_store_null_block(store, load),
796
"missing precedence edge");
797
} else {
798
store->add_prec(load);
799
}
800
LCA = early;
801
// This turns off the process of gathering non_early_stores.
802
}
803
}
804
// (Worklist is now empty; all nearby stores have been visited.)
805
806
// Finished if 'load' must be scheduled in its 'early' block.
807
// If we found any stores there, they have already been given
808
// precedence edges.
809
if (LCA == early) return LCA;
810
811
// We get here only if there are no possibly-interfering stores
812
// in the load's 'early' block. Move LCA up above all predecessors
813
// which contain stores we have noted.
814
//
815
// The raised LCA block can be a home to such interfering stores,
816
// but its predecessors must not contain any such stores.
817
//
818
// The raised LCA will be a lower bound for placing the load,
819
// preventing the load from sinking past any block containing
820
// a store that may invalidate the memory state required by 'load'.
821
if (must_raise_LCA)
822
LCA = raise_LCA_above_marks(LCA, load->_idx, early, this);
823
if (LCA == early) return LCA;
824
825
// Insert anti-dependence edges from 'load' to each store
826
// in the non-early LCA block.
827
// Mine the non_early_stores list for such stores.
828
if (LCA->raise_LCA_mark() == load_index) {
829
while (non_early_stores.size() > 0) {
830
Node* store = non_early_stores.pop();
831
Block* store_block = get_block_for_node(store);
832
if (store_block == LCA) {
833
// add anti_dependence from store to load in its own block
834
assert(store != load->find_exact_control(load->in(0)), "dependence cycle found");
835
if (verify) {
836
assert(store->find_edge(load) != -1, "missing precedence edge");
837
} else {
838
store->add_prec(load);
839
}
840
} else {
841
assert(store_block->raise_LCA_mark() == load_index, "block was marked");
842
// Any other stores we found must be either inside the new LCA
843
// or else outside the original LCA. In the latter case, they
844
// did not interfere with any use of 'load'.
845
assert(LCA->dominates(store_block)
846
|| !LCA_orig->dominates(store_block), "no stray stores");
847
}
848
}
849
}
850
851
// Return the highest block containing stores; any stores
852
// within that block have been given anti-dependence edges.
853
return LCA;
854
}
855
856
// This class is used to iterate backwards over the nodes in the graph.
857
858
class Node_Backward_Iterator {
859
860
private:
861
Node_Backward_Iterator();
862
863
public:
864
// Constructor for the iterator
865
Node_Backward_Iterator(Node *root, VectorSet &visited, Node_Stack &stack, PhaseCFG &cfg);
866
867
// Return the next node in the backwards walk, or NULL when the walk is done
868
Node *next();
869
870
private:
871
VectorSet &_visited;
872
Node_Stack &_stack;
873
PhaseCFG &_cfg;
874
};
875
876
// Constructor for the Node_Backward_Iterator
877
Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_Stack &stack, PhaseCFG &cfg)
878
: _visited(visited), _stack(stack), _cfg(cfg) {
879
// The stack should contain exactly the root
880
stack.clear();
881
stack.push(root, root->outcnt());
882
883
// Clear the visited bits
884
visited.clear();
885
}
886
887
// Advance the Node_Backward_Iterator and return the next node
888
Node *Node_Backward_Iterator::next() {
889
890
// If the _stack is empty, then just return NULL: finished.
891
if ( !_stack.size() )
892
return NULL;
893
894
// I visit unvisited not-anti-dependence users first, then anti-dependent
895
// children next. I iterate backwards to support removal of nodes.
896
// The stack holds states consisting of 3 values:
897
// current Def node, flag which indicates 1st/2nd pass, index of current out edge
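// (Added note) The 1st/2nd-pass flag is packed into the low bit of the stored
// Node* (Node objects are word-aligned, so that bit is otherwise unused) and
// is masked back off below when the node is recovered from the stack.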
898
Node *self = (Node*)(((uintptr_t)_stack.node()) & ~1);
899
bool iterate_anti_dep = (((uintptr_t)_stack.node()) & 1);
900
uint idx = MIN2(_stack.index(), self->outcnt()); // Support removal of nodes.
901
_stack.pop();
902
903
// I cycle here when I am entering a deeper level of recursion.
904
// The key variable 'self' was set prior to jumping here.
905
while( 1 ) {
906
907
_visited.set(self->_idx);
908
909
// Now schedule all uses as late as possible.
910
const Node* src = self->is_Proj() ? self->in(0) : self;
911
uint src_rpo = _cfg.get_block_for_node(src)->_rpo;
912
913
// Schedule all nodes in a post-order visit
914
Node *unvisited = NULL; // Unvisited anti-dependent Node, if any
915
916
// Scan for unvisited nodes
917
while (idx > 0) {
918
// For all uses, schedule late
919
Node* n = self->raw_out(--idx); // Use
920
921
// Skip already visited children
922
if ( _visited.test(n->_idx) )
923
continue;
924
925
// do not traverse backward control edges
926
Node *use = n->is_Proj() ? n->in(0) : n;
927
uint use_rpo = _cfg.get_block_for_node(use)->_rpo;
928
929
if ( use_rpo < src_rpo )
930
continue;
931
932
// Phi nodes always precede uses in a basic block
933
if ( use_rpo == src_rpo && use->is_Phi() )
934
continue;
935
936
unvisited = n; // Found unvisited
937
938
// Check for possible-anti-dependent
939
// 1st pass: No such nodes, 2nd pass: Only such nodes.
940
if (n->needs_anti_dependence_check() == iterate_anti_dep) {
941
unvisited = n; // Found unvisited
942
break;
943
}
944
}
945
946
// Did I find an unvisited not-anti-dependent Node?
947
if (!unvisited) {
948
if (!iterate_anti_dep) {
949
// 2nd pass: Iterate over nodes which needs_anti_dependence_check.
950
iterate_anti_dep = true;
951
idx = self->outcnt();
952
continue;
953
}
954
break; // All done with children; post-visit 'self'
955
}
956
957
// Visit the unvisited Node. Contains the obvious push to
958
// indicate I'm entering a deeper level of recursion. I push the
959
// old state onto the _stack and set a new state and loop (recurse).
960
_stack.push((Node*)((uintptr_t)self | (uintptr_t)iterate_anti_dep), idx);
961
self = unvisited;
962
iterate_anti_dep = false;
963
idx = self->outcnt();
964
} // End recursion loop
965
966
return self;
967
}
968
969
//------------------------------ComputeLatenciesBackwards----------------------
970
// Compute the latency of all the instructions.
971
void PhaseCFG::compute_latencies_backwards(VectorSet &visited, Node_Stack &stack) {
972
#ifndef PRODUCT
973
if (trace_opto_pipelining())
974
tty->print("\n#---- ComputeLatenciesBackwards ----\n");
975
#endif
976
977
Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
978
Node *n;
979
980
// Walk over all the nodes from last to first
981
while ((n = iter.next())) {
982
// Set the latency for the definitions of this instruction
983
partial_latency_of_defs(n);
984
}
985
} // end ComputeLatenciesBackwards
986
987
//------------------------------partial_latency_of_defs------------------------
988
// Compute the latency impact of this node on all defs. This computes
989
// a number that increases as we approach the beginning of the routine.
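// Illustrative example (made-up numbers, added commentary): if this node's
// latency is 5 and the edge to input j contributes n->latency(j) == 2, the
// def feeding input j is raised to a latency of at least 7; a def reached
// through several uses keeps the maximum such value.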
990
void PhaseCFG::partial_latency_of_defs(Node *n) {
991
// Set the latency for this instruction
992
#ifndef PRODUCT
993
if (trace_opto_pipelining()) {
994
tty->print("# latency_to_inputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
995
dump();
996
}
997
#endif
998
999
if (n->is_Proj()) {
1000
n = n->in(0);
1001
}
1002
1003
if (n->is_Root()) {
1004
return;
1005
}
1006
1007
uint nlen = n->len();
1008
uint use_latency = get_latency_for_node(n);
1009
uint use_pre_order = get_block_for_node(n)->_pre_order;
1010
1011
for (uint j = 0; j < nlen; j++) {
1012
Node *def = n->in(j);
1013
1014
if (!def || def == n) {
1015
continue;
1016
}
1017
1018
// Walk backwards thru projections
1019
if (def->is_Proj()) {
1020
def = def->in(0);
1021
}
1022
1023
#ifndef PRODUCT
1024
if (trace_opto_pipelining()) {
1025
tty->print("# in(%2d): ", j);
1026
def->dump();
1027
}
1028
#endif
1029
1030
// If the defining block is not known, assume it is ok
1031
Block *def_block = get_block_for_node(def);
1032
uint def_pre_order = def_block ? def_block->_pre_order : 0;
1033
1034
if ((use_pre_order < def_pre_order) || (use_pre_order == def_pre_order && n->is_Phi())) {
1035
continue;
1036
}
1037
1038
uint delta_latency = n->latency(j);
1039
uint current_latency = delta_latency + use_latency;
1040
1041
if (get_latency_for_node(def) < current_latency) {
1042
set_latency_for_node(def, current_latency);
1043
}
1044
1045
#ifndef PRODUCT
1046
if (trace_opto_pipelining()) {
1047
tty->print_cr("# %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d", use_latency, j, delta_latency, current_latency, def->_idx, get_latency_for_node(def));
1048
}
1049
#endif
1050
}
1051
}
1052
1053
//------------------------------latency_from_use-------------------------------
1054
// Compute the latency of a specific use
1055
int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
1056
// If self-reference, return no latency
1057
if (use == n || use->is_Root()) {
1058
return 0;
1059
}
1060
1061
uint def_pre_order = get_block_for_node(def)->_pre_order;
1062
uint latency = 0;
1063
1064
// If the use is not a projection, then it is simple...
1065
if (!use->is_Proj()) {
1066
#ifndef PRODUCT
1067
if (trace_opto_pipelining()) {
1068
tty->print("# out(): ");
1069
use->dump();
1070
}
1071
#endif
1072
1073
uint use_pre_order = get_block_for_node(use)->_pre_order;
1074
1075
if (use_pre_order < def_pre_order)
1076
return 0;
1077
1078
if (use_pre_order == def_pre_order && use->is_Phi())
1079
return 0;
1080
1081
uint nlen = use->len();
1082
uint nl = get_latency_for_node(use);
1083
1084
for ( uint j=0; j<nlen; j++ ) {
1085
if (use->in(j) == n) {
1086
// Change this if we want local latencies
1087
uint ul = use->latency(j);
1088
uint l = ul + nl;
1089
if (latency < l) latency = l;
1090
#ifndef PRODUCT
1091
if (trace_opto_pipelining()) {
1092
tty->print_cr("# %d + edge_latency(%d) == %d -> %d, latency = %d",
1093
nl, j, ul, l, latency);
1094
}
1095
#endif
1096
}
1097
}
1098
} else {
1099
// This is a projection, just grab the latency of the use(s)
1100
for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
1101
uint l = latency_from_use(use, def, use->fast_out(j));
1102
if (latency < l) latency = l;
1103
}
1104
}
1105
1106
return latency;
1107
}
1108
1109
//------------------------------latency_from_uses------------------------------
1110
// Compute the latency of this instruction relative to all of its uses.
1111
// This computes a number that increases as we approach the beginning of the
1112
// routine.
1113
void PhaseCFG::latency_from_uses(Node *n) {
1114
// Set the latency for this instruction
1115
#ifndef PRODUCT
1116
if (trace_opto_pipelining()) {
1117
tty->print("# latency_from_outputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
1118
dump();
1119
}
1120
#endif
1121
uint latency=0;
1122
const Node *def = n->is_Proj() ? n->in(0): n;
1123
1124
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1125
uint l = latency_from_use(n, def, n->fast_out(i));
1126
1127
if (latency < l) latency = l;
1128
}
1129
1130
set_latency_for_node(n, latency);
1131
}
1132
1133
//------------------------------hoist_to_cheaper_block-------------------------
1134
// Pick a block for node self, between early and LCA, that is a cheaper
1135
// alternative to LCA.
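// Illustrative note (added commentary): the walk below climbs the dominator
// tree from LCA toward 'early', tracking the block with the lowest execution
// frequency; the small 'delta' tolerance lets a latency-motivated hoist accept
// a block whose frequency is only negligibly worse than the best seen so far.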
1136
Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
1137
const double delta = 1+PROB_UNLIKELY_MAG(4);
1138
Block* least = LCA;
1139
double least_freq = least->_freq;
1140
uint target = get_latency_for_node(self);
1141
uint start_latency = get_latency_for_node(LCA->head());
1142
uint end_latency = get_latency_for_node(LCA->get_node(LCA->end_idx()));
1143
bool in_latency = (target <= start_latency);
1144
const Block* root_block = get_block_for_node(_root);
1145
1146
// Turn off latency scheduling if scheduling is just plain off
1147
if (!C->do_scheduling())
1148
in_latency = true;
1149
1150
// Do not hoist (to cover latency) instructions which target a
1151
// single register. Hoisting stretches the live range of the
1152
// single register and may force spilling.
1153
MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
1154
if (mach && mach->out_RegMask().is_bound1() && mach->out_RegMask().is_NotEmpty())
1155
in_latency = true;
1156
1157
#ifndef PRODUCT
1158
if (trace_opto_pipelining()) {
1159
tty->print("# Find cheaper block for latency %d: ", get_latency_for_node(self));
1160
self->dump();
1161
tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
1162
LCA->_pre_order,
1163
LCA->head()->_idx,
1164
start_latency,
1165
LCA->get_node(LCA->end_idx())->_idx,
1166
end_latency,
1167
least_freq);
1168
}
1169
#endif
1170
1171
int cand_cnt = 0; // number of candidates tried
1172
1173
// Walk up the dominator tree from LCA (Lowest common ancestor) to
1174
// the earliest legal location. Capture the least execution frequency.
1175
while (LCA != early) {
1176
LCA = LCA->_idom; // Follow up the dominator tree
1177
1178
if (LCA == NULL) {
1179
// Bailout without retry
1180
assert(false, "graph should be schedulable");
1181
C->record_method_not_compilable("late schedule failed: LCA == NULL");
1182
return least;
1183
}
1184
1185
// Don't hoist machine instructions to the root basic block
1186
if (mach && LCA == root_block)
1187
break;
1188
1189
if (self->is_memory_writer() &&
1190
(LCA->_loop->depth() > early->_loop->depth())) {
1191
// LCA is an invalid placement for a memory writer: choosing it would
1192
// cause memory interference, as illustrated in schedule_late().
1193
continue;
1194
}
1195
verify_memory_writer_placement(LCA, self);
1196
1197
uint start_lat = get_latency_for_node(LCA->head());
1198
uint end_idx = LCA->end_idx();
1199
uint end_lat = get_latency_for_node(LCA->get_node(end_idx));
1200
double LCA_freq = LCA->_freq;
1201
#ifndef PRODUCT
1202
if (trace_opto_pipelining()) {
1203
tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
1204
LCA->_pre_order, LCA->head()->_idx, start_lat, end_idx, end_lat, LCA_freq);
1205
}
1206
#endif
1207
cand_cnt++;
1208
if (LCA_freq < least_freq || // Better Frequency
1209
(StressGCM && C->randomized_select(cand_cnt)) || // Should be randomly accepted in stress mode
1210
(!StressGCM && // Otherwise, choose with latency
1211
!in_latency && // No block containing latency
1212
LCA_freq < least_freq * delta && // No worse frequency
1213
target >= end_lat && // within latency range
1214
!self->is_iteratively_computed() ) // But don't hoist IV increments
1215
// because they may end up above other uses of their phi forcing
1216
// their result register to be different from their input.
1217
) {
1218
least = LCA; // Found cheaper block
1219
least_freq = LCA_freq;
1220
start_latency = start_lat;
1221
end_latency = end_lat;
1222
if (target <= start_lat)
1223
in_latency = true;
1224
}
1225
}
1226
1227
#ifndef PRODUCT
1228
if (trace_opto_pipelining()) {
1229
tty->print_cr("# Choose block B%d with start latency=%d and freq=%g",
1230
least->_pre_order, start_latency, least_freq);
1231
}
1232
#endif
1233
1234
// See if the latency needs to be updated
1235
if (target < end_latency) {
1236
#ifndef PRODUCT
1237
if (trace_opto_pipelining()) {
1238
tty->print_cr("# Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
1239
}
1240
#endif
1241
set_latency_for_node(self, end_latency);
1242
partial_latency_of_defs(self);
1243
}
1244
1245
return least;
1246
}
1247
1248
1249
//------------------------------schedule_late-----------------------------------
1250
// Now schedule all codes as LATE as possible. This is the LCA in the
1251
// dominator tree of all USES of a value. Pick the block with the least
1252
// loop nesting depth that is lowest in the dominator tree.
1253
extern const char must_clone[];
1254
void PhaseCFG::schedule_late(VectorSet &visited, Node_Stack &stack) {
1255
#ifndef PRODUCT
1256
if (trace_opto_pipelining())
1257
tty->print("\n#---- schedule_late ----\n");
1258
#endif
1259
1260
Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
1261
Node *self;
1262
1263
// Walk over all the nodes from last to first
1264
while ((self = iter.next())) {
1265
Block* early = get_block_for_node(self); // Earliest legal placement
1266
1267
if (self->is_top()) {
1268
// Top node goes in bb #2 with other constants.
1269
// It must be special-cased, because it has no out edges.
1270
early->add_inst(self);
1271
continue;
1272
}
1273
1274
// No uses, just terminate
1275
if (self->outcnt() == 0) {
1276
assert(self->is_MachProj(), "sanity");
1277
continue; // Must be a dead machine projection
1278
}
1279
1280
// If node is pinned in the block, then no scheduling can be done.
1281
if( self->pinned() ) // Pinned in block?
1282
continue;
1283
1284
#ifdef ASSERT
1285
// Assert that memory writers (e.g. stores) have a "home" block (the block
1286
// given by their control input), and that this block corresponds to their
1287
// earliest possible placement. This guarantees that
1288
// hoist_to_cheaper_block() will always have at least one valid choice.
1289
if (self->is_memory_writer()) {
1290
assert(find_block_for_node(self->in(0)) == early,
1291
"The home of a memory writer must also be its earliest placement");
1292
}
1293
#endif
1294
1295
MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
1296
if (mach) {
1297
switch (mach->ideal_Opcode()) {
1298
case Op_CreateEx:
1299
// Don't move exception creation
1300
early->add_inst(self);
1301
continue;
1302
break;
1303
case Op_CheckCastPP: {
1304
// Don't move CheckCastPP nodes away from their input, if the input
1305
// is a rawptr (5071820).
1306
Node *def = self->in(1);
1307
if (def != NULL && def->bottom_type()->base() == Type::RawPtr) {
1308
early->add_inst(self);
1309
#ifdef ASSERT
1310
_raw_oops.push(def);
1311
#endif
1312
continue;
1313
}
1314
break;
1315
}
1316
default:
1317
break;
1318
}
1319
if (C->has_irreducible_loop() && self->is_memory_writer()) {
1320
// If the CFG is irreducible, place memory writers in their home block.
1321
// This prevents hoist_to_cheaper_block() from accidentally placing such
1322
// nodes into deeper loops, as in the following example:
1323
//
1324
// Home placement of store in B1 (loop L1):
1325
//
1326
// B1 (L1):
1327
// m1 <- ..
1328
// m2 <- store m1, ..
1329
// B2 (L2):
1330
// jump B2
1331
// B3 (L1):
1332
// .. <- .. m2, ..
1333
//
1334
// Wrong "hoisting" of store to B2 (in loop L2, child of L1):
1335
//
1336
// B1 (L1):
1337
// m1 <- ..
1338
// B2 (L2):
1339
// m2 <- store m1, ..
1340
// # Wrong: m1 and m2 interfere at this point.
1341
// jump B2
1342
// B3 (L1):
1343
// .. <- .. m2, ..
1344
//
1345
// This "hoist inversion" can happen due to different factors such as
1346
// inaccurate estimation of frequencies for irreducible CFGs, and loops
1347
// with always-taken exits in reducible CFGs. In the reducible case,
1348
// hoist inversion is prevented by discarding invalid blocks (those in
1349
// deeper loops than the home block). In the irreducible case, the
1350
// invalid blocks cannot be identified due to incomplete loop nesting
1351
// information, hence a conservative solution is taken.
1352
#ifndef PRODUCT
1353
if (trace_opto_pipelining()) {
1354
tty->print_cr("# Irreducible loops: schedule in home block B%d:",
1355
early->_pre_order);
1356
self->dump();
1357
}
1358
#endif
1359
schedule_node_into_block(self, early);
1360
continue;
1361
}
1362
}
1363
1364
// Gather LCA of all uses
1365
Block *LCA = NULL;
1366
{
1367
for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
1368
// For all uses, find LCA
1369
Node* use = self->fast_out(i);
1370
LCA = raise_LCA_above_use(LCA, use, self, this);
1371
}
1372
guarantee(LCA != NULL, "There must be a LCA");
1373
} // (Hide defs of imax, i from rest of block.)
1374
1375
// Place temps in the block of their use. This isn't a
1376
// requirement for correctness but it reduces useless
1377
// interference between temps and other nodes.
1378
if (mach != NULL && mach->is_MachTemp()) {
1379
map_node_to_block(self, LCA);
1380
LCA->add_inst(self);
1381
continue;
1382
}
1383
1384
// Check if 'self' could be anti-dependent on memory
1385
if (self->needs_anti_dependence_check()) {
1386
// Hoist LCA above possible-defs and insert anti-dependences to
1387
// defs in new LCA block.
1388
LCA = insert_anti_dependences(LCA, self);
1389
}
1390
1391
if (early->_dom_depth > LCA->_dom_depth) {
1392
// Somehow the LCA has moved above the earliest legal point.
1393
// (One way this can happen is via memory_early_block.)
1394
if (C->subsume_loads() == true && !C->failing()) {
1395
// Retry with subsume_loads == false
1396
// If this is the first failure, the sentinel string will "stick"
1397
// to the Compile object, and the C2Compiler will see it and retry.
1398
C->record_failure(C2Compiler::retry_no_subsuming_loads());
1399
} else {
1400
// Bailout without retry when (early->_dom_depth > LCA->_dom_depth)
1401
assert(false, "graph should be schedulable");
1402
C->record_method_not_compilable("late schedule failed: incorrect graph");
1403
}
1404
return;
1405
}
1406
1407
if (self->is_memory_writer()) {
1408
// If the LCA of a memory writer is a descendant of its home loop, hoist
1409
// it into a valid placement.
1410
while (LCA->_loop->depth() > early->_loop->depth()) {
1411
LCA = LCA->_idom;
1412
}
1413
assert(LCA != NULL, "a valid LCA must exist");
1414
verify_memory_writer_placement(LCA, self);
1415
}
1416
1417
// If there is no opportunity to hoist, then we're done.
1418
// In stress mode, try to hoist even the single operations.
1419
bool try_to_hoist = StressGCM || (LCA != early);
1420
1421
// Must clone guys stay next to use; no hoisting allowed.
1422
// Also cannot hoist guys that alter memory or are otherwise not
1423
// allocatable (hoisting can make a value live longer, leading to
1424
// anti and output dependency problems which are normally resolved
1425
// by the register allocator giving everyone a different register).
1426
if (mach != NULL && must_clone[mach->ideal_Opcode()])
1427
try_to_hoist = false;
1428
1429
Block* late = NULL;
1430
if (try_to_hoist) {
1431
// Now find the block with the least execution frequency.
1432
// Start at the latest schedule and work up to the earliest schedule
1433
// in the dominator tree. Thus the Node will dominate all its uses.
1434
late = hoist_to_cheaper_block(LCA, early, self);
1435
} else {
1436
// Just use the LCA of the uses.
1437
late = LCA;
1438
}
1439
1440
// Put the node into target block
1441
schedule_node_into_block(self, late);
1442
1443
#ifdef ASSERT
1444
if (self->needs_anti_dependence_check()) {
1445
// since precedence edges are only inserted when we're sure they
1446
// are needed make sure that after placement in a block we don't
1447
// need any new precedence edges.
1448
verify_anti_dependences(late, self);
1449
}
1450
#endif
1451
} // Loop until all nodes have been visited
1452
1453
} // end ScheduleLate
1454
1455
//------------------------------GlobalCodeMotion-------------------------------
1456
void PhaseCFG::global_code_motion() {
1457
ResourceMark rm;
1458
1459
#ifndef PRODUCT
1460
if (trace_opto_pipelining()) {
1461
tty->print("\n---- Start GlobalCodeMotion ----\n");
1462
}
1463
#endif
1464
1465
// Initialize the node to block mapping for things on the proj_list
1466
for (uint i = 0; i < _matcher.number_of_projections(); i++) {
1467
unmap_node_from_block(_matcher.get_projection(i));
1468
}
1469
1470
// Set the basic block for Nodes pinned into blocks
1471
VectorSet visited;
1472
schedule_pinned_nodes(visited);
1473
1474
// Find the earliest Block any instruction can be placed in. Some
1475
// instructions are pinned into Blocks. Unpinned instructions can
1476
// appear in the last block in which all their inputs occur.
1477
visited.clear();
1478
Node_Stack stack((C->live_nodes() >> 2) + 16); // pre-grow
1479
if (!schedule_early(visited, stack)) {
1480
// Bailout without retry
1481
C->record_method_not_compilable("early schedule failed");
1482
return;
1483
}
1484
1485
// Build Def-Use edges.
1486
// Compute the latency information (via backwards walk) for all the
1487
// instructions in the graph
1488
_node_latency = new GrowableArray<uint>(); // resource_area allocation
1489
1490
if (C->do_scheduling()) {
1491
compute_latencies_backwards(visited, stack);
1492
}
1493
1494
// Now schedule all codes as LATE as possible. This is the LCA in the
1495
// dominator tree of all USES of a value. Pick the block with the least
1496
// loop nesting depth that is lowest in the dominator tree.
1497
// ( visited.clear() called in schedule_late()->Node_Backward_Iterator() )
1498
schedule_late(visited, stack);
1499
if (C->failing()) {
1500
return;
1501
}
1502
1503
#ifndef PRODUCT
1504
if (trace_opto_pipelining()) {
1505
tty->print("\n---- Detect implicit null checks ----\n");
1506
}
1507
#endif
1508
1509
// Detect implicit-null-check opportunities. Basically, find NULL checks
1510
// with suitable memory ops nearby. Use the memory op to do the NULL check.
1511
// I can generate a memory op if there is not one nearby.
1512
if (C->is_method_compilation()) {
1513
// By reversing the loop direction we get a very minor gain on mpegaudio.
1514
// Feel free to revert to a forward loop for clarity.
1515
// for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) {
1516
for (int i = _matcher._null_check_tests.size() - 2; i >= 0; i -= 2) {
1517
Node* proj = _matcher._null_check_tests[i];
1518
Node* val = _matcher._null_check_tests[i + 1];
1519
Block* block = get_block_for_node(proj);
1520
implicit_null_check(block, proj, val, C->allowed_deopt_reasons());
1521
// The implicit_null_check will only perform the transformation
1522
// if the null branch is truly uncommon, *and* it leads to an
1523
// uncommon trap. Combined with the too_many_traps guards
1524
// above, this prevents SEGV storms reported in 6366351,
1525
// by recompiling offending methods without this optimization.
1526
}
1527
}
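
  // Rough sketch of what implicit_null_check() (see lcm.cpp) looks for:
  //
  //   if (obj == NULL) goto uncommon_trap;  // explicit test, almost never taken
  //   x = obj->field;                       // memory op that would fault on NULL
  //
  // The explicit compare-and-branch is removed and the load itself becomes the
  // null check: a SEGV raised by the faulting access is caught by the signal
  // handler and dispatched to the uncommon trap. The exact matching rules
  // (suitable memory op, offset limits, etc.) are in implicit_null_check().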

  bool block_size_threshold_ok = false;
  intptr_t *recalc_pressure_nodes = NULL;
  if (OptoRegScheduling) {
    for (uint i = 0; i < number_of_blocks(); i++) {
      Block* block = get_block(i);
      if (block->number_of_nodes() > 10) {
        block_size_threshold_ok = true;
        break;
      }
    }
  }

  // Register pressure aware scheduling is only worthwhile if it is requested
  // (OptoRegScheduling) and at least one block is large enough to benefit.
  PhaseChaitin regalloc(C->unique(), *this, _matcher, true);
  ResourceArea live_arena(mtCompiler); // Arena for liveness
  ResourceMark rm_live(&live_arena);
  PhaseLive live(*this, regalloc._lrg_map.names(), &live_arena, true);
  PhaseIFG ifg(&live_arena);
  if (OptoRegScheduling && block_size_threshold_ok) {
    regalloc.mark_ssa();
    Compile::TracePhase tp("computeLive", &timers[_t_computeLive]);
    rm_live.reset_to_mark(); // Reclaim working storage
    IndexSet::reset_memory(C, &live_arena);
    uint node_size = regalloc._lrg_map.max_lrg_id();
    ifg.init(node_size); // Empty IFG
    regalloc.set_ifg(ifg);
    regalloc.set_live(live);
    regalloc.gather_lrg_masks(false); // Collect LRG masks
    live.compute(node_size); // Compute liveness

    recalc_pressure_nodes = NEW_RESOURCE_ARRAY(intptr_t, node_size);
    for (uint i = 0; i < node_size; i++) {
      recalc_pressure_nodes[i] = 0;
    }
  }
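
  // When the branch above is taken, schedule_local() below gets a non-NULL
  // recalc_pressure_nodes array and the liveness/IFG state via _regalloc, so
  // it can (approximately) track register pressure while choosing an
  // instruction order. Otherwise recalc_pressure_nodes stays NULL and local
  // scheduling uses the plain latency-driven heuristic only.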
  _regalloc = &regalloc;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Start Local Scheduling ----\n");
  }
#endif

  // Schedule locally. Right now a simple topological sort.
  // Later, do a real latency aware scheduler.
  GrowableArray<int> ready_cnt(C->unique(), C->unique(), -1);
  visited.reset();
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    if (!schedule_local(block, ready_cnt, visited, recalc_pressure_nodes)) {
      if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
        C->record_method_not_compilable("local schedule failed");
      }
      _regalloc = NULL;
      return;
    }
  }
  _regalloc = NULL;

  // If we inserted any instructions between a Call and its CatchNode,
  // clone the instructions on all paths below the Catch.
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    call_catch_cleanup(block);
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- After GlobalCodeMotion ----\n");
    for (uint i = 0; i < number_of_blocks(); i++) {
      Block* block = get_block(i);
      block->dump();
    }
  }
#endif
  // Dead.
  _node_latency = (GrowableArray<uint> *)((intptr_t)0xdeadbeef);
}

bool PhaseCFG::do_global_code_motion() {

  build_dominator_tree();
  if (C->failing()) {
    return false;
  }

  NOT_PRODUCT( C->verify_graph_edges(); )

  estimate_block_frequency();

  global_code_motion();

  if (C->failing()) {
    return false;
  }

  return true;
}

//------------------------------Estimate_Block_Frequency-----------------------
// Estimate block frequencies based on IfNode probabilities.
void PhaseCFG::estimate_block_frequency() {

  // Force conditional branches leading to uncommon traps to be unlikely,
  // not because we get to the uncommon_trap with less relative frequency,
  // but because an uncommon_trap typically causes a deopt, so we only get
  // there once.
  if (C->do_freq_based_layout()) {
    Block_List worklist;
    Block* root_blk = get_block(0);
    for (uint i = 1; i < root_blk->num_preds(); i++) {
      Block *pb = get_block_for_node(root_blk->pred(i));
      if (pb->has_uncommon_code()) {
        worklist.push(pb);
      }
    }
    while (worklist.size() > 0) {
      Block* uct = worklist.pop();
      if (uct == get_root_block()) {
        continue;
      }
      for (uint i = 1; i < uct->num_preds(); i++) {
        Block *pb = get_block_for_node(uct->pred(i));
        if (pb->_num_succs == 1) {
          worklist.push(pb);
        } else if (pb->num_fall_throughs() == 2) {
          pb->update_uncommon_branch(uct);
        }
      }
    }
  }

  // Create the loop tree and calculate loop depth.
  _root_loop = create_loop_tree();
  _root_loop->compute_loop_depth(0);

  // Compute block frequency of each block, relative to a single loop entry.
  _root_loop->compute_freq();

  // Adjust all frequencies to be relative to a single method entry
  _root_loop->_freq = 1.0;
  _root_loop->scale_freq();

  // Save outermost loop frequency for LRG frequency threshold
  _outer_loop_frequency = _root_loop->outer_loop_freq();

  // Force paths ending at uncommon traps to be infrequent
  if (!C->do_freq_based_layout()) {
    Block_List worklist;
    Block* root_blk = get_block(0);
    for (uint i = 1; i < root_blk->num_preds(); i++) {
      Block *pb = get_block_for_node(root_blk->pred(i));
      if (pb->has_uncommon_code()) {
        worklist.push(pb);
      }
    }
    while (worklist.size() > 0) {
      Block* uct = worklist.pop();
      uct->_freq = PROB_MIN;
      for (uint i = 1; i < uct->num_preds(); i++) {
        Block *pb = get_block_for_node(uct->pred(i));
        if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) {
          worklist.push(pb);
        }
      }
    }
  }

#ifdef ASSERT
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* b = get_block(i);
    assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency");
  }
#endif

#ifndef PRODUCT
  if (PrintCFGBlockFreq) {
    tty->print_cr("CFG Block Frequencies");
    _root_loop->dump_tree();
    if (Verbose) {
      tty->print_cr("PhaseCFG dump");
      dump();
      tty->print_cr("Node dump");
      _root->dump(99999);
    }
  }
#endif
}
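
// Worked example (simplified): consider a test whose false branch reaches only
// an uncommon trap. With freq-based layout, update_uncommon_branch() above
// clamps the branch probability toward PROB_MIN; without it, the trap-path
// blocks themselves get _freq = PROB_MIN. Either way the trap path is treated
// as essentially never executed, while every block still satisfies the
// _freq >= MIN_BLOCK_FREQUENCY assertion checked above.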

//----------------------------create_loop_tree--------------------------------
// Create a loop tree from the CFG
CFGLoop* PhaseCFG::create_loop_tree() {

#ifdef ASSERT
  assert(get_block(0) == get_root_block(), "first block should be root block");
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    // Check that the _loop fields are clear...we could clear them if not.
    assert(block->_loop == NULL, "clear _loop expected");
    // Sanity check that the RPO numbering is reflected in the _blocks array.
    // It doesn't have to be for the loop tree to be built, but if it is not,
    // then the blocks have been reordered since dom graph building...which
    // casts doubt on the RPO numbering.
    assert(block->_rpo == i, "unexpected reverse post order number");
  }
#endif

  int idct = 0;
  CFGLoop* root_loop = new CFGLoop(idct++);

  Block_List worklist;

  // Assign blocks to loops
  for(uint i = number_of_blocks() - 1; i > 0; i-- ) { // skip Root block
    Block* block = get_block(i);

    if (block->head()->is_Loop()) {
      Block* loop_head = block;
      assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
      Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
      Block* tail = get_block_for_node(tail_n);

      // Defensively filter out Loop nodes for non-single-entry loops.
      // For all reasonable loops, the head occurs before the tail in RPO.
      if (i <= tail->_rpo) {

        // The tail and (recursive) predecessors of the tail
        // are made members of a new loop.

        assert(worklist.size() == 0, "nonempty worklist");
        CFGLoop* nloop = new CFGLoop(idct++);
        assert(loop_head->_loop == NULL, "just checking");
        loop_head->_loop = nloop;
        // Add to nloop so push_pred() will skip over inner loops
        nloop->add_member(loop_head);
        nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, this);

        while (worklist.size() > 0) {
          Block* member = worklist.pop();
          if (member != loop_head) {
            for (uint j = 1; j < member->num_preds(); j++) {
              nloop->push_pred(member, j, worklist, this);
            }
          }
        }
      }
    }
  }

  // Create a member list for each loop consisting
  // of both blocks and (immediate child) loops.
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    CFGLoop* lp = block->_loop;
    if (lp == NULL) {
      // Not assigned to a loop. Add it to the method's pseudo loop.
      block->_loop = root_loop;
      lp = root_loop;
    }
    if (lp == root_loop || block != lp->head()) { // loop heads are already members
      lp->add_member(block);
    }
    if (lp != root_loop) {
      if (lp->parent() == NULL) {
        // Not a nested loop. Make it a child of the method's pseudo loop.
        root_loop->add_nested_loop(lp);
      }
      if (block == lp->head()) {
        // Add nested loop to member list of parent loop.
        lp->parent()->add_member(lp);
      }
    }
  }

  return root_loop;
}
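
// Small example of the resulting shape: for a method containing
//
//   for (...) {      // loop A
//     for (...) { }  // loop B
//   }
//
// the returned tree is the method's pseudo loop with A as a child and B as a
// child of A. Every block belongs to exactly one CFGLoop, and a loop head is a
// member only of its own loop. This assumes well-formed single-entry loops;
// irreducible shapes are defensively filtered by the RPO checks above.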

//------------------------------push_pred--------------------------------------
void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) {
  Node* pred_n = blk->pred(i);
  Block* pred = cfg->get_block_for_node(pred_n);
  CFGLoop *pred_loop = pred->_loop;
  if (pred_loop == NULL) {
    // Filter out blocks for non-single-entry loops.
    // For all reasonable loops, the head occurs before the tail in RPO.
    if (pred->_rpo > head()->_rpo) {
      pred->_loop = this;
      worklist.push(pred);
    }
  } else if (pred_loop != this) {
    // Nested loop.
    while (pred_loop->_parent != NULL && pred_loop->_parent != this) {
      pred_loop = pred_loop->_parent;
    }
    // Make pred's loop be a child
    if (pred_loop->_parent == NULL) {
      add_nested_loop(pred_loop);
      // Continue with loop entry predecessor.
      Block* pred_head = pred_loop->head();
      assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
      assert(pred_head != head(), "loop head in only one loop");
      push_pred(pred_head, LoopNode::EntryControl, worklist, cfg);
    } else {
      assert(pred_loop->_parent == this && _parent == NULL, "just checking");
    }
  }
}

//------------------------------add_nested_loop--------------------------------
// Make cl a child of the current loop in the loop tree.
void CFGLoop::add_nested_loop(CFGLoop* cl) {
  assert(_parent == NULL, "no parent yet");
  assert(cl != this, "not my own parent");
  cl->_parent = this;
  CFGLoop* ch = _child;
  if (ch == NULL) {
    _child = cl;
  } else {
    while (ch->_sibling != NULL) { ch = ch->_sibling; }
    ch->_sibling = cl;
  }
}

//------------------------------compute_loop_depth-----------------------------
// Store the loop depth in each CFGLoop object.
// Recursively walk the children to do the same for them.
void CFGLoop::compute_loop_depth(int depth) {
  _depth = depth;
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->compute_loop_depth(depth + 1);
    ch = ch->_sibling;
  }
}

//------------------------------compute_freq-----------------------------------
// Compute the frequency of each block and loop, relative to a single entry
// into the dominating loop head.
void CFGLoop::compute_freq() {
  // Bottom up traversal of loop tree (visit inner loops first.)
  // Set loop head frequency to 1.0, then transitively
  // compute frequency for all successors in the loop,
  // as well as for each exit edge. Inner loops are
  // treated as single blocks with loop exit targets
  // as the successor blocks.

  // Nested loops first
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->compute_freq();
    ch = ch->_sibling;
  }
  assert (_members.length() > 0, "no empty loops");
  Block* hd = head();
  hd->_freq = 1.0;
  for (int i = 0; i < _members.length(); i++) {
    CFGElement* s = _members.at(i);
    double freq = s->_freq;
    if (s->is_block()) {
      Block* b = s->as_Block();
      for (uint j = 0; j < b->_num_succs; j++) {
        Block* sb = b->_succs[j];
        update_succ_freq(sb, freq * b->succ_prob(j));
      }
    } else {
      CFGLoop* lp = s->as_CFGLoop();
      assert(lp->_parent == this, "immediate child");
      for (int k = 0; k < lp->_exits.length(); k++) {
        Block* eb = lp->_exits.at(k).get_target();
        double prob = lp->_exits.at(k).get_prob();
        update_succ_freq(eb, freq * prob);
      }
    }
  }

  // For all loops other than the outer, "method" loop,
  // sum and normalize the exit probability. The "method" loop
  // should keep the initial exit probability of 1, so that
  // inner blocks do not get erroneously scaled.
  if (_depth != 0) {
    // Total the exit probabilities for this loop.
    double exits_sum = 0.0f;
    for (int i = 0; i < _exits.length(); i++) {
      exits_sum += _exits.at(i).get_prob();
    }

    // Normalize the exit probabilities. Until now, the
    // probabilities estimate the possibility of exit per
    // single loop iteration; afterward, they estimate
    // the probability of exit per loop entry.
    for (int i = 0; i < _exits.length(); i++) {
      Block* et = _exits.at(i).get_target();
      float new_prob = 0.0f;
      if (_exits.at(i).get_prob() > 0.0f) {
        new_prob = _exits.at(i).get_prob() / exits_sum;
      }
      BlockProbPair bpp(et, new_prob);
      _exits.at_put(i, bpp);
    }

    // Save the total, but guard against unreasonable probability,
    // as the value is used to estimate the loop trip count.
    // An infinite trip count would blur relative block
    // frequencies.
    if (exits_sum > 1.0f) exits_sum = 1.0;
    if (exits_sum < PROB_MIN) exits_sum = PROB_MIN;
    _exit_prob = exits_sum;
  }
}
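
// Worked example for the normalization above, assuming a loop with two exits
// whose per-iteration probabilities are 0.01 and 0.04:
//   exits_sum = 0.05                 -> chance of leaving per iteration
//   normalized exits = 0.2 and 0.8   -> chance of leaving via each exit,
//                                       per loop entry
//   _exit_prob = 0.05, giving an estimated trip count of roughly 1/0.05 = 20,
// which is what scale_freq() uses below to scale member frequencies.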

//------------------------------succ_prob-------------------------------------
// Determine the probability of reaching successor 'i' from the receiver block.
float Block::succ_prob(uint i) {
  int eidx = end_idx();
  Node *n = get_node(eidx); // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // Can only reach here if called after lcm. The original Op_If is gone,
      // so we attempt to infer the probability from one or both of the
      // successor blocks.
      assert(_num_succs == 2, "expecting 2 successors of a null check");
      // If either successor has only one predecessor, then the
      // probability estimate can be derived using the
      // relative frequency of the successor and this block.
      if (_succs[i]->num_preds() == 2) {
        return _succs[i]->_freq / _freq;
      } else if (_succs[1-i]->num_preds() == 2) {
        return 1 - (_succs[1-i]->_freq / _freq);
      } else {
        // Estimate using both successor frequencies
        float freq = _succs[i]->_freq;
        return freq / (freq + _succs[1-i]->_freq);
      }
    }
    op = n->as_Mach()->ideal_Opcode();
  }

  // Switch on branch type
  switch( op ) {
  case Op_CountedLoopEnd:
  case Op_If: {
    assert (i < 2, "just checking");
    // Conditionals pass on only part of their frequency
    float prob = n->as_MachIf()->_prob;
    assert(prob >= 0.0 && prob <= 1.0, "out of range probability");
    // If succ[i] is the FALSE branch, invert path info
    if( get_node(i + eidx + 1)->Opcode() == Op_IfFalse ) {
      return 1.0f - prob; // not taken
    } else {
      return prob; // taken
    }
  }

  case Op_Jump:
    return n->as_MachJump()->_probs[get_node(i + eidx + 1)->as_JumpProj()->_con];

  case Op_Catch: {
    const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
    if (ci->_con == CatchProjNode::fall_through_index) {
      // Fall-thru path gets the lion's share.
      return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
    } else {
      // Presume exceptional paths are equally unlikely
      return PROB_UNLIKELY_MAG(5);
    }
  }

  case Op_Root:
  case Op_Goto:
    // Pass frequency straight thru to target
    return 1.0f;

  case Op_NeverBranch:
    return 0.0f;

  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    // Do not push out freq to root block
    return 0.0f;

  default:
    ShouldNotReachHere();
  }

  return 0.0f;
}
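
// Example of the MachNullCheck inference above: if this block has _freq == 100
// and the non-trapping successor (with a single predecessor) has _freq == 99.99,
// the probability of that edge is estimated as 99.99/100, leaving ~1e-4 for the
// trapping path. The estimate is only as good as the block frequencies computed
// by estimate_block_frequency(); no new profile information is consulted here.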

//------------------------------num_fall_throughs-----------------------------
// Return the number of fall-through candidates for a block
int Block::num_fall_throughs() {
  int eidx = end_idx();
  Node *n = get_node(eidx); // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // In theory, either side can fall through; for simplicity's sake,
      // say only the false branch can for now.
      return 1;
    }
    op = n->as_Mach()->ideal_Opcode();
  }

  // Switch on branch type
  switch( op ) {
  case Op_CountedLoopEnd:
  case Op_If:
    return 2;

  case Op_Root:
  case Op_Goto:
    return 1;

  case Op_Catch: {
    for (uint i = 0; i < _num_succs; i++) {
      const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
      if (ci->_con == CatchProjNode::fall_through_index) {
        return 1;
      }
    }
    return 0;
  }

  case Op_Jump:
  case Op_NeverBranch:
  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    return 0;

  default:
    ShouldNotReachHere();
  }

  return 0;
}

//------------------------------succ_fall_through-----------------------------
// Return true if a specific successor could be a fall-through target.
bool Block::succ_fall_through(uint i) {
  int eidx = end_idx();
  Node *n = get_node(eidx); // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // In theory, either side can fall through; for simplicity's sake,
      // say only the false branch can for now.
      return get_node(i + eidx + 1)->Opcode() == Op_IfFalse;
    }
    op = n->as_Mach()->ideal_Opcode();
  }

  // Switch on branch type
  switch( op ) {
  case Op_CountedLoopEnd:
  case Op_If:
  case Op_Root:
  case Op_Goto:
    return true;

  case Op_Catch: {
    const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
    return ci->_con == CatchProjNode::fall_through_index;
  }

  case Op_Jump:
  case Op_NeverBranch:
  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    return false;

  default:
    ShouldNotReachHere();
  }

  return false;
}

//------------------------------update_uncommon_branch------------------------
// Update the probability of a two-branch to be uncommon
void Block::update_uncommon_branch(Block* ub) {
  int eidx = end_idx();
  Node *n = get_node(eidx); // Get ending Node

  int op = n->as_Mach()->ideal_Opcode();

  assert(op == Op_CountedLoopEnd || op == Op_If, "must be an If");
  assert(num_fall_throughs() == 2, "must be a two way branch block");

  // Which successor is ub?
  uint s;
  for (s = 0; s < _num_succs; s++) {
    if (_succs[s] == ub) break;
  }
  assert(s < 2, "uncommon successor must be found");

  // If ub is the true path, make the probability small; else
  // ub is the false path, so make the probability large.
  bool invert = (get_node(s + eidx + 1)->Opcode() == Op_IfFalse);

  // Get existing probability
  float p = n->as_MachIf()->_prob;

  if (invert) p = 1.0 - p;
  if (p > PROB_MIN) {
    p = PROB_MIN;
  }
  if (invert) p = 1.0 - p;

  n->as_MachIf()->_prob = p;
}

//------------------------------update_succ_freq-------------------------------
// Update the appropriate frequency associated with block 'b', a successor of
// a block in this loop.
void CFGLoop::update_succ_freq(Block* b, double freq) {
  if (b->_loop == this) {
    if (b == head()) {
      // back branch within the loop
      // Do nothing now, the loop carried frequency will be
      // adjusted later in scale_freq().
    } else {
      // simple branch within the loop
      b->_freq += freq;
    }
  } else if (!in_loop_nest(b)) {
    // branch is exit from this loop
    BlockProbPair bpp(b, freq);
    _exits.append(bpp);
  } else {
    // branch into nested loop
    CFGLoop* ch = b->_loop;
    ch->_freq += freq;
  }
}

//------------------------------in_loop_nest-----------------------------------
// Determine if block b is in the receiver's loop nest.
bool CFGLoop::in_loop_nest(Block* b) {
  int depth = _depth;
  CFGLoop* b_loop = b->_loop;
  int b_depth = b_loop->_depth;
  if (depth == b_depth) {
    return true;
  }
  while (b_depth > depth) {
    b_loop = b_loop->_parent;
    b_depth = b_loop->_depth;
  }
  return b_loop == this;
}

//------------------------------scale_freq-------------------------------------
// Scale frequency of loops and blocks by trip counts from outer loops
// Do a top down traversal of loop tree (visit outer loops first.)
void CFGLoop::scale_freq() {
  double loop_freq = _freq * trip_count();
  _freq = loop_freq;
  for (int i = 0; i < _members.length(); i++) {
    CFGElement* s = _members.at(i);
    double block_freq = s->_freq * loop_freq;
    if (g_isnan(block_freq) || block_freq < MIN_BLOCK_FREQUENCY)
      block_freq = MIN_BLOCK_FREQUENCY;
    s->_freq = block_freq;
  }
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->scale_freq();
    ch = ch->_sibling;
  }
}
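
// Worked example: a loop entered once per method invocation (_freq == 1.0)
// with _exit_prob == 0.1 has an estimated trip count of ~10, so its member
// blocks end up with roughly 10x the method-entry frequency; a nested loop
// entered once per iteration with a trip count of ~20 scales its own members
// by a further factor of ~20 (about 200x overall). Member frequencies are
// floored at MIN_BLOCK_FREQUENCY to avoid float underflow.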

// Frequency of outer loop
double CFGLoop::outer_loop_freq() const {
  if (_child != NULL) {
    return _child->_freq;
  }
  return _freq;
}

#ifndef PRODUCT
//------------------------------dump_tree--------------------------------------
void CFGLoop::dump_tree() const {
  dump();
  if (_child != NULL) _child->dump_tree();
  if (_sibling != NULL) _sibling->dump_tree();
}

//------------------------------dump-------------------------------------------
void CFGLoop::dump() const {
  for (int i = 0; i < _depth; i++) tty->print(" ");
  tty->print("%s: %d trip_count: %6.0f freq: %6.0f\n",
             _depth == 0 ? "Method" : "Loop", _id, trip_count(), _freq);
  for (int i = 0; i < _depth; i++) tty->print(" ");
  tty->print(" members:");
  int k = 0;
  for (int i = 0; i < _members.length(); i++) {
    if (k++ >= 6) {
      tty->print("\n ");
      for (int j = 0; j < _depth+1; j++) tty->print(" ");
      k = 0;
    }
    CFGElement *s = _members.at(i);
    if (s->is_block()) {
      Block *b = s->as_Block();
      tty->print(" B%d(%6.3f)", b->_pre_order, b->_freq);
    } else {
      CFGLoop* lp = s->as_CFGLoop();
      tty->print(" L%d(%6.3f)", lp->_id, lp->_freq);
    }
  }
  tty->print("\n");
  for (int i = 0; i < _depth; i++) tty->print(" ");
  tty->print(" exits: ");
  k = 0;
  for (int i = 0; i < _exits.length(); i++) {
    if (k++ >= 7) {
      tty->print("\n ");
      for (int j = 0; j < _depth+1; j++) tty->print(" ");
      k = 0;
    }
    Block *blk = _exits.at(i).get_target();
    double prob = _exits.at(i).get_prob();
    tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100));
  }
  tty->print("\n");
}
#endif