Path: blob/master/src/hotspot/share/opto/gcm.cpp
/*
 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "opto/block.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/machnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/chaitin.hpp"
#include "runtime/deoptimization.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

// To avoid float value underflow
#define MIN_BLOCK_FREQUENCY 1.e-35f

//----------------------------schedule_node_into_block-------------------------
// Insert node n into block b. Look for projections of n and make sure they
// are in b also.
void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
  // Set basic block of n, add n to b,
  map_node_to_block(n, b);
  b->add_inst(n);

  // After Matching, nearly any old Node may have projections trailing it.
  // These are usually machine-dependent flags. In any case, they might
  // float to another block below this one.
  // Move them up.
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* use = n->fast_out(i);
    if (use->is_Proj()) {
      Block* buse = get_block_for_node(use);
      if (buse != b) {              // In wrong block?
        if (buse != NULL) {
          buse->find_remove(use);   // Remove from wrong block
        }
        map_node_to_block(use, b);
        b->add_inst(use);
      }
    }
  }
}

//----------------------------replace_block_proj_ctrl-------------------------
// Nodes that have is_block_proj() nodes as their control need to use
// the appropriate Region for their actual block as their control since
// the projection will be in a predecessor block.
void PhaseCFG::replace_block_proj_ctrl( Node *n ) {
  const Node *in0 = n->in(0);
  assert(in0 != NULL, "Only control-dependent");
  const Node *p = in0->is_block_proj();
  if (p != NULL && p != n) {    // Control from a block projection?
    assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here");
    // Find trailing Region
    Block *pb = get_block_for_node(in0); // Block-projection already has basic block
    uint j = 0;
    if (pb->_num_succs != 1) {  // More than 1 successor?
      // Search for successor
      uint max = pb->number_of_nodes();
      assert( max > 1, "" );
      uint start = max - pb->_num_succs;
      // Find which output path belongs to projection
      for (j = start; j < max; j++) {
        if( pb->get_node(j) == in0 )
          break;
      }
      assert( j < max, "must find" );
      // Change control to match head of successor basic block
      j -= start;
    }
    n->set_req(0, pb->_succs[j]->head());
  }
}

bool PhaseCFG::is_dominator(Node* dom_node, Node* node) {
  assert(is_CFG(node) && is_CFG(dom_node), "node and dom_node must be CFG nodes");
  if (dom_node == node) {
    return true;
  }
  Block* d = find_block_for_node(dom_node);
  Block* n = find_block_for_node(node);
  assert(n != NULL && d != NULL, "blocks must exist");

  if (d == n) {
    if (dom_node->is_block_start()) {
      return true;
    }
    if (node->is_block_start()) {
      return false;
    }
    if (dom_node->is_block_proj()) {
      return false;
    }
    if (node->is_block_proj()) {
      return true;
    }

    assert(is_control_proj_or_safepoint(node), "node must be control projection or safepoint");
    assert(is_control_proj_or_safepoint(dom_node), "dom_node must be control projection or safepoint");

    // Neither 'node' nor 'dom_node' is a block start or block projection.
    // Check if 'dom_node' is above 'node' in the control graph.
    if (is_dominating_control(dom_node, node)) {
      return true;
    }

#ifdef ASSERT
    // If 'dom_node' does not dominate 'node' then 'node' has to dominate 'dom_node'
    if (!is_dominating_control(node, dom_node)) {
      node->dump();
      dom_node->dump();
      assert(false, "neither dom_node nor node dominates the other");
    }
#endif

    return false;
  }
  return d->dom_lca(n) == d;
}

bool PhaseCFG::is_CFG(Node* n) {
  return n->is_block_proj() || n->is_block_start() || is_control_proj_or_safepoint(n);
}

bool PhaseCFG::is_control_proj_or_safepoint(Node* n) const {
  bool result = (n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_SafePoint) || (n->is_Proj() && n->as_Proj()->bottom_type() == Type::CONTROL);
  assert(!result || (n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_SafePoint)
         || (n->is_Proj() && n->as_Proj()->_con == 0), "If control projection, it must be projection 0");
  return result;
}

Block* PhaseCFG::find_block_for_node(Node* n) const {
  if (n->is_block_start() || n->is_block_proj()) {
    return get_block_for_node(n);
  } else {
    // Walk the control graph up if 'n'
    // is not a block start nor a block projection. In this case 'n' must be
    // an unmatched control projection or a not yet matched safepoint precedence edge in the middle of a block.
    assert(is_control_proj_or_safepoint(n), "must be control projection or safepoint");
    Node* ctrl = n->in(0);
    while (!ctrl->is_block_start()) {
      ctrl = ctrl->in(0);
    }
    return get_block_for_node(ctrl);
  }
}

// Walk up the control graph from 'n' and check if 'dom_ctrl' is found.
bool PhaseCFG::is_dominating_control(Node* dom_ctrl, Node* n) {
  Node* ctrl = n->in(0);
  while (!ctrl->is_block_start()) {
    if (ctrl == dom_ctrl) {
      return true;
    }
    ctrl = ctrl->in(0);
  }
  return false;
}


//------------------------------schedule_pinned_nodes--------------------------
// Set the basic block for Nodes pinned into blocks
void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
  // Allocate node stack of size C->live_nodes()+8 to avoid frequent realloc
  GrowableArray <Node*> spstack(C->live_nodes() + 8);
  spstack.push(_root);
  while (spstack.is_nonempty()) {
    Node* node = spstack.pop();
    if (!visited.test_set(node->_idx)) { // Test node and flag it as visited
      if (node->pinned() && !has_block(node)) {  // Pinned?  Nail it down!
        assert(node->in(0), "pinned Node must have Control");
        // Before setting block replace block_proj control edge
        replace_block_proj_ctrl(node);
        Node* input = node->in(0);
        while (!input->is_block_start()) {
          input = input->in(0);
        }
        Block* block = get_block_for_node(input); // Basic block of controlling input
        schedule_node_into_block(node, block);
      }

      // If the node has precedence edges (added when CastPP nodes are
      // removed in final_graph_reshaping), fix the control of the
      // node to cover the precedence edges and remove the
      // dependencies.
      Node* n = NULL;
      for (uint i = node->len()-1; i >= node->req(); i--) {
        Node* m = node->in(i);
        if (m == NULL) continue;

        // Only process precedence edges that are CFG nodes. Safepoints and control projections can be in the middle of a block
        if (is_CFG(m)) {
          node->rm_prec(i);
          if (n == NULL) {
            n = m;
          } else {
            assert(is_dominator(n, m) || is_dominator(m, n), "one must dominate the other");
            n = is_dominator(n, m) ? m : n;
          }
        } else {
          assert(node->is_Mach(), "sanity");
          assert(node->as_Mach()->ideal_Opcode() == Op_StoreCM, "must be StoreCM node");
        }
      }
      if (n != NULL) {
        assert(node->in(0), "control should have been set");
        assert(is_dominator(n, node->in(0)) || is_dominator(node->in(0), n), "one must dominate the other");
        if (!is_dominator(n, node->in(0))) {
          node->set_req(0, n);
        }
      }

      // process all inputs that are non NULL
      for (int i = node->req()-1; i >= 0; --i) {
        if (node->in(i) != NULL) {
          spstack.push(node->in(i));
        }
      }
    }
  }
}

#ifdef ASSERT
// Assert that new input b2 is dominated by all previous inputs.
// Check this by seeing that it is dominated by b1, the deepest
// input observed until b2.
static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
  if (b1 == NULL)  return;
  assert(b1->_dom_depth < b2->_dom_depth, "sanity");
  Block* tmp = b2;
  while (tmp != b1 && tmp != NULL) {
    tmp = tmp->_idom;
  }
  if (tmp != b1) {
    // Detected an unschedulable graph. Print some nice stuff and die.
    tty->print_cr("!!! Unschedulable graph !!!");
Unschedulable graph !!!");265for (uint j=0; j<n->len(); j++) { // For all inputs266Node* inn = n->in(j); // Get input267if (inn == NULL) continue; // Ignore NULL, missing inputs268Block* inb = cfg->get_block_for_node(inn);269tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,270inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);271inn->dump();272}273tty->print("Failing node: ");274n->dump();275assert(false, "unscheduable graph");276}277}278#endif279280static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) {281// Find the last input dominated by all other inputs.282Block* deepb = NULL; // Deepest block so far283int deepb_dom_depth = 0;284for (uint k = 0; k < n->len(); k++) { // For all inputs285Node* inn = n->in(k); // Get input286if (inn == NULL) continue; // Ignore NULL, missing inputs287Block* inb = cfg->get_block_for_node(inn);288assert(inb != NULL, "must already have scheduled this input");289if (deepb_dom_depth < (int) inb->_dom_depth) {290// The new inb must be dominated by the previous deepb.291// The various inputs must be linearly ordered in the dom292// tree, or else there will not be a unique deepest block.293DEBUG_ONLY(assert_dom(deepb, inb, n, cfg));294deepb = inb; // Save deepest block295deepb_dom_depth = deepb->_dom_depth;296}297}298assert(deepb != NULL, "must be at least one input to n");299return deepb;300}301302303//------------------------------schedule_early---------------------------------304// Find the earliest Block any instruction can be placed in. Some instructions305// are pinned into Blocks. Unpinned instructions can appear in last block in306// which all their inputs occur.307bool PhaseCFG::schedule_early(VectorSet &visited, Node_Stack &roots) {308// Allocate stack with enough space to avoid frequent realloc309Node_Stack nstack(roots.size() + 8);310// _root will be processed among C->top() inputs311roots.push(C->top(), 0);312visited.set(C->top()->_idx);313314while (roots.size() != 0) {315// Use local variables nstack_top_n & nstack_top_i to cache values316// on stack's top.317Node* parent_node = roots.node();318uint input_index = 0;319roots.pop();320321while (true) {322if (input_index == 0) {323// Fixup some control. Constants without control get attached324// to root and nodes that use is_block_proj() nodes should be attached325// to the region that starts their block.326const Node* control_input = parent_node->in(0);327if (control_input != NULL) {328replace_block_proj_ctrl(parent_node);329} else {330// Is a constant with NO inputs?331if (parent_node->req() == 1) {332parent_node->set_req(0, _root);333}334}335}336337// First, visit all inputs and force them to get a block. If an338// input is already in a block we quit following inputs (to avoid339// cycles). 
      // Instead we put that Node on a worklist to be handled
      // later (since its inputs may not have a block yet).

      // Assume all n's inputs will be processed
      bool done = true;

      while (input_index < parent_node->len()) {
        Node* in = parent_node->in(input_index++);
        if (in == NULL) {
          continue;
        }

        int is_visited = visited.test_set(in->_idx);
        if (!has_block(in)) {
          if (is_visited) {
            assert(false, "graph should be schedulable");
            return false;
          }
          // Save parent node and next input's index.
          nstack.push(parent_node, input_index);
          // Process current input now.
          parent_node = in;
          input_index = 0;
          // Not all n's inputs processed.
          done = false;
          break;
        } else if (!is_visited) {
          // Visit this guy later, using worklist
          roots.push(in, 0);
        }
      }

      if (done) {
        // All of n's inputs have been processed, complete post-processing.

        // Some instructions are pinned into a block.  These include Region,
        // Phi, Start, Return, and other control-dependent instructions and
        // any projections which depend on them.
        if (!parent_node->pinned()) {
          // Set earliest legal block.
          Block* earliest_block = find_deepest_input(parent_node, this);
          map_node_to_block(parent_node, earliest_block);
        } else {
          assert(get_block_for_node(parent_node) == get_block_for_node(parent_node->in(0)), "Pinned Node should be at the same block as its control edge");
        }

        if (nstack.is_empty()) {
          // Finished all nodes on stack.
          // Process next node on the worklist 'roots'.
          break;
        }
        // Get saved parent node and next input's index.
        parent_node = nstack.node();
        input_index = nstack.index();
        nstack.pop();
      }
    }
  }
  return true;
}

//------------------------------dom_lca----------------------------------------
// Find least common ancestor in dominator tree
// LCA is a current notion of LCA, to be raised above 'this'.
// As a convenient boundary condition, return 'this' if LCA is NULL.
// Find the LCA of those two nodes.
Block* Block::dom_lca(Block* LCA) {
  if (LCA == NULL || LCA == this)  return this;

  Block* anc = this;
  while (anc->_dom_depth > LCA->_dom_depth)
    anc = anc->_idom;           // Walk up till anc is as high as LCA

  while (LCA->_dom_depth > anc->_dom_depth)
    LCA = LCA->_idom;           // Walk up till LCA is as high as anc

  while (LCA != anc) {          // Walk both up till they are the same
    LCA = LCA->_idom;
    anc = anc->_idom;
  }

  return LCA;
}
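
// A small worked example of the two-pointer walk above (the block names and
// depths are hypothetical, for illustration only): suppose B1 idom B2,
// B2 idom B3 and B2 idom B5, and B3 idom B4, so the dominator depths are
// B1=1, B2=2, B3=B5=3, B4=4.  Computing B4->dom_lca(B5) first levels the
// deeper block, raising B4 to B3 (depth 3), then moves both pointers up in
// lockstep until they meet at B2.  The result always dominates both input
// blocks, which is what the callers below rely on.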
//--------------------------raise_LCA_above_use--------------------------------
// We are placing a definition, and have been given a def->use edge.
// The definition must dominate the use, so move the LCA upward in the
// dominator tree to dominate the use.  If the use is a phi, adjust
// the LCA only with the phi input paths which actually use this def.
static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, const PhaseCFG* cfg) {
  Block* buse = cfg->get_block_for_node(use);
  if (buse == NULL) return LCA;   // Unused killing Projs have no use block
  if (!use->is_Phi())  return buse->dom_lca(LCA);
  uint pmax = use->req();       // Number of Phi inputs
  // Why doesn't this loop just break after finding the matching input to
  // the Phi?  Well...it's like this.  I do not have true def-use/use-def
  // chains.  Means I cannot distinguish, from the def-use direction, which
  // of many use-defs lead from the same use to the same def.  That is, this
  // Phi might have several uses of the same def.  Each use appears in a
  // different predecessor block.  But when I enter here, I cannot distinguish
  // which use-def edge I should find the predecessor block for.  So I find
  // them all.  Means I do a little extra work if a Phi uses the same value
  // more than once.
  for (uint j=1; j<pmax; j++) { // For all inputs
    if (use->in(j) == def) {    // Found matching input?
      Block* pred = cfg->get_block_for_node(buse->pred(j));
      LCA = pred->dom_lca(LCA);
    }
  }
  return LCA;
}

//----------------------------raise_LCA_above_marks----------------------------
// Return a new LCA that dominates LCA and any of its marked predecessors.
// Search all my parents up to 'early' (exclusive), looking for predecessors
// which are marked with the given index.  Return the LCA (in the dom tree)
// of all marked blocks.  If there are none marked, return the original
// LCA.
static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark, Block* early, const PhaseCFG* cfg) {
  Block_List worklist;
  worklist.push(LCA);
  while (worklist.size() > 0) {
    Block* mid = worklist.pop();
    if (mid == early)  continue;  // stop searching here

    // Test and set the visited bit.
    if (mid->raise_LCA_visited() == mark)  continue;  // already visited

    // Don't process the current LCA, otherwise the search may terminate early
    if (mid != LCA && mid->raise_LCA_mark() == mark) {
      // Raise the LCA.
      LCA = mid->dom_lca(LCA);
      if (LCA == early)  break;   // stop searching everywhere
      assert(early->dominates(LCA), "early is high enough");
      // Resume searching at that point, skipping intermediate levels.
      worklist.push(LCA);
      if (LCA == mid)
        continue; // Don't mark as visited to avoid early termination.
    } else {
      // Keep searching through this block's predecessors.
      for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
        Block* mid_parent = cfg->get_block_for_node(mid->pred(j));
        worklist.push(mid_parent);
      }
    }
    mid->set_raise_LCA_visited(mark);
  }
  return LCA;
}
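
// A small sketch of the mark/raise scheme above (block names are hypothetical,
// for illustration only): suppose a load's early block is B1, the current LCA
// is B5, and interfering stores were found in B3 and B4, so those two blocks
// carry the load's mark.  The worklist climbs from B5 toward B1 through
// predecessor blocks; whenever a marked block is reached, the LCA is raised to
// the dom_lca of that block and the current LCA.  The returned block therefore
// dominates the original LCA and every marked block encountered, so the load
// can no longer be scheduled below a block that holds an interfering store.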
//--------------------------memory_early_block--------------------------------
// This is a variation of find_deepest_input, the heart of schedule_early.
// Find the "early" block for a load, if we considered only memory and
// address inputs, that is, if other data inputs were ignored.
//
// Because a subset of edges are considered, the resulting block will
// be earlier (at a shallower dom_depth) than the true schedule_early
// point of the node.  We compute this earlier block as a more permissive
// site for anti-dependency insertion, but only if subsume_loads is enabled.
static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg) {
  Node* base;
  Node* index;
  Node* store = load->in(MemNode::Memory);
  load->as_Mach()->memory_inputs(base, index);

  assert(base != NodeSentinel && index != NodeSentinel,
         "unexpected base/index inputs");

  Node* mem_inputs[4];
  int mem_inputs_length = 0;
  if (base != NULL)  mem_inputs[mem_inputs_length++] = base;
  if (index != NULL) mem_inputs[mem_inputs_length++] = index;
  if (store != NULL) mem_inputs[mem_inputs_length++] = store;

  // In the comparison below, add one to account for the control input,
  // which may be null, but always takes up a spot in the in array.
  if (mem_inputs_length + 1 < (int) load->req()) {
    // This "load" has more inputs than just the memory, base and index inputs.
    // For purposes of checking anti-dependences, we need to start
    // from the early block of only the address portion of the instruction,
    // and ignore other blocks that may have factored into the wider
    // schedule_early calculation.
    if (load->in(0) != NULL) mem_inputs[mem_inputs_length++] = load->in(0);

    Block* deepb           = NULL;        // Deepest block so far
    int    deepb_dom_depth = 0;
    for (int i = 0; i < mem_inputs_length; i++) {
      Block* inb = cfg->get_block_for_node(mem_inputs[i]);
      if (deepb_dom_depth < (int) inb->_dom_depth) {
        // The new inb must be dominated by the previous deepb.
        // The various inputs must be linearly ordered in the dom
        // tree, or else there will not be a unique deepest block.
        DEBUG_ONLY(assert_dom(deepb, inb, load, cfg));
        deepb = inb;                      // Save deepest block
        deepb_dom_depth = deepb->_dom_depth;
      }
    }
    early = deepb;
  }

  return early;
}

// This function is used by insert_anti_dependences to find unrelated loads for stores in implicit null checks.
bool PhaseCFG::unrelated_load_in_store_null_block(Node* store, Node* load) {
  // We expect an anti-dependence edge from 'load' to 'store', except when
  // implicit_null_check() has hoisted 'store' above its early block to
  // perform an implicit null check, and 'load' is placed in the null
  // block.
  // In this case it is safe to ignore the anti-dependence, as the
  // null block is only reached if 'store' tries to write to a null object and
  // 'load' reads from a non-null object (there is a preceding check for that).
  // These objects can't be the same.
  Block* store_block = get_block_for_node(store);
  Block* load_block = get_block_for_node(load);
  Node* end = store_block->end();
  if (end->is_MachNullCheck() && (end->in(1) == store) && store_block->dominates(load_block)) {
    Node* if_true = end->find_out_with(Op_IfTrue);
    assert(if_true != NULL, "null check without null projection");
    Node* null_block_region = if_true->find_out_with(Op_Region);
    assert(null_block_region != NULL, "null check without null region");
    return get_block_for_node(null_block_region) == load_block;
  }
  return false;
}

//--------------------------insert_anti_dependences---------------------------
// A load may need to witness memory that nearby stores can overwrite.
// For each nearby store, either insert an "anti-dependence" edge
// from the load to the store, or else move LCA upward to force the
// load to (eventually) be scheduled in a block above the store.
//
// Do not add edges to stores on distinct control-flow paths;
// only add edges to stores which might interfere.
//
// Return the (updated) LCA.  There will not be any possibly interfering
// store between the load's "early block" and the updated LCA.
// Any stores in the updated LCA will have new precedence edges
// back to the load.  The caller is expected to schedule the load
// in the LCA, in which case the precedence edges will make LCM
// preserve anti-dependences.  The caller may also hoist the load
// above the LCA, if it is not the early block.
Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
  assert(load->needs_anti_dependence_check(), "must be a load of some sort");
  assert(LCA != NULL, "");
  DEBUG_ONLY(Block* LCA_orig = LCA);

  // Compute the alias index.  Loads and stores with different alias indices
  // do not need anti-dependence edges.
  int load_alias_idx = C->get_alias_index(load->adr_type());
#ifdef ASSERT
  assert(Compile::AliasIdxTop <= load_alias_idx && load_alias_idx < C->num_alias_types(), "Invalid alias index");
  if (load_alias_idx == Compile::AliasIdxBot && C->AliasLevel() > 0 &&
      (PrintOpto || VerifyAliases ||
       (PrintMiscellaneous && (WizardMode || Verbose)))) {
    // Load nodes should not consume all of memory.
    // Reporting a bottom type indicates a bug in adlc.
    // If some particular type of node validly consumes all of memory,
    // sharpen the preceding "if" to exclude it, so we can catch bugs here.
    tty->print_cr("*** Possible Anti-Dependence Bug:  Load consumes all of memory.");
    load->dump(2);
    if (VerifyAliases)  assert(load_alias_idx != Compile::AliasIdxBot, "");
  }
#endif

  if (!C->alias_type(load_alias_idx)->is_rewritable()) {
    // It is impossible to spoil this load by putting stores before it,
    // because we know that the stores will never update the value
    // which 'load' must witness.
    return LCA;
  }

  node_idx_t load_index = load->_idx;

  // Note the earliest legal placement of 'load', as determined by
  // the unique point in the dom tree where all memory effects
  // and other inputs are first available.
  // (Computed by schedule_early.)
  // For normal loads, 'early' is the shallowest place (dom graph wise)
  // to look for anti-deps between this load and any store.
  Block* early = get_block_for_node(load);

  // If we are subsuming loads, compute an "early" block that only considers
  // memory or address inputs. This block may be different than the
  // schedule_early block in that it could be at an even shallower depth in the
  // dominator tree, and allow for a broader discovery of anti-dependences.
  if (C->subsume_loads()) {
    early = memory_early_block(load, early, this);
  }

  ResourceArea *area = Thread::current()->resource_area();
  Node_List worklist_mem(area);     // prior memory state to store
  Node_List worklist_store(area);   // possible-def to explore
  Node_List worklist_visited(area); // visited mergemem nodes
  Node_List non_early_stores(area); // all relevant stores outside of early
  bool must_raise_LCA = false;

  // 'load' uses some memory state; look for users of the same state.
  // Recurse through MergeMem nodes to the stores that use them.

  // Each of these stores is a possible definition of memory
  // that 'load' needs to use.  We need to force 'load'
  // to occur before each such store.  When the store is in
  // the same block as 'load', we insert an anti-dependence
  // edge load->store.

  // The relevant stores "nearby" the load consist of a tree rooted
  // at initial_mem, with internal nodes of type MergeMem.
  // Therefore, the branches visited by the worklist are of this form:
  //    initial_mem -> (MergeMem ->)* store
  // The anti-dependence constraints apply only to the fringe of this tree.

  Node* initial_mem = load->in(MemNode::Memory);
  worklist_store.push(initial_mem);
  worklist_visited.push(initial_mem);
  worklist_mem.push(NULL);
  while (worklist_store.size() > 0) {
    // Examine a nearby store to see if it might interfere with our load.
    Node* mem   = worklist_mem.pop();
    Node* store = worklist_store.pop();
    uint op = store->Opcode();

    // MergeMems do not directly have anti-deps.
    // Treat them as internal nodes in a forward tree of memory states,
    // the leaves of which are each a 'possible-def'.
    if (store == initial_mem   // root (exclusive) of tree we are searching
        || op == Op_MergeMem   // internal node of tree we are searching
        ) {
      mem = store;   // It's not a possibly interfering store.
      if (store == initial_mem)
        initial_mem = NULL;  // only process initial memory once

      for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
        store = mem->fast_out(i);
        if (store->is_MergeMem()) {
          // Be sure we don't get into combinatorial problems.
          // (Allow phis to be repeated; they can merge two relevant states.)
          uint j = worklist_visited.size();
          for (; j > 0; j--) {
            if (worklist_visited.at(j-1) == store)  break;
          }
          if (j > 0)  continue; // already on work list; do not repeat
          worklist_visited.push(store);
        }
        worklist_mem.push(mem);
        worklist_store.push(store);
      }
      continue;
    }

    if (op == Op_MachProj || op == Op_Catch)   continue;
    if (store->needs_anti_dependence_check())  continue;  // not really a store

    // Compute the alias index.  Loads and stores with different alias
    // indices do not need anti-dependence edges.
    // Wide MemBars are anti-dependent on everything (except immutable memories).
    const TypePtr* adr_type = store->adr_type();
    if (!C->can_alias(adr_type, load_alias_idx))  continue;

    // Most slow-path runtime calls do NOT modify Java memory, but
    // they can block and so write Raw memory.
    if (store->is_Mach()) {
      MachNode* mstore = store->as_Mach();
      if (load_alias_idx != Compile::AliasIdxRaw) {
        // Check for call into the runtime using the Java calling
        // convention (and from there into a wrapper); it has no
        // _method.  Can't do this optimization for Native calls because
        // they CAN write to Java memory.
        if (mstore->ideal_Opcode() == Op_CallStaticJava) {
          assert(mstore->is_MachSafePoint(), "");
          MachSafePointNode* ms = (MachSafePointNode*) mstore;
          assert(ms->is_MachCallJava(), "");
          MachCallJavaNode* mcj = (MachCallJavaNode*) ms;
          if (mcj->_method == NULL) {
            // These runtime calls do not write to Java visible memory
            // (other than Raw) and so do not require anti-dependence edges.
            continue;
          }
        }
        // Same for SafePoints: they read/write Raw but only read otherwise.
        // This is basically a workaround for SafePoints only defining control
        // instead of control + memory.
        if (mstore->ideal_Opcode() == Op_SafePoint)
          continue;
      } else {
        // Some raw memory, such as the load of "top" at an allocation,
        // can be control dependent on the previous safepoint. See
        // comments in GraphKit::allocate_heap() about control input.
        // Inserting an anti-dep between such a safepoint and a use
        // creates a cycle, and will cause a subsequent failure in
        // local scheduling.  (BugId 4919904)
        // (%%% How can a control input be a safepoint and not a projection??)
        if (mstore->ideal_Opcode() == Op_SafePoint && load->in(0) == mstore)
          continue;
      }
    }

    // Identify a block that the current load must be above,
    // or else observe that 'store' is all the way up in the
    // earliest legal block for 'load'.  In the latter case,
    // immediately insert an anti-dependence edge.
    Block* store_block = get_block_for_node(store);
    assert(store_block != NULL, "unused killing projections skipped above");

    if (store->is_Phi()) {
      // Loop-phis need to raise load before input.
      // (Other phis are treated as stores below.)
      //
      // 'load' uses memory which is one (or more) of the Phi's inputs.
      // It must be scheduled not before the Phi, but rather before
      // each of the relevant Phi inputs.
      //
      // Instead of finding the LCA of all inputs to a Phi that match 'mem',
      // we mark each corresponding predecessor block and do a combined
      // hoisting operation later (raise_LCA_above_marks).
      //
      // Do not assert(store_block != early, "Phi merging memory after access")
      // PhiNode may be at start of block 'early' with backedge to 'early'
      DEBUG_ONLY(bool found_match = false);
      for (uint j = PhiNode::Input, jmax = store->req(); j < jmax; j++) {
        if (store->in(j) == mem) {   // Found matching input?
          DEBUG_ONLY(found_match = true);
          Block* pred_block = get_block_for_node(store_block->pred(j));
          if (pred_block != early) {
            // If any predecessor of the Phi matches the load's "early block",
            // we do not need a precedence edge between the Phi and 'load'
            // since the load will be forced into a block preceding the Phi.
            pred_block->set_raise_LCA_mark(load_index);
            assert(!LCA_orig->dominates(pred_block) ||
                   early->dominates(pred_block), "early is high enough");
            must_raise_LCA = true;
          } else {
            // anti-dependent upon PHI pinned below 'early', no edge needed
            LCA = early;             // but can not schedule below 'early'
          }
        }
      }
      assert(found_match, "no worklist bug");
    } else if (store_block != early) {
      // 'store' is between the current LCA and earliest possible block.
      // Label its block, and decide later on how to raise the LCA
      // to include the effect on LCA of this store.
      // If this store's block gets chosen as the raised LCA, we
      // will find him on the non_early_stores list and stick him
      // with a precedence edge.
      // (But, don't bother if LCA is already raised all the way.)
      if (LCA != early && !unrelated_load_in_store_null_block(store, load)) {
        store_block->set_raise_LCA_mark(load_index);
        must_raise_LCA = true;
        non_early_stores.push(store);
      }
    } else {
      // Found a possibly-interfering store in the load's 'early' block.
      // This means 'load' cannot sink at all in the dominator tree.
      // Add an anti-dep edge, and squeeze 'load' into the highest block.
      assert(store != load->find_exact_control(load->in(0)), "dependence cycle found");
      if (verify) {
        assert(store->find_edge(load) != -1 || unrelated_load_in_store_null_block(store, load),
               "missing precedence edge");
      } else {
        store->add_prec(load);
      }
      LCA = early;
      // This turns off the process of gathering non_early_stores.
    }
  }
  // (Worklist is now empty; all nearby stores have been visited.)

  // Finished if 'load' must be scheduled in its 'early' block.
  // If we found any stores there, they have already been given
  // precedence edges.
  if (LCA == early)  return LCA;

  // We get here only if there are no possibly-interfering stores
  // in the load's 'early' block.
  // Move LCA up above all predecessors which contain stores we have noted.
  //
  // The raised LCA block can be a home to such interfering stores,
  // but its predecessors must not contain any such stores.
  //
  // The raised LCA will be a lower bound for placing the load,
  // preventing the load from sinking past any block containing
  // a store that may invalidate the memory state required by 'load'.
  if (must_raise_LCA)
    LCA = raise_LCA_above_marks(LCA, load->_idx, early, this);
  if (LCA == early)  return LCA;

  // Insert anti-dependence edges from 'load' to each store
  // in the non-early LCA block.
  // Mine the non_early_stores list for such stores.
  if (LCA->raise_LCA_mark() == load_index) {
    while (non_early_stores.size() > 0) {
      Node* store = non_early_stores.pop();
      Block* store_block = get_block_for_node(store);
      if (store_block == LCA) {
        // add anti_dependence from store to load in its own block
        assert(store != load->find_exact_control(load->in(0)), "dependence cycle found");
        if (verify) {
          assert(store->find_edge(load) != -1, "missing precedence edge");
        } else {
          store->add_prec(load);
        }
      } else {
        assert(store_block->raise_LCA_mark() == load_index, "block was marked");
        // Any other stores we found must be either inside the new LCA
        // or else outside the original LCA.  In the latter case, they
        // did not interfere with any use of 'load'.
        assert(LCA->dominates(store_block)
               || !LCA_orig->dominates(store_block), "no stray stores");
      }
    }
  }

  // Return the highest block containing stores; any stores
  // within that block have been given anti-dependence edges.
  return LCA;
}

// This class is used to iterate backwards over the nodes in the graph.

class Node_Backward_Iterator {

private:
  Node_Backward_Iterator();

public:
  // Constructor for the iterator
  Node_Backward_Iterator(Node *root, VectorSet &visited, Node_Stack &stack, PhaseCFG &cfg);

  // Postincrement operator to iterate over the nodes
  Node *next();

private:
  VectorSet  &_visited;
  Node_Stack &_stack;
  PhaseCFG   &_cfg;
};

// Constructor for the Node_Backward_Iterator
Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_Stack &stack, PhaseCFG &cfg)
  : _visited(visited), _stack(stack), _cfg(cfg) {
  // The stack should contain exactly the root
  stack.clear();
  stack.push(root, root->outcnt());

  // Clear the visited bits
  visited.clear();
}

// Iterator for the Node_Backward_Iterator
Node *Node_Backward_Iterator::next() {

  // If the _stack is empty, then just return NULL: finished.
  if ( !_stack.size() )
    return NULL;

  // I visit unvisited not-anti-dependence users first, then anti-dependent
  // children next. I iterate backwards to support removal of nodes.
  // The stack holds states consisting of 3 values:
  // current Def node, flag which indicates 1st/2nd pass, index of current out edge
  Node *self = (Node*)(((uintptr_t)_stack.node()) & ~1);
  bool iterate_anti_dep = (((uintptr_t)_stack.node()) & 1);
  uint idx = MIN2(_stack.index(), self->outcnt()); // Support removal of nodes.
  _stack.pop();

  // I cycle here when I am entering a deeper level of recursion.
  // The key variable 'self' was set prior to jumping here.
  while( 1 ) {

    _visited.set(self->_idx);

    // Now schedule all uses as late as possible.
    const Node* src = self->is_Proj() ? self->in(0) : self;
    uint src_rpo = _cfg.get_block_for_node(src)->_rpo;

    // Schedule all nodes in a post-order visit
    Node *unvisited = NULL;  // Unvisited anti-dependent Node, if any

    // Scan for unvisited nodes
    while (idx > 0) {
      // For all uses, schedule late
      Node* n = self->raw_out(--idx); // Use

      // Skip already visited children
      if ( _visited.test(n->_idx) )
        continue;

      // do not traverse backward control edges
      Node *use = n->is_Proj() ? n->in(0) : n;
      uint use_rpo = _cfg.get_block_for_node(use)->_rpo;

      if ( use_rpo < src_rpo )
        continue;

      // Phi nodes always precede uses in a basic block
      if ( use_rpo == src_rpo && use->is_Phi() )
        continue;

      unvisited = n;      // Found unvisited

      // Check for possible anti-dependence.
      // 1st pass: No such nodes, 2nd pass: Only such nodes.
      if (n->needs_anti_dependence_check() == iterate_anti_dep) {
        unvisited = n;    // Found unvisited
        break;
      }
    }

    // Did I find an unvisited not-anti-dependent Node?
    if (!unvisited) {
      if (!iterate_anti_dep) {
        // 2nd pass: Iterate over nodes which need an anti-dependence check.
        iterate_anti_dep = true;
        idx = self->outcnt();
        continue;
      }
      break;  // All done with children; post-visit 'self'
    }

    // Visit the unvisited Node.  Contains the obvious push to
    // indicate I'm entering a deeper level of recursion.  I push the
    // old state onto the _stack and set a new state and loop (recurse).
    _stack.push((Node*)((uintptr_t)self | (uintptr_t)iterate_anti_dep), idx);
    self = unvisited;
    iterate_anti_dep = false;
    idx = self->outcnt();
  } // End recursion loop

  return self;
}

//------------------------------ComputeLatenciesBackwards----------------------
// Compute the latency of all the instructions.
void PhaseCFG::compute_latencies_backwards(VectorSet &visited, Node_Stack &stack) {
#ifndef PRODUCT
  if (trace_opto_pipelining())
    tty->print("\n#---- ComputeLatenciesBackwards ----\n");
#endif

  Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
  Node *n;

  // Walk over all the nodes from last to first
  while ((n = iter.next())) {
    // Set the latency for the definitions of this instruction
    partial_latency_of_defs(n);
  }
} // end ComputeLatenciesBackwards

//------------------------------partial_latency_of_defs------------------------
// Compute the latency impact of this node on all defs.  This computes
// a number that increases as we approach the beginning of the routine.
void PhaseCFG::partial_latency_of_defs(Node *n) {
  // Set the latency for this instruction
#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# latency_to_inputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
    dump();
  }
#endif

  if (n->is_Proj()) {
    n = n->in(0);
  }

  if (n->is_Root()) {
    return;
  }

  uint nlen = n->len();
  uint use_latency = get_latency_for_node(n);
  uint use_pre_order = get_block_for_node(n)->_pre_order;

  for (uint j = 0; j < nlen; j++) {
    Node *def = n->in(j);

    if (!def || def == n) {
      continue;
    }

    // Walk backwards thru projections
    if (def->is_Proj()) {
      def = def->in(0);
    }

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("#    in(%2d): ", j);
      def->dump();
    }
#endif

    // If the defining block is not known, assume it is ok
    Block *def_block = get_block_for_node(def);
    uint def_pre_order = def_block ? def_block->_pre_order : 0;
    if ((use_pre_order < def_pre_order) || (use_pre_order == def_pre_order && n->is_Phi())) {
      continue;
    }

    uint delta_latency = n->latency(j);
    uint current_latency = delta_latency + use_latency;

    if (get_latency_for_node(def) < current_latency) {
      set_latency_for_node(def, current_latency);
    }

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d", use_latency, j, delta_latency, current_latency, def->_idx, get_latency_for_node(def));
    }
#endif
  }
}

//------------------------------latency_from_use-------------------------------
// Compute the latency of a specific use
int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
  // If self-reference, return no latency
  if (use == n || use->is_Root()) {
    return 0;
  }

  uint def_pre_order = get_block_for_node(def)->_pre_order;
  uint latency = 0;

  // If the use is not a projection, then it is simple...
  if (!use->is_Proj()) {
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("#    out(): ");
      use->dump();
    }
#endif

    uint use_pre_order = get_block_for_node(use)->_pre_order;

    if (use_pre_order < def_pre_order)
      return 0;

    if (use_pre_order == def_pre_order && use->is_Phi())
      return 0;

    uint nlen = use->len();
    uint nl = get_latency_for_node(use);

    for ( uint j=0; j<nlen; j++ ) {
      if (use->in(j) == n) {
        // Change this if we want local latencies
        uint ul = use->latency(j);
        uint  l = ul + nl;
        if (latency < l) latency = l;
#ifndef PRODUCT
        if (trace_opto_pipelining()) {
          tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, latency = %d",
                        nl, j, ul, l, latency);
        }
#endif
      }
    }
  } else {
    // This is a projection, just grab the latency of the use(s)
    for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
      uint l = latency_from_use(use, def, use->fast_out(j));
      if (latency < l) latency = l;
    }
  }

  return latency;
}
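
// A small worked example of the backwards latency propagation performed by
// partial_latency_of_defs() and latency_from_use() (the numbers are made up,
// for illustration only): if a def feeds two uses whose node latencies are 3
// and 7, over edges whose per-edge latencies are 2 and 1, the def's latency
// becomes max(3 + 2, 7 + 1) = 8.  Because the iterator visits nodes from last
// to first, each node ends up with the longest such path (in these units)
// toward the end of the routine, which is why the numbers grow as we approach
// the beginning of the routine.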
//------------------------------latency_from_uses------------------------------
// Compute the latency of this instruction relative to all of its uses.
// This computes a number that increases as we approach the beginning of the
// routine.
void PhaseCFG::latency_from_uses(Node *n) {
  // Set the latency for this instruction
#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# latency_from_outputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
    dump();
  }
#endif
  uint latency=0;
  const Node *def = n->is_Proj() ? n->in(0): n;

  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    uint l = latency_from_use(n, def, n->fast_out(i));

    if (latency < l) latency = l;
  }

  set_latency_for_node(n, latency);
}

//------------------------------hoist_to_cheaper_block-------------------------
// Pick a block for node self, between early and LCA, that is a cheaper
// alternative to LCA.
Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
  const double delta = 1+PROB_UNLIKELY_MAG(4);
  Block* least       = LCA;
  double least_freq  = least->_freq;
  uint target        = get_latency_for_node(self);
  uint start_latency = get_latency_for_node(LCA->head());
  uint end_latency   = get_latency_for_node(LCA->get_node(LCA->end_idx()));
  bool in_latency    = (target <= start_latency);
  const Block* root_block = get_block_for_node(_root);

  // Turn off latency scheduling if scheduling is just plain off
  if (!C->do_scheduling())
    in_latency = true;

  // Do not hoist (to cover latency) instructions which target a
  // single register.  Hoisting stretches the live range of the
  // single register and may force spilling.
  MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
  if (mach && mach->out_RegMask().is_bound1() && mach->out_RegMask().is_NotEmpty())
    in_latency = true;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# Find cheaper block for latency %d: ", get_latency_for_node(self));
    self->dump();
    tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
                  LCA->_pre_order,
                  LCA->head()->_idx,
                  start_latency,
                  LCA->get_node(LCA->end_idx())->_idx,
                  end_latency,
                  least_freq);
  }
#endif

  int cand_cnt = 0;  // number of candidates tried

  // Walk up the dominator tree from LCA (Lowest common ancestor) to
  // the earliest legal location.
  // Capture the least execution frequency.
  while (LCA != early) {
    LCA = LCA->_idom;         // Follow up the dominator tree

    if (LCA == NULL) {
      // Bailout without retry
      assert(false, "graph should be schedulable");
      C->record_method_not_compilable("late schedule failed: LCA == NULL");
      return least;
    }

    // Don't hoist machine instructions to the root basic block
    if (mach && LCA == root_block)
      break;

    if (self->is_memory_writer() &&
        (LCA->_loop->depth() > early->_loop->depth())) {
      // LCA is an invalid placement for a memory writer: choosing it would
      // cause memory interference, as illustrated in schedule_late().
      continue;
    }
    verify_memory_writer_placement(LCA, self);

    uint start_lat = get_latency_for_node(LCA->head());
    uint end_idx   = LCA->end_idx();
    uint end_lat   = get_latency_for_node(LCA->get_node(end_idx));
    double LCA_freq = LCA->_freq;
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
                    LCA->_pre_order, LCA->head()->_idx, start_lat, end_idx, end_lat, LCA_freq);
    }
#endif
    cand_cnt++;
    if (LCA_freq < least_freq              || // Better Frequency
        (StressGCM && C->randomized_select(cand_cnt)) || // Should be randomly accepted in stress mode
        (!StressGCM                        &&    // Otherwise, choose with latency
         !in_latency                       &&    // No block containing latency
         LCA_freq < least_freq * delta     &&    // No worse frequency
         target >= end_lat                 &&    // within latency range
         !self->is_iteratively_computed() )      // But don't hoist IV increments
             // because they may end up above other uses of their phi forcing
             // their result register to be different from their input.
       ) {
      least = LCA;            // Found cheaper block
      least_freq = LCA_freq;
      start_latency = start_lat;
      end_latency = end_lat;
      if (target <= start_lat)
        in_latency = true;
    }
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print_cr("#  Choose block B%d with start latency=%d and freq=%g",
                  least->_pre_order, start_latency, least_freq);
  }
#endif

  // See if the latency needs to be updated
  if (target < end_latency) {
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("#  Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
    }
#endif
    set_latency_for_node(self, end_latency);
    partial_latency_of_defs(self);
  }

  return least;
}


//------------------------------schedule_late-----------------------------------
// Now schedule all codes as LATE as possible.  This is the LCA in the
// dominator tree of all USES of a value.
// Pick the block with the least
// loop nesting depth that is lowest in the dominator tree.
extern const char must_clone[];
void PhaseCFG::schedule_late(VectorSet &visited, Node_Stack &stack) {
#ifndef PRODUCT
  if (trace_opto_pipelining())
    tty->print("\n#---- schedule_late ----\n");
#endif

  Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
  Node *self;

  // Walk over all the nodes from last to first
  while ((self = iter.next())) {
    Block* early = get_block_for_node(self); // Earliest legal placement

    if (self->is_top()) {
      // Top node goes in bb #2 with other constants.
      // It must be special-cased, because it has no out edges.
      early->add_inst(self);
      continue;
    }

    // No uses, just terminate
    if (self->outcnt() == 0) {
      assert(self->is_MachProj(), "sanity");
      continue;                   // Must be a dead machine projection
    }

    // If node is pinned in the block, then no scheduling can be done.
    if( self->pinned() )          // Pinned in block?
      continue;

#ifdef ASSERT
    // Assert that memory writers (e.g. stores) have a "home" block (the block
    // given by their control input), and that this block corresponds to their
    // earliest possible placement. This guarantees that
    // hoist_to_cheaper_block() will always have at least one valid choice.
    if (self->is_memory_writer()) {
      assert(find_block_for_node(self->in(0)) == early,
             "The home of a memory writer must also be its earliest placement");
    }
#endif

    MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
    if (mach) {
      switch (mach->ideal_Opcode()) {
      case Op_CreateEx:
        // Don't move exception creation
        early->add_inst(self);
        continue;
        break;
      case Op_CheckCastPP: {
        // Don't move CheckCastPP nodes away from their input, if the input
        // is a rawptr (5071820).
        Node *def = self->in(1);
        if (def != NULL && def->bottom_type()->base() == Type::RawPtr) {
          early->add_inst(self);
#ifdef ASSERT
          _raw_oops.push(def);
#endif
          continue;
        }
        break;
      }
      default:
        break;
      }
      if (C->has_irreducible_loop() && self->is_memory_writer()) {
        // If the CFG is irreducible, place memory writers in their home block.
        // This prevents hoist_to_cheaper_block() from accidentally placing such
        // nodes into deeper loops, as in the following example:
        //
        // Home placement of store in B1 (loop L1):
        //
        // B1 (L1):
        //   m1 <- ..
        //   m2 <- store m1, ..
        // B2 (L2):
        //   jump B2
        // B3 (L1):
        //   .. <- .. m2, ..
        //
        // Wrong "hoisting" of store to B2 (in loop L2, child of L1):
        //
        // B1 (L1):
        //   m1 <- ..
        // B2 (L2):
        //   m2 <- store m1, ..
        //   # Wrong: m1 and m2 interfere at this point.
        //   jump B2
        // B3 (L1):
        //   .. <- .. m2, ..
        //
        // This "hoist inversion" can happen due to different factors such as
        // inaccurate estimation of frequencies for irreducible CFGs, and loops
        // with always-taken exits in reducible CFGs. In the reducible case,
        // hoist inversion is prevented by discarding invalid blocks (those in
        // deeper loops than the home block).
        // In the irreducible case, the invalid blocks cannot be identified
        // due to incomplete loop nesting information, hence a conservative
        // solution is taken.
#ifndef PRODUCT
        if (trace_opto_pipelining()) {
          tty->print_cr("# Irreducible loops: schedule in home block B%d:",
                        early->_pre_order);
          self->dump();
        }
#endif
        schedule_node_into_block(self, early);
        continue;
      }
    }

    // Gather LCA of all uses
    Block *LCA = NULL;
    {
      for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
        // For all uses, find LCA
        Node* use = self->fast_out(i);
        LCA = raise_LCA_above_use(LCA, use, self, this);
      }
      guarantee(LCA != NULL, "There must be a LCA");
    }  // (Hide defs of imax, i from rest of block.)

    // Place temps in the block of their use.  This isn't a
    // requirement for correctness but it reduces useless
    // interference between temps and other nodes.
    if (mach != NULL && mach->is_MachTemp()) {
      map_node_to_block(self, LCA);
      LCA->add_inst(self);
      continue;
    }

    // Check if 'self' could be anti-dependent on memory
    if (self->needs_anti_dependence_check()) {
      // Hoist LCA above possible-defs and insert anti-dependences to
      // defs in new LCA block.
      LCA = insert_anti_dependences(LCA, self);
    }

    if (early->_dom_depth > LCA->_dom_depth) {
      // Somehow the LCA has moved above the earliest legal point.
      // (One way this can happen is via memory_early_block.)
      if (C->subsume_loads() == true && !C->failing()) {
        // Retry with subsume_loads == false
        // If this is the first failure, the sentinel string will "stick"
        // to the Compile object, and the C2Compiler will see it and retry.
        C->record_failure(C2Compiler::retry_no_subsuming_loads());
      } else {
        // Bailout without retry when (early->_dom_depth > LCA->_dom_depth)
        assert(false, "graph should be schedulable");
        C->record_method_not_compilable("late schedule failed: incorrect graph");
      }
      return;
    }

    if (self->is_memory_writer()) {
      // If the LCA of a memory writer is a descendant of its home loop, hoist
      // it into a valid placement.
      while (LCA->_loop->depth() > early->_loop->depth()) {
        LCA = LCA->_idom;
      }
      assert(LCA != NULL, "a valid LCA must exist");
      verify_memory_writer_placement(LCA, self);
    }

    // If there is no opportunity to hoist, then we're done.
    // In stress mode, try to hoist even the single operations.
    bool try_to_hoist = StressGCM || (LCA != early);

    // Must clone guys stay next to use; no hoisting allowed.
    // Also cannot hoist guys that alter memory or are otherwise not
    // allocatable (hoisting can make a value live longer, leading to
    // anti and output dependency problems which are normally resolved
    // by the register allocator giving everyone a different register).
    if (mach != NULL && must_clone[mach->ideal_Opcode()])
      try_to_hoist = false;

    Block* late = NULL;
    if (try_to_hoist) {
      // Now find the block with the least execution frequency.
      // Start at the latest schedule and work up to the earliest schedule
      // in the dominator tree.
      // Thus the Node will dominate all its uses.
      late = hoist_to_cheaper_block(LCA, early, self);
    } else {
      // Just use the LCA of the uses.
      late = LCA;
    }

    // Put the node into target block
    schedule_node_into_block(self, late);

#ifdef ASSERT
    if (self->needs_anti_dependence_check()) {
      // since precedence edges are only inserted when we're sure they
      // are needed make sure that after placement in a block we don't
      // need any new precedence edges.
      verify_anti_dependences(late, self);
    }
#endif
  } // Loop until all nodes have been visited

} // end ScheduleLate

//------------------------------GlobalCodeMotion-------------------------------
void PhaseCFG::global_code_motion() {
  ResourceMark rm;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Start GlobalCodeMotion ----\n");
  }
#endif

  // Initialize the node to block mapping for things on the proj_list
  for (uint i = 0; i < _matcher.number_of_projections(); i++) {
    unmap_node_from_block(_matcher.get_projection(i));
  }

  // Set the basic block for Nodes pinned into blocks
  VectorSet visited;
  schedule_pinned_nodes(visited);

  // Find the earliest Block any instruction can be placed in.  Some
  // instructions are pinned into Blocks.  Unpinned instructions can
  // appear in the last block in which all their inputs occur.
  visited.clear();
  Node_Stack stack((C->live_nodes() >> 2) + 16); // pre-grow
  if (!schedule_early(visited, stack)) {
    // Bailout without retry
    C->record_method_not_compilable("early schedule failed");
    return;
  }

  // Build Def-Use edges.
  // Compute the latency information (via backwards walk) for all the
  // instructions in the graph
  _node_latency = new GrowableArray<uint>(); // resource_area allocation

  if (C->do_scheduling()) {
    compute_latencies_backwards(visited, stack);
  }

  // Now schedule all codes as LATE as possible.  This is the LCA in the
  // dominator tree of all USES of a value.  Pick the block with the least
  // loop nesting depth that is lowest in the dominator tree.
  // ( visited.clear() called in schedule_late()->Node_Backward_Iterator() )
  schedule_late(visited, stack);
  if (C->failing()) {
    return;
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Detect implicit null checks ----\n");
  }
#endif

  // Detect implicit-null-check opportunities.  Basically, find NULL checks
  // with suitable memory ops nearby.  Use the memory op to do the NULL check.
  // I can generate a memory op if there is not one nearby.
  if (C->is_method_compilation()) {
    // By reversing the loop direction we get a very minor gain on mpegaudio.
    // Feel free to revert to a forward loop for clarity.
    // for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) {
    for (int i = _matcher._null_check_tests.size() - 2; i >= 0; i -= 2) {
      Node* proj = _matcher._null_check_tests[i];
      Node* val  = _matcher._null_check_tests[i + 1];
      Block* block = get_block_for_node(proj);
      implicit_null_check(block, proj, val, C->allowed_deopt_reasons());
      // The implicit_null_check will only perform the transformation
      // if the null branch is truly uncommon, *and* it leads to an
      // uncommon trap.
      // Combined with the too_many_traps guards
      // above, this prevents SEGV storms reported in 6366351,
      // by recompiling offending methods without this optimization.
    }
  }

  bool block_size_threshold_ok = false;
  intptr_t *recalc_pressure_nodes = NULL;
  if (OptoRegScheduling) {
    for (uint i = 0; i < number_of_blocks(); i++) {
      Block* block = get_block(i);
      if (block->number_of_nodes() > 10) {
        block_size_threshold_ok = true;
        break;
      }
    }
  }

  // Enabling the scheduler for register pressure, plus finding blocks of a
  // size worth scheduling, is key to enabling this feature.
  PhaseChaitin regalloc(C->unique(), *this, _matcher, true);
  ResourceArea live_arena(mtCompiler);      // Arena for liveness
  ResourceMark rm_live(&live_arena);
  PhaseLive live(*this, regalloc._lrg_map.names(), &live_arena, true);
  PhaseIFG ifg(&live_arena);
  if (OptoRegScheduling && block_size_threshold_ok) {
    regalloc.mark_ssa();
    Compile::TracePhase tp("computeLive", &timers[_t_computeLive]);
    rm_live.reset_to_mark();             // Reclaim working storage
    IndexSet::reset_memory(C, &live_arena);
    uint node_size = regalloc._lrg_map.max_lrg_id();
    ifg.init(node_size);                 // Empty IFG
    regalloc.set_ifg(ifg);
    regalloc.set_live(live);
    regalloc.gather_lrg_masks(false);    // Collect LRG masks
    live.compute(node_size);             // Compute liveness

    recalc_pressure_nodes = NEW_RESOURCE_ARRAY(intptr_t, node_size);
    for (uint i = 0; i < node_size; i++) {
      recalc_pressure_nodes[i] = 0;
    }
  }
  _regalloc = &regalloc;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Start Local Scheduling ----\n");
  }
#endif

  // Schedule locally.  Right now a simple topological sort.
  // Later, do a real latency aware scheduler.
  GrowableArray<int> ready_cnt(C->unique(), C->unique(), -1);
  visited.reset();
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    if (!schedule_local(block, ready_cnt, visited, recalc_pressure_nodes)) {
      if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
        C->record_method_not_compilable("local schedule failed");
      }
      _regalloc = NULL;
      return;
    }
  }
  _regalloc = NULL;

  // If we inserted any instructions between a Call and its CatchNode,
  // clone the instructions on all paths below the Catch.
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    call_catch_cleanup(block);
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- After GlobalCodeMotion ----\n");
    for (uint i = 0; i < number_of_blocks(); i++) {
      Block* block = get_block(i);
      block->dump();
    }
  }
#endif
  // Dead.
  _node_latency = (GrowableArray<uint> *)((intptr_t)0xdeadbeef);
}

bool PhaseCFG::do_global_code_motion() {

  build_dominator_tree();
  if (C->failing()) {
    return false;
  }

  NOT_PRODUCT( C->verify_graph_edges(); )

  estimate_block_frequency();

  global_code_motion();

  if (C->failing()) {
    return false;
  }

  return true;
}

//------------------------------Estimate_Block_Frequency-----------------------
// Estimate block frequencies based on IfNode probabilities.
void PhaseCFG::estimate_block_frequency() {

  // Force conditional branches leading to uncommon traps to be unlikely,
  // not because we get to the uncommon_trap with less relative frequency,
  // but because an uncommon_trap typically causes a deopt, so we only get
  if (C->do_freq_based_layout()) {
    Block_List worklist;
    Block* root_blk = get_block(0);
    for (uint i = 1; i < root_blk->num_preds(); i++) {
      Block *pb = get_block_for_node(root_blk->pred(i));
      if (pb->has_uncommon_code()) {
        worklist.push(pb);
      }
    }
    while (worklist.size() > 0) {
      Block* uct = worklist.pop();
      if (uct == get_root_block()) {
        continue;
      }
      for (uint i = 1; i < uct->num_preds(); i++) {
        Block *pb = get_block_for_node(uct->pred(i));
        if (pb->_num_succs == 1) {
          worklist.push(pb);
        } else if (pb->num_fall_throughs() == 2) {
          pb->update_uncommon_branch(uct);
        }
      }
    }
  }

  // Create the loop tree and calculate loop depth.
  _root_loop = create_loop_tree();
  _root_loop->compute_loop_depth(0);

  // Compute block frequency of each block, relative to a single loop entry.
  _root_loop->compute_freq();

  // Adjust all frequencies to be relative to a single method entry
  _root_loop->_freq = 1.0;
  _root_loop->scale_freq();

  // Save outermost loop frequency for LRG frequency threshold
  _outer_loop_frequency = _root_loop->outer_loop_freq();

  // Force paths ending at uncommon traps to be infrequent
  if (!C->do_freq_based_layout()) {
    Block_List worklist;
    Block* root_blk = get_block(0);
    for (uint i = 1; i < root_blk->num_preds(); i++) {
      Block *pb = get_block_for_node(root_blk->pred(i));
      if (pb->has_uncommon_code()) {
        worklist.push(pb);
      }
    }
    while (worklist.size() > 0) {
      Block* uct = worklist.pop();
      uct->_freq = PROB_MIN;
      for (uint i = 1; i < uct->num_preds(); i++) {
        Block *pb = get_block_for_node(uct->pred(i));
        if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) {
          worklist.push(pb);
        }
      }
    }
  }

#ifdef ASSERT
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* b = get_block(i);
    assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency");
  }
#endif

#ifndef PRODUCT
  if (PrintCFGBlockFreq) {
    tty->print_cr("CFG Block Frequencies");
    _root_loop->dump_tree();
    if (Verbose) {
      tty->print_cr("PhaseCFG dump");
      dump();
      tty->print_cr("Node dump");
      _root->dump(99999);
    }
  }
#endif
}
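// Overview (illustrative, not part of the original comments): the frequency
// pipeline above is create_loop_tree() -> compute_loop_depth() ->
// compute_freq() -> scale_freq(). compute_freq() works bottom-up and assigns
// each block a frequency per single entry of its enclosing loop (head = 1.0);
// scale_freq() then works top-down and multiplies by estimated trip counts.
// With hypothetical numbers: a block executed 0.8 times per iteration of a
// loop whose entry frequency is 0.5 and whose estimated trip count is 10 ends
// up with a final frequency of about 0.5 * 10 * 0.8 = 4.0.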
//----------------------------create_loop_tree--------------------------------
// Create a loop tree from the CFG
CFGLoop* PhaseCFG::create_loop_tree() {

#ifdef ASSERT
  assert(get_block(0) == get_root_block(), "first block should be root block");
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    // Check that _loop fields are clear...we could clear them if not.
    assert(block->_loop == NULL, "clear _loop expected");
    // Sanity check that the RPO numbering is reflected in the _blocks array.
    // It doesn't have to be for the loop tree to be built, but if it is not,
    // then the blocks have been reordered since dom graph building...which
    // may call the RPO numbering into question.
    assert(block->_rpo == i, "unexpected reverse post order number");
  }
#endif

  int idct = 0;
  CFGLoop* root_loop = new CFGLoop(idct++);

  Block_List worklist;

  // Assign blocks to loops
  for (uint i = number_of_blocks() - 1; i > 0; i--) { // skip Root block
    Block* block = get_block(i);

    if (block->head()->is_Loop()) {
      Block* loop_head = block;
      assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
      Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
      Block* tail = get_block_for_node(tail_n);

      // Defensively filter out Loop nodes for non-single-entry loops.
      // For all reasonable loops, the head occurs before the tail in RPO.
      if (i <= tail->_rpo) {

        // The tail and (recursive) predecessors of the tail
        // are made members of a new loop.

        assert(worklist.size() == 0, "nonempty worklist");
        CFGLoop* nloop = new CFGLoop(idct++);
        assert(loop_head->_loop == NULL, "just checking");
        loop_head->_loop = nloop;
        // Add to nloop so push_pred() will skip over inner loops
        nloop->add_member(loop_head);
        nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, this);

        while (worklist.size() > 0) {
          Block* member = worklist.pop();
          if (member != loop_head) {
            for (uint j = 1; j < member->num_preds(); j++) {
              nloop->push_pred(member, j, worklist, this);
            }
          }
        }
      }
    }
  }

  // Create a member list for each loop consisting
  // of both blocks and (immediate child) loops.
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    CFGLoop* lp = block->_loop;
    if (lp == NULL) {
      // Not assigned to a loop. Add it to the method's pseudo loop.
      block->_loop = root_loop;
      lp = root_loop;
    }
    if (lp == root_loop || block != lp->head()) { // loop heads are already members
      lp->add_member(block);
    }
    if (lp != root_loop) {
      if (lp->parent() == NULL) {
        // Not a nested loop. Make it a child of the method's pseudo loop.
        root_loop->add_nested_loop(lp);
      }
      if (block == lp->head()) {
        // Add nested loop to member list of parent loop.
        lp->parent()->add_member(lp);
      }
    }
  }

  return root_loop;
}

//------------------------------push_pred--------------------------------------
void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) {
  Node* pred_n = blk->pred(i);
  Block* pred = cfg->get_block_for_node(pred_n);
  CFGLoop *pred_loop = pred->_loop;
  if (pred_loop == NULL) {
    // Filter out blocks for non-single-entry loops.
    // For all reasonable loops, the head occurs before the tail in RPO.
    if (pred->_rpo > head()->_rpo) {
      pred->_loop = this;
      worklist.push(pred);
    }
  } else if (pred_loop != this) {
    // Nested loop.
    while (pred_loop->_parent != NULL && pred_loop->_parent != this) {
      pred_loop = pred_loop->_parent;
    }
    // Make pred's loop be a child
    if (pred_loop->_parent == NULL) {
      add_nested_loop(pred_loop);
      // Continue with loop entry predecessor.
      Block* pred_head = pred_loop->head();
      assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
      assert(pred_head != head(), "loop head in only one loop");
      push_pred(pred_head, LoopNode::EntryControl, worklist, cfg);
    } else {
      assert(pred_loop->_parent == this && _parent == NULL, "just checking");
    }
  }
}

//------------------------------add_nested_loop--------------------------------
// Make cl a child of the current loop in the loop tree.
void CFGLoop::add_nested_loop(CFGLoop* cl) {
  assert(_parent == NULL, "no parent yet");
  assert(cl != this, "not my own parent");
  cl->_parent = this;
  CFGLoop* ch = _child;
  if (ch == NULL) {
    _child = cl;
  } else {
    while (ch->_sibling != NULL) { ch = ch->_sibling; }
    ch->_sibling = cl;
  }
}
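// Loop-tree representation (illustrative note): each CFGLoop records its
// _parent, its first child in _child, and further children chained through
// _sibling. For a hypothetical nesting method { L1 { L3 } L2 }, this is stored
// as method._child = L1, L1._sibling = L2, L1._child = L3, which is exactly
// the shape walked by compute_loop_depth() and compute_freq() below.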
//------------------------------compute_loop_depth-----------------------------
// Store the loop depth in each CFGLoop object.
// Recursively walk the children to do the same for them.
void CFGLoop::compute_loop_depth(int depth) {
  _depth = depth;
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->compute_loop_depth(depth + 1);
    ch = ch->_sibling;
  }
}

//------------------------------compute_freq-----------------------------------
// Compute the frequency of each block and loop, relative to a single entry
// into the dominating loop head.
void CFGLoop::compute_freq() {
  // Bottom up traversal of loop tree (visit inner loops first.)
  // Set loop head frequency to 1.0, then transitively
  // compute frequency for all successors in the loop,
  // as well as for each exit edge. Inner loops are
  // treated as single blocks with loop exit targets
  // as the successor blocks.

  // Nested loops first
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->compute_freq();
    ch = ch->_sibling;
  }
  assert (_members.length() > 0, "no empty loops");
  Block* hd = head();
  hd->_freq = 1.0;
  for (int i = 0; i < _members.length(); i++) {
    CFGElement* s = _members.at(i);
    double freq = s->_freq;
    if (s->is_block()) {
      Block* b = s->as_Block();
      for (uint j = 0; j < b->_num_succs; j++) {
        Block* sb = b->_succs[j];
        update_succ_freq(sb, freq * b->succ_prob(j));
      }
    } else {
      CFGLoop* lp = s->as_CFGLoop();
      assert(lp->_parent == this, "immediate child");
      for (int k = 0; k < lp->_exits.length(); k++) {
        Block* eb = lp->_exits.at(k).get_target();
        double prob = lp->_exits.at(k).get_prob();
        update_succ_freq(eb, freq * prob);
      }
    }
  }

  // For all loops other than the outer, "method" loop,
  // sum and normalize the exit probability. The "method" loop
  // should keep the initial exit probability of 1, so that
  // inner blocks do not get erroneously scaled.
  if (_depth != 0) {
    // Total the exit probabilities for this loop.
    double exits_sum = 0.0f;
    for (int i = 0; i < _exits.length(); i++) {
      exits_sum += _exits.at(i).get_prob();
    }

    // Normalize the exit probabilities. Until now, the
    // probabilities estimate the probability of exit per
    // single loop iteration; afterward, they estimate
    // the probability of exit per loop entry.
    for (int i = 0; i < _exits.length(); i++) {
      Block* et = _exits.at(i).get_target();
      float new_prob = 0.0f;
      if (_exits.at(i).get_prob() > 0.0f) {
        new_prob = _exits.at(i).get_prob() / exits_sum;
      }
      BlockProbPair bpp(et, new_prob);
      _exits.at_put(i, bpp);
    }

    // Save the total, but guard against unreasonable probability,
    // as the value is used to estimate the loop trip count.
    // An infinite trip count would blur relative block
    // frequencies.
    if (exits_sum > 1.0f) exits_sum = 1.0;
    if (exits_sum < PROB_MIN) exits_sum = PROB_MIN;
    _exit_prob = exits_sum;
  }
}
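// Worked example for the normalization above (hypothetical numbers): a loop
// with two exits whose per-iteration probabilities are 0.01 and 0.03 gives
// exits_sum = 0.04, so the stored per-entry probabilities become 0.25 and
// 0.75, and _exit_prob = 0.04 suggests an expected trip count of roughly
// 1/0.04 = 25 iterations (subject to the clamping above).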
//------------------------------succ_prob-------------------------------------
// Determine the probability of reaching successor 'i' from the receiver block.
float Block::succ_prob(uint i) {
  int eidx = end_idx();
  Node *n = get_node(eidx); // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // Can only reach here if called after lcm. The original Op_If is gone,
      // so we attempt to infer the probability from one or both of the
      // successor blocks.
      assert(_num_succs == 2, "expecting 2 successors of a null check");
      // If either successor has only one predecessor, then the
      // probability estimate can be derived using the
      // relative frequency of the successor and this block.
      if (_succs[i]->num_preds() == 2) {
        return _succs[i]->_freq / _freq;
      } else if (_succs[1-i]->num_preds() == 2) {
        return 1 - (_succs[1-i]->_freq / _freq);
      } else {
        // Estimate using both successor frequencies
        float freq = _succs[i]->_freq;
        return freq / (freq + _succs[1-i]->_freq);
      }
    }
    op = n->as_Mach()->ideal_Opcode();
  }

  // Switch on branch type
  switch( op ) {
  case Op_CountedLoopEnd:
  case Op_If: {
    assert (i < 2, "just checking");
    // Conditionals pass on only part of their frequency
    float prob = n->as_MachIf()->_prob;
    assert(prob >= 0.0 && prob <= 1.0, "out of range probability");
    // If succ[i] is the FALSE branch, invert path info
    if( get_node(i + eidx + 1)->Opcode() == Op_IfFalse ) {
      return 1.0f - prob; // not taken
    } else {
      return prob; // taken
    }
  }

  case Op_Jump:
    return n->as_MachJump()->_probs[get_node(i + eidx + 1)->as_JumpProj()->_con];

  case Op_Catch: {
    const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
    if (ci->_con == CatchProjNode::fall_through_index) {
      // Fall-thru path gets the lion's share.
      return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
    } else {
      // Presume exceptional paths are equally unlikely
      return PROB_UNLIKELY_MAG(5);
    }
  }

  case Op_Root:
  case Op_Goto:
    // Pass frequency straight thru to target
    return 1.0f;

  case Op_NeverBranch:
    return 0.0f;

  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    // Do not push out freq to root block
    return 0.0f;

  default:
    ShouldNotReachHere();
  }

  return 0.0f;
}

//------------------------------num_fall_throughs-----------------------------
// Return the number of fall-through candidates for a block
int Block::num_fall_throughs() {
  int eidx = end_idx();
  Node *n = get_node(eidx); // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // In theory, either side can fall-thru; for simplicity's sake,
      // let's say only the false branch can now.
      return 1;
    }
    op = n->as_Mach()->ideal_Opcode();
  }

  // Switch on branch type
  switch( op ) {
  case Op_CountedLoopEnd:
  case Op_If:
    return 2;

  case Op_Root:
  case Op_Goto:
    return 1;

  case Op_Catch: {
    for (uint i = 0; i < _num_succs; i++) {
      const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
      if (ci->_con == CatchProjNode::fall_through_index) {
        return 1;
      }
    }
    return 0;
  }

  case Op_Jump:
  case Op_NeverBranch:
  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    return 0;

  default:
    ShouldNotReachHere();
  }

  return 0;
}
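// Consistency note (illustrative, not part of the original comments):
// num_fall_throughs() above and succ_fall_through() below must agree on which
// successors may be laid out as fall-through: a MachNullCheck reports one
// candidate and only its IfFalse projection qualifies, while a Catch reports
// at most one candidate, namely the fall_through_index projection.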
//------------------------------succ_fall_through-----------------------------
// Return true if a specific successor could be a fall-through target.
bool Block::succ_fall_through(uint i) {
  int eidx = end_idx();
  Node *n = get_node(eidx); // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // In theory, either side can fall-thru; for simplicity's sake,
      // let's say only the false branch can now.
      return get_node(i + eidx + 1)->Opcode() == Op_IfFalse;
    }
    op = n->as_Mach()->ideal_Opcode();
  }

  // Switch on branch type
  switch( op ) {
  case Op_CountedLoopEnd:
  case Op_If:
  case Op_Root:
  case Op_Goto:
    return true;

  case Op_Catch: {
    const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
    return ci->_con == CatchProjNode::fall_through_index;
  }

  case Op_Jump:
  case Op_NeverBranch:
  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    return false;

  default:
    ShouldNotReachHere();
  }

  return false;
}

//------------------------------update_uncommon_branch------------------------
// Update the probability of a two-way branch to be uncommon
void Block::update_uncommon_branch(Block* ub) {
  int eidx = end_idx();
  Node *n = get_node(eidx); // Get ending Node

  int op = n->as_Mach()->ideal_Opcode();

  assert(op == Op_CountedLoopEnd || op == Op_If, "must be an If");
  assert(num_fall_throughs() == 2, "must be a two way branch block");

  // Which successor is ub?
  uint s;
  for (s = 0; s < _num_succs; s++) {
    if (_succs[s] == ub) break;
  }
  assert(s < 2, "uncommon successor must be found");

  // If ub is the true path, make the probability small; else
  // ub is the false path, and make the probability large.
  bool invert = (get_node(s + eidx + 1)->Opcode() == Op_IfFalse);

  // Get existing probability
  float p = n->as_MachIf()->_prob;

  if (invert) p = 1.0 - p;
  if (p > PROB_MIN) {
    p = PROB_MIN;
  }
  if (invert) p = 1.0 - p;

  n->as_MachIf()->_prob = p;
}

//------------------------------update_succ_freq-------------------------------
// Update the appropriate frequency associated with block 'b', a successor of
// a block in this loop.
void CFGLoop::update_succ_freq(Block* b, double freq) {
  if (b->_loop == this) {
    if (b == head()) {
      // back branch within the loop
      // Do nothing now, the loop carried frequency will be
      // adjusted later in scale_freq().
    } else {
      // simple branch within the loop
      b->_freq += freq;
    }
  } else if (!in_loop_nest(b)) {
    // branch is exit from this loop
    BlockProbPair bpp(b, freq);
    _exits.append(bpp);
  } else {
    // branch into nested loop
    CFGLoop* ch = b->_loop;
    ch->_freq += freq;
  }
}

//------------------------------in_loop_nest-----------------------------------
// Determine if block b is in the receiver's loop nest.
bool CFGLoop::in_loop_nest(Block* b) {
  int depth = _depth;
  CFGLoop* b_loop = b->_loop;
  int b_depth = b_loop->_depth;
  if (depth == b_depth) {
    return true;
  }
  while (b_depth > depth) {
    b_loop = b_loop->_parent;
    b_depth = b_loop->_depth;
  }
  return b_loop == this;
}
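// Scaling note (hypothetical numbers, not part of the original comments):
// scale_freq() below multiplies a loop's entry frequency by its estimated trip
// count and then scales every member by that product. Because a nested CFGLoop
// is itself a member of its parent, the factors compound: an inner loop entered
// 0.3 times per outer iteration, inside an outer loop scaled to 5.0, is first
// scaled to 1.5 before its own recursive call multiplies in its own trip count.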
//------------------------------scale_freq-------------------------------------
// Scale frequency of loops and blocks by trip counts from outer loops
// Do a top down traversal of loop tree (visit outer loops first.)
void CFGLoop::scale_freq() {
  double loop_freq = _freq * trip_count();
  _freq = loop_freq;
  for (int i = 0; i < _members.length(); i++) {
    CFGElement* s = _members.at(i);
    double block_freq = s->_freq * loop_freq;
    if (g_isnan(block_freq) || block_freq < MIN_BLOCK_FREQUENCY)
      block_freq = MIN_BLOCK_FREQUENCY;
    s->_freq = block_freq;
  }
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->scale_freq();
    ch = ch->_sibling;
  }
}

// Frequency of outer loop
double CFGLoop::outer_loop_freq() const {
  if (_child != NULL) {
    return _child->_freq;
  }
  return _freq;
}

#ifndef PRODUCT
//------------------------------dump_tree--------------------------------------
void CFGLoop::dump_tree() const {
  dump();
  if (_child != NULL) _child->dump_tree();
  if (_sibling != NULL) _sibling->dump_tree();
}

//------------------------------dump-------------------------------------------
void CFGLoop::dump() const {
  for (int i = 0; i < _depth; i++) tty->print(" ");
  tty->print("%s: %d trip_count: %6.0f freq: %6.0f\n",
             _depth == 0 ? "Method" : "Loop", _id, trip_count(), _freq);
  for (int i = 0; i < _depth; i++) tty->print(" ");
  tty->print(" members:");
  int k = 0;
  for (int i = 0; i < _members.length(); i++) {
    if (k++ >= 6) {
      tty->print("\n ");
      for (int j = 0; j < _depth+1; j++) tty->print(" ");
      k = 0;
    }
    CFGElement *s = _members.at(i);
    if (s->is_block()) {
      Block *b = s->as_Block();
      tty->print(" B%d(%6.3f)", b->_pre_order, b->_freq);
    } else {
      CFGLoop* lp = s->as_CFGLoop();
      tty->print(" L%d(%6.3f)", lp->_id, lp->_freq);
    }
  }
  tty->print("\n");
  for (int i = 0; i < _depth; i++) tty->print(" ");
  tty->print(" exits: ");
  k = 0;
  for (int i = 0; i < _exits.length(); i++) {
    if (k++ >= 7) {
      tty->print("\n ");
      for (int j = 0; j < _depth+1; j++) tty->print(" ");
      k = 0;
    }
    Block *blk = _exits.at(i).get_target();
    double prob = _exits.at(i).get_prob();
    tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100));
  }
  tty->print("\n");
}
#endif