Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/opto/gcm.cpp
/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/block.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/machnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "runtime/deoptimization.hpp"
#if defined AD_MD_HPP
# include AD_MD_HPP
#elif defined TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
#elif defined TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#elif defined TARGET_ARCH_MODEL_aarch32
# include "adfiles/ad_aarch32.hpp"
#elif defined TARGET_ARCH_MODEL_aarch64
# include "adfiles/ad_aarch64.hpp"
#elif defined TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#elif defined TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
#elif defined TARGET_ARCH_MODEL_ppc_64
# include "adfiles/ad_ppc_64.hpp"
#endif


// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

// To avoid float value underflow
#define MIN_BLOCK_FREQUENCY 1.e-35f

//----------------------------schedule_node_into_block-------------------------
// Insert node n into block b. Look for projections of n and make sure they
// are in b also.
void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
  // Set basic block of n, Add n to b,
  map_node_to_block(n, b);
  b->add_inst(n);

  // After Matching, nearly any old Node may have projections trailing it.
  // These are usually machine-dependent flags.  In any case, they might
  // float to another block below this one.
  // Move them up.
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* use = n->fast_out(i);
    if (use->is_Proj()) {
      Block* buse = get_block_for_node(use);
      if (buse != b) {              // In wrong block?
        if (buse != NULL) {
          buse->find_remove(use);   // Remove from wrong block
        }
        map_node_to_block(use, b);
        b->add_inst(use);
      }
    }
  }
}

//----------------------------replace_block_proj_ctrl-------------------------
// Nodes that have is_block_proj() nodes as their control need to use
// the appropriate Region for their actual block as their control since
// the projection will be in a predecessor block.
void PhaseCFG::replace_block_proj_ctrl( Node *n ) {
  const Node *in0 = n->in(0);
  assert(in0 != NULL, "Only control-dependent");
  const Node *p = in0->is_block_proj();
  if (p != NULL && p != n) {    // Control from a block projection?
    assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here");
    // Find trailing Region
    Block *pb = get_block_for_node(in0); // Block-projection already has basic block
    uint j = 0;
    if (pb->_num_succs != 1) {  // More than 1 successor?
      // Search for successor
      uint max = pb->number_of_nodes();
      assert( max > 1, "" );
      uint start = max - pb->_num_succs;
      // Find which output path belongs to projection
      for (j = start; j < max; j++) {
        if( pb->get_node(j) == in0 )
          break;
      }
      assert( j < max, "must find" );
      // Change control to match head of successor basic block
      j -= start;
    }
    n->set_req(0, pb->_succs[j]->head());
  }
}

static bool is_dominator(Block* d, Block* n) {
  return d->dom_lca(n) == d;
}

//------------------------------schedule_pinned_nodes--------------------------
// Set the basic block for Nodes pinned into blocks
void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
  // Allocate node stack of size C->live_nodes()+8 to avoid frequent realloc
  GrowableArray <Node *> spstack(C->live_nodes() + 8);
  spstack.push(_root);
  while (spstack.is_nonempty()) {
    Node* node = spstack.pop();
    if (!visited.test_set(node->_idx)) { // Test node and flag it as visited
      if (node->pinned() && !has_block(node)) {  // Pinned?  Nail it down!
        assert(node->in(0), "pinned Node must have Control");
        // Before setting block replace block_proj control edge
        replace_block_proj_ctrl(node);
        Node* input = node->in(0);
        while (!input->is_block_start()) {
          input = input->in(0);
        }
        Block* block = get_block_for_node(input); // Basic block of controlling input
        schedule_node_into_block(node, block);
      }

      // If the node has precedence edges (added when CastPP nodes are
      // removed in final_graph_reshaping), fix the control of the
      // node to cover the precedence edges and remove the
      // dependencies.
      Node* n = NULL;
      for (uint i = node->len()-1; i >= node->req(); i--) {
        Node* m = node->in(i);
        if (m == NULL) continue;
        // Skip the precedence edge if the test that guarded a CastPP:
        // - was optimized out during escape analysis
        // (OptimizePtrCompare): the CastPP's control isn't an end of
        // block.
        // - is moved in the branch of a dominating If: the control of
        // the CastPP is then a Region.
        if (m->is_block_proj() || m->is_block_start()) {
          node->rm_prec(i);
          if (n == NULL) {
            n = m;
          } else {
            Block* bn = get_block_for_node(n);
            Block* bm = get_block_for_node(m);
            assert(is_dominator(bn, bm) || is_dominator(bm, bn), "one must dominate the other");
            n = is_dominator(bn, bm) ?
                m : n;
          }
        }
      }
      if (n != NULL) {
        assert(node->in(0), "control should have been set");
        Block* bn = get_block_for_node(n);
        Block* bnode = get_block_for_node(node->in(0));
        assert(is_dominator(bn, bnode) || is_dominator(bnode, bn), "one must dominate the other");
        if (!is_dominator(bn, bnode)) {
          node->set_req(0, n);
        }
      }

      // process all inputs that are non NULL
      for (int i = node->req() - 1; i >= 0; --i) {
        if (node->in(i) != NULL) {
          spstack.push(node->in(i));
        }
      }
    }
  }
}

#ifdef ASSERT
// Assert that new input b2 is dominated by all previous inputs.
// Check this by seeing that it is dominated by b1, the deepest
// input observed until b2.
static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
  if (b1 == NULL) return;
  assert(b1->_dom_depth < b2->_dom_depth, "sanity");
  Block* tmp = b2;
  while (tmp != b1 && tmp != NULL) {
    tmp = tmp->_idom;
  }
  if (tmp != b1) {
    // Detected an unschedulable graph.  Print some nice stuff and die.
    tty->print_cr("!!! Unschedulable graph !!!");
    for (uint j=0; j<n->len(); j++) { // For all inputs
      Node* inn = n->in(j); // Get input
      if (inn == NULL) continue;  // Ignore NULL, missing inputs
      Block* inb = cfg->get_block_for_node(inn);
      tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
                 inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
      inn->dump();
    }
    tty->print("Failing node: ");
    n->dump();
    assert(false, "unschedulable graph");
  }
}
#endif

static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) {
  // Find the last input dominated by all other inputs.
  Block* deepb           = NULL;        // Deepest block so far
  int    deepb_dom_depth = 0;
  for (uint k = 0; k < n->len(); k++) { // For all inputs
    Node* inn = n->in(k);               // Get input
    if (inn == NULL) continue;          // Ignore NULL, missing inputs
    Block* inb = cfg->get_block_for_node(inn);
    assert(inb != NULL, "must already have scheduled this input");
    if (deepb_dom_depth < (int) inb->_dom_depth) {
      // The new inb must be dominated by the previous deepb.
      // The various inputs must be linearly ordered in the dom
      // tree, or else there will not be a unique deepest block.
      DEBUG_ONLY(assert_dom(deepb, inb, n, cfg));
      deepb = inb;                      // Save deepest block
      deepb_dom_depth = deepb->_dom_depth;
    }
  }
  assert(deepb != NULL, "must be at least one input to n");
  return deepb;
}


//------------------------------schedule_early---------------------------------
// Find the earliest Block any instruction can be placed in.  Some instructions
// are pinned into Blocks.  Unpinned instructions can appear in last block in
// which all their inputs occur.
bool PhaseCFG::schedule_early(VectorSet &visited, Node_List &roots) {
  // Allocate stack with enough space to avoid frequent realloc
  Node_Stack nstack(roots.Size() + 8);
  // _root will be processed among C->top() inputs
  roots.push(C->top());
  visited.set(C->top()->_idx);

  while (roots.size() != 0) {
    // Use local variables nstack_top_n & nstack_top_i to cache values
    // on stack's top.
    Node* parent_node = roots.pop();
    uint  input_index = 0;

    while (true) {
      if (input_index == 0) {
        // Fixup some control.
        // Constants without control get attached
        // to root and nodes that use is_block_proj() nodes should be attached
        // to the region that starts their block.
        const Node* control_input = parent_node->in(0);
        if (control_input != NULL) {
          replace_block_proj_ctrl(parent_node);
        } else {
          // Is a constant with NO inputs?
          if (parent_node->req() == 1) {
            parent_node->set_req(0, _root);
          }
        }
      }

      // First, visit all inputs and force them to get a block.  If an
      // input is already in a block we quit following inputs (to avoid
      // cycles). Instead we put that Node on a worklist to be handled
      // later (since ITS inputs may not have a block yet).

      // Assume all n's inputs will be processed
      bool done = true;

      while (input_index < parent_node->len()) {
        Node* in = parent_node->in(input_index++);
        if (in == NULL) {
          continue;
        }

        int is_visited = visited.test_set(in->_idx);
        if (!has_block(in)) {
          if (is_visited) {
            assert(false, "graph should be schedulable");
            return false;
          }
          // Save parent node and next input's index.
          nstack.push(parent_node, input_index);
          // Process current input now.
          parent_node = in;
          input_index = 0;
          // Not all n's inputs processed.
          done = false;
          break;
        } else if (!is_visited) {
          // Visit this guy later, using worklist
          roots.push(in);
        }
      }

      if (done) {
        // All of n's inputs have been processed, complete post-processing.

        // Some instructions are pinned into a block.  These include Region,
        // Phi, Start, Return, and other control-dependent instructions and
        // any projections which depend on them.
        if (!parent_node->pinned()) {
          // Set earliest legal block.
          Block* earliest_block = find_deepest_input(parent_node, this);
          map_node_to_block(parent_node, earliest_block);
        } else {
          assert(get_block_for_node(parent_node) == get_block_for_node(parent_node->in(0)), "Pinned Node should be at the same block as its control edge");
        }

        if (nstack.is_empty()) {
          // Finished all nodes on stack.
          // Process next node on the worklist 'roots'.
          break;
        }
        // Get saved parent node and next input's index.
        parent_node = nstack.node();
        input_index = nstack.index();
        nstack.pop();
      }
    }
  }
  return true;
}

//------------------------------dom_lca----------------------------------------
// Find least common ancestor in dominator tree
// LCA is a current notion of LCA, to be raised above 'this'.
// As a convenient boundary condition, return 'this' if LCA is NULL.
// Find the LCA of those two nodes.
Block* Block::dom_lca(Block* LCA) {
  if (LCA == NULL || LCA == this) return this;

  Block* anc = this;
  while (anc->_dom_depth > LCA->_dom_depth)
    anc = anc->_idom;           // Walk up till anc is as high as LCA

  while (LCA->_dom_depth > anc->_dom_depth)
    LCA = LCA->_idom;           // Walk up till LCA is as high as anc

  while (LCA != anc) {          // Walk both up till they are the same
    LCA = LCA->_idom;
    anc = anc->_idom;
  }

  return LCA;
}

//--------------------------raise_LCA_above_use--------------------------------
// We are placing a definition, and have been given a def->use edge.
// The definition must dominate the use, so move the LCA upward in the
// dominator tree to dominate the use.
// If the use is a phi, adjust
// the LCA only with the phi input paths which actually use this def.
static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, const PhaseCFG* cfg) {
  Block* buse = cfg->get_block_for_node(use);
  if (buse == NULL) return LCA;   // Unused killing Projs have no use block
  if (!use->is_Phi()) return buse->dom_lca(LCA);
  uint pmax = use->req();       // Number of Phi inputs
  // Why does not this loop just break after finding the matching input to
  // the Phi?  Well...it's like this.  I do not have true def-use/use-def
  // chains.  Means I cannot distinguish, from the def-use direction, which
  // of many use-defs lead from the same use to the same def.  That is, this
  // Phi might have several uses of the same def.  Each use appears in a
  // different predecessor block.  But when I enter here, I cannot distinguish
  // which use-def edge I should find the predecessor block for.  So I find
  // them all.  Means I do a little extra work if a Phi uses the same value
  // more than once.
  for (uint j=1; j<pmax; j++) { // For all inputs
    if (use->in(j) == def) {    // Found matching input?
      Block* pred = cfg->get_block_for_node(buse->pred(j));
      LCA = pred->dom_lca(LCA);
    }
  }
  return LCA;
}

//----------------------------raise_LCA_above_marks----------------------------
// Return a new LCA that dominates LCA and any of its marked predecessors.
// Search all my parents up to 'early' (exclusive), looking for predecessors
// which are marked with the given index.  Return the LCA (in the dom tree)
// of all marked blocks.  If there are none marked, return the original
// LCA.
static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark, Block* early, const PhaseCFG* cfg) {
  Block_List worklist;
  worklist.push(LCA);
  while (worklist.size() > 0) {
    Block* mid = worklist.pop();
    if (mid == early) continue;  // stop searching here

    // Test and set the visited bit.
    if (mid->raise_LCA_visited() == mark) continue;  // already visited

    // Don't process the current LCA, otherwise the search may terminate early
    if (mid != LCA && mid->raise_LCA_mark() == mark) {
      // Raise the LCA.
      LCA = mid->dom_lca(LCA);
      if (LCA == early) break;   // stop searching everywhere
      assert(early->dominates(LCA), "early is high enough");
      // Resume searching at that point, skipping intermediate levels.
      worklist.push(LCA);
      if (LCA == mid)
        continue; // Don't mark as visited to avoid early termination.
    } else {
      // Keep searching through this block's predecessors.
      for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
        Block* mid_parent = cfg->get_block_for_node(mid->pred(j));
        worklist.push(mid_parent);
      }
    }
    mid->set_raise_LCA_visited(mark);
  }
  return LCA;
}

//--------------------------memory_early_block--------------------------------
// This is a variation of find_deepest_input, the heart of schedule_early.
// Find the "early" block for a load, if we considered only memory and
// address inputs, that is, if other data inputs were ignored.
//
// Because a subset of edges are considered, the resulting block will
// be earlier (at a shallower dom_depth) than the true schedule_early
// point of the node.
// We compute this earlier block as a more permissive
// site for anti-dependency insertion, but only if subsume_loads is enabled.
static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg) {
  Node* base;
  Node* index;
  Node* store = load->in(MemNode::Memory);
  load->as_Mach()->memory_inputs(base, index);

  assert(base != NodeSentinel && index != NodeSentinel,
         "unexpected base/index inputs");

  Node* mem_inputs[4];
  int mem_inputs_length = 0;
  if (base != NULL)  mem_inputs[mem_inputs_length++] = base;
  if (index != NULL) mem_inputs[mem_inputs_length++] = index;
  if (store != NULL) mem_inputs[mem_inputs_length++] = store;

  // In the comparison below, add one to account for the control input,
  // which may be null, but always takes up a spot in the in array.
  if (mem_inputs_length + 1 < (int) load->req()) {
    // This "load" has more inputs than just the memory, base and index inputs.
    // For purposes of checking anti-dependences, we need to start
    // from the early block of only the address portion of the instruction,
    // and ignore other blocks that may have factored into the wider
    // schedule_early calculation.
    if (load->in(0) != NULL) mem_inputs[mem_inputs_length++] = load->in(0);

    Block* deepb           = NULL;        // Deepest block so far
    int    deepb_dom_depth = 0;
    for (int i = 0; i < mem_inputs_length; i++) {
      Block* inb = cfg->get_block_for_node(mem_inputs[i]);
      if (deepb_dom_depth < (int) inb->_dom_depth) {
        // The new inb must be dominated by the previous deepb.
        // The various inputs must be linearly ordered in the dom
        // tree, or else there will not be a unique deepest block.
        DEBUG_ONLY(assert_dom(deepb, inb, load, cfg));
        deepb = inb;                      // Save deepest block
        deepb_dom_depth = deepb->_dom_depth;
      }
    }
    early = deepb;
  }

  return early;
}

//--------------------------insert_anti_dependences---------------------------
// A load may need to witness memory that nearby stores can overwrite.
// For each nearby store, either insert an "anti-dependence" edge
// from the load to the store, or else move LCA upward to force the
// load to (eventually) be scheduled in a block above the store.
//
// Do not add edges to stores on distinct control-flow paths;
// only add edges to stores which might interfere.
//
// Return the (updated) LCA.  There will not be any possibly interfering
// store between the load's "early block" and the updated LCA.
// Any stores in the updated LCA will have new precedence edges
// back to the load.  The caller is expected to schedule the load
// in the LCA, in which case the precedence edges will make LCM
// preserve anti-dependences.  The caller may also hoist the load
// above the LCA, if it is not the early block.
Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
  assert(load->needs_anti_dependence_check(), "must be a load of some sort");
  assert(LCA != NULL, "");
  DEBUG_ONLY(Block* LCA_orig = LCA);

  // Compute the alias index.
  // Loads and stores with different alias indices
  // do not need anti-dependence edges.
  uint load_alias_idx = C->get_alias_index(load->adr_type());
#ifdef ASSERT
  if (load_alias_idx == Compile::AliasIdxBot && C->AliasLevel() > 0 &&
      (PrintOpto || VerifyAliases ||
       PrintMiscellaneous && (WizardMode || Verbose))) {
    // Load nodes should not consume all of memory.
    // Reporting a bottom type indicates a bug in adlc.
    // If some particular type of node validly consumes all of memory,
    // sharpen the preceding "if" to exclude it, so we can catch bugs here.
    tty->print_cr("*** Possible Anti-Dependence Bug:  Load consumes all of memory.");
    load->dump(2);
    if (VerifyAliases) assert(load_alias_idx != Compile::AliasIdxBot, "");
  }
#endif
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrComp),
         "String compare is only known 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrEquals),
         "String equals is a 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrIndexOf),
         "String indexOf is a 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_AryEq),
         "Arrays equals is a 'load' that does not conflict with any stores");

  if (!C->alias_type(load_alias_idx)->is_rewritable()) {
    // It is impossible to spoil this load by putting stores before it,
    // because we know that the stores will never update the value
    // which 'load' must witness.
    return LCA;
  }

  node_idx_t load_index = load->_idx;

  // Note the earliest legal placement of 'load', as determined
  // by the unique point in the dom tree where all memory effects
  // and other inputs are first available.  (Computed by schedule_early.)
  // For normal loads, 'early' is the shallowest place (dom graph wise)
  // to look for anti-deps between this load and any store.
  Block* early = get_block_for_node(load);

  // If we are subsuming loads, compute an "early" block that only considers
  // memory or address inputs. This block may be different than the
  // schedule_early block in that it could be at an even shallower depth in the
  // dominator tree, and allow for a broader discovery of anti-dependences.
  if (C->subsume_loads()) {
    early = memory_early_block(load, early, this);
  }

  ResourceArea *area = Thread::current()->resource_area();
  Node_List worklist_mem(area);     // prior memory state to store
  Node_List worklist_store(area);   // possible-def to explore
  Node_List worklist_visited(area); // visited mergemem nodes
  Node_List non_early_stores(area); // all relevant stores outside of early
  bool must_raise_LCA = false;

#ifdef TRACK_PHI_INPUTS
  // %%% This extra checking fails because MergeMem nodes are not GVNed.
  // Provide "phi_inputs" to check if every input to a PhiNode is from the
  // original memory state.  This indicates a PhiNode which should not
  // prevent the load from sinking.
  // For such a block, set_raise_LCA_mark
  // may be overly conservative.
  // Mechanism: count inputs seen for each Phi encountered in worklist_store.
  DEBUG_ONLY(GrowableArray<uint> phi_inputs(area, C->unique(),0,0));
#endif

  // 'load' uses some memory state; look for users of the same state.
  // Recurse through MergeMem nodes to the stores that use them.

  // Each of these stores is a possible definition of memory
  // that 'load' needs to use.  We need to force 'load'
  // to occur before each such store.  When the store is in
  // the same block as 'load', we insert an anti-dependence
  // edge load->store.

  // The relevant stores "nearby" the load consist of a tree rooted
  // at initial_mem, with internal nodes of type MergeMem.
  // Therefore, the branches visited by the worklist are of this form:
  //    initial_mem -> (MergeMem ->)* store
  // The anti-dependence constraints apply only to the fringe of this tree.

  Node* initial_mem = load->in(MemNode::Memory);
  worklist_store.push(initial_mem);
  worklist_visited.push(initial_mem);
  worklist_mem.push(NULL);
  while (worklist_store.size() > 0) {
    // Examine a nearby store to see if it might interfere with our load.
    Node* mem   = worklist_mem.pop();
    Node* store = worklist_store.pop();
    uint op = store->Opcode();

    // MergeMems do not directly have anti-deps.
    // Treat them as internal nodes in a forward tree of memory states,
    // the leaves of which are each a 'possible-def'.
    if (store == initial_mem    // root (exclusive) of tree we are searching
        || op == Op_MergeMem    // internal node of tree we are searching
        ) {
      mem = store;   // It's not a possibly interfering store.
      if (store == initial_mem)
        initial_mem = NULL;  // only process initial memory once

      for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
        store = mem->fast_out(i);
        if (store->is_MergeMem()) {
          // Be sure we don't get into combinatorial problems.
          // (Allow phis to be repeated; they can merge two relevant states.)
          uint j = worklist_visited.size();
          for (; j > 0; j--) {
            if (worklist_visited.at(j-1) == store) break;
          }
          if (j > 0) continue; // already on work list; do not repeat
          worklist_visited.push(store);
        }
        worklist_mem.push(mem);
        worklist_store.push(store);
      }
      continue;
    }

    if (op == Op_MachProj || op == Op_Catch) continue;
    if (store->needs_anti_dependence_check()) continue;  // not really a store

    // Compute the alias index.  Loads and stores with different alias
    // indices do not need anti-dependence edges.  Wide MemBar's are
    // anti-dependent on everything (except immutable memories).
    const TypePtr* adr_type = store->adr_type();
    if (!C->can_alias(adr_type, load_alias_idx)) continue;

    // Most slow-path runtime calls do NOT modify Java memory, but
    // they can block and so write Raw memory.
    if (store->is_Mach()) {
      MachNode* mstore = store->as_Mach();
      if (load_alias_idx != Compile::AliasIdxRaw) {
        // Check for call into the runtime using the Java calling
        // convention (and from there into a wrapper); it has no
        // _method.
        // Can't do this optimization for Native calls because
        // they CAN write to Java memory.
        if (mstore->ideal_Opcode() == Op_CallStaticJava) {
          assert(mstore->is_MachSafePoint(), "");
          MachSafePointNode* ms = (MachSafePointNode*) mstore;
          assert(ms->is_MachCallJava(), "");
          MachCallJavaNode* mcj = (MachCallJavaNode*) ms;
          if (mcj->_method == NULL) {
            // These runtime calls do not write to Java visible memory
            // (other than Raw) and so do not require anti-dependence edges.
            continue;
          }
        }
        // Same for SafePoints: they read/write Raw but only read otherwise.
        // This is basically a workaround for SafePoints only defining control
        // instead of control + memory.
        if (mstore->ideal_Opcode() == Op_SafePoint)
          continue;
      } else {
        // Some raw memory, such as the load of "top" at an allocation,
        // can be control dependent on the previous safepoint. See
        // comments in GraphKit::allocate_heap() about control input.
        // Inserting an anti-dep between such a safepoint and a use
        // creates a cycle, and will cause a subsequent failure in
        // local scheduling.  (BugId 4919904)
        // (%%% How can a control input be a safepoint and not a projection??)
        if (mstore->ideal_Opcode() == Op_SafePoint && load->in(0) == mstore)
          continue;
      }
    }

    // Identify a block that the current load must be above,
    // or else observe that 'store' is all the way up in the
    // earliest legal block for 'load'.  In the latter case,
    // immediately insert an anti-dependence edge.
    Block* store_block = get_block_for_node(store);
    assert(store_block != NULL, "unused killing projections skipped above");

    if (store->is_Phi()) {
      // 'load' uses memory which is one (or more) of the Phi's inputs.
      // It must be scheduled not before the Phi, but rather before
      // each of the relevant Phi inputs.
      //
      // Instead of finding the LCA of all inputs to a Phi that match 'mem',
      // we mark each corresponding predecessor block and do a combined
      // hoisting operation later (raise_LCA_above_marks).
      //
      // Do not assert(store_block != early, "Phi merging memory after access")
      // PhiNode may be at start of block 'early' with backedge to 'early'
      DEBUG_ONLY(bool found_match = false);
      for (uint j = PhiNode::Input, jmax = store->req(); j < jmax; j++) {
        if (store->in(j) == mem) {   // Found matching input?
          DEBUG_ONLY(found_match = true);
          Block* pred_block = get_block_for_node(store_block->pred(j));
          if (pred_block != early) {
            // If any predecessor of the Phi matches the load's "early block",
            // we do not need a precedence edge between the Phi and 'load'
            // since the load will be forced into a block preceding the Phi.
            pred_block->set_raise_LCA_mark(load_index);
            assert(!LCA_orig->dominates(pred_block) ||
                   early->dominates(pred_block), "early is high enough");
            must_raise_LCA = true;
          } else {
            // anti-dependent upon PHI pinned below 'early', no edge needed
            LCA = early;             // but can not schedule below 'early'
          }
        }
      }
      assert(found_match, "no worklist bug");
#ifdef TRACK_PHI_INPUTS
#ifdef ASSERT
      // This assert asks about correct handling of PhiNodes, which may not
      // have all input edges directly from 'mem'. See BugId 4621264
      int num_mem_inputs = phi_inputs.at_grow(store->_idx,0) + 1;
      // Increment by exactly one even if there are multiple copies of 'mem'
      // coming into the phi, because we will run this block several times
      // if there are several copies of 'mem'.
      // (That's how DU iterators work.)
      phi_inputs.at_put(store->_idx, num_mem_inputs);
      assert(PhiNode::Input + num_mem_inputs < store->req(),
             "Expect at least one phi input will not be from original memory state");
#endif //ASSERT
#endif //TRACK_PHI_INPUTS
    } else if (store_block != early) {
      // 'store' is between the current LCA and earliest possible block.
      // Label its block, and decide later on how to raise the LCA
      // to include the effect on LCA of this store.
      // If this store's block gets chosen as the raised LCA, we
      // will find him on the non_early_stores list and stick him
      // with a precedence edge.
      // (But, don't bother if LCA is already raised all the way.)
      if (LCA != early) {
        store_block->set_raise_LCA_mark(load_index);
        must_raise_LCA = true;
        non_early_stores.push(store);
      }
    } else {
      // Found a possibly-interfering store in the load's 'early' block.
      // This means 'load' cannot sink at all in the dominator tree.
      // Add an anti-dep edge, and squeeze 'load' into the highest block.
      assert(store != load->in(0), "dependence cycle found");
      if (verify) {
        assert(store->find_edge(load) != -1, "missing precedence edge");
      } else {
        store->add_prec(load);
      }
      LCA = early;
      // This turns off the process of gathering non_early_stores.
    }
  }
  // (Worklist is now empty; all nearby stores have been visited.)

  // Finished if 'load' must be scheduled in its 'early' block.
  // If we found any stores there, they have already been given
  // precedence edges.
  if (LCA == early) return LCA;

  // We get here only if there are no possibly-interfering stores
  // in the load's 'early' block.  Move LCA up above all predecessors
  // which contain stores we have noted.
  //
  // The raised LCA block can be a home to such interfering stores,
  // but its predecessors must not contain any such stores.
  //
  // The raised LCA will be a lower bound for placing the load,
  // preventing the load from sinking past any block containing
  // a store that may invalidate the memory state required by 'load'.
  if (must_raise_LCA)
    LCA = raise_LCA_above_marks(LCA, load->_idx, early, this);
  if (LCA == early) return LCA;

  // Insert anti-dependence edges from 'load' to each store
  // in the non-early LCA block.
  // Mine the non_early_stores list for such stores.
  if (LCA->raise_LCA_mark() == load_index) {
    while (non_early_stores.size() > 0) {
      Node* store = non_early_stores.pop();
      Block* store_block = get_block_for_node(store);
      if (store_block == LCA) {
        // add anti_dependence from store to load in its own block
        assert(store != load->in(0), "dependence cycle found");
        if (verify) {
          assert(store->find_edge(load) != -1, "missing precedence edge");
        } else {
          store->add_prec(load);
        }
      } else {
        assert(store_block->raise_LCA_mark() == load_index, "block was marked");
        // Any other stores we found must be either inside the new LCA
        // or else outside the original LCA.
        // In the latter case, they
        // did not interfere with any use of 'load'.
        assert(LCA->dominates(store_block)
               || !LCA_orig->dominates(store_block), "no stray stores");
      }
    }
  }

  // Return the highest block containing stores; any stores
  // within that block have been given anti-dependence edges.
  return LCA;
}

// This class is used to iterate backwards over the nodes in the graph.

class Node_Backward_Iterator {

private:
  Node_Backward_Iterator();

public:
  // Constructor for the iterator
  Node_Backward_Iterator(Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg);

  // Postincrement operator to iterate over the nodes
  Node *next();

private:
  VectorSet &_visited;
  Node_List &_stack;
  PhaseCFG  &_cfg;
};

// Constructor for the Node_Backward_Iterator
Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg)
  : _visited(visited), _stack(stack), _cfg(cfg) {
  // The stack should contain exactly the root
  stack.clear();
  stack.push(root);

  // Clear the visited bits
  visited.Clear();
}

// Iterator for the Node_Backward_Iterator
Node *Node_Backward_Iterator::next() {

  // If the _stack is empty, then just return NULL: finished.
  if ( !_stack.size() )
    return NULL;

  // '_stack' is emulating a real _stack.  The 'visit-all-users' loop has been
  // made stateless, so I do not need to record the index 'i' on my _stack.
  // Instead I visit all users each time, scanning for unvisited users.
  // I visit unvisited not-anti-dependence users first, then anti-dependent
  // children next.
  Node *self = _stack.pop();

  // I cycle here when I am entering a deeper level of recursion.
  // The key variable 'self' was set prior to jumping here.
  while( 1 ) {

    _visited.set(self->_idx);

    // Now schedule all uses as late as possible.
    const Node* src = self->is_Proj() ? self->in(0) : self;
    uint src_rpo = _cfg.get_block_for_node(src)->_rpo;

    // Schedule all nodes in a post-order visit
    Node *unvisited = NULL;  // Unvisited anti-dependent Node, if any

    // Scan for unvisited nodes
    for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
      // For all uses, schedule late
      Node* n = self->fast_out(i); // Use

      // Skip already visited children
      if ( _visited.test(n->_idx) )
        continue;

      // do not traverse backward control edges
      Node *use = n->is_Proj() ? n->in(0) : n;
      uint use_rpo = _cfg.get_block_for_node(use)->_rpo;

      if ( use_rpo < src_rpo )
        continue;

      // Phi nodes always precede uses in a basic block
      if ( use_rpo == src_rpo && use->is_Phi() )
        continue;

      unvisited = n;      // Found unvisited

      // Check for possible-anti-dependent
      if( !n->needs_anti_dependence_check() )
        break;            // Not visited, not anti-dep; schedule it NOW
    }

    // Did I find an unvisited not-anti-dependent Node?
    if ( !unvisited )
      break;                  // All done with children; post-visit 'self'

    // Visit the unvisited Node.  Contains the obvious push to
    // indicate I'm entering a deeper level of recursion.
    // I push the
    // old state onto the _stack and set a new state and loop (recurse).
    _stack.push(self);
    self = unvisited;
  } // End recursion loop

  return self;
}

//------------------------------ComputeLatenciesBackwards----------------------
// Compute the latency of all the instructions.
void PhaseCFG::compute_latencies_backwards(VectorSet &visited, Node_List &stack) {
#ifndef PRODUCT
  if (trace_opto_pipelining())
    tty->print("\n#---- ComputeLatenciesBackwards ----\n");
#endif

  Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
  Node *n;

  // Walk over all the nodes from last to first
  while (n = iter.next()) {
    // Set the latency for the definitions of this instruction
    partial_latency_of_defs(n);
  }
} // end ComputeLatenciesBackwards

//------------------------------partial_latency_of_defs------------------------
// Compute the latency impact of this node on all defs.  This computes
// a number that increases as we approach the beginning of the routine.
void PhaseCFG::partial_latency_of_defs(Node *n) {
  // Set the latency for this instruction
#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# latency_to_inputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
    dump();
  }
#endif

  if (n->is_Proj()) {
    n = n->in(0);
  }

  if (n->is_Root()) {
    return;
  }

  uint nlen = n->len();
  uint use_latency = get_latency_for_node(n);
  uint use_pre_order = get_block_for_node(n)->_pre_order;

  for (uint j = 0; j < nlen; j++) {
    Node *def = n->in(j);

    if (!def || def == n) {
      continue;
    }

    // Walk backwards thru projections
    if (def->is_Proj()) {
      def = def->in(0);
    }

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("# in(%2d): ", j);
      def->dump();
    }
#endif

    // If the defining block is not known, assume it is ok
    Block *def_block = get_block_for_node(def);
    uint def_pre_order = def_block ?
        def_block->_pre_order : 0;

    if ((use_pre_order < def_pre_order) || (use_pre_order == def_pre_order && n->is_Phi())) {
      continue;
    }

    uint delta_latency = n->latency(j);
    uint current_latency = delta_latency + use_latency;

    if (get_latency_for_node(def) < current_latency) {
      set_latency_for_node(def, current_latency);
    }

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("# %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d", use_latency, j, delta_latency, current_latency, def->_idx, get_latency_for_node(def));
    }
#endif
  }
}

//------------------------------latency_from_use-------------------------------
// Compute the latency of a specific use
int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
  // If self-reference, return no latency
  if (use == n || use->is_Root()) {
    return 0;
  }

  uint def_pre_order = get_block_for_node(def)->_pre_order;
  uint latency = 0;

  // If the use is not a projection, then it is simple...
  if (!use->is_Proj()) {
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("# out(): ");
      use->dump();
    }
#endif

    uint use_pre_order = get_block_for_node(use)->_pre_order;

    if (use_pre_order < def_pre_order)
      return 0;

    if (use_pre_order == def_pre_order && use->is_Phi())
      return 0;

    uint nlen = use->len();
    uint nl = get_latency_for_node(use);

    for ( uint j=0; j<nlen; j++ ) {
      if (use->in(j) == n) {
        // Change this if we want local latencies
        uint ul = use->latency(j);
        uint l = ul + nl;
        if (latency < l) latency = l;
#ifndef PRODUCT
        if (trace_opto_pipelining()) {
          tty->print_cr("# %d + edge_latency(%d) == %d -> %d, latency = %d",
                        nl, j, ul, l, latency);
        }
#endif
      }
    }
  } else {
    // This is a projection, just grab the latency of the use(s)
    for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
      uint l = latency_from_use(use, def, use->fast_out(j));
      if (latency < l) latency = l;
    }
  }

  return latency;
}

//------------------------------latency_from_uses------------------------------
// Compute the latency of this instruction relative to all of its uses.
// This computes a number that increases as we approach the beginning of the
// routine.
void PhaseCFG::latency_from_uses(Node *n) {
  // Set the latency for this instruction
#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# latency_from_outputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
    dump();
  }
#endif
  uint latency=0;
  const Node *def = n->is_Proj() ?
      n->in(0): n;

  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    uint l = latency_from_use(n, def, n->fast_out(i));

    if (latency < l) latency = l;
  }

  set_latency_for_node(n, latency);
}

//------------------------------hoist_to_cheaper_block-------------------------
// Pick a block for node self, between early and LCA, that is a cheaper
// alternative to LCA.
Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
  const double delta = 1+PROB_UNLIKELY_MAG(4);
  Block* least       = LCA;
  double least_freq  = least->_freq;
  uint target        = get_latency_for_node(self);
  uint start_latency = get_latency_for_node(LCA->head());
  uint end_latency   = get_latency_for_node(LCA->get_node(LCA->end_idx()));
  bool in_latency    = (target <= start_latency);
  const Block* root_block = get_block_for_node(_root);

  // Turn off latency scheduling if scheduling is just plain off
  if (!C->do_scheduling())
    in_latency = true;

  // Do not hoist (to cover latency) instructions which target a
  // single register.  Hoisting stretches the live range of the
  // single register and may force spilling.
  MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
  if (mach && mach->out_RegMask().is_bound1() && mach->out_RegMask().is_NotEmpty())
    in_latency = true;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# Find cheaper block for latency %d: ", get_latency_for_node(self));
    self->dump();
    tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
      LCA->_pre_order,
      LCA->head()->_idx,
      start_latency,
      LCA->get_node(LCA->end_idx())->_idx,
      end_latency,
      least_freq);
  }
#endif

  int cand_cnt = 0;  // number of candidates tried

  // Walk up the dominator tree from LCA (Lowest common ancestor) to
  // the earliest legal location.
  // Capture the least execution frequency.
  while (LCA != early) {
    LCA = LCA->_idom;         // Follow up the dominator tree

    if (LCA == NULL) {
      // Bailout without retry
      assert(false, "graph should be schedulable");
      C->record_method_not_compilable("late schedule failed: LCA == NULL");
      return least;
    }

    // Don't hoist machine instructions to the root basic block
    if (mach && LCA == root_block)
      break;

    uint start_lat = get_latency_for_node(LCA->head());
    uint end_idx   = LCA->end_idx();
    uint end_lat   = get_latency_for_node(LCA->get_node(end_idx));
    double LCA_freq = LCA->_freq;
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
        LCA->_pre_order, LCA->head()->_idx, start_lat, end_idx, end_lat, LCA_freq);
    }
#endif
    cand_cnt++;
    if (LCA_freq < least_freq              || // Better Frequency
        (StressGCM && Compile::randomized_select(cand_cnt)) || // Should be randomly accepted in stress mode
         (!StressGCM                    &&    // Otherwise, choose with latency
          !in_latency                   &&    // No block containing latency
          LCA_freq < least_freq * delta &&    // No worse frequency
          target >= end_lat             &&    // within latency range
          !self->is_iteratively_computed() )  // But don't hoist IV increments
             // because they may end up above other uses of their phi forcing
             // their result register to be different from their input.
       ) {
      least = LCA;            // Found cheaper block
      least_freq = LCA_freq;
      start_latency = start_lat;
      end_latency = end_lat;
      if (target <= start_lat)
        in_latency = true;
    }
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print_cr("# Choose block B%d with start latency=%d and freq=%g",
      least->_pre_order, start_latency, least_freq);
  }
#endif

  // See if the latency needs to be updated
  if (target < end_latency) {
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("# Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
    }
#endif
    set_latency_for_node(self, end_latency);
    partial_latency_of_defs(self);
  }

  return least;
}


//------------------------------schedule_late-----------------------------------
// Now schedule all codes as LATE as possible.  This is the LCA in the
// dominator tree of all USES of a value.  Pick the block with the least
// loop nesting depth that is lowest in the dominator tree.
extern const char must_clone[];
void PhaseCFG::schedule_late(VectorSet &visited, Node_List &stack) {
#ifndef PRODUCT
  if (trace_opto_pipelining())
    tty->print("\n#---- schedule_late ----\n");
#endif

  Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
  Node *self;

  // Walk over all the nodes from last to first
  while (self = iter.next()) {
    Block* early = get_block_for_node(self); // Earliest legal placement

    if (self->is_top()) {
      // Top node goes in bb #2 with other constants.
      // It must be special-cased, because it has no out edges.
      early->add_inst(self);
      continue;
    }

    // No uses, just terminate
    if (self->outcnt() == 0) {
      assert(self->is_MachProj(), "sanity");
      continue;                   // Must be a dead machine projection
    }

    // If node is pinned in the block, then no scheduling can be done.
    if( self->pinned() )          // Pinned in block?
      continue;

    MachNode* mach = self->is_Mach() ?
        self->as_Mach() : NULL;
    if (mach) {
      switch (mach->ideal_Opcode()) {
      case Op_CreateEx:
        // Don't move exception creation
        early->add_inst(self);
        continue;
        break;
      case Op_CheckCastPP:
        // Don't move CheckCastPP nodes away from their input, if the input
        // is a rawptr (5071820).
        Node *def = self->in(1);
        if (def != NULL && def->bottom_type()->base() == Type::RawPtr) {
          early->add_inst(self);
#ifdef ASSERT
          _raw_oops.push(def);
#endif
          continue;
        }
        break;
      }
    }

    // Gather LCA of all uses
    Block *LCA = NULL;
    {
      for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
        // For all uses, find LCA
        Node* use = self->fast_out(i);
        LCA = raise_LCA_above_use(LCA, use, self, this);
      }
    }  // (Hide defs of imax, i from rest of block.)

    // Place temps in the block of their use.  This isn't a
    // requirement for correctness but it reduces useless
    // interference between temps and other nodes.
    if (mach != NULL && mach->is_MachTemp()) {
      map_node_to_block(self, LCA);
      LCA->add_inst(self);
      continue;
    }

    // Check if 'self' could be anti-dependent on memory
    if (self->needs_anti_dependence_check()) {
      // Hoist LCA above possible-defs and insert anti-dependences to
      // defs in new LCA block.
      LCA = insert_anti_dependences(LCA, self);
    }

    if (early->_dom_depth > LCA->_dom_depth) {
      // Somehow the LCA has moved above the earliest legal point.
      // (One way this can happen is via memory_early_block.)
      if (C->subsume_loads() == true && !C->failing()) {
        // Retry with subsume_loads == false
        // If this is the first failure, the sentinel string will "stick"
        // to the Compile object, and the C2Compiler will see it and retry.
        C->record_failure(C2Compiler::retry_no_subsuming_loads());
      } else {
        // Bailout without retry when (early->_dom_depth > LCA->_dom_depth)
        assert(false, "graph should be schedulable");
        C->record_method_not_compilable("late schedule failed: incorrect graph");
      }
      return;
    }

    // If there is no opportunity to hoist, then we're done.
    // In stress mode, try to hoist even the single operations.
    bool try_to_hoist = StressGCM || (LCA != early);

    // Must clone guys stay next to use; no hoisting allowed.
    // Also cannot hoist guys that alter memory or are otherwise not
    // allocatable (hoisting can make a value live longer, leading to
    // anti and output dependency problems which are normally resolved
    // by the register allocator giving everyone a different register).
    if (mach != NULL && must_clone[mach->ideal_Opcode()])
      try_to_hoist = false;

    Block* late = NULL;
    if (try_to_hoist) {
      // Now find the block with the least execution frequency.
      // Start at the latest schedule and work up to the earliest schedule
      // in the dominator tree.
      // Thus the Node will dominate all its uses.
      late = hoist_to_cheaper_block(LCA, early, self);
    } else {
      // Just use the LCA of the uses.
      late = LCA;
    }

    // Put the node into target block
    schedule_node_into_block(self, late);

#ifdef ASSERT
    if (self->needs_anti_dependence_check()) {
      // since precedence edges are only inserted when we're sure they
      // are needed make sure that after placement in a block we don't
      // need any new precedence edges.
      verify_anti_dependences(late, self);
    }
#endif
  } // Loop until all nodes have been visited

} // end ScheduleLate

//------------------------------GlobalCodeMotion-------------------------------
void PhaseCFG::global_code_motion() {
  ResourceMark rm;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Start GlobalCodeMotion ----\n");
  }
#endif

  // Initialize the node to block mapping for things on the proj_list
  for (uint i = 0; i < _matcher.number_of_projections(); i++) {
    unmap_node_from_block(_matcher.get_projection(i));
  }

  // Set the basic block for Nodes pinned into blocks
  Arena* arena = Thread::current()->resource_area();
  VectorSet visited(arena);
  schedule_pinned_nodes(visited);

  // Find the earliest Block any instruction can be placed in.  Some
  // instructions are pinned into Blocks.  Unpinned instructions can
  // appear in last block in which all their inputs occur.
  visited.Clear();
  Node_List stack(arena);
  // Pre-grow the list
  stack.map((C->live_nodes() >> 1) + 16, NULL);
  if (!schedule_early(visited, stack)) {
    // Bailout without retry
    C->record_method_not_compilable("early schedule failed");
    return;
  }

  // Build Def-Use edges.
  // Compute the latency information (via backwards walk) for all the
  // instructions in the graph
  _node_latency = new GrowableArray<uint>(); // resource_area allocation

  if (C->do_scheduling()) {
    compute_latencies_backwards(visited, stack);
  }

  // Now schedule all codes as LATE as possible.  This is the LCA in the
  // dominator tree of all USES of a value.  Pick the block with the least
  // loop nesting depth that is lowest in the dominator tree.
  // ( visited.Clear() called in schedule_late()->Node_Backward_Iterator() )
  schedule_late(visited, stack);
  if (C->failing()) {
    return;
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Detect implicit null checks ----\n");
  }
#endif

  // Detect implicit-null-check opportunities.  Basically, find NULL checks
  // with suitable memory ops nearby.  Use the memory op to do the NULL check.
  // I can generate a memory op if there is not one nearby.
  if (C->is_method_compilation()) {
    // By reversing the loop direction we get a very minor gain on mpegaudio.
    // Feel free to revert to a forward loop for clarity.
    // for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) {
    for (int i = _matcher._null_check_tests.size() - 2; i >= 0; i -= 2) {
      Node* proj = _matcher._null_check_tests[i];
      Node* val  = _matcher._null_check_tests[i + 1];
      Block* block = get_block_for_node(proj);
      implicit_null_check(block, proj, val, C->allowed_deopt_reasons());
      // The implicit_null_check will only perform the transformation
      // if the null branch is truly uncommon, *and* it leads to an
      // uncommon trap.
      // Combined with the too_many_traps guards
      // above, this prevents SEGV storms reported in 6366351,
      // by recompiling offending methods without this optimization.
    }
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Start Local Scheduling ----\n");
  }
#endif

  // Schedule locally.  Right now a simple topological sort.
  // Later, do a real latency aware scheduler.
  GrowableArray<int> ready_cnt(C->unique(), C->unique(), -1);
  visited.Clear();
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    if (!schedule_local(block, ready_cnt, visited)) {
      if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
        C->record_method_not_compilable("local schedule failed");
      }
      return;
    }
  }

  // If we inserted any instructions between a Call and his CatchNode,
  // clone the instructions on all paths below the Catch.
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    call_catch_cleanup(block);
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- After GlobalCodeMotion ----\n");
    for (uint i = 0; i < number_of_blocks(); i++) {
      Block* block = get_block(i);
      block->dump();
    }
  }
#endif
  // Dead.
  _node_latency = (GrowableArray<uint> *)((intptr_t)0xdeadbeef);
}

bool PhaseCFG::do_global_code_motion() {

  build_dominator_tree();
  if (C->failing()) {
    return false;
  }

  NOT_PRODUCT( C->verify_graph_edges(); )

  estimate_block_frequency();

  global_code_motion();

  if (C->failing()) {
    return false;
  }

  return true;
}

//------------------------------Estimate_Block_Frequency-----------------------
// Estimate block frequencies based on IfNode probabilities.
void PhaseCFG::estimate_block_frequency() {

  // Force conditional branches leading to uncommon traps to be unlikely,
  // not because we get to the uncommon_trap with less relative frequency,
  // but because an uncommon_trap typically causes a deopt, so we only get
  // there once.
  if (C->do_freq_based_layout()) {
    Block_List worklist;
    Block* root_blk = get_block(0);
    for (uint i = 1; i < root_blk->num_preds(); i++) {
      Block *pb = get_block_for_node(root_blk->pred(i));
      if (pb->has_uncommon_code()) {
        worklist.push(pb);
      }
    }
    while (worklist.size() > 0) {
      Block* uct = worklist.pop();
      if (uct == get_root_block()) {
        continue;
      }
      for (uint i = 1; i < uct->num_preds(); i++) {
        Block *pb = get_block_for_node(uct->pred(i));
        if (pb->_num_succs == 1) {
          worklist.push(pb);
        } else if (pb->num_fall_throughs() == 2) {
          pb->update_uncommon_branch(uct);
        }
      }
    }
  }

  // Create the loop tree and calculate loop depth.
  _root_loop = create_loop_tree();
  _root_loop->compute_loop_depth(0);

  // Compute block frequency of each block, relative to a single loop entry.
  _root_loop->compute_freq();

  // Adjust all frequencies to be relative to a single method entry
  _root_loop->_freq = 1.0;
  _root_loop->scale_freq();

  // Save outmost loop frequency for LRG frequency threshold
  _outer_loop_frequency = _root_loop->outer_loop_freq();

  // force paths ending at uncommon traps to be infrequent
  if (!C->do_freq_based_layout()) {
    Block_List worklist;
    Block* root_blk = get_block(0);
    for (uint i = 1; i < root_blk->num_preds(); i++) {
      Block *pb = get_block_for_node(root_blk->pred(i));
      if (pb->has_uncommon_code())
      {
        worklist.push(pb);
      }
    }
    while (worklist.size() > 0) {
      Block* uct = worklist.pop();
      uct->_freq = PROB_MIN;
      for (uint i = 1; i < uct->num_preds(); i++) {
        Block *pb = get_block_for_node(uct->pred(i));
        if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) {
          worklist.push(pb);
        }
      }
    }
  }

#ifdef ASSERT
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* b = get_block(i);
    assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency");
  }
#endif

#ifndef PRODUCT
  if (PrintCFGBlockFreq) {
    tty->print_cr("CFG Block Frequencies");
    _root_loop->dump_tree();
    if (Verbose) {
      tty->print_cr("PhaseCFG dump");
      dump();
      tty->print_cr("Node dump");
      _root->dump(99999);
    }
  }
#endif
}

//----------------------------create_loop_tree--------------------------------
// Create a loop tree from the CFG
CFGLoop* PhaseCFG::create_loop_tree() {

#ifdef ASSERT
  assert(get_block(0) == get_root_block(), "first block should be root block");
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    // Check that _loop fields are clear...we could clear them if not.
    assert(block->_loop == NULL, "clear _loop expected");
    // Sanity check that the RPO numbering is reflected in the _blocks array.
    // It doesn't have to be for the loop tree to be built, but if it is not,
    // then the blocks have been reordered since dom graph building...which
    // may question the RPO numbering
    assert(block->_rpo == i, "unexpected reverse post order number");
  }
#endif

  int idct = 0;
  CFGLoop* root_loop = new CFGLoop(idct++);

  Block_List worklist;

  // Assign blocks to loops
  for(uint i = number_of_blocks() - 1; i > 0; i-- ) { // skip Root block
    Block* block = get_block(i);

    if (block->head()->is_Loop()) {
      Block* loop_head = block;
      assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
      Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
      Block* tail = get_block_for_node(tail_n);

      // Defensively filter out Loop nodes for non-single-entry loops.
      // For all reasonable loops, the head occurs before the tail in RPO.
      if (i <= tail->_rpo) {

        // The tail and (recursive) predecessors of the tail
        // are made members of a new loop.

        assert(worklist.size() == 0, "nonempty worklist");
        CFGLoop* nloop = new CFGLoop(idct++);
        assert(loop_head->_loop == NULL, "just checking");
        loop_head->_loop = nloop;
        // Add to nloop so push_pred() will skip over inner loops
        nloop->add_member(loop_head);
        nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, this);

        while (worklist.size() > 0) {
          Block* member = worklist.pop();
          if (member != loop_head) {
            for (uint j = 1; j < member->num_preds(); j++) {
              nloop->push_pred(member, j, worklist, this);
            }
          }
        }
      }
    }
  }

  // Create a member list for each loop consisting
  // of both blocks and (immediate child) loops.
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    CFGLoop* lp = block->_loop;
    if (lp == NULL) {
      // Not assigned to a loop. Add it to the method's pseudo loop.
      block->_loop = root_loop;
      lp = root_loop;
    }
    if (lp == root_loop || block != lp->head()) { // loop heads are already members
      lp->add_member(block);
    }
    if (lp != root_loop) {
      if (lp->parent() == NULL) {
        // Not a nested loop.
  // Create a member list for each loop consisting
  // of both blocks and (immediate child) loops.
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    CFGLoop* lp = block->_loop;
    if (lp == NULL) {
      // Not assigned to a loop. Add it to the method's pseudo loop.
      block->_loop = root_loop;
      lp = root_loop;
    }
    if (lp == root_loop || block != lp->head()) { // loop heads are already members
      lp->add_member(block);
    }
    if (lp != root_loop) {
      if (lp->parent() == NULL) {
        // Not a nested loop.  Make it a child of the method's pseudo loop.
        root_loop->add_nested_loop(lp);
      }
      if (block == lp->head()) {
        // Add nested loop to member list of parent loop.
        lp->parent()->add_member(lp);
      }
    }
  }

  return root_loop;
}

//------------------------------push_pred--------------------------------------
void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) {
  Node* pred_n = blk->pred(i);
  Block* pred = cfg->get_block_for_node(pred_n);
  CFGLoop *pred_loop = pred->_loop;
  if (pred_loop == NULL) {
    // Filter out blocks for non-single-entry loops.
    // For all reasonable loops, the head occurs before the tail in RPO.
    if (pred->_rpo > head()->_rpo) {
      pred->_loop = this;
      worklist.push(pred);
    }
  } else if (pred_loop != this) {
    // Nested loop.
    while (pred_loop->_parent != NULL && pred_loop->_parent != this) {
      pred_loop = pred_loop->_parent;
    }
    // Make pred's loop be a child
    if (pred_loop->_parent == NULL) {
      add_nested_loop(pred_loop);
      // Continue with loop entry predecessor.
      Block* pred_head = pred_loop->head();
      assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
      assert(pred_head != head(), "loop head in only one loop");
      push_pred(pred_head, LoopNode::EntryControl, worklist, cfg);
    } else {
      assert(pred_loop->_parent == this && _parent == NULL, "just checking");
    }
  }
}

//------------------------------add_nested_loop--------------------------------
// Make cl a child of the current loop in the loop tree.
void CFGLoop::add_nested_loop(CFGLoop* cl) {
  assert(_parent == NULL, "no parent yet");
  assert(cl != this, "not my own parent");
  cl->_parent = this;
  CFGLoop* ch = _child;
  if (ch == NULL) {
    _child = cl;
  } else {
    while (ch->_sibling != NULL) { ch = ch->_sibling; }
    ch->_sibling = cl;
  }
}

//------------------------------compute_loop_depth-----------------------------
// Store the loop depth in each CFGLoop object.
// Recursively walk the children to do the same for them.
void CFGLoop::compute_loop_depth(int depth) {
  _depth = depth;
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->compute_loop_depth(depth + 1);
    ch = ch->_sibling;
  }
}

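// Depth numbering example (illustrative): with the nesting
//   method { loop A { loop B } loop C }
// the method's pseudo loop gets depth 0, A and C get depth 1, and B gets
// depth 2.  compute_freq() below relies on depth 0 to identify the pseudo loop.
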
//------------------------------compute_freq-----------------------------------
// Compute the frequency of each block and loop, relative to a single entry
// into the dominating loop head.
void CFGLoop::compute_freq() {
  // Bottom up traversal of the loop tree (visit inner loops first).
  // Set loop head frequency to 1.0, then transitively
  // compute frequency for all successors in the loop,
  // as well as for each exit edge.  Inner loops are
  // treated as single blocks with loop exit targets
  // as the successor blocks.

  // Nested loops first
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->compute_freq();
    ch = ch->_sibling;
  }
  assert (_members.length() > 0, "no empty loops");
  Block* hd = head();
  hd->_freq = 1.0f;
  for (int i = 0; i < _members.length(); i++) {
    CFGElement* s = _members.at(i);
    float freq = s->_freq;
    if (s->is_block()) {
      Block* b = s->as_Block();
      for (uint j = 0; j < b->_num_succs; j++) {
        Block* sb = b->_succs[j];
        update_succ_freq(sb, freq * b->succ_prob(j));
      }
    } else {
      CFGLoop* lp = s->as_CFGLoop();
      assert(lp->_parent == this, "immediate child");
      for (int k = 0; k < lp->_exits.length(); k++) {
        Block* eb = lp->_exits.at(k).get_target();
        float prob = lp->_exits.at(k).get_prob();
        update_succ_freq(eb, freq * prob);
      }
    }
  }

  // For all loops other than the outer, "method" loop,
  // sum and normalize the exit probability. The "method" loop
  // should keep the initial exit probability of 1, so that
  // inner blocks do not get erroneously scaled.
  if (_depth != 0) {
    // Total the exit probabilities for this loop.
    float exits_sum = 0.0f;
    for (int i = 0; i < _exits.length(); i++) {
      exits_sum += _exits.at(i).get_prob();
    }

    // Normalize the exit probabilities. Until now, the
    // probabilities estimate the chance of exit during a single
    // loop iteration; afterward, they estimate the probability
    // of exit per loop entry.
    for (int i = 0; i < _exits.length(); i++) {
      Block* et = _exits.at(i).get_target();
      float new_prob = 0.0f;
      if (_exits.at(i).get_prob() > 0.0f) {
        new_prob = _exits.at(i).get_prob() / exits_sum;
      }
      BlockProbPair bpp(et, new_prob);
      _exits.at_put(i, bpp);
    }

    // Save the total, but guard against unreasonable probability,
    // as the value is used to estimate the loop trip count.
    // An infinite trip count would blur relative block
    // frequencies.
    if (exits_sum > 1.0f) exits_sum = 1.0f;
    if (exits_sum < PROB_MIN) exits_sum = PROB_MIN;
    _exit_prob = exits_sum;
  }
}

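// Worked example for compute_freq() above (illustrative numbers): an inner
// loop with two exits whose per-iteration probabilities are 0.01 and 0.03
// gets them normalized to 0.25 and 0.75, and _exit_prob becomes 0.04, the
// per-iteration chance of leaving the loop, i.e. an estimated trip count on
// the order of 1/0.04 = 25 (see trip_count() and scale_freq() below).
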
//------------------------------succ_prob-------------------------------------
// Determine the probability of reaching successor 'i' from the receiver block.
float Block::succ_prob(uint i) {
  int eidx = end_idx();
  Node *n = get_node(eidx); // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // Can only reach here if called after lcm. The original Op_If is gone,
      // so we attempt to infer the probability from one or both of the
      // successor blocks.
      assert(_num_succs == 2, "expecting 2 successors of a null check");
      // If either successor has only one predecessor, then the
      // probability estimate can be derived using the
      // relative frequency of the successor and this block.
      if (_succs[i]->num_preds() == 2) {
        return _succs[i]->_freq / _freq;
      } else if (_succs[1-i]->num_preds() == 2) {
        return 1 - (_succs[1-i]->_freq / _freq);
      } else {
        // Estimate using both successor frequencies
        float freq = _succs[i]->_freq;
        return freq / (freq + _succs[1-i]->_freq);
      }
    }
    op = n->as_Mach()->ideal_Opcode();
  }

  // Switch on branch type
  switch( op ) {
  case Op_CountedLoopEnd:
  case Op_If: {
    assert (i < 2, "just checking");
    // Conditionals pass on only part of their frequency
    float prob = n->as_MachIf()->_prob;
    assert(prob >= 0.0 && prob <= 1.0, "out of range probability");
    // If succ[i] is the FALSE branch, invert path info
    if( get_node(i + eidx + 1)->Opcode() == Op_IfFalse ) {
      return 1.0f - prob; // not taken
    } else {
      return prob; // taken
    }
  }

  case Op_Jump:
    // Divide the frequency between all successors evenly
    return 1.0f/_num_succs;

  case Op_Catch: {
    const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
    if (ci->_con == CatchProjNode::fall_through_index) {
      // Fall-thru path gets the lion's share.
      return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
    } else {
      // Presume exceptional paths are equally unlikely
      return PROB_UNLIKELY_MAG(5);
    }
  }

  case Op_Root:
  case Op_Goto:
    // Pass frequency straight thru to target
    return 1.0f;

  case Op_NeverBranch:
    return 0.0f;

  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    // Do not push out freq to root block
    return 0.0f;

  default:
    ShouldNotReachHere();
  }

  return 0.0f;
}

//------------------------------num_fall_throughs-----------------------------
// Return the number of fall-through candidates for a block
int Block::num_fall_throughs() {
  int eidx = end_idx();
  Node *n = get_node(eidx); // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // In theory, either side can fall-thru; for simplicity's sake,
      // let's say only the false branch can now.
      return 1;
    }
    op = n->as_Mach()->ideal_Opcode();
  }

  // Switch on branch type
  switch( op ) {
  case Op_CountedLoopEnd:
  case Op_If:
    return 2;

  case Op_Root:
  case Op_Goto:
    return 1;

  case Op_Catch: {
    for (uint i = 0; i < _num_succs; i++) {
      const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
      if (ci->_con == CatchProjNode::fall_through_index) {
        return 1;
      }
    }
    return 0;
  }

  case Op_Jump:
  case Op_NeverBranch:
  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    return 0;

  default:
    ShouldNotReachHere();
  }

  return 0;
}

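// Note: succ_fall_through() below answers the per-successor form of the same
// question: either projection of an If/CountedLoopEnd may fall through, a
// MachNullCheck only through its IfFalse projection, a Catch only through its
// fall_through_index projection, and Jump/Return/TailCall-like blocks never do.
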
//------------------------------succ_fall_through-----------------------------
// Return true if a specific successor could be fall-through target.
bool Block::succ_fall_through(uint i) {
  int eidx = end_idx();
  Node *n = get_node(eidx); // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // In theory, either side can fall-thru; for simplicity's sake,
      // let's say only the false branch can now.
      return get_node(i + eidx + 1)->Opcode() == Op_IfFalse;
    }
    op = n->as_Mach()->ideal_Opcode();
  }

  // Switch on branch type
  switch( op ) {
  case Op_CountedLoopEnd:
  case Op_If:
  case Op_Root:
  case Op_Goto:
    return true;

  case Op_Catch: {
    const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
    return ci->_con == CatchProjNode::fall_through_index;
  }

  case Op_Jump:
  case Op_NeverBranch:
  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    return false;

  default:
    ShouldNotReachHere();
  }

  return false;
}

//------------------------------update_uncommon_branch------------------------
// Update the probability of a two-branch to be uncommon
void Block::update_uncommon_branch(Block* ub) {
  int eidx = end_idx();
  Node *n = get_node(eidx); // Get ending Node

  int op = n->as_Mach()->ideal_Opcode();

  assert(op == Op_CountedLoopEnd || op == Op_If, "must be a If");
  assert(num_fall_throughs() == 2, "must be a two way branch block");

  // Which successor is ub?
  uint s;
  for (s = 0; s < _num_succs; s++) {
    if (_succs[s] == ub) break;
  }
  assert(s < 2, "uncommon successor must be found");

  // If ub is the true path, make the probability small; else
  // ub is the false path, and make the probability large
  bool invert = (get_node(s + eidx + 1)->Opcode() == Op_IfFalse);

  // Get existing probability
  float p = n->as_MachIf()->_prob;

  if (invert) p = 1.0 - p;
  if (p > PROB_MIN) {
    p = PROB_MIN;
  }
  if (invert) p = 1.0 - p;

  n->as_MachIf()->_prob = p;
}

//------------------------------update_succ_freq-------------------------------
// Update the appropriate frequency associated with block 'b', a successor of
// a block in this loop.
void CFGLoop::update_succ_freq(Block* b, float freq) {
  if (b->_loop == this) {
    if (b == head()) {
      // back branch within the loop
      // Do nothing now, the loop carried frequency will be
      // adjusted later in scale_freq().
    } else {
      // simple branch within the loop
      b->_freq += freq;
    }
  } else if (!in_loop_nest(b)) {
    // branch is exit from this loop
    BlockProbPair bpp(b, freq);
    _exits.append(bpp);
  } else {
    // branch into nested loop
    CFGLoop* ch = b->_loop;
    ch->_freq += freq;
  }
}

//------------------------------in_loop_nest-----------------------------------
// Determine if block b is in the receiver's loop nest.
bool CFGLoop::in_loop_nest(Block* b) {
  int depth = _depth;
  CFGLoop* b_loop = b->_loop;
  int b_depth = b_loop->_depth;
  if (depth == b_depth) {
    return true;
  }
  while (b_depth > depth) {
    b_loop = b_loop->_parent;
    b_depth = b_loop->_depth;
  }
  return b_loop == this;
}

//------------------------------scale_freq-------------------------------------
// Scale frequency of loops and blocks by trip counts from outer loops
// Do a top down traversal of loop tree (visit outer loops first.)
void CFGLoop::scale_freq() {
  float loop_freq = _freq * trip_count();
  _freq = loop_freq;
  for (int i = 0; i < _members.length(); i++) {
    CFGElement* s = _members.at(i);
    float block_freq = s->_freq * loop_freq;
    if (g_isnan(block_freq) || block_freq < MIN_BLOCK_FREQUENCY)
      block_freq = MIN_BLOCK_FREQUENCY;
    s->_freq = block_freq;
  }
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->scale_freq();
    ch = ch->_sibling;
  }
}

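// Illustrative numbers for scale_freq(): if an inner loop's entry frequency
// has already been scaled to 0.5 by its parent and its estimated trip count
// is 10, loop_freq becomes 5, so a member block executed 0.8 times per
// iteration ends up with frequency 4 (NaN or values below MIN_BLOCK_FREQUENCY
// are clamped to MIN_BLOCK_FREQUENCY).
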
// Frequency of outer loop
float CFGLoop::outer_loop_freq() const {
  if (_child != NULL) {
    return _child->_freq;
  }
  return _freq;
}

#ifndef PRODUCT
//------------------------------dump_tree--------------------------------------
void CFGLoop::dump_tree() const {
  dump();
  if (_child != NULL) _child->dump_tree();
  if (_sibling != NULL) _sibling->dump_tree();
}

//------------------------------dump-------------------------------------------
void CFGLoop::dump() const {
  for (int i = 0; i < _depth; i++) tty->print(" ");
  tty->print("%s: %d trip_count: %6.0f freq: %6.0f\n",
             _depth == 0 ? "Method" : "Loop", _id, trip_count(), _freq);
  for (int i = 0; i < _depth; i++) tty->print(" ");
  tty->print(" members:");
  int k = 0;
  for (int i = 0; i < _members.length(); i++) {
    if (k++ >= 6) {
      tty->print("\n ");
      for (int j = 0; j < _depth+1; j++) tty->print(" ");
      k = 0;
    }
    CFGElement *s = _members.at(i);
    if (s->is_block()) {
      Block *b = s->as_Block();
      tty->print(" B%d(%6.3f)", b->_pre_order, b->_freq);
    } else {
      CFGLoop* lp = s->as_CFGLoop();
      tty->print(" L%d(%6.3f)", lp->_id, lp->_freq);
    }
  }
  tty->print("\n");
  for (int i = 0; i < _depth; i++) tty->print(" ");
  tty->print(" exits: ");
  k = 0;
  for (int i = 0; i < _exits.length(); i++) {
    if (k++ >= 7) {
      tty->print("\n ");
      for (int j = 0; j < _depth+1; j++) tty->print(" ");
      k = 0;
    }
    Block *blk = _exits.at(i).get_target();
    float prob = _exits.at(i).get_prob();
    tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100));
  }
  tty->print("\n");
}
#endif