Path: blob/jdk8u272-b10-aarch32-20201026/hotspot/src/share/vm/opto/escape.cpp
83404 views
/*
 * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/compileLog.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"

// Construct an empty ConnectionGraph sized for the current compilation.
// Pre-registers two distinguished JavaObject nodes used throughout EA:
//   phantom_obj - the "unknown" object (mapped from C->top()), GlobalEscape;
//   null_obj    - the null constant (ConP #NULL), NoEscape.
// The ConN #NULL node (when compressed oops are on) is mapped to the same
// null_obj rather than getting its own PointsTo node.
ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
  _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
  _in_worklist(C->comp_arena()),
  _next_pidx(0),
  _collecting(true),
  _verify(false),
  _compile(C),
  _igvn(igvn),
  _node_map(C->comp_arena()) {
  // Add unknown java object.
  add_java_object(C->top(), PointsToNode::GlobalEscape);
  phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
  // Add ConP(#NULL) and ConN(#NULL) nodes.
  Node* oop_null = igvn->zerocon(T_OBJECT);
  assert(oop_null->_idx < nodes_size(), "should be created already");
  add_java_object(oop_null, PointsToNode::NoEscape);
  null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  if (UseCompressedOops) {
    Node* noop_null = igvn->zerocon(T_NARROWOOP);
    assert(noop_null->_idx < nodes_size(), "should be created already");
    map_ideal_node(noop_null, null_obj);
  }
  _pcmp_neq = NULL; // Should be initialized
  _pcmp_eq  = NULL;
}

// Quick screen: returns true only if the compilation contains macro nodes
// escape analysis could actually benefit (allocations, locks on non-trivial
// objects, or boxing calls). Used to skip EA entirely otherwise.
bool ConnectionGraph::has_candidates(Compile *C) {
  // EA brings benefits only when the code has allocations and/or locks which
  // are represented by ideal Macro nodes.
  int cnt = C->macro_count();
  for (int i = 0; i < cnt; i++) {
    Node *n = C->macro_node(i);
    if (n->is_Allocate())
      return true;
    if (n->is_Lock()) {
      Node* obj = n->as_Lock()->obj_node()->uncast();
      if (!(obj->is_Parm() || obj->is_Con()))
        return true;
    }
    if (n->is_CallStaticJava() &&
        n->as_CallStaticJava()->is_boxing_method()) {
      return true;
    }
  }
  return false;
}

// Driver: build the connection graph and, if any objects were found to be
// non-escaping, attach the graph to the compilation for later use.
void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
  Compile::TracePhase t2("escapeAnalysis", &Phase::_t_escapeAnalysis, true);
  ResourceMark rm;

  // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
  // to create space for them in ConnectionGraph::_nodes[].
  Node* oop_null = igvn->zerocon(T_OBJECT);
  Node* noop_null = igvn->zerocon(T_NARROWOOP);
  ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn);
  // Perform escape analysis
  if (congraph->compute_escape()) {
    // There are non escaping objects.
    C->set_congraph(congraph);
  }
  // Cleanup.
  if (oop_null->outcnt() == 0)
    igvn->hash_delete(oop_null);
  if (noop_null->outcnt() == 0)
    igvn->hash_delete(noop_null);
}

// Main escape-analysis pass. Phases (mirroring the numbered comments below):
//   1. populate the connection graph from the ideal graph,
//   2. propagate references until a fixed point (complete_connection_graph),
//   3. adjust scalar-replaceable state of non-escaping allocations,
//   4. optimize the ideal graph using the EA results,
//   5. split unique memory types for scalar-replaceable allocations.
// Returns true if at least one non-escaping object was found.
bool ConnectionGraph::compute_escape() {
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;

  // Worklists used by EA.
  Unique_Node_List delayed_worklist;
  GrowableArray<Node*> alloc_worklist;
  GrowableArray<Node*> ptr_cmp_worklist;
  GrowableArray<Node*> storestore_worklist;
  GrowableArray<PointsToNode*>   ptnodes_worklist;
  GrowableArray<JavaObjectNode*> java_objects_worklist;
  GrowableArray<JavaObjectNode*> non_escaped_worklist;
  GrowableArray<FieldNode*>      oop_fields_worklist;
  DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )

  { Compile::TracePhase t3("connectionGraph", &Phase::_t_connectionGraph, true);

  // 1. Populate Connection Graph (CG) with PointsTo nodes.
  ideal_nodes.map(C->live_nodes(), NULL);  // preallocate space
  // Initialize worklist
  if (C->root() != NULL) {
    ideal_nodes.push(C->root());
  }
  // Processed ideal nodes are unique on ideal_nodes list
  // but several ideal nodes are mapped to the phantom_obj.
  // To avoid duplicated entries on the following worklists
  // add the phantom_obj only once to them.
  ptnodes_worklist.append(phantom_obj);
  java_objects_worklist.append(phantom_obj);
  for( uint next = 0; next < ideal_nodes.size(); ++next ) {
    Node* n = ideal_nodes.at(next);
    // Create PointsTo nodes and add them to Connection Graph. Called
    // only once per ideal node since ideal_nodes is Unique_Node list.
    add_node_to_connection_graph(n, &delayed_worklist);
    PointsToNode* ptn = ptnode_adr(n->_idx);
    if (ptn != NULL && ptn != phantom_obj) {
      ptnodes_worklist.append(ptn);
      if (ptn->is_JavaObject()) {
        java_objects_worklist.append(ptn->as_JavaObject());
        if ((n->is_Allocate() || n->is_CallStaticJava()) &&
            (ptn->escape_state() < PointsToNode::GlobalEscape)) {
          // Only allocations and java static calls results are interesting.
          non_escaped_worklist.append(ptn->as_JavaObject());
        }
      } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
        oop_fields_worklist.append(ptn->as_Field());
      }
    }
    if (n->is_MergeMem()) {
      // Collect all MergeMem nodes to add memory slices for
      // scalar replaceable objects in split_unique_types().
      _mergemem_worklist.append(n->as_MergeMem());
    } else if (OptimizePtrCompare && n->is_Cmp() &&
               (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {
      // Collect compare pointers nodes.
      ptr_cmp_worklist.append(n);
    } else if (n->is_MemBarStoreStore()) {
      // Collect all MemBarStoreStore nodes so that depending on the
      // escape status of the associated Allocate node some of them
      // may be eliminated.
      storestore_worklist.append(n);
    } else if (n->is_MemBar() && (n->Opcode() == Op_MemBarRelease) &&
               (n->req() > MemBarNode::Precedent)) {
      record_for_optimizer(n);
#ifdef ASSERT
    } else if (n->is_AddP()) {
      // Collect address nodes for graph verification.
      addp_worklist.append(n);
#endif
    }
    // Push all users on the worklist (ideal_nodes is a Unique_Node_List,
    // so each node is still processed only once).
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i);   // Get user
      ideal_nodes.push(m);
    }
  }
  if (non_escaped_worklist.length() == 0) {
    _collecting = false;
    return false; // Nothing to do.
  }
  // Add final simple edges to graph.
  while(delayed_worklist.size() > 0) {
    Node* n = delayed_worklist.pop();
    add_final_edges(n);
  }
  int ptnodes_length = ptnodes_worklist.length();

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that no new simple edges could be created and all
    // local vars has edges.
    _verify = true;
    for (int next = 0; next < ptnodes_length; ++next) {
      PointsToNode* ptn = ptnodes_worklist.at(next);
      add_final_edges(ptn->ideal_node());
      if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
        ptn->dump();
        assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
      }
    }
    _verify = false;
  }
#endif
  // Bytecode analyzer BCEscapeAnalyzer, used for Call nodes
  // processing, calls to CI to resolve symbols (types, fields, methods)
  // referenced in bytecode. During symbol resolution VM may throw
  // an exception which CI cleans and converts to compilation failure.
  if (C->failing())  return false;

  // 2. Finish Graph construction by propagating references to all
  //    java objects through graph.
  if (!complete_connection_graph(ptnodes_worklist, non_escaped_worklist,
                                 java_objects_worklist, oop_fields_worklist)) {
    // All objects escaped or hit time or iterations limits.
    _collecting = false;
    return false;
  }

  // 3. Adjust scalar_replaceable state of nonescaping objects and push
  //    scalar replaceable allocations on alloc_worklist for processing
  //    in split_unique_types().
  int non_escaped_length = non_escaped_worklist.length();
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      n->as_Allocate()->_is_non_escaping = noescape;
    }
    if (n->is_CallStaticJava()) {
      n->as_CallStaticJava()->_is_non_escaping = noescape;
    }
    if (noescape && ptn->scalar_replaceable()) {
      adjust_scalar_replaceable_state(ptn);
      if (ptn->scalar_replaceable()) {
        alloc_worklist.append(ptn->ideal_node());
      }
    }
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that graph is complete - no new edges could be added or needed.
    verify_connection_graph(ptnodes_worklist, non_escaped_worklist,
                            java_objects_worklist, addp_worklist);
  }
  assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
  assert(null_obj->escape_state() == PointsToNode::NoEscape &&
         null_obj->edge_count() == 0 &&
         !null_obj->arraycopy_src() &&
         !null_obj->arraycopy_dst(), "sanity");
#endif

  _collecting = false;

  } // TracePhase t3("connectionGraph")

  // 4. Optimize ideal graph based on EA information.
  bool has_non_escaping_obj = (non_escaped_worklist.length() > 0);
  if (has_non_escaping_obj) {
    optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
  }

#ifndef PRODUCT
  if (PrintEscapeAnalysis) {
    dump(ptnodes_worklist); // Dump ConnectionGraph
  }
#endif

  bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
#ifdef ASSERT
  if (VerifyConnectionGraph) {
    int alloc_length = alloc_worklist.length();
    for (int next = 0; next < alloc_length; ++next) {
      Node* n = alloc_worklist.at(next);
      PointsToNode* ptn = ptnode_adr(n->_idx);
      assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
    }
  }
#endif

  // 5. Separate memory graph for scalar replaceable allcations.
  if (has_scalar_replaceable_candidates &&
      C->AliasLevel() >= 3 && EliminateAllocations) {
    // Now use the escape information to create unique types for
    // scalar replaceable objects.
    split_unique_types(alloc_worklist);
    if (C->failing())  return false;
    C->print_method(PHASE_AFTER_EA, 2);

#ifdef ASSERT
  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
    tty->print("=== No allocations eliminated for ");
    C->method()->print_short_name();
    if(!EliminateAllocations) {
      tty->print(" since EliminateAllocations is off ===");
    } else if(!has_scalar_replaceable_candidates) {
      tty->print(" since there are no scalar replaceable candidates ===");
    } else if(C->AliasLevel() < 3) {
      tty->print(" since AliasLevel < 3 ===");
    }
    tty->cr();
#endif
  }
  return has_non_escaping_obj;
}

// Utility function for nodes that load an object
void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
  // ThreadLocal has RawPtr type.
  const Type* t = _igvn->type(n);
  if (t->make_ptr() != NULL) {
    Node* adr = 
n->in(MemNode::Address);
#ifdef ASSERT
    if (!adr->is_AddP()) {
      assert(_igvn->type(adr)->isa_rawptr(), "sanity");
    } else {
      assert((ptnode_adr(adr->_idx) == NULL ||
              ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
    }
#endif
    add_local_var_and_edge(n, PointsToNode::NoEscape,
                           adr, delayed_worklist);
  }
}

// Populate Connection Graph with PointsTo nodes and create simple
// connection graph edges.
// First pass over an ideal node: create its PointsTo node and any edges
// whose inputs are already registered. Nodes whose edges cannot be created
// yet (inputs not processed, Phi/CMove back edges) are pushed on
// delayed_worklist and finished later by add_final_edges().
void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  assert(!_verify, "this method sould not be called for verification");
  PhaseGVN* igvn = _igvn;
  uint n_idx = n->_idx;
  PointsToNode* n_ptn = ptnode_adr(n_idx);
  if (n_ptn != NULL)
    return; // No need to redefine PointsTo node during first iteration.

  if (n->is_Call()) {
    // Arguments to allocation and locking don't escape.
    if (n->is_AbstractLock()) {
      // Put Lock and Unlock nodes on IGVN worklist to process them during
      // first IGVN optimization when escape information is still available.
      record_for_optimizer(n);
    } else if (n->is_Allocate()) {
      add_call_node(n->as_Call());
      record_for_optimizer(n);
    } else {
      if (n->is_CallStaticJava()) {
        const char* name = n->as_CallStaticJava()->_name;
        if (name != NULL && strcmp(name, "uncommon_trap") == 0)
          return; // Skip uncommon traps
      }
      // Don't mark as processed since call's arguments have to be processed.
      delayed_worklist->push(n);
      // Check if a call returns an object.
      if ((n->as_Call()->returns_pointer() &&
           n->as_Call()->proj_out(TypeFunc::Parms) != NULL) ||
          (n->is_CallStaticJava() &&
           n->as_CallStaticJava()->is_boxing_method())) {
        add_call_node(n->as_Call());
      }
    }
    return;
  }
  // Put this check here to process call arguments since some call nodes
  // point to phantom_obj.
  if (n_ptn == phantom_obj || n_ptn == null_obj)
    return; // Skip predefined nodes.

  int opcode = n->Opcode();
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      // Field nodes are created for all field types. They are used in
      // adjust_scalar_replaceable_state() and split_unique_types().
      // Note, non-oop fields will have only base edges in Connection
      // Graph because such fields are not used for oop loads and stores.
      int offset = address_offset(n, igvn);
      add_field(n, PointsToNode::NoEscape, offset);
      if (ptn_base == NULL) {
        delayed_worklist->push(n); // Process it later.
      } else {
        n_ptn = ptnode_adr(n_idx);
        add_base(n_ptn->as_Field(), ptn_base);
      }
      break;
    }
    case Op_CastX2P: {
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      add_local_var_and_edge(n, PointsToNode::NoEscape,
                             n->in(1), delayed_worklist);
      break;
    }
    case Op_CMoveP: {
      add_local_var(n, PointsToNode::NoEscape);
      // Do not add edges during first iteration because some could be
      // not defined yet.
      delayed_worklist->push(n);
      break;
    }
    case Op_ConP:
    case Op_ConN:
    case Op_ConNKlass: {
      // assume all oop constants globally escape except for null
      PointsToNode::EscapeState es;
      const Type* t = igvn->type(n);
      if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
        es = PointsToNode::NoEscape;
      } else {
        es = PointsToNode::GlobalEscape;
      }
      add_java_object(n, es);
      break;
    }
    case Op_CreateEx: {
      // assume that all exception objects globally escape
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadKlass:
    case Op_LoadNKlass: {
      // Unknown class is loaded
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadP:
    case Op_LoadN:
    case Op_LoadPLocked: {
      add_objload_to_connection_graph(n, delayed_worklist);
      break;
    }
    case Op_Parm: {
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_PartialSubtypeCheck: {
      // Produces Null or notNull and is used in only in CmpP so
      // phantom_obj could be used.
      map_ideal_node(n, phantom_obj); // Result is unknown
      break;
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = n->as_Phi()->type();
      if (t->make_ptr() != NULL) {
        add_local_var(n, PointsToNode::NoEscape);
        // Do not add edges during first iteration because some could be
        // not defined yet.
        delayed_worklist->push(n);
      }
      break;
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
          n->in(0)->as_Call()->returns_pointer()) {
        add_local_var_and_edge(n, PointsToNode::NoEscape,
                               n->in(0), delayed_worklist);
      }
      break;
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      if (n->req() > TypeFunc::Parms &&
          igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
        // Treat Return value as LocalVar with GlobalEscape escape state.
        add_local_var_and_edge(n, PointsToNode::GlobalEscape,
                               n->in(TypeFunc::Parms), delayed_worklist);
      }
      break;
    }
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      add_objload_to_connection_graph(n, delayed_worklist);
      // fallthrough
    }
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass:
    case Op_StorePConditional:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN: {
      Node* adr = n->in(MemNode::Address);
      const Type *adr_type = igvn->type(adr);
      adr_type = adr_type->make_ptr();
      if (adr_type == NULL) {
        break; // skip dead nodes
      }
      if (adr_type->isa_oopptr() ||
          (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) &&
          (adr_type == TypeRawPtr::NOTNULL &&
           adr->in(AddPNode::Address)->is_Proj() &&
           adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
        delayed_worklist->push(n); // Process it later.
#ifdef ASSERT
        assert(adr->is_AddP(), "expecting an AddP");
        if (adr_type == TypeRawPtr::NOTNULL) {
          // Verify a raw address for a store captured by Initialize node.
          int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
          assert(offs != Type::OffsetBot, "offset must be a constant");
        }
#endif
      } else {
        // Ignore copy the displaced header to the BoxNode (OSR compilation).
        if (adr->is_BoxLock())
          break;
        // Stored value escapes in unsafe access.
        if ((opcode == Op_StoreP) && (adr_type == TypeRawPtr::BOTTOM)) {
          // Pointer stores in G1 barriers looks like unsafe access.
          // Ignore such stores to be able scalar replace non-escaping
          // allocations.
          if (UseG1GC && adr->is_AddP()) {
            Node* base = get_addp_base(adr);
            if (base->Opcode() == Op_LoadP &&
                base->in(MemNode::Address)->is_AddP()) {
              adr = base->in(MemNode::Address);
              Node* tls = get_addp_base(adr);
              if (tls->Opcode() == Op_ThreadLocal) {
                int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
                if (offs == in_bytes(JavaThread::satb_mark_queue_offset() +
                                     PtrQueue::byte_offset_of_buf())) {
                  break; // G1 pre barrier previous oop value store.
                }
                if (offs == in_bytes(JavaThread::dirty_card_queue_offset() +
                                     PtrQueue::byte_offset_of_buf())) {
                  break; // G1 post barrier card address store.
                }
              }
            }
          }
          delayed_worklist->push(n); // Process unsafe access later.
          break;
        }
#ifdef ASSERT
        n->dump(1);
        assert(false, "not unsafe or G1 barrier raw StoreP");
#endif
      }
      break;
    }
    case Op_AryEq:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_EncodeISOArray: {
      add_local_var(n, PointsToNode::ArgEscape);
      delayed_worklist->push(n); // Process it later.
      break;
    }
    case Op_ThreadLocal: {
      add_java_object(n, PointsToNode::ArgEscape);
      break;
    }
    default:
      ; // Do nothing for nodes not related to EA.
  }
  return;
}

#ifdef ASSERT
#define ELSE_FAIL(name) \
  /* Should not be called for not pointer type. 
*/ \
  n->dump(1); \
  assert(false, name); \
  break;
#else
#define ELSE_FAIL(name) \
  break;
#endif

// Add final simple edges to graph.
// Second pass over delayed nodes: by now every input has a registered
// PointsTo node, so the remaining edges can be created. In _verify mode
// the same code re-runs to check no edge was missed.
void ConnectionGraph::add_final_edges(Node *n) {
  PointsToNode* n_ptn = ptnode_adr(n->_idx);
#ifdef ASSERT
  if (_verify && n_ptn->is_JavaObject())
    return; // This method does not change graph for JavaObject.
#endif

  if (n->is_Call()) {
    process_call_arguments(n->as_Call());
    return;
  }
  assert(n->is_Store() || n->is_LoadStore() ||
         (n_ptn != NULL) && (n_ptn->ideal_node() != NULL),
         "node should be registered already");
  int opcode = n->Opcode();
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      assert(ptn_base != NULL, "field's base should be registered");
      add_base(n_ptn->as_Field(), ptn_base);
      break;
    }
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      add_local_var_and_edge(n, PointsToNode::NoEscape,
                             n->in(1), NULL);
      break;
    }
    case Op_CMoveP: {
      for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in == NULL)
          continue; // ignore NULL
        Node* uncast_in = in->uncast();
        if (uncast_in->is_top() || uncast_in == n)
          continue; // ignore top or inputs which go back this node
        PointsToNode* ptn = ptnode_adr(in->_idx);
        assert(ptn != NULL, "node should be registered");
        add_edge(n_ptn, ptn);
      }
      break;
    }
    case Op_LoadP:
    case Op_LoadN:
    case Op_LoadPLocked: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = _igvn->type(n);
      if (t->make_ptr() != NULL) {
        Node* adr = n->in(MemNode::Address);
        add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
        break;
      }
      ELSE_FAIL("Op_LoadP");
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = n->as_Phi()->type();
      if (t->make_ptr() != NULL) {
        for (uint i = 1; i < n->req(); i++) {
          Node* in = n->in(i);
          if (in == NULL)
            continue; // ignore NULL
          Node* uncast_in = in->uncast();
          if (uncast_in->is_top() || uncast_in == n)
            continue; // ignore top or inputs which go back this node
          PointsToNode* ptn = ptnode_adr(in->_idx);
          assert(ptn != NULL, "node should be registered");
          add_edge(n_ptn, ptn);
        }
        break;
      }
      ELSE_FAIL("Op_Phi");
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
          n->in(0)->as_Call()->returns_pointer()) {
        add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
        break;
      }
      ELSE_FAIL("Op_Proj");
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      if (n->req() > TypeFunc::Parms &&
          _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
        // Treat Return value as LocalVar with GlobalEscape escape state.
        add_local_var_and_edge(n, PointsToNode::GlobalEscape,
                               n->in(TypeFunc::Parms), NULL);
        break;
      }
      ELSE_FAIL("Op_Return");
    }
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass:
    case Op_StorePConditional:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      Node* adr = n->in(MemNode::Address);
      const Type *adr_type = _igvn->type(adr);
      adr_type = adr_type->make_ptr();
#ifdef ASSERT
      if (adr_type == NULL) {
        n->dump(1);
        assert(adr_type != NULL, "dead node should not be on list");
        break;
      }
#endif
      if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN) {
        add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
      }
      if (adr_type->isa_oopptr() ||
          (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) &&
          (adr_type == TypeRawPtr::NOTNULL &&
           adr->in(AddPNode::Address)->is_Proj() &&
           adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
        // Point Address to Value
        PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
        assert(adr_ptn != NULL &&
               adr_ptn->as_Field()->is_oop(), "node should be registered");
        Node *val = n->in(MemNode::ValueIn);
        PointsToNode* ptn = ptnode_adr(val->_idx);
        assert(ptn != NULL, "node should be registered");
        add_edge(adr_ptn, ptn);
        break;
      } else if ((opcode == Op_StoreP) && (adr_type == TypeRawPtr::BOTTOM)) {
        // Stored value escapes in unsafe access.
        Node *val = n->in(MemNode::ValueIn);
        PointsToNode* ptn = ptnode_adr(val->_idx);
        assert(ptn != NULL, "node should be registered");
        set_escape_state(ptn, PointsToNode::GlobalEscape);
        // Add edge to object for unsafe access with offset.
        PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
        assert(adr_ptn != NULL, "node should be registered");
        if (adr_ptn->is_Field()) {
          assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
          add_edge(adr_ptn, ptn);
        }
        break;
      }
      ELSE_FAIL("Op_StoreP");
    }
    case Op_AryEq:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_EncodeISOArray: {
      // char[] arrays passed to string intrinsic do not escape but
      // they are not scalar replaceable. Adjust escape state for them.
      // Start from in(2) edge since in(1) is memory edge.
      for (uint i = 2; i < n->req(); i++) {
        Node* adr = n->in(i);
        const Type* at = _igvn->type(adr);
        if (!adr->is_top() && at->isa_ptr()) {
          assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
                 at->isa_ptr() != NULL, "expecting a pointer");
          if (adr->is_AddP()) {
            adr = get_addp_base(adr);
          }
          PointsToNode* ptn = ptnode_adr(adr->_idx);
          assert(ptn != NULL, "node should be registered");
          add_edge(n_ptn, ptn);
        }
      }
      break;
    }
    default: {
      // This method should be called only for EA specific nodes which may
      // miss some edges when they were created.
#ifdef ASSERT
      n->dump(1);
#endif
      guarantee(false, "unknown node");
    }
  }
  return;
}

// Create the PointsTo node for a call that returns a pointer. The escape
// state of the result depends on the call kind: allocations are NoEscape
// (refined below), static calls use BCEscapeAnalyzer when a method is
// available, everything else is treated as GlobalEscape / phantom_obj.
void ConnectionGraph::add_call_node(CallNode* call) {
  assert(call->returns_pointer(), "only for call which returns pointer");
  uint call_idx = call->_idx;
  if (call->is_Allocate()) {
    Node* k = call->in(AllocateNode::KlassNode);
    const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
    assert(kt != NULL, "TypeKlassPtr required.");
    ciKlass* cik = kt->klass();
    PointsToNode::EscapeState es = PointsToNode::NoEscape;
    bool scalar_replaceable = true;
    if (call->is_AllocateArray()) {
      if (!cik->is_array_klass()) { // StressReflectiveCode
        es = PointsToNode::GlobalEscape;
      } else {
        int length = call->in(AllocateNode::ALength)->find_int_con(-1);
        if (length < 0 || length > EliminateAllocationArraySizeLimit) {
          // Not scalar replaceable if the length is not constant or too big.
          scalar_replaceable = false;
        }
      }
    } else {  // Allocate instance
      if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
          cik->is_subclass_of(_compile->env()->Reference_klass()) ||
          !cik->is_instance_klass() || // StressReflectiveCode
          cik->as_instance_klass()->has_finalizer()) {
        es = PointsToNode::GlobalEscape;
      }
    }
    add_java_object(call, es);
    PointsToNode* ptn = ptnode_adr(call_idx);
    if (!scalar_replaceable && ptn->scalar_replaceable()) {
      ptn->set_scalar_replaceable(false);
    }
  } else if (call->is_CallStaticJava()) {
    // Call nodes could be different types:
    //
    // 1. CallDynamicJavaNode (what happened during call is unknown):
    //
    //    - mapped to GlobalEscape JavaObject node if oop is returned;
    //
    //    - all oop arguments are escaping globally;
    //
    // 2. CallStaticJavaNode (execute bytecode analysis if possible):
    //
    //    - the same as CallDynamicJavaNode if can't do bytecode analysis;
    //
    //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
    //    - mapped to NoEscape JavaObject node if non-escaping object allocated
    //      during call is returned;
    //    - mapped to ArgEscape LocalVar node pointed to object arguments
    //      which are returned and does not escape during call;
    //
    //    - oop arguments escaping status is defined by bytecode analysis;
    //
    // For a static call, we know exactly what method is being called.
    // Use bytecode estimator to record whether the call's return value escapes.
    ciMethod* meth = call->as_CallJava()->method();
    if (meth == NULL) {
      const char* name = call->as_CallStaticJava()->_name;
      assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");
      // Returns a newly allocated unescaped object.
      add_java_object(call, PointsToNode::NoEscape);
      ptnode_adr(call_idx)->set_scalar_replaceable(false);
    } else if (meth->is_boxing_method()) {
      // Returns boxing object
      PointsToNode::EscapeState es;
      vmIntrinsics::ID intr = meth->intrinsic_id();
      if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
        // It does not escape if object is always allocated.
        es = PointsToNode::NoEscape;
      } else {
        // It escapes globally if object could be loaded from cache.
        es = PointsToNode::GlobalEscape;
      }
      add_java_object(call, es);
    } else {
      BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
      call_analyzer->copy_dependencies(_compile->dependencies());
      if 
(call_analyzer->is_return_allocated()) {
          // Returns a newly allocated unescaped object, simply
          // update dependency information.
          // Mark it as NoEscape so that objects referenced by
          // its fields will be marked as NoEscape at least.
          add_java_object(call, PointsToNode::NoEscape);
          ptnode_adr(call_idx)->set_scalar_replaceable(false);
        } else {
          // Determine whether any arguments are returned.
          const TypeTuple* d = call->tf()->domain();
          bool ret_arg = false;
          for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
            if (d->field_at(i)->isa_ptr() != NULL &&
                call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
              ret_arg = true;
              break;
            }
          }
          if (ret_arg) {
            // Result aliases one of the arguments: model it as a LocalVar
            // whose points-to edges are added when arguments are processed.
            add_local_var(call, PointsToNode::ArgEscape);
          } else {
            // Returns unknown object.
            map_ideal_node(call, phantom_obj);
          }
        }
      }
    } else {
      // An other type of call, assume the worst case:
      // returned value is unknown and globally escapes.
      assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
      map_ideal_node(call, phantom_obj);
    }
  }

// Update the connection graph for the outgoing arguments of 'call':
// set argument escape states and add points-to edges depending on the
// kind of call (leaf stub, arraycopy, static Java call, or unknown).
void ConnectionGraph::process_call_arguments(CallNode *call) {
    bool is_arraycopy = false;
    switch (call->Opcode()) {
#ifdef ASSERT
      case Op_Allocate:
      case Op_AllocateArray:
      case Op_Lock:
      case Op_Unlock:
        assert(false, "should be done already");
        break;
#endif
      case Op_CallLeafNoFP:
        // Detect arraycopy stubs by name; they need special edge handling below.
        is_arraycopy = (call->as_CallLeaf()->_name != NULL &&
                        strstr(call->as_CallLeaf()->_name, "arraycopy") != 0);
        // fall through
      case Op_CallLeaf: {
        // Stub calls, objects do not escape but they are not scale replaceable.
        // Adjust escape state for outgoing arguments.
        const TypeTuple * d = call->tf()->domain();
        bool src_has_oops = false;
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          const Type* at = d->field_at(i);
          Node *arg = call->in(i);
          const Type *aat = _igvn->type(arg);
          if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr())
            continue;
          if (arg->is_AddP()) {
            //
            // The inline_native_clone() case when the arraycopy stub is called
            // after the allocation before Initialize and CheckCastPP nodes.
            // Or normal arraycopy for object arrays case.
            //
            // Set AddP's base (Allocate) as not scalar replaceable since
            // pointer to the base (with offset) is passed as argument.
            //
            arg = get_addp_base(arg);
          }
          PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
          assert(arg_ptn != NULL, "should be registered");
          PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
          if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
            assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
                   aat->isa_ptr() != NULL, "expecting an Ptr");
            bool arg_has_oops = aat->isa_oopptr() &&
                                (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||
                                 (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()));
            if (i == TypeFunc::Parms) {
              src_has_oops = arg_has_oops;
            }
            //
            // src or dst could be j.l.Object when other is basic type array:
            //
            //   arraycopy(char[],0,Object*,0,size);
            //   arraycopy(Object*,0,char[],0,size);
            //
            // Don't add edges in such cases.
            //
            bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
                                         arg_has_oops && (i > TypeFunc::Parms);
#ifdef ASSERT
            // Whitelist of known leaf stubs; anything else reaching here is
            // an unexpected CallLeaf the analysis does not know how to model.
            if (!(is_arraycopy ||
                  (call->as_CallLeaf()->_name != NULL &&
                   (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
                    strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0)
                  ))) {
              call->dump();
              fatal(err_msg_res("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name));
            }
#endif
            // Always process arraycopy's destination object since
            // we need to add all possible edges to references in
            // source object.
            if (arg_esc >= PointsToNode::ArgEscape &&
                !arg_is_arraycopy_dest) {
              continue;
            }
            set_escape_state(arg_ptn, PointsToNode::ArgEscape);
            if (arg_is_arraycopy_dest) {
              Node* src = call->in(TypeFunc::Parms);
              if (src->is_AddP()) {
                src = get_addp_base(src);
              }
              PointsToNode* src_ptn = ptnode_adr(src->_idx);
              assert(src_ptn != NULL, "should be registered");
              if (arg_ptn != src_ptn) {
                // Special arraycopy edge:
                // A destination object's field can't have the source object
                // as base since objects escape states are not related.
                // Only escape state of destination object's fields affects
                // escape state of fields in source object.
                add_arraycopy(call, PointsToNode::ArgEscape, src_ptn, arg_ptn);
              }
            }
          }
        }
        break;
      }
      case Op_CallStaticJava: {
        // For a static call, we know exactly what method is being called.
        // Use bytecode estimator to record the call's escape affects
#ifdef ASSERT
        const char* name = call->as_CallStaticJava()->_name;
        assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
#endif
        ciMethod* meth = call->as_CallJava()->method();
        if ((meth != NULL) && meth->is_boxing_method()) {
          break; // Boxing methods do not modify any oops.
        }
        BCEscapeAnalyzer* call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL;
        // fall-through if not a Java method or no analyzer information
        if (call_analyzer != NULL) {
          PointsToNode* call_ptn = ptnode_adr(call->_idx);
          const TypeTuple* d = call->tf()->domain();
          for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
            const Type* at = d->field_at(i);
            int k = i - TypeFunc::Parms;
            Node* arg = call->in(i);
            PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
            if (at->isa_ptr() != NULL &&
                call_analyzer->is_arg_returned(k)) {
              // The call returns arguments.
              if (call_ptn != NULL) { // Is call's result used?
                assert(call_ptn->is_LocalVar(), "node should be registered");
                assert(arg_ptn != NULL, "node should be registered");
                add_edge(call_ptn, arg_ptn);
              }
            }
            if (at->isa_oopptr() != NULL &&
                arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
              if (!call_analyzer->is_arg_stack(k)) {
                // The argument global escapes
                set_escape_state(arg_ptn, PointsToNode::GlobalEscape);
              } else {
                set_escape_state(arg_ptn, PointsToNode::ArgEscape);
                if (!call_analyzer->is_arg_local(k)) {
                  // The argument itself doesn't escape, but any fields might
                  set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape);
                }
              }
            }
          }
          if (call_ptn != NULL && call_ptn->is_LocalVar()) {
            // The call returns arguments.
            assert(call_ptn->edge_count() > 0, "sanity");
            if (!call_analyzer->is_return_local()) {
              // Returns also unknown object.
              add_edge(call_ptn, phantom_obj);
            }
          }
          break;
        }
        // No analyzer information: fall through to the worst-case default.
      }
      default: {
        // Fall-through here if not a Java method or no analyzer information
        // or some other type of call, assume the worst case: all arguments
        // globally escape.
        const TypeTuple* d = call->tf()->domain();
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          const Type* at = d->field_at(i);
          if (at->isa_oopptr() != NULL) {
            Node* arg = call->in(i);
            if (arg->is_AddP()) {
              arg = get_addp_base(arg);
            }
            assert(ptnode_adr(arg->_idx) != NULL, "should be defined already");
            set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape);
          }
        }
      }
    }
  }


// Finish Graph construction.
bool ConnectionGraph::complete_connection_graph(
                         GrowableArray<PointsToNode*>&   ptnodes_worklist,
                         GrowableArray<JavaObjectNode*>& non_escaped_worklist,
                         GrowableArray<JavaObjectNode*>& java_objects_worklist,
                         GrowableArray<FieldNode*>&      oop_fields_worklist) {
  // Normally only 1-3 passes needed to build Connection Graph depending
  // on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.
  // Set limit to 20 to catch situation when something did go wrong and
  // bailout Escape Analysis.
  // Also limit build time to 20 sec (60 in debug VM), EscapeAnalysisTimeout flag.
#define CG_BUILD_ITER_LIMIT 20

  // Propagate GlobalEscape and ArgEscape escape states and check that
  // we still have non-escaping objects.
// The method pushes on _worklist
// Field nodes which reference phantom_object.
  if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
    return false; // Nothing to do.
  }
  // Now propagate references to all JavaObject nodes.
  int java_objects_length = java_objects_worklist.length();
  elapsedTimer time;
  bool timeout = false;
  int new_edges = 1;  // Non-zero to enter the loop at least once.
  int iterations = 0;
  do {
    while ((new_edges > 0) &&
           (iterations++ < CG_BUILD_ITER_LIMIT)) {
      double start_time = time.seconds();
      time.start();
      new_edges = 0;
      // Propagate references to phantom_object for nodes pushed on _worklist
      // by find_non_escaped_objects() and find_field_value().
      new_edges += add_java_object_edges(phantom_obj, false);
      for (int next = 0; next < java_objects_length; ++next) {
        JavaObjectNode* ptn = java_objects_worklist.at(next);
        new_edges += add_java_object_edges(ptn, true);

#define SAMPLE_SIZE 4
        if ((next % SAMPLE_SIZE) == 0) {
          // Each 4 iterations calculate how much time it will take
          // to complete graph construction.
          time.stop();
          // Poll for requests from shutdown mechanism to quiesce compiler
          // because Connection graph construction may take long time.
          CompileBroker::maybe_block();
          double stop_time = time.seconds();
          double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
          double time_until_end = time_per_iter * (double)(java_objects_length - next);
          if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
            timeout = true;
            break; // Timeout
          }
          start_time = stop_time;
          time.start();
        }
#undef SAMPLE_SIZE

      }
      if (timeout) break;
      if (new_edges > 0) {
        // Update escape states on each iteration if graph was updated.
        if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
          return false; // Nothing to do.
        }
      }
      time.stop();
      if (time.seconds() >= EscapeAnalysisTimeout) {
        timeout = true;
        break;
      }
    }
    if ((iterations < CG_BUILD_ITER_LIMIT) && !timeout) {
      time.start();
      // Find fields which have unknown value.
      int fields_length = oop_fields_worklist.length();
      for (int next = 0; next < fields_length; next++) {
        FieldNode* field = oop_fields_worklist.at(next);
        if (field->edge_count() == 0) {
          new_edges += find_field_value(field);
          // This code may add new edges to phantom_object.
          // Need another cycle to propagate references to phantom_object.
        }
      }
      time.stop();
      if (time.seconds() >= EscapeAnalysisTimeout) {
        timeout = true;
        break;
      }
    } else {
      new_edges = 0; // Bailout
    }
  } while (new_edges > 0);

  // Bailout if passed limits.
  if ((iterations >= CG_BUILD_ITER_LIMIT) || timeout) {
    Compile* C = _compile;
    if (C->log() != NULL) {
      C->log()->begin_elem("connectionGraph_bailout reason='reached ");
      C->log()->text("%s", timeout ? "time" : "iterations");
      C->log()->end_elem(" limit'");
    }
    assert(ExitEscapeAnalysisOnTimeout, err_msg_res("infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d",
           time.seconds(), iterations, nodes_size(), ptnodes_worklist.length()));
    // Possible infinite build_connection_graph loop,
    // bailout (no changes to ideal graph were made).
    return false;
  }
#ifdef ASSERT
  if (Verbose && PrintEscapeAnalysis) {
    tty->print_cr("EA: %d iterations to build connection graph with %d nodes and worklist size %d",
                  iterations, nodes_size(), ptnodes_worklist.length());
  }
#endif

#undef CG_BUILD_ITER_LIMIT

  // Find fields initialized by NULL for non-escaping Allocations.
  int non_escaped_length = non_escaped_worklist.length();
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    PointsToNode::EscapeState es = ptn->escape_state();
    assert(es <= PointsToNode::ArgEscape, "sanity");
    if (es == PointsToNode::NoEscape) {
      if (find_init_values(ptn, null_obj, _igvn) > 0) {
        // Adding references to NULL object does not change escape states
        // since it does not escape. Also no fields are added to NULL object.
        add_java_object_edges(null_obj, false);
      }
    }
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      // The object allocated by this Allocate node will never be
      // seen by an other thread. Mark it so that when it is
      // expanded no MemBarStoreStore is added.
      InitializeNode* ini = n->as_Allocate()->initialization();
      if (ini != NULL)
        ini->set_does_not_escape();
    }
  }
  return true; // Finished graph construction.
}

// Propagate GlobalEscape and ArgEscape escape states to all nodes
// and check that we still have non-escaping java objects.
bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
                                               GrowableArray<JavaObjectNode*>& non_escaped_worklist) {
  GrowableArray<PointsToNode*> escape_worklist;
  // First, put all nodes with GlobalEscape and ArgEscape states on worklist.
  int ptnodes_length = ptnodes_worklist.length();
  for (int next = 0; next < ptnodes_length; ++next) {
    PointsToNode* ptn = ptnodes_worklist.at(next);
    if (ptn->escape_state() >= PointsToNode::ArgEscape ||
        ptn->fields_escape_state() >= PointsToNode::ArgEscape) {
      escape_worklist.push(ptn);
    }
  }
  // Set escape states to referenced nodes (edges list).
  while (escape_worklist.length() > 0) {
    PointsToNode* ptn = escape_worklist.pop();
    PointsToNode::EscapeState es = ptn->escape_state();
    PointsToNode::EscapeState field_es = ptn->fields_escape_state();
    if (ptn->is_Field() && ptn->as_Field()->is_oop() &&
        es >= PointsToNode::ArgEscape) {
      // GlobalEscape or ArgEscape state of field means it has unknown value.
      if (add_edge(ptn, phantom_obj)) {
        // New edge was added
        add_field_uses_to_worklist(ptn->as_Field());
      }
    }
    for (EdgeIterator i(ptn); i.has_next(); i.next())
{1246PointsToNode* e = i.get();1247if (e->is_Arraycopy()) {1248assert(ptn->arraycopy_dst(), "sanity");1249// Propagate only fields escape state through arraycopy edge.1250if (e->fields_escape_state() < field_es) {1251set_fields_escape_state(e, field_es);1252escape_worklist.push(e);1253}1254} else if (es >= field_es) {1255// fields_escape_state is also set to 'es' if it is less than 'es'.1256if (e->escape_state() < es) {1257set_escape_state(e, es);1258escape_worklist.push(e);1259}1260} else {1261// Propagate field escape state.1262bool es_changed = false;1263if (e->fields_escape_state() < field_es) {1264set_fields_escape_state(e, field_es);1265es_changed = true;1266}1267if ((e->escape_state() < field_es) &&1268e->is_Field() && ptn->is_JavaObject() &&1269e->as_Field()->is_oop()) {1270// Change escape state of referenced fileds.1271set_escape_state(e, field_es);1272es_changed = true;;1273} else if (e->escape_state() < es) {1274set_escape_state(e, es);1275es_changed = true;;1276}1277if (es_changed) {1278escape_worklist.push(e);1279}1280}1281}1282}1283// Remove escaped objects from non_escaped list.1284for (int next = non_escaped_worklist.length()-1; next >= 0 ; --next) {1285JavaObjectNode* ptn = non_escaped_worklist.at(next);1286if (ptn->escape_state() >= PointsToNode::GlobalEscape) {1287non_escaped_worklist.delete_at(next);1288}1289if (ptn->escape_state() == PointsToNode::NoEscape) {1290// Find fields in non-escaped allocations which have unknown value.1291find_init_values(ptn, phantom_obj, NULL);1292}1293}1294return (non_escaped_worklist.length() > 0);1295}12961297// Add all references to JavaObject node by walking over all uses.1298int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) {1299int new_edges = 0;1300if (populate_worklist) {1301// Populate _worklist by uses of jobj's uses.1302for (UseIterator i(jobj); i.has_next(); i.next()) {1303PointsToNode* use = i.get();1304if 
(use->is_Arraycopy())1305continue;1306add_uses_to_worklist(use);1307if (use->is_Field() && use->as_Field()->is_oop()) {1308// Put on worklist all field's uses (loads) and1309// related field nodes (same base and offset).1310add_field_uses_to_worklist(use->as_Field());1311}1312}1313}1314for (int l = 0; l < _worklist.length(); l++) {1315PointsToNode* use = _worklist.at(l);1316if (PointsToNode::is_base_use(use)) {1317// Add reference from jobj to field and from field to jobj (field's base).1318use = PointsToNode::get_use_node(use)->as_Field();1319if (add_base(use->as_Field(), jobj)) {1320new_edges++;1321}1322continue;1323}1324assert(!use->is_JavaObject(), "sanity");1325if (use->is_Arraycopy()) {1326if (jobj == null_obj) // NULL object does not have field edges1327continue;1328// Added edge from Arraycopy node to arraycopy's source java object1329if (add_edge(use, jobj)) {1330jobj->set_arraycopy_src();1331new_edges++;1332}1333// and stop here.1334continue;1335}1336if (!add_edge(use, jobj))1337continue; // No new edge added, there was such edge already.1338new_edges++;1339if (use->is_LocalVar()) {1340add_uses_to_worklist(use);1341if (use->arraycopy_dst()) {1342for (EdgeIterator i(use); i.has_next(); i.next()) {1343PointsToNode* e = i.get();1344if (e->is_Arraycopy()) {1345if (jobj == null_obj) // NULL object does not have field edges1346continue;1347// Add edge from arraycopy's destination java object to Arraycopy node.1348if (add_edge(jobj, e)) {1349new_edges++;1350jobj->set_arraycopy_dst();1351}1352}1353}1354}1355} else {1356// Added new edge to stored in field values.1357// Put on worklist all field's uses (loads) and1358// related field nodes (same base and offset).1359add_field_uses_to_worklist(use->as_Field());1360}1361}1362_worklist.clear();1363_in_worklist.Reset();1364return new_edges;1365}13661367// Put on worklist all related field nodes.1368void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {1369assert(field->is_oop(), "sanity");1370int offset 
= field->offset();1371add_uses_to_worklist(field);1372// Loop over all bases of this field and push on worklist Field nodes1373// with the same offset and base (since they may reference the same field).1374for (BaseIterator i(field); i.has_next(); i.next()) {1375PointsToNode* base = i.get();1376add_fields_to_worklist(field, base);1377// Check if the base was source object of arraycopy and go over arraycopy's1378// destination objects since values stored to a field of source object are1379// accessable by uses (loads) of fields of destination objects.1380if (base->arraycopy_src()) {1381for (UseIterator j(base); j.has_next(); j.next()) {1382PointsToNode* arycp = j.get();1383if (arycp->is_Arraycopy()) {1384for (UseIterator k(arycp); k.has_next(); k.next()) {1385PointsToNode* abase = k.get();1386if (abase->arraycopy_dst() && abase != base) {1387// Look for the same arracopy reference.1388add_fields_to_worklist(field, abase);1389}1390}1391}1392}1393}1394}1395}13961397// Put on worklist all related field nodes.1398void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {1399int offset = field->offset();1400if (base->is_LocalVar()) {1401for (UseIterator j(base); j.has_next(); j.next()) {1402PointsToNode* f = j.get();1403if (PointsToNode::is_base_use(f)) { // Field1404f = PointsToNode::get_use_node(f);1405if (f == field || !f->as_Field()->is_oop())1406continue;1407int offs = f->as_Field()->offset();1408if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {1409add_to_worklist(f);1410}1411}1412}1413} else {1414assert(base->is_JavaObject(), "sanity");1415if (// Skip phantom_object since it is only used to indicate that1416// this field's content globally escapes.1417(base != phantom_obj) &&1418// NULL object node does not have fields.1419(base != null_obj)) {1420for (EdgeIterator i(base); i.has_next(); i.next()) {1421PointsToNode* f = i.get();1422// Skip arraycopy edge since store to destination object field1423// does not 
// update value in source object field.
        if (f->is_Arraycopy()) {
          assert(base->arraycopy_dst(), "sanity");
          continue;
        }
        if (f == field || !f->as_Field()->is_oop())
          continue;
        int offs = f->as_Field()->offset();
        if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
          add_to_worklist(f);
        }
      }
    }
  }
}

// Find fields which have unknown value.
int ConnectionGraph::find_field_value(FieldNode* field) {
  // Escaped fields should have init value already.
  assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
  int new_edges = 0;
  for (BaseIterator i(field); i.has_next(); i.next()) {
    PointsToNode* base = i.get();
    if (base->is_JavaObject()) {
      // Skip Allocate's fields which will be processed later.
      if (base->ideal_node()->is_Allocate())
        return 0;
      assert(base == null_obj, "only NULL ptr base expected here");
    }
  }
  // Value is unknown: make the field point to the phantom object.
  if (add_edge(field, phantom_obj)) {
    // New edge was added
    new_edges++;
    add_field_uses_to_worklist(field);
  }
  return new_edges;
}

// Find fields initializing values for allocations.
// 'init_val' is either phantom_obj (unknown values, for runtime/Java calls)
// or null_obj (default NULL initialization, for Allocate nodes).
int ConnectionGraph::find_init_values(JavaObjectNode* pta, PointsToNode* init_val, PhaseTransform* phase) {
  assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
  int new_edges = 0;
  Node* alloc = pta->ideal_node();
  if (init_val == phantom_obj) {
    // Do nothing for Allocate nodes since its fields values are "known".
    if (alloc->is_Allocate())
      return 0;
    assert(alloc->as_CallStaticJava(), "sanity");
#ifdef ASSERT
    if (alloc->as_CallStaticJava()->method() == NULL) {
      const char* name = alloc->as_CallStaticJava()->_name;
      assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");
    }
#endif
    // Non-escaped allocation returned from Java or runtime call have
    // unknown values in fields.
    for (EdgeIterator i(pta); i.has_next(); i.next()) {
      PointsToNode* field = i.get();
      if (field->is_Field() && field->as_Field()->is_oop()) {
        if (add_edge(field, phantom_obj)) {
          // New edge was added
          new_edges++;
          add_field_uses_to_worklist(field->as_Field());
        }
      }
    }
    return new_edges;
  }
  assert(init_val == null_obj, "sanity");
  // Do nothing for Call nodes since its fields values are unknown.
  if (!alloc->is_Allocate())
    return 0;

  InitializeNode* ini = alloc->as_Allocate()->initialization();
  Compile* C = _compile;
  bool visited_bottom_offset = false;
  GrowableArray<int> offsets_worklist;

  // Check if an oop field's initializing value is recorded and add
  // a corresponding NULL if field's value if it is not recorded.
  // Connection Graph does not record a default initialization by NULL
  // captured by Initialize node.
  //
  for (EdgeIterator i(pta); i.has_next(); i.next()) {
    PointsToNode* field = i.get(); // Field (AddP)
    if (!field->is_Field() || !field->as_Field()->is_oop())
      continue; // Not oop field
    int offset = field->as_Field()->offset();
    if (offset == Type::OffsetBot) {
      if (!visited_bottom_offset) {
        // OffsetBot is used to reference array's element,
        // always add reference to NULL to all Field nodes since we don't
        // know which element is referenced.
        if (add_edge(field, null_obj)) {
          // New edge was added
          new_edges++;
          add_field_uses_to_worklist(field->as_Field());
          visited_bottom_offset = true;
        }
      }
    } else {
      // Check only oop fields.
      const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type();
      if (adr_type->isa_rawptr()) {
#ifdef ASSERT
        // Raw pointers are used for initializing stores so skip it
        // since it should be recorded already
        Node* base = get_addp_base(field->ideal_node());
        assert(adr_type->isa_rawptr() && base->is_Proj() &&
               (base->in(0) == alloc),"unexpected pointer type");
#endif
        continue;
      }
      if (!offsets_worklist.contains(offset)) {
        offsets_worklist.append(offset);
        Node* value = NULL;
        if (ini != NULL) {
          // StoreP::memory_type() == T_ADDRESS
          BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS;
          Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase);
          // Make sure initializing store has the same type as this AddP.
          // This AddP may reference non existing field because it is on a
          // dead branch of bimorphic call which is not eliminated yet.
          if (store != NULL && store->is_Store() &&
              store->as_Store()->memory_type() == ft) {
            value = store->in(MemNode::ValueIn);
#ifdef ASSERT
            if (VerifyConnectionGraph) {
              // Verify that AddP already points to all objects the value points to.
              PointsToNode* val = ptnode_adr(value->_idx);
              assert((val != NULL), "should be processed already");
              PointsToNode* missed_obj = NULL;
              if (val->is_JavaObject()) {
                if (!field->points_to(val->as_JavaObject())) {
                  missed_obj = val;
                }
              } else {
                if (!val->is_LocalVar() || (val->edge_count() == 0)) {
                  tty->print_cr("----------init store has invalid value -----");
                  store->dump();
                  val->dump();
                  assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
                }
                for (EdgeIterator j(val); j.has_next(); j.next()) {
                  PointsToNode* obj = j.get();
                  if (obj->is_JavaObject()) {
                    if (!field->points_to(obj->as_JavaObject())) {
                      missed_obj = obj;
                      break;
                    }
                  }
                }
              }
              if (missed_obj != NULL) {
                tty->print_cr("----------field---------------------------------");
                field->dump();
                tty->print_cr("----------missed referernce to object-----------");
                missed_obj->dump();
                tty->print_cr("----------object referernced by init store -----");
                store->dump();
                val->dump();
                assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
              }
            }
#endif
          } else {
            // There could be initializing stores which follow allocation.
            // For example, a volatile field store is not collected
            // by Initialize node.
            //
            // Need to check for dependent loads to separate such stores from
            // stores which follow loads. For now, add initial value NULL so
            // that compare pointers optimization works correctly.
          }
        }
        if (value == NULL) {
          // A field's initializing value was not recorded. Add NULL.
          if (add_edge(field, null_obj)) {
            // New edge was added
            new_edges++;
            add_field_uses_to_worklist(field->as_Field());
          }
        }
      }
    }
  }
  return new_edges;
}

// Adjust scalar_replaceable state after Connection Graph is built.
void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj) {
  // Search for non-escaping objects which are not scalar replaceable
  // and mark them to propagate the state to referenced objects.

  // 1. An object is not scalar replaceable if the field into which it is
  //    stored has unknown offset (stored into unknown element of an array).
  //
  for (UseIterator i(jobj); i.has_next(); i.next()) {
    PointsToNode* use = i.get();
    assert(!use->is_Arraycopy(), "sanity");
    if (use->is_Field()) {
      FieldNode* field = use->as_Field();
      assert(field->is_oop() && field->scalar_replaceable() &&
             field->fields_escape_state() == PointsToNode::NoEscape, "sanity");
      if (field->offset() == Type::OffsetBot) {
        jobj->set_scalar_replaceable(false);
        return;
      }
      // 2. An object is not scalar replaceable if the field into which it is
      //    stored has multiple bases one of which is null.
      if (field->base_count() > 1) {
        for (BaseIterator i(field); i.has_next(); i.next()) {
          PointsToNode* base = i.get();
          if (base == null_obj) {
            jobj->set_scalar_replaceable(false);
            return;
          }
        }
      }
    }
    assert(use->is_Field() || use->is_LocalVar(), "sanity");
    // 3.
//    An object is not scalar replaceable if it is merged with other objects.
    for (EdgeIterator j(use); j.has_next(); j.next()) {
      PointsToNode* ptn = j.get();
      if (ptn->is_JavaObject() && ptn != jobj) {
        // Mark all objects.
        jobj->set_scalar_replaceable(false);
        ptn->set_scalar_replaceable(false);
      }
    }
    if (!jobj->scalar_replaceable()) {
      return;
    }
  }

  for (EdgeIterator j(jobj); j.has_next(); j.next()) {
    // Non-escaping object node should point only to field nodes.
    FieldNode* field = j.get()->as_Field();
    int offset = field->as_Field()->offset();

    // 4. An object is not scalar replaceable if it has a field with unknown
    //    offset (array's element is accessed in loop).
    if (offset == Type::OffsetBot) {
      jobj->set_scalar_replaceable(false);
      return;
    }
    // 5. Currently an object is not scalar replaceable if a LoadStore node
    //    access its field since the field value is unknown after it.
    //
    Node* n = field->ideal_node();
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      if (n->fast_out(i)->is_LoadStore()) {
        jobj->set_scalar_replaceable(false);
        return;
      }
    }

    // 6. Or the address may point to more then one object. This may produce
    //    the false positive result (set not scalar replaceable)
    //    since the flow-insensitive escape analysis can't separate
    //    the case when stores overwrite the field's value from the case
    //    when stores happened on different control branches.
    //
    //    Note: it will disable scalar replacement in some cases:
    //
    //    Point p[] = new Point[1];
    //    p[0] = new Point(); // Will be not scalar replaced
    //
    //    but it will save us from incorrect optimizations in next cases:
    //
    //    Point p[] = new Point[1];
    //    if ( x ) p[0] = new Point(); // Will be not scalar replaced
    //
    if (field->base_count() > 1) {
      for (BaseIterator i(field); i.has_next(); i.next()) {
        PointsToNode* base = i.get();
        // Don't take into account LocalVar nodes which
        // may point to only one object which should be also
        // this field's base by now.
        if (base->is_JavaObject() && base != jobj) {
          // Mark all bases.
          jobj->set_scalar_replaceable(false);
          base->set_scalar_replaceable(false);
        }
      }
    }
  }
}

#ifdef ASSERT
// Debug-only check that the built graph reached a fixed point:
// no new edges can be added and all escape states are final.
void ConnectionGraph::verify_connection_graph(
                 GrowableArray<PointsToNode*>&   ptnodes_worklist,
                 GrowableArray<JavaObjectNode*>& non_escaped_worklist,
                 GrowableArray<JavaObjectNode*>& java_objects_worklist,
                 GrowableArray<Node*>& addp_worklist) {
  // Verify that graph is complete - no new edges could be added.
  int java_objects_length = java_objects_worklist.length();
  int non_escaped_length  = non_escaped_worklist.length();
  int new_edges = 0;
  for (int next = 0; next < java_objects_length; ++next) {
    JavaObjectNode* ptn = java_objects_worklist.at(next);
    new_edges += add_java_object_edges(ptn, true);
  }
  assert(new_edges == 0, "graph was not complete");
  // Verify that escape state is final.
  int length = non_escaped_worklist.length();
  find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist);
  assert((non_escaped_length == non_escaped_worklist.length()) &&
         (non_escaped_length == length) &&
         (_worklist.length() == 0), "escape state was not final");

  // Verify fields information.
  int addp_length = addp_worklist.length();
  for (int next = 0; next < addp_length; ++next ) {
    Node* n = addp_worklist.at(next);
    FieldNode* field = ptnode_adr(n->_idx)->as_Field();
    if (field->is_oop()) {
      // Verify that field has all bases
      Node* base = get_addp_base(n);
      PointsToNode* ptn = ptnode_adr(base->_idx);
      if (ptn->is_JavaObject()) {
        assert(field->has_base(ptn->as_JavaObject()), "sanity");
      } else {
        assert(ptn->is_LocalVar(), "sanity");
        for (EdgeIterator i(ptn); i.has_next(); i.next()) {
          PointsToNode* e = i.get();
          if (e->is_JavaObject()) {
            assert(field->has_base(e->as_JavaObject()), "sanity");
          }
        }
      }
      // Verify that all fields have initializing values.
      if (field->edge_count() == 0) {
        tty->print_cr("----------field does not have references----------");
        field->dump();
        for (BaseIterator i(field); i.has_next(); i.next()) {
          PointsToNode* base = i.get();
          tty->print_cr("----------field has next base---------------------");
          base->dump();
          if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) {
            tty->print_cr("----------base has fields-------------------------");
            for (EdgeIterator j(base); j.has_next(); j.next()) {
              j.get()->dump();
            }
            tty->print_cr("----------base has references---------------------");
            for (UseIterator j(base); j.has_next(); j.next()) {
              j.get()->dump();
            }
          }
        }
        for (UseIterator i(field); i.has_next(); i.next()) {
          i.get()->dump();
        }
        assert(field->edge_count() > 0, "sanity");
      }
    }
  }
}
#endif

// Optimize ideal graph.
void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
                                           GrowableArray<Node*>& storestore_worklist) {
  Compile* C = _compile;
  PhaseIterGVN* igvn = _igvn;
  if (EliminateLocks) {
    // Mark locks before changing
ideal graph.1790int cnt = C->macro_count();1791for( int i=0; i < cnt; i++ ) {1792Node *n = C->macro_node(i);1793if (n->is_AbstractLock()) { // Lock and Unlock nodes1794AbstractLockNode* alock = n->as_AbstractLock();1795if (!alock->is_non_esc_obj()) {1796if (not_global_escape(alock->obj_node())) {1797assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");1798// The lock could be marked eliminated by lock coarsening1799// code during first IGVN before EA. Replace coarsened flag1800// to eliminate all associated locks/unlocks.1801#ifdef ASSERT1802alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");1803#endif1804alock->set_non_esc_obj();1805}1806}1807}1808}1809}18101811if (OptimizePtrCompare) {1812// Add ConI(#CC_GT) and ConI(#CC_EQ).1813_pcmp_neq = igvn->makecon(TypeInt::CC_GT);1814_pcmp_eq = igvn->makecon(TypeInt::CC_EQ);1815// Optimize objects compare.1816while (ptr_cmp_worklist.length() != 0) {1817Node *n = ptr_cmp_worklist.pop();1818Node *res = optimize_ptr_compare(n);1819if (res != NULL) {1820#ifndef PRODUCT1821if (PrintOptimizePtrCompare) {1822tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? 
"EQ" : "NotEQ"));
          if (Verbose) {
            n->dump(1);
          }
        }
#endif
        igvn->replace_node(n, res);
      }
    }
    // cleanup: if no CmpP/CmpN was actually replaced, the eagerly created
    // result constants are unused - drop them from the hash table.
    if (_pcmp_neq->outcnt() == 0)
      igvn->hash_delete(_pcmp_neq);
    if (_pcmp_eq->outcnt() == 0)
      igvn->hash_delete(_pcmp_eq);
  }

  // For MemBarStoreStore nodes added in library_call.cpp, check
  // escape status of associated AllocateNode and optimize out
  // MemBarStoreStore node if the allocated object never escapes.
  while (storestore_worklist.length() != 0) {
    Node *n = storestore_worklist.pop();
    MemBarStoreStoreNode *storestore = n->as_MemBarStoreStore();
    Node *alloc = storestore->in(MemBarNode::Precedent)->in(0);
    assert(alloc->is_Allocate(), "storestore should point to AllocateNode");
    if (not_global_escape(alloc)) {
      // The allocated object never escapes: the StoreStore barrier is not
      // needed.  Replace it with a plain CPU-order membar that keeps the
      // original memory and control inputs.
      MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
      mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));
      mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
      igvn->register_new_node_with_optimizer(mb);
      igvn->replace_node(storestore, mb);
    }
  }
}

// Optimize objects compare.
// Decide a pointer compare using escape-analysis results: returns _pcmp_eq
// when the two inputs are provably the same object, _pcmp_neq when they are
// provably different, and NULL when nothing can be concluded.
Node* ConnectionGraph::optimize_ptr_compare(Node* n) {
  assert(OptimizePtrCompare, "sanity");
  PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx);
  PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx);
  JavaObjectNode* jobj1 = unique_java_object(n->in(1));
  JavaObjectNode* jobj2 = unique_java_object(n->in(2));
  assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
  assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");

  // Check simple cases first.
  if (jobj1 != NULL) {
    if (jobj1->escape_state() == PointsToNode::NoEscape) {
      if (jobj1 == jobj2) {
        // Comparing the same not escaping object.
        return _pcmp_eq;
      }
      Node* obj = jobj1->ideal_node();
      // Comparing not escaping allocation.
      if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
          !ptn2->points_to(jobj1)) {
        return _pcmp_neq; // This includes nullness check.
      }
    }
  }
  if (jobj2 != NULL) {
    if (jobj2->escape_state() == PointsToNode::NoEscape) {
      Node* obj = jobj2->ideal_node();
      // Comparing not escaping allocation.
      if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
          !ptn1->points_to(jobj2)) {
        return _pcmp_neq; // This includes nullness check.
      }
    }
  }
  if (jobj1 != NULL && jobj1 != phantom_obj &&
      jobj2 != NULL && jobj2 != phantom_obj &&
      jobj1->ideal_node()->is_Con() &&
      jobj2->ideal_node()->is_Con()) {
    // Klass or String constants compare. Need to be careful with
    // compressed pointers - compare types of ConN and ConP instead of nodes.
    const Type* t1 = jobj1->ideal_node()->get_ptr_type();
    const Type* t2 = jobj2->ideal_node()->get_ptr_type();
    if (t1->make_ptr() == t2->make_ptr()) {
      return _pcmp_eq;
    } else {
      return _pcmp_neq;
    }
  }
  if (ptn1->meet(ptn2)) {
    return NULL; // Sets are not disjoint
  }

  // Sets are disjoint.
  bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj);
  bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj);
  bool set1_has_null_ptr = ptn1->points_to(null_obj);
  bool set2_has_null_ptr = ptn2->points_to(null_obj);
  if (set1_has_unknown_ptr && set2_has_null_ptr ||
      set2_has_unknown_ptr && set1_has_null_ptr) {
    // Check nullness of unknown object.
    return NULL;
  }

  // Disjointness by itself is not sufficient since
  // alias analysis is not complete for escaped objects.
  // Disjoint sets are definitely unrelated only when
  // at least one set has only not escaping allocations.
  if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
    if (ptn1->non_escaping_allocation()) {
      return _pcmp_neq;
    }
  }
  if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
    if (ptn2->non_escaping_allocation()) {
      return _pcmp_neq;
    }
  }
  return NULL;
}

// Connection Graph construction functions.

// Register a LocalVar node in the connection graph for ideal node 'n'
// (no-op if a node for 'n' is already present).
void ConnectionGraph::add_local_var(Node *n,
PointsToNode::EscapeState es) {
  PointsToNode* ptadr = _nodes.at(n->_idx);
  if (ptadr != NULL) {
    // Already registered; must be the same kind of graph node for 'n'.
    assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity");
    return;
  }
  Compile* C = _compile;
  ptadr = new (C->comp_arena()) LocalVarNode(this, n, es);
  _nodes.at_put(n->_idx, ptadr);
}

// Register a JavaObject node for ideal node 'n' with initial escape state
// 'es' (no-op if a node for 'n' is already present).
void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) {
  PointsToNode* ptadr = _nodes.at(n->_idx);
  if (ptadr != NULL) {
    assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity");
    return;
  }
  Compile* C = _compile;
  ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es);
  _nodes.at_put(n->_idx, ptadr);
}

// Register a Field node for address node 'n' at the given field offset
// (no-op if already present).  Fields touched by unsafe accesses are
// conservatively forced to GlobalEscape.
void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) {
  PointsToNode* ptadr = _nodes.at(n->_idx);
  if (ptadr != NULL) {
    assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity");
    return;
  }
  bool unsafe = false;
  bool is_oop = is_oop_field(n, offset, &unsafe);
  if (unsafe) {
    es = PointsToNode::GlobalEscape;
  }
  Compile* C = _compile;
  FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop);
  _nodes.at_put(n->_idx, field);
}

// Register an Arraycopy node for ideal node 'n' and wire it between the
// source and destination objects: src -> arraycopy and dst -> arraycopy.
void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es,
                                    PointsToNode* src, PointsToNode* dst) {
  assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
  assert((src != null_obj) && (dst != null_obj), "not for ConP NULL");
  PointsToNode* ptadr = _nodes.at(n->_idx);
  if (ptadr != NULL) {
    assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
    return;
  }
  Compile* C = _compile;
  ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
  _nodes.at_put(n->_idx, ptadr);
  // Add edge from arraycopy node to source object.
  (void)add_edge(ptadr, src);
  src->set_arraycopy_src();
  // Add edge from destination object to arraycopy node.
  (void)add_edge(dst,
ptadr);
  dst->set_arraycopy_dst();
}

// Returns true if the AddP 'n' at 'offset' may address an oop field
// (object, narrow oop, or array).  Sets *unsafe when the access looks like
// an unsafe oop access through a non-oop-typed address.
bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
  const Type* adr_type = n->as_AddP()->bottom_type();
  BasicType bt = T_INT;
  if (offset == Type::OffsetBot) {
    // Check only oop fields.
    if (!adr_type->isa_aryptr() ||
        (adr_type->isa_aryptr()->klass() == NULL) ||
        adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {
      // OffsetBot is used to reference array's element. Ignore first AddP.
      if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) {
        bt = T_OBJECT;
      }
    }
  } else if (offset != oopDesc::klass_offset_in_bytes()) {
    if (adr_type->isa_instptr()) {
      ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
      if (field != NULL) {
        bt = field->layout_type();
      } else {
        // Check for unsafe oop field access
        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
          int opcode = n->fast_out(i)->Opcode();
          if (opcode == Op_StoreP || opcode == Op_StoreN ||
              opcode == Op_LoadP || opcode == Op_LoadN ||
              opcode == Op_GetAndSetP || opcode == Op_GetAndSetN ||
              opcode == Op_CompareAndSwapP || opcode == Op_CompareAndSwapN) {
            bt = T_OBJECT;
            (*unsafe) = true;
            break;
          }
        }
      }
    } else if (adr_type->isa_aryptr()) {
      if (offset == arrayOopDesc::length_offset_in_bytes()) {
        // Ignore array length load.
      } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {
        // Ignore first AddP.
      } else {
        const Type* elemtype = adr_type->isa_aryptr()->elem();
        bt = elemtype->array_element_basic_type();
      }
    } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
      // Allocation initialization, ThreadLocal field access, unsafe access
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        int opcode = n->fast_out(i)->Opcode();
        if (opcode == Op_StoreP || opcode == Op_StoreN ||
            opcode == Op_LoadP || opcode == Op_LoadN ||
            opcode == Op_GetAndSetP || opcode == Op_GetAndSetN ||
            opcode == Op_CompareAndSwapP || opcode == Op_CompareAndSwapN) {
          bt = T_OBJECT;
          break;
        }
      }
    }
  }
  return (bt == T_OBJECT || bt == T_NARROWOOP || bt == T_ARRAY);
}

// Returns unique pointed java object or NULL.
JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {
  assert(!_collecting, "should not call when contructed graph");
  // If the node was created after the escape computation we can't answer.
  uint idx = n->_idx;
  if (idx >= nodes_size()) {
    return NULL;
  }
  PointsToNode* ptn = ptnode_adr(idx);
  if (ptn == NULL) {
    return NULL;
  }
  if (ptn->is_JavaObject()) {
    return ptn->as_JavaObject();
  }
  assert(ptn->is_LocalVar(), "sanity");
  // Check all java objects it points to.
  JavaObjectNode* jobj = NULL;
  for (EdgeIterator i(ptn); i.has_next(); i.next()) {
    PointsToNode* e = i.get();
    if (e->is_JavaObject()) {
      if (jobj == NULL) {
        jobj = e->as_JavaObject();
      } else if (jobj != e) {
        // Points to more than one java object: not unique.
        return NULL;
      }
    }
  }
  return jobj;
}

// Return true if this node points only to non-escaping allocations.
bool PointsToNode::non_escaping_allocation() {
  if (is_JavaObject()) {
    Node* n = ideal_node();
    if (n->is_Allocate() || n->is_CallStaticJava()) {
      return (escape_state() == PointsToNode::NoEscape);
    } else {
      return false;
    }
  }
  assert(is_LocalVar(), "sanity");
  // Check all java objects it points to.
  for (EdgeIterator i(this); i.has_next(); i.next()) {
    PointsToNode* e = i.get();
    if (e->is_JavaObject()) {
      Node* n = e->ideal_node();
      if ((e->escape_state() != PointsToNode::NoEscape) ||
          !(n->is_Allocate() || n->is_CallStaticJava())) {
        return false;
      }
    }
  }
  return true;
}

// Return true if we know the node does not escape globally.
bool ConnectionGraph::not_global_escape(Node *n) {
  assert(!_collecting, "should not call during graph construction");
  // If the node was created after the escape computation
// we can't answer.
  uint idx = n->_idx;
  if (idx >= nodes_size()) {
    return false;
  }
  PointsToNode* ptn = ptnode_adr(idx);
  if (ptn == NULL) {
    return false; // not in congraph (e.g. ConI)
  }
  PointsToNode::EscapeState es = ptn->escape_state();
  // If we have already computed a value, return it.
  if (es >= PointsToNode::GlobalEscape)
    return false;
  if (ptn->is_JavaObject()) {
    return true; // (es < PointsToNode::GlobalEscape);
  }
  assert(ptn->is_LocalVar(), "sanity");
  // Check all java objects it points to.
  for (EdgeIterator i(ptn); i.has_next(); i.next()) {
    if (i.get()->escape_state() >= PointsToNode::GlobalEscape)
      return false;
  }
  return true;
}


// Helper functions

// Return true if this node points to specified node or nodes it points to.
bool PointsToNode::points_to(JavaObjectNode* ptn) const {
  if (is_JavaObject()) {
    return (this == ptn);
  }
  assert(is_LocalVar() || is_Field(), "sanity");
  for (EdgeIterator i(this); i.has_next(); i.next()) {
    if (i.get() == ptn)
      return true;
  }
  return false;
}

// Return true if one node points to another, i.e. the two points-to sets
// intersect.
bool PointsToNode::meet(PointsToNode* ptn) {
  if (this == ptn) {
    return true;
  } else if (ptn->is_JavaObject()) {
    return this->points_to(ptn->as_JavaObject());
  } else if (this->is_JavaObject()) {
    return ptn->points_to(this->as_JavaObject());
  }
  assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");
  // Both are LocalVar: look for a common element in the two edge sets.
  int ptn_count = ptn->edge_count();
  for (EdgeIterator i(this); i.has_next(); i.next()) {
    PointsToNode* this_e = i.get();
    for (int j = 0; j < ptn_count; j++) {
      if (this_e == ptn->edge(j))
        return true;
    }
  }
  return false;
}

#ifdef ASSERT
// Return true if bases point to this java object.
bool FieldNode::has_base(JavaObjectNode* jobj) const {
  for (BaseIterator i(this); i.has_next(); i.next()) {
    if (i.get() == jobj)
      return true;
  }
  return
false;
}
#endif

// Compute the constant offset addressed by 'adr' (used to key Field nodes).
// Handles the raw-address case for stores captured by an Initialize node.
int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
  const Type *adr_type = phase->type(adr);
  if (adr->is_AddP() && adr_type->isa_oopptr() == NULL &&
      adr->in(AddPNode::Address)->is_Proj() &&
      adr->in(AddPNode::Address)->in(0)->is_Allocate()) {
    // We are computing a raw address for a store captured by an Initialize
    // compute an appropriate address type. AddP cases #3 and #5 (see below).
    int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
    assert(offs != Type::OffsetBot ||
           adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
           "offset must be a constant or it is initialization of array");
    return offs;
  }
  const TypePtr *t_ptr = adr_type->isa_ptr();
  assert(t_ptr != NULL, "must be a pointer type");
  return t_ptr->offset();
}

// Return the object which the AddP 'addp' addresses into.  The recognized
// shapes are enumerated in the case diagrams below.
Node* ConnectionGraph::get_addp_base(Node *addp) {
  assert(addp->is_AddP(), "must be AddP");
  //
  // AddP cases for Base and Address inputs:
  // case #1. Direct object's field reference:
  //     Allocate
  //       |
  //     Proj #5 ( oop result )
  //       |
  //     CheckCastPP (cast to instance type)
  //      | |
  //     AddP  ( base == address )
  //
  // case #2. Indirect object's field reference:
  //      Phi
  //       |
  //     CastPP (cast to instance type)
  //      | |
  //     AddP  ( base == address )
  //
  // case #3. Raw object's field reference for Initialize node:
  //     Allocate
  //       |
  //     Proj #5 ( oop result )
  //  top   |
  //     \  |
  //     AddP  ( base == top )
  //
  // case #4. Array's element reference:
  //   {CheckCastPP | CastPP}
  //     |  | |
  //     |  AddP ( array's element offset )
  //     |  |
  //     AddP ( array's offset )
  //
  // case #5. Raw object's field reference for arraycopy stub call:
  //          The inline_native_clone() case when the arraycopy stub is called
  //          after the allocation before Initialize and CheckCastPP nodes.
  //     Allocate
  //       |
  //     Proj #5 ( oop result )
  //       | |
  //     AddP  ( base == address )
  //
  // case #6. Constant Pool, ThreadLocal, CastX2P or
  //          Raw object's field reference:
  //     {ConP, ThreadLocal, CastX2P, raw Load}
  //  top   |
  //     \  |
  //     AddP  ( base == top )
  //
  // case #7. Klass's field reference.
  //     LoadKlass
  //       | |
  //     AddP  ( base == address )
  //
  // case #8. narrow Klass's field reference.
  //     LoadNKlass
  //       |
  //     DecodeN
  //       | |
  //     AddP  ( base == address )
  //
  Node *base = addp->in(AddPNode::Base);
  if (base->uncast()->is_top()) { // The AddP case #3 and #6.
    base = addp->in(AddPNode::Address);
    while (base->is_AddP()) {
      // Case #6 (unsafe access) may have several chained AddP nodes.
      assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
      base = base->in(AddPNode::Address);
    }
    Node* uncast_base = base->uncast();
    int opcode = uncast_base->Opcode();
    assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
           opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
           (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||
           (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity");
  }
  return base;
}

// If 'addp' is the first (array offset) AddP of a two-AddP element address
// (case #4 above) with base 'n', return the second (element offset) AddP;
// otherwise return NULL.
Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
  assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
  Node* addp2 = addp->raw_out(0);
  if (addp->outcnt() == 1 && addp2->is_AddP() &&
      addp2->in(AddPNode::Base) == n &&
      addp2->in(AddPNode::Address) == addp) {
    assert(addp->in(AddPNode::Base) == n, "expecting the same base");
    //
    // Find array's offset to push it on worklist first and
    // as result process an array's element offset first (pushed
// second)
    // to avoid CastPP for the array's offset.
    // Otherwise the inserted CastPP (LocalVar) will point to what
    // the AddP (Field) points to. Which would be wrong since
    // the algorithm expects the CastPP has the same point as
    // AddP's base CheckCastPP (LocalVar).
    //
    //    ArrayAllocation
    //     |
    //    CheckCastPP
    //     |
    //    memProj (from ArrayAllocation CheckCastPP)
    //     |  ||
    //     |  ||   Int (element index)
    //     |  ||    |   ConI (log(element size))
    //     |  ||    |   /
    //     |  ||   LShift
    //     |  ||  /
    //     |  AddP (array's element offset)
    //     |  |
    //     |  | ConI (array's offset: #12(32-bits) or #24(64-bits))
    //     | / /
    //     AddP (array's offset)
    //      |
    //     Load/Store (memory operation on array's element)
    //
    return addp2;
  }
  return NULL;
}

//
// Adjust the type and inputs of an AddP which computes the
// address of a field of an instance
//
bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
  PhaseGVN* igvn = _igvn;
  const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
  assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
  const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
  if (t == NULL) {
    // We are computing a raw address for a store captured by an Initialize
    // compute an appropriate address type (cases #3 and #5).
    assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
    assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
    intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
    assert(offs != Type::OffsetBot, "offset must be a constant");
    t = base_t->add_offset(offs)->is_oopptr();
  }
  int inst_id = base_t->instance_id();
  assert(!t->is_known_instance() || t->instance_id() == inst_id,
         "old type must be non-instance or match new type");

  // The type 't' could be subclass of 'base_t'.
  // As result t->offset() could be larger than base_t's size and it will
  // cause the failure in add_offset() with narrow oops since TypeOopPtr()
  // constructor verifies correctness of the offset.
  //
  // It could happen on subclass's branch (from the type profiling
  // inlining) which was not eliminated during parsing since the exactness
  // of the allocation type was not propagated to the subclass type check.
  //
  // Or the type 't' could be not related to 'base_t' at all.
  // It could happen when CHA type is different from MDO type on a dead path
  // (for example, from instanceof check) which is not collapsed during parsing.
  //
  // Do nothing for such AddP node and don't process its users since
  // this code branch will go away.
  //
  if (!t->is_known_instance() &&
      !base_t->klass()->is_subtype_of(t->klass())) {
    return false; // bail out
  }
  const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
  // Do NOT remove the next line: ensure a new alias index is allocated
  // for the instance type. Note: C++ will not remove it since the call
  // has side effect.
  int alias_idx = _compile->get_alias_index(tinst);
  igvn->set_type(addp, tinst);
  // record the allocation in the node map
  set_map(addp, get_map(base->_idx));
  // Set addp's Base and Address to 'base'.
  Node *abase = addp->in(AddPNode::Base);
  Node *adr = addp->in(AddPNode::Address);
  if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
      adr->in(0)->_idx == (uint)inst_id) {
    // Skip AddP cases #3 and #5.
  } else {
    assert(!abase->is_top(), "sanity"); // AddP case #3
    if (abase != base) {
      igvn->hash_delete(addp);
      addp->set_req(AddPNode::Base, base);
      if (abase == adr) {
        addp->set_req(AddPNode::Address, base);
      } else {
        // AddP case #4 (adr is array's element offset AddP node)
#ifdef ASSERT
        const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
        assert(adr->is_AddP() && atype != NULL &&
               atype->instance_id() == inst_id, "array's element offset should be processed first");
#endif
      }
      igvn->hash_insert(addp);
    }
  }
  // Put on IGVN worklist since at least addp's type was changed above.
  record_for_optimizer(addp);
  return true;
}

//
// Create a new version of orig_phi if necessary. Returns either the newly
// created phi or an existing phi.  Sets create_new to indicate whether a new
// phi was created.
// Cache the last newly created phi in the node map.
//
PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, bool &new_created) {
  Compile *C = _compile;
  PhaseGVN* igvn = _igvn;
  new_created = false;
  int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
  // nothing to do if orig_phi is bottom memory or matches alias_idx
  if (phi_alias_idx == alias_idx) {
    return orig_phi;
  }
  // Have we recently created a Phi for this alias index?
  PhiNode *result = get_map_phi(orig_phi->_idx);
  if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) {
    return result;
  }
  // Previous check may fail when the same wide memory Phi was split into Phis
  // for different memory slices. Search all Phis for this region.
  if (result != NULL) {
    Node* region = orig_phi->in(0);
    for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
      Node* phi = region->fast_out(i);
      if (phi->is_Phi() &&
          C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
        assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
        return phi->as_Phi();
      }
    }
  }
  if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) {
    if (C->do_escape_analysis() == true && !C->failing()) {
      // Retry compilation without escape analysis.
      // If this is the first failure, the sentinel string will "stick"
      // to the Compile object, and the C2Compiler will see it and retry.
      C->record_failure(C2Compiler::retry_no_escape_analysis());
    }
    return NULL;
  }
  orig_phi_worklist.append_if_missing(orig_phi);
  const TypePtr *atype = C->get_adr_type(alias_idx);
  result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);
  C->copy_node_notes_to(result, orig_phi);
  igvn->set_type(result, result->bottom_type());
  record_for_optimizer(result);
  set_map(orig_phi, result);
  new_created = true;
  return result;
}

//
// Return a new version of Memory Phi "orig_phi" with the inputs having the
// specified alias index.
//
PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist) {
  assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
  Compile *C = _compile;
  PhaseGVN* igvn = _igvn;
  bool new_phi_created;
  PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);
  if (!new_phi_created) {
    return result;
  }
  // phi_list/cur_input act as an explicit stack so nested memory Phis are
  // processed iteratively instead of recursively.
  GrowableArray<PhiNode *>  phi_list;
  GrowableArray<uint>  cur_input;
  PhiNode *phi = orig_phi;
  uint idx = 1;
  bool finished = false;
  while(!finished) {
    while (idx < phi->req()) {
      Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist);
      if (mem != NULL && mem->is_Phi()) {
        PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);
        if (new_phi_created) {
          // found an phi for which we created a new split, push current one on worklist and begin
          // processing new one
          phi_list.push(phi);
          cur_input.push(idx);
          phi = mem->as_Phi();
          result = newphi;
          idx = 1;
          continue;
        } else {
          mem = newphi;
        }
      }
      if (C->failing()) {
        return NULL;
      }
      result->set_req(idx++, mem);
    }
#ifdef ASSERT
    // verify that the new Phi has an input for each input of the original
    assert( phi->req() == result->req(), "must have same number of inputs.");
    assert( result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match");
#endif
    // Check if all new phi's inputs have specified alias index.
    // Otherwise use old phi.
    for (uint i = 1; i < phi->req(); i++) {
      Node* in = result->in(i);
      assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond.");
    }
    // we have finished processing a Phi, see if there are any more to do
    finished = (phi_list.length() == 0 );
    if (!finished)
{
      // Resume the parent Phi that was suspended when this nested Phi was
      // encountered, wiring the just-finished result into it.
      phi = phi_list.pop();
      idx = cur_input.pop();
      PhiNode *prev_result = get_map_phi(phi->_idx);
      prev_result->set_req(idx++, result);
      result = prev_result;
    }
  }
  return result;
}

//
// The next methods are derived from methods in MemNode.
//
// Step through a MergeMem to the memory slice for 'alias_idx', unless the
// type is too general to pick a precise slice.
Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {
  Node *mem = mmem;
  // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally
  // means an array I have not precisely typed yet.  Do not do any
  // alias stuff with it any time soon.
  if (toop->base() != Type::AnyPtr &&
      !(toop->klass() != NULL &&
        toop->klass()->is_java_lang_Object() &&
        toop->offset() == Type::OffsetBot)) {
    mem = mmem->memory_at(alias_idx);
    // Update input if it is progress over what we have now
  }
  return mem;
}

//
// Move memory users to their memory slices.
//
void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis) {
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;
  const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr();
  assert(tp != NULL, "ptr type");
  int alias_idx = C->get_alias_index(tp);
  int general_idx = C->get_general_index(alias_idx);

  // Move users first
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* use = n->fast_out(i);
    if (use->is_MergeMem()) {
      MergeMemNode* mmem = use->as_MergeMem();
      assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice");
      if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) {
        continue; // Nothing to do
      }
      // Replace previous general reference to mem node.
      uint orig_uniq = C->unique();
      Node* m = find_inst_mem(n, general_idx, orig_phis);
      assert(orig_uniq == C->unique(), "no new nodes");
      mmem->set_memory_at(general_idx, m);
      --imax;
      --i;
    } else if (use->is_MemBar()) {
      assert(!use->is_Initialize(), "initializing stores should not be moved");
      if (use->req() > MemBarNode::Precedent &&
          use->in(MemBarNode::Precedent) == n) {
        // Don't move related membars.
        record_for_optimizer(use);
        continue;
      }
      tp = use->as_MemBar()->adr_type()->isa_ptr();
      if (tp != NULL && C->get_alias_index(tp) == alias_idx ||
          alias_idx == general_idx) {
        continue; // Nothing to do
      }
      // Move to general memory slice.
      uint orig_uniq = C->unique();
      Node* m = find_inst_mem(n, general_idx, orig_phis);
      assert(orig_uniq == C->unique(), "no new nodes");
      igvn->hash_delete(use);
      imax -= use->replace_edge(n, m);
      igvn->hash_insert(use);
      record_for_optimizer(use);
      --i;
#ifdef ASSERT
    } else if (use->is_Mem()) {
      if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) {
        // Don't move related cardmark.
        continue;
      }
      // Memory nodes should have new memory input.
      tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
      assert(tp != NULL, "ptr type");
      int idx = C->get_alias_index(tp);
      assert(get_map(use->_idx) != NULL || idx == alias_idx,
             "Following memory nodes should have new memory input or be on the same memory slice");
    } else if (use->is_Phi()) {
      // Phi nodes should be split and moved already.
      tp = use->as_Phi()->adr_type()->isa_ptr();
      assert(tp != NULL, "ptr type");
      int idx = C->get_alias_index(tp);
      assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
    } else {
      use->dump();
      assert(false, "should not be here");
#endif
    }
  }
}

//
// Search memory chain of "mem" to find a MemNode whose address
// is the specified alias index.
//
Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis) {
  if (orig_mem == NULL)
    return orig_mem;
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;
  const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
  bool is_instance = (toop != NULL) && toop->is_known_instance();
  Node *start_mem = C->start()->proj_out(TypeFunc::Memory);
  Node *prev = NULL;
  Node *result = orig_mem;
  while (prev != result) {
    prev = result;
    if (result == start_mem)
      break;  // hit one of our sentinels
    if (result->is_Mem()) {
      const Type *at = igvn->type(result->in(MemNode::Address));
      if (at == Type::TOP)
        break; // Dead
      assert (at->isa_ptr() != NULL, "pointer type required.");
      int idx = C->get_alias_index(at->is_ptr());
      if (idx == alias_idx)
        break; // Found
      if (!is_instance && (at->isa_oopptr() == NULL ||
                           !at->is_oopptr()->is_known_instance())) {
        break; // Do not skip store to general memory slice.
      }
      result = result->in(MemNode::Memory);
    }
    if (!is_instance)
      continue;  // don't search further for non-instance types
    // skip over a call which does not affect this memory slice
    if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
      Node *proj_in = result->in(0);
      if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
        break;  // hit one of our sentinels
      } else if (proj_in->is_Call()) {
        CallNode *call = proj_in->as_Call();
        if (!call->may_modify(toop, igvn)) {
          result = call->in(TypeFunc::Memory);
        }
      } else if (proj_in->is_Initialize()) {
        AllocateNode* alloc = proj_in->as_Initialize()->allocation();
        // Stop if this is the initialization for the object instance
        // which contains this memory slice, otherwise skip over it.
        if (alloc == NULL || alloc->_idx != (uint)toop->instance_id()) {
          result = proj_in->in(TypeFunc::Memory);
        }
      } else if (proj_in->is_MemBar()) {
        result = proj_in->in(TypeFunc::Memory);
      }
    } else if (result->is_MergeMem()) {
      MergeMemNode *mmem = result->as_MergeMem();
      result = step_through_mergemem(mmem, alias_idx, toop);
      if (result == mmem->base_memory()) {
        // Didn't find instance memory, search through general slice recursively.
        result = mmem->memory_at(C->get_general_index(alias_idx));
        result = find_inst_mem(result, alias_idx, orig_phis);
        if (C->failing()) {
          return NULL;
        }
        mmem->set_memory_at(alias_idx, result);
      }
    } else if (result->is_Phi() &&
               C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
      Node *un = result->as_Phi()->unique_input(igvn);
      if (un != NULL) {
        orig_phis.append_if_missing(result->as_Phi());
        result = un;
      } else {
        break;
      }
    } else if (result->is_ClearArray()) {
      if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) {
        // Can not bypass initialization of the instance
        // we are looking for.
        break;
      }
      // Otherwise skip it (the call updated 'result' value).
    } else if (result->Opcode() == Op_SCMemProj) {
      Node* mem = result->in(0);
      Node* adr = NULL;
      if (mem->is_LoadStore()) {
        adr = mem->in(MemNode::Address);
      } else {
        assert(mem->Opcode() == Op_EncodeISOArray, "sanity");
        adr = mem->in(3); // Memory edge corresponds to destination array
      }
      const Type *at = igvn->type(adr);
      if (at != Type::TOP) {
        assert (at->isa_ptr() != NULL, "pointer type required.");
        int idx = C->get_alias_index(at->is_ptr());
        assert(idx != alias_idx, "Object is not scalar replaceable if a LoadStore node access its field");
        break;
      }
      result = mem->in(MemNode::Memory);
    }
  }
  if (result->is_Phi()) {
    PhiNode *mphi = result->as_Phi();
    assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
    const TypePtr *t = mphi->adr_type();
    if (!is_instance) {
      // Push all non-instance Phis on the orig_phis worklist to update inputs
      // during Phase 4 if needed.
      orig_phis.append_if_missing(mphi);
    } else if (C->get_alias_index(t) != alias_idx) {
      // Create a new Phi with the specified alias index type.
      result = split_memory_phi(mphi, alias_idx, orig_phis);
    }
  }
  // the result is either MemNode, PhiNode, InitializeNode.
  return
result;2731}27322733//2734// Convert the types of unescaped object to instance types where possible,2735// propagate the new type information through the graph, and update memory2736// edges and MergeMem inputs to reflect the new type.2737//2738// We start with allocations (and calls which may be allocations) on alloc_worklist.2739// The processing is done in 4 phases:2740//2741// Phase 1: Process possible allocations from alloc_worklist. Create instance2742// types for the CheckCastPP for allocations where possible.2743// Propagate the the new types through users as follows:2744// casts and Phi: push users on alloc_worklist2745// AddP: cast Base and Address inputs to the instance type2746// push any AddP users on alloc_worklist and push any memnode2747// users onto memnode_worklist.2748// Phase 2: Process MemNode's from memnode_worklist. compute new address type and2749// search the Memory chain for a store with the appropriate type2750// address type. If a Phi is found, create a new version with2751// the appropriate memory slices from each of the Phi inputs.2752// For stores, process the users as follows:2753// MemNode: push on memnode_worklist2754// MergeMem: push on mergemem_worklist2755// Phase 3: Process MergeMem nodes from mergemem_worklist. Walk each memory slice2756// moving the first node encountered of each instance type to the2757// the input corresponding to its alias index.2758// appropriate memory slice.2759// Phase 4: Update the inputs of non-instance memory Phis and the Memory input of memnodes.2760//2761// In the following example, the CheckCastPP nodes are the cast of allocation2762// results and the allocation of node 29 is unescaped and eligible to be an2763// instance type.2764//2765// We start with:2766//2767// 7 Parm #memory2768// 10 ConI "12"2769// 19 CheckCastPP "Foo"2770// 20 AddP _ 19 19 10 Foo+12 alias_index=42771// 29 CheckCastPP "Foo"2772// 30 AddP _ 29 29 10 Foo+12 alias_index=42773//2774// 40 StoreP 25 7 20 ... 
alias_index=42775// 50 StoreP 35 40 30 ... alias_index=42776// 60 StoreP 45 50 20 ... alias_index=42777// 70 LoadP _ 60 30 ... alias_index=42778// 80 Phi 75 50 60 Memory alias_index=42779// 90 LoadP _ 80 30 ... alias_index=42780// 100 LoadP _ 80 20 ... alias_index=42781//2782//2783// Phase 1 creates an instance type for node 29 assigning it an instance id of 242784// and creating a new alias index for node 30. This gives:2785//2786// 7 Parm #memory2787// 10 ConI "12"2788// 19 CheckCastPP "Foo"2789// 20 AddP _ 19 19 10 Foo+12 alias_index=42790// 29 CheckCastPP "Foo" iid=242791// 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=242792//2793// 40 StoreP 25 7 20 ... alias_index=42794// 50 StoreP 35 40 30 ... alias_index=62795// 60 StoreP 45 50 20 ... alias_index=42796// 70 LoadP _ 60 30 ... alias_index=62797// 80 Phi 75 50 60 Memory alias_index=42798// 90 LoadP _ 80 30 ... alias_index=62799// 100 LoadP _ 80 20 ... alias_index=42800//2801// In phase 2, new memory inputs are computed for the loads and stores,2802// And a new version of the phi is created. In phase 4, the inputs to2803// node 80 are updated and then the memory nodes are updated with the2804// values computed in phase 2. This results in:2805//2806// 7 Parm #memory2807// 10 ConI "12"2808// 19 CheckCastPP "Foo"2809// 20 AddP _ 19 19 10 Foo+12 alias_index=42810// 29 CheckCastPP "Foo" iid=242811// 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=242812//2813// 40 StoreP 25 7 20 ... alias_index=42814// 50 StoreP 35 7 30 ... alias_index=62815// 60 StoreP 45 40 20 ... alias_index=42816// 70 LoadP _ 50 30 ... alias_index=62817// 80 Phi 75 40 60 Memory alias_index=42818// 120 Phi 75 50 50 Memory alias_index=62819// 90 LoadP _ 120 30 ... alias_index=62820// 100 LoadP _ 80 20 ... 
// alias_index=4
//
void ConnectionGraph::split_unique_types(GrowableArray<Node *>  &alloc_worklist) {
  GrowableArray<Node *>  memnode_worklist;
  GrowableArray<PhiNode *>  orig_phis;
  PhaseIterGVN  *igvn = _igvn;
  uint new_index_start = (uint) _compile->num_alias_types();
  Arena* arena = Thread::current()->resource_area();
  VectorSet visited(arena);
  ideal_nodes.clear(); // Reset for use with set_map/get_map.
  uint unique_old = _compile->unique();

  //  Phase 1:  Process possible allocations from alloc_worklist.
  //  Create instance types for the CheckCastPP for allocations where possible.
  //
  // (Note: don't forget to change the order of the second AddP node on
  //  the alloc_worklist if the order of the worklist processing is changed,
  //  see the comment in find_second_addp().)
  //
  while (alloc_worklist.length() != 0) {
    Node *n = alloc_worklist.pop();
    uint ni = n->_idx;
    if (n->is_Call()) {
      CallNode *alloc = n->as_Call();
      // copy escape information to call node
      PointsToNode* ptn = ptnode_adr(alloc->_idx);
      PointsToNode::EscapeState es = ptn->escape_state();
      // We have an allocation or call which returns a Java object,
      // see if it is unescaped.
      if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable())
        continue;
      // Find CheckCastPP for the allocate or for the return value of a call
      n = alloc->result_cast();
      if (n == NULL) {            // No uses except Initialize node
        if (alloc->is_Allocate()) {
          // Set the scalar_replaceable flag for allocation
          // so it could be eliminated if it has no uses.
          alloc->as_Allocate()->_is_scalar_replaceable = true;
        }
        if (alloc->is_CallStaticJava()) {
          // Set the scalar_replaceable flag for boxing method
          // so it could be eliminated if it has no uses.
          alloc->as_CallStaticJava()->_is_scalar_replaceable = true;
        }
        continue;
      }
      if (!n->is_CheckCastPP()) { // not unique CheckCastPP.
        assert(!alloc->is_Allocate(), "allocation should have unique type");
        continue;
      }

      // The inline code for Object.clone() casts the allocation result to
      // java.lang.Object and then to the actual type of the allocated
      // object. Detect this case and use the second cast.
      // Also detect j.l.reflect.Array.newInstance(jobject, jint) case when
      // the allocation result is cast to java.lang.Object and then
      // to the actual Array type.
      if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL
          && (alloc->is_AllocateArray() ||
              igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeKlassPtr::OBJECT)) {
        Node *cast2 = NULL;
        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
          Node *use = n->fast_out(i);
          if (use->is_CheckCastPP()) {
            cast2 = use;
            break;
          }
        }
        if (cast2 != NULL) {
          n = cast2;
        } else {
          // Non-scalar replaceable if the allocation type is unknown statically
          // (reflection allocation), the object can't be restored during
          // deoptimization without precise type.
          continue;
        }
      }

      const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
      if (t == NULL)
        continue;  // not a TypeOopPtr
      if (!t->klass_is_exact())
        continue; // not an unique type

      if (alloc->is_Allocate()) {
        // Set the scalar_replaceable flag for allocation
        // so it could be eliminated.
        alloc->as_Allocate()->_is_scalar_replaceable = true;
      }
      if (alloc->is_CallStaticJava()) {
        // Set the scalar_replaceable flag for boxing method
        // so it could be eliminated.
        alloc->as_CallStaticJava()->_is_scalar_replaceable = true;
      }
      set_escape_state(ptnode_adr(n->_idx), es); // CheckCastPP escape state
      // in order for an object to be scalar-replaceable, it must be:
      //   - a direct allocation (not a call returning an object)
      //   - non-escaping
      //   - eligible to be a unique type
      //   - not determined to be ineligible by escape analysis
      set_map(alloc, n);
      set_map(n, alloc);
      const TypeOopPtr* tinst = t->cast_to_instance_id(ni);
      igvn->hash_delete(n);
      igvn->set_type(n,  tinst);
      n->raise_bottom_type(tinst);
      igvn->hash_insert(n);
      record_for_optimizer(n);
      if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {

        // First, put on the worklist all Field edges from Connection Graph
        // which is more accurate than putting immediate users from Ideal Graph.
        for (EdgeIterator e(ptn); e.has_next(); e.next()) {
          PointsToNode* tgt = e.get();
          Node* use = tgt->ideal_node();
          assert(tgt->is_Field() && use->is_AddP(),
                 "only AddP nodes are Field edges in CG");
          if (use->outcnt() > 0) { // Don't process dead nodes
            Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
            if (addp2 != NULL) {
              assert(alloc->is_AllocateArray(),"array allocation was expected");
              alloc_worklist.append_if_missing(addp2);
            }
            alloc_worklist.append_if_missing(use);
          }
        }

        // An allocation may have an Initialize which has raw stores. Scan
        // the users of the raw allocation result and push AddP users
        // on alloc_worklist.
        Node *raw_result = alloc->proj_out(TypeFunc::Parms);
        assert (raw_result != NULL, "must have an allocation result");
        for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
          Node *use = raw_result->fast_out(i);
          if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
            Node* addp2 = find_second_addp(use, raw_result);
            if (addp2 != NULL) {
              assert(alloc->is_AllocateArray(),"array allocation was expected");
              alloc_worklist.append_if_missing(addp2);
            }
            alloc_worklist.append_if_missing(use);
          } else if (use->is_MemBar()) {
            memnode_worklist.append_if_missing(use);
          }
        }
      }
    } else if (n->is_AddP()) {
      JavaObjectNode* jobj = unique_java_object(get_addp_base(n));
      if (jobj == NULL || jobj == phantom_obj) {
#ifdef ASSERT
        ptnode_adr(get_addp_base(n)->_idx)->dump();
        ptnode_adr(n->_idx)->dump();
        assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
#endif
        // Bail out and recompile without escape analysis: the base of this
        // AddP does not point to a single known non-escaping object.
        _compile->record_failure(C2Compiler::retry_no_escape_analysis());
        return;
      }
      Node *base = get_map(jobj->idx());  // CheckCastPP node
      if (!split_AddP(n, base)) continue; // wrong type from dead path
    } else if (n->is_Phi() ||
               n->is_CheckCastPP() ||
               n->is_EncodeP() ||
               n->is_DecodeN() ||
               (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
      if (visited.test_set(n->_idx)) {
        assert(n->is_Phi(), "loops only through Phi's");
        continue;  // already processed
      }
      JavaObjectNode* jobj = unique_java_object(n);
      if (jobj == NULL || jobj == phantom_obj) {
#ifdef ASSERT
        ptnode_adr(n->_idx)->dump();
        assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
#endif
        // Bail out and recompile without escape analysis (see AddP case above).
        _compile->record_failure(C2Compiler::retry_no_escape_analysis());
        return;
      } else {
        Node *val = get_map(jobj->idx());   // CheckCastPP node
        TypeNode *tn = n->as_Type();
        const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
        assert(tinst != NULL && tinst->is_known_instance() &&
               tinst->instance_id() == jobj->idx() , "instance type expected.");

        const Type *tn_type = igvn->type(tn);
        const TypeOopPtr *tn_t;
        if (tn_type->isa_narrowoop()) {
          tn_t = tn_type->make_ptr()->isa_oopptr();
        } else {
          tn_t = tn_type->isa_oopptr();
        }
        if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) {
          // Propagate the instance type to this node, preserving narrow-oop-ness.
          if (tn_type->isa_narrowoop()) {
            tn_type = tinst->make_narrowoop();
          } else {
            tn_type = tinst;
          }
          igvn->hash_delete(tn);
          igvn->set_type(tn, tn_type);
          tn->set_type(tn_type);
          igvn->hash_insert(tn);
          record_for_optimizer(n);
        } else {
          assert(tn_type == TypePtr::NULL_PTR ||
                 tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass()),
                 "unexpected type");
          continue; // Skip dead path with different type
        }
      }
    } else {
      debug_only(n->dump();)
      assert(false, "EA: unexpected node");
      continue;
    }
    // push allocation's users on appropriate worklist
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node *use = n->fast_out(i);
      if(use->is_Mem() && use->in(MemNode::Address) == n) {
        // Load/store to instance's field
        memnode_worklist.append_if_missing(use);
      } else if (use->is_MemBar()) {
        if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
          memnode_worklist.append_if_missing(use);
        }
      } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
        Node* addp2 = find_second_addp(use, n);
        if (addp2 != NULL) {
          alloc_worklist.append_if_missing(addp2);
        }
        alloc_worklist.append_if_missing(use);
      } else if (use->is_Phi() ||
                 use->is_CheckCastPP() ||
                 use->is_EncodeNarrowPtr() ||
                 use->is_DecodeNarrowPtr() ||
                 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
        alloc_worklist.append_if_missing(use);
#ifdef ASSERT
      } else if (use->is_Mem()) {
        assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
      } else if (use->is_MergeMem()) {
        assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
      } else if (use->is_SafePoint()) {
        // Look for MergeMem nodes for calls which reference unique allocation
        // (through CheckCastPP nodes) even for debug info.
        Node* m = use->in(TypeFunc::Memory);
        if (m->is_MergeMem()) {
          assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
        }
      } else if (use->Opcode() == Op_EncodeISOArray) {
        if (use->in(MemNode::Memory) == n || use->in(3) == n) {
          // EncodeISOArray overwrites destination array
          memnode_worklist.append_if_missing(use);
        }
      } else {
        uint op = use->Opcode();
        if (!(op == Op_CmpP || op == Op_Conv2B ||
              op == Op_CastP2X || op == Op_StoreCM ||
              op == Op_FastLock || op == Op_AryEq || op == Op_StrComp ||
              op == Op_StrEquals || op == Op_StrIndexOf)) {
          n->dump();
          use->dump();
          assert(false, "EA: missing allocation reference path");
        }
#endif
      }
    }

  }
  // New alias types were created in split_AddP().
  uint new_index_end = (uint) _compile->num_alias_types();
  assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1");

  //  Phase 2:  Process MemNode's from memnode_worklist. compute new address type and
  //            compute new values for Memory inputs  (the Memory inputs are not
  //            actually updated until phase 4.)
  if (memnode_worklist.length() == 0)
    return;  // nothing to do
  while (memnode_worklist.length() != 0) {
    Node *n = memnode_worklist.pop();
    if (visited.test_set(n->_idx))
      continue;
    if (n->is_Phi() || n->is_ClearArray()) {
      // we don't need to do anything, but the users must be pushed
    } else if (n->is_MemBar()) { // Initialize, MemBar nodes
      // we don't need to do anything, but the users must be pushed
      n = n->as_MemBar()->proj_out(TypeFunc::Memory);
      if (n == NULL)
        continue;
    } else if (n->Opcode() == Op_EncodeISOArray) {
      // get the memory projection
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        Node *use = n->fast_out(i);
        if (use->Opcode() == Op_SCMemProj) {
          n = use;
          break;
        }
      }
      assert(n->Opcode() == Op_SCMemProj, "memory projection required");
    } else {
      assert(n->is_Mem(), "memory node required.");
      Node *addr = n->in(MemNode::Address);
      const Type *addr_t = igvn->type(addr);
      if (addr_t == Type::TOP)
        continue;
      assert (addr_t->isa_ptr() != NULL, "pointer type required.");
      int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
      assert ((uint)alias_idx < new_index_end, "wrong alias index");
      Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
      if (_compile->failing()) {
        return;
      }
      if (mem != n->in(MemNode::Memory)) {
        // We delay the memory edge update since we need old one in
        // MergeMem code below when instances memory slices are separated.
        set_map(n, mem);
      }
      if (n->is_Load()) {
        continue;  // don't push users
      } else if (n->is_LoadStore()) {
        // get the memory projection
        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
          Node *use = n->fast_out(i);
          if (use->Opcode() == Op_SCMemProj) {
            n = use;
            break;
          }
        }
        assert(n->Opcode() == Op_SCMemProj, "memory projection required");
      }
    }
    // push user on appropriate worklist
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node *use = n->fast_out(i);
      if (use->is_Phi() || use->is_ClearArray()) {
        memnode_worklist.append_if_missing(use);
      } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
        if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores
          continue;
        memnode_worklist.append_if_missing(use);
      } else if (use->is_MemBar()) {
        if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
          memnode_worklist.append_if_missing(use);
        }
#ifdef ASSERT
      } else if(use->is_Mem()) {
        assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
      } else if (use->is_MergeMem()) {
        assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
      } else if (use->Opcode() == Op_EncodeISOArray) {
        if (use->in(MemNode::Memory) == n || use->in(3) == n) {
          // EncodeISOArray overwrites destination array
          memnode_worklist.append_if_missing(use);
        }
      } else {
        uint op = use->Opcode();
        if (!(op == Op_StoreCM ||
              (op == Op_CallLeaf && use->as_CallLeaf()->_name != NULL &&
               strcmp(use->as_CallLeaf()->_name, "g1_wb_pre") == 0) ||
              op == Op_AryEq || op == Op_StrComp ||
              op == Op_StrEquals || op == Op_StrIndexOf)) {
          n->dump();
          use->dump();
          assert(false, "EA: missing memory path");
        }
#endif
      }
    }
  }

  //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
  //            Walk each memory slice moving the first node encountered of each
  //            instance type to the input corresponding to its alias index.
  uint length = _mergemem_worklist.length();
  for( uint next = 0; next < length; ++next ) {
    MergeMemNode* nmm = _mergemem_worklist.at(next);
    assert(!visited.test_set(nmm->_idx), "should not be visited before");
    // Note: we don't want to use MergeMemStream here because we only want to
    // scan inputs which exist at the start, not ones we add during processing.
    // Note 2: MergeMem may already contain instance memory slices added
    // during find_inst_mem() call when memory nodes were processed above.
    igvn->hash_delete(nmm);
    uint nslices = MIN2(nmm->req(), new_index_start);
    for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
      Node* mem = nmm->in(i);
      Node* cur = NULL;
      if (mem == NULL || mem->is_top())
        continue;
      // First, update mergemem by moving memory nodes to corresponding slices
      // if their type became more precise since this mergemem was created.
      while (mem->is_Mem()) {
        const Type *at = igvn->type(mem->in(MemNode::Address));
        if (at != Type::TOP) {
          assert (at->isa_ptr() != NULL, "pointer type required.");
          uint idx = (uint)_compile->get_alias_index(at->is_ptr());
          if (idx == i) {
            if (cur == NULL)
              cur = mem;
          } else {
            if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
              nmm->set_memory_at(idx, mem);
            }
          }
        }
        mem = mem->in(MemNode::Memory);
      }
      nmm->set_memory_at(i, (cur != NULL) ? cur : mem);
      // Find any instance of the current type if we haven't encountered
      // already a memory slice of the instance along the memory chain.
      for (uint ni = new_index_start; ni < new_index_end; ni++) {
        if((uint)_compile->get_general_index(ni) == i) {
          Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
          if (nmm->is_empty_memory(m)) {
            Node* result = find_inst_mem(mem, ni, orig_phis);
            if (_compile->failing()) {
              return;
            }
            nmm->set_memory_at(ni, result);
          }
        }
      }
    }
    // Find the rest of instance values
    for (uint ni = new_index_start; ni < new_index_end; ni++) {
      const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
      Node* result = step_through_mergemem(nmm, ni, tinst);
      if (result == nmm->base_memory()) {
        // Didn't find instance memory, search through general slice recursively.
        result = nmm->memory_at(_compile->get_general_index(ni));
        result = find_inst_mem(result, ni, orig_phis);
        if (_compile->failing()) {
          return;
        }
        nmm->set_memory_at(ni, result);
      }
    }
    igvn->hash_insert(nmm);
    record_for_optimizer(nmm);
  }

  //  Phase 4:  Update the inputs of non-instance memory Phis and
  //            the Memory input of memnodes
  // First update the inputs of any non-instance Phi's from
  // which we split out an instance Phi.  Note we don't have
  // to recursively process Phi's encountered on the input memory
  // chains as is done in split_memory_phi() since they will
  // also be processed here.
  for (int j = 0; j < orig_phis.length(); j++) {
    PhiNode *phi = orig_phis.at(j);
    int alias_idx = _compile->get_alias_index(phi->adr_type());
    igvn->hash_delete(phi);
    for (uint i = 1; i < phi->req(); i++) {
      Node *mem = phi->in(i);
      Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
      if (_compile->failing()) {
        return;
      }
      if (mem != new_mem) {
        phi->set_req(i, new_mem);
      }
    }
    igvn->hash_insert(phi);
    record_for_optimizer(phi);
  }

  // Update the memory inputs of MemNodes with the value we computed
  // in Phase 2 and move stores memory users to corresponding memory slices.
  // Disable memory split verification code until the fix for 6984348.
  // Currently it produces false negative results since it does not cover all cases.
#if 0 // ifdef ASSERT
  visited.Reset();
  Node_Stack old_mems(arena, _compile->unique() >> 2);
#endif
  for (uint i = 0; i < ideal_nodes.size(); i++) {
    Node* n = ideal_nodes.at(i);
    Node* nmem = get_map(n->_idx);
    assert(nmem != NULL, "sanity");
    if (n->is_Mem()) {
#if 0 // ifdef ASSERT
      Node* old_mem = n->in(MemNode::Memory);
      if (!visited.test_set(old_mem->_idx)) {
        old_mems.push(old_mem, old_mem->outcnt());
      }
#endif
      assert(n->in(MemNode::Memory) != nmem, "sanity");
      if (!n->is_Load()) {
        // Move memory users of a store first.
        move_inst_mem(n, orig_phis);
      }
      // Now update memory input
      igvn->hash_delete(n);
      n->set_req(MemNode::Memory, nmem);
      igvn->hash_insert(n);
      record_for_optimizer(n);
    } else {
      assert(n->is_Allocate() || n->is_CheckCastPP() ||
             n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
    }
  }
#if 0 // ifdef ASSERT
  // Verify that memory was split correctly
  while (old_mems.is_nonempty()) {
    Node* old_mem = old_mems.node();
    uint old_cnt = old_mems.index();
    old_mems.pop();
    assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
  }
#endif
}

#ifndef PRODUCT
// Printable names for PointsToNode::NodeType values; indexed by (int)node_type()
// in PointsToNode::dump() below.
static const char *node_type_names[] = {
  "UnknownType",
  "JavaObject",
  "LocalVar",
  "Field",
  "Arraycopy"
};

// Printable names for PointsToNode::EscapeState values; indexed by (int)escape_state().
static const char *esc_names[] = {
  "UnknownEscape",
  "NoEscape",
  "ArgEscape",
  "GlobalEscape"
};

// Print this connection-graph node: its type, (optionally) escape state,
// field bases, out-edges, uses, and the associated ideal node.
void PointsToNode::dump(bool print_state) const {
  NodeType nt = node_type();
  tty->print("%s ", node_type_names[(int) nt]);
  if (print_state) {
    EscapeState es = escape_state();
    EscapeState fields_es = fields_escape_state();
    tty->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]);
    if (nt == PointsToNode::JavaObject && !this->scalar_replaceable())
      tty->print("NSR "); // Not Scalar Replaceable
  }
  if (is_Field()) {
    FieldNode* f = (FieldNode*)this;
    if (f->is_oop())
      tty->print("oop ");
    if (f->offset() > 0)
      tty->print("+%d ", f->offset());
    tty->print("(");
    for (BaseIterator i(f); i.has_next(); i.next()) {
      PointsToNode* b = i.get();
      tty->print(" %d%s", b->idx(),(b->is_JavaObject() ? "P" : ""));
    }
    tty->print(" )");
  }
  tty->print("[");
  for (EdgeIterator i(this); i.has_next(); i.next()) {
    PointsToNode* e = i.get();
    tty->print(" %d%s%s", e->idx(),(e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : "");
  }
  tty->print(" [");
  for (UseIterator i(this); i.has_next(); i.next()) {
    PointsToNode* u = i.get();
    bool is_base = false;
    if (PointsToNode::is_base_use(u)) {
      // Base uses are tagged pointers; untag to get the real Field node.
      is_base = true;
      u = PointsToNode::get_use_node(u)->as_Field();
    }
    tty->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : "");
  }
  tty->print(" ]] ");
  if (_node == NULL)
    tty->print_cr("<null>");
  else
    _node->dump();
}

// Dump the connection graph: every JavaObject node backed by an Allocate
// (or a boxing-method CallStaticJava) together with the locals and fields
// that reference it. Non-NoEscape objects are shown only when Verbose.
void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) {
  bool first = true;
  int ptnodes_length = ptnodes_worklist.length();
  for (int i = 0; i < ptnodes_length; i++) {
    PointsToNode *ptn = ptnodes_worklist.at(i);
    if (ptn == NULL || !ptn->is_JavaObject())
      continue;
    PointsToNode::EscapeState es = ptn->escape_state();
    if ((es != PointsToNode::NoEscape) && !Verbose) {
      continue;
    }
    Node* n = ptn->ideal_node();
    if (n->is_Allocate() || (n->is_CallStaticJava() &&
                             n->as_CallStaticJava()->is_boxing_method())) {
      if (first) {
        tty->cr();
        tty->print("======== Connection graph for ");
        _compile->method()->print_short_name();
        tty->cr();
        first = false;
      }
      ptn->dump();
      // Print all locals and fields which reference this allocation
      for (UseIterator j(ptn); j.has_next(); j.next()) {
        PointsToNode* use = j.get();
        if (use->is_LocalVar()) {
          use->dump(Verbose);
        } else if (Verbose) {
          use->dump();
        }
      }
      tty->cr();
    }
  }
}
#endif