Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/opto/escape.cpp
32285 views
/*1* Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.2* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.3*4* This code is free software; you can redistribute it and/or modify it5* under the terms of the GNU General Public License version 2 only, as6* published by the Free Software Foundation.7*8* This code is distributed in the hope that it will be useful, but WITHOUT9* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or10* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License11* version 2 for more details (a copy is included in the LICENSE file that12* accompanied this code).13*14* You should have received a copy of the GNU General Public License version15* 2 along with this work; if not, write to the Free Software Foundation,16* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.17*18* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA19* or visit www.oracle.com if you need additional information or have any20* questions.21*22*/2324#include "precompiled.hpp"25#include "ci/bcEscapeAnalyzer.hpp"26#include "compiler/compileLog.hpp"27#include "libadt/vectset.hpp"28#include "memory/allocation.hpp"29#include "opto/c2compiler.hpp"30#include "opto/callnode.hpp"31#include "opto/cfgnode.hpp"32#include "opto/compile.hpp"33#include "opto/escape.hpp"34#include "opto/phaseX.hpp"35#include "opto/rootnode.hpp"36#if INCLUDE_ALL_GCS37#include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp"38#endif3940ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :41_nodes(C->comp_arena(), C->unique(), C->unique(), NULL),42_in_worklist(C->comp_arena()),43_next_pidx(0),44_collecting(true),45_verify(false),46_compile(C),47_igvn(igvn),48_node_map(C->comp_arena()) {49// Add unknown java object.50add_java_object(C->top(), PointsToNode::GlobalEscape);51phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();52// Add ConP(#NULL) and ConN(#NULL) nodes.53Node* oop_null = igvn->zerocon(T_OBJECT);54assert(oop_null->_idx < nodes_size(), "should be created already");55add_java_object(oop_null, PointsToNode::NoEscape);56null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();57if (UseCompressedOops) {58Node* noop_null = igvn->zerocon(T_NARROWOOP);59assert(noop_null->_idx < nodes_size(), "should be created already");60map_ideal_node(noop_null, null_obj);61}62_pcmp_neq = NULL; // Should be initialized63_pcmp_eq = NULL;64}6566bool ConnectionGraph::has_candidates(Compile *C) {67// EA brings benefits only when the code has allocations and/or locks which68// are represented by ideal Macro nodes.69int cnt = C->macro_count();70for (int i = 0; i < cnt; i++) {71Node *n = C->macro_node(i);72if (n->is_Allocate())73return true;74if (n->is_Lock()) {75Node* obj = n->as_Lock()->obj_node()->uncast();76if (!(obj->is_Parm() || obj->is_Con()))77return true;78}79if (n->is_CallStaticJava() &&80n->as_CallStaticJava()->is_boxing_method()) {81return true;82}83}84return false;85}8687void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {88Compile::TracePhase t2("escapeAnalysis", &Phase::_t_escapeAnalysis, true);89ResourceMark rm;9091// Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction92// to create space for them in ConnectionGraph::_nodes[].93Node* oop_null = igvn->zerocon(T_OBJECT);94Node* noop_null = igvn->zerocon(T_NARROWOOP);95ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn);96// Perform escape analysis97if (congraph->compute_escape()) {98// There are non escaping objects.99C->set_congraph(congraph);100}101// Cleanup.102if (oop_null->outcnt() == 0)103igvn->hash_delete(oop_null);104if (noop_null->outcnt() == 0)105igvn->hash_delete(noop_null);106}107108bool ConnectionGraph::compute_escape() {109Compile* C = _compile;110PhaseGVN* igvn = _igvn;111112// Worklists used by EA.113Unique_Node_List delayed_worklist;114GrowableArray<Node*> alloc_worklist;115GrowableArray<Node*> ptr_cmp_worklist;116GrowableArray<Node*> storestore_worklist;117GrowableArray<PointsToNode*> ptnodes_worklist;118GrowableArray<JavaObjectNode*> java_objects_worklist;119GrowableArray<JavaObjectNode*> non_escaped_worklist;120GrowableArray<FieldNode*> oop_fields_worklist;121DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )122123{ Compile::TracePhase t3("connectionGraph", &Phase::_t_connectionGraph, true);124125// 1. Populate Connection Graph (CG) with PointsTo nodes.126ideal_nodes.map(C->live_nodes(), NULL); // preallocate space127// Initialize worklist128if (C->root() != NULL) {129ideal_nodes.push(C->root());130}131// Processed ideal nodes are unique on ideal_nodes list132// but several ideal nodes are mapped to the phantom_obj.133// To avoid duplicated entries on the following worklists134// add the phantom_obj only once to them.135ptnodes_worklist.append(phantom_obj);136java_objects_worklist.append(phantom_obj);137for( uint next = 0; next < ideal_nodes.size(); ++next ) {138Node* n = ideal_nodes.at(next);139// Create PointsTo nodes and add them to Connection Graph. Called140// only once per ideal node since ideal_nodes is Unique_Node list.141add_node_to_connection_graph(n, &delayed_worklist);142PointsToNode* ptn = ptnode_adr(n->_idx);143if (ptn != NULL && ptn != phantom_obj) {144ptnodes_worklist.append(ptn);145if (ptn->is_JavaObject()) {146java_objects_worklist.append(ptn->as_JavaObject());147if ((n->is_Allocate() || n->is_CallStaticJava()) &&148(ptn->escape_state() < PointsToNode::GlobalEscape)) {149// Only allocations and java static calls results are interesting.150non_escaped_worklist.append(ptn->as_JavaObject());151}152} else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {153oop_fields_worklist.append(ptn->as_Field());154}155}156if (n->is_MergeMem()) {157// Collect all MergeMem nodes to add memory slices for158// scalar replaceable objects in split_unique_types().159_mergemem_worklist.append(n->as_MergeMem());160} else if (OptimizePtrCompare && n->is_Cmp() &&161(n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {162// Collect compare pointers nodes.163ptr_cmp_worklist.append(n);164} else if (n->is_MemBarStoreStore()) {165// Collect all MemBarStoreStore nodes so that depending on the166// escape status of the associated Allocate node some of them167// may be eliminated.168storestore_worklist.append(n);169} else if (n->is_MemBar() && (n->Opcode() == Op_MemBarRelease) &&170(n->req() > MemBarNode::Precedent)) {171record_for_optimizer(n);172#ifdef ASSERT173} else if (n->is_AddP()) {174// Collect address nodes for graph verification.175addp_worklist.append(n);176#endif177}178for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {179Node* m = n->fast_out(i); // Get user180ideal_nodes.push(m);181}182}183if (non_escaped_worklist.length() == 0) {184_collecting = false;185return false; // Nothing to do.186}187// Add final simple edges to graph.188while(delayed_worklist.size() > 0) {189Node* n = delayed_worklist.pop();190add_final_edges(n);191}192int ptnodes_length = ptnodes_worklist.length();193194#ifdef ASSERT195if (VerifyConnectionGraph) {196// Verify that no new simple edges could be created and all197// local vars has edges.198_verify = true;199for (int next = 0; next < ptnodes_length; ++next) {200PointsToNode* ptn = ptnodes_worklist.at(next);201add_final_edges(ptn->ideal_node());202if (ptn->is_LocalVar() && ptn->edge_count() == 0) {203ptn->dump();204assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");205}206}207_verify = false;208}209#endif210// Bytecode analyzer BCEscapeAnalyzer, used for Call nodes211// processing, calls to CI to resolve symbols (types, fields, methods)212// referenced in bytecode. During symbol resolution VM may throw213// an exception which CI cleans and converts to compilation failure.214if (C->failing()) return false;215216// 2. Finish Graph construction by propagating references to all217// java objects through graph.218if (!complete_connection_graph(ptnodes_worklist, non_escaped_worklist,219java_objects_worklist, oop_fields_worklist)) {220// All objects escaped or hit time or iterations limits.221_collecting = false;222return false;223}224225// 3. Adjust scalar_replaceable state of nonescaping objects and push226// scalar replaceable allocations on alloc_worklist for processing227// in split_unique_types().228int non_escaped_length = non_escaped_worklist.length();229for (int next = 0; next < non_escaped_length; next++) {230JavaObjectNode* ptn = non_escaped_worklist.at(next);231bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);232Node* n = ptn->ideal_node();233if (n->is_Allocate()) {234n->as_Allocate()->_is_non_escaping = noescape;235}236if (n->is_CallStaticJava()) {237n->as_CallStaticJava()->_is_non_escaping = noescape;238}239if (noescape && ptn->scalar_replaceable()) {240adjust_scalar_replaceable_state(ptn);241if (ptn->scalar_replaceable()) {242alloc_worklist.append(ptn->ideal_node());243}244}245}246247#ifdef ASSERT248if (VerifyConnectionGraph) {249// Verify that graph is complete - no new edges could be added or needed.250verify_connection_graph(ptnodes_worklist, non_escaped_worklist,251java_objects_worklist, addp_worklist);252}253assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");254assert(null_obj->escape_state() == PointsToNode::NoEscape &&255null_obj->edge_count() == 0 &&256!null_obj->arraycopy_src() &&257!null_obj->arraycopy_dst(), "sanity");258#endif259260_collecting = false;261262} // TracePhase t3("connectionGraph")263264// 4. Optimize ideal graph based on EA information.265bool has_non_escaping_obj = (non_escaped_worklist.length() > 0);266if (has_non_escaping_obj) {267optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);268}269270#ifndef PRODUCT271if (PrintEscapeAnalysis) {272dump(ptnodes_worklist); // Dump ConnectionGraph273}274#endif275276bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);277#ifdef ASSERT278if (VerifyConnectionGraph) {279int alloc_length = alloc_worklist.length();280for (int next = 0; next < alloc_length; ++next) {281Node* n = alloc_worklist.at(next);282PointsToNode* ptn = ptnode_adr(n->_idx);283assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");284}285}286#endif287288// 5. Separate memory graph for scalar replaceable allcations.289if (has_scalar_replaceable_candidates &&290C->AliasLevel() >= 3 && EliminateAllocations) {291// Now use the escape information to create unique types for292// scalar replaceable objects.293split_unique_types(alloc_worklist);294if (C->failing()) return false;295C->print_method(PHASE_AFTER_EA, 2);296297#ifdef ASSERT298} else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {299tty->print("=== No allocations eliminated for ");300C->method()->print_short_name();301if(!EliminateAllocations) {302tty->print(" since EliminateAllocations is off ===");303} else if(!has_scalar_replaceable_candidates) {304tty->print(" since there are no scalar replaceable candidates ===");305} else if(C->AliasLevel() < 3) {306tty->print(" since AliasLevel < 3 ===");307}308tty->cr();309#endif310}311return has_non_escaping_obj;312}313314// Utility function for nodes that load an object315void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {316// Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because317// ThreadLocal has RawPtr type.318const Type* t = _igvn->type(n);319if (t->make_ptr() != NULL) {320Node* adr = n->in(MemNode::Address);321#ifdef ASSERT322if (!adr->is_AddP()) {323assert(_igvn->type(adr)->isa_rawptr(), "sanity");324} else {325assert((ptnode_adr(adr->_idx) == NULL ||326ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");327}328#endif329add_local_var_and_edge(n, PointsToNode::NoEscape,330adr, delayed_worklist);331}332}333334// Populate Connection Graph with PointsTo nodes and create simple335// connection graph edges.336void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {337assert(!_verify, "this method sould not be called for verification");338PhaseGVN* igvn = _igvn;339uint n_idx = n->_idx;340PointsToNode* n_ptn = ptnode_adr(n_idx);341if (n_ptn != NULL)342return; // No need to redefine PointsTo node during first iteration.343344if (n->is_Call()) {345// Arguments to allocation and locking don't escape.346if (n->is_AbstractLock()) {347// Put Lock and Unlock nodes on IGVN worklist to process them during348// first IGVN optimization when escape information is still available.349record_for_optimizer(n);350} else if (n->is_Allocate()) {351add_call_node(n->as_Call());352record_for_optimizer(n);353} else {354if (n->is_CallStaticJava()) {355const char* name = n->as_CallStaticJava()->_name;356if (name != NULL && strcmp(name, "uncommon_trap") == 0)357return; // Skip uncommon traps358}359// Don't mark as processed since call's arguments have to be processed.360delayed_worklist->push(n);361// Check if a call returns an object.362if ((n->as_Call()->returns_pointer() &&363n->as_Call()->proj_out(TypeFunc::Parms) != NULL) ||364(n->is_CallStaticJava() &&365n->as_CallStaticJava()->is_boxing_method())) {366add_call_node(n->as_Call());367}368}369return;370}371// Put this check here to process call arguments since some call nodes372// point to phantom_obj.373if (n_ptn == phantom_obj || n_ptn == null_obj)374return; // Skip predefined nodes.375376int opcode = n->Opcode();377switch (opcode) {378case Op_AddP: {379Node* base = get_addp_base(n);380PointsToNode* ptn_base = ptnode_adr(base->_idx);381// Field nodes are created for all field types. They are used in382// adjust_scalar_replaceable_state() and split_unique_types().383// Note, non-oop fields will have only base edges in Connection384// Graph because such fields are not used for oop loads and stores.385int offset = address_offset(n, igvn);386add_field(n, PointsToNode::NoEscape, offset);387if (ptn_base == NULL) {388delayed_worklist->push(n); // Process it later.389} else {390n_ptn = ptnode_adr(n_idx);391add_base(n_ptn->as_Field(), ptn_base);392}393break;394}395case Op_CastX2P: {396map_ideal_node(n, phantom_obj);397break;398}399case Op_CastPP:400case Op_CheckCastPP:401case Op_EncodeP:402case Op_DecodeN:403case Op_EncodePKlass:404case Op_DecodeNKlass: {405add_local_var_and_edge(n, PointsToNode::NoEscape,406n->in(1), delayed_worklist);407break;408}409case Op_CMoveP: {410add_local_var(n, PointsToNode::NoEscape);411// Do not add edges during first iteration because some could be412// not defined yet.413delayed_worklist->push(n);414break;415}416case Op_ConP:417case Op_ConN:418case Op_ConNKlass: {419// assume all oop constants globally escape except for null420PointsToNode::EscapeState es;421const Type* t = igvn->type(n);422if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {423es = PointsToNode::NoEscape;424} else {425es = PointsToNode::GlobalEscape;426}427add_java_object(n, es);428break;429}430case Op_CreateEx: {431// assume that all exception objects globally escape432map_ideal_node(n, phantom_obj);433break;434}435case Op_LoadKlass:436case Op_LoadNKlass: {437// Unknown class is loaded438map_ideal_node(n, phantom_obj);439break;440}441case Op_LoadP:442case Op_LoadN:443case Op_LoadPLocked: {444add_objload_to_connection_graph(n, delayed_worklist);445break;446}447case Op_Parm: {448map_ideal_node(n, phantom_obj);449break;450}451case Op_PartialSubtypeCheck: {452// Produces Null or notNull and is used in only in CmpP so453// phantom_obj could be used.454map_ideal_node(n, phantom_obj); // Result is unknown455break;456}457case Op_Phi: {458// Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because459// ThreadLocal has RawPtr type.460const Type* t = n->as_Phi()->type();461if (t->make_ptr() != NULL) {462add_local_var(n, PointsToNode::NoEscape);463// Do not add edges during first iteration because some could be464// not defined yet.465delayed_worklist->push(n);466}467break;468}469case Op_Proj: {470// we are only interested in the oop result projection from a call471if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&472n->in(0)->as_Call()->returns_pointer()) {473add_local_var_and_edge(n, PointsToNode::NoEscape,474n->in(0), delayed_worklist);475}476break;477}478case Op_Rethrow: // Exception object escapes479case Op_Return: {480if (n->req() > TypeFunc::Parms &&481igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {482// Treat Return value as LocalVar with GlobalEscape escape state.483add_local_var_and_edge(n, PointsToNode::GlobalEscape,484n->in(TypeFunc::Parms), delayed_worklist);485}486break;487}488case Op_GetAndSetP:489case Op_GetAndSetN: {490add_objload_to_connection_graph(n, delayed_worklist);491// fallthrough492}493case Op_StoreP:494case Op_StoreN:495case Op_StoreNKlass:496case Op_StorePConditional:497case Op_CompareAndSwapP:498case Op_CompareAndSwapN: {499Node* adr = n->in(MemNode::Address);500const Type *adr_type = igvn->type(adr);501adr_type = adr_type->make_ptr();502if (adr_type == NULL) {503break; // skip dead nodes504}505if (adr_type->isa_oopptr() ||506(opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) &&507(adr_type == TypeRawPtr::NOTNULL &&508adr->in(AddPNode::Address)->is_Proj() &&509adr->in(AddPNode::Address)->in(0)->is_Allocate())) {510delayed_worklist->push(n); // Process it later.511#ifdef ASSERT512assert(adr->is_AddP(), "expecting an AddP");513if (adr_type == TypeRawPtr::NOTNULL) {514// Verify a raw address for a store captured by Initialize node.515int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);516assert(offs != Type::OffsetBot, "offset must be a constant");517}518#endif519} else {520// Ignore copy the displaced header to the BoxNode (OSR compilation).521if (adr->is_BoxLock())522break;523// Stored value escapes in unsafe access.524if ((opcode == Op_StoreP) && (adr_type == TypeRawPtr::BOTTOM)) {525// Pointer stores in G1 barriers looks like unsafe access.526// Ignore such stores to be able scalar replace non-escaping527// allocations.528if ((UseG1GC || UseShenandoahGC) && adr->is_AddP()) {529Node* base = get_addp_base(adr);530if (base->Opcode() == Op_LoadP &&531base->in(MemNode::Address)->is_AddP()) {532adr = base->in(MemNode::Address);533Node* tls = get_addp_base(adr);534if (tls->Opcode() == Op_ThreadLocal) {535int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);536if (offs == in_bytes(JavaThread::satb_mark_queue_offset() +537PtrQueue::byte_offset_of_buf())) {538break; // G1 pre barier previous oop value store.539}540if (offs == in_bytes(JavaThread::dirty_card_queue_offset() +541PtrQueue::byte_offset_of_buf())) {542break; // G1 post barier card address store.543}544}545}546}547delayed_worklist->push(n); // Process unsafe access later.548break;549}550#ifdef ASSERT551n->dump(1);552assert(false, "not unsafe or G1 barrier raw StoreP");553#endif554}555break;556}557case Op_AryEq:558case Op_StrComp:559case Op_StrEquals:560case Op_StrIndexOf:561case Op_EncodeISOArray: {562add_local_var(n, PointsToNode::ArgEscape);563delayed_worklist->push(n); // Process it later.564break;565}566case Op_ThreadLocal: {567add_java_object(n, PointsToNode::ArgEscape);568break;569}570#if INCLUDE_ALL_GCS571case Op_ShenandoahLoadReferenceBarrier:572add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), delayed_worklist);573break;574#endif575default:576; // Do nothing for nodes not related to EA.577}578return;579}580581#ifdef ASSERT582#define ELSE_FAIL(name) \583/* Should not be called for not pointer type. */ \584n->dump(1); \585assert(false, name); \586break;587#else588#define ELSE_FAIL(name) \589break;590#endif591592// Add final simple edges to graph.593void ConnectionGraph::add_final_edges(Node *n) {594PointsToNode* n_ptn = ptnode_adr(n->_idx);595#ifdef ASSERT596if (_verify && n_ptn->is_JavaObject())597return; // This method does not change graph for JavaObject.598#endif599600if (n->is_Call()) {601process_call_arguments(n->as_Call());602return;603}604assert(n->is_Store() || n->is_LoadStore() ||605(n_ptn != NULL) && (n_ptn->ideal_node() != NULL),606"node should be registered already");607int opcode = n->Opcode();608switch (opcode) {609case Op_AddP: {610Node* base = get_addp_base(n);611PointsToNode* ptn_base = ptnode_adr(base->_idx);612assert(ptn_base != NULL, "field's base should be registered");613add_base(n_ptn->as_Field(), ptn_base);614break;615}616case Op_CastPP:617case Op_CheckCastPP:618case Op_EncodeP:619case Op_DecodeN:620case Op_EncodePKlass:621case Op_DecodeNKlass: {622add_local_var_and_edge(n, PointsToNode::NoEscape,623n->in(1), NULL);624break;625}626case Op_CMoveP: {627for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {628Node* in = n->in(i);629if (in == NULL)630continue; // ignore NULL631Node* uncast_in = in->uncast();632if (uncast_in->is_top() || uncast_in == n)633continue; // ignore top or inputs which go back this node634PointsToNode* ptn = ptnode_adr(in->_idx);635assert(ptn != NULL, "node should be registered");636add_edge(n_ptn, ptn);637}638break;639}640case Op_LoadP:641case Op_LoadN:642case Op_LoadPLocked: {643// Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because644// ThreadLocal has RawPtr type.645const Type* t = _igvn->type(n);646if (t->make_ptr() != NULL) {647Node* adr = n->in(MemNode::Address);648add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);649break;650}651ELSE_FAIL("Op_LoadP");652}653case Op_Phi: {654// Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because655// ThreadLocal has RawPtr type.656const Type* t = n->as_Phi()->type();657if (t->make_ptr() != NULL) {658for (uint i = 1; i < n->req(); i++) {659Node* in = n->in(i);660if (in == NULL)661continue; // ignore NULL662Node* uncast_in = in->uncast();663if (uncast_in->is_top() || uncast_in == n)664continue; // ignore top or inputs which go back this node665PointsToNode* ptn = ptnode_adr(in->_idx);666assert(ptn != NULL, "node should be registered");667add_edge(n_ptn, ptn);668}669break;670}671ELSE_FAIL("Op_Phi");672}673case Op_Proj: {674// we are only interested in the oop result projection from a call675if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&676n->in(0)->as_Call()->returns_pointer()) {677add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);678break;679}680ELSE_FAIL("Op_Proj");681}682case Op_Rethrow: // Exception object escapes683case Op_Return: {684if (n->req() > TypeFunc::Parms &&685_igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {686// Treat Return value as LocalVar with GlobalEscape escape state.687add_local_var_and_edge(n, PointsToNode::GlobalEscape,688n->in(TypeFunc::Parms), NULL);689break;690}691ELSE_FAIL("Op_Return");692}693case Op_StoreP:694case Op_StoreN:695case Op_StoreNKlass:696case Op_StorePConditional:697case Op_CompareAndSwapP:698case Op_CompareAndSwapN:699case Op_GetAndSetP:700case Op_GetAndSetN: {701Node* adr = n->in(MemNode::Address);702const Type *adr_type = _igvn->type(adr);703adr_type = adr_type->make_ptr();704#ifdef ASSERT705if (adr_type == NULL) {706n->dump(1);707assert(adr_type != NULL, "dead node should not be on list");708break;709}710#endif711if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN) {712add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);713}714if (adr_type->isa_oopptr() ||715(opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) &&716(adr_type == TypeRawPtr::NOTNULL &&717adr->in(AddPNode::Address)->is_Proj() &&718adr->in(AddPNode::Address)->in(0)->is_Allocate())) {719// Point Address to Value720PointsToNode* adr_ptn = ptnode_adr(adr->_idx);721assert(adr_ptn != NULL &&722adr_ptn->as_Field()->is_oop(), "node should be registered");723Node *val = n->in(MemNode::ValueIn);724PointsToNode* ptn = ptnode_adr(val->_idx);725assert(ptn != NULL, "node should be registered");726add_edge(adr_ptn, ptn);727break;728} else if ((opcode == Op_StoreP) && (adr_type == TypeRawPtr::BOTTOM)) {729// Stored value escapes in unsafe access.730Node *val = n->in(MemNode::ValueIn);731PointsToNode* ptn = ptnode_adr(val->_idx);732assert(ptn != NULL, "node should be registered");733set_escape_state(ptn, PointsToNode::GlobalEscape);734// Add edge to object for unsafe access with offset.735PointsToNode* adr_ptn = ptnode_adr(adr->_idx);736assert(adr_ptn != NULL, "node should be registered");737if (adr_ptn->is_Field()) {738assert(adr_ptn->as_Field()->is_oop(), "should be oop field");739add_edge(adr_ptn, ptn);740}741break;742}743ELSE_FAIL("Op_StoreP");744}745case Op_AryEq:746case Op_StrComp:747case Op_StrEquals:748case Op_StrIndexOf:749case Op_EncodeISOArray: {750// char[] arrays passed to string intrinsic do not escape but751// they are not scalar replaceable. Adjust escape state for them.752// Start from in(2) edge since in(1) is memory edge.753for (uint i = 2; i < n->req(); i++) {754Node* adr = n->in(i);755const Type* at = _igvn->type(adr);756if (!adr->is_top() && at->isa_ptr()) {757assert(at == Type::TOP || at == TypePtr::NULL_PTR ||758at->isa_ptr() != NULL, "expecting a pointer");759if (adr->is_AddP()) {760adr = get_addp_base(adr);761}762PointsToNode* ptn = ptnode_adr(adr->_idx);763assert(ptn != NULL, "node should be registered");764add_edge(n_ptn, ptn);765}766}767break;768}769#if INCLUDE_ALL_GCS770case Op_ShenandoahLoadReferenceBarrier:771add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), NULL);772break;773#endif774default: {775// This method should be called only for EA specific nodes which may776// miss some edges when they were created.777#ifdef ASSERT778n->dump(1);779#endif780guarantee(false, "unknown node");781}782}783return;784}785786void ConnectionGraph::add_call_node(CallNode* call) {787assert(call->returns_pointer(), "only for call which returns pointer");788uint call_idx = call->_idx;789if (call->is_Allocate()) {790Node* k = call->in(AllocateNode::KlassNode);791const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();792assert(kt != NULL, "TypeKlassPtr required.");793ciKlass* cik = kt->klass();794PointsToNode::EscapeState es = PointsToNode::NoEscape;795bool scalar_replaceable = true;796if (call->is_AllocateArray()) {797if (!cik->is_array_klass()) { // StressReflectiveCode798es = PointsToNode::GlobalEscape;799} else {800int length = call->in(AllocateNode::ALength)->find_int_con(-1);801if (length < 0 || length > EliminateAllocationArraySizeLimit) {802// Not scalar replaceable if the length is not constant or too big.803scalar_replaceable = false;804}805}806} else { // Allocate instance807if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||808cik->is_subclass_of(_compile->env()->Reference_klass()) ||809!cik->is_instance_klass() || // StressReflectiveCode810cik->as_instance_klass()->has_finalizer()) {811es = PointsToNode::GlobalEscape;812}813}814add_java_object(call, es);815PointsToNode* ptn = ptnode_adr(call_idx);816if (!scalar_replaceable && ptn->scalar_replaceable()) {817ptn->set_scalar_replaceable(false);818}819} else if (call->is_CallStaticJava()) {820// Call nodes could be different types:821//822// 1. CallDynamicJavaNode (what happened during call is unknown):823//824// - mapped to GlobalEscape JavaObject node if oop is returned;825//826// - all oop arguments are escaping globally;827//828// 2. CallStaticJavaNode (execute bytecode analysis if possible):829//830// - the same as CallDynamicJavaNode if can't do bytecode analysis;831//832// - mapped to GlobalEscape JavaObject node if unknown oop is returned;833// - mapped to NoEscape JavaObject node if non-escaping object allocated834// during call is returned;835// - mapped to ArgEscape LocalVar node pointed to object arguments836// which are returned and does not escape during call;837//838// - oop arguments escaping status is defined by bytecode analysis;839//840// For a static call, we know exactly what method is being called.841// Use bytecode estimator to record whether the call's return value escapes.842ciMethod* meth = call->as_CallJava()->method();843if (meth == NULL) {844const char* name = call->as_CallStaticJava()->_name;845assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");846// Returns a newly allocated unescaped object.847add_java_object(call, PointsToNode::NoEscape);848ptnode_adr(call_idx)->set_scalar_replaceable(false);849} else if (meth->is_boxing_method()) {850// Returns boxing object851PointsToNode::EscapeState es;852vmIntrinsics::ID intr = meth->intrinsic_id();853if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {854// It does not escape if object is always allocated.855es = PointsToNode::NoEscape;856} else {857// It escapes globally if object could be loaded from cache.858es = PointsToNode::GlobalEscape;859}860add_java_object(call, es);861} else {862BCEscapeAnalyzer* call_analyzer = meth->get_bcea();863call_analyzer->copy_dependencies(_compile->dependencies());864if (call_analyzer->is_return_allocated()) {865// Returns a newly allocated unescaped object, simply866// update dependency information.867// Mark it as NoEscape so that objects referenced by868// it's fields will be marked as NoEscape at least.869add_java_object(call, PointsToNode::NoEscape);870ptnode_adr(call_idx)->set_scalar_replaceable(false);871} else {872// Determine whether any arguments are returned.873const TypeTuple* d = call->tf()->domain();874bool ret_arg = false;875for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {876if (d->field_at(i)->isa_ptr() != NULL &&877call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {878ret_arg = true;879break;880}881}882if (ret_arg) {883add_local_var(call, PointsToNode::ArgEscape);884} else {885// Returns unknown object.886map_ideal_node(call, phantom_obj);887}888}889}890} else {891// An other type of call, assume the worst case:892// returned value is unknown and globally escapes.893assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");894map_ideal_node(call, phantom_obj);895}896}897898void ConnectionGraph::process_call_arguments(CallNode *call) {899bool is_arraycopy = false;900switch (call->Opcode()) {901#ifdef ASSERT902case Op_Allocate:903case Op_AllocateArray:904case Op_Lock:905case Op_Unlock:906assert(false, "should be done already");907break;908#endif909case Op_CallLeafNoFP:910is_arraycopy = (call->as_CallLeaf()->_name != NULL &&911strstr(call->as_CallLeaf()->_name, "arraycopy") != 0);912// fall through913case Op_CallLeaf: {914// Stub calls, objects do not escape but they are not scale replaceable.915// Adjust escape state for outgoing arguments.916const TypeTuple * d = call->tf()->domain();917bool src_has_oops = false;918for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {919const Type* at = d->field_at(i);920Node *arg = call->in(i);921const Type *aat = _igvn->type(arg);922if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr())923continue;924if (arg->is_AddP()) {925//926// The inline_native_clone() case when the arraycopy stub is called927// after the allocation before Initialize and CheckCastPP nodes.928// Or normal arraycopy for object arrays case.929//930// Set AddP's base (Allocate) as not scalar replaceable since931// pointer to the base (with offset) is passed as argument.932//933arg = get_addp_base(arg);934}935PointsToNode* arg_ptn = ptnode_adr(arg->_idx);936assert(arg_ptn != NULL, "should be registered");937PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();938if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {939assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||940aat->isa_ptr() != NULL, "expecting an Ptr");941bool arg_has_oops = aat->isa_oopptr() &&942(aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||943(aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()));944if (i == TypeFunc::Parms) {945src_has_oops = arg_has_oops;946}947//948// src or dst could be j.l.Object when other is basic type array:949//950// arraycopy(char[],0,Object*,0,size);951// arraycopy(Object*,0,char[],0,size);952//953// Don't add edges in such cases.954//955bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&956arg_has_oops && (i > TypeFunc::Parms);957#ifdef ASSERT958if (!(is_arraycopy ||959(call->as_CallLeaf()->_name != NULL &&960(strcmp(call->as_CallLeaf()->_name, "g1_wb_pre") == 0 ||961strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 ||962strcmp(call->as_CallLeaf()->_name, "shenandoah_clone_barrier") == 0 ||963strcmp(call->as_CallLeaf()->_name, "shenandoah_cas_obj") == 0 ||964strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||965strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||966strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||967strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||968strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||969strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||970strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||971strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||972strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||973strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||974strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||975strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||976strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||977strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||978strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||979strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||980strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0)981))) {982call->dump();983fatal(err_msg_res("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name));984}985#endif986// Always process arraycopy's destination object since987// we need to add all possible edges to references in988// source object.989if (arg_esc >= PointsToNode::ArgEscape &&990!arg_is_arraycopy_dest) {991continue;992}993set_escape_state(arg_ptn, PointsToNode::ArgEscape);994if (arg_is_arraycopy_dest) {995Node* src = call->in(TypeFunc::Parms);996if (src->is_AddP()) {997src = get_addp_base(src);998}999PointsToNode* src_ptn = ptnode_adr(src->_idx);1000assert(src_ptn != NULL, "should be registered");1001if (arg_ptn != src_ptn) {1002// Special arraycopy edge:1003// A destination object's field can't have the source object1004// as base since objects escape states are not related.1005// Only escape state of destination object's fields affects1006// escape state of fields in source object.1007add_arraycopy(call, PointsToNode::ArgEscape, src_ptn, arg_ptn);1008}1009}1010}1011}1012break;1013}1014case Op_CallStaticJava: {1015// For a static call, we know exactly what method is being called.1016// Use bytecode estimator to record the call's escape affects1017#ifdef ASSERT1018const char* name = call->as_CallStaticJava()->_name;1019assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");1020#endif1021ciMethod* meth = call->as_CallJava()->method();1022if ((meth != NULL) && meth->is_boxing_method()) {1023break; // Boxing methods do not modify any oops.1024}1025BCEscapeAnalyzer* call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL;1026// fall-through if not a Java method or no analyzer information1027if (call_analyzer != NULL) {1028PointsToNode* call_ptn = ptnode_adr(call->_idx);1029const TypeTuple* d = call->tf()->domain();1030for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {1031const Type* at = d->field_at(i);1032int k = i - TypeFunc::Parms;1033Node* arg = call->in(i);1034PointsToNode* arg_ptn = ptnode_adr(arg->_idx);1035if (at->isa_ptr() != NULL &&1036call_analyzer->is_arg_returned(k)) {1037// The call returns arguments.1038if (call_ptn != NULL) { // Is call's result used?1039assert(call_ptn->is_LocalVar(), "node should be registered");1040assert(arg_ptn != NULL, "node should be registered");1041add_edge(call_ptn, arg_ptn);1042}1043}1044if (at->isa_oopptr() != NULL &&1045arg_ptn->escape_state() < PointsToNode::GlobalEscape) {1046if (!call_analyzer->is_arg_stack(k)) {1047// The argument global escapes1048set_escape_state(arg_ptn, PointsToNode::GlobalEscape);1049} else {1050set_escape_state(arg_ptn, PointsToNode::ArgEscape);1051if (!call_analyzer->is_arg_local(k)) {1052// The argument itself doesn't escape, but any fields might1053set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape);1054}1055}1056}1057}1058if (call_ptn != NULL && call_ptn->is_LocalVar()) {1059// The call returns arguments.1060assert(call_ptn->edge_count() > 0, "sanity");1061if (!call_analyzer->is_return_local()) {1062// Returns also unknown object.1063add_edge(call_ptn, phantom_obj);1064}1065}1066break;1067}1068}1069default: {1070// Fall-through here if not a Java method or no analyzer information1071// or some other type of call, assume the worst case: all arguments1072// globally escape.1073const TypeTuple* d = call->tf()->domain();1074for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {1075const Type* at = d->field_at(i);1076if (at->isa_oopptr() != NULL) {1077Node* arg = call->in(i);1078if (arg->is_AddP()) {1079arg = get_addp_base(arg);1080}1081assert(ptnode_adr(arg->_idx) != NULL, "should be defined already");1082set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape);1083}1084}1085}1086}1087}108810891090// Finish Graph construction.1091bool ConnectionGraph::complete_connection_graph(1092GrowableArray<PointsToNode*>& ptnodes_worklist,1093GrowableArray<JavaObjectNode*>& non_escaped_worklist,1094GrowableArray<JavaObjectNode*>& java_objects_worklist,1095GrowableArray<FieldNode*>& oop_fields_worklist) {1096// Normally only 1-3 passes needed to build Connection Graph depending1097// on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.1098// Set limit to 20 to catch situation when something did go wrong and1099// bailout Escape Analysis.1100// Also limit build time to 20 sec (60 in debug VM), EscapeAnalysisTimeout flag.1101#define CG_BUILD_ITER_LIMIT 2011021103// Propagate GlobalEscape and ArgEscape escape states and check that1104// we still have non-escaping objects. The method pushs on _worklist1105// Field nodes which reference phantom_object.1106if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {1107return false; // Nothing to do.1108}1109// Now propagate references to all JavaObject nodes.1110int java_objects_length = java_objects_worklist.length();1111elapsedTimer time;1112bool timeout = false;1113int new_edges = 1;1114int iterations = 0;1115do {1116while ((new_edges > 0) &&1117(iterations++ < CG_BUILD_ITER_LIMIT)) {1118double start_time = time.seconds();1119time.start();1120new_edges = 0;1121// Propagate references to phantom_object for nodes pushed on _worklist1122// by find_non_escaped_objects() and find_field_value().1123new_edges += add_java_object_edges(phantom_obj, false);1124for (int next = 0; next < java_objects_length; ++next) {1125JavaObjectNode* ptn = java_objects_worklist.at(next);1126new_edges += add_java_object_edges(ptn, true);11271128#define SAMPLE_SIZE 41129if ((next % SAMPLE_SIZE) == 0) {1130// Each 4 iterations calculate how much time it will take1131// to complete graph construction.1132time.stop();1133// Poll for requests from shutdown mechanism to quiesce compiler1134// because Connection graph construction may take long time.1135CompileBroker::maybe_block();1136double stop_time = time.seconds();1137double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;1138double time_until_end = time_per_iter * (double)(java_objects_length - next);1139if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {1140timeout = true;1141break; // Timeout1142}1143start_time = stop_time;1144time.start();1145}1146#undef SAMPLE_SIZE11471148}1149if (timeout) break;1150if (new_edges > 0) {1151// Update escape states on each iteration if graph was updated.1152if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {1153return false; // Nothing to do.1154}1155}1156time.stop();1157if (time.seconds() >= EscapeAnalysisTimeout) {1158timeout = true;1159break;1160}1161}1162if ((iterations < CG_BUILD_ITER_LIMIT) && !timeout) {1163time.start();1164// Find fields which have unknown value.1165int fields_length = oop_fields_worklist.length();1166for (int next = 0; next < fields_length; next++) {1167FieldNode* field = oop_fields_worklist.at(next);1168if (field->edge_count() == 0) {1169new_edges += find_field_value(field);1170// This code may added new edges to phantom_object.1171// Need an other cycle to propagate references to phantom_object.1172}1173}1174time.stop();1175if (time.seconds() >= EscapeAnalysisTimeout) {1176timeout = true;1177break;1178}1179} else {1180new_edges = 0; // Bailout1181}1182} while (new_edges > 0);11831184// Bailout if passed limits.1185if ((iterations >= CG_BUILD_ITER_LIMIT) || timeout) {1186Compile* C = _compile;1187if (C->log() != NULL) {1188C->log()->begin_elem("connectionGraph_bailout reason='reached ");1189C->log()->text("%s", timeout ? "time" : "iterations");1190C->log()->end_elem(" limit'");1191}1192assert(ExitEscapeAnalysisOnTimeout, err_msg_res("infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d",1193time.seconds(), iterations, nodes_size(), ptnodes_worklist.length()));1194// Possible infinite build_connection_graph loop,1195// bailout (no changes to ideal graph were made).1196return false;1197}1198#ifdef ASSERT1199if (Verbose && PrintEscapeAnalysis) {1200tty->print_cr("EA: %d iterations to build connection graph with %d nodes and worklist size %d",1201iterations, nodes_size(), ptnodes_worklist.length());1202}1203#endif12041205#undef CG_BUILD_ITER_LIMIT12061207// Find fields initialized by NULL for non-escaping Allocations.1208int non_escaped_length = non_escaped_worklist.length();1209for (int next = 0; next < non_escaped_length; next++) {1210JavaObjectNode* ptn = non_escaped_worklist.at(next);1211PointsToNode::EscapeState es = ptn->escape_state();1212assert(es <= PointsToNode::ArgEscape, "sanity");1213if (es == PointsToNode::NoEscape) {1214if (find_init_values(ptn, null_obj, _igvn) > 0) {1215// Adding references to NULL object does not change escape states1216// since it does not escape. Also no fields are added to NULL object.1217add_java_object_edges(null_obj, false);1218}1219}1220Node* n = ptn->ideal_node();1221if (n->is_Allocate()) {1222// The object allocated by this Allocate node will never be1223// seen by an other thread. Mark it so that when it is1224// expanded no MemBarStoreStore is added.1225InitializeNode* ini = n->as_Allocate()->initialization();1226if (ini != NULL)1227ini->set_does_not_escape();1228}1229}1230return true; // Finished graph construction.1231}12321233// Propagate GlobalEscape and ArgEscape escape states to all nodes1234// and check that we still have non-escaping java objects.1235bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,1236GrowableArray<JavaObjectNode*>& non_escaped_worklist) {1237GrowableArray<PointsToNode*> escape_worklist;1238// First, put all nodes with GlobalEscape and ArgEscape states on worklist.1239int ptnodes_length = ptnodes_worklist.length();1240for (int next = 0; next < ptnodes_length; ++next) {1241PointsToNode* ptn = ptnodes_worklist.at(next);1242if (ptn->escape_state() >= PointsToNode::ArgEscape ||1243ptn->fields_escape_state() >= PointsToNode::ArgEscape) {1244escape_worklist.push(ptn);1245}1246}1247// Set escape states to referenced nodes (edges list).1248while (escape_worklist.length() > 0) {1249PointsToNode* ptn = escape_worklist.pop();1250PointsToNode::EscapeState es = ptn->escape_state();1251PointsToNode::EscapeState field_es = ptn->fields_escape_state();1252if (ptn->is_Field() && ptn->as_Field()->is_oop() &&1253es >= PointsToNode::ArgEscape) {1254// GlobalEscape or ArgEscape state of field means it has unknown value.1255if (add_edge(ptn, phantom_obj)) {1256// New edge was added1257add_field_uses_to_worklist(ptn->as_Field());1258}1259}1260for (EdgeIterator i(ptn); i.has_next(); i.next()) {1261PointsToNode* e = i.get();1262if (e->is_Arraycopy()) {1263assert(ptn->arraycopy_dst(), "sanity");1264// Propagate only fields escape state through arraycopy edge.1265if (e->fields_escape_state() < field_es) {1266set_fields_escape_state(e, field_es);1267escape_worklist.push(e);1268}1269} else if (es >= field_es) {1270// fields_escape_state is also set to 'es' if it is less than 'es'.1271if (e->escape_state() < es) {1272set_escape_state(e, es);1273escape_worklist.push(e);1274}1275} else {1276// Propagate field escape state.1277bool es_changed = false;1278if (e->fields_escape_state() < field_es) {1279set_fields_escape_state(e, field_es);1280es_changed = true;1281}1282if ((e->escape_state() < field_es) &&1283e->is_Field() && ptn->is_JavaObject() &&1284e->as_Field()->is_oop()) {1285// Change escape state of referenced fileds.1286set_escape_state(e, field_es);1287es_changed = true;;1288} else if (e->escape_state() < es) {1289set_escape_state(e, es);1290es_changed = true;;1291}1292if (es_changed) {1293escape_worklist.push(e);1294}1295}1296}1297}1298// Remove escaped objects from non_escaped list.1299for (int next = non_escaped_worklist.length()-1; next >= 0 ; --next) {1300JavaObjectNode* ptn = non_escaped_worklist.at(next);1301if (ptn->escape_state() >= PointsToNode::GlobalEscape) {1302non_escaped_worklist.delete_at(next);1303}1304if (ptn->escape_state() == PointsToNode::NoEscape) {1305// Find fields in non-escaped allocations which have unknown value.1306find_init_values(ptn, phantom_obj, NULL);1307}1308}1309return (non_escaped_worklist.length() > 0);1310}13111312// Add all references to JavaObject node by walking over all uses.1313int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) {1314int new_edges = 0;1315if (populate_worklist) {1316// Populate _worklist by uses of jobj's uses.1317for (UseIterator i(jobj); i.has_next(); i.next()) {1318PointsToNode* use = i.get();1319if (use->is_Arraycopy())1320continue;1321add_uses_to_worklist(use);1322if (use->is_Field() && use->as_Field()->is_oop()) {1323// Put on worklist all field's uses (loads) and1324// related field nodes (same base and offset).1325add_field_uses_to_worklist(use->as_Field());1326}1327}1328}1329for (int l = 0; l < _worklist.length(); l++) {1330PointsToNode* use = _worklist.at(l);1331if (PointsToNode::is_base_use(use)) {1332// Add reference from jobj to field and from field to jobj (field's base).1333use = PointsToNode::get_use_node(use)->as_Field();1334if (add_base(use->as_Field(), jobj)) {1335new_edges++;1336}1337continue;1338}1339assert(!use->is_JavaObject(), "sanity");1340if (use->is_Arraycopy()) {1341if (jobj == null_obj) // NULL object does not have field edges1342continue;1343// Added edge from Arraycopy node to arraycopy's source java object1344if (add_edge(use, jobj)) {1345jobj->set_arraycopy_src();1346new_edges++;1347}1348// and stop here.1349continue;1350}1351if (!add_edge(use, jobj))1352continue; // No new edge added, there was such edge already.1353new_edges++;1354if (use->is_LocalVar()) {1355add_uses_to_worklist(use);1356if (use->arraycopy_dst()) {1357for (EdgeIterator i(use); i.has_next(); i.next()) {1358PointsToNode* e = i.get();1359if (e->is_Arraycopy()) {1360if (jobj == null_obj) // NULL object does not have field edges1361continue;1362// Add edge from arraycopy's destination java object to Arraycopy node.1363if (add_edge(jobj, e)) {1364new_edges++;1365jobj->set_arraycopy_dst();1366}1367}1368}1369}1370} else {1371// Added new edge to stored in field values.1372// Put on worklist all field's uses (loads) and1373// related field nodes (same base and offset).1374add_field_uses_to_worklist(use->as_Field());1375}1376}1377_worklist.clear();1378_in_worklist.Reset();1379return new_edges;1380}13811382// Put on worklist all related field nodes.1383void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {1384assert(field->is_oop(), "sanity");1385int offset = field->offset();1386add_uses_to_worklist(field);1387// Loop over all bases of this field and push on worklist Field nodes1388// with the same offset and base (since they may reference the same field).1389for (BaseIterator i(field); i.has_next(); i.next()) {1390PointsToNode* base = i.get();1391add_fields_to_worklist(field, base);1392// Check if the base was source object of arraycopy and go over arraycopy's1393// destination objects since values stored to a field of source object are1394// accessable by uses (loads) of fields of destination objects.1395if (base->arraycopy_src()) {1396for (UseIterator j(base); j.has_next(); j.next()) {1397PointsToNode* arycp = j.get();1398if (arycp->is_Arraycopy()) {1399for (UseIterator k(arycp); k.has_next(); k.next()) {1400PointsToNode* abase = k.get();1401if (abase->arraycopy_dst() && abase != base) {1402// Look for the same arracopy reference.1403add_fields_to_worklist(field, abase);1404}1405}1406}1407}1408}1409}1410}14111412// Put on worklist all related field nodes.1413void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {1414int offset = field->offset();1415if (base->is_LocalVar()) {1416for (UseIterator j(base); j.has_next(); j.next()) {1417PointsToNode* f = j.get();1418if (PointsToNode::is_base_use(f)) { // Field1419f = PointsToNode::get_use_node(f);1420if (f == field || !f->as_Field()->is_oop())1421continue;1422int offs = f->as_Field()->offset();1423if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {1424add_to_worklist(f);1425}1426}1427}1428} else {1429assert(base->is_JavaObject(), "sanity");1430if (// Skip phantom_object since it is only used to indicate that1431// this field's content globally escapes.1432(base != phantom_obj) &&1433// NULL object node does not have fields.1434(base != null_obj)) {1435for (EdgeIterator i(base); i.has_next(); i.next()) {1436PointsToNode* f = i.get();1437// Skip arraycopy edge since store to destination object field1438// does not update value in source object field.1439if (f->is_Arraycopy()) {1440assert(base->arraycopy_dst(), "sanity");1441continue;1442}1443if (f == field || !f->as_Field()->is_oop())1444continue;1445int offs = f->as_Field()->offset();1446if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {1447add_to_worklist(f);1448}1449}1450}1451}1452}14531454// Find fields which have unknown value.1455int ConnectionGraph::find_field_value(FieldNode* field) {1456// Escaped fields should have init value already.1457assert(field->escape_state() == PointsToNode::NoEscape, "sanity");1458int new_edges = 0;1459for (BaseIterator i(field); i.has_next(); i.next()) {1460PointsToNode* base = i.get();1461if (base->is_JavaObject()) {1462// Skip Allocate's fields which will be processed later.1463if (base->ideal_node()->is_Allocate())1464return 0;1465assert(base == null_obj, "only NULL ptr base expected here");1466}1467}1468if (add_edge(field, phantom_obj)) {1469// New edge was added1470new_edges++;1471add_field_uses_to_worklist(field);1472}1473return new_edges;1474}14751476// Find fields initializing values for allocations.1477int ConnectionGraph::find_init_values(JavaObjectNode* pta, PointsToNode* init_val, PhaseTransform* phase) {1478assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");1479int new_edges = 0;1480Node* alloc = pta->ideal_node();1481if (init_val == phantom_obj) {1482// Do nothing for Allocate nodes since its fields values are "known".1483if (alloc->is_Allocate())1484return 0;1485assert(alloc->as_CallStaticJava(), "sanity");1486#ifdef ASSERT1487if (alloc->as_CallStaticJava()->method() == NULL) {1488const char* name = alloc->as_CallStaticJava()->_name;1489assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");1490}1491#endif1492// Non-escaped allocation returned from Java or runtime call have1493// unknown values in fields.1494for (EdgeIterator i(pta); i.has_next(); i.next()) {1495PointsToNode* field = i.get();1496if (field->is_Field() && field->as_Field()->is_oop()) {1497if (add_edge(field, phantom_obj)) {1498// New edge was added1499new_edges++;1500add_field_uses_to_worklist(field->as_Field());1501}1502}1503}1504return new_edges;1505}1506assert(init_val == null_obj, "sanity");1507// Do nothing for Call nodes since its fields values are unknown.1508if (!alloc->is_Allocate())1509return 0;15101511InitializeNode* ini = alloc->as_Allocate()->initialization();1512Compile* C = _compile;1513bool visited_bottom_offset = false;1514GrowableArray<int> offsets_worklist;15151516// Check if an oop field's initializing value is recorded and add1517// a corresponding NULL if field's value if it is not recorded.1518// Connection Graph does not record a default initialization by NULL1519// captured by Initialize node.1520//1521for (EdgeIterator i(pta); i.has_next(); i.next()) {1522PointsToNode* field = i.get(); // Field (AddP)1523if (!field->is_Field() || !field->as_Field()->is_oop())1524continue; // Not oop field1525int offset = field->as_Field()->offset();1526if (offset == Type::OffsetBot) {1527if (!visited_bottom_offset) {1528// OffsetBot is used to reference array's element,1529// always add reference to NULL to all Field nodes since we don't1530// known which element is referenced.1531if (add_edge(field, null_obj)) {1532// New edge was added1533new_edges++;1534add_field_uses_to_worklist(field->as_Field());1535visited_bottom_offset = true;1536}1537}1538} else {1539// Check only oop fields.1540const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type();1541if (adr_type->isa_rawptr()) {1542#ifdef ASSERT1543// Raw pointers are used for initializing stores so skip it1544// since it should be recorded already1545Node* base = get_addp_base(field->ideal_node());1546assert(adr_type->isa_rawptr() && base->is_Proj() &&1547(base->in(0) == alloc),"unexpected pointer type");1548#endif1549continue;1550}1551if (!offsets_worklist.contains(offset)) {1552offsets_worklist.append(offset);1553Node* value = NULL;1554if (ini != NULL) {1555// StoreP::memory_type() == T_ADDRESS1556BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS;1557Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase);1558// Make sure initializing store has the same type as this AddP.1559// This AddP may reference non existing field because it is on a1560// dead branch of bimorphic call which is not eliminated yet.1561if (store != NULL && store->is_Store() &&1562store->as_Store()->memory_type() == ft) {1563value = store->in(MemNode::ValueIn);1564#ifdef ASSERT1565if (VerifyConnectionGraph) {1566// Verify that AddP already points to all objects the value points to.1567PointsToNode* val = ptnode_adr(value->_idx);1568assert((val != NULL), "should be processed already");1569PointsToNode* missed_obj = NULL;1570if (val->is_JavaObject()) {1571if (!field->points_to(val->as_JavaObject())) {1572missed_obj = val;1573}1574} else {1575if (!val->is_LocalVar() || (val->edge_count() == 0)) {1576tty->print_cr("----------init store has invalid value -----");1577store->dump();1578val->dump();1579assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");1580}1581for (EdgeIterator j(val); j.has_next(); j.next()) {1582PointsToNode* obj = j.get();1583if (obj->is_JavaObject()) {1584if (!field->points_to(obj->as_JavaObject())) {1585missed_obj = obj;1586break;1587}1588}1589}1590}1591if (missed_obj != NULL) {1592tty->print_cr("----------field---------------------------------");1593field->dump();1594tty->print_cr("----------missed referernce to object-----------");1595missed_obj->dump();1596tty->print_cr("----------object referernced by init store -----");1597store->dump();1598val->dump();1599assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");1600}1601}1602#endif1603} else {1604// There could be initializing stores which follow allocation.1605// For example, a volatile field store is not collected1606// by Initialize node.1607//1608// Need to check for dependent loads to separate such stores from1609// stores which follow loads. For now, add initial value NULL so1610// that compare pointers optimization works correctly.1611}1612}1613if (value == NULL) {1614// A field's initializing value was not recorded. Add NULL.1615if (add_edge(field, null_obj)) {1616// New edge was added1617new_edges++;1618add_field_uses_to_worklist(field->as_Field());1619}1620}1621}1622}1623}1624return new_edges;1625}16261627// Adjust scalar_replaceable state after Connection Graph is built.1628void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj) {1629// Search for non-escaping objects which are not scalar replaceable1630// and mark them to propagate the state to referenced objects.16311632// 1. An object is not scalar replaceable if the field into which it is1633// stored has unknown offset (stored into unknown element of an array).1634//1635for (UseIterator i(jobj); i.has_next(); i.next()) {1636PointsToNode* use = i.get();1637assert(!use->is_Arraycopy(), "sanity");1638if (use->is_Field()) {1639FieldNode* field = use->as_Field();1640assert(field->is_oop() && field->scalar_replaceable() &&1641field->fields_escape_state() == PointsToNode::NoEscape, "sanity");1642if (field->offset() == Type::OffsetBot) {1643jobj->set_scalar_replaceable(false);1644return;1645}1646// 2. An object is not scalar replaceable if the field into which it is1647// stored has multiple bases one of which is null.1648if (field->base_count() > 1) {1649for (BaseIterator i(field); i.has_next(); i.next()) {1650PointsToNode* base = i.get();1651if (base == null_obj) {1652jobj->set_scalar_replaceable(false);1653return;1654}1655}1656}1657}1658assert(use->is_Field() || use->is_LocalVar(), "sanity");1659// 3. An object is not scalar replaceable if it is merged with other objects.1660for (EdgeIterator j(use); j.has_next(); j.next()) {1661PointsToNode* ptn = j.get();1662if (ptn->is_JavaObject() && ptn != jobj) {1663// Mark all objects.1664jobj->set_scalar_replaceable(false);1665ptn->set_scalar_replaceable(false);1666}1667}1668if (!jobj->scalar_replaceable()) {1669return;1670}1671}16721673for (EdgeIterator j(jobj); j.has_next(); j.next()) {1674// Non-escaping object node should point only to field nodes.1675FieldNode* field = j.get()->as_Field();1676int offset = field->as_Field()->offset();16771678// 4. An object is not scalar replaceable if it has a field with unknown1679// offset (array's element is accessed in loop).1680if (offset == Type::OffsetBot) {1681jobj->set_scalar_replaceable(false);1682return;1683}1684// 5. Currently an object is not scalar replaceable if a LoadStore node1685// access its field since the field value is unknown after it.1686//1687Node* n = field->ideal_node();1688for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {1689if (n->fast_out(i)->is_LoadStore()) {1690jobj->set_scalar_replaceable(false);1691return;1692}1693}16941695// 6. Or the address may point to more then one object. This may produce1696// the false positive result (set not scalar replaceable)1697// since the flow-insensitive escape analysis can't separate1698// the case when stores overwrite the field's value from the case1699// when stores happened on different control branches.1700//1701// Note: it will disable scalar replacement in some cases:1702//1703// Point p[] = new Point[1];1704// p[0] = new Point(); // Will be not scalar replaced1705//1706// but it will save us from incorrect optimizations in next cases:1707//1708// Point p[] = new Point[1];1709// if ( x ) p[0] = new Point(); // Will be not scalar replaced1710//1711if (field->base_count() > 1) {1712for (BaseIterator i(field); i.has_next(); i.next()) {1713PointsToNode* base = i.get();1714// Don't take into account LocalVar nodes which1715// may point to only one object which should be also1716// this field's base by now.1717if (base->is_JavaObject() && base != jobj) {1718// Mark all bases.1719jobj->set_scalar_replaceable(false);1720base->set_scalar_replaceable(false);1721}1722}1723}1724}1725}17261727#ifdef ASSERT1728void ConnectionGraph::verify_connection_graph(1729GrowableArray<PointsToNode*>& ptnodes_worklist,1730GrowableArray<JavaObjectNode*>& non_escaped_worklist,1731GrowableArray<JavaObjectNode*>& java_objects_worklist,1732GrowableArray<Node*>& addp_worklist) {1733// Verify that graph is complete - no new edges could be added.1734int java_objects_length = java_objects_worklist.length();1735int non_escaped_length = non_escaped_worklist.length();1736int new_edges = 0;1737for (int next = 0; next < java_objects_length; ++next) {1738JavaObjectNode* ptn = java_objects_worklist.at(next);1739new_edges += add_java_object_edges(ptn, true);1740}1741assert(new_edges == 0, "graph was not complete");1742// Verify that escape state is final.1743int length = non_escaped_worklist.length();1744find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist);1745assert((non_escaped_length == non_escaped_worklist.length()) &&1746(non_escaped_length == length) &&1747(_worklist.length() == 0), "escape state was not final");17481749// Verify fields information.1750int addp_length = addp_worklist.length();1751for (int next = 0; next < addp_length; ++next ) {1752Node* n = addp_worklist.at(next);1753FieldNode* field = ptnode_adr(n->_idx)->as_Field();1754if (field->is_oop()) {1755// Verify that field has all bases1756Node* base = get_addp_base(n);1757PointsToNode* ptn = ptnode_adr(base->_idx);1758if (ptn->is_JavaObject()) {1759assert(field->has_base(ptn->as_JavaObject()), "sanity");1760} else {1761assert(ptn->is_LocalVar(), "sanity");1762for (EdgeIterator i(ptn); i.has_next(); i.next()) {1763PointsToNode* e = i.get();1764if (e->is_JavaObject()) {1765assert(field->has_base(e->as_JavaObject()), "sanity");1766}1767}1768}1769// Verify that all fields have initializing values.1770if (field->edge_count() == 0) {1771tty->print_cr("----------field does not have references----------");1772field->dump();1773for (BaseIterator i(field); i.has_next(); i.next()) {1774PointsToNode* base = i.get();1775tty->print_cr("----------field has next base---------------------");1776base->dump();1777if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) {1778tty->print_cr("----------base has fields-------------------------");1779for (EdgeIterator j(base); j.has_next(); j.next()) {1780j.get()->dump();1781}1782tty->print_cr("----------base has references---------------------");1783for (UseIterator j(base); j.has_next(); j.next()) {1784j.get()->dump();1785}1786}1787}1788for (UseIterator i(field); i.has_next(); i.next()) {1789i.get()->dump();1790}1791assert(field->edge_count() > 0, "sanity");1792}1793}1794}1795}1796#endif17971798// Optimize ideal graph.1799void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,1800GrowableArray<Node*>& storestore_worklist) {1801Compile* C = _compile;1802PhaseIterGVN* igvn = _igvn;1803if (EliminateLocks) {1804// Mark locks before changing ideal graph.1805int cnt = C->macro_count();1806for( int i=0; i < cnt; i++ ) {1807Node *n = C->macro_node(i);1808if (n->is_AbstractLock()) { // Lock and Unlock nodes1809AbstractLockNode* alock = n->as_AbstractLock();1810if (!alock->is_non_esc_obj()) {1811if (not_global_escape(alock->obj_node())) {1812assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");1813// The lock could be marked eliminated by lock coarsening1814// code during first IGVN before EA. Replace coarsened flag1815// to eliminate all associated locks/unlocks.1816#ifdef ASSERT1817alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");1818#endif1819alock->set_non_esc_obj();1820}1821}1822}1823}1824}18251826if (OptimizePtrCompare) {1827// Add ConI(#CC_GT) and ConI(#CC_EQ).1828_pcmp_neq = igvn->makecon(TypeInt::CC_GT);1829_pcmp_eq = igvn->makecon(TypeInt::CC_EQ);1830// Optimize objects compare.1831while (ptr_cmp_worklist.length() != 0) {1832Node *n = ptr_cmp_worklist.pop();1833Node *res = optimize_ptr_compare(n);1834if (res != NULL) {1835#ifndef PRODUCT1836if (PrintOptimizePtrCompare) {1837tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ"));1838if (Verbose) {1839n->dump(1);1840}1841}1842#endif1843igvn->replace_node(n, res);1844}1845}1846// cleanup1847if (_pcmp_neq->outcnt() == 0)1848igvn->hash_delete(_pcmp_neq);1849if (_pcmp_eq->outcnt() == 0)1850igvn->hash_delete(_pcmp_eq);1851}18521853// For MemBarStoreStore nodes added in library_call.cpp, check1854// escape status of associated AllocateNode and optimize out1855// MemBarStoreStore node if the allocated object never escapes.1856while (storestore_worklist.length() != 0) {1857Node *n = storestore_worklist.pop();1858MemBarStoreStoreNode *storestore = n ->as_MemBarStoreStore();1859Node *alloc = storestore->in(MemBarNode::Precedent)->in(0);1860assert (alloc->is_Allocate(), "storestore should point to AllocateNode");1861if (not_global_escape(alloc)) {1862MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);1863mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));1864mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));1865igvn->register_new_node_with_optimizer(mb);1866igvn->replace_node(storestore, mb);1867}1868}1869}18701871// Optimize objects compare.1872Node* ConnectionGraph::optimize_ptr_compare(Node* n) {1873assert(OptimizePtrCompare, "sanity");1874PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx);1875PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx);1876JavaObjectNode* jobj1 = unique_java_object(n->in(1));1877JavaObjectNode* jobj2 = unique_java_object(n->in(2));1878assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");1879assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");18801881// Check simple cases first.1882if (jobj1 != NULL) {1883if (jobj1->escape_state() == PointsToNode::NoEscape) {1884if (jobj1 == jobj2) {1885// Comparing the same not escaping object.1886return _pcmp_eq;1887}1888Node* obj = jobj1->ideal_node();1889// Comparing not escaping allocation.1890if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&1891!ptn2->points_to(jobj1)) {1892return _pcmp_neq; // This includes nullness check.1893}1894}1895}1896if (jobj2 != NULL) {1897if (jobj2->escape_state() == PointsToNode::NoEscape) {1898Node* obj = jobj2->ideal_node();1899// Comparing not escaping allocation.1900if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&1901!ptn1->points_to(jobj2)) {1902return _pcmp_neq; // This includes nullness check.1903}1904}1905}1906if (jobj1 != NULL && jobj1 != phantom_obj &&1907jobj2 != NULL && jobj2 != phantom_obj &&1908jobj1->ideal_node()->is_Con() &&1909jobj2->ideal_node()->is_Con()) {1910// Klass or String constants compare. Need to be careful with1911// compressed pointers - compare types of ConN and ConP instead of nodes.1912const Type* t1 = jobj1->ideal_node()->get_ptr_type();1913const Type* t2 = jobj2->ideal_node()->get_ptr_type();1914if (t1->make_ptr() == t2->make_ptr()) {1915return _pcmp_eq;1916} else {1917return _pcmp_neq;1918}1919}1920if (ptn1->meet(ptn2)) {1921return NULL; // Sets are not disjoint1922}19231924// Sets are disjoint.1925bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj);1926bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj);1927bool set1_has_null_ptr = ptn1->points_to(null_obj);1928bool set2_has_null_ptr = ptn2->points_to(null_obj);1929if (set1_has_unknown_ptr && set2_has_null_ptr ||1930set2_has_unknown_ptr && set1_has_null_ptr) {1931// Check nullness of unknown object.1932return NULL;1933}19341935// Disjointness by itself is not sufficient since1936// alias analysis is not complete for escaped objects.1937// Disjoint sets are definitely unrelated only when1938// at least one set has only not escaping allocations.1939if (!set1_has_unknown_ptr && !set1_has_null_ptr) {1940if (ptn1->non_escaping_allocation()) {1941return _pcmp_neq;1942}1943}1944if (!set2_has_unknown_ptr && !set2_has_null_ptr) {1945if (ptn2->non_escaping_allocation()) {1946return _pcmp_neq;1947}1948}1949return NULL;1950}19511952// Connection Graph constuction functions.19531954void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) {1955PointsToNode* ptadr = _nodes.at(n->_idx);1956if (ptadr != NULL) {1957assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity");1958return;1959}1960Compile* C = _compile;1961ptadr = new (C->comp_arena()) LocalVarNode(this, n, es);1962_nodes.at_put(n->_idx, ptadr);1963}19641965void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) {1966PointsToNode* ptadr = _nodes.at(n->_idx);1967if (ptadr != NULL) {1968assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity");1969return;1970}1971Compile* C = _compile;1972ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es);1973_nodes.at_put(n->_idx, ptadr);1974}19751976void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) {1977PointsToNode* ptadr = _nodes.at(n->_idx);1978if (ptadr != NULL) {1979assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity");1980return;1981}1982bool unsafe = false;1983bool is_oop = is_oop_field(n, offset, &unsafe);1984if (unsafe) {1985es = PointsToNode::GlobalEscape;1986}1987Compile* C = _compile;1988FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop);1989_nodes.at_put(n->_idx, field);1990}19911992void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es,1993PointsToNode* src, PointsToNode* dst) {1994assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");1995assert((src != null_obj) && (dst != null_obj), "not for ConP NULL");1996PointsToNode* ptadr = _nodes.at(n->_idx);1997if (ptadr != NULL) {1998assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");1999return;2000}2001Compile* C = _compile;2002ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);2003_nodes.at_put(n->_idx, ptadr);2004// Add edge from arraycopy node to source object.2005(void)add_edge(ptadr, src);2006src->set_arraycopy_src();2007// Add edge from destination object to arraycopy node.2008(void)add_edge(dst, ptadr);2009dst->set_arraycopy_dst();2010}20112012bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {2013const Type* adr_type = n->as_AddP()->bottom_type();2014BasicType bt = T_INT;2015if (offset == Type::OffsetBot) {2016// Check only oop fields.2017if (!adr_type->isa_aryptr() ||2018(adr_type->isa_aryptr()->klass() == NULL) ||2019adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {2020// OffsetBot is used to reference array's element. Ignore first AddP.2021if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) {2022bt = T_OBJECT;2023}2024}2025} else if (offset != oopDesc::klass_offset_in_bytes()) {2026if (adr_type->isa_instptr()) {2027ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();2028if (field != NULL) {2029bt = field->layout_type();2030} else {2031// Check for unsafe oop field access2032for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {2033int opcode = n->fast_out(i)->Opcode();2034if (opcode == Op_StoreP || opcode == Op_StoreN ||2035opcode == Op_LoadP || opcode == Op_LoadN ||2036opcode == Op_GetAndSetP || opcode == Op_GetAndSetN ||2037opcode == Op_CompareAndSwapP || opcode == Op_CompareAndSwapN) {2038bt = T_OBJECT;2039(*unsafe) = true;2040break;2041}2042}2043}2044} else if (adr_type->isa_aryptr()) {2045if (offset == arrayOopDesc::length_offset_in_bytes()) {2046// Ignore array length load.2047} else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {2048// Ignore first AddP.2049} else {2050const Type* elemtype = adr_type->isa_aryptr()->elem();2051bt = elemtype->array_element_basic_type();2052}2053} else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {2054// Allocation initialization, ThreadLocal field access, unsafe access2055for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {2056int opcode = n->fast_out(i)->Opcode();2057if (opcode == Op_StoreP || opcode == Op_StoreN ||2058opcode == Op_LoadP || opcode == Op_LoadN ||2059opcode == Op_GetAndSetP || opcode == Op_GetAndSetN ||2060opcode == Op_CompareAndSwapP || opcode == Op_CompareAndSwapN) {2061bt = T_OBJECT;2062break;2063}2064}2065}2066}2067return (bt == T_OBJECT || bt == T_NARROWOOP || bt == T_ARRAY);2068}20692070// Returns unique pointed java object or NULL.2071JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {2072assert(!_collecting, "should not call when contructed graph");2073// If the node was created after the escape computation we can't answer.2074uint idx = n->_idx;2075if (idx >= nodes_size()) {2076return NULL;2077}2078PointsToNode* ptn = ptnode_adr(idx);2079if (ptn == NULL) {2080return NULL;2081}2082if (ptn->is_JavaObject()) {2083return ptn->as_JavaObject();2084}2085assert(ptn->is_LocalVar(), "sanity");2086// Check all java objects it points to.2087JavaObjectNode* jobj = NULL;2088for (EdgeIterator i(ptn); i.has_next(); i.next()) {2089PointsToNode* e = i.get();2090if (e->is_JavaObject()) {2091if (jobj == NULL) {2092jobj = e->as_JavaObject();2093} else if (jobj != e) {2094return NULL;2095}2096}2097}2098return jobj;2099}21002101// Return true if this node points only to non-escaping allocations.2102bool PointsToNode::non_escaping_allocation() {2103if (is_JavaObject()) {2104Node* n = ideal_node();2105if (n->is_Allocate() || n->is_CallStaticJava()) {2106return (escape_state() == PointsToNode::NoEscape);2107} else {2108return false;2109}2110}2111assert(is_LocalVar(), "sanity");2112// Check all java objects it points to.2113for (EdgeIterator i(this); i.has_next(); i.next()) {2114PointsToNode* e = i.get();2115if (e->is_JavaObject()) {2116Node* n = e->ideal_node();2117if ((e->escape_state() != PointsToNode::NoEscape) ||2118!(n->is_Allocate() || n->is_CallStaticJava())) {2119return false;2120}2121}2122}2123return true;2124}21252126// Return true if we know the node does not escape globally.2127bool ConnectionGraph::not_global_escape(Node *n) {2128assert(!_collecting, "should not call during graph construction");2129// If the node was created after the escape computation we can't answer.2130uint idx = n->_idx;2131if (idx >= nodes_size()) {2132return false;2133}2134PointsToNode* ptn = ptnode_adr(idx);2135if (ptn == NULL) {2136return false; // not in congraph (e.g. ConI)2137}2138PointsToNode::EscapeState es = ptn->escape_state();2139// If we have already computed a value, return it.2140if (es >= PointsToNode::GlobalEscape)2141return false;2142if (ptn->is_JavaObject()) {2143return true; // (es < PointsToNode::GlobalEscape);2144}2145assert(ptn->is_LocalVar(), "sanity");2146// Check all java objects it points to.2147for (EdgeIterator i(ptn); i.has_next(); i.next()) {2148if (i.get()->escape_state() >= PointsToNode::GlobalEscape)2149return false;2150}2151return true;2152}215321542155// Helper functions21562157// Return true if this node points to specified node or nodes it points to.2158bool PointsToNode::points_to(JavaObjectNode* ptn) const {2159if (is_JavaObject()) {2160return (this == ptn);2161}2162assert(is_LocalVar() || is_Field(), "sanity");2163for (EdgeIterator i(this); i.has_next(); i.next()) {2164if (i.get() == ptn)2165return true;2166}2167return false;2168}21692170// Return true if one node points to an other.2171bool PointsToNode::meet(PointsToNode* ptn) {2172if (this == ptn) {2173return true;2174} else if (ptn->is_JavaObject()) {2175return this->points_to(ptn->as_JavaObject());2176} else if (this->is_JavaObject()) {2177return ptn->points_to(this->as_JavaObject());2178}2179assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");2180int ptn_count = ptn->edge_count();2181for (EdgeIterator i(this); i.has_next(); i.next()) {2182PointsToNode* this_e = i.get();2183for (int j = 0; j < ptn_count; j++) {2184if (this_e == ptn->edge(j))2185return true;2186}2187}2188return false;2189}21902191#ifdef ASSERT2192// Return true if bases point to this java object.2193bool FieldNode::has_base(JavaObjectNode* jobj) const {2194for (BaseIterator i(this); i.has_next(); i.next()) {2195if (i.get() == jobj)2196return true;2197}2198return false;2199}2200#endif22012202int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {2203const Type *adr_type = phase->type(adr);2204if (adr->is_AddP() && adr_type->isa_oopptr() == NULL &&2205adr->in(AddPNode::Address)->is_Proj() &&2206adr->in(AddPNode::Address)->in(0)->is_Allocate()) {2207// We are computing a raw address for a store captured by an Initialize2208// compute an appropriate address type. AddP cases #3 and #5 (see below).2209int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);2210assert(offs != Type::OffsetBot ||2211adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),2212"offset must be a constant or it is initialization of array");2213return offs;2214}2215const TypePtr *t_ptr = adr_type->isa_ptr();2216assert(t_ptr != NULL, "must be a pointer type");2217return t_ptr->offset();2218}22192220Node* ConnectionGraph::get_addp_base(Node *addp) {2221assert(addp->is_AddP(), "must be AddP");2222//2223// AddP cases for Base and Address inputs:2224// case #1. Direct object's field reference:2225// Allocate2226// |2227// Proj #5 ( oop result )2228// |2229// CheckCastPP (cast to instance type)2230// | |2231// AddP ( base == address )2232//2233// case #2. Indirect object's field reference:2234// Phi2235// |2236// CastPP (cast to instance type)2237// | |2238// AddP ( base == address )2239//2240// case #3. Raw object's field reference for Initialize node:2241// Allocate2242// |2243// Proj #5 ( oop result )2244// top |2245// \ |2246// AddP ( base == top )2247//2248// case #4. Array's element reference:2249// {CheckCastPP | CastPP}2250// | | |2251// | AddP ( array's element offset )2252// | |2253// AddP ( array's offset )2254//2255// case #5. Raw object's field reference for arraycopy stub call:2256// The inline_native_clone() case when the arraycopy stub is called2257// after the allocation before Initialize and CheckCastPP nodes.2258// Allocate2259// |2260// Proj #5 ( oop result )2261// | |2262// AddP ( base == address )2263//2264// case #6. Constant Pool, ThreadLocal, CastX2P or2265// Raw object's field reference:2266// {ConP, ThreadLocal, CastX2P, raw Load}2267// top |2268// \ |2269// AddP ( base == top )2270//2271// case #7. Klass's field reference.2272// LoadKlass2273// | |2274// AddP ( base == address )2275//2276// case #8. narrow Klass's field reference.2277// LoadNKlass2278// |2279// DecodeN2280// | |2281// AddP ( base == address )2282//2283Node *base = addp->in(AddPNode::Base);2284if (base->uncast()->is_top()) { // The AddP case #3 and #6.2285base = addp->in(AddPNode::Address);2286while (base->is_AddP()) {2287// Case #6 (unsafe access) may have several chained AddP nodes.2288assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");2289base = base->in(AddPNode::Address);2290}2291Node* uncast_base = base->uncast();2292int opcode = uncast_base->Opcode();2293assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||2294opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||2295(uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||2296(uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()) ||2297(uncast_base->is_Phi() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||2298uncast_base->Opcode() == Op_ShenandoahLoadReferenceBarrier, "sanity");2299}2300return base;2301}23022303Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {2304assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");2305Node* addp2 = addp->raw_out(0);2306if (addp->outcnt() == 1 && addp2->is_AddP() &&2307addp2->in(AddPNode::Base) == n &&2308addp2->in(AddPNode::Address) == addp) {2309assert(addp->in(AddPNode::Base) == n, "expecting the same base");2310//2311// Find array's offset to push it on worklist first and2312// as result process an array's element offset first (pushed second)2313// to avoid CastPP for the array's offset.2314// Otherwise the inserted CastPP (LocalVar) will point to what2315// the AddP (Field) points to. Which would be wrong since2316// the algorithm expects the CastPP has the same point as2317// as AddP's base CheckCastPP (LocalVar).2318//2319// ArrayAllocation2320// |2321// CheckCastPP2322// |2323// memProj (from ArrayAllocation CheckCastPP)2324// | ||2325// | || Int (element index)2326// | || | ConI (log(element size))2327// | || | /2328// | || LShift2329// | || /2330// | AddP (array's element offset)2331// | |2332// | | ConI (array's offset: #12(32-bits) or #24(64-bits))2333// | / /2334// AddP (array's offset)2335// |2336// Load/Store (memory operation on array's element)2337//2338return addp2;2339}2340return NULL;2341}23422343//2344// Adjust the type and inputs of an AddP which computes the2345// address of a field of an instance2346//2347bool ConnectionGraph::split_AddP(Node *addp, Node *base) {2348PhaseGVN* igvn = _igvn;2349const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();2350assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");2351const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();2352if (t == NULL) {2353// We are computing a raw address for a store captured by an Initialize2354// compute an appropriate address type (cases #3 and #5).2355assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");2356assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");2357intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);2358assert(offs != Type::OffsetBot, "offset must be a constant");2359t = base_t->add_offset(offs)->is_oopptr();2360}2361int inst_id = base_t->instance_id();2362assert(!t->is_known_instance() || t->instance_id() == inst_id,2363"old type must be non-instance or match new type");23642365// The type 't' could be subclass of 'base_t'.2366// As result t->offset() could be large then base_t's size and it will2367// cause the failure in add_offset() with narrow oops since TypeOopPtr()2368// constructor verifies correctness of the offset.2369//2370// It could happened on subclass's branch (from the type profiling2371// inlining) which was not eliminated during parsing since the exactness2372// of the allocation type was not propagated to the subclass type check.2373//2374// Or the type 't' could be not related to 'base_t' at all.2375// It could happened when CHA type is different from MDO type on a dead path2376// (for example, from instanceof check) which is not collapsed during parsing.2377//2378// Do nothing for such AddP node and don't process its users since2379// this code branch will go away.2380//2381if (!t->is_known_instance() &&2382!base_t->klass()->is_subtype_of(t->klass())) {2383return false; // bail out2384}2385const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();2386// Do NOT remove the next line: ensure a new alias index is allocated2387// for the instance type. Note: C++ will not remove it since the call2388// has side effect.2389int alias_idx = _compile->get_alias_index(tinst);2390igvn->set_type(addp, tinst);2391// record the allocation in the node map2392set_map(addp, get_map(base->_idx));2393// Set addp's Base and Address to 'base'.2394Node *abase = addp->in(AddPNode::Base);2395Node *adr = addp->in(AddPNode::Address);2396if (adr->is_Proj() && adr->in(0)->is_Allocate() &&2397adr->in(0)->_idx == (uint)inst_id) {2398// Skip AddP cases #3 and #5.2399} else {2400assert(!abase->is_top(), "sanity"); // AddP case #32401if (abase != base) {2402igvn->hash_delete(addp);2403addp->set_req(AddPNode::Base, base);2404if (abase == adr) {2405addp->set_req(AddPNode::Address, base);2406} else {2407// AddP case #4 (adr is array's element offset AddP node)2408#ifdef ASSERT2409const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();2410assert(adr->is_AddP() && atype != NULL &&2411atype->instance_id() == inst_id, "array's element offset should be processed first");2412#endif2413}2414igvn->hash_insert(addp);2415}2416}2417// Put on IGVN worklist since at least addp's type was changed above.2418record_for_optimizer(addp);2419return true;2420}24212422//2423// Create a new version of orig_phi if necessary. Returns either the newly2424// created phi or an existing phi. Sets create_new to indicate whether a new2425// phi was created. Cache the last newly created phi in the node map.2426//2427PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, bool &new_created) {2428Compile *C = _compile;2429PhaseGVN* igvn = _igvn;2430new_created = false;2431int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());2432// nothing to do if orig_phi is bottom memory or matches alias_idx2433if (phi_alias_idx == alias_idx) {2434return orig_phi;2435}2436// Have we recently created a Phi for this alias index?2437PhiNode *result = get_map_phi(orig_phi->_idx);2438if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) {2439return result;2440}2441// Previous check may fail when the same wide memory Phi was split into Phis2442// for different memory slices. Search all Phis for this region.2443if (result != NULL) {2444Node* region = orig_phi->in(0);2445for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {2446Node* phi = region->fast_out(i);2447if (phi->is_Phi() &&2448C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {2449assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");2450return phi->as_Phi();2451}2452}2453}2454if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) {2455if (C->do_escape_analysis() == true && !C->failing()) {2456// Retry compilation without escape analysis.2457// If this is the first failure, the sentinel string will "stick"2458// to the Compile object, and the C2Compiler will see it and retry.2459C->record_failure(C2Compiler::retry_no_escape_analysis());2460}2461return NULL;2462}2463orig_phi_worklist.append_if_missing(orig_phi);2464const TypePtr *atype = C->get_adr_type(alias_idx);2465result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);2466C->copy_node_notes_to(result, orig_phi);2467igvn->set_type(result, result->bottom_type());2468record_for_optimizer(result);2469set_map(orig_phi, result);2470new_created = true;2471return result;2472}24732474//2475// Return a new version of Memory Phi "orig_phi" with the inputs having the2476// specified alias index.2477//2478PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist) {2479assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");2480Compile *C = _compile;2481PhaseGVN* igvn = _igvn;2482bool new_phi_created;2483PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);2484if (!new_phi_created) {2485return result;2486}2487GrowableArray<PhiNode *> phi_list;2488GrowableArray<uint> cur_input;2489PhiNode *phi = orig_phi;2490uint idx = 1;2491bool finished = false;2492while(!finished) {2493while (idx < phi->req()) {2494Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist);2495if (mem != NULL && mem->is_Phi()) {2496PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);2497if (new_phi_created) {2498// found an phi for which we created a new split, push current one on worklist and begin2499// processing new one2500phi_list.push(phi);2501cur_input.push(idx);2502phi = mem->as_Phi();2503result = newphi;2504idx = 1;2505continue;2506} else {2507mem = newphi;2508}2509}2510if (C->failing()) {2511return NULL;2512}2513result->set_req(idx++, mem);2514}2515#ifdef ASSERT2516// verify that the new Phi has an input for each input of the original2517assert( phi->req() == result->req(), "must have same number of inputs.");2518assert( result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match");2519#endif2520// Check if all new phi's inputs have specified alias index.2521// Otherwise use old phi.2522for (uint i = 1; i < phi->req(); i++) {2523Node* in = result->in(i);2524assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond.");2525}2526// we have finished processing a Phi, see if there are any more to do2527finished = (phi_list.length() == 0 );2528if (!finished) {2529phi = phi_list.pop();2530idx = cur_input.pop();2531PhiNode *prev_result = get_map_phi(phi->_idx);2532prev_result->set_req(idx++, result);2533result = prev_result;2534}2535}2536return result;2537}25382539//2540// The next methods are derived from methods in MemNode.2541//2542Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {2543Node *mem = mmem;2544// TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally2545// means an array I have not precisely typed yet. Do not do any2546// alias stuff with it any time soon.2547if (toop->base() != Type::AnyPtr &&2548!(toop->klass() != NULL &&2549toop->klass()->is_java_lang_Object() &&2550toop->offset() == Type::OffsetBot)) {2551mem = mmem->memory_at(alias_idx);2552// Update input if it is progress over what we have now2553}2554return mem;2555}25562557//2558// Move memory users to their memory slices.2559//2560void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis) {2561Compile* C = _compile;2562PhaseGVN* igvn = _igvn;2563const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr();2564assert(tp != NULL, "ptr type");2565int alias_idx = C->get_alias_index(tp);2566int general_idx = C->get_general_index(alias_idx);25672568// Move users first2569for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {2570Node* use = n->fast_out(i);2571if (use->is_MergeMem()) {2572MergeMemNode* mmem = use->as_MergeMem();2573assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice");2574if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) {2575continue; // Nothing to do2576}2577// Replace previous general reference to mem node.2578uint orig_uniq = C->unique();2579Node* m = find_inst_mem(n, general_idx, orig_phis);2580assert(orig_uniq == C->unique(), "no new nodes");2581mmem->set_memory_at(general_idx, m);2582--imax;2583--i;2584} else if (use->is_MemBar()) {2585assert(!use->is_Initialize(), "initializing stores should not be moved");2586if (use->req() > MemBarNode::Precedent &&2587use->in(MemBarNode::Precedent) == n) {2588// Don't move related membars.2589record_for_optimizer(use);2590continue;2591}2592tp = use->as_MemBar()->adr_type()->isa_ptr();2593if (tp != NULL && C->get_alias_index(tp) == alias_idx ||2594alias_idx == general_idx) {2595continue; // Nothing to do2596}2597// Move to general memory slice.2598uint orig_uniq = C->unique();2599Node* m = find_inst_mem(n, general_idx, orig_phis);2600assert(orig_uniq == C->unique(), "no new nodes");2601igvn->hash_delete(use);2602imax -= use->replace_edge(n, m);2603igvn->hash_insert(use);2604record_for_optimizer(use);2605--i;2606#ifdef ASSERT2607} else if (use->is_Mem()) {2608if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) {2609// Don't move related cardmark.2610continue;2611}2612// Memory nodes should have new memory input.2613tp = igvn->type(use->in(MemNode::Address))->isa_ptr();2614assert(tp != NULL, "ptr type");2615int idx = C->get_alias_index(tp);2616assert(get_map(use->_idx) != NULL || idx == alias_idx,2617"Following memory nodes should have new memory input or be on the same memory slice");2618} else if (use->is_Phi()) {2619// Phi nodes should be split and moved already.2620tp = use->as_Phi()->adr_type()->isa_ptr();2621assert(tp != NULL, "ptr type");2622int idx = C->get_alias_index(tp);2623assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");2624} else {2625use->dump();2626assert(false, "should not be here");2627#endif2628}2629}2630}26312632//2633// Search memory chain of "mem" to find a MemNode whose address2634// is the specified alias index.2635//2636Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis) {2637if (orig_mem == NULL)2638return orig_mem;2639Compile* C = _compile;2640PhaseGVN* igvn = _igvn;2641const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();2642bool is_instance = (toop != NULL) && toop->is_known_instance();2643Node *start_mem = C->start()->proj_out(TypeFunc::Memory);2644Node *prev = NULL;2645Node *result = orig_mem;2646while (prev != result) {2647prev = result;2648if (result == start_mem)2649break; // hit one of our sentinels2650if (result->is_Mem()) {2651const Type *at = igvn->type(result->in(MemNode::Address));2652if (at == Type::TOP)2653break; // Dead2654assert (at->isa_ptr() != NULL, "pointer type required.");2655int idx = C->get_alias_index(at->is_ptr());2656if (idx == alias_idx)2657break; // Found2658if (!is_instance && (at->isa_oopptr() == NULL ||2659!at->is_oopptr()->is_known_instance())) {2660break; // Do not skip store to general memory slice.2661}2662result = result->in(MemNode::Memory);2663}2664if (!is_instance)2665continue; // don't search further for non-instance types2666// skip over a call which does not affect this memory slice2667if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {2668Node *proj_in = result->in(0);2669if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {2670break; // hit one of our sentinels2671} else if (proj_in->is_Call()) {2672CallNode *call = proj_in->as_Call();2673if (!call->may_modify(toop, igvn)) {2674result = call->in(TypeFunc::Memory);2675}2676} else if (proj_in->is_Initialize()) {2677AllocateNode* alloc = proj_in->as_Initialize()->allocation();2678// Stop if this is the initialization for the object instance which2679// which contains this memory slice, otherwise skip over it.2680if (alloc == NULL || alloc->_idx != (uint)toop->instance_id()) {2681result = proj_in->in(TypeFunc::Memory);2682}2683} else if (proj_in->is_MemBar()) {2684result = proj_in->in(TypeFunc::Memory);2685}2686} else if (result->is_MergeMem()) {2687MergeMemNode *mmem = result->as_MergeMem();2688result = step_through_mergemem(mmem, alias_idx, toop);2689if (result == mmem->base_memory()) {2690// Didn't find instance memory, search through general slice recursively.2691result = mmem->memory_at(C->get_general_index(alias_idx));2692result = find_inst_mem(result, alias_idx, orig_phis);2693if (C->failing()) {2694return NULL;2695}2696mmem->set_memory_at(alias_idx, result);2697}2698} else if (result->is_Phi() &&2699C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {2700Node *un = result->as_Phi()->unique_input(igvn);2701if (un != NULL) {2702orig_phis.append_if_missing(result->as_Phi());2703result = un;2704} else {2705break;2706}2707} else if (result->is_ClearArray()) {2708if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) {2709// Can not bypass initialization of the instance2710// we are looking for.2711break;2712}2713// Otherwise skip it (the call updated 'result' value).2714} else if (result->Opcode() == Op_SCMemProj) {2715Node* mem = result->in(0);2716Node* adr = NULL;2717if (mem->is_LoadStore()) {2718adr = mem->in(MemNode::Address);2719} else {2720assert(mem->Opcode() == Op_EncodeISOArray, "sanity");2721adr = mem->in(3); // Memory edge corresponds to destination array2722}2723const Type *at = igvn->type(adr);2724if (at != Type::TOP) {2725assert (at->isa_ptr() != NULL, "pointer type required.");2726int idx = C->get_alias_index(at->is_ptr());2727assert(idx != alias_idx, "Object is not scalar replaceable if a LoadStore node access its field");2728break;2729}2730result = mem->in(MemNode::Memory);2731}2732}2733if (result->is_Phi()) {2734PhiNode *mphi = result->as_Phi();2735assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");2736const TypePtr *t = mphi->adr_type();2737if (!is_instance) {2738// Push all non-instance Phis on the orig_phis worklist to update inputs2739// during Phase 4 if needed.2740orig_phis.append_if_missing(mphi);2741} else if (C->get_alias_index(t) != alias_idx) {2742// Create a new Phi with the specified alias index type.2743result = split_memory_phi(mphi, alias_idx, orig_phis);2744}2745}2746// the result is either MemNode, PhiNode, InitializeNode.2747return result;2748}27492750//2751// Convert the types of unescaped object to instance types where possible,2752// propagate the new type information through the graph, and update memory2753// edges and MergeMem inputs to reflect the new type.2754//2755// We start with allocations (and calls which may be allocations) on alloc_worklist.2756// The processing is done in 4 phases:2757//2758// Phase 1: Process possible allocations from alloc_worklist. Create instance2759// types for the CheckCastPP for allocations where possible.2760// Propagate the the new types through users as follows:2761// casts and Phi: push users on alloc_worklist2762// AddP: cast Base and Address inputs to the instance type2763// push any AddP users on alloc_worklist and push any memnode2764// users onto memnode_worklist.2765// Phase 2: Process MemNode's from memnode_worklist. compute new address type and2766// search the Memory chain for a store with the appropriate type2767// address type. If a Phi is found, create a new version with2768// the appropriate memory slices from each of the Phi inputs.2769// For stores, process the users as follows:2770// MemNode: push on memnode_worklist2771// MergeMem: push on mergemem_worklist2772// Phase 3: Process MergeMem nodes from mergemem_worklist. Walk each memory slice2773// moving the first node encountered of each instance type to the2774// the input corresponding to its alias index.2775// appropriate memory slice.2776// Phase 4: Update the inputs of non-instance memory Phis and the Memory input of memnodes.2777//2778// In the following example, the CheckCastPP nodes are the cast of allocation2779// results and the allocation of node 29 is unescaped and eligible to be an2780// instance type.2781//2782// We start with:2783//2784// 7 Parm #memory2785// 10 ConI "12"2786// 19 CheckCastPP "Foo"2787// 20 AddP _ 19 19 10 Foo+12 alias_index=42788// 29 CheckCastPP "Foo"2789// 30 AddP _ 29 29 10 Foo+12 alias_index=42790//2791// 40 StoreP 25 7 20 ... alias_index=42792// 50 StoreP 35 40 30 ... alias_index=42793// 60 StoreP 45 50 20 ... alias_index=42794// 70 LoadP _ 60 30 ... alias_index=42795// 80 Phi 75 50 60 Memory alias_index=42796// 90 LoadP _ 80 30 ... alias_index=42797// 100 LoadP _ 80 20 ... alias_index=42798//2799//2800// Phase 1 creates an instance type for node 29 assigning it an instance id of 242801// and creating a new alias index for node 30. This gives:2802//2803// 7 Parm #memory2804// 10 ConI "12"2805// 19 CheckCastPP "Foo"2806// 20 AddP _ 19 19 10 Foo+12 alias_index=42807// 29 CheckCastPP "Foo" iid=242808// 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=242809//2810// 40 StoreP 25 7 20 ... alias_index=42811// 50 StoreP 35 40 30 ... alias_index=62812// 60 StoreP 45 50 20 ... alias_index=42813// 70 LoadP _ 60 30 ... alias_index=62814// 80 Phi 75 50 60 Memory alias_index=42815// 90 LoadP _ 80 30 ... alias_index=62816// 100 LoadP _ 80 20 ... alias_index=42817//2818// In phase 2, new memory inputs are computed for the loads and stores,2819// And a new version of the phi is created. In phase 4, the inputs to2820// node 80 are updated and then the memory nodes are updated with the2821// values computed in phase 2. This results in:2822//2823// 7 Parm #memory2824// 10 ConI "12"2825// 19 CheckCastPP "Foo"2826// 20 AddP _ 19 19 10 Foo+12 alias_index=42827// 29 CheckCastPP "Foo" iid=242828// 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=242829//2830// 40 StoreP 25 7 20 ... alias_index=42831// 50 StoreP 35 7 30 ... alias_index=62832// 60 StoreP 45 40 20 ... alias_index=42833// 70 LoadP _ 50 30 ... alias_index=62834// 80 Phi 75 40 60 Memory alias_index=42835// 120 Phi 75 50 50 Memory alias_index=62836// 90 LoadP _ 120 30 ... alias_index=62837// 100 LoadP _ 80 20 ... alias_index=42838//2839void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist) {2840GrowableArray<Node *> memnode_worklist;2841GrowableArray<PhiNode *> orig_phis;2842PhaseIterGVN *igvn = _igvn;2843uint new_index_start = (uint) _compile->num_alias_types();2844Arena* arena = Thread::current()->resource_area();2845VectorSet visited(arena);2846ideal_nodes.clear(); // Reset for use with set_map/get_map.2847uint unique_old = _compile->unique();28482849// Phase 1: Process possible allocations from alloc_worklist.2850// Create instance types for the CheckCastPP for allocations where possible.2851//2852// (Note: don't forget to change the order of the second AddP node on2853// the alloc_worklist if the order of the worklist processing is changed,2854// see the comment in find_second_addp().)2855//2856while (alloc_worklist.length() != 0) {2857Node *n = alloc_worklist.pop();2858uint ni = n->_idx;2859if (n->is_Call()) {2860CallNode *alloc = n->as_Call();2861// copy escape information to call node2862PointsToNode* ptn = ptnode_adr(alloc->_idx);2863PointsToNode::EscapeState es = ptn->escape_state();2864// We have an allocation or call which returns a Java object,2865// see if it is unescaped.2866if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable())2867continue;2868// Find CheckCastPP for the allocate or for the return value of a call2869n = alloc->result_cast();2870if (n == NULL) { // No uses except Initialize node2871if (alloc->is_Allocate()) {2872// Set the scalar_replaceable flag for allocation2873// so it could be eliminated if it has no uses.2874alloc->as_Allocate()->_is_scalar_replaceable = true;2875}2876if (alloc->is_CallStaticJava()) {2877// Set the scalar_replaceable flag for boxing method2878// so it could be eliminated if it has no uses.2879alloc->as_CallStaticJava()->_is_scalar_replaceable = true;2880}2881continue;2882}2883if (!n->is_CheckCastPP()) { // not unique CheckCastPP.2884assert(!alloc->is_Allocate(), "allocation should have unique type");2885continue;2886}28872888// The inline code for Object.clone() casts the allocation result to2889// java.lang.Object and then to the actual type of the allocated2890// object. Detect this case and use the second cast.2891// Also detect j.l.reflect.Array.newInstance(jobject, jint) case when2892// the allocation result is cast to java.lang.Object and then2893// to the actual Array type.2894if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL2895&& (alloc->is_AllocateArray() ||2896igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeKlassPtr::OBJECT)) {2897Node *cast2 = NULL;2898for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {2899Node *use = n->fast_out(i);2900if (use->is_CheckCastPP()) {2901cast2 = use;2902break;2903}2904}2905if (cast2 != NULL) {2906n = cast2;2907} else {2908// Non-scalar replaceable if the allocation type is unknown statically2909// (reflection allocation), the object can't be restored during2910// deoptimization without precise type.2911continue;2912}2913}29142915const TypeOopPtr *t = igvn->type(n)->isa_oopptr();2916if (t == NULL)2917continue; // not a TypeOopPtr2918if (!t->klass_is_exact())2919continue; // not an unique type29202921if (alloc->is_Allocate()) {2922// Set the scalar_replaceable flag for allocation2923// so it could be eliminated.2924alloc->as_Allocate()->_is_scalar_replaceable = true;2925}2926if (alloc->is_CallStaticJava()) {2927// Set the scalar_replaceable flag for boxing method2928// so it could be eliminated.2929alloc->as_CallStaticJava()->_is_scalar_replaceable = true;2930}2931set_escape_state(ptnode_adr(n->_idx), es); // CheckCastPP escape state2932// in order for an object to be scalar-replaceable, it must be:2933// - a direct allocation (not a call returning an object)2934// - non-escaping2935// - eligible to be a unique type2936// - not determined to be ineligible by escape analysis2937set_map(alloc, n);2938set_map(n, alloc);2939const TypeOopPtr* tinst = t->cast_to_instance_id(ni);2940igvn->hash_delete(n);2941igvn->set_type(n, tinst);2942n->raise_bottom_type(tinst);2943igvn->hash_insert(n);2944record_for_optimizer(n);2945if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {29462947// First, put on the worklist all Field edges from Connection Graph2948// which is more accurate then putting immediate users from Ideal Graph.2949for (EdgeIterator e(ptn); e.has_next(); e.next()) {2950PointsToNode* tgt = e.get();2951Node* use = tgt->ideal_node();2952assert(tgt->is_Field() && use->is_AddP(),2953"only AddP nodes are Field edges in CG");2954if (use->outcnt() > 0) { // Don't process dead nodes2955Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));2956if (addp2 != NULL) {2957assert(alloc->is_AllocateArray(),"array allocation was expected");2958alloc_worklist.append_if_missing(addp2);2959}2960alloc_worklist.append_if_missing(use);2961}2962}29632964// An allocation may have an Initialize which has raw stores. Scan2965// the users of the raw allocation result and push AddP users2966// on alloc_worklist.2967Node *raw_result = alloc->proj_out(TypeFunc::Parms);2968assert (raw_result != NULL, "must have an allocation result");2969for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {2970Node *use = raw_result->fast_out(i);2971if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes2972Node* addp2 = find_second_addp(use, raw_result);2973if (addp2 != NULL) {2974assert(alloc->is_AllocateArray(),"array allocation was expected");2975alloc_worklist.append_if_missing(addp2);2976}2977alloc_worklist.append_if_missing(use);2978} else if (use->is_MemBar()) {2979memnode_worklist.append_if_missing(use);2980}2981}2982}2983} else if (n->is_AddP()) {2984JavaObjectNode* jobj = unique_java_object(get_addp_base(n));2985if (jobj == NULL || jobj == phantom_obj) {2986#ifdef ASSERT2987ptnode_adr(get_addp_base(n)->_idx)->dump();2988ptnode_adr(n->_idx)->dump();2989assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");2990#endif2991_compile->record_failure(C2Compiler::retry_no_escape_analysis());2992return;2993}2994Node *base = get_map(jobj->idx()); // CheckCastPP node2995if (!split_AddP(n, base)) continue; // wrong type from dead path2996} else if (n->is_Phi() ||2997n->is_CheckCastPP() ||2998n->is_EncodeP() ||2999n->is_DecodeN() ||3000(n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {3001if (visited.test_set(n->_idx)) {3002assert(n->is_Phi(), "loops only through Phi's");3003continue; // already processed3004}3005JavaObjectNode* jobj = unique_java_object(n);3006if (jobj == NULL || jobj == phantom_obj) {3007#ifdef ASSERT3008ptnode_adr(n->_idx)->dump();3009assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");3010#endif3011_compile->record_failure(C2Compiler::retry_no_escape_analysis());3012return;3013} else {3014Node *val = get_map(jobj->idx()); // CheckCastPP node3015TypeNode *tn = n->as_Type();3016const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();3017assert(tinst != NULL && tinst->is_known_instance() &&3018tinst->instance_id() == jobj->idx() , "instance type expected.");30193020const Type *tn_type = igvn->type(tn);3021const TypeOopPtr *tn_t;3022if (tn_type->isa_narrowoop()) {3023tn_t = tn_type->make_ptr()->isa_oopptr();3024} else {3025tn_t = tn_type->isa_oopptr();3026}3027if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) {3028if (tn_type->isa_narrowoop()) {3029tn_type = tinst->make_narrowoop();3030} else {3031tn_type = tinst;3032}3033igvn->hash_delete(tn);3034igvn->set_type(tn, tn_type);3035tn->set_type(tn_type);3036igvn->hash_insert(tn);3037record_for_optimizer(n);3038} else {3039assert(tn_type == TypePtr::NULL_PTR ||3040tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass()),3041"unexpected type");3042continue; // Skip dead path with different type3043}3044}3045} else {3046debug_only(n->dump();)3047assert(false, "EA: unexpected node");3048continue;3049}3050// push allocation's users on appropriate worklist3051for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {3052Node *use = n->fast_out(i);3053if(use->is_Mem() && use->in(MemNode::Address) == n) {3054// Load/store to instance's field3055memnode_worklist.append_if_missing(use);3056} else if (use->is_MemBar()) {3057if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge3058memnode_worklist.append_if_missing(use);3059}3060} else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes3061Node* addp2 = find_second_addp(use, n);3062if (addp2 != NULL) {3063alloc_worklist.append_if_missing(addp2);3064}3065alloc_worklist.append_if_missing(use);3066} else if (use->is_Phi() ||3067use->is_CheckCastPP() ||3068use->is_EncodeNarrowPtr() ||3069use->is_DecodeNarrowPtr() ||3070(use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {3071alloc_worklist.append_if_missing(use);3072#ifdef ASSERT3073} else if (use->is_Mem()) {3074assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");3075} else if (use->is_MergeMem()) {3076assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");3077} else if (use->is_SafePoint()) {3078// Look for MergeMem nodes for calls which reference unique allocation3079// (through CheckCastPP nodes) even for debug info.3080Node* m = use->in(TypeFunc::Memory);3081if (m->is_MergeMem()) {3082assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");3083}3084} else if (use->Opcode() == Op_EncodeISOArray) {3085if (use->in(MemNode::Memory) == n || use->in(3) == n) {3086// EncodeISOArray overwrites destination array3087memnode_worklist.append_if_missing(use);3088}3089} else {3090uint op = use->Opcode();3091if (!(op == Op_CmpP || op == Op_Conv2B ||3092op == Op_CastP2X || op == Op_StoreCM ||3093op == Op_FastLock || op == Op_AryEq || op == Op_StrComp ||3094op == Op_StrEquals || op == Op_StrIndexOf)) {3095n->dump();3096use->dump();3097assert(false, "EA: missing allocation reference path");3098}3099#endif3100}3101}31023103}3104// New alias types were created in split_AddP().3105uint new_index_end = (uint) _compile->num_alias_types();3106assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1");31073108// Phase 2: Process MemNode's from memnode_worklist. compute new address type and3109// compute new values for Memory inputs (the Memory inputs are not3110// actually updated until phase 4.)3111if (memnode_worklist.length() == 0)3112return; // nothing to do3113while (memnode_worklist.length() != 0) {3114Node *n = memnode_worklist.pop();3115if (visited.test_set(n->_idx))3116continue;3117if (n->is_Phi() || n->is_ClearArray()) {3118// we don't need to do anything, but the users must be pushed3119} else if (n->is_MemBar()) { // Initialize, MemBar nodes3120// we don't need to do anything, but the users must be pushed3121n = n->as_MemBar()->proj_out(TypeFunc::Memory);3122if (n == NULL)3123continue;3124} else if (n->Opcode() == Op_EncodeISOArray) {3125// get the memory projection3126for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {3127Node *use = n->fast_out(i);3128if (use->Opcode() == Op_SCMemProj) {3129n = use;3130break;3131}3132}3133assert(n->Opcode() == Op_SCMemProj, "memory projection required");3134} else {3135assert(n->is_Mem(), "memory node required.");3136Node *addr = n->in(MemNode::Address);3137const Type *addr_t = igvn->type(addr);3138if (addr_t == Type::TOP)3139continue;3140assert (addr_t->isa_ptr() != NULL, "pointer type required.");3141int alias_idx = _compile->get_alias_index(addr_t->is_ptr());3142assert ((uint)alias_idx < new_index_end, "wrong alias index");3143Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);3144if (_compile->failing()) {3145return;3146}3147if (mem != n->in(MemNode::Memory)) {3148// We delay the memory edge update since we need old one in3149// MergeMem code below when instances memory slices are separated.3150set_map(n, mem);3151}3152if (n->is_Load()) {3153continue; // don't push users3154} else if (n->is_LoadStore()) {3155// get the memory projection3156for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {3157Node *use = n->fast_out(i);3158if (use->Opcode() == Op_SCMemProj) {3159n = use;3160break;3161}3162}3163assert(n->Opcode() == Op_SCMemProj, "memory projection required");3164}3165}3166// push user on appropriate worklist3167for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {3168Node *use = n->fast_out(i);3169if (use->is_Phi() || use->is_ClearArray()) {3170memnode_worklist.append_if_missing(use);3171} else if (use->is_Mem() && use->in(MemNode::Memory) == n) {3172if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores3173continue;3174memnode_worklist.append_if_missing(use);3175} else if (use->is_MemBar()) {3176if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge3177memnode_worklist.append_if_missing(use);3178}3179#ifdef ASSERT3180} else if(use->is_Mem()) {3181assert(use->in(MemNode::Memory) != n, "EA: missing memory path");3182} else if (use->is_MergeMem()) {3183assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");3184} else if (use->Opcode() == Op_EncodeISOArray) {3185if (use->in(MemNode::Memory) == n || use->in(3) == n) {3186// EncodeISOArray overwrites destination array3187memnode_worklist.append_if_missing(use);3188}3189} else {3190uint op = use->Opcode();3191if (!(op == Op_StoreCM ||3192(op == Op_CallLeaf && use->as_CallLeaf()->_name != NULL &&3193strcmp(use->as_CallLeaf()->_name, "g1_wb_pre") == 0) ||3194op == Op_AryEq || op == Op_StrComp ||3195op == Op_StrEquals || op == Op_StrIndexOf)) {3196n->dump();3197use->dump();3198assert(false, "EA: missing memory path");3199}3200#endif3201}3202}3203}32043205// Phase 3: Process MergeMem nodes from mergemem_worklist.3206// Walk each memory slice moving the first node encountered of each3207// instance type to the the input corresponding to its alias index.3208uint length = _mergemem_worklist.length();3209for( uint next = 0; next < length; ++next ) {3210MergeMemNode* nmm = _mergemem_worklist.at(next);3211assert(!visited.test_set(nmm->_idx), "should not be visited before");3212// Note: we don't want to use MergeMemStream here because we only want to3213// scan inputs which exist at the start, not ones we add during processing.3214// Note 2: MergeMem may already contains instance memory slices added3215// during find_inst_mem() call when memory nodes were processed above.3216igvn->hash_delete(nmm);3217uint nslices = MIN2(nmm->req(), new_index_start);3218for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {3219Node* mem = nmm->in(i);3220Node* cur = NULL;3221if (mem == NULL || mem->is_top())3222continue;3223// First, update mergemem by moving memory nodes to corresponding slices3224// if their type became more precise since this mergemem was created.3225while (mem->is_Mem()) {3226const Type *at = igvn->type(mem->in(MemNode::Address));3227if (at != Type::TOP) {3228assert (at->isa_ptr() != NULL, "pointer type required.");3229uint idx = (uint)_compile->get_alias_index(at->is_ptr());3230if (idx == i) {3231if (cur == NULL)3232cur = mem;3233} else {3234if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {3235nmm->set_memory_at(idx, mem);3236}3237}3238}3239mem = mem->in(MemNode::Memory);3240}3241nmm->set_memory_at(i, (cur != NULL) ? cur : mem);3242// Find any instance of the current type if we haven't encountered3243// already a memory slice of the instance along the memory chain.3244for (uint ni = new_index_start; ni < new_index_end; ni++) {3245if((uint)_compile->get_general_index(ni) == i) {3246Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);3247if (nmm->is_empty_memory(m)) {3248Node* result = find_inst_mem(mem, ni, orig_phis);3249if (_compile->failing()) {3250return;3251}3252nmm->set_memory_at(ni, result);3253}3254}3255}3256}3257// Find the rest of instances values3258for (uint ni = new_index_start; ni < new_index_end; ni++) {3259const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();3260Node* result = step_through_mergemem(nmm, ni, tinst);3261if (result == nmm->base_memory()) {3262// Didn't find instance memory, search through general slice recursively.3263result = nmm->memory_at(_compile->get_general_index(ni));3264result = find_inst_mem(result, ni, orig_phis);3265if (_compile->failing()) {3266return;3267}3268nmm->set_memory_at(ni, result);3269}3270}3271igvn->hash_insert(nmm);3272record_for_optimizer(nmm);3273}32743275// Phase 4: Update the inputs of non-instance memory Phis and3276// the Memory input of memnodes3277// First update the inputs of any non-instance Phi's from3278// which we split out an instance Phi. Note we don't have3279// to recursively process Phi's encounted on the input memory3280// chains as is done in split_memory_phi() since they will3281// also be processed here.3282for (int j = 0; j < orig_phis.length(); j++) {3283PhiNode *phi = orig_phis.at(j);3284int alias_idx = _compile->get_alias_index(phi->adr_type());3285igvn->hash_delete(phi);3286for (uint i = 1; i < phi->req(); i++) {3287Node *mem = phi->in(i);3288Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);3289if (_compile->failing()) {3290return;3291}3292if (mem != new_mem) {3293phi->set_req(i, new_mem);3294}3295}3296igvn->hash_insert(phi);3297record_for_optimizer(phi);3298}32993300// Update the memory inputs of MemNodes with the value we computed3301// in Phase 2 and move stores memory users to corresponding memory slices.3302// Disable memory split verification code until the fix for 6984348.3303// Currently it produces false negative results since it does not cover all cases.3304#if 0 // ifdef ASSERT3305visited.Reset();3306Node_Stack old_mems(arena, _compile->unique() >> 2);3307#endif3308for (uint i = 0; i < ideal_nodes.size(); i++) {3309Node* n = ideal_nodes.at(i);3310Node* nmem = get_map(n->_idx);3311assert(nmem != NULL, "sanity");3312if (n->is_Mem()) {3313#if 0 // ifdef ASSERT3314Node* old_mem = n->in(MemNode::Memory);3315if (!visited.test_set(old_mem->_idx)) {3316old_mems.push(old_mem, old_mem->outcnt());3317}3318#endif3319assert(n->in(MemNode::Memory) != nmem, "sanity");3320if (!n->is_Load()) {3321// Move memory users of a store first.3322move_inst_mem(n, orig_phis);3323}3324// Now update memory input3325igvn->hash_delete(n);3326n->set_req(MemNode::Memory, nmem);3327igvn->hash_insert(n);3328record_for_optimizer(n);3329} else {3330assert(n->is_Allocate() || n->is_CheckCastPP() ||3331n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");3332}3333}3334#if 0 // ifdef ASSERT3335// Verify that memory was split correctly3336while (old_mems.is_nonempty()) {3337Node* old_mem = old_mems.node();3338uint old_cnt = old_mems.index();3339old_mems.pop();3340assert(old_cnt == old_mem->outcnt(), "old mem could be lost");3341}3342#endif3343}33443345#ifndef PRODUCT3346static const char *node_type_names[] = {3347"UnknownType",3348"JavaObject",3349"LocalVar",3350"Field",3351"Arraycopy"3352};33533354static const char *esc_names[] = {3355"UnknownEscape",3356"NoEscape",3357"ArgEscape",3358"GlobalEscape"3359};33603361void PointsToNode::dump(bool print_state) const {3362NodeType nt = node_type();3363tty->print("%s ", node_type_names[(int) nt]);3364if (print_state) {3365EscapeState es = escape_state();3366EscapeState fields_es = fields_escape_state();3367tty->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]);3368if (nt == PointsToNode::JavaObject && !this->scalar_replaceable())3369tty->print("NSR ");3370}3371if (is_Field()) {3372FieldNode* f = (FieldNode*)this;3373if (f->is_oop())3374tty->print("oop ");3375if (f->offset() > 0)3376tty->print("+%d ", f->offset());3377tty->print("(");3378for (BaseIterator i(f); i.has_next(); i.next()) {3379PointsToNode* b = i.get();3380tty->print(" %d%s", b->idx(),(b->is_JavaObject() ? "P" : ""));3381}3382tty->print(" )");3383}3384tty->print("[");3385for (EdgeIterator i(this); i.has_next(); i.next()) {3386PointsToNode* e = i.get();3387tty->print(" %d%s%s", e->idx(),(e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : "");3388}3389tty->print(" [");3390for (UseIterator i(this); i.has_next(); i.next()) {3391PointsToNode* u = i.get();3392bool is_base = false;3393if (PointsToNode::is_base_use(u)) {3394is_base = true;3395u = PointsToNode::get_use_node(u)->as_Field();3396}3397tty->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : "");3398}3399tty->print(" ]] ");3400if (_node == NULL)3401tty->print_cr("<null>");3402else3403_node->dump();3404}34053406void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) {3407bool first = true;3408int ptnodes_length = ptnodes_worklist.length();3409for (int i = 0; i < ptnodes_length; i++) {3410PointsToNode *ptn = ptnodes_worklist.at(i);3411if (ptn == NULL || !ptn->is_JavaObject())3412continue;3413PointsToNode::EscapeState es = ptn->escape_state();3414if ((es != PointsToNode::NoEscape) && !Verbose) {3415continue;3416}3417Node* n = ptn->ideal_node();3418if (n->is_Allocate() || (n->is_CallStaticJava() &&3419n->as_CallStaticJava()->is_boxing_method())) {3420if (first) {3421tty->cr();3422tty->print("======== Connection graph for ");3423_compile->method()->print_short_name();3424tty->cr();3425first = false;3426}3427ptn->dump();3428// Print all locals and fields which reference this allocation3429for (UseIterator j(ptn); j.has_next(); j.next()) {3430PointsToNode* use = j.get();3431if (use->is_LocalVar()) {3432use->dump(Verbose);3433} else if (Verbose) {3434use->dump();3435}3436}3437tty->cr();3438}3439}3440}3441#endif344234433444