Path: blob/master/src/hotspot/share/opto/escape.cpp
40930 views
/*1* Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved.2* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.3*4* This code is free software; you can redistribute it and/or modify it5* under the terms of the GNU General Public License version 2 only, as6* published by the Free Software Foundation.7*8* This code is distributed in the hope that it will be useful, but WITHOUT9* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or10* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License11* version 2 for more details (a copy is included in the LICENSE file that12* accompanied this code).13*14* You should have received a copy of the GNU General Public License version15* 2 along with this work; if not, write to the Free Software Foundation,16* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.17*18* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA19* or visit www.oracle.com if you need additional information or have any20* questions.21*22*/2324#include "precompiled.hpp"25#include "ci/bcEscapeAnalyzer.hpp"26#include "compiler/compileLog.hpp"27#include "gc/shared/barrierSet.hpp"28#include "gc/shared/c2/barrierSetC2.hpp"29#include "libadt/vectset.hpp"30#include "memory/allocation.hpp"31#include "memory/resourceArea.hpp"32#include "opto/c2compiler.hpp"33#include "opto/arraycopynode.hpp"34#include "opto/callnode.hpp"35#include "opto/cfgnode.hpp"36#include "opto/compile.hpp"37#include "opto/escape.hpp"38#include "opto/phaseX.hpp"39#include "opto/movenode.hpp"40#include "opto/rootnode.hpp"41#include "utilities/macros.hpp"4243ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :44_nodes(C->comp_arena(), C->unique(), C->unique(), NULL),45_in_worklist(C->comp_arena()),46_next_pidx(0),47_collecting(true),48_verify(false),49_compile(C),50_igvn(igvn),51_node_map(C->comp_arena()) {52// Add unknown java object.53add_java_object(C->top(), PointsToNode::GlobalEscape);54phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();55// Add ConP(#NULL) and ConN(#NULL) nodes.56Node* oop_null = igvn->zerocon(T_OBJECT);57assert(oop_null->_idx < nodes_size(), "should be created already");58add_java_object(oop_null, PointsToNode::NoEscape);59null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();60if (UseCompressedOops) {61Node* noop_null = igvn->zerocon(T_NARROWOOP);62assert(noop_null->_idx < nodes_size(), "should be created already");63map_ideal_node(noop_null, null_obj);64}65}6667bool ConnectionGraph::has_candidates(Compile *C) {68// EA brings benefits only when the code has allocations and/or locks which69// are represented by ideal Macro nodes.70int cnt = C->macro_count();71for (int i = 0; i < cnt; i++) {72Node *n = C->macro_node(i);73if (n->is_Allocate()) {74return true;75}76if (n->is_Lock()) {77Node* obj = n->as_Lock()->obj_node()->uncast();78if (!(obj->is_Parm() || obj->is_Con())) {79return true;80}81}82if (n->is_CallStaticJava() &&83n->as_CallStaticJava()->is_boxing_method()) {84return true;85}86}87return false;88}8990void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {91Compile::TracePhase tp("escapeAnalysis", &Phase::timers[Phase::_t_escapeAnalysis]);92ResourceMark rm;9394// Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction95// to create space for them in ConnectionGraph::_nodes[].96Node* oop_null = igvn->zerocon(T_OBJECT);97Node* noop_null = igvn->zerocon(T_NARROWOOP);98ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn);99// Perform escape analysis100if (congraph->compute_escape()) {101// There are non escaping objects.102C->set_congraph(congraph);103}104// Cleanup.105if (oop_null->outcnt() == 0) {106igvn->hash_delete(oop_null);107}108if (noop_null->outcnt() == 0) {109igvn->hash_delete(noop_null);110}111}112113bool ConnectionGraph::compute_escape() {114Compile* C = _compile;115PhaseGVN* igvn = _igvn;116117// Worklists used by EA.118Unique_Node_List delayed_worklist;119GrowableArray<Node*> alloc_worklist;120GrowableArray<Node*> ptr_cmp_worklist;121GrowableArray<Node*> storestore_worklist;122GrowableArray<ArrayCopyNode*> arraycopy_worklist;123GrowableArray<PointsToNode*> ptnodes_worklist;124GrowableArray<JavaObjectNode*> java_objects_worklist;125GrowableArray<JavaObjectNode*> non_escaped_allocs_worklist;126GrowableArray<FieldNode*> oop_fields_worklist;127GrowableArray<SafePointNode*> sfn_worklist;128DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )129130{ Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);131132// 1. Populate Connection Graph (CG) with PointsTo nodes.133ideal_nodes.map(C->live_nodes(), NULL); // preallocate space134// Initialize worklist135if (C->root() != NULL) {136ideal_nodes.push(C->root());137}138// Processed ideal nodes are unique on ideal_nodes list139// but several ideal nodes are mapped to the phantom_obj.140// To avoid duplicated entries on the following worklists141// add the phantom_obj only once to them.142ptnodes_worklist.append(phantom_obj);143java_objects_worklist.append(phantom_obj);144for( uint next = 0; next < ideal_nodes.size(); ++next ) {145Node* n = ideal_nodes.at(next);146// Create PointsTo nodes and add them to Connection Graph. Called147// only once per ideal node since ideal_nodes is Unique_Node list.148add_node_to_connection_graph(n, &delayed_worklist);149PointsToNode* ptn = ptnode_adr(n->_idx);150if (ptn != NULL && ptn != phantom_obj) {151ptnodes_worklist.append(ptn);152if (ptn->is_JavaObject()) {153java_objects_worklist.append(ptn->as_JavaObject());154if ((n->is_Allocate() || n->is_CallStaticJava()) &&155(ptn->escape_state() < PointsToNode::GlobalEscape)) {156// Only allocations and java static calls results are interesting.157non_escaped_allocs_worklist.append(ptn->as_JavaObject());158}159} else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {160oop_fields_worklist.append(ptn->as_Field());161}162}163if (n->is_MergeMem()) {164// Collect all MergeMem nodes to add memory slices for165// scalar replaceable objects in split_unique_types().166_mergemem_worklist.append(n->as_MergeMem());167} else if (OptimizePtrCompare && n->is_Cmp() &&168(n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {169// Collect compare pointers nodes.170ptr_cmp_worklist.append(n);171} else if (n->is_MemBarStoreStore()) {172// Collect all MemBarStoreStore nodes so that depending on the173// escape status of the associated Allocate node some of them174// may be eliminated.175storestore_worklist.append(n);176} else if (n->is_MemBar() && (n->Opcode() == Op_MemBarRelease) &&177(n->req() > MemBarNode::Precedent)) {178record_for_optimizer(n);179#ifdef ASSERT180} else if (n->is_AddP()) {181// Collect address nodes for graph verification.182addp_worklist.append(n);183#endif184} else if (n->is_ArrayCopy()) {185// Keep a list of ArrayCopy nodes so if one of its input is non186// escaping, we can record a unique type187arraycopy_worklist.append(n->as_ArrayCopy());188}189for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {190Node* m = n->fast_out(i); // Get user191ideal_nodes.push(m);192}193if (n-> is_SafePoint()) {194sfn_worklist.append(n->as_SafePoint());195}196}197if (non_escaped_allocs_worklist.length() == 0) {198_collecting = false;199return false; // Nothing to do.200}201// Add final simple edges to graph.202while(delayed_worklist.size() > 0) {203Node* n = delayed_worklist.pop();204add_final_edges(n);205}206207#ifdef ASSERT208if (VerifyConnectionGraph) {209// Verify that no new simple edges could be created and all210// local vars has edges.211_verify = true;212int ptnodes_length = ptnodes_worklist.length();213for (int next = 0; next < ptnodes_length; ++next) {214PointsToNode* ptn = ptnodes_worklist.at(next);215add_final_edges(ptn->ideal_node());216if (ptn->is_LocalVar() && ptn->edge_count() == 0) {217ptn->dump();218assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");219}220}221_verify = false;222}223#endif224// Bytecode analyzer BCEscapeAnalyzer, used for Call nodes225// processing, calls to CI to resolve symbols (types, fields, methods)226// referenced in bytecode. During symbol resolution VM may throw227// an exception which CI cleans and converts to compilation failure.228if (C->failing()) return false;229230// 2. Finish Graph construction by propagating references to all231// java objects through graph.232if (!complete_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,233java_objects_worklist, oop_fields_worklist)) {234// All objects escaped or hit time or iterations limits.235_collecting = false;236return false;237}238239// 3. Adjust scalar_replaceable state of nonescaping objects and push240// scalar replaceable allocations on alloc_worklist for processing241// in split_unique_types().242int non_escaped_length = non_escaped_allocs_worklist.length();243for (int next = 0; next < non_escaped_length; next++) {244JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);245bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);246Node* n = ptn->ideal_node();247if (n->is_Allocate()) {248n->as_Allocate()->_is_non_escaping = noescape;249}250if (noescape && ptn->scalar_replaceable()) {251adjust_scalar_replaceable_state(ptn);252if (ptn->scalar_replaceable()) {253alloc_worklist.append(ptn->ideal_node());254}255}256}257258#ifdef ASSERT259if (VerifyConnectionGraph) {260// Verify that graph is complete - no new edges could be added or needed.261verify_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,262java_objects_worklist, addp_worklist);263}264assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");265assert(null_obj->escape_state() == PointsToNode::NoEscape &&266null_obj->edge_count() == 0 &&267!null_obj->arraycopy_src() &&268!null_obj->arraycopy_dst(), "sanity");269#endif270271_collecting = false;272273} // TracePhase t3("connectionGraph")274275// 4. Optimize ideal graph based on EA information.276bool has_non_escaping_obj = (non_escaped_allocs_worklist.length() > 0);277if (has_non_escaping_obj) {278optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);279}280281#ifndef PRODUCT282if (PrintEscapeAnalysis) {283dump(ptnodes_worklist); // Dump ConnectionGraph284}285#endif286287#ifdef ASSERT288if (VerifyConnectionGraph) {289int alloc_length = alloc_worklist.length();290for (int next = 0; next < alloc_length; ++next) {291Node* n = alloc_worklist.at(next);292PointsToNode* ptn = ptnode_adr(n->_idx);293assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");294}295}296#endif297298// 5. Separate memory graph for scalar replaceable allcations.299bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);300if (has_scalar_replaceable_candidates &&301C->AliasLevel() >= 3 && EliminateAllocations) {302// Now use the escape information to create unique types for303// scalar replaceable objects.304split_unique_types(alloc_worklist, arraycopy_worklist);305if (C->failing()) return false;306C->print_method(PHASE_AFTER_EA, 2);307308#ifdef ASSERT309} else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {310tty->print("=== No allocations eliminated for ");311C->method()->print_short_name();312if (!EliminateAllocations) {313tty->print(" since EliminateAllocations is off ===");314} else if(!has_scalar_replaceable_candidates) {315tty->print(" since there are no scalar replaceable candidates ===");316} else if(C->AliasLevel() < 3) {317tty->print(" since AliasLevel < 3 ===");318}319tty->cr();320#endif321}322323// Annotate at safepoints if they have <= ArgEscape objects in their scope and at324// java calls if they pass ArgEscape objects as parameters.325if (has_non_escaping_obj &&326(C->env()->should_retain_local_variables() ||327C->env()->jvmti_can_get_owned_monitor_info() ||328C->env()->jvmti_can_walk_any_space() ||329DeoptimizeObjectsALot)) {330int sfn_length = sfn_worklist.length();331for (int next = 0; next < sfn_length; next++) {332SafePointNode* sfn = sfn_worklist.at(next);333sfn->set_has_ea_local_in_scope(has_ea_local_in_scope(sfn));334if (sfn->is_CallJava()) {335CallJavaNode* call = sfn->as_CallJava();336call->set_arg_escape(has_arg_escape(call));337}338}339}340341return has_non_escaping_obj;342}343344// Returns true if there is an object in the scope of sfn that does not escape globally.345bool ConnectionGraph::has_ea_local_in_scope(SafePointNode* sfn) {346Compile* C = _compile;347for (JVMState* jvms = sfn->jvms(); jvms != NULL; jvms = jvms->caller()) {348if (C->env()->should_retain_local_variables() || C->env()->jvmti_can_walk_any_space() ||349DeoptimizeObjectsALot) {350// Jvmti agents can access locals. Must provide info about local objects at runtime.351int num_locs = jvms->loc_size();352for (int idx = 0; idx < num_locs; idx++) {353Node* l = sfn->local(jvms, idx);354if (not_global_escape(l)) {355return true;356}357}358}359if (C->env()->jvmti_can_get_owned_monitor_info() ||360C->env()->jvmti_can_walk_any_space() || DeoptimizeObjectsALot) {361// Jvmti agents can read monitors. Must provide info about locked objects at runtime.362int num_mon = jvms->nof_monitors();363for (int idx = 0; idx < num_mon; idx++) {364Node* m = sfn->monitor_obj(jvms, idx);365if (m != NULL && not_global_escape(m)) {366return true;367}368}369}370}371return false;372}373374// Returns true if at least one of the arguments to the call is an object375// that does not escape globally.376bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {377if (call->method() != NULL) {378uint max_idx = TypeFunc::Parms + call->method()->arg_size();379for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {380Node* p = call->in(idx);381if (not_global_escape(p)) {382return true;383}384}385} else {386const char* name = call->as_CallStaticJava()->_name;387assert(name != NULL, "no name");388// no arg escapes through uncommon traps389if (strcmp(name, "uncommon_trap") != 0) {390// process_call_arguments() assumes that all arguments escape globally391const TypeTuple* d = call->tf()->domain();392for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {393const Type* at = d->field_at(i);394if (at->isa_oopptr() != NULL) {395return true;396}397}398}399}400return false;401}402403404405// Utility function for nodes that load an object406void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {407// Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because408// ThreadLocal has RawPtr type.409const Type* t = _igvn->type(n);410if (t->make_ptr() != NULL) {411Node* adr = n->in(MemNode::Address);412#ifdef ASSERT413if (!adr->is_AddP()) {414assert(_igvn->type(adr)->isa_rawptr(), "sanity");415} else {416assert((ptnode_adr(adr->_idx) == NULL ||417ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");418}419#endif420add_local_var_and_edge(n, PointsToNode::NoEscape,421adr, delayed_worklist);422}423}424425// Populate Connection Graph with PointsTo nodes and create simple426// connection graph edges.427void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {428assert(!_verify, "this method should not be called for verification");429PhaseGVN* igvn = _igvn;430uint n_idx = n->_idx;431PointsToNode* n_ptn = ptnode_adr(n_idx);432if (n_ptn != NULL) {433return; // No need to redefine PointsTo node during first iteration.434}435int opcode = n->Opcode();436bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_to_con_graph(this, igvn, delayed_worklist, n, opcode);437if (gc_handled) {438return; // Ignore node if already handled by GC.439}440441if (n->is_Call()) {442// Arguments to allocation and locking don't escape.443if (n->is_AbstractLock()) {444// Put Lock and Unlock nodes on IGVN worklist to process them during445// first IGVN optimization when escape information is still available.446record_for_optimizer(n);447} else if (n->is_Allocate()) {448add_call_node(n->as_Call());449record_for_optimizer(n);450} else {451if (n->is_CallStaticJava()) {452const char* name = n->as_CallStaticJava()->_name;453if (name != NULL && strcmp(name, "uncommon_trap") == 0) {454return; // Skip uncommon traps455}456}457// Don't mark as processed since call's arguments have to be processed.458delayed_worklist->push(n);459// Check if a call returns an object.460if ((n->as_Call()->returns_pointer() &&461n->as_Call()->proj_out_or_null(TypeFunc::Parms) != NULL) ||462(n->is_CallStaticJava() &&463n->as_CallStaticJava()->is_boxing_method())) {464add_call_node(n->as_Call());465}466}467return;468}469// Put this check here to process call arguments since some call nodes470// point to phantom_obj.471if (n_ptn == phantom_obj || n_ptn == null_obj) {472return; // Skip predefined nodes.473}474switch (opcode) {475case Op_AddP: {476Node* base = get_addp_base(n);477PointsToNode* ptn_base = ptnode_adr(base->_idx);478// Field nodes are created for all field types. They are used in479// adjust_scalar_replaceable_state() and split_unique_types().480// Note, non-oop fields will have only base edges in Connection481// Graph because such fields are not used for oop loads and stores.482int offset = address_offset(n, igvn);483add_field(n, PointsToNode::NoEscape, offset);484if (ptn_base == NULL) {485delayed_worklist->push(n); // Process it later.486} else {487n_ptn = ptnode_adr(n_idx);488add_base(n_ptn->as_Field(), ptn_base);489}490break;491}492case Op_CastX2P: {493map_ideal_node(n, phantom_obj);494break;495}496case Op_CastPP:497case Op_CheckCastPP:498case Op_EncodeP:499case Op_DecodeN:500case Op_EncodePKlass:501case Op_DecodeNKlass: {502add_local_var_and_edge(n, PointsToNode::NoEscape,503n->in(1), delayed_worklist);504break;505}506case Op_CMoveP: {507add_local_var(n, PointsToNode::NoEscape);508// Do not add edges during first iteration because some could be509// not defined yet.510delayed_worklist->push(n);511break;512}513case Op_ConP:514case Op_ConN:515case Op_ConNKlass: {516// assume all oop constants globally escape except for null517PointsToNode::EscapeState es;518const Type* t = igvn->type(n);519if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {520es = PointsToNode::NoEscape;521} else {522es = PointsToNode::GlobalEscape;523}524add_java_object(n, es);525break;526}527case Op_CreateEx: {528// assume that all exception objects globally escape529map_ideal_node(n, phantom_obj);530break;531}532case Op_LoadKlass:533case Op_LoadNKlass: {534// Unknown class is loaded535map_ideal_node(n, phantom_obj);536break;537}538case Op_LoadP:539case Op_LoadN:540case Op_LoadPLocked: {541add_objload_to_connection_graph(n, delayed_worklist);542break;543}544case Op_Parm: {545map_ideal_node(n, phantom_obj);546break;547}548case Op_PartialSubtypeCheck: {549// Produces Null or notNull and is used in only in CmpP so550// phantom_obj could be used.551map_ideal_node(n, phantom_obj); // Result is unknown552break;553}554case Op_Phi: {555// Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because556// ThreadLocal has RawPtr type.557const Type* t = n->as_Phi()->type();558if (t->make_ptr() != NULL) {559add_local_var(n, PointsToNode::NoEscape);560// Do not add edges during first iteration because some could be561// not defined yet.562delayed_worklist->push(n);563}564break;565}566case Op_Proj: {567// we are only interested in the oop result projection from a call568if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&569n->in(0)->as_Call()->returns_pointer()) {570add_local_var_and_edge(n, PointsToNode::NoEscape,571n->in(0), delayed_worklist);572}573break;574}575case Op_Rethrow: // Exception object escapes576case Op_Return: {577if (n->req() > TypeFunc::Parms &&578igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {579// Treat Return value as LocalVar with GlobalEscape escape state.580add_local_var_and_edge(n, PointsToNode::GlobalEscape,581n->in(TypeFunc::Parms), delayed_worklist);582}583break;584}585case Op_CompareAndExchangeP:586case Op_CompareAndExchangeN:587case Op_GetAndSetP:588case Op_GetAndSetN: {589add_objload_to_connection_graph(n, delayed_worklist);590// fall-through591}592case Op_StoreP:593case Op_StoreN:594case Op_StoreNKlass:595case Op_StorePConditional:596case Op_WeakCompareAndSwapP:597case Op_WeakCompareAndSwapN:598case Op_CompareAndSwapP:599case Op_CompareAndSwapN: {600add_to_congraph_unsafe_access(n, opcode, delayed_worklist);601break;602}603case Op_AryEq:604case Op_HasNegatives:605case Op_StrComp:606case Op_StrEquals:607case Op_StrIndexOf:608case Op_StrIndexOfChar:609case Op_StrInflatedCopy:610case Op_StrCompressedCopy:611case Op_EncodeISOArray: {612add_local_var(n, PointsToNode::ArgEscape);613delayed_worklist->push(n); // Process it later.614break;615}616case Op_ThreadLocal: {617add_java_object(n, PointsToNode::ArgEscape);618break;619}620default:621; // Do nothing for nodes not related to EA.622}623return;624}625626#ifdef ASSERT627#define ELSE_FAIL(name) \628/* Should not be called for not pointer type. */ \629n->dump(1); \630assert(false, name); \631break;632#else633#define ELSE_FAIL(name) \634break;635#endif636637// Add final simple edges to graph.638void ConnectionGraph::add_final_edges(Node *n) {639PointsToNode* n_ptn = ptnode_adr(n->_idx);640#ifdef ASSERT641if (_verify && n_ptn->is_JavaObject())642return; // This method does not change graph for JavaObject.643#endif644645if (n->is_Call()) {646process_call_arguments(n->as_Call());647return;648}649assert(n->is_Store() || n->is_LoadStore() ||650(n_ptn != NULL) && (n_ptn->ideal_node() != NULL),651"node should be registered already");652int opcode = n->Opcode();653bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);654if (gc_handled) {655return; // Ignore node if already handled by GC.656}657switch (opcode) {658case Op_AddP: {659Node* base = get_addp_base(n);660PointsToNode* ptn_base = ptnode_adr(base->_idx);661assert(ptn_base != NULL, "field's base should be registered");662add_base(n_ptn->as_Field(), ptn_base);663break;664}665case Op_CastPP:666case Op_CheckCastPP:667case Op_EncodeP:668case Op_DecodeN:669case Op_EncodePKlass:670case Op_DecodeNKlass: {671add_local_var_and_edge(n, PointsToNode::NoEscape,672n->in(1), NULL);673break;674}675case Op_CMoveP: {676for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {677Node* in = n->in(i);678if (in == NULL) {679continue; // ignore NULL680}681Node* uncast_in = in->uncast();682if (uncast_in->is_top() || uncast_in == n) {683continue; // ignore top or inputs which go back this node684}685PointsToNode* ptn = ptnode_adr(in->_idx);686assert(ptn != NULL, "node should be registered");687add_edge(n_ptn, ptn);688}689break;690}691case Op_LoadP:692case Op_LoadN:693case Op_LoadPLocked: {694// Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because695// ThreadLocal has RawPtr type.696const Type* t = _igvn->type(n);697if (t->make_ptr() != NULL) {698Node* adr = n->in(MemNode::Address);699add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);700break;701}702ELSE_FAIL("Op_LoadP");703}704case Op_Phi: {705// Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because706// ThreadLocal has RawPtr type.707const Type* t = n->as_Phi()->type();708if (t->make_ptr() != NULL) {709for (uint i = 1; i < n->req(); i++) {710Node* in = n->in(i);711if (in == NULL) {712continue; // ignore NULL713}714Node* uncast_in = in->uncast();715if (uncast_in->is_top() || uncast_in == n) {716continue; // ignore top or inputs which go back this node717}718PointsToNode* ptn = ptnode_adr(in->_idx);719assert(ptn != NULL, "node should be registered");720add_edge(n_ptn, ptn);721}722break;723}724ELSE_FAIL("Op_Phi");725}726case Op_Proj: {727// we are only interested in the oop result projection from a call728if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&729n->in(0)->as_Call()->returns_pointer()) {730add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);731break;732}733ELSE_FAIL("Op_Proj");734}735case Op_Rethrow: // Exception object escapes736case Op_Return: {737if (n->req() > TypeFunc::Parms &&738_igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {739// Treat Return value as LocalVar with GlobalEscape escape state.740add_local_var_and_edge(n, PointsToNode::GlobalEscape,741n->in(TypeFunc::Parms), NULL);742break;743}744ELSE_FAIL("Op_Return");745}746case Op_StoreP:747case Op_StoreN:748case Op_StoreNKlass:749case Op_StorePConditional:750case Op_CompareAndExchangeP:751case Op_CompareAndExchangeN:752case Op_CompareAndSwapP:753case Op_CompareAndSwapN:754case Op_WeakCompareAndSwapP:755case Op_WeakCompareAndSwapN:756case Op_GetAndSetP:757case Op_GetAndSetN: {758if (add_final_edges_unsafe_access(n, opcode)) {759break;760}761ELSE_FAIL("Op_StoreP");762}763case Op_AryEq:764case Op_HasNegatives:765case Op_StrComp:766case Op_StrEquals:767case Op_StrIndexOf:768case Op_StrIndexOfChar:769case Op_StrInflatedCopy:770case Op_StrCompressedCopy:771case Op_EncodeISOArray: {772// char[]/byte[] arrays passed to string intrinsic do not escape but773// they are not scalar replaceable. Adjust escape state for them.774// Start from in(2) edge since in(1) is memory edge.775for (uint i = 2; i < n->req(); i++) {776Node* adr = n->in(i);777const Type* at = _igvn->type(adr);778if (!adr->is_top() && at->isa_ptr()) {779assert(at == Type::TOP || at == TypePtr::NULL_PTR ||780at->isa_ptr() != NULL, "expecting a pointer");781if (adr->is_AddP()) {782adr = get_addp_base(adr);783}784PointsToNode* ptn = ptnode_adr(adr->_idx);785assert(ptn != NULL, "node should be registered");786add_edge(n_ptn, ptn);787}788}789break;790}791default: {792// This method should be called only for EA specific nodes which may793// miss some edges when they were created.794#ifdef ASSERT795n->dump(1);796#endif797guarantee(false, "unknown node");798}799}800return;801}802803void ConnectionGraph::add_to_congraph_unsafe_access(Node* n, uint opcode, Unique_Node_List* delayed_worklist) {804Node* adr = n->in(MemNode::Address);805const Type* adr_type = _igvn->type(adr);806adr_type = adr_type->make_ptr();807if (adr_type == NULL) {808return; // skip dead nodes809}810if (adr_type->isa_oopptr()811|| ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)812&& adr_type == TypeRawPtr::NOTNULL813&& is_captured_store_address(adr))) {814delayed_worklist->push(n); // Process it later.815#ifdef ASSERT816assert (adr->is_AddP(), "expecting an AddP");817if (adr_type == TypeRawPtr::NOTNULL) {818// Verify a raw address for a store captured by Initialize node.819int offs = (int) _igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);820assert(offs != Type::OffsetBot, "offset must be a constant");821}822#endif823} else {824// Ignore copy the displaced header to the BoxNode (OSR compilation).825if (adr->is_BoxLock()) {826return;827}828// Stored value escapes in unsafe access.829if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {830delayed_worklist->push(n); // Process unsafe access later.831return;832}833#ifdef ASSERT834n->dump(1);835assert(false, "not unsafe");836#endif837}838}839840bool ConnectionGraph::add_final_edges_unsafe_access(Node* n, uint opcode) {841Node* adr = n->in(MemNode::Address);842const Type *adr_type = _igvn->type(adr);843adr_type = adr_type->make_ptr();844#ifdef ASSERT845if (adr_type == NULL) {846n->dump(1);847assert(adr_type != NULL, "dead node should not be on list");848return true;849}850#endif851852if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN ||853opcode == Op_CompareAndExchangeN || opcode == Op_CompareAndExchangeP) {854add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);855}856857if (adr_type->isa_oopptr()858|| ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)859&& adr_type == TypeRawPtr::NOTNULL860&& is_captured_store_address(adr))) {861// Point Address to Value862PointsToNode* adr_ptn = ptnode_adr(adr->_idx);863assert(adr_ptn != NULL &&864adr_ptn->as_Field()->is_oop(), "node should be registered");865Node* val = n->in(MemNode::ValueIn);866PointsToNode* ptn = ptnode_adr(val->_idx);867assert(ptn != NULL, "node should be registered");868add_edge(adr_ptn, ptn);869return true;870} else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {871// Stored value escapes in unsafe access.872Node* val = n->in(MemNode::ValueIn);873PointsToNode* ptn = ptnode_adr(val->_idx);874assert(ptn != NULL, "node should be registered");875set_escape_state(ptn, PointsToNode::GlobalEscape);876// Add edge to object for unsafe access with offset.877PointsToNode* adr_ptn = ptnode_adr(adr->_idx);878assert(adr_ptn != NULL, "node should be registered");879if (adr_ptn->is_Field()) {880assert(adr_ptn->as_Field()->is_oop(), "should be oop field");881add_edge(adr_ptn, ptn);882}883return true;884}885return false;886}887888void ConnectionGraph::add_call_node(CallNode* call) {889assert(call->returns_pointer(), "only for call which returns pointer");890uint call_idx = call->_idx;891if (call->is_Allocate()) {892Node* k = call->in(AllocateNode::KlassNode);893const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();894assert(kt != NULL, "TypeKlassPtr required.");895ciKlass* cik = kt->klass();896PointsToNode::EscapeState es = PointsToNode::NoEscape;897bool scalar_replaceable = true;898if (call->is_AllocateArray()) {899if (!cik->is_array_klass()) { // StressReflectiveCode900es = PointsToNode::GlobalEscape;901} else {902int length = call->in(AllocateNode::ALength)->find_int_con(-1);903if (length < 0 || length > EliminateAllocationArraySizeLimit) {904// Not scalar replaceable if the length is not constant or too big.905scalar_replaceable = false;906}907}908} else { // Allocate instance909if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||910cik->is_subclass_of(_compile->env()->Reference_klass()) ||911!cik->is_instance_klass() || // StressReflectiveCode912!cik->as_instance_klass()->can_be_instantiated() ||913cik->as_instance_klass()->has_finalizer()) {914es = PointsToNode::GlobalEscape;915} else {916int nfields = cik->as_instance_klass()->nof_nonstatic_fields();917if (nfields > EliminateAllocationFieldsLimit) {918// Not scalar replaceable if there are too many fields.919scalar_replaceable = false;920}921}922}923add_java_object(call, es);924PointsToNode* ptn = ptnode_adr(call_idx);925if (!scalar_replaceable && ptn->scalar_replaceable()) {926ptn->set_scalar_replaceable(false);927}928} else if (call->is_CallStaticJava()) {929// Call nodes could be different types:930//931// 1. CallDynamicJavaNode (what happened during call is unknown):932//933// - mapped to GlobalEscape JavaObject node if oop is returned;934//935// - all oop arguments are escaping globally;936//937// 2. CallStaticJavaNode (execute bytecode analysis if possible):938//939// - the same as CallDynamicJavaNode if can't do bytecode analysis;940//941// - mapped to GlobalEscape JavaObject node if unknown oop is returned;942// - mapped to NoEscape JavaObject node if non-escaping object allocated943// during call is returned;944// - mapped to ArgEscape LocalVar node pointed to object arguments945// which are returned and does not escape during call;946//947// - oop arguments escaping status is defined by bytecode analysis;948//949// For a static call, we know exactly what method is being called.950// Use bytecode estimator to record whether the call's return value escapes.951ciMethod* meth = call->as_CallJava()->method();952if (meth == NULL) {953const char* name = call->as_CallStaticJava()->_name;954assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");955// Returns a newly allocated non-escaped object.956add_java_object(call, PointsToNode::NoEscape);957ptnode_adr(call_idx)->set_scalar_replaceable(false);958} else if (meth->is_boxing_method()) {959// Returns boxing object960PointsToNode::EscapeState es;961vmIntrinsics::ID intr = meth->intrinsic_id();962if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {963// It does not escape if object is always allocated.964es = PointsToNode::NoEscape;965} else {966// It escapes globally if object could be loaded from cache.967es = PointsToNode::GlobalEscape;968}969add_java_object(call, es);970} else {971BCEscapeAnalyzer* call_analyzer = meth->get_bcea();972call_analyzer->copy_dependencies(_compile->dependencies());973if (call_analyzer->is_return_allocated()) {974// Returns a newly allocated non-escaped object, simply975// update dependency information.976// Mark it as NoEscape so that objects referenced by977// it's fields will be marked as NoEscape at least.978add_java_object(call, PointsToNode::NoEscape);979ptnode_adr(call_idx)->set_scalar_replaceable(false);980} else {981// Determine whether any arguments are returned.982const TypeTuple* d = call->tf()->domain();983bool ret_arg = false;984for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {985if (d->field_at(i)->isa_ptr() != NULL &&986call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {987ret_arg = true;988break;989}990}991if (ret_arg) {992add_local_var(call, PointsToNode::ArgEscape);993} else {994// Returns unknown object.995map_ideal_node(call, phantom_obj);996}997}998}999} else {1000// An other type of call, assume the worst case:1001// returned value is unknown and globally escapes.1002assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");1003map_ideal_node(call, phantom_obj);1004}1005}10061007void ConnectionGraph::process_call_arguments(CallNode *call) {1008bool is_arraycopy = false;1009switch (call->Opcode()) {1010#ifdef ASSERT1011case Op_Allocate:1012case Op_AllocateArray:1013case Op_Lock:1014case Op_Unlock:1015assert(false, "should be done already");1016break;1017#endif1018case Op_ArrayCopy:1019case Op_CallLeafNoFP:1020// Most array copies are ArrayCopy nodes at this point but there1021// are still a few direct calls to the copy subroutines (See1022// PhaseStringOpts::copy_string())1023is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||1024call->as_CallLeaf()->is_call_to_arraycopystub();1025// fall through1026case Op_CallLeafVector:1027case Op_CallLeaf: {1028// Stub calls, objects do not escape but they are not scale replaceable.1029// Adjust escape state for outgoing arguments.1030const TypeTuple * d = call->tf()->domain();1031bool src_has_oops = false;1032for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {1033const Type* at = d->field_at(i);1034Node *arg = call->in(i);1035if (arg == NULL) {1036continue;1037}1038const Type *aat = _igvn->type(arg);1039if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {1040continue;1041}1042if (arg->is_AddP()) {1043//1044// The inline_native_clone() case when the arraycopy stub is called1045// after the allocation before Initialize and CheckCastPP nodes.1046// Or normal arraycopy for object arrays case.1047//1048// Set AddP's base (Allocate) as not scalar replaceable since1049// pointer to the base (with offset) is passed as argument.1050//1051arg = get_addp_base(arg);1052}1053PointsToNode* arg_ptn = ptnode_adr(arg->_idx);1054assert(arg_ptn != NULL, "should be registered");1055PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();1056if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {1057assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||1058aat->isa_ptr() != NULL, "expecting an Ptr");1059bool arg_has_oops = aat->isa_oopptr() &&1060(aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||1061(aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()));1062if (i == TypeFunc::Parms) {1063src_has_oops = arg_has_oops;1064}1065//1066// src or dst could be j.l.Object when other is basic type array:1067//1068// arraycopy(char[],0,Object*,0,size);1069// arraycopy(Object*,0,char[],0,size);1070//1071// Don't add edges in such cases.1072//1073bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&1074arg_has_oops && (i > TypeFunc::Parms);1075#ifdef ASSERT1076if (!(is_arraycopy ||1077BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||1078(call->as_CallLeaf()->_name != NULL &&1079(strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||1080strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||1081strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||1082strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||1083strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||1084strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||1085strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||1086strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_encryptAESCrypt") == 0 ||1087strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_decryptAESCrypt") == 0 ||1088strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 ||1089strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||1090strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||1091strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||1092strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||1093strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||1094strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||1095strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||1096strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||1097strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||1098strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||1099strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||1100strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||1101strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||1102strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||1103strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||1104strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||1105strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||1106strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||1107strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||1108strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||1109strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||1110strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0)1111))) {1112call->dump();1113fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);1114}1115#endif1116// Always process arraycopy's destination object since1117// we need to add all possible edges to references in1118// source object.1119if (arg_esc >= PointsToNode::ArgEscape &&1120!arg_is_arraycopy_dest) {1121continue;1122}1123PointsToNode::EscapeState es = PointsToNode::ArgEscape;1124if (call->is_ArrayCopy()) {1125ArrayCopyNode* ac = call->as_ArrayCopy();1126if (ac->is_clonebasic() ||1127ac->is_arraycopy_validated() ||1128ac->is_copyof_validated() ||1129ac->is_copyofrange_validated()) {1130es = PointsToNode::NoEscape;1131}1132}1133set_escape_state(arg_ptn, es);1134if (arg_is_arraycopy_dest) {1135Node* src = call->in(TypeFunc::Parms);1136if (src->is_AddP()) {1137src = get_addp_base(src);1138}1139PointsToNode* src_ptn = ptnode_adr(src->_idx);1140assert(src_ptn != NULL, "should be registered");1141if (arg_ptn != src_ptn) {1142// Special arraycopy edge:1143// A destination object's field can't have the source object1144// as base since objects escape states are not related.1145// Only escape state of destination object's fields affects1146// escape state of fields in source object.1147add_arraycopy(call, es, src_ptn, arg_ptn);1148}1149}1150}1151}1152break;1153}1154case Op_CallStaticJava: {1155// For a static call, we know exactly what method is being called.1156// Use bytecode estimator to record the call's escape affects1157#ifdef ASSERT1158const char* name = call->as_CallStaticJava()->_name;1159assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");1160#endif1161ciMethod* meth = call->as_CallJava()->method();1162if ((meth != NULL) && meth->is_boxing_method()) {1163break; // Boxing methods do not modify any oops.1164}1165BCEscapeAnalyzer* call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL;1166// fall-through if not a Java method or no analyzer information1167if (call_analyzer != NULL) {1168PointsToNode* call_ptn = ptnode_adr(call->_idx);1169const TypeTuple* d = call->tf()->domain();1170for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {1171const Type* at = d->field_at(i);1172int k = i - TypeFunc::Parms;1173Node* arg = call->in(i);1174PointsToNode* arg_ptn = ptnode_adr(arg->_idx);1175if (at->isa_ptr() != NULL &&1176call_analyzer->is_arg_returned(k)) {1177// The call returns arguments.1178if (call_ptn != NULL) { // Is call's result used?1179assert(call_ptn->is_LocalVar(), "node should be registered");1180assert(arg_ptn != NULL, "node should be registered");1181add_edge(call_ptn, arg_ptn);1182}1183}1184if (at->isa_oopptr() != NULL &&1185arg_ptn->escape_state() < PointsToNode::GlobalEscape) {1186if (!call_analyzer->is_arg_stack(k)) {1187// The argument global escapes1188set_escape_state(arg_ptn, PointsToNode::GlobalEscape);1189} else {1190set_escape_state(arg_ptn, PointsToNode::ArgEscape);1191if (!call_analyzer->is_arg_local(k)) {1192// The argument itself doesn't escape, but any fields might1193set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape);1194}1195}1196}1197}1198if (call_ptn != NULL && call_ptn->is_LocalVar()) {1199// The call returns arguments.1200assert(call_ptn->edge_count() > 0, "sanity");1201if (!call_analyzer->is_return_local()) {1202// Returns also unknown object.1203add_edge(call_ptn, phantom_obj);1204}1205}1206break;1207}1208}1209default: {1210// Fall-through here if not a Java method or no analyzer information1211// or some other type of call, assume the worst case: all arguments1212// globally escape.1213const TypeTuple* d = call->tf()->domain();1214for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {1215const Type* at = d->field_at(i);1216if (at->isa_oopptr() != NULL) {1217Node* arg = call->in(i);1218if (arg->is_AddP()) {1219arg = get_addp_base(arg);1220}1221assert(ptnode_adr(arg->_idx) != NULL, "should be defined already");1222set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape);1223}1224}1225}1226}1227}122812291230// Finish Graph construction.1231bool ConnectionGraph::complete_connection_graph(1232GrowableArray<PointsToNode*>& ptnodes_worklist,1233GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,1234GrowableArray<JavaObjectNode*>& java_objects_worklist,1235GrowableArray<FieldNode*>& oop_fields_worklist) {1236// Normally only 1-3 passes needed to build Connection Graph depending1237// on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.1238// Set limit to 20 to catch situation when something did go wrong and1239// bailout Escape Analysis.1240// Also limit build time to 20 sec (60 in debug VM), EscapeAnalysisTimeout flag.1241#define GRAPH_BUILD_ITER_LIMIT 2012421243// Propagate GlobalEscape and ArgEscape escape states and check that1244// we still have non-escaping objects. The method pushs on _worklist1245// Field nodes which reference phantom_object.1246if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {1247return false; // Nothing to do.1248}1249// Now propagate references to all JavaObject nodes.1250int java_objects_length = java_objects_worklist.length();1251elapsedTimer build_time;1252build_time.start();1253elapsedTimer time;1254bool timeout = false;1255int new_edges = 1;1256int iterations = 0;1257do {1258while ((new_edges > 0) &&1259(iterations++ < GRAPH_BUILD_ITER_LIMIT)) {1260double start_time = time.seconds();1261time.start();1262new_edges = 0;1263// Propagate references to phantom_object for nodes pushed on _worklist1264// by find_non_escaped_objects() and find_field_value().1265new_edges += add_java_object_edges(phantom_obj, false);1266for (int next = 0; next < java_objects_length; ++next) {1267JavaObjectNode* ptn = java_objects_worklist.at(next);1268new_edges += add_java_object_edges(ptn, true);12691270#define SAMPLE_SIZE 41271if ((next % SAMPLE_SIZE) == 0) {1272// Each 4 iterations calculate how much time it will take1273// to complete graph construction.1274time.stop();1275// Poll for requests from shutdown mechanism to quiesce compiler1276// because Connection graph construction may take long time.1277CompileBroker::maybe_block();1278double stop_time = time.seconds();1279double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;1280double time_until_end = time_per_iter * (double)(java_objects_length - next);1281if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {1282timeout = true;1283break; // Timeout1284}1285start_time = stop_time;1286time.start();1287}1288#undef SAMPLE_SIZE12891290}1291if (timeout) break;1292if (new_edges > 0) {1293// Update escape states on each iteration if graph was updated.1294if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {1295return false; // Nothing to do.1296}1297}1298time.stop();1299if (time.seconds() >= EscapeAnalysisTimeout) {1300timeout = true;1301break;1302}1303}1304if ((iterations < GRAPH_BUILD_ITER_LIMIT) && !timeout) {1305time.start();1306// Find fields which have unknown value.1307int fields_length = oop_fields_worklist.length();1308for (int next = 0; next < fields_length; next++) {1309FieldNode* field = oop_fields_worklist.at(next);1310if (field->edge_count() == 0) {1311new_edges += find_field_value(field);1312// This code may added new edges to phantom_object.1313// Need an other cycle to propagate references to phantom_object.1314}1315}1316time.stop();1317if (time.seconds() >= EscapeAnalysisTimeout) {1318timeout = true;1319break;1320}1321} else {1322new_edges = 0; // Bailout1323}1324} while (new_edges > 0);13251326build_time.stop();1327_build_time = build_time.seconds();1328_build_iterations = iterations;13291330// Bailout if passed limits.1331if ((iterations >= GRAPH_BUILD_ITER_LIMIT) || timeout) {1332Compile* C = _compile;1333if (C->log() != NULL) {1334C->log()->begin_elem("connectionGraph_bailout reason='reached ");1335C->log()->text("%s", timeout ? "time" : "iterations");1336C->log()->end_elem(" limit'");1337}1338assert(ExitEscapeAnalysisOnTimeout, "infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d",1339_build_time, _build_iterations, nodes_size(), ptnodes_worklist.length());1340// Possible infinite build_connection_graph loop,1341// bailout (no changes to ideal graph were made).1342return false;1343}1344#ifdef ASSERT1345if (Verbose && PrintEscapeAnalysis) {1346tty->print_cr("EA: %d iterations and %f sec to build connection graph with %d nodes and worklist size %d",1347_build_iterations, _build_time, nodes_size(), ptnodes_worklist.length());1348}1349#endif13501351#undef GRAPH_BUILD_ITER_LIMIT13521353// Find fields initialized by NULL for non-escaping Allocations.1354int non_escaped_length = non_escaped_allocs_worklist.length();1355for (int next = 0; next < non_escaped_length; next++) {1356JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);1357PointsToNode::EscapeState es = ptn->escape_state();1358assert(es <= PointsToNode::ArgEscape, "sanity");1359if (es == PointsToNode::NoEscape) {1360if (find_init_values_null(ptn, _igvn) > 0) {1361// Adding references to NULL object does not change escape states1362// since it does not escape. Also no fields are added to NULL object.1363add_java_object_edges(null_obj, false);1364}1365}1366Node* n = ptn->ideal_node();1367if (n->is_Allocate()) {1368// The object allocated by this Allocate node will never be1369// seen by an other thread. Mark it so that when it is1370// expanded no MemBarStoreStore is added.1371InitializeNode* ini = n->as_Allocate()->initialization();1372if (ini != NULL)1373ini->set_does_not_escape();1374}1375}1376return true; // Finished graph construction.1377}13781379// Propagate GlobalEscape and ArgEscape escape states to all nodes1380// and check that we still have non-escaping java objects.1381bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,1382GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist) {1383GrowableArray<PointsToNode*> escape_worklist;1384// First, put all nodes with GlobalEscape and ArgEscape states on worklist.1385int ptnodes_length = ptnodes_worklist.length();1386for (int next = 0; next < ptnodes_length; ++next) {1387PointsToNode* ptn = ptnodes_worklist.at(next);1388if (ptn->escape_state() >= PointsToNode::ArgEscape ||1389ptn->fields_escape_state() >= PointsToNode::ArgEscape) {1390escape_worklist.push(ptn);1391}1392}1393// Set escape states to referenced nodes (edges list).1394while (escape_worklist.length() > 0) {1395PointsToNode* ptn = escape_worklist.pop();1396PointsToNode::EscapeState es = ptn->escape_state();1397PointsToNode::EscapeState field_es = ptn->fields_escape_state();1398if (ptn->is_Field() && ptn->as_Field()->is_oop() &&1399es >= PointsToNode::ArgEscape) {1400// GlobalEscape or ArgEscape state of field means it has unknown value.1401if (add_edge(ptn, phantom_obj)) {1402// New edge was added1403add_field_uses_to_worklist(ptn->as_Field());1404}1405}1406for (EdgeIterator i(ptn); i.has_next(); i.next()) {1407PointsToNode* e = i.get();1408if (e->is_Arraycopy()) {1409assert(ptn->arraycopy_dst(), "sanity");1410// Propagate only fields escape state through arraycopy edge.1411if (e->fields_escape_state() < field_es) {1412set_fields_escape_state(e, field_es);1413escape_worklist.push(e);1414}1415} else if (es >= field_es) {1416// fields_escape_state is also set to 'es' if it is less than 'es'.1417if (e->escape_state() < es) {1418set_escape_state(e, es);1419escape_worklist.push(e);1420}1421} else {1422// Propagate field escape state.1423bool es_changed = false;1424if (e->fields_escape_state() < field_es) {1425set_fields_escape_state(e, field_es);1426es_changed = true;1427}1428if ((e->escape_state() < field_es) &&1429e->is_Field() && ptn->is_JavaObject() &&1430e->as_Field()->is_oop()) {1431// Change escape state of referenced fields.1432set_escape_state(e, field_es);1433es_changed = true;1434} else if (e->escape_state() < es) {1435set_escape_state(e, es);1436es_changed = true;1437}1438if (es_changed) {1439escape_worklist.push(e);1440}1441}1442}1443}1444// Remove escaped objects from non_escaped list.1445for (int next = non_escaped_allocs_worklist.length()-1; next >= 0 ; --next) {1446JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);1447if (ptn->escape_state() >= PointsToNode::GlobalEscape) {1448non_escaped_allocs_worklist.delete_at(next);1449}1450if (ptn->escape_state() == PointsToNode::NoEscape) {1451// Find fields in non-escaped allocations which have unknown value.1452find_init_values_phantom(ptn);1453}1454}1455return (non_escaped_allocs_worklist.length() > 0);1456}14571458// Add all references to JavaObject node by walking over all uses.1459int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) {1460int new_edges = 0;1461if (populate_worklist) {1462// Populate _worklist by uses of jobj's uses.1463for (UseIterator i(jobj); i.has_next(); i.next()) {1464PointsToNode* use = i.get();1465if (use->is_Arraycopy()) {1466continue;1467}1468add_uses_to_worklist(use);1469if (use->is_Field() && use->as_Field()->is_oop()) {1470// Put on worklist all field's uses (loads) and1471// related field nodes (same base and offset).1472add_field_uses_to_worklist(use->as_Field());1473}1474}1475}1476for (int l = 0; l < _worklist.length(); l++) {1477PointsToNode* use = _worklist.at(l);1478if (PointsToNode::is_base_use(use)) {1479// Add reference from jobj to field and from field to jobj (field's base).1480use = PointsToNode::get_use_node(use)->as_Field();1481if (add_base(use->as_Field(), jobj)) {1482new_edges++;1483}1484continue;1485}1486assert(!use->is_JavaObject(), "sanity");1487if (use->is_Arraycopy()) {1488if (jobj == null_obj) { // NULL object does not have field edges1489continue;1490}1491// Added edge from Arraycopy node to arraycopy's source java object1492if (add_edge(use, jobj)) {1493jobj->set_arraycopy_src();1494new_edges++;1495}1496// and stop here.1497continue;1498}1499if (!add_edge(use, jobj)) {1500continue; // No new edge added, there was such edge already.1501}1502new_edges++;1503if (use->is_LocalVar()) {1504add_uses_to_worklist(use);1505if (use->arraycopy_dst()) {1506for (EdgeIterator i(use); i.has_next(); i.next()) {1507PointsToNode* e = i.get();1508if (e->is_Arraycopy()) {1509if (jobj == null_obj) { // NULL object does not have field edges1510continue;1511}1512// Add edge from arraycopy's destination java object to Arraycopy node.1513if (add_edge(jobj, e)) {1514new_edges++;1515jobj->set_arraycopy_dst();1516}1517}1518}1519}1520} else {1521// Added new edge to stored in field values.1522// Put on worklist all field's uses (loads) and1523// related field nodes (same base and offset).1524add_field_uses_to_worklist(use->as_Field());1525}1526}1527_worklist.clear();1528_in_worklist.reset();1529return new_edges;1530}15311532// Put on worklist all related field nodes.1533void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {1534assert(field->is_oop(), "sanity");1535int offset = field->offset();1536add_uses_to_worklist(field);1537// Loop over all bases of this field and push on worklist Field nodes1538// with the same offset and base (since they may reference the same field).1539for (BaseIterator i(field); i.has_next(); i.next()) {1540PointsToNode* base = i.get();1541add_fields_to_worklist(field, base);1542// Check if the base was source object of arraycopy and go over arraycopy's1543// destination objects since values stored to a field of source object are1544// accessable by uses (loads) of fields of destination objects.1545if (base->arraycopy_src()) {1546for (UseIterator j(base); j.has_next(); j.next()) {1547PointsToNode* arycp = j.get();1548if (arycp->is_Arraycopy()) {1549for (UseIterator k(arycp); k.has_next(); k.next()) {1550PointsToNode* abase = k.get();1551if (abase->arraycopy_dst() && abase != base) {1552// Look for the same arraycopy reference.1553add_fields_to_worklist(field, abase);1554}1555}1556}1557}1558}1559}1560}15611562// Put on worklist all related field nodes.1563void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {1564int offset = field->offset();1565if (base->is_LocalVar()) {1566for (UseIterator j(base); j.has_next(); j.next()) {1567PointsToNode* f = j.get();1568if (PointsToNode::is_base_use(f)) { // Field1569f = PointsToNode::get_use_node(f);1570if (f == field || !f->as_Field()->is_oop()) {1571continue;1572}1573int offs = f->as_Field()->offset();1574if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {1575add_to_worklist(f);1576}1577}1578}1579} else {1580assert(base->is_JavaObject(), "sanity");1581if (// Skip phantom_object since it is only used to indicate that1582// this field's content globally escapes.1583(base != phantom_obj) &&1584// NULL object node does not have fields.1585(base != null_obj)) {1586for (EdgeIterator i(base); i.has_next(); i.next()) {1587PointsToNode* f = i.get();1588// Skip arraycopy edge since store to destination object field1589// does not update value in source object field.1590if (f->is_Arraycopy()) {1591assert(base->arraycopy_dst(), "sanity");1592continue;1593}1594if (f == field || !f->as_Field()->is_oop()) {1595continue;1596}1597int offs = f->as_Field()->offset();1598if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {1599add_to_worklist(f);1600}1601}1602}1603}1604}16051606// Find fields which have unknown value.1607int ConnectionGraph::find_field_value(FieldNode* field) {1608// Escaped fields should have init value already.1609assert(field->escape_state() == PointsToNode::NoEscape, "sanity");1610int new_edges = 0;1611for (BaseIterator i(field); i.has_next(); i.next()) {1612PointsToNode* base = i.get();1613if (base->is_JavaObject()) {1614// Skip Allocate's fields which will be processed later.1615if (base->ideal_node()->is_Allocate()) {1616return 0;1617}1618assert(base == null_obj, "only NULL ptr base expected here");1619}1620}1621if (add_edge(field, phantom_obj)) {1622// New edge was added1623new_edges++;1624add_field_uses_to_worklist(field);1625}1626return new_edges;1627}16281629// Find fields initializing values for allocations.1630int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {1631assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");1632Node* alloc = pta->ideal_node();16331634// Do nothing for Allocate nodes since its fields values are1635// "known" unless they are initialized by arraycopy/clone.1636if (alloc->is_Allocate() && !pta->arraycopy_dst()) {1637return 0;1638}1639assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity");1640#ifdef ASSERT1641if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == NULL) {1642const char* name = alloc->as_CallStaticJava()->_name;1643assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");1644}1645#endif1646// Non-escaped allocation returned from Java or runtime call have unknown values in fields.1647int new_edges = 0;1648for (EdgeIterator i(pta); i.has_next(); i.next()) {1649PointsToNode* field = i.get();1650if (field->is_Field() && field->as_Field()->is_oop()) {1651if (add_edge(field, phantom_obj)) {1652// New edge was added1653new_edges++;1654add_field_uses_to_worklist(field->as_Field());1655}1656}1657}1658return new_edges;1659}16601661// Find fields initializing values for allocations.1662int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseTransform* phase) {1663assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");1664Node* alloc = pta->ideal_node();1665// Do nothing for Call nodes since its fields values are unknown.1666if (!alloc->is_Allocate()) {1667return 0;1668}1669InitializeNode* ini = alloc->as_Allocate()->initialization();1670bool visited_bottom_offset = false;1671GrowableArray<int> offsets_worklist;1672int new_edges = 0;16731674// Check if an oop field's initializing value is recorded and add1675// a corresponding NULL if field's value if it is not recorded.1676// Connection Graph does not record a default initialization by NULL1677// captured by Initialize node.1678//1679for (EdgeIterator i(pta); i.has_next(); i.next()) {1680PointsToNode* field = i.get(); // Field (AddP)1681if (!field->is_Field() || !field->as_Field()->is_oop()) {1682continue; // Not oop field1683}1684int offset = field->as_Field()->offset();1685if (offset == Type::OffsetBot) {1686if (!visited_bottom_offset) {1687// OffsetBot is used to reference array's element,1688// always add reference to NULL to all Field nodes since we don't1689// known which element is referenced.1690if (add_edge(field, null_obj)) {1691// New edge was added1692new_edges++;1693add_field_uses_to_worklist(field->as_Field());1694visited_bottom_offset = true;1695}1696}1697} else {1698// Check only oop fields.1699const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type();1700if (adr_type->isa_rawptr()) {1701#ifdef ASSERT1702// Raw pointers are used for initializing stores so skip it1703// since it should be recorded already1704Node* base = get_addp_base(field->ideal_node());1705assert(adr_type->isa_rawptr() && is_captured_store_address(field->ideal_node()), "unexpected pointer type");1706#endif1707continue;1708}1709if (!offsets_worklist.contains(offset)) {1710offsets_worklist.append(offset);1711Node* value = NULL;1712if (ini != NULL) {1713// StoreP::memory_type() == T_ADDRESS1714BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS;1715Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase);1716// Make sure initializing store has the same type as this AddP.1717// This AddP may reference non existing field because it is on a1718// dead branch of bimorphic call which is not eliminated yet.1719if (store != NULL && store->is_Store() &&1720store->as_Store()->memory_type() == ft) {1721value = store->in(MemNode::ValueIn);1722#ifdef ASSERT1723if (VerifyConnectionGraph) {1724// Verify that AddP already points to all objects the value points to.1725PointsToNode* val = ptnode_adr(value->_idx);1726assert((val != NULL), "should be processed already");1727PointsToNode* missed_obj = NULL;1728if (val->is_JavaObject()) {1729if (!field->points_to(val->as_JavaObject())) {1730missed_obj = val;1731}1732} else {1733if (!val->is_LocalVar() || (val->edge_count() == 0)) {1734tty->print_cr("----------init store has invalid value -----");1735store->dump();1736val->dump();1737assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");1738}1739for (EdgeIterator j(val); j.has_next(); j.next()) {1740PointsToNode* obj = j.get();1741if (obj->is_JavaObject()) {1742if (!field->points_to(obj->as_JavaObject())) {1743missed_obj = obj;1744break;1745}1746}1747}1748}1749if (missed_obj != NULL) {1750tty->print_cr("----------field---------------------------------");1751field->dump();1752tty->print_cr("----------missed referernce to object-----------");1753missed_obj->dump();1754tty->print_cr("----------object referernced by init store -----");1755store->dump();1756val->dump();1757assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");1758}1759}1760#endif1761} else {1762// There could be initializing stores which follow allocation.1763// For example, a volatile field store is not collected1764// by Initialize node.1765//1766// Need to check for dependent loads to separate such stores from1767// stores which follow loads. For now, add initial value NULL so1768// that compare pointers optimization works correctly.1769}1770}1771if (value == NULL) {1772// A field's initializing value was not recorded. Add NULL.1773if (add_edge(field, null_obj)) {1774// New edge was added1775new_edges++;1776add_field_uses_to_worklist(field->as_Field());1777}1778}1779}1780}1781}1782return new_edges;1783}17841785// Adjust scalar_replaceable state after Connection Graph is built.1786void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj) {1787// Search for non-escaping objects which are not scalar replaceable1788// and mark them to propagate the state to referenced objects.17891790for (UseIterator i(jobj); i.has_next(); i.next()) {1791PointsToNode* use = i.get();1792if (use->is_Arraycopy()) {1793continue;1794}1795if (use->is_Field()) {1796FieldNode* field = use->as_Field();1797assert(field->is_oop() && field->scalar_replaceable(), "sanity");1798// 1. An object is not scalar replaceable if the field into which it is1799// stored has unknown offset (stored into unknown element of an array).1800if (field->offset() == Type::OffsetBot) {1801jobj->set_scalar_replaceable(false);1802return;1803}1804// 2. An object is not scalar replaceable if the field into which it is1805// stored has multiple bases one of which is null.1806if (field->base_count() > 1) {1807for (BaseIterator i(field); i.has_next(); i.next()) {1808PointsToNode* base = i.get();1809if (base == null_obj) {1810jobj->set_scalar_replaceable(false);1811return;1812}1813}1814}1815}1816assert(use->is_Field() || use->is_LocalVar(), "sanity");1817// 3. An object is not scalar replaceable if it is merged with other objects.1818for (EdgeIterator j(use); j.has_next(); j.next()) {1819PointsToNode* ptn = j.get();1820if (ptn->is_JavaObject() && ptn != jobj) {1821// Mark all objects.1822jobj->set_scalar_replaceable(false);1823ptn->set_scalar_replaceable(false);1824}1825}1826if (!jobj->scalar_replaceable()) {1827return;1828}1829}18301831for (EdgeIterator j(jobj); j.has_next(); j.next()) {1832if (j.get()->is_Arraycopy()) {1833continue;1834}18351836// Non-escaping object node should point only to field nodes.1837FieldNode* field = j.get()->as_Field();1838int offset = field->as_Field()->offset();18391840// 4. An object is not scalar replaceable if it has a field with unknown1841// offset (array's element is accessed in loop).1842if (offset == Type::OffsetBot) {1843jobj->set_scalar_replaceable(false);1844return;1845}1846// 5. Currently an object is not scalar replaceable if a LoadStore node1847// access its field since the field value is unknown after it.1848//1849Node* n = field->ideal_node();18501851// Test for an unsafe access that was parsed as maybe off heap1852// (with a CheckCastPP to raw memory).1853assert(n->is_AddP(), "expect an address computation");1854if (n->in(AddPNode::Base)->is_top() &&1855n->in(AddPNode::Address)->Opcode() == Op_CheckCastPP) {1856assert(n->in(AddPNode::Address)->bottom_type()->isa_rawptr(), "raw address so raw cast expected");1857assert(_igvn->type(n->in(AddPNode::Address)->in(1))->isa_oopptr(), "cast pattern at unsafe access expected");1858jobj->set_scalar_replaceable(false);1859return;1860}18611862for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {1863Node* u = n->fast_out(i);1864if (u->is_LoadStore() || (u->is_Mem() && u->as_Mem()->is_mismatched_access())) {1865jobj->set_scalar_replaceable(false);1866return;1867}1868}18691870// 6. Or the address may point to more then one object. This may produce1871// the false positive result (set not scalar replaceable)1872// since the flow-insensitive escape analysis can't separate1873// the case when stores overwrite the field's value from the case1874// when stores happened on different control branches.1875//1876// Note: it will disable scalar replacement in some cases:1877//1878// Point p[] = new Point[1];1879// p[0] = new Point(); // Will be not scalar replaced1880//1881// but it will save us from incorrect optimizations in next cases:1882//1883// Point p[] = new Point[1];1884// if ( x ) p[0] = new Point(); // Will be not scalar replaced1885//1886if (field->base_count() > 1) {1887for (BaseIterator i(field); i.has_next(); i.next()) {1888PointsToNode* base = i.get();1889// Don't take into account LocalVar nodes which1890// may point to only one object which should be also1891// this field's base by now.1892if (base->is_JavaObject() && base != jobj) {1893// Mark all bases.1894jobj->set_scalar_replaceable(false);1895base->set_scalar_replaceable(false);1896}1897}1898}1899}1900}19011902#ifdef ASSERT1903void ConnectionGraph::verify_connection_graph(1904GrowableArray<PointsToNode*>& ptnodes_worklist,1905GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,1906GrowableArray<JavaObjectNode*>& java_objects_worklist,1907GrowableArray<Node*>& addp_worklist) {1908// Verify that graph is complete - no new edges could be added.1909int java_objects_length = java_objects_worklist.length();1910int non_escaped_length = non_escaped_allocs_worklist.length();1911int new_edges = 0;1912for (int next = 0; next < java_objects_length; ++next) {1913JavaObjectNode* ptn = java_objects_worklist.at(next);1914new_edges += add_java_object_edges(ptn, true);1915}1916assert(new_edges == 0, "graph was not complete");1917// Verify that escape state is final.1918int length = non_escaped_allocs_worklist.length();1919find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist);1920assert((non_escaped_length == non_escaped_allocs_worklist.length()) &&1921(non_escaped_length == length) &&1922(_worklist.length() == 0), "escape state was not final");19231924// Verify fields information.1925int addp_length = addp_worklist.length();1926for (int next = 0; next < addp_length; ++next ) {1927Node* n = addp_worklist.at(next);1928FieldNode* field = ptnode_adr(n->_idx)->as_Field();1929if (field->is_oop()) {1930// Verify that field has all bases1931Node* base = get_addp_base(n);1932PointsToNode* ptn = ptnode_adr(base->_idx);1933if (ptn->is_JavaObject()) {1934assert(field->has_base(ptn->as_JavaObject()), "sanity");1935} else {1936assert(ptn->is_LocalVar(), "sanity");1937for (EdgeIterator i(ptn); i.has_next(); i.next()) {1938PointsToNode* e = i.get();1939if (e->is_JavaObject()) {1940assert(field->has_base(e->as_JavaObject()), "sanity");1941}1942}1943}1944// Verify that all fields have initializing values.1945if (field->edge_count() == 0) {1946tty->print_cr("----------field does not have references----------");1947field->dump();1948for (BaseIterator i(field); i.has_next(); i.next()) {1949PointsToNode* base = i.get();1950tty->print_cr("----------field has next base---------------------");1951base->dump();1952if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) {1953tty->print_cr("----------base has fields-------------------------");1954for (EdgeIterator j(base); j.has_next(); j.next()) {1955j.get()->dump();1956}1957tty->print_cr("----------base has references---------------------");1958for (UseIterator j(base); j.has_next(); j.next()) {1959j.get()->dump();1960}1961}1962}1963for (UseIterator i(field); i.has_next(); i.next()) {1964i.get()->dump();1965}1966assert(field->edge_count() > 0, "sanity");1967}1968}1969}1970}1971#endif19721973// Optimize ideal graph.1974void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,1975GrowableArray<Node*>& storestore_worklist) {1976Compile* C = _compile;1977PhaseIterGVN* igvn = _igvn;1978if (EliminateLocks) {1979// Mark locks before changing ideal graph.1980int cnt = C->macro_count();1981for (int i = 0; i < cnt; i++) {1982Node *n = C->macro_node(i);1983if (n->is_AbstractLock()) { // Lock and Unlock nodes1984AbstractLockNode* alock = n->as_AbstractLock();1985if (!alock->is_non_esc_obj()) {1986if (not_global_escape(alock->obj_node())) {1987assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");1988// The lock could be marked eliminated by lock coarsening1989// code during first IGVN before EA. Replace coarsened flag1990// to eliminate all associated locks/unlocks.1991#ifdef ASSERT1992alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");1993#endif1994alock->set_non_esc_obj();1995}1996}1997}1998}1999}20002001if (OptimizePtrCompare) {2002for (int i = 0; i < ptr_cmp_worklist.length(); i++) {2003Node *n = ptr_cmp_worklist.at(i);2004const TypeInt* tcmp = optimize_ptr_compare(n);2005if (tcmp->singleton()) {2006Node* cmp = igvn->makecon(tcmp);2007#ifndef PRODUCT2008if (PrintOptimizePtrCompare) {2009tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));2010if (Verbose) {2011n->dump(1);2012}2013}2014#endif2015igvn->replace_node(n, cmp);2016}2017}2018}20192020// For MemBarStoreStore nodes added in library_call.cpp, check2021// escape status of associated AllocateNode and optimize out2022// MemBarStoreStore node if the allocated object never escapes.2023for (int i = 0; i < storestore_worklist.length(); i++) {2024Node* storestore = storestore_worklist.at(i);2025assert(storestore->is_MemBarStoreStore(), "");2026Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);2027if (alloc->is_Allocate() && not_global_escape(alloc)) {2028MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);2029mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));2030mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));2031igvn->register_new_node_with_optimizer(mb);2032igvn->replace_node(storestore, mb);2033}2034}2035}20362037// Optimize objects compare.2038const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* n) {2039assert(OptimizePtrCompare, "sanity");2040const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO2041const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE2042const TypeInt* UNKNOWN = TypeInt::CC; // [-1, 0,1]20432044PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx);2045PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx);2046JavaObjectNode* jobj1 = unique_java_object(n->in(1));2047JavaObjectNode* jobj2 = unique_java_object(n->in(2));2048assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");2049assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");20502051// Check simple cases first.2052if (jobj1 != NULL) {2053if (jobj1->escape_state() == PointsToNode::NoEscape) {2054if (jobj1 == jobj2) {2055// Comparing the same not escaping object.2056return EQ;2057}2058Node* obj = jobj1->ideal_node();2059// Comparing not escaping allocation.2060if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&2061!ptn2->points_to(jobj1)) {2062return NE; // This includes nullness check.2063}2064}2065}2066if (jobj2 != NULL) {2067if (jobj2->escape_state() == PointsToNode::NoEscape) {2068Node* obj = jobj2->ideal_node();2069// Comparing not escaping allocation.2070if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&2071!ptn1->points_to(jobj2)) {2072return NE; // This includes nullness check.2073}2074}2075}2076if (jobj1 != NULL && jobj1 != phantom_obj &&2077jobj2 != NULL && jobj2 != phantom_obj &&2078jobj1->ideal_node()->is_Con() &&2079jobj2->ideal_node()->is_Con()) {2080// Klass or String constants compare. Need to be careful with2081// compressed pointers - compare types of ConN and ConP instead of nodes.2082const Type* t1 = jobj1->ideal_node()->get_ptr_type();2083const Type* t2 = jobj2->ideal_node()->get_ptr_type();2084if (t1->make_ptr() == t2->make_ptr()) {2085return EQ;2086} else {2087return NE;2088}2089}2090if (ptn1->meet(ptn2)) {2091return UNKNOWN; // Sets are not disjoint2092}20932094// Sets are disjoint.2095bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj);2096bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj);2097bool set1_has_null_ptr = ptn1->points_to(null_obj);2098bool set2_has_null_ptr = ptn2->points_to(null_obj);2099if ((set1_has_unknown_ptr && set2_has_null_ptr) ||2100(set2_has_unknown_ptr && set1_has_null_ptr)) {2101// Check nullness of unknown object.2102return UNKNOWN;2103}21042105// Disjointness by itself is not sufficient since2106// alias analysis is not complete for escaped objects.2107// Disjoint sets are definitely unrelated only when2108// at least one set has only not escaping allocations.2109if (!set1_has_unknown_ptr && !set1_has_null_ptr) {2110if (ptn1->non_escaping_allocation()) {2111return NE;2112}2113}2114if (!set2_has_unknown_ptr && !set2_has_null_ptr) {2115if (ptn2->non_escaping_allocation()) {2116return NE;2117}2118}2119return UNKNOWN;2120}21212122// Connection Graph construction functions.21232124void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) {2125PointsToNode* ptadr = _nodes.at(n->_idx);2126if (ptadr != NULL) {2127assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity");2128return;2129}2130Compile* C = _compile;2131ptadr = new (C->comp_arena()) LocalVarNode(this, n, es);2132map_ideal_node(n, ptadr);2133}21342135void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) {2136PointsToNode* ptadr = _nodes.at(n->_idx);2137if (ptadr != NULL) {2138assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity");2139return;2140}2141Compile* C = _compile;2142ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es);2143map_ideal_node(n, ptadr);2144}21452146void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) {2147PointsToNode* ptadr = _nodes.at(n->_idx);2148if (ptadr != NULL) {2149assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity");2150return;2151}2152bool unsafe = false;2153bool is_oop = is_oop_field(n, offset, &unsafe);2154if (unsafe) {2155es = PointsToNode::GlobalEscape;2156}2157Compile* C = _compile;2158FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop);2159map_ideal_node(n, field);2160}21612162void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es,2163PointsToNode* src, PointsToNode* dst) {2164assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");2165assert((src != null_obj) && (dst != null_obj), "not for ConP NULL");2166PointsToNode* ptadr = _nodes.at(n->_idx);2167if (ptadr != NULL) {2168assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");2169return;2170}2171Compile* C = _compile;2172ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);2173map_ideal_node(n, ptadr);2174// Add edge from arraycopy node to source object.2175(void)add_edge(ptadr, src);2176src->set_arraycopy_src();2177// Add edge from destination object to arraycopy node.2178(void)add_edge(dst, ptadr);2179dst->set_arraycopy_dst();2180}21812182bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {2183const Type* adr_type = n->as_AddP()->bottom_type();2184BasicType bt = T_INT;2185if (offset == Type::OffsetBot) {2186// Check only oop fields.2187if (!adr_type->isa_aryptr() ||2188(adr_type->isa_aryptr()->klass() == NULL) ||2189adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {2190// OffsetBot is used to reference array's element. Ignore first AddP.2191if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) {2192bt = T_OBJECT;2193}2194}2195} else if (offset != oopDesc::klass_offset_in_bytes()) {2196if (adr_type->isa_instptr()) {2197ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();2198if (field != NULL) {2199bt = field->layout_type();2200} else {2201// Check for unsafe oop field access2202if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||2203n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||2204n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||2205BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {2206bt = T_OBJECT;2207(*unsafe) = true;2208}2209}2210} else if (adr_type->isa_aryptr()) {2211if (offset == arrayOopDesc::length_offset_in_bytes()) {2212// Ignore array length load.2213} else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {2214// Ignore first AddP.2215} else {2216const Type* elemtype = adr_type->isa_aryptr()->elem();2217bt = elemtype->array_element_basic_type();2218}2219} else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {2220// Allocation initialization, ThreadLocal field access, unsafe access2221if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||2222n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||2223n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||2224BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {2225bt = T_OBJECT;2226}2227}2228}2229// Note: T_NARROWOOP is not classed as a real reference type2230return (is_reference_type(bt) || bt == T_NARROWOOP);2231}22322233// Returns unique pointed java object or NULL.2234JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {2235assert(!_collecting, "should not call when constructed graph");2236// If the node was created after the escape computation we can't answer.2237uint idx = n->_idx;2238if (idx >= nodes_size()) {2239return NULL;2240}2241PointsToNode* ptn = ptnode_adr(idx);2242if (ptn == NULL) {2243return NULL;2244}2245if (ptn->is_JavaObject()) {2246return ptn->as_JavaObject();2247}2248assert(ptn->is_LocalVar(), "sanity");2249// Check all java objects it points to.2250JavaObjectNode* jobj = NULL;2251for (EdgeIterator i(ptn); i.has_next(); i.next()) {2252PointsToNode* e = i.get();2253if (e->is_JavaObject()) {2254if (jobj == NULL) {2255jobj = e->as_JavaObject();2256} else if (jobj != e) {2257return NULL;2258}2259}2260}2261return jobj;2262}22632264// Return true if this node points only to non-escaping allocations.2265bool PointsToNode::non_escaping_allocation() {2266if (is_JavaObject()) {2267Node* n = ideal_node();2268if (n->is_Allocate() || n->is_CallStaticJava()) {2269return (escape_state() == PointsToNode::NoEscape);2270} else {2271return false;2272}2273}2274assert(is_LocalVar(), "sanity");2275// Check all java objects it points to.2276for (EdgeIterator i(this); i.has_next(); i.next()) {2277PointsToNode* e = i.get();2278if (e->is_JavaObject()) {2279Node* n = e->ideal_node();2280if ((e->escape_state() != PointsToNode::NoEscape) ||2281!(n->is_Allocate() || n->is_CallStaticJava())) {2282return false;2283}2284}2285}2286return true;2287}22882289// Return true if we know the node does not escape globally.2290bool ConnectionGraph::not_global_escape(Node *n) {2291assert(!_collecting, "should not call during graph construction");2292// If the node was created after the escape computation we can't answer.2293uint idx = n->_idx;2294if (idx >= nodes_size()) {2295return false;2296}2297PointsToNode* ptn = ptnode_adr(idx);2298if (ptn == NULL) {2299return false; // not in congraph (e.g. ConI)2300}2301PointsToNode::EscapeState es = ptn->escape_state();2302// If we have already computed a value, return it.2303if (es >= PointsToNode::GlobalEscape) {2304return false;2305}2306if (ptn->is_JavaObject()) {2307return true; // (es < PointsToNode::GlobalEscape);2308}2309assert(ptn->is_LocalVar(), "sanity");2310// Check all java objects it points to.2311for (EdgeIterator i(ptn); i.has_next(); i.next()) {2312if (i.get()->escape_state() >= PointsToNode::GlobalEscape) {2313return false;2314}2315}2316return true;2317}231823192320// Helper functions23212322// Return true if this node points to specified node or nodes it points to.2323bool PointsToNode::points_to(JavaObjectNode* ptn) const {2324if (is_JavaObject()) {2325return (this == ptn);2326}2327assert(is_LocalVar() || is_Field(), "sanity");2328for (EdgeIterator i(this); i.has_next(); i.next()) {2329if (i.get() == ptn) {2330return true;2331}2332}2333return false;2334}23352336// Return true if one node points to an other.2337bool PointsToNode::meet(PointsToNode* ptn) {2338if (this == ptn) {2339return true;2340} else if (ptn->is_JavaObject()) {2341return this->points_to(ptn->as_JavaObject());2342} else if (this->is_JavaObject()) {2343return ptn->points_to(this->as_JavaObject());2344}2345assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");2346int ptn_count = ptn->edge_count();2347for (EdgeIterator i(this); i.has_next(); i.next()) {2348PointsToNode* this_e = i.get();2349for (int j = 0; j < ptn_count; j++) {2350if (this_e == ptn->edge(j)) {2351return true;2352}2353}2354}2355return false;2356}23572358#ifdef ASSERT2359// Return true if bases point to this java object.2360bool FieldNode::has_base(JavaObjectNode* jobj) const {2361for (BaseIterator i(this); i.has_next(); i.next()) {2362if (i.get() == jobj) {2363return true;2364}2365}2366return false;2367}2368#endif23692370bool ConnectionGraph::is_captured_store_address(Node* addp) {2371// Handle simple case first.2372assert(_igvn->type(addp)->isa_oopptr() == NULL, "should be raw access");2373if (addp->in(AddPNode::Address)->is_Proj() && addp->in(AddPNode::Address)->in(0)->is_Allocate()) {2374return true;2375} else if (addp->in(AddPNode::Address)->is_Phi()) {2376for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {2377Node* addp_use = addp->fast_out(i);2378if (addp_use->is_Store()) {2379for (DUIterator_Fast jmax, j = addp_use->fast_outs(jmax); j < jmax; j++) {2380if (addp_use->fast_out(j)->is_Initialize()) {2381return true;2382}2383}2384}2385}2386}2387return false;2388}23892390int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {2391const Type *adr_type = phase->type(adr);2392if (adr->is_AddP() && adr_type->isa_oopptr() == NULL && is_captured_store_address(adr)) {2393// We are computing a raw address for a store captured by an Initialize2394// compute an appropriate address type. AddP cases #3 and #5 (see below).2395int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);2396assert(offs != Type::OffsetBot ||2397adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),2398"offset must be a constant or it is initialization of array");2399return offs;2400}2401const TypePtr *t_ptr = adr_type->isa_ptr();2402assert(t_ptr != NULL, "must be a pointer type");2403return t_ptr->offset();2404}24052406Node* ConnectionGraph::get_addp_base(Node *addp) {2407assert(addp->is_AddP(), "must be AddP");2408//2409// AddP cases for Base and Address inputs:2410// case #1. Direct object's field reference:2411// Allocate2412// |2413// Proj #5 ( oop result )2414// |2415// CheckCastPP (cast to instance type)2416// | |2417// AddP ( base == address )2418//2419// case #2. Indirect object's field reference:2420// Phi2421// |2422// CastPP (cast to instance type)2423// | |2424// AddP ( base == address )2425//2426// case #3. Raw object's field reference for Initialize node:2427// Allocate2428// |2429// Proj #5 ( oop result )2430// top |2431// \ |2432// AddP ( base == top )2433//2434// case #4. Array's element reference:2435// {CheckCastPP | CastPP}2436// | | |2437// | AddP ( array's element offset )2438// | |2439// AddP ( array's offset )2440//2441// case #5. Raw object's field reference for arraycopy stub call:2442// The inline_native_clone() case when the arraycopy stub is called2443// after the allocation before Initialize and CheckCastPP nodes.2444// Allocate2445// |2446// Proj #5 ( oop result )2447// | |2448// AddP ( base == address )2449//2450// case #6. Constant Pool, ThreadLocal, CastX2P or2451// Raw object's field reference:2452// {ConP, ThreadLocal, CastX2P, raw Load}2453// top |2454// \ |2455// AddP ( base == top )2456//2457// case #7. Klass's field reference.2458// LoadKlass2459// | |2460// AddP ( base == address )2461//2462// case #8. narrow Klass's field reference.2463// LoadNKlass2464// |2465// DecodeN2466// | |2467// AddP ( base == address )2468//2469// case #9. Mixed unsafe access2470// {instance}2471// |2472// CheckCastPP (raw)2473// top |2474// \ |2475// AddP ( base == top )2476//2477Node *base = addp->in(AddPNode::Base);2478if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9.2479base = addp->in(AddPNode::Address);2480while (base->is_AddP()) {2481// Case #6 (unsafe access) may have several chained AddP nodes.2482assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");2483base = base->in(AddPNode::Address);2484}2485if (base->Opcode() == Op_CheckCastPP &&2486base->bottom_type()->isa_rawptr() &&2487_igvn->type(base->in(1))->isa_oopptr()) {2488base = base->in(1); // Case #92489} else {2490Node* uncast_base = base->uncast();2491int opcode = uncast_base->Opcode();2492assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||2493opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||2494(uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||2495is_captured_store_address(addp), "sanity");2496}2497}2498return base;2499}25002501Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {2502assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");2503Node* addp2 = addp->raw_out(0);2504if (addp->outcnt() == 1 && addp2->is_AddP() &&2505addp2->in(AddPNode::Base) == n &&2506addp2->in(AddPNode::Address) == addp) {2507assert(addp->in(AddPNode::Base) == n, "expecting the same base");2508//2509// Find array's offset to push it on worklist first and2510// as result process an array's element offset first (pushed second)2511// to avoid CastPP for the array's offset.2512// Otherwise the inserted CastPP (LocalVar) will point to what2513// the AddP (Field) points to. Which would be wrong since2514// the algorithm expects the CastPP has the same point as2515// as AddP's base CheckCastPP (LocalVar).2516//2517// ArrayAllocation2518// |2519// CheckCastPP2520// |2521// memProj (from ArrayAllocation CheckCastPP)2522// | ||2523// | || Int (element index)2524// | || | ConI (log(element size))2525// | || | /2526// | || LShift2527// | || /2528// | AddP (array's element offset)2529// | |2530// | | ConI (array's offset: #12(32-bits) or #24(64-bits))2531// | / /2532// AddP (array's offset)2533// |2534// Load/Store (memory operation on array's element)2535//2536return addp2;2537}2538return NULL;2539}25402541//2542// Adjust the type and inputs of an AddP which computes the2543// address of a field of an instance2544//2545bool ConnectionGraph::split_AddP(Node *addp, Node *base) {2546PhaseGVN* igvn = _igvn;2547const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();2548assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");2549const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();2550if (t == NULL) {2551// We are computing a raw address for a store captured by an Initialize2552// compute an appropriate address type (cases #3 and #5).2553assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");2554assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");2555intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);2556assert(offs != Type::OffsetBot, "offset must be a constant");2557t = base_t->add_offset(offs)->is_oopptr();2558}2559int inst_id = base_t->instance_id();2560assert(!t->is_known_instance() || t->instance_id() == inst_id,2561"old type must be non-instance or match new type");25622563// The type 't' could be subclass of 'base_t'.2564// As result t->offset() could be large then base_t's size and it will2565// cause the failure in add_offset() with narrow oops since TypeOopPtr()2566// constructor verifies correctness of the offset.2567//2568// It could happened on subclass's branch (from the type profiling2569// inlining) which was not eliminated during parsing since the exactness2570// of the allocation type was not propagated to the subclass type check.2571//2572// Or the type 't' could be not related to 'base_t' at all.2573// It could happened when CHA type is different from MDO type on a dead path2574// (for example, from instanceof check) which is not collapsed during parsing.2575//2576// Do nothing for such AddP node and don't process its users since2577// this code branch will go away.2578//2579if (!t->is_known_instance() &&2580!base_t->klass()->is_subtype_of(t->klass())) {2581return false; // bail out2582}2583const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();2584// Do NOT remove the next line: ensure a new alias index is allocated2585// for the instance type. Note: C++ will not remove it since the call2586// has side effect.2587int alias_idx = _compile->get_alias_index(tinst);2588igvn->set_type(addp, tinst);2589// record the allocation in the node map2590set_map(addp, get_map(base->_idx));2591// Set addp's Base and Address to 'base'.2592Node *abase = addp->in(AddPNode::Base);2593Node *adr = addp->in(AddPNode::Address);2594if (adr->is_Proj() && adr->in(0)->is_Allocate() &&2595adr->in(0)->_idx == (uint)inst_id) {2596// Skip AddP cases #3 and #5.2597} else {2598assert(!abase->is_top(), "sanity"); // AddP case #32599if (abase != base) {2600igvn->hash_delete(addp);2601addp->set_req(AddPNode::Base, base);2602if (abase == adr) {2603addp->set_req(AddPNode::Address, base);2604} else {2605// AddP case #4 (adr is array's element offset AddP node)2606#ifdef ASSERT2607const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();2608assert(adr->is_AddP() && atype != NULL &&2609atype->instance_id() == inst_id, "array's element offset should be processed first");2610#endif2611}2612igvn->hash_insert(addp);2613}2614}2615// Put on IGVN worklist since at least addp's type was changed above.2616record_for_optimizer(addp);2617return true;2618}26192620//2621// Create a new version of orig_phi if necessary. Returns either the newly2622// created phi or an existing phi. Sets create_new to indicate whether a new2623// phi was created. Cache the last newly created phi in the node map.2624//2625PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, bool &new_created) {2626Compile *C = _compile;2627PhaseGVN* igvn = _igvn;2628new_created = false;2629int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());2630// nothing to do if orig_phi is bottom memory or matches alias_idx2631if (phi_alias_idx == alias_idx) {2632return orig_phi;2633}2634// Have we recently created a Phi for this alias index?2635PhiNode *result = get_map_phi(orig_phi->_idx);2636if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) {2637return result;2638}2639// Previous check may fail when the same wide memory Phi was split into Phis2640// for different memory slices. Search all Phis for this region.2641if (result != NULL) {2642Node* region = orig_phi->in(0);2643for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {2644Node* phi = region->fast_out(i);2645if (phi->is_Phi() &&2646C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {2647assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");2648return phi->as_Phi();2649}2650}2651}2652if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) {2653if (C->do_escape_analysis() == true && !C->failing()) {2654// Retry compilation without escape analysis.2655// If this is the first failure, the sentinel string will "stick"2656// to the Compile object, and the C2Compiler will see it and retry.2657C->record_failure(C2Compiler::retry_no_escape_analysis());2658}2659return NULL;2660}2661orig_phi_worklist.append_if_missing(orig_phi);2662const TypePtr *atype = C->get_adr_type(alias_idx);2663result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);2664C->copy_node_notes_to(result, orig_phi);2665igvn->set_type(result, result->bottom_type());2666record_for_optimizer(result);2667set_map(orig_phi, result);2668new_created = true;2669return result;2670}26712672//2673// Return a new version of Memory Phi "orig_phi" with the inputs having the2674// specified alias index.2675//2676PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist) {2677assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");2678Compile *C = _compile;2679PhaseGVN* igvn = _igvn;2680bool new_phi_created;2681PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);2682if (!new_phi_created) {2683return result;2684}2685GrowableArray<PhiNode *> phi_list;2686GrowableArray<uint> cur_input;2687PhiNode *phi = orig_phi;2688uint idx = 1;2689bool finished = false;2690while(!finished) {2691while (idx < phi->req()) {2692Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist);2693if (mem != NULL && mem->is_Phi()) {2694PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);2695if (new_phi_created) {2696// found an phi for which we created a new split, push current one on worklist and begin2697// processing new one2698phi_list.push(phi);2699cur_input.push(idx);2700phi = mem->as_Phi();2701result = newphi;2702idx = 1;2703continue;2704} else {2705mem = newphi;2706}2707}2708if (C->failing()) {2709return NULL;2710}2711result->set_req(idx++, mem);2712}2713#ifdef ASSERT2714// verify that the new Phi has an input for each input of the original2715assert( phi->req() == result->req(), "must have same number of inputs.");2716assert( result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match");2717#endif2718// Check if all new phi's inputs have specified alias index.2719// Otherwise use old phi.2720for (uint i = 1; i < phi->req(); i++) {2721Node* in = result->in(i);2722assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond.");2723}2724// we have finished processing a Phi, see if there are any more to do2725finished = (phi_list.length() == 0 );2726if (!finished) {2727phi = phi_list.pop();2728idx = cur_input.pop();2729PhiNode *prev_result = get_map_phi(phi->_idx);2730prev_result->set_req(idx++, result);2731result = prev_result;2732}2733}2734return result;2735}27362737//2738// The next methods are derived from methods in MemNode.2739//2740Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {2741Node *mem = mmem;2742// TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally2743// means an array I have not precisely typed yet. Do not do any2744// alias stuff with it any time soon.2745if (toop->base() != Type::AnyPtr &&2746!(toop->klass() != NULL &&2747toop->klass()->is_java_lang_Object() &&2748toop->offset() == Type::OffsetBot)) {2749mem = mmem->memory_at(alias_idx);2750// Update input if it is progress over what we have now2751}2752return mem;2753}27542755//2756// Move memory users to their memory slices.2757//2758void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis) {2759Compile* C = _compile;2760PhaseGVN* igvn = _igvn;2761const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr();2762assert(tp != NULL, "ptr type");2763int alias_idx = C->get_alias_index(tp);2764int general_idx = C->get_general_index(alias_idx);27652766// Move users first2767for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {2768Node* use = n->fast_out(i);2769if (use->is_MergeMem()) {2770MergeMemNode* mmem = use->as_MergeMem();2771assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice");2772if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) {2773continue; // Nothing to do2774}2775// Replace previous general reference to mem node.2776uint orig_uniq = C->unique();2777Node* m = find_inst_mem(n, general_idx, orig_phis);2778assert(orig_uniq == C->unique(), "no new nodes");2779mmem->set_memory_at(general_idx, m);2780--imax;2781--i;2782} else if (use->is_MemBar()) {2783assert(!use->is_Initialize(), "initializing stores should not be moved");2784if (use->req() > MemBarNode::Precedent &&2785use->in(MemBarNode::Precedent) == n) {2786// Don't move related membars.2787record_for_optimizer(use);2788continue;2789}2790tp = use->as_MemBar()->adr_type()->isa_ptr();2791if ((tp != NULL && C->get_alias_index(tp) == alias_idx) ||2792alias_idx == general_idx) {2793continue; // Nothing to do2794}2795// Move to general memory slice.2796uint orig_uniq = C->unique();2797Node* m = find_inst_mem(n, general_idx, orig_phis);2798assert(orig_uniq == C->unique(), "no new nodes");2799igvn->hash_delete(use);2800imax -= use->replace_edge(n, m, igvn);2801igvn->hash_insert(use);2802record_for_optimizer(use);2803--i;2804#ifdef ASSERT2805} else if (use->is_Mem()) {2806if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) {2807// Don't move related cardmark.2808continue;2809}2810// Memory nodes should have new memory input.2811tp = igvn->type(use->in(MemNode::Address))->isa_ptr();2812assert(tp != NULL, "ptr type");2813int idx = C->get_alias_index(tp);2814assert(get_map(use->_idx) != NULL || idx == alias_idx,2815"Following memory nodes should have new memory input or be on the same memory slice");2816} else if (use->is_Phi()) {2817// Phi nodes should be split and moved already.2818tp = use->as_Phi()->adr_type()->isa_ptr();2819assert(tp != NULL, "ptr type");2820int idx = C->get_alias_index(tp);2821assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");2822} else {2823use->dump();2824assert(false, "should not be here");2825#endif2826}2827}2828}28292830//2831// Search memory chain of "mem" to find a MemNode whose address2832// is the specified alias index.2833//2834Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis) {2835if (orig_mem == NULL) {2836return orig_mem;2837}2838Compile* C = _compile;2839PhaseGVN* igvn = _igvn;2840const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();2841bool is_instance = (toop != NULL) && toop->is_known_instance();2842Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);2843Node *prev = NULL;2844Node *result = orig_mem;2845while (prev != result) {2846prev = result;2847if (result == start_mem) {2848break; // hit one of our sentinels2849}2850if (result->is_Mem()) {2851const Type *at = igvn->type(result->in(MemNode::Address));2852if (at == Type::TOP) {2853break; // Dead2854}2855assert (at->isa_ptr() != NULL, "pointer type required.");2856int idx = C->get_alias_index(at->is_ptr());2857if (idx == alias_idx) {2858break; // Found2859}2860if (!is_instance && (at->isa_oopptr() == NULL ||2861!at->is_oopptr()->is_known_instance())) {2862break; // Do not skip store to general memory slice.2863}2864result = result->in(MemNode::Memory);2865}2866if (!is_instance) {2867continue; // don't search further for non-instance types2868}2869// skip over a call which does not affect this memory slice2870if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {2871Node *proj_in = result->in(0);2872if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {2873break; // hit one of our sentinels2874} else if (proj_in->is_Call()) {2875// ArrayCopy node processed here as well2876CallNode *call = proj_in->as_Call();2877if (!call->may_modify(toop, igvn)) {2878result = call->in(TypeFunc::Memory);2879}2880} else if (proj_in->is_Initialize()) {2881AllocateNode* alloc = proj_in->as_Initialize()->allocation();2882// Stop if this is the initialization for the object instance which2883// which contains this memory slice, otherwise skip over it.2884if (alloc == NULL || alloc->_idx != (uint)toop->instance_id()) {2885result = proj_in->in(TypeFunc::Memory);2886}2887} else if (proj_in->is_MemBar()) {2888// Check if there is an array copy for a clone2889// Step over GC barrier when ReduceInitialCardMarks is disabled2890BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();2891Node* control_proj_ac = bs->step_over_gc_barrier(proj_in->in(0));28922893if (control_proj_ac->is_Proj() && control_proj_ac->in(0)->is_ArrayCopy()) {2894// Stop if it is a clone2895ArrayCopyNode* ac = control_proj_ac->in(0)->as_ArrayCopy();2896if (ac->may_modify(toop, igvn)) {2897break;2898}2899}2900result = proj_in->in(TypeFunc::Memory);2901}2902} else if (result->is_MergeMem()) {2903MergeMemNode *mmem = result->as_MergeMem();2904result = step_through_mergemem(mmem, alias_idx, toop);2905if (result == mmem->base_memory()) {2906// Didn't find instance memory, search through general slice recursively.2907result = mmem->memory_at(C->get_general_index(alias_idx));2908result = find_inst_mem(result, alias_idx, orig_phis);2909if (C->failing()) {2910return NULL;2911}2912mmem->set_memory_at(alias_idx, result);2913}2914} else if (result->is_Phi() &&2915C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {2916Node *un = result->as_Phi()->unique_input(igvn);2917if (un != NULL) {2918orig_phis.append_if_missing(result->as_Phi());2919result = un;2920} else {2921break;2922}2923} else if (result->is_ClearArray()) {2924if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) {2925// Can not bypass initialization of the instance2926// we are looking for.2927break;2928}2929// Otherwise skip it (the call updated 'result' value).2930} else if (result->Opcode() == Op_SCMemProj) {2931Node* mem = result->in(0);2932Node* adr = NULL;2933if (mem->is_LoadStore()) {2934adr = mem->in(MemNode::Address);2935} else {2936assert(mem->Opcode() == Op_EncodeISOArray ||2937mem->Opcode() == Op_StrCompressedCopy, "sanity");2938adr = mem->in(3); // Memory edge corresponds to destination array2939}2940const Type *at = igvn->type(adr);2941if (at != Type::TOP) {2942assert(at->isa_ptr() != NULL, "pointer type required.");2943int idx = C->get_alias_index(at->is_ptr());2944if (idx == alias_idx) {2945// Assert in debug mode2946assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");2947break; // In product mode return SCMemProj node2948}2949}2950result = mem->in(MemNode::Memory);2951} else if (result->Opcode() == Op_StrInflatedCopy) {2952Node* adr = result->in(3); // Memory edge corresponds to destination array2953const Type *at = igvn->type(adr);2954if (at != Type::TOP) {2955assert(at->isa_ptr() != NULL, "pointer type required.");2956int idx = C->get_alias_index(at->is_ptr());2957if (idx == alias_idx) {2958// Assert in debug mode2959assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field");2960break; // In product mode return SCMemProj node2961}2962}2963result = result->in(MemNode::Memory);2964}2965}2966if (result->is_Phi()) {2967PhiNode *mphi = result->as_Phi();2968assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");2969const TypePtr *t = mphi->adr_type();2970if (!is_instance) {2971// Push all non-instance Phis on the orig_phis worklist to update inputs2972// during Phase 4 if needed.2973orig_phis.append_if_missing(mphi);2974} else if (C->get_alias_index(t) != alias_idx) {2975// Create a new Phi with the specified alias index type.2976result = split_memory_phi(mphi, alias_idx, orig_phis);2977}2978}2979// the result is either MemNode, PhiNode, InitializeNode.2980return result;2981}29822983//2984// Convert the types of non-escaped object to instance types where possible,2985// propagate the new type information through the graph, and update memory2986// edges and MergeMem inputs to reflect the new type.2987//2988// We start with allocations (and calls which may be allocations) on alloc_worklist.2989// The processing is done in 4 phases:2990//2991// Phase 1: Process possible allocations from alloc_worklist. Create instance2992// types for the CheckCastPP for allocations where possible.2993// Propagate the new types through users as follows:2994// casts and Phi: push users on alloc_worklist2995// AddP: cast Base and Address inputs to the instance type2996// push any AddP users on alloc_worklist and push any memnode2997// users onto memnode_worklist.2998// Phase 2: Process MemNode's from memnode_worklist. compute new address type and2999// search the Memory chain for a store with the appropriate type3000// address type. If a Phi is found, create a new version with3001// the appropriate memory slices from each of the Phi inputs.3002// For stores, process the users as follows:3003// MemNode: push on memnode_worklist3004// MergeMem: push on mergemem_worklist3005// Phase 3: Process MergeMem nodes from mergemem_worklist. Walk each memory slice3006// moving the first node encountered of each instance type to the3007// the input corresponding to its alias index.3008// appropriate memory slice.3009// Phase 4: Update the inputs of non-instance memory Phis and the Memory input of memnodes.3010//3011// In the following example, the CheckCastPP nodes are the cast of allocation3012// results and the allocation of node 29 is non-escaped and eligible to be an3013// instance type.3014//3015// We start with:3016//3017// 7 Parm #memory3018// 10 ConI "12"3019// 19 CheckCastPP "Foo"3020// 20 AddP _ 19 19 10 Foo+12 alias_index=43021// 29 CheckCastPP "Foo"3022// 30 AddP _ 29 29 10 Foo+12 alias_index=43023//3024// 40 StoreP 25 7 20 ... alias_index=43025// 50 StoreP 35 40 30 ... alias_index=43026// 60 StoreP 45 50 20 ... alias_index=43027// 70 LoadP _ 60 30 ... alias_index=43028// 80 Phi 75 50 60 Memory alias_index=43029// 90 LoadP _ 80 30 ... alias_index=43030// 100 LoadP _ 80 20 ... alias_index=43031//3032//3033// Phase 1 creates an instance type for node 29 assigning it an instance id of 243034// and creating a new alias index for node 30. This gives:3035//3036// 7 Parm #memory3037// 10 ConI "12"3038// 19 CheckCastPP "Foo"3039// 20 AddP _ 19 19 10 Foo+12 alias_index=43040// 29 CheckCastPP "Foo" iid=243041// 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=243042//3043// 40 StoreP 25 7 20 ... alias_index=43044// 50 StoreP 35 40 30 ... alias_index=63045// 60 StoreP 45 50 20 ... alias_index=43046// 70 LoadP _ 60 30 ... alias_index=63047// 80 Phi 75 50 60 Memory alias_index=43048// 90 LoadP _ 80 30 ... alias_index=63049// 100 LoadP _ 80 20 ... alias_index=43050//3051// In phase 2, new memory inputs are computed for the loads and stores,3052// And a new version of the phi is created. In phase 4, the inputs to3053// node 80 are updated and then the memory nodes are updated with the3054// values computed in phase 2. This results in:3055//3056// 7 Parm #memory3057// 10 ConI "12"3058// 19 CheckCastPP "Foo"3059// 20 AddP _ 19 19 10 Foo+12 alias_index=43060// 29 CheckCastPP "Foo" iid=243061// 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=243062//3063// 40 StoreP 25 7 20 ... alias_index=43064// 50 StoreP 35 7 30 ... alias_index=63065// 60 StoreP 45 40 20 ... alias_index=43066// 70 LoadP _ 50 30 ... alias_index=63067// 80 Phi 75 40 60 Memory alias_index=43068// 120 Phi 75 50 50 Memory alias_index=63069// 90 LoadP _ 120 30 ... alias_index=63070// 100 LoadP _ 80 20 ... alias_index=43071//3072void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist, GrowableArray<ArrayCopyNode*> &arraycopy_worklist) {3073GrowableArray<Node *> memnode_worklist;3074GrowableArray<PhiNode *> orig_phis;3075PhaseIterGVN *igvn = _igvn;3076uint new_index_start = (uint) _compile->num_alias_types();3077VectorSet visited;3078ideal_nodes.clear(); // Reset for use with set_map/get_map.3079uint unique_old = _compile->unique();30803081// Phase 1: Process possible allocations from alloc_worklist.3082// Create instance types for the CheckCastPP for allocations where possible.3083//3084// (Note: don't forget to change the order of the second AddP node on3085// the alloc_worklist if the order of the worklist processing is changed,3086// see the comment in find_second_addp().)3087//3088while (alloc_worklist.length() != 0) {3089Node *n = alloc_worklist.pop();3090uint ni = n->_idx;3091if (n->is_Call()) {3092CallNode *alloc = n->as_Call();3093// copy escape information to call node3094PointsToNode* ptn = ptnode_adr(alloc->_idx);3095PointsToNode::EscapeState es = ptn->escape_state();3096// We have an allocation or call which returns a Java object,3097// see if it is non-escaped.3098if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable()) {3099continue;3100}3101// Find CheckCastPP for the allocate or for the return value of a call3102n = alloc->result_cast();3103if (n == NULL) { // No uses except Initialize node3104if (alloc->is_Allocate()) {3105// Set the scalar_replaceable flag for allocation3106// so it could be eliminated if it has no uses.3107alloc->as_Allocate()->_is_scalar_replaceable = true;3108}3109continue;3110}3111if (!n->is_CheckCastPP()) { // not unique CheckCastPP.3112// we could reach here for allocate case if one init is associated with many allocs.3113if (alloc->is_Allocate()) {3114alloc->as_Allocate()->_is_scalar_replaceable = false;3115}3116continue;3117}31183119// The inline code for Object.clone() casts the allocation result to3120// java.lang.Object and then to the actual type of the allocated3121// object. Detect this case and use the second cast.3122// Also detect j.l.reflect.Array.newInstance(jobject, jint) case when3123// the allocation result is cast to java.lang.Object and then3124// to the actual Array type.3125if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL3126&& (alloc->is_AllocateArray() ||3127igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeKlassPtr::OBJECT)) {3128Node *cast2 = NULL;3129for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {3130Node *use = n->fast_out(i);3131if (use->is_CheckCastPP()) {3132cast2 = use;3133break;3134}3135}3136if (cast2 != NULL) {3137n = cast2;3138} else {3139// Non-scalar replaceable if the allocation type is unknown statically3140// (reflection allocation), the object can't be restored during3141// deoptimization without precise type.3142continue;3143}3144}31453146const TypeOopPtr *t = igvn->type(n)->isa_oopptr();3147if (t == NULL) {3148continue; // not a TypeOopPtr3149}3150if (!t->klass_is_exact()) {3151continue; // not an unique type3152}3153if (alloc->is_Allocate()) {3154// Set the scalar_replaceable flag for allocation3155// so it could be eliminated.3156alloc->as_Allocate()->_is_scalar_replaceable = true;3157}3158set_escape_state(ptnode_adr(n->_idx), es); // CheckCastPP escape state3159// in order for an object to be scalar-replaceable, it must be:3160// - a direct allocation (not a call returning an object)3161// - non-escaping3162// - eligible to be a unique type3163// - not determined to be ineligible by escape analysis3164set_map(alloc, n);3165set_map(n, alloc);3166const TypeOopPtr* tinst = t->cast_to_instance_id(ni);3167igvn->hash_delete(n);3168igvn->set_type(n, tinst);3169n->raise_bottom_type(tinst);3170igvn->hash_insert(n);3171record_for_optimizer(n);3172// Allocate an alias index for the header fields. Accesses to3173// the header emitted during macro expansion wouldn't have3174// correct memory state otherwise.3175_compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes()));3176_compile->get_alias_index(tinst->add_offset(oopDesc::klass_offset_in_bytes()));3177if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {31783179// First, put on the worklist all Field edges from Connection Graph3180// which is more accurate than putting immediate users from Ideal Graph.3181for (EdgeIterator e(ptn); e.has_next(); e.next()) {3182PointsToNode* tgt = e.get();3183if (tgt->is_Arraycopy()) {3184continue;3185}3186Node* use = tgt->ideal_node();3187assert(tgt->is_Field() && use->is_AddP(),3188"only AddP nodes are Field edges in CG");3189if (use->outcnt() > 0) { // Don't process dead nodes3190Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));3191if (addp2 != NULL) {3192assert(alloc->is_AllocateArray(),"array allocation was expected");3193alloc_worklist.append_if_missing(addp2);3194}3195alloc_worklist.append_if_missing(use);3196}3197}31983199// An allocation may have an Initialize which has raw stores. Scan3200// the users of the raw allocation result and push AddP users3201// on alloc_worklist.3202Node *raw_result = alloc->proj_out_or_null(TypeFunc::Parms);3203assert (raw_result != NULL, "must have an allocation result");3204for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {3205Node *use = raw_result->fast_out(i);3206if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes3207Node* addp2 = find_second_addp(use, raw_result);3208if (addp2 != NULL) {3209assert(alloc->is_AllocateArray(),"array allocation was expected");3210alloc_worklist.append_if_missing(addp2);3211}3212alloc_worklist.append_if_missing(use);3213} else if (use->is_MemBar()) {3214memnode_worklist.append_if_missing(use);3215}3216}3217}3218} else if (n->is_AddP()) {3219JavaObjectNode* jobj = unique_java_object(get_addp_base(n));3220if (jobj == NULL || jobj == phantom_obj) {3221#ifdef ASSERT3222ptnode_adr(get_addp_base(n)->_idx)->dump();3223ptnode_adr(n->_idx)->dump();3224assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");3225#endif3226_compile->record_failure(C2Compiler::retry_no_escape_analysis());3227return;3228}3229Node *base = get_map(jobj->idx()); // CheckCastPP node3230if (!split_AddP(n, base)) continue; // wrong type from dead path3231} else if (n->is_Phi() ||3232n->is_CheckCastPP() ||3233n->is_EncodeP() ||3234n->is_DecodeN() ||3235(n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {3236if (visited.test_set(n->_idx)) {3237assert(n->is_Phi(), "loops only through Phi's");3238continue; // already processed3239}3240JavaObjectNode* jobj = unique_java_object(n);3241if (jobj == NULL || jobj == phantom_obj) {3242#ifdef ASSERT3243ptnode_adr(n->_idx)->dump();3244assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");3245#endif3246_compile->record_failure(C2Compiler::retry_no_escape_analysis());3247return;3248} else {3249Node *val = get_map(jobj->idx()); // CheckCastPP node3250TypeNode *tn = n->as_Type();3251const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();3252assert(tinst != NULL && tinst->is_known_instance() &&3253tinst->instance_id() == jobj->idx() , "instance type expected.");32543255const Type *tn_type = igvn->type(tn);3256const TypeOopPtr *tn_t;3257if (tn_type->isa_narrowoop()) {3258tn_t = tn_type->make_ptr()->isa_oopptr();3259} else {3260tn_t = tn_type->isa_oopptr();3261}3262if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) {3263if (tn_type->isa_narrowoop()) {3264tn_type = tinst->make_narrowoop();3265} else {3266tn_type = tinst;3267}3268igvn->hash_delete(tn);3269igvn->set_type(tn, tn_type);3270tn->set_type(tn_type);3271igvn->hash_insert(tn);3272record_for_optimizer(n);3273} else {3274assert(tn_type == TypePtr::NULL_PTR ||3275tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass()),3276"unexpected type");3277continue; // Skip dead path with different type3278}3279}3280} else {3281debug_only(n->dump();)3282assert(false, "EA: unexpected node");3283continue;3284}3285// push allocation's users on appropriate worklist3286for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {3287Node *use = n->fast_out(i);3288if(use->is_Mem() && use->in(MemNode::Address) == n) {3289// Load/store to instance's field3290memnode_worklist.append_if_missing(use);3291} else if (use->is_MemBar()) {3292if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge3293memnode_worklist.append_if_missing(use);3294}3295} else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes3296Node* addp2 = find_second_addp(use, n);3297if (addp2 != NULL) {3298alloc_worklist.append_if_missing(addp2);3299}3300alloc_worklist.append_if_missing(use);3301} else if (use->is_Phi() ||3302use->is_CheckCastPP() ||3303use->is_EncodeNarrowPtr() ||3304use->is_DecodeNarrowPtr() ||3305(use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {3306alloc_worklist.append_if_missing(use);3307#ifdef ASSERT3308} else if (use->is_Mem()) {3309assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");3310} else if (use->is_MergeMem()) {3311assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");3312} else if (use->is_SafePoint()) {3313// Look for MergeMem nodes for calls which reference unique allocation3314// (through CheckCastPP nodes) even for debug info.3315Node* m = use->in(TypeFunc::Memory);3316if (m->is_MergeMem()) {3317assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");3318}3319} else if (use->Opcode() == Op_EncodeISOArray) {3320if (use->in(MemNode::Memory) == n || use->in(3) == n) {3321// EncodeISOArray overwrites destination array3322memnode_worklist.append_if_missing(use);3323}3324} else {3325uint op = use->Opcode();3326if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&3327(use->in(MemNode::Memory) == n)) {3328// They overwrite memory edge corresponding to destination array,3329memnode_worklist.append_if_missing(use);3330} else if (!(op == Op_CmpP || op == Op_Conv2B ||3331op == Op_CastP2X || op == Op_StoreCM ||3332op == Op_FastLock || op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||3333op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||3334op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar ||3335op == Op_SubTypeCheck ||3336BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {3337n->dump();3338use->dump();3339assert(false, "EA: missing allocation reference path");3340}3341#endif3342}3343}33443345}33463347// Go over all ArrayCopy nodes and if one of the inputs has a unique3348// type, record it in the ArrayCopy node so we know what memory this3349// node uses/modified.3350for (int next = 0; next < arraycopy_worklist.length(); next++) {3351ArrayCopyNode* ac = arraycopy_worklist.at(next);3352Node* dest = ac->in(ArrayCopyNode::Dest);3353if (dest->is_AddP()) {3354dest = get_addp_base(dest);3355}3356JavaObjectNode* jobj = unique_java_object(dest);3357if (jobj != NULL) {3358Node *base = get_map(jobj->idx());3359if (base != NULL) {3360const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();3361ac->_dest_type = base_t;3362}3363}3364Node* src = ac->in(ArrayCopyNode::Src);3365if (src->is_AddP()) {3366src = get_addp_base(src);3367}3368jobj = unique_java_object(src);3369if (jobj != NULL) {3370Node* base = get_map(jobj->idx());3371if (base != NULL) {3372const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();3373ac->_src_type = base_t;3374}3375}3376}33773378// New alias types were created in split_AddP().3379uint new_index_end = (uint) _compile->num_alias_types();3380assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1");33813382// Phase 2: Process MemNode's from memnode_worklist. compute new address type and3383// compute new values for Memory inputs (the Memory inputs are not3384// actually updated until phase 4.)3385if (memnode_worklist.length() == 0)3386return; // nothing to do3387while (memnode_worklist.length() != 0) {3388Node *n = memnode_worklist.pop();3389if (visited.test_set(n->_idx)) {3390continue;3391}3392if (n->is_Phi() || n->is_ClearArray()) {3393// we don't need to do anything, but the users must be pushed3394} else if (n->is_MemBar()) { // Initialize, MemBar nodes3395// we don't need to do anything, but the users must be pushed3396n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);3397if (n == NULL) {3398continue;3399}3400} else if (n->Opcode() == Op_StrCompressedCopy ||3401n->Opcode() == Op_EncodeISOArray) {3402// get the memory projection3403n = n->find_out_with(Op_SCMemProj);3404assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");3405} else {3406assert(n->is_Mem(), "memory node required.");3407Node *addr = n->in(MemNode::Address);3408const Type *addr_t = igvn->type(addr);3409if (addr_t == Type::TOP) {3410continue;3411}3412assert (addr_t->isa_ptr() != NULL, "pointer type required.");3413int alias_idx = _compile->get_alias_index(addr_t->is_ptr());3414assert ((uint)alias_idx < new_index_end, "wrong alias index");3415Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);3416if (_compile->failing()) {3417return;3418}3419if (mem != n->in(MemNode::Memory)) {3420// We delay the memory edge update since we need old one in3421// MergeMem code below when instances memory slices are separated.3422set_map(n, mem);3423}3424if (n->is_Load()) {3425continue; // don't push users3426} else if (n->is_LoadStore()) {3427// get the memory projection3428n = n->find_out_with(Op_SCMemProj);3429assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");3430}3431}3432// push user on appropriate worklist3433for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {3434Node *use = n->fast_out(i);3435if (use->is_Phi() || use->is_ClearArray()) {3436memnode_worklist.append_if_missing(use);3437} else if (use->is_Mem() && use->in(MemNode::Memory) == n) {3438if (use->Opcode() == Op_StoreCM) { // Ignore cardmark stores3439continue;3440}3441memnode_worklist.append_if_missing(use);3442} else if (use->is_MemBar()) {3443if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge3444memnode_worklist.append_if_missing(use);3445}3446#ifdef ASSERT3447} else if(use->is_Mem()) {3448assert(use->in(MemNode::Memory) != n, "EA: missing memory path");3449} else if (use->is_MergeMem()) {3450assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");3451} else if (use->Opcode() == Op_EncodeISOArray) {3452if (use->in(MemNode::Memory) == n || use->in(3) == n) {3453// EncodeISOArray overwrites destination array3454memnode_worklist.append_if_missing(use);3455}3456} else {3457uint op = use->Opcode();3458if ((use->in(MemNode::Memory) == n) &&3459(op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {3460// They overwrite memory edge corresponding to destination array,3461memnode_worklist.append_if_missing(use);3462} else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||3463op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||3464op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||3465op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {3466n->dump();3467use->dump();3468assert(false, "EA: missing memory path");3469}3470#endif3471}3472}3473}34743475// Phase 3: Process MergeMem nodes from mergemem_worklist.3476// Walk each memory slice moving the first node encountered of each3477// instance type to the the input corresponding to its alias index.3478uint length = _mergemem_worklist.length();3479for( uint next = 0; next < length; ++next ) {3480MergeMemNode* nmm = _mergemem_worklist.at(next);3481assert(!visited.test_set(nmm->_idx), "should not be visited before");3482// Note: we don't want to use MergeMemStream here because we only want to3483// scan inputs which exist at the start, not ones we add during processing.3484// Note 2: MergeMem may already contains instance memory slices added3485// during find_inst_mem() call when memory nodes were processed above.3486igvn->hash_delete(nmm);3487uint nslices = MIN2(nmm->req(), new_index_start);3488for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {3489Node* mem = nmm->in(i);3490Node* cur = NULL;3491if (mem == NULL || mem->is_top()) {3492continue;3493}3494// First, update mergemem by moving memory nodes to corresponding slices3495// if their type became more precise since this mergemem was created.3496while (mem->is_Mem()) {3497const Type *at = igvn->type(mem->in(MemNode::Address));3498if (at != Type::TOP) {3499assert (at->isa_ptr() != NULL, "pointer type required.");3500uint idx = (uint)_compile->get_alias_index(at->is_ptr());3501if (idx == i) {3502if (cur == NULL) {3503cur = mem;3504}3505} else {3506if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {3507nmm->set_memory_at(idx, mem);3508}3509}3510}3511mem = mem->in(MemNode::Memory);3512}3513nmm->set_memory_at(i, (cur != NULL) ? cur : mem);3514// Find any instance of the current type if we haven't encountered3515// already a memory slice of the instance along the memory chain.3516for (uint ni = new_index_start; ni < new_index_end; ni++) {3517if((uint)_compile->get_general_index(ni) == i) {3518Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);3519if (nmm->is_empty_memory(m)) {3520Node* result = find_inst_mem(mem, ni, orig_phis);3521if (_compile->failing()) {3522return;3523}3524nmm->set_memory_at(ni, result);3525}3526}3527}3528}3529// Find the rest of instances values3530for (uint ni = new_index_start; ni < new_index_end; ni++) {3531const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();3532Node* result = step_through_mergemem(nmm, ni, tinst);3533if (result == nmm->base_memory()) {3534// Didn't find instance memory, search through general slice recursively.3535result = nmm->memory_at(_compile->get_general_index(ni));3536result = find_inst_mem(result, ni, orig_phis);3537if (_compile->failing()) {3538return;3539}3540nmm->set_memory_at(ni, result);3541}3542}3543igvn->hash_insert(nmm);3544record_for_optimizer(nmm);3545}35463547// Phase 4: Update the inputs of non-instance memory Phis and3548// the Memory input of memnodes3549// First update the inputs of any non-instance Phi's from3550// which we split out an instance Phi. Note we don't have3551// to recursively process Phi's encountered on the input memory3552// chains as is done in split_memory_phi() since they will3553// also be processed here.3554for (int j = 0; j < orig_phis.length(); j++) {3555PhiNode *phi = orig_phis.at(j);3556int alias_idx = _compile->get_alias_index(phi->adr_type());3557igvn->hash_delete(phi);3558for (uint i = 1; i < phi->req(); i++) {3559Node *mem = phi->in(i);3560Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);3561if (_compile->failing()) {3562return;3563}3564if (mem != new_mem) {3565phi->set_req(i, new_mem);3566}3567}3568igvn->hash_insert(phi);3569record_for_optimizer(phi);3570}35713572// Update the memory inputs of MemNodes with the value we computed3573// in Phase 2 and move stores memory users to corresponding memory slices.3574// Disable memory split verification code until the fix for 6984348.3575// Currently it produces false negative results since it does not cover all cases.3576#if 0 // ifdef ASSERT3577visited.Reset();3578Node_Stack old_mems(arena, _compile->unique() >> 2);3579#endif3580for (uint i = 0; i < ideal_nodes.size(); i++) {3581Node* n = ideal_nodes.at(i);3582Node* nmem = get_map(n->_idx);3583assert(nmem != NULL, "sanity");3584if (n->is_Mem()) {3585#if 0 // ifdef ASSERT3586Node* old_mem = n->in(MemNode::Memory);3587if (!visited.test_set(old_mem->_idx)) {3588old_mems.push(old_mem, old_mem->outcnt());3589}3590#endif3591assert(n->in(MemNode::Memory) != nmem, "sanity");3592if (!n->is_Load()) {3593// Move memory users of a store first.3594move_inst_mem(n, orig_phis);3595}3596// Now update memory input3597igvn->hash_delete(n);3598n->set_req(MemNode::Memory, nmem);3599igvn->hash_insert(n);3600record_for_optimizer(n);3601} else {3602assert(n->is_Allocate() || n->is_CheckCastPP() ||3603n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");3604}3605}3606#if 0 // ifdef ASSERT3607// Verify that memory was split correctly3608while (old_mems.is_nonempty()) {3609Node* old_mem = old_mems.node();3610uint old_cnt = old_mems.index();3611old_mems.pop();3612assert(old_cnt == old_mem->outcnt(), "old mem could be lost");3613}3614#endif3615}36163617#ifndef PRODUCT3618static const char *node_type_names[] = {3619"UnknownType",3620"JavaObject",3621"LocalVar",3622"Field",3623"Arraycopy"3624};36253626static const char *esc_names[] = {3627"UnknownEscape",3628"NoEscape",3629"ArgEscape",3630"GlobalEscape"3631};36323633void PointsToNode::dump(bool print_state) const {3634NodeType nt = node_type();3635tty->print("%s ", node_type_names[(int) nt]);3636if (print_state) {3637EscapeState es = escape_state();3638EscapeState fields_es = fields_escape_state();3639tty->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]);3640if (nt == PointsToNode::JavaObject && !this->scalar_replaceable()) {3641tty->print("NSR ");3642}3643}3644if (is_Field()) {3645FieldNode* f = (FieldNode*)this;3646if (f->is_oop()) {3647tty->print("oop ");3648}3649if (f->offset() > 0) {3650tty->print("+%d ", f->offset());3651}3652tty->print("(");3653for (BaseIterator i(f); i.has_next(); i.next()) {3654PointsToNode* b = i.get();3655tty->print(" %d%s", b->idx(),(b->is_JavaObject() ? "P" : ""));3656}3657tty->print(" )");3658}3659tty->print("[");3660for (EdgeIterator i(this); i.has_next(); i.next()) {3661PointsToNode* e = i.get();3662tty->print(" %d%s%s", e->idx(),(e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : "");3663}3664tty->print(" [");3665for (UseIterator i(this); i.has_next(); i.next()) {3666PointsToNode* u = i.get();3667bool is_base = false;3668if (PointsToNode::is_base_use(u)) {3669is_base = true;3670u = PointsToNode::get_use_node(u)->as_Field();3671}3672tty->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : "");3673}3674tty->print(" ]] ");3675if (_node == NULL) {3676tty->print_cr("<null>");3677} else {3678_node->dump();3679}3680}36813682void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) {3683bool first = true;3684int ptnodes_length = ptnodes_worklist.length();3685for (int i = 0; i < ptnodes_length; i++) {3686PointsToNode *ptn = ptnodes_worklist.at(i);3687if (ptn == NULL || !ptn->is_JavaObject()) {3688continue;3689}3690PointsToNode::EscapeState es = ptn->escape_state();3691if ((es != PointsToNode::NoEscape) && !Verbose) {3692continue;3693}3694Node* n = ptn->ideal_node();3695if (n->is_Allocate() || (n->is_CallStaticJava() &&3696n->as_CallStaticJava()->is_boxing_method())) {3697if (first) {3698tty->cr();3699tty->print("======== Connection graph for ");3700_compile->method()->print_short_name();3701tty->cr();3702first = false;3703}3704ptn->dump();3705// Print all locals and fields which reference this allocation3706for (UseIterator j(ptn); j.has_next(); j.next()) {3707PointsToNode* use = j.get();3708if (use->is_LocalVar()) {3709use->dump(Verbose);3710} else if (Verbose) {3711use->dump();3712}3713}3714tty->cr();3715}3716}3717}3718#endif37193720void ConnectionGraph::record_for_optimizer(Node *n) {3721_igvn->_worklist.push(n);3722_igvn->add_users_to_worklist(n);3723}372437253726