Path: blob/master/src/hotspot/share/opto/escape.cpp
64440 views
/*1* Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved.2* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.3*4* This code is free software; you can redistribute it and/or modify it5* under the terms of the GNU General Public License version 2 only, as6* published by the Free Software Foundation.7*8* This code is distributed in the hope that it will be useful, but WITHOUT9* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or10* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License11* version 2 for more details (a copy is included in the LICENSE file that12* accompanied this code).13*14* You should have received a copy of the GNU General Public License version15* 2 along with this work; if not, write to the Free Software Foundation,16* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.17*18* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA19* or visit www.oracle.com if you need additional information or have any20* questions.21*22*/2324#include "precompiled.hpp"25#include "ci/bcEscapeAnalyzer.hpp"26#include "compiler/compileLog.hpp"27#include "gc/shared/barrierSet.hpp"28#include "gc/shared/c2/barrierSetC2.hpp"29#include "libadt/vectset.hpp"30#include "memory/allocation.hpp"31#include "memory/resourceArea.hpp"32#include "opto/c2compiler.hpp"33#include "opto/arraycopynode.hpp"34#include "opto/callnode.hpp"35#include "opto/cfgnode.hpp"36#include "opto/compile.hpp"37#include "opto/escape.hpp"38#include "opto/phaseX.hpp"39#include "opto/movenode.hpp"40#include "opto/rootnode.hpp"41#include "utilities/macros.hpp"4243ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :44_nodes(C->comp_arena(), C->unique(), C->unique(), NULL),45_in_worklist(C->comp_arena()),46_next_pidx(0),47_collecting(true),48_verify(false),49_compile(C),50_igvn(igvn),51_node_map(C->comp_arena()) {52// Add unknown java object.53add_java_object(C->top(), PointsToNode::GlobalEscape);54phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();55// Add ConP(#NULL) and ConN(#NULL) nodes.56Node* oop_null = igvn->zerocon(T_OBJECT);57assert(oop_null->_idx < nodes_size(), "should be created already");58add_java_object(oop_null, PointsToNode::NoEscape);59null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();60if (UseCompressedOops) {61Node* noop_null = igvn->zerocon(T_NARROWOOP);62assert(noop_null->_idx < nodes_size(), "should be created already");63map_ideal_node(noop_null, null_obj);64}65}6667bool ConnectionGraph::has_candidates(Compile *C) {68// EA brings benefits only when the code has allocations and/or locks which69// are represented by ideal Macro nodes.70int cnt = C->macro_count();71for (int i = 0; i < cnt; i++) {72Node *n = C->macro_node(i);73if (n->is_Allocate()) {74return true;75}76if (n->is_Lock()) {77Node* obj = n->as_Lock()->obj_node()->uncast();78if (!(obj->is_Parm() || obj->is_Con())) {79return true;80}81}82if (n->is_CallStaticJava() &&83n->as_CallStaticJava()->is_boxing_method()) {84return true;85}86}87return false;88}8990void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {91Compile::TracePhase tp("escapeAnalysis", &Phase::timers[Phase::_t_escapeAnalysis]);92ResourceMark rm;9394// Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction95// to create space for them in ConnectionGraph::_nodes[].96Node* oop_null = igvn->zerocon(T_OBJECT);97Node* noop_null = igvn->zerocon(T_NARROWOOP);98ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn);99// Perform escape analysis100if (congraph->compute_escape()) {101// There are non escaping objects.102C->set_congraph(congraph);103}104// Cleanup.105if (oop_null->outcnt() == 0) {106igvn->hash_delete(oop_null);107}108if (noop_null->outcnt() == 0) {109igvn->hash_delete(noop_null);110}111}112113bool ConnectionGraph::compute_escape() {114Compile* C = _compile;115PhaseGVN* igvn = _igvn;116117// Worklists used by EA.118Unique_Node_List delayed_worklist;119GrowableArray<Node*> alloc_worklist;120GrowableArray<Node*> ptr_cmp_worklist;121GrowableArray<Node*> storestore_worklist;122GrowableArray<ArrayCopyNode*> arraycopy_worklist;123GrowableArray<PointsToNode*> ptnodes_worklist;124GrowableArray<JavaObjectNode*> java_objects_worklist;125GrowableArray<JavaObjectNode*> non_escaped_allocs_worklist;126GrowableArray<FieldNode*> oop_fields_worklist;127GrowableArray<SafePointNode*> sfn_worklist;128DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )129130{ Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);131132// 1. Populate Connection Graph (CG) with PointsTo nodes.133ideal_nodes.map(C->live_nodes(), NULL); // preallocate space134// Initialize worklist135if (C->root() != NULL) {136ideal_nodes.push(C->root());137}138// Processed ideal nodes are unique on ideal_nodes list139// but several ideal nodes are mapped to the phantom_obj.140// To avoid duplicated entries on the following worklists141// add the phantom_obj only once to them.142ptnodes_worklist.append(phantom_obj);143java_objects_worklist.append(phantom_obj);144for( uint next = 0; next < ideal_nodes.size(); ++next ) {145Node* n = ideal_nodes.at(next);146// Create PointsTo nodes and add them to Connection Graph. Called147// only once per ideal node since ideal_nodes is Unique_Node list.148add_node_to_connection_graph(n, &delayed_worklist);149PointsToNode* ptn = ptnode_adr(n->_idx);150if (ptn != NULL && ptn != phantom_obj) {151ptnodes_worklist.append(ptn);152if (ptn->is_JavaObject()) {153java_objects_worklist.append(ptn->as_JavaObject());154if ((n->is_Allocate() || n->is_CallStaticJava()) &&155(ptn->escape_state() < PointsToNode::GlobalEscape)) {156// Only allocations and java static calls results are interesting.157non_escaped_allocs_worklist.append(ptn->as_JavaObject());158}159} else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {160oop_fields_worklist.append(ptn->as_Field());161}162}163if (n->is_MergeMem()) {164// Collect all MergeMem nodes to add memory slices for165// scalar replaceable objects in split_unique_types().166_mergemem_worklist.append(n->as_MergeMem());167} else if (OptimizePtrCompare && n->is_Cmp() &&168(n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {169// Collect compare pointers nodes.170ptr_cmp_worklist.append(n);171} else if (n->is_MemBarStoreStore()) {172// Collect all MemBarStoreStore nodes so that depending on the173// escape status of the associated Allocate node some of them174// may be eliminated.175storestore_worklist.append(n);176} else if (n->is_MemBar() && (n->Opcode() == Op_MemBarRelease) &&177(n->req() > MemBarNode::Precedent)) {178record_for_optimizer(n);179#ifdef ASSERT180} else if (n->is_AddP()) {181// Collect address nodes for graph verification.182addp_worklist.append(n);183#endif184} else if (n->is_ArrayCopy()) {185// Keep a list of ArrayCopy nodes so if one of its input is non186// escaping, we can record a unique type187arraycopy_worklist.append(n->as_ArrayCopy());188}189for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {190Node* m = n->fast_out(i); // Get user191ideal_nodes.push(m);192}193if (n-> is_SafePoint()) {194sfn_worklist.append(n->as_SafePoint());195}196}197if (non_escaped_allocs_worklist.length() == 0) {198_collecting = false;199return false; // Nothing to do.200}201// Add final simple edges to graph.202while(delayed_worklist.size() > 0) {203Node* n = delayed_worklist.pop();204add_final_edges(n);205}206207#ifdef ASSERT208if (VerifyConnectionGraph) {209// Verify that no new simple edges could be created and all210// local vars has edges.211_verify = true;212int ptnodes_length = ptnodes_worklist.length();213for (int next = 0; next < ptnodes_length; ++next) {214PointsToNode* ptn = ptnodes_worklist.at(next);215add_final_edges(ptn->ideal_node());216if (ptn->is_LocalVar() && ptn->edge_count() == 0) {217ptn->dump();218assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");219}220}221_verify = false;222}223#endif224// Bytecode analyzer BCEscapeAnalyzer, used for Call nodes225// processing, calls to CI to resolve symbols (types, fields, methods)226// referenced in bytecode. During symbol resolution VM may throw227// an exception which CI cleans and converts to compilation failure.228if (C->failing()) return false;229230// 2. Finish Graph construction by propagating references to all231// java objects through graph.232if (!complete_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,233java_objects_worklist, oop_fields_worklist)) {234// All objects escaped or hit time or iterations limits.235_collecting = false;236return false;237}238239// 3. Adjust scalar_replaceable state of nonescaping objects and push240// scalar replaceable allocations on alloc_worklist for processing241// in split_unique_types().242int non_escaped_length = non_escaped_allocs_worklist.length();243for (int next = 0; next < non_escaped_length; next++) {244JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);245bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);246Node* n = ptn->ideal_node();247if (n->is_Allocate()) {248n->as_Allocate()->_is_non_escaping = noescape;249}250if (noescape && ptn->scalar_replaceable()) {251adjust_scalar_replaceable_state(ptn);252if (ptn->scalar_replaceable()) {253alloc_worklist.append(ptn->ideal_node());254}255}256}257258#ifdef ASSERT259if (VerifyConnectionGraph) {260// Verify that graph is complete - no new edges could be added or needed.261verify_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,262java_objects_worklist, addp_worklist);263}264assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");265assert(null_obj->escape_state() == PointsToNode::NoEscape &&266null_obj->edge_count() == 0 &&267!null_obj->arraycopy_src() &&268!null_obj->arraycopy_dst(), "sanity");269#endif270271_collecting = false;272273} // TracePhase t3("connectionGraph")274275// 4. Optimize ideal graph based on EA information.276bool has_non_escaping_obj = (non_escaped_allocs_worklist.length() > 0);277if (has_non_escaping_obj) {278optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);279}280281#ifndef PRODUCT282if (PrintEscapeAnalysis) {283dump(ptnodes_worklist); // Dump ConnectionGraph284}285#endif286287#ifdef ASSERT288if (VerifyConnectionGraph) {289int alloc_length = alloc_worklist.length();290for (int next = 0; next < alloc_length; ++next) {291Node* n = alloc_worklist.at(next);292PointsToNode* ptn = ptnode_adr(n->_idx);293assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");294}295}296#endif297298// 5. Separate memory graph for scalar replaceable allcations.299bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);300if (has_scalar_replaceable_candidates &&301C->AliasLevel() >= 3 && EliminateAllocations) {302// Now use the escape information to create unique types for303// scalar replaceable objects.304split_unique_types(alloc_worklist, arraycopy_worklist);305if (C->failing()) return false;306C->print_method(PHASE_AFTER_EA, 2);307308#ifdef ASSERT309} else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {310tty->print("=== No allocations eliminated for ");311C->method()->print_short_name();312if (!EliminateAllocations) {313tty->print(" since EliminateAllocations is off ===");314} else if(!has_scalar_replaceable_candidates) {315tty->print(" since there are no scalar replaceable candidates ===");316} else if(C->AliasLevel() < 3) {317tty->print(" since AliasLevel < 3 ===");318}319tty->cr();320#endif321}322323// Annotate at safepoints if they have <= ArgEscape objects in their scope and at324// java calls if they pass ArgEscape objects as parameters.325if (has_non_escaping_obj &&326(C->env()->should_retain_local_variables() ||327C->env()->jvmti_can_get_owned_monitor_info() ||328C->env()->jvmti_can_walk_any_space() ||329DeoptimizeObjectsALot)) {330int sfn_length = sfn_worklist.length();331for (int next = 0; next < sfn_length; next++) {332SafePointNode* sfn = sfn_worklist.at(next);333sfn->set_has_ea_local_in_scope(has_ea_local_in_scope(sfn));334if (sfn->is_CallJava()) {335CallJavaNode* call = sfn->as_CallJava();336call->set_arg_escape(has_arg_escape(call));337}338}339}340341return has_non_escaping_obj;342}343344// Returns true if there is an object in the scope of sfn that does not escape globally.345bool ConnectionGraph::has_ea_local_in_scope(SafePointNode* sfn) {346Compile* C = _compile;347for (JVMState* jvms = sfn->jvms(); jvms != NULL; jvms = jvms->caller()) {348if (C->env()->should_retain_local_variables() || C->env()->jvmti_can_walk_any_space() ||349DeoptimizeObjectsALot) {350// Jvmti agents can access locals. Must provide info about local objects at runtime.351int num_locs = jvms->loc_size();352for (int idx = 0; idx < num_locs; idx++) {353Node* l = sfn->local(jvms, idx);354if (not_global_escape(l)) {355return true;356}357}358}359if (C->env()->jvmti_can_get_owned_monitor_info() ||360C->env()->jvmti_can_walk_any_space() || DeoptimizeObjectsALot) {361// Jvmti agents can read monitors. Must provide info about locked objects at runtime.362int num_mon = jvms->nof_monitors();363for (int idx = 0; idx < num_mon; idx++) {364Node* m = sfn->monitor_obj(jvms, idx);365if (m != NULL && not_global_escape(m)) {366return true;367}368}369}370}371return false;372}373374// Returns true if at least one of the arguments to the call is an object375// that does not escape globally.376bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {377if (call->method() != NULL) {378uint max_idx = TypeFunc::Parms + call->method()->arg_size();379for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {380Node* p = call->in(idx);381if (not_global_escape(p)) {382return true;383}384}385} else {386const char* name = call->as_CallStaticJava()->_name;387assert(name != NULL, "no name");388// no arg escapes through uncommon traps389if (strcmp(name, "uncommon_trap") != 0) {390// process_call_arguments() assumes that all arguments escape globally391const TypeTuple* d = call->tf()->domain();392for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {393const Type* at = d->field_at(i);394if (at->isa_oopptr() != NULL) {395return true;396}397}398}399}400return false;401}402403404405// Utility function for nodes that load an object406void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {407// Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because408// ThreadLocal has RawPtr type.409const Type* t = _igvn->type(n);410if (t->make_ptr() != NULL) {411Node* adr = n->in(MemNode::Address);412#ifdef ASSERT413if (!adr->is_AddP()) {414assert(_igvn->type(adr)->isa_rawptr(), "sanity");415} else {416assert((ptnode_adr(adr->_idx) == NULL ||417ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");418}419#endif420add_local_var_and_edge(n, PointsToNode::NoEscape,421adr, delayed_worklist);422}423}424425// Populate Connection Graph with PointsTo nodes and create simple426// connection graph edges.427void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {428assert(!_verify, "this method should not be called for verification");429PhaseGVN* igvn = _igvn;430uint n_idx = n->_idx;431PointsToNode* n_ptn = ptnode_adr(n_idx);432if (n_ptn != NULL) {433return; // No need to redefine PointsTo node during first iteration.434}435int opcode = n->Opcode();436bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_to_con_graph(this, igvn, delayed_worklist, n, opcode);437if (gc_handled) {438return; // Ignore node if already handled by GC.439}440441if (n->is_Call()) {442// Arguments to allocation and locking don't escape.443if (n->is_AbstractLock()) {444// Put Lock and Unlock nodes on IGVN worklist to process them during445// first IGVN optimization when escape information is still available.446record_for_optimizer(n);447} else if (n->is_Allocate()) {448add_call_node(n->as_Call());449record_for_optimizer(n);450} else {451if (n->is_CallStaticJava()) {452const char* name = n->as_CallStaticJava()->_name;453if (name != NULL && strcmp(name, "uncommon_trap") == 0) {454return; // Skip uncommon traps455}456}457// Don't mark as processed since call's arguments have to be processed.458delayed_worklist->push(n);459// Check if a call returns an object.460if ((n->as_Call()->returns_pointer() &&461n->as_Call()->proj_out_or_null(TypeFunc::Parms) != NULL) ||462(n->is_CallStaticJava() &&463n->as_CallStaticJava()->is_boxing_method())) {464add_call_node(n->as_Call());465}466}467return;468}469// Put this check here to process call arguments since some call nodes470// point to phantom_obj.471if (n_ptn == phantom_obj || n_ptn == null_obj) {472return; // Skip predefined nodes.473}474switch (opcode) {475case Op_AddP: {476Node* base = get_addp_base(n);477PointsToNode* ptn_base = ptnode_adr(base->_idx);478// Field nodes are created for all field types. They are used in479// adjust_scalar_replaceable_state() and split_unique_types().480// Note, non-oop fields will have only base edges in Connection481// Graph because such fields are not used for oop loads and stores.482int offset = address_offset(n, igvn);483add_field(n, PointsToNode::NoEscape, offset);484if (ptn_base == NULL) {485delayed_worklist->push(n); // Process it later.486} else {487n_ptn = ptnode_adr(n_idx);488add_base(n_ptn->as_Field(), ptn_base);489}490break;491}492case Op_CastX2P: {493map_ideal_node(n, phantom_obj);494break;495}496case Op_CastPP:497case Op_CheckCastPP:498case Op_EncodeP:499case Op_DecodeN:500case Op_EncodePKlass:501case Op_DecodeNKlass: {502add_local_var_and_edge(n, PointsToNode::NoEscape,503n->in(1), delayed_worklist);504break;505}506case Op_CMoveP: {507add_local_var(n, PointsToNode::NoEscape);508// Do not add edges during first iteration because some could be509// not defined yet.510delayed_worklist->push(n);511break;512}513case Op_ConP:514case Op_ConN:515case Op_ConNKlass: {516// assume all oop constants globally escape except for null517PointsToNode::EscapeState es;518const Type* t = igvn->type(n);519if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {520es = PointsToNode::NoEscape;521} else {522es = PointsToNode::GlobalEscape;523}524add_java_object(n, es);525break;526}527case Op_CreateEx: {528// assume that all exception objects globally escape529map_ideal_node(n, phantom_obj);530break;531}532case Op_LoadKlass:533case Op_LoadNKlass: {534// Unknown class is loaded535map_ideal_node(n, phantom_obj);536break;537}538case Op_LoadP:539case Op_LoadN:540case Op_LoadPLocked: {541add_objload_to_connection_graph(n, delayed_worklist);542break;543}544case Op_Parm: {545map_ideal_node(n, phantom_obj);546break;547}548case Op_PartialSubtypeCheck: {549// Produces Null or notNull and is used in only in CmpP so550// phantom_obj could be used.551map_ideal_node(n, phantom_obj); // Result is unknown552break;553}554case Op_Phi: {555// Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because556// ThreadLocal has RawPtr type.557const Type* t = n->as_Phi()->type();558if (t->make_ptr() != NULL) {559add_local_var(n, PointsToNode::NoEscape);560// Do not add edges during first iteration because some could be561// not defined yet.562delayed_worklist->push(n);563}564break;565}566case Op_Proj: {567// we are only interested in the oop result projection from a call568if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&569n->in(0)->as_Call()->returns_pointer()) {570add_local_var_and_edge(n, PointsToNode::NoEscape,571n->in(0), delayed_worklist);572}573break;574}575case Op_Rethrow: // Exception object escapes576case Op_Return: {577if (n->req() > TypeFunc::Parms &&578igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {579// Treat Return value as LocalVar with GlobalEscape escape state.580add_local_var_and_edge(n, PointsToNode::GlobalEscape,581n->in(TypeFunc::Parms), delayed_worklist);582}583break;584}585case Op_CompareAndExchangeP:586case Op_CompareAndExchangeN:587case Op_GetAndSetP:588case Op_GetAndSetN: {589add_objload_to_connection_graph(n, delayed_worklist);590// fall-through591}592case Op_StoreP:593case Op_StoreN:594case Op_StoreNKlass:595case Op_StorePConditional:596case Op_WeakCompareAndSwapP:597case Op_WeakCompareAndSwapN:598case Op_CompareAndSwapP:599case Op_CompareAndSwapN: {600add_to_congraph_unsafe_access(n, opcode, delayed_worklist);601break;602}603case Op_AryEq:604case Op_HasNegatives:605case Op_StrComp:606case Op_StrEquals:607case Op_StrIndexOf:608case Op_StrIndexOfChar:609case Op_StrInflatedCopy:610case Op_StrCompressedCopy:611case Op_EncodeISOArray: {612add_local_var(n, PointsToNode::ArgEscape);613delayed_worklist->push(n); // Process it later.614break;615}616case Op_ThreadLocal: {617add_java_object(n, PointsToNode::ArgEscape);618break;619}620case Op_Blackhole: {621// All blackhole pointer arguments are globally escaping.622// Only do this if there is at least one pointer argument.623// Do not add edges during first iteration because some could be624// not defined yet, defer to final step.625for (uint i = 0; i < n->req(); i++) {626Node* in = n->in(i);627if (in != nullptr) {628const Type* at = _igvn->type(in);629if (!at->isa_ptr()) continue;630631add_local_var(n, PointsToNode::GlobalEscape);632delayed_worklist->push(n);633break;634}635}636break;637}638default:639; // Do nothing for nodes not related to EA.640}641return;642}643644#ifdef ASSERT645#define ELSE_FAIL(name) \646/* Should not be called for not pointer type. */ \647n->dump(1); \648assert(false, name); \649break;650#else651#define ELSE_FAIL(name) \652break;653#endif654655// Add final simple edges to graph.656void ConnectionGraph::add_final_edges(Node *n) {657PointsToNode* n_ptn = ptnode_adr(n->_idx);658#ifdef ASSERT659if (_verify && n_ptn->is_JavaObject())660return; // This method does not change graph for JavaObject.661#endif662663if (n->is_Call()) {664process_call_arguments(n->as_Call());665return;666}667assert(n->is_Store() || n->is_LoadStore() ||668(n_ptn != NULL) && (n_ptn->ideal_node() != NULL),669"node should be registered already");670int opcode = n->Opcode();671bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);672if (gc_handled) {673return; // Ignore node if already handled by GC.674}675switch (opcode) {676case Op_AddP: {677Node* base = get_addp_base(n);678PointsToNode* ptn_base = ptnode_adr(base->_idx);679assert(ptn_base != NULL, "field's base should be registered");680add_base(n_ptn->as_Field(), ptn_base);681break;682}683case Op_CastPP:684case Op_CheckCastPP:685case Op_EncodeP:686case Op_DecodeN:687case Op_EncodePKlass:688case Op_DecodeNKlass: {689add_local_var_and_edge(n, PointsToNode::NoEscape,690n->in(1), NULL);691break;692}693case Op_CMoveP: {694for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {695Node* in = n->in(i);696if (in == NULL) {697continue; // ignore NULL698}699Node* uncast_in = in->uncast();700if (uncast_in->is_top() || uncast_in == n) {701continue; // ignore top or inputs which go back this node702}703PointsToNode* ptn = ptnode_adr(in->_idx);704assert(ptn != NULL, "node should be registered");705add_edge(n_ptn, ptn);706}707break;708}709case Op_LoadP:710case Op_LoadN:711case Op_LoadPLocked: {712// Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because713// ThreadLocal has RawPtr type.714const Type* t = _igvn->type(n);715if (t->make_ptr() != NULL) {716Node* adr = n->in(MemNode::Address);717add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);718break;719}720ELSE_FAIL("Op_LoadP");721}722case Op_Phi: {723// Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because724// ThreadLocal has RawPtr type.725const Type* t = n->as_Phi()->type();726if (t->make_ptr() != NULL) {727for (uint i = 1; i < n->req(); i++) {728Node* in = n->in(i);729if (in == NULL) {730continue; // ignore NULL731}732Node* uncast_in = in->uncast();733if (uncast_in->is_top() || uncast_in == n) {734continue; // ignore top or inputs which go back this node735}736PointsToNode* ptn = ptnode_adr(in->_idx);737assert(ptn != NULL, "node should be registered");738add_edge(n_ptn, ptn);739}740break;741}742ELSE_FAIL("Op_Phi");743}744case Op_Proj: {745// we are only interested in the oop result projection from a call746if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&747n->in(0)->as_Call()->returns_pointer()) {748add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);749break;750}751ELSE_FAIL("Op_Proj");752}753case Op_Rethrow: // Exception object escapes754case Op_Return: {755if (n->req() > TypeFunc::Parms &&756_igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {757// Treat Return value as LocalVar with GlobalEscape escape state.758add_local_var_and_edge(n, PointsToNode::GlobalEscape,759n->in(TypeFunc::Parms), NULL);760break;761}762ELSE_FAIL("Op_Return");763}764case Op_StoreP:765case Op_StoreN:766case Op_StoreNKlass:767case Op_StorePConditional:768case Op_CompareAndExchangeP:769case Op_CompareAndExchangeN:770case Op_CompareAndSwapP:771case Op_CompareAndSwapN:772case Op_WeakCompareAndSwapP:773case Op_WeakCompareAndSwapN:774case Op_GetAndSetP:775case Op_GetAndSetN: {776if (add_final_edges_unsafe_access(n, opcode)) {777break;778}779ELSE_FAIL("Op_StoreP");780}781case Op_AryEq:782case Op_HasNegatives:783case Op_StrComp:784case Op_StrEquals:785case Op_StrIndexOf:786case Op_StrIndexOfChar:787case Op_StrInflatedCopy:788case Op_StrCompressedCopy:789case Op_EncodeISOArray: {790// char[]/byte[] arrays passed to string intrinsic do not escape but791// they are not scalar replaceable. Adjust escape state for them.792// Start from in(2) edge since in(1) is memory edge.793for (uint i = 2; i < n->req(); i++) {794Node* adr = n->in(i);795const Type* at = _igvn->type(adr);796if (!adr->is_top() && at->isa_ptr()) {797assert(at == Type::TOP || at == TypePtr::NULL_PTR ||798at->isa_ptr() != NULL, "expecting a pointer");799if (adr->is_AddP()) {800adr = get_addp_base(adr);801}802PointsToNode* ptn = ptnode_adr(adr->_idx);803assert(ptn != NULL, "node should be registered");804add_edge(n_ptn, ptn);805}806}807break;808}809case Op_Blackhole: {810// All blackhole pointer arguments are globally escaping.811for (uint i = 0; i < n->req(); i++) {812Node* in = n->in(i);813if (in != nullptr) {814const Type* at = _igvn->type(in);815if (!at->isa_ptr()) continue;816817if (in->is_AddP()) {818in = get_addp_base(in);819}820821PointsToNode* ptn = ptnode_adr(in->_idx);822assert(ptn != nullptr, "should be defined already");823set_escape_state(ptn, PointsToNode::GlobalEscape);824add_edge(n_ptn, ptn);825}826}827break;828}829default: {830// This method should be called only for EA specific nodes which may831// miss some edges when they were created.832#ifdef ASSERT833n->dump(1);834#endif835guarantee(false, "unknown node");836}837}838return;839}840841void ConnectionGraph::add_to_congraph_unsafe_access(Node* n, uint opcode, Unique_Node_List* delayed_worklist) {842Node* adr = n->in(MemNode::Address);843const Type* adr_type = _igvn->type(adr);844adr_type = adr_type->make_ptr();845if (adr_type == NULL) {846return; // skip dead nodes847}848if (adr_type->isa_oopptr()849|| ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)850&& adr_type == TypeRawPtr::NOTNULL851&& is_captured_store_address(adr))) {852delayed_worklist->push(n); // Process it later.853#ifdef ASSERT854assert (adr->is_AddP(), "expecting an AddP");855if (adr_type == TypeRawPtr::NOTNULL) {856// Verify a raw address for a store captured by Initialize node.857int offs = (int) _igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);858assert(offs != Type::OffsetBot, "offset must be a constant");859}860#endif861} else {862// Ignore copy the displaced header to the BoxNode (OSR compilation).863if (adr->is_BoxLock()) {864return;865}866// Stored value escapes in unsafe access.867if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {868delayed_worklist->push(n); // Process unsafe access later.869return;870}871#ifdef ASSERT872n->dump(1);873assert(false, "not unsafe");874#endif875}876}877878bool ConnectionGraph::add_final_edges_unsafe_access(Node* n, uint opcode) {879Node* adr = n->in(MemNode::Address);880const Type *adr_type = _igvn->type(adr);881adr_type = adr_type->make_ptr();882#ifdef ASSERT883if (adr_type == NULL) {884n->dump(1);885assert(adr_type != NULL, "dead node should not be on list");886return true;887}888#endif889890if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN ||891opcode == Op_CompareAndExchangeN || opcode == Op_CompareAndExchangeP) {892add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);893}894895if (adr_type->isa_oopptr()896|| ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)897&& adr_type == TypeRawPtr::NOTNULL898&& is_captured_store_address(adr))) {899// Point Address to Value900PointsToNode* adr_ptn = ptnode_adr(adr->_idx);901assert(adr_ptn != NULL &&902adr_ptn->as_Field()->is_oop(), "node should be registered");903Node* val = n->in(MemNode::ValueIn);904PointsToNode* ptn = ptnode_adr(val->_idx);905assert(ptn != NULL, "node should be registered");906add_edge(adr_ptn, ptn);907return true;908} else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {909// Stored value escapes in unsafe access.910Node* val = n->in(MemNode::ValueIn);911PointsToNode* ptn = ptnode_adr(val->_idx);912assert(ptn != NULL, "node should be registered");913set_escape_state(ptn, PointsToNode::GlobalEscape);914// Add edge to object for unsafe access with offset.915PointsToNode* adr_ptn = ptnode_adr(adr->_idx);916assert(adr_ptn != NULL, "node should be registered");917if (adr_ptn->is_Field()) {918assert(adr_ptn->as_Field()->is_oop(), "should be oop field");919add_edge(adr_ptn, ptn);920}921return true;922}923return false;924}925926void ConnectionGraph::add_call_node(CallNode* call) {927assert(call->returns_pointer(), "only for call which returns pointer");928uint call_idx = call->_idx;929if (call->is_Allocate()) {930Node* k = call->in(AllocateNode::KlassNode);931const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();932assert(kt != NULL, "TypeKlassPtr required.");933ciKlass* cik = kt->klass();934PointsToNode::EscapeState es = PointsToNode::NoEscape;935bool scalar_replaceable = true;936if (call->is_AllocateArray()) {937if (!cik->is_array_klass()) { // StressReflectiveCode938es = PointsToNode::GlobalEscape;939} else {940int length = call->in(AllocateNode::ALength)->find_int_con(-1);941if (length < 0 || length > EliminateAllocationArraySizeLimit) {942// Not scalar replaceable if the length is not constant or too big.943scalar_replaceable = false;944}945}946} else { // Allocate instance947if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||948cik->is_subclass_of(_compile->env()->Reference_klass()) ||949!cik->is_instance_klass() || // StressReflectiveCode950!cik->as_instance_klass()->can_be_instantiated() ||951cik->as_instance_klass()->has_finalizer()) {952es = PointsToNode::GlobalEscape;953} else {954int nfields = cik->as_instance_klass()->nof_nonstatic_fields();955if (nfields > EliminateAllocationFieldsLimit) {956// Not scalar replaceable if there are too many fields.957scalar_replaceable = false;958}959}960}961add_java_object(call, es);962PointsToNode* ptn = ptnode_adr(call_idx);963if (!scalar_replaceable && ptn->scalar_replaceable()) {964ptn->set_scalar_replaceable(false);965}966} else if (call->is_CallStaticJava()) {967// Call nodes could be different types:968//969// 1. CallDynamicJavaNode (what happened during call is unknown):970//971// - mapped to GlobalEscape JavaObject node if oop is returned;972//973// - all oop arguments are escaping globally;974//975// 2. CallStaticJavaNode (execute bytecode analysis if possible):976//977// - the same as CallDynamicJavaNode if can't do bytecode analysis;978//979// - mapped to GlobalEscape JavaObject node if unknown oop is returned;980// - mapped to NoEscape JavaObject node if non-escaping object allocated981// during call is returned;982// - mapped to ArgEscape LocalVar node pointed to object arguments983// which are returned and does not escape during call;984//985// - oop arguments escaping status is defined by bytecode analysis;986//987// For a static call, we know exactly what method is being called.988// Use bytecode estimator to record whether the call's return value escapes.989ciMethod* meth = call->as_CallJava()->method();990if (meth == NULL) {991const char* name = call->as_CallStaticJava()->_name;992assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");993// Returns a newly allocated non-escaped object.994add_java_object(call, PointsToNode::NoEscape);995ptnode_adr(call_idx)->set_scalar_replaceable(false);996} else if (meth->is_boxing_method()) {997// Returns boxing object998PointsToNode::EscapeState es;999vmIntrinsics::ID intr = meth->intrinsic_id();1000if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {1001// It does not escape if object is always allocated.1002es = PointsToNode::NoEscape;1003} else {1004// It escapes globally if object could be loaded from cache.1005es = PointsToNode::GlobalEscape;1006}1007add_java_object(call, es);1008} else {1009BCEscapeAnalyzer* call_analyzer = meth->get_bcea();1010call_analyzer->copy_dependencies(_compile->dependencies());1011if (call_analyzer->is_return_allocated()) {1012// Returns a newly allocated non-escaped object, simply1013// update dependency information.1014// Mark it as NoEscape so that objects referenced by1015// it's fields will be marked as NoEscape at least.1016add_java_object(call, PointsToNode::NoEscape);1017ptnode_adr(call_idx)->set_scalar_replaceable(false);1018} else {1019// Determine whether any arguments are returned.1020const TypeTuple* d = call->tf()->domain();1021bool ret_arg = false;1022for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {1023if (d->field_at(i)->isa_ptr() != NULL &&1024call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {1025ret_arg = true;1026break;1027}1028}1029if (ret_arg) {1030add_local_var(call, PointsToNode::ArgEscape);1031} else {1032// Returns unknown object.1033map_ideal_node(call, phantom_obj);1034}1035}1036}1037} else {1038// An other type of call, assume the worst case:1039// returned value is unknown and globally escapes.1040assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");1041map_ideal_node(call, phantom_obj);1042}1043}10441045void ConnectionGraph::process_call_arguments(CallNode *call) {1046bool is_arraycopy = false;1047switch (call->Opcode()) {1048#ifdef ASSERT1049case Op_Allocate:1050case Op_AllocateArray:1051case Op_Lock:1052case Op_Unlock:1053assert(false, "should be done already");1054break;1055#endif1056case Op_ArrayCopy:1057case Op_CallLeafNoFP:1058// Most array copies are ArrayCopy nodes at this point but there1059// are still a few direct calls to the copy subroutines (See1060// PhaseStringOpts::copy_string())1061is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||1062call->as_CallLeaf()->is_call_to_arraycopystub();1063// fall through1064case Op_CallLeafVector:1065case Op_CallLeaf: {1066// Stub calls, objects do not escape but they are not scale replaceable.1067// Adjust escape state for outgoing arguments.1068const TypeTuple * d = call->tf()->domain();1069bool src_has_oops = false;1070for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {1071const Type* at = d->field_at(i);1072Node *arg = call->in(i);1073if (arg == NULL) {1074continue;1075}1076const Type *aat = _igvn->type(arg);1077if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {1078continue;1079}1080if (arg->is_AddP()) {1081//1082// The inline_native_clone() case when the arraycopy stub is called1083// after the allocation before Initialize and CheckCastPP nodes.1084// Or normal arraycopy for object arrays case.1085//1086// Set AddP's base (Allocate) as not scalar replaceable since1087// pointer to the base (with offset) is passed as argument.1088//1089arg = get_addp_base(arg);1090}1091PointsToNode* arg_ptn = ptnode_adr(arg->_idx);1092assert(arg_ptn != NULL, "should be registered");1093PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();1094if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {1095assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||1096aat->isa_ptr() != NULL, "expecting an Ptr");1097bool arg_has_oops = aat->isa_oopptr() &&1098(aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||1099(aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()));1100if (i == TypeFunc::Parms) {1101src_has_oops = arg_has_oops;1102}1103//1104// src or dst could be j.l.Object when other is basic type array:1105//1106// arraycopy(char[],0,Object*,0,size);1107// arraycopy(Object*,0,char[],0,size);1108//1109// Don't add edges in such cases.1110//1111bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&1112arg_has_oops && (i > TypeFunc::Parms);1113#ifdef ASSERT1114if (!(is_arraycopy ||1115BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||1116(call->as_CallLeaf()->_name != NULL &&1117(strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||1118strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||1119strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||1120strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||1121strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||1122strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||1123strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||1124strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_encryptAESCrypt") == 0 ||1125strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_decryptAESCrypt") == 0 ||1126strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 ||1127strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||1128strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||1129strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||1130strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||1131strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||1132strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||1133strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||1134strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||1135strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||1136strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||1137strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||1138strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||1139strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||1140strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||1141strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||1142strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||1143strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||1144strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||1145strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||1146strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||1147strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||1148strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0)1149))) {1150call->dump();1151fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);1152}1153#endif1154// Always process arraycopy's destination object since1155// we need to add all possible edges to references in1156// source object.1157if (arg_esc >= PointsToNode::ArgEscape &&1158!arg_is_arraycopy_dest) {1159continue;1160}1161PointsToNode::EscapeState es = PointsToNode::ArgEscape;1162if (call->is_ArrayCopy()) {1163ArrayCopyNode* ac = call->as_ArrayCopy();1164if (ac->is_clonebasic() ||1165ac->is_arraycopy_validated() ||1166ac->is_copyof_validated() ||1167ac->is_copyofrange_validated()) {1168es = PointsToNode::NoEscape;1169}1170}1171set_escape_state(arg_ptn, es);1172if (arg_is_arraycopy_dest) {1173Node* src = call->in(TypeFunc::Parms);1174if (src->is_AddP()) {1175src = get_addp_base(src);1176}1177PointsToNode* src_ptn = ptnode_adr(src->_idx);1178assert(src_ptn != NULL, "should be registered");1179if (arg_ptn != src_ptn) {1180// Special arraycopy edge:1181// A destination object's field can't have the source object1182// as base since objects escape states are not related.1183// Only escape state of destination object's fields affects1184// escape state of fields in source object.1185add_arraycopy(call, es, src_ptn, arg_ptn);1186}1187}1188}1189}1190break;1191}1192case Op_CallStaticJava: {1193// For a static call, we know exactly what method is being called.1194// Use bytecode estimator to record the call's escape affects1195#ifdef ASSERT1196const char* name = call->as_CallStaticJava()->_name;1197assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");1198#endif1199ciMethod* meth = call->as_CallJava()->method();1200if ((meth != NULL) && meth->is_boxing_method()) {1201break; // Boxing methods do not modify any oops.1202}1203BCEscapeAnalyzer* call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL;1204// fall-through if not a Java method or no analyzer information1205if (call_analyzer != NULL) {1206PointsToNode* call_ptn = ptnode_adr(call->_idx);1207const TypeTuple* d = call->tf()->domain();1208for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {1209const Type* at = d->field_at(i);1210int k = i - TypeFunc::Parms;1211Node* arg = call->in(i);1212PointsToNode* arg_ptn = ptnode_adr(arg->_idx);1213if (at->isa_ptr() != NULL &&1214call_analyzer->is_arg_returned(k)) {1215// The call returns arguments.1216if (call_ptn != NULL) { // Is call's result used?1217assert(call_ptn->is_LocalVar(), "node should be registered");1218assert(arg_ptn != NULL, "node should be registered");1219add_edge(call_ptn, arg_ptn);1220}1221}1222if (at->isa_oopptr() != NULL &&1223arg_ptn->escape_state() < PointsToNode::GlobalEscape) {1224if (!call_analyzer->is_arg_stack(k)) {1225// The argument global escapes1226set_escape_state(arg_ptn, PointsToNode::GlobalEscape);1227} else {1228set_escape_state(arg_ptn, PointsToNode::ArgEscape);1229if (!call_analyzer->is_arg_local(k)) {1230// The argument itself doesn't escape, but any fields might1231set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape);1232}1233}1234}1235}1236if (call_ptn != NULL && call_ptn->is_LocalVar()) {1237// The call returns arguments.1238assert(call_ptn->edge_count() > 0, "sanity");1239if (!call_analyzer->is_return_local()) {1240// Returns also unknown object.1241add_edge(call_ptn, phantom_obj);1242}1243}1244break;1245}1246}1247default: {1248// Fall-through here if not a Java method or no analyzer information1249// or some other type of call, assume the worst case: all arguments1250// globally escape.1251const TypeTuple* d = call->tf()->domain();1252for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {1253const Type* at = d->field_at(i);1254if (at->isa_oopptr() != NULL) {1255Node* arg = call->in(i);1256if (arg->is_AddP()) {1257arg = get_addp_base(arg);1258}1259assert(ptnode_adr(arg->_idx) != NULL, "should be defined already");1260set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape);1261}1262}1263}1264}1265}126612671268// Finish Graph construction.1269bool ConnectionGraph::complete_connection_graph(1270GrowableArray<PointsToNode*>& ptnodes_worklist,1271GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,1272GrowableArray<JavaObjectNode*>& java_objects_worklist,1273GrowableArray<FieldNode*>& oop_fields_worklist) {1274// Normally only 1-3 passes needed to build Connection Graph depending1275// on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.1276// Set limit to 20 to catch situation when something did go wrong and1277// bailout Escape Analysis.1278// Also limit build time to 20 sec (60 in debug VM), EscapeAnalysisTimeout flag.1279#define GRAPH_BUILD_ITER_LIMIT 2012801281// Propagate GlobalEscape and ArgEscape escape states and check that1282// we still have non-escaping objects. The method pushs on _worklist1283// Field nodes which reference phantom_object.1284if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {1285return false; // Nothing to do.1286}1287// Now propagate references to all JavaObject nodes.1288int java_objects_length = java_objects_worklist.length();1289elapsedTimer build_time;1290build_time.start();1291elapsedTimer time;1292bool timeout = false;1293int new_edges = 1;1294int iterations = 0;1295do {1296while ((new_edges > 0) &&1297(iterations++ < GRAPH_BUILD_ITER_LIMIT)) {1298double start_time = time.seconds();1299time.start();1300new_edges = 0;1301// Propagate references to phantom_object for nodes pushed on _worklist1302// by find_non_escaped_objects() and find_field_value().1303new_edges += add_java_object_edges(phantom_obj, false);1304for (int next = 0; next < java_objects_length; ++next) {1305JavaObjectNode* ptn = java_objects_worklist.at(next);1306new_edges += add_java_object_edges(ptn, true);13071308#define SAMPLE_SIZE 41309if ((next % SAMPLE_SIZE) == 0) {1310// Each 4 iterations calculate how much time it will take1311// to complete graph construction.1312time.stop();1313// Poll for requests from shutdown mechanism to quiesce compiler1314// because Connection graph construction may take long time.1315CompileBroker::maybe_block();1316double stop_time = time.seconds();1317double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;1318double time_until_end = time_per_iter * (double)(java_objects_length - next);1319if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {1320timeout = true;1321break; // Timeout1322}1323start_time = stop_time;1324time.start();1325}1326#undef SAMPLE_SIZE13271328}1329if (timeout) break;1330if (new_edges > 0) {1331// Update escape states on each iteration if graph was updated.1332if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {1333return false; // Nothing to do.1334}1335}1336time.stop();1337if (time.seconds() >= EscapeAnalysisTimeout) {1338timeout = true;1339break;1340}1341}1342if ((iterations < GRAPH_BUILD_ITER_LIMIT) && !timeout) {1343time.start();1344// Find fields which have unknown value.1345int fields_length = oop_fields_worklist.length();1346for (int next = 0; next < fields_length; next++) {1347FieldNode* field = oop_fields_worklist.at(next);1348if (field->edge_count() == 0) {1349new_edges += find_field_value(field);1350// This code may added new edges to phantom_object.1351// Need an other cycle to propagate references to phantom_object.1352}1353}1354time.stop();1355if (time.seconds() >= EscapeAnalysisTimeout) {1356timeout = true;1357break;1358}1359} else {1360new_edges = 0; // Bailout1361}1362} while (new_edges > 0);13631364build_time.stop();1365_build_time = build_time.seconds();1366_build_iterations = iterations;13671368// Bailout if passed limits.1369if ((iterations >= GRAPH_BUILD_ITER_LIMIT) || timeout) {1370Compile* C = _compile;1371if (C->log() != NULL) {1372C->log()->begin_elem("connectionGraph_bailout reason='reached ");1373C->log()->text("%s", timeout ? "time" : "iterations");1374C->log()->end_elem(" limit'");1375}1376assert(ExitEscapeAnalysisOnTimeout, "infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d",1377_build_time, _build_iterations, nodes_size(), ptnodes_worklist.length());1378// Possible infinite build_connection_graph loop,1379// bailout (no changes to ideal graph were made).1380return false;1381}1382#ifdef ASSERT1383if (Verbose && PrintEscapeAnalysis) {1384tty->print_cr("EA: %d iterations and %f sec to build connection graph with %d nodes and worklist size %d",1385_build_iterations, _build_time, nodes_size(), ptnodes_worklist.length());1386}1387#endif13881389#undef GRAPH_BUILD_ITER_LIMIT13901391// Find fields initialized by NULL for non-escaping Allocations.1392int non_escaped_length = non_escaped_allocs_worklist.length();1393for (int next = 0; next < non_escaped_length; next++) {1394JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);1395PointsToNode::EscapeState es = ptn->escape_state();1396assert(es <= PointsToNode::ArgEscape, "sanity");1397if (es == PointsToNode::NoEscape) {1398if (find_init_values_null(ptn, _igvn) > 0) {1399// Adding references to NULL object does not change escape states1400// since it does not escape. Also no fields are added to NULL object.1401add_java_object_edges(null_obj, false);1402}1403}1404Node* n = ptn->ideal_node();1405if (n->is_Allocate()) {1406// The object allocated by this Allocate node will never be1407// seen by an other thread. Mark it so that when it is1408// expanded no MemBarStoreStore is added.1409InitializeNode* ini = n->as_Allocate()->initialization();1410if (ini != NULL)1411ini->set_does_not_escape();1412}1413}1414return true; // Finished graph construction.1415}14161417// Propagate GlobalEscape and ArgEscape escape states to all nodes1418// and check that we still have non-escaping java objects.1419bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,1420GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist) {1421GrowableArray<PointsToNode*> escape_worklist;1422// First, put all nodes with GlobalEscape and ArgEscape states on worklist.1423int ptnodes_length = ptnodes_worklist.length();1424for (int next = 0; next < ptnodes_length; ++next) {1425PointsToNode* ptn = ptnodes_worklist.at(next);1426if (ptn->escape_state() >= PointsToNode::ArgEscape ||1427ptn->fields_escape_state() >= PointsToNode::ArgEscape) {1428escape_worklist.push(ptn);1429}1430}1431// Set escape states to referenced nodes (edges list).1432while (escape_worklist.length() > 0) {1433PointsToNode* ptn = escape_worklist.pop();1434PointsToNode::EscapeState es = ptn->escape_state();1435PointsToNode::EscapeState field_es = ptn->fields_escape_state();1436if (ptn->is_Field() && ptn->as_Field()->is_oop() &&1437es >= PointsToNode::ArgEscape) {1438// GlobalEscape or ArgEscape state of field means it has unknown value.1439if (add_edge(ptn, phantom_obj)) {1440// New edge was added1441add_field_uses_to_worklist(ptn->as_Field());1442}1443}1444for (EdgeIterator i(ptn); i.has_next(); i.next()) {1445PointsToNode* e = i.get();1446if (e->is_Arraycopy()) {1447assert(ptn->arraycopy_dst(), "sanity");1448// Propagate only fields escape state through arraycopy edge.1449if (e->fields_escape_state() < field_es) {1450set_fields_escape_state(e, field_es);1451escape_worklist.push(e);1452}1453} else if (es >= field_es) {1454// fields_escape_state is also set to 'es' if it is less than 'es'.1455if (e->escape_state() < es) {1456set_escape_state(e, es);1457escape_worklist.push(e);1458}1459} else {1460// Propagate field escape state.1461bool es_changed = false;1462if (e->fields_escape_state() < field_es) {1463set_fields_escape_state(e, field_es);1464es_changed = true;1465}1466if ((e->escape_state() < field_es) &&1467e->is_Field() && ptn->is_JavaObject() &&1468e->as_Field()->is_oop()) {1469// Change escape state of referenced fields.1470set_escape_state(e, field_es);1471es_changed = true;1472} else if (e->escape_state() < es) {1473set_escape_state(e, es);1474es_changed = true;1475}1476if (es_changed) {1477escape_worklist.push(e);1478}1479}1480}1481}1482// Remove escaped objects from non_escaped list.1483for (int next = non_escaped_allocs_worklist.length()-1; next >= 0 ; --next) {1484JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);1485if (ptn->escape_state() >= PointsToNode::GlobalEscape) {1486non_escaped_allocs_worklist.delete_at(next);1487}1488if (ptn->escape_state() == PointsToNode::NoEscape) {1489// Find fields in non-escaped allocations which have unknown value.1490find_init_values_phantom(ptn);1491}1492}1493return (non_escaped_allocs_worklist.length() > 0);1494}14951496// Add all references to JavaObject node by walking over all uses.1497int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) {1498int new_edges = 0;1499if (populate_worklist) {1500// Populate _worklist by uses of jobj's uses.1501for (UseIterator i(jobj); i.has_next(); i.next()) {1502PointsToNode* use = i.get();1503if (use->is_Arraycopy()) {1504continue;1505}1506add_uses_to_worklist(use);1507if (use->is_Field() && use->as_Field()->is_oop()) {1508// Put on worklist all field's uses (loads) and1509// related field nodes (same base and offset).1510add_field_uses_to_worklist(use->as_Field());1511}1512}1513}1514for (int l = 0; l < _worklist.length(); l++) {1515PointsToNode* use = _worklist.at(l);1516if (PointsToNode::is_base_use(use)) {1517// Add reference from jobj to field and from field to jobj (field's base).1518use = PointsToNode::get_use_node(use)->as_Field();1519if (add_base(use->as_Field(), jobj)) {1520new_edges++;1521}1522continue;1523}1524assert(!use->is_JavaObject(), "sanity");1525if (use->is_Arraycopy()) {1526if (jobj == null_obj) { // NULL object does not have field edges1527continue;1528}1529// Added edge from Arraycopy node to arraycopy's source java object1530if (add_edge(use, jobj)) {1531jobj->set_arraycopy_src();1532new_edges++;1533}1534// and stop here.1535continue;1536}1537if (!add_edge(use, jobj)) {1538continue; // No new edge added, there was such edge already.1539}1540new_edges++;1541if (use->is_LocalVar()) {1542add_uses_to_worklist(use);1543if (use->arraycopy_dst()) {1544for (EdgeIterator i(use); i.has_next(); i.next()) {1545PointsToNode* e = i.get();1546if (e->is_Arraycopy()) {1547if (jobj == null_obj) { // NULL object does not have field edges1548continue;1549}1550// Add edge from arraycopy's destination java object to Arraycopy node.1551if (add_edge(jobj, e)) {1552new_edges++;1553jobj->set_arraycopy_dst();1554}1555}1556}1557}1558} else {1559// Added new edge to stored in field values.1560// Put on worklist all field's uses (loads) and1561// related field nodes (same base and offset).1562add_field_uses_to_worklist(use->as_Field());1563}1564}1565_worklist.clear();1566_in_worklist.reset();1567return new_edges;1568}15691570// Put on worklist all related field nodes.1571void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {1572assert(field->is_oop(), "sanity");1573int offset = field->offset();1574add_uses_to_worklist(field);1575// Loop over all bases of this field and push on worklist Field nodes1576// with the same offset and base (since they may reference the same field).1577for (BaseIterator i(field); i.has_next(); i.next()) {1578PointsToNode* base = i.get();1579add_fields_to_worklist(field, base);1580// Check if the base was source object of arraycopy and go over arraycopy's1581// destination objects since values stored to a field of source object are1582// accessable by uses (loads) of fields of destination objects.1583if (base->arraycopy_src()) {1584for (UseIterator j(base); j.has_next(); j.next()) {1585PointsToNode* arycp = j.get();1586if (arycp->is_Arraycopy()) {1587for (UseIterator k(arycp); k.has_next(); k.next()) {1588PointsToNode* abase = k.get();1589if (abase->arraycopy_dst() && abase != base) {1590// Look for the same arraycopy reference.1591add_fields_to_worklist(field, abase);1592}1593}1594}1595}1596}1597}1598}15991600// Put on worklist all related field nodes.1601void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {1602int offset = field->offset();1603if (base->is_LocalVar()) {1604for (UseIterator j(base); j.has_next(); j.next()) {1605PointsToNode* f = j.get();1606if (PointsToNode::is_base_use(f)) { // Field1607f = PointsToNode::get_use_node(f);1608if (f == field || !f->as_Field()->is_oop()) {1609continue;1610}1611int offs = f->as_Field()->offset();1612if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {1613add_to_worklist(f);1614}1615}1616}1617} else {1618assert(base->is_JavaObject(), "sanity");1619if (// Skip phantom_object since it is only used to indicate that1620// this field's content globally escapes.1621(base != phantom_obj) &&1622// NULL object node does not have fields.1623(base != null_obj)) {1624for (EdgeIterator i(base); i.has_next(); i.next()) {1625PointsToNode* f = i.get();1626// Skip arraycopy edge since store to destination object field1627// does not update value in source object field.1628if (f->is_Arraycopy()) {1629assert(base->arraycopy_dst(), "sanity");1630continue;1631}1632if (f == field || !f->as_Field()->is_oop()) {1633continue;1634}1635int offs = f->as_Field()->offset();1636if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {1637add_to_worklist(f);1638}1639}1640}1641}1642}16431644// Find fields which have unknown value.1645int ConnectionGraph::find_field_value(FieldNode* field) {1646// Escaped fields should have init value already.1647assert(field->escape_state() == PointsToNode::NoEscape, "sanity");1648int new_edges = 0;1649for (BaseIterator i(field); i.has_next(); i.next()) {1650PointsToNode* base = i.get();1651if (base->is_JavaObject()) {1652// Skip Allocate's fields which will be processed later.1653if (base->ideal_node()->is_Allocate()) {1654return 0;1655}1656assert(base == null_obj, "only NULL ptr base expected here");1657}1658}1659if (add_edge(field, phantom_obj)) {1660// New edge was added1661new_edges++;1662add_field_uses_to_worklist(field);1663}1664return new_edges;1665}16661667// Find fields initializing values for allocations.1668int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {1669assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");1670Node* alloc = pta->ideal_node();16711672// Do nothing for Allocate nodes since its fields values are1673// "known" unless they are initialized by arraycopy/clone.1674if (alloc->is_Allocate() && !pta->arraycopy_dst()) {1675return 0;1676}1677assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity");1678#ifdef ASSERT1679if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == NULL) {1680const char* name = alloc->as_CallStaticJava()->_name;1681assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");1682}1683#endif1684// Non-escaped allocation returned from Java or runtime call have unknown values in fields.1685int new_edges = 0;1686for (EdgeIterator i(pta); i.has_next(); i.next()) {1687PointsToNode* field = i.get();1688if (field->is_Field() && field->as_Field()->is_oop()) {1689if (add_edge(field, phantom_obj)) {1690// New edge was added1691new_edges++;1692add_field_uses_to_worklist(field->as_Field());1693}1694}1695}1696return new_edges;1697}16981699// Find fields initializing values for allocations.1700int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseTransform* phase) {1701assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");1702Node* alloc = pta->ideal_node();1703// Do nothing for Call nodes since its fields values are unknown.1704if (!alloc->is_Allocate()) {1705return 0;1706}1707InitializeNode* ini = alloc->as_Allocate()->initialization();1708bool visited_bottom_offset = false;1709GrowableArray<int> offsets_worklist;1710int new_edges = 0;17111712// Check if an oop field's initializing value is recorded and add1713// a corresponding NULL if field's value if it is not recorded.1714// Connection Graph does not record a default initialization by NULL1715// captured by Initialize node.1716//1717for (EdgeIterator i(pta); i.has_next(); i.next()) {1718PointsToNode* field = i.get(); // Field (AddP)1719if (!field->is_Field() || !field->as_Field()->is_oop()) {1720continue; // Not oop field1721}1722int offset = field->as_Field()->offset();1723if (offset == Type::OffsetBot) {1724if (!visited_bottom_offset) {1725// OffsetBot is used to reference array's element,1726// always add reference to NULL to all Field nodes since we don't1727// known which element is referenced.1728if (add_edge(field, null_obj)) {1729// New edge was added1730new_edges++;1731add_field_uses_to_worklist(field->as_Field());1732visited_bottom_offset = true;1733}1734}1735} else {1736// Check only oop fields.1737const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type();1738if (adr_type->isa_rawptr()) {1739#ifdef ASSERT1740// Raw pointers are used for initializing stores so skip it1741// since it should be recorded already1742Node* base = get_addp_base(field->ideal_node());1743assert(adr_type->isa_rawptr() && is_captured_store_address(field->ideal_node()), "unexpected pointer type");1744#endif1745continue;1746}1747if (!offsets_worklist.contains(offset)) {1748offsets_worklist.append(offset);1749Node* value = NULL;1750if (ini != NULL) {1751// StoreP::memory_type() == T_ADDRESS1752BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS;1753Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase);1754// Make sure initializing store has the same type as this AddP.1755// This AddP may reference non existing field because it is on a1756// dead branch of bimorphic call which is not eliminated yet.1757if (store != NULL && store->is_Store() &&1758store->as_Store()->memory_type() == ft) {1759value = store->in(MemNode::ValueIn);1760#ifdef ASSERT1761if (VerifyConnectionGraph) {1762// Verify that AddP already points to all objects the value points to.1763PointsToNode* val = ptnode_adr(value->_idx);1764assert((val != NULL), "should be processed already");1765PointsToNode* missed_obj = NULL;1766if (val->is_JavaObject()) {1767if (!field->points_to(val->as_JavaObject())) {1768missed_obj = val;1769}1770} else {1771if (!val->is_LocalVar() || (val->edge_count() == 0)) {1772tty->print_cr("----------init store has invalid value -----");1773store->dump();1774val->dump();1775assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");1776}1777for (EdgeIterator j(val); j.has_next(); j.next()) {1778PointsToNode* obj = j.get();1779if (obj->is_JavaObject()) {1780if (!field->points_to(obj->as_JavaObject())) {1781missed_obj = obj;1782break;1783}1784}1785}1786}1787if (missed_obj != NULL) {1788tty->print_cr("----------field---------------------------------");1789field->dump();1790tty->print_cr("----------missed referernce to object-----------");1791missed_obj->dump();1792tty->print_cr("----------object referernced by init store -----");1793store->dump();1794val->dump();1795assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");1796}1797}1798#endif1799} else {1800// There could be initializing stores which follow allocation.1801// For example, a volatile field store is not collected1802// by Initialize node.1803//1804// Need to check for dependent loads to separate such stores from1805// stores which follow loads. For now, add initial value NULL so1806// that compare pointers optimization works correctly.1807}1808}1809if (value == NULL) {1810// A field's initializing value was not recorded. Add NULL.1811if (add_edge(field, null_obj)) {1812// New edge was added1813new_edges++;1814add_field_uses_to_worklist(field->as_Field());1815}1816}1817}1818}1819}1820return new_edges;1821}18221823// Adjust scalar_replaceable state after Connection Graph is built.1824void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj) {1825// Search for non-escaping objects which are not scalar replaceable1826// and mark them to propagate the state to referenced objects.18271828for (UseIterator i(jobj); i.has_next(); i.next()) {1829PointsToNode* use = i.get();1830if (use->is_Arraycopy()) {1831continue;1832}1833if (use->is_Field()) {1834FieldNode* field = use->as_Field();1835assert(field->is_oop() && field->scalar_replaceable(), "sanity");1836// 1. An object is not scalar replaceable if the field into which it is1837// stored has unknown offset (stored into unknown element of an array).1838if (field->offset() == Type::OffsetBot) {1839jobj->set_scalar_replaceable(false);1840return;1841}1842// 2. An object is not scalar replaceable if the field into which it is1843// stored has multiple bases one of which is null.1844if (field->base_count() > 1) {1845for (BaseIterator i(field); i.has_next(); i.next()) {1846PointsToNode* base = i.get();1847if (base == null_obj) {1848jobj->set_scalar_replaceable(false);1849return;1850}1851}1852}1853}1854assert(use->is_Field() || use->is_LocalVar(), "sanity");1855// 3. An object is not scalar replaceable if it is merged with other objects.1856for (EdgeIterator j(use); j.has_next(); j.next()) {1857PointsToNode* ptn = j.get();1858if (ptn->is_JavaObject() && ptn != jobj) {1859// Mark all objects.1860jobj->set_scalar_replaceable(false);1861ptn->set_scalar_replaceable(false);1862}1863}1864if (!jobj->scalar_replaceable()) {1865return;1866}1867}18681869for (EdgeIterator j(jobj); j.has_next(); j.next()) {1870if (j.get()->is_Arraycopy()) {1871continue;1872}18731874// Non-escaping object node should point only to field nodes.1875FieldNode* field = j.get()->as_Field();1876int offset = field->as_Field()->offset();18771878// 4. An object is not scalar replaceable if it has a field with unknown1879// offset (array's element is accessed in loop).1880if (offset == Type::OffsetBot) {1881jobj->set_scalar_replaceable(false);1882return;1883}1884// 5. Currently an object is not scalar replaceable if a LoadStore node1885// access its field since the field value is unknown after it.1886//1887Node* n = field->ideal_node();18881889// Test for an unsafe access that was parsed as maybe off heap1890// (with a CheckCastPP to raw memory).1891assert(n->is_AddP(), "expect an address computation");1892if (n->in(AddPNode::Base)->is_top() &&1893n->in(AddPNode::Address)->Opcode() == Op_CheckCastPP) {1894assert(n->in(AddPNode::Address)->bottom_type()->isa_rawptr(), "raw address so raw cast expected");1895assert(_igvn->type(n->in(AddPNode::Address)->in(1))->isa_oopptr(), "cast pattern at unsafe access expected");1896jobj->set_scalar_replaceable(false);1897return;1898}18991900for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {1901Node* u = n->fast_out(i);1902if (u->is_LoadStore() || (u->is_Mem() && u->as_Mem()->is_mismatched_access())) {1903jobj->set_scalar_replaceable(false);1904return;1905}1906}19071908// 6. Or the address may point to more then one object. This may produce1909// the false positive result (set not scalar replaceable)1910// since the flow-insensitive escape analysis can't separate1911// the case when stores overwrite the field's value from the case1912// when stores happened on different control branches.1913//1914// Note: it will disable scalar replacement in some cases:1915//1916// Point p[] = new Point[1];1917// p[0] = new Point(); // Will be not scalar replaced1918//1919// but it will save us from incorrect optimizations in next cases:1920//1921// Point p[] = new Point[1];1922// if ( x ) p[0] = new Point(); // Will be not scalar replaced1923//1924if (field->base_count() > 1) {1925for (BaseIterator i(field); i.has_next(); i.next()) {1926PointsToNode* base = i.get();1927// Don't take into account LocalVar nodes which1928// may point to only one object which should be also1929// this field's base by now.1930if (base->is_JavaObject() && base != jobj) {1931// Mark all bases.1932jobj->set_scalar_replaceable(false);1933base->set_scalar_replaceable(false);1934}1935}1936}1937}1938}19391940#ifdef ASSERT1941void ConnectionGraph::verify_connection_graph(1942GrowableArray<PointsToNode*>& ptnodes_worklist,1943GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,1944GrowableArray<JavaObjectNode*>& java_objects_worklist,1945GrowableArray<Node*>& addp_worklist) {1946// Verify that graph is complete - no new edges could be added.1947int java_objects_length = java_objects_worklist.length();1948int non_escaped_length = non_escaped_allocs_worklist.length();1949int new_edges = 0;1950for (int next = 0; next < java_objects_length; ++next) {1951JavaObjectNode* ptn = java_objects_worklist.at(next);1952new_edges += add_java_object_edges(ptn, true);1953}1954assert(new_edges == 0, "graph was not complete");1955// Verify that escape state is final.1956int length = non_escaped_allocs_worklist.length();1957find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist);1958assert((non_escaped_length == non_escaped_allocs_worklist.length()) &&1959(non_escaped_length == length) &&1960(_worklist.length() == 0), "escape state was not final");19611962// Verify fields information.1963int addp_length = addp_worklist.length();1964for (int next = 0; next < addp_length; ++next ) {1965Node* n = addp_worklist.at(next);1966FieldNode* field = ptnode_adr(n->_idx)->as_Field();1967if (field->is_oop()) {1968// Verify that field has all bases1969Node* base = get_addp_base(n);1970PointsToNode* ptn = ptnode_adr(base->_idx);1971if (ptn->is_JavaObject()) {1972assert(field->has_base(ptn->as_JavaObject()), "sanity");1973} else {1974assert(ptn->is_LocalVar(), "sanity");1975for (EdgeIterator i(ptn); i.has_next(); i.next()) {1976PointsToNode* e = i.get();1977if (e->is_JavaObject()) {1978assert(field->has_base(e->as_JavaObject()), "sanity");1979}1980}1981}1982// Verify that all fields have initializing values.1983if (field->edge_count() == 0) {1984tty->print_cr("----------field does not have references----------");1985field->dump();1986for (BaseIterator i(field); i.has_next(); i.next()) {1987PointsToNode* base = i.get();1988tty->print_cr("----------field has next base---------------------");1989base->dump();1990if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) {1991tty->print_cr("----------base has fields-------------------------");1992for (EdgeIterator j(base); j.has_next(); j.next()) {1993j.get()->dump();1994}1995tty->print_cr("----------base has references---------------------");1996for (UseIterator j(base); j.has_next(); j.next()) {1997j.get()->dump();1998}1999}2000}2001for (UseIterator i(field); i.has_next(); i.next()) {2002i.get()->dump();2003}2004assert(field->edge_count() > 0, "sanity");2005}2006}2007}2008}2009#endif20102011// Optimize ideal graph.2012void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,2013GrowableArray<Node*>& storestore_worklist) {2014Compile* C = _compile;2015PhaseIterGVN* igvn = _igvn;2016if (EliminateLocks) {2017// Mark locks before changing ideal graph.2018int cnt = C->macro_count();2019for (int i = 0; i < cnt; i++) {2020Node *n = C->macro_node(i);2021if (n->is_AbstractLock()) { // Lock and Unlock nodes2022AbstractLockNode* alock = n->as_AbstractLock();2023if (!alock->is_non_esc_obj()) {2024if (not_global_escape(alock->obj_node())) {2025assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");2026// The lock could be marked eliminated by lock coarsening2027// code during first IGVN before EA. Replace coarsened flag2028// to eliminate all associated locks/unlocks.2029#ifdef ASSERT2030alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");2031#endif2032alock->set_non_esc_obj();2033}2034}2035}2036}2037}20382039if (OptimizePtrCompare) {2040for (int i = 0; i < ptr_cmp_worklist.length(); i++) {2041Node *n = ptr_cmp_worklist.at(i);2042const TypeInt* tcmp = optimize_ptr_compare(n);2043if (tcmp->singleton()) {2044Node* cmp = igvn->makecon(tcmp);2045#ifndef PRODUCT2046if (PrintOptimizePtrCompare) {2047tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));2048if (Verbose) {2049n->dump(1);2050}2051}2052#endif2053igvn->replace_node(n, cmp);2054}2055}2056}20572058// For MemBarStoreStore nodes added in library_call.cpp, check2059// escape status of associated AllocateNode and optimize out2060// MemBarStoreStore node if the allocated object never escapes.2061for (int i = 0; i < storestore_worklist.length(); i++) {2062Node* storestore = storestore_worklist.at(i);2063assert(storestore->is_MemBarStoreStore(), "");2064Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);2065if (alloc->is_Allocate() && not_global_escape(alloc)) {2066MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);2067mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));2068mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));2069igvn->register_new_node_with_optimizer(mb);2070igvn->replace_node(storestore, mb);2071}2072}2073}20742075// Optimize objects compare.2076const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* n) {2077assert(OptimizePtrCompare, "sanity");2078const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO2079const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE2080const TypeInt* UNKNOWN = TypeInt::CC; // [-1, 0,1]20812082PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx);2083PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx);2084JavaObjectNode* jobj1 = unique_java_object(n->in(1));2085JavaObjectNode* jobj2 = unique_java_object(n->in(2));2086assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");2087assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");20882089// Check simple cases first.2090if (jobj1 != NULL) {2091if (jobj1->escape_state() == PointsToNode::NoEscape) {2092if (jobj1 == jobj2) {2093// Comparing the same not escaping object.2094return EQ;2095}2096Node* obj = jobj1->ideal_node();2097// Comparing not escaping allocation.2098if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&2099!ptn2->points_to(jobj1)) {2100return NE; // This includes nullness check.2101}2102}2103}2104if (jobj2 != NULL) {2105if (jobj2->escape_state() == PointsToNode::NoEscape) {2106Node* obj = jobj2->ideal_node();2107// Comparing not escaping allocation.2108if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&2109!ptn1->points_to(jobj2)) {2110return NE; // This includes nullness check.2111}2112}2113}2114if (jobj1 != NULL && jobj1 != phantom_obj &&2115jobj2 != NULL && jobj2 != phantom_obj &&2116jobj1->ideal_node()->is_Con() &&2117jobj2->ideal_node()->is_Con()) {2118// Klass or String constants compare. Need to be careful with2119// compressed pointers - compare types of ConN and ConP instead of nodes.2120const Type* t1 = jobj1->ideal_node()->get_ptr_type();2121const Type* t2 = jobj2->ideal_node()->get_ptr_type();2122if (t1->make_ptr() == t2->make_ptr()) {2123return EQ;2124} else {2125return NE;2126}2127}2128if (ptn1->meet(ptn2)) {2129return UNKNOWN; // Sets are not disjoint2130}21312132// Sets are disjoint.2133bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj);2134bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj);2135bool set1_has_null_ptr = ptn1->points_to(null_obj);2136bool set2_has_null_ptr = ptn2->points_to(null_obj);2137if ((set1_has_unknown_ptr && set2_has_null_ptr) ||2138(set2_has_unknown_ptr && set1_has_null_ptr)) {2139// Check nullness of unknown object.2140return UNKNOWN;2141}21422143// Disjointness by itself is not sufficient since2144// alias analysis is not complete for escaped objects.2145// Disjoint sets are definitely unrelated only when2146// at least one set has only not escaping allocations.2147if (!set1_has_unknown_ptr && !set1_has_null_ptr) {2148if (ptn1->non_escaping_allocation()) {2149return NE;2150}2151}2152if (!set2_has_unknown_ptr && !set2_has_null_ptr) {2153if (ptn2->non_escaping_allocation()) {2154return NE;2155}2156}2157return UNKNOWN;2158}21592160// Connection Graph construction functions.21612162void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) {2163PointsToNode* ptadr = _nodes.at(n->_idx);2164if (ptadr != NULL) {2165assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity");2166return;2167}2168Compile* C = _compile;2169ptadr = new (C->comp_arena()) LocalVarNode(this, n, es);2170map_ideal_node(n, ptadr);2171}21722173void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) {2174PointsToNode* ptadr = _nodes.at(n->_idx);2175if (ptadr != NULL) {2176assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity");2177return;2178}2179Compile* C = _compile;2180ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es);2181map_ideal_node(n, ptadr);2182}21832184void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) {2185PointsToNode* ptadr = _nodes.at(n->_idx);2186if (ptadr != NULL) {2187assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity");2188return;2189}2190bool unsafe = false;2191bool is_oop = is_oop_field(n, offset, &unsafe);2192if (unsafe) {2193es = PointsToNode::GlobalEscape;2194}2195Compile* C = _compile;2196FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop);2197map_ideal_node(n, field);2198}21992200void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es,2201PointsToNode* src, PointsToNode* dst) {2202assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");2203assert((src != null_obj) && (dst != null_obj), "not for ConP NULL");2204PointsToNode* ptadr = _nodes.at(n->_idx);2205if (ptadr != NULL) {2206assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");2207return;2208}2209Compile* C = _compile;2210ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);2211map_ideal_node(n, ptadr);2212// Add edge from arraycopy node to source object.2213(void)add_edge(ptadr, src);2214src->set_arraycopy_src();2215// Add edge from destination object to arraycopy node.2216(void)add_edge(dst, ptadr);2217dst->set_arraycopy_dst();2218}22192220bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {2221const Type* adr_type = n->as_AddP()->bottom_type();2222BasicType bt = T_INT;2223if (offset == Type::OffsetBot) {2224// Check only oop fields.2225if (!adr_type->isa_aryptr() ||2226(adr_type->isa_aryptr()->klass() == NULL) ||2227adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {2228// OffsetBot is used to reference array's element. Ignore first AddP.2229if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) {2230bt = T_OBJECT;2231}2232}2233} else if (offset != oopDesc::klass_offset_in_bytes()) {2234if (adr_type->isa_instptr()) {2235ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();2236if (field != NULL) {2237bt = field->layout_type();2238} else {2239// Check for unsafe oop field access2240if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||2241n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||2242n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||2243BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {2244bt = T_OBJECT;2245(*unsafe) = true;2246}2247}2248} else if (adr_type->isa_aryptr()) {2249if (offset == arrayOopDesc::length_offset_in_bytes()) {2250// Ignore array length load.2251} else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {2252// Ignore first AddP.2253} else {2254const Type* elemtype = adr_type->isa_aryptr()->elem();2255bt = elemtype->array_element_basic_type();2256}2257} else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {2258// Allocation initialization, ThreadLocal field access, unsafe access2259if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||2260n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||2261n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||2262BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {2263bt = T_OBJECT;2264}2265}2266}2267// Note: T_NARROWOOP is not classed as a real reference type2268return (is_reference_type(bt) || bt == T_NARROWOOP);2269}22702271// Returns unique pointed java object or NULL.2272JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {2273assert(!_collecting, "should not call when constructed graph");2274// If the node was created after the escape computation we can't answer.2275uint idx = n->_idx;2276if (idx >= nodes_size()) {2277return NULL;2278}2279PointsToNode* ptn = ptnode_adr(idx);2280if (ptn == NULL) {2281return NULL;2282}2283if (ptn->is_JavaObject()) {2284return ptn->as_JavaObject();2285}2286assert(ptn->is_LocalVar(), "sanity");2287// Check all java objects it points to.2288JavaObjectNode* jobj = NULL;2289for (EdgeIterator i(ptn); i.has_next(); i.next()) {2290PointsToNode* e = i.get();2291if (e->is_JavaObject()) {2292if (jobj == NULL) {2293jobj = e->as_JavaObject();2294} else if (jobj != e) {2295return NULL;2296}2297}2298}2299return jobj;2300}23012302// Return true if this node points only to non-escaping allocations.2303bool PointsToNode::non_escaping_allocation() {2304if (is_JavaObject()) {2305Node* n = ideal_node();2306if (n->is_Allocate() || n->is_CallStaticJava()) {2307return (escape_state() == PointsToNode::NoEscape);2308} else {2309return false;2310}2311}2312assert(is_LocalVar(), "sanity");2313// Check all java objects it points to.2314for (EdgeIterator i(this); i.has_next(); i.next()) {2315PointsToNode* e = i.get();2316if (e->is_JavaObject()) {2317Node* n = e->ideal_node();2318if ((e->escape_state() != PointsToNode::NoEscape) ||2319!(n->is_Allocate() || n->is_CallStaticJava())) {2320return false;2321}2322}2323}2324return true;2325}23262327// Return true if we know the node does not escape globally.2328bool ConnectionGraph::not_global_escape(Node *n) {2329assert(!_collecting, "should not call during graph construction");2330// If the node was created after the escape computation we can't answer.2331uint idx = n->_idx;2332if (idx >= nodes_size()) {2333return false;2334}2335PointsToNode* ptn = ptnode_adr(idx);2336if (ptn == NULL) {2337return false; // not in congraph (e.g. ConI)2338}2339PointsToNode::EscapeState es = ptn->escape_state();2340// If we have already computed a value, return it.2341if (es >= PointsToNode::GlobalEscape) {2342return false;2343}2344if (ptn->is_JavaObject()) {2345return true; // (es < PointsToNode::GlobalEscape);2346}2347assert(ptn->is_LocalVar(), "sanity");2348// Check all java objects it points to.2349for (EdgeIterator i(ptn); i.has_next(); i.next()) {2350if (i.get()->escape_state() >= PointsToNode::GlobalEscape) {2351return false;2352}2353}2354return true;2355}235623572358// Helper functions23592360// Return true if this node points to specified node or nodes it points to.2361bool PointsToNode::points_to(JavaObjectNode* ptn) const {2362if (is_JavaObject()) {2363return (this == ptn);2364}2365assert(is_LocalVar() || is_Field(), "sanity");2366for (EdgeIterator i(this); i.has_next(); i.next()) {2367if (i.get() == ptn) {2368return true;2369}2370}2371return false;2372}23732374// Return true if one node points to an other.2375bool PointsToNode::meet(PointsToNode* ptn) {2376if (this == ptn) {2377return true;2378} else if (ptn->is_JavaObject()) {2379return this->points_to(ptn->as_JavaObject());2380} else if (this->is_JavaObject()) {2381return ptn->points_to(this->as_JavaObject());2382}2383assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");2384int ptn_count = ptn->edge_count();2385for (EdgeIterator i(this); i.has_next(); i.next()) {2386PointsToNode* this_e = i.get();2387for (int j = 0; j < ptn_count; j++) {2388if (this_e == ptn->edge(j)) {2389return true;2390}2391}2392}2393return false;2394}23952396#ifdef ASSERT2397// Return true if bases point to this java object.2398bool FieldNode::has_base(JavaObjectNode* jobj) const {2399for (BaseIterator i(this); i.has_next(); i.next()) {2400if (i.get() == jobj) {2401return true;2402}2403}2404return false;2405}2406#endif24072408bool ConnectionGraph::is_captured_store_address(Node* addp) {2409// Handle simple case first.2410assert(_igvn->type(addp)->isa_oopptr() == NULL, "should be raw access");2411if (addp->in(AddPNode::Address)->is_Proj() && addp->in(AddPNode::Address)->in(0)->is_Allocate()) {2412return true;2413} else if (addp->in(AddPNode::Address)->is_Phi()) {2414for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {2415Node* addp_use = addp->fast_out(i);2416if (addp_use->is_Store()) {2417for (DUIterator_Fast jmax, j = addp_use->fast_outs(jmax); j < jmax; j++) {2418if (addp_use->fast_out(j)->is_Initialize()) {2419return true;2420}2421}2422}2423}2424}2425return false;2426}24272428int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {2429const Type *adr_type = phase->type(adr);2430if (adr->is_AddP() && adr_type->isa_oopptr() == NULL && is_captured_store_address(adr)) {2431// We are computing a raw address for a store captured by an Initialize2432// compute an appropriate address type. AddP cases #3 and #5 (see below).2433int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);2434assert(offs != Type::OffsetBot ||2435adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),2436"offset must be a constant or it is initialization of array");2437return offs;2438}2439const TypePtr *t_ptr = adr_type->isa_ptr();2440assert(t_ptr != NULL, "must be a pointer type");2441return t_ptr->offset();2442}24432444Node* ConnectionGraph::get_addp_base(Node *addp) {2445assert(addp->is_AddP(), "must be AddP");2446//2447// AddP cases for Base and Address inputs:2448// case #1. Direct object's field reference:2449// Allocate2450// |2451// Proj #5 ( oop result )2452// |2453// CheckCastPP (cast to instance type)2454// | |2455// AddP ( base == address )2456//2457// case #2. Indirect object's field reference:2458// Phi2459// |2460// CastPP (cast to instance type)2461// | |2462// AddP ( base == address )2463//2464// case #3. Raw object's field reference for Initialize node:2465// Allocate2466// |2467// Proj #5 ( oop result )2468// top |2469// \ |2470// AddP ( base == top )2471//2472// case #4. Array's element reference:2473// {CheckCastPP | CastPP}2474// | | |2475// | AddP ( array's element offset )2476// | |2477// AddP ( array's offset )2478//2479// case #5. Raw object's field reference for arraycopy stub call:2480// The inline_native_clone() case when the arraycopy stub is called2481// after the allocation before Initialize and CheckCastPP nodes.2482// Allocate2483// |2484// Proj #5 ( oop result )2485// | |2486// AddP ( base == address )2487//2488// case #6. Constant Pool, ThreadLocal, CastX2P or2489// Raw object's field reference:2490// {ConP, ThreadLocal, CastX2P, raw Load}2491// top |2492// \ |2493// AddP ( base == top )2494//2495// case #7. Klass's field reference.2496// LoadKlass2497// | |2498// AddP ( base == address )2499//2500// case #8. narrow Klass's field reference.2501// LoadNKlass2502// |2503// DecodeN2504// | |2505// AddP ( base == address )2506//2507// case #9. Mixed unsafe access2508// {instance}2509// |2510// CheckCastPP (raw)2511// top |2512// \ |2513// AddP ( base == top )2514//2515Node *base = addp->in(AddPNode::Base);2516if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9.2517base = addp->in(AddPNode::Address);2518while (base->is_AddP()) {2519// Case #6 (unsafe access) may have several chained AddP nodes.2520assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");2521base = base->in(AddPNode::Address);2522}2523if (base->Opcode() == Op_CheckCastPP &&2524base->bottom_type()->isa_rawptr() &&2525_igvn->type(base->in(1))->isa_oopptr()) {2526base = base->in(1); // Case #92527} else {2528Node* uncast_base = base->uncast();2529int opcode = uncast_base->Opcode();2530assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||2531opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||2532(uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||2533is_captured_store_address(addp), "sanity");2534}2535}2536return base;2537}25382539Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {2540assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");2541Node* addp2 = addp->raw_out(0);2542if (addp->outcnt() == 1 && addp2->is_AddP() &&2543addp2->in(AddPNode::Base) == n &&2544addp2->in(AddPNode::Address) == addp) {2545assert(addp->in(AddPNode::Base) == n, "expecting the same base");2546//2547// Find array's offset to push it on worklist first and2548// as result process an array's element offset first (pushed second)2549// to avoid CastPP for the array's offset.2550// Otherwise the inserted CastPP (LocalVar) will point to what2551// the AddP (Field) points to. Which would be wrong since2552// the algorithm expects the CastPP has the same point as2553// as AddP's base CheckCastPP (LocalVar).2554//2555// ArrayAllocation2556// |2557// CheckCastPP2558// |2559// memProj (from ArrayAllocation CheckCastPP)2560// | ||2561// | || Int (element index)2562// | || | ConI (log(element size))2563// | || | /2564// | || LShift2565// | || /2566// | AddP (array's element offset)2567// | |2568// | | ConI (array's offset: #12(32-bits) or #24(64-bits))2569// | / /2570// AddP (array's offset)2571// |2572// Load/Store (memory operation on array's element)2573//2574return addp2;2575}2576return NULL;2577}25782579//2580// Adjust the type and inputs of an AddP which computes the2581// address of a field of an instance2582//2583bool ConnectionGraph::split_AddP(Node *addp, Node *base) {2584PhaseGVN* igvn = _igvn;2585const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();2586assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");2587const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();2588if (t == NULL) {2589// We are computing a raw address for a store captured by an Initialize2590// compute an appropriate address type (cases #3 and #5).2591assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");2592assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");2593intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);2594assert(offs != Type::OffsetBot, "offset must be a constant");2595t = base_t->add_offset(offs)->is_oopptr();2596}2597int inst_id = base_t->instance_id();2598assert(!t->is_known_instance() || t->instance_id() == inst_id,2599"old type must be non-instance or match new type");26002601// The type 't' could be subclass of 'base_t'.2602// As result t->offset() could be large then base_t's size and it will2603// cause the failure in add_offset() with narrow oops since TypeOopPtr()2604// constructor verifies correctness of the offset.2605//2606// It could happened on subclass's branch (from the type profiling2607// inlining) which was not eliminated during parsing since the exactness2608// of the allocation type was not propagated to the subclass type check.2609//2610// Or the type 't' could be not related to 'base_t' at all.2611// It could happened when CHA type is different from MDO type on a dead path2612// (for example, from instanceof check) which is not collapsed during parsing.2613//2614// Do nothing for such AddP node and don't process its users since2615// this code branch will go away.2616//2617if (!t->is_known_instance() &&2618!base_t->klass()->is_subtype_of(t->klass())) {2619return false; // bail out2620}2621const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();2622// Do NOT remove the next line: ensure a new alias index is allocated2623// for the instance type. Note: C++ will not remove it since the call2624// has side effect.2625int alias_idx = _compile->get_alias_index(tinst);2626igvn->set_type(addp, tinst);2627// record the allocation in the node map2628set_map(addp, get_map(base->_idx));2629// Set addp's Base and Address to 'base'.2630Node *abase = addp->in(AddPNode::Base);2631Node *adr = addp->in(AddPNode::Address);2632if (adr->is_Proj() && adr->in(0)->is_Allocate() &&2633adr->in(0)->_idx == (uint)inst_id) {2634// Skip AddP cases #3 and #5.2635} else {2636assert(!abase->is_top(), "sanity"); // AddP case #32637if (abase != base) {2638igvn->hash_delete(addp);2639addp->set_req(AddPNode::Base, base);2640if (abase == adr) {2641addp->set_req(AddPNode::Address, base);2642} else {2643// AddP case #4 (adr is array's element offset AddP node)2644#ifdef ASSERT2645const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();2646assert(adr->is_AddP() && atype != NULL &&2647atype->instance_id() == inst_id, "array's element offset should be processed first");2648#endif2649}2650igvn->hash_insert(addp);2651}2652}2653// Put on IGVN worklist since at least addp's type was changed above.2654record_for_optimizer(addp);2655return true;2656}26572658//2659// Create a new version of orig_phi if necessary. Returns either the newly2660// created phi or an existing phi. Sets create_new to indicate whether a new2661// phi was created. Cache the last newly created phi in the node map.2662//2663PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, bool &new_created) {2664Compile *C = _compile;2665PhaseGVN* igvn = _igvn;2666new_created = false;2667int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());2668// nothing to do if orig_phi is bottom memory or matches alias_idx2669if (phi_alias_idx == alias_idx) {2670return orig_phi;2671}2672// Have we recently created a Phi for this alias index?2673PhiNode *result = get_map_phi(orig_phi->_idx);2674if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) {2675return result;2676}2677// Previous check may fail when the same wide memory Phi was split into Phis2678// for different memory slices. Search all Phis for this region.2679if (result != NULL) {2680Node* region = orig_phi->in(0);2681for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {2682Node* phi = region->fast_out(i);2683if (phi->is_Phi() &&2684C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {2685assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");2686return phi->as_Phi();2687}2688}2689}2690if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) {2691if (C->do_escape_analysis() == true && !C->failing()) {2692// Retry compilation without escape analysis.2693// If this is the first failure, the sentinel string will "stick"2694// to the Compile object, and the C2Compiler will see it and retry.2695C->record_failure(C2Compiler::retry_no_escape_analysis());2696}2697return NULL;2698}2699orig_phi_worklist.append_if_missing(orig_phi);2700const TypePtr *atype = C->get_adr_type(alias_idx);2701result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);2702C->copy_node_notes_to(result, orig_phi);2703igvn->set_type(result, result->bottom_type());2704record_for_optimizer(result);2705set_map(orig_phi, result);2706new_created = true;2707return result;2708}27092710//2711// Return a new version of Memory Phi "orig_phi" with the inputs having the2712// specified alias index.2713//2714PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist) {2715assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");2716Compile *C = _compile;2717PhaseGVN* igvn = _igvn;2718bool new_phi_created;2719PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);2720if (!new_phi_created) {2721return result;2722}2723GrowableArray<PhiNode *> phi_list;2724GrowableArray<uint> cur_input;2725PhiNode *phi = orig_phi;2726uint idx = 1;2727bool finished = false;2728while(!finished) {2729while (idx < phi->req()) {2730Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist);2731if (mem != NULL && mem->is_Phi()) {2732PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);2733if (new_phi_created) {2734// found an phi for which we created a new split, push current one on worklist and begin2735// processing new one2736phi_list.push(phi);2737cur_input.push(idx);2738phi = mem->as_Phi();2739result = newphi;2740idx = 1;2741continue;2742} else {2743mem = newphi;2744}2745}2746if (C->failing()) {2747return NULL;2748}2749result->set_req(idx++, mem);2750}2751#ifdef ASSERT2752// verify that the new Phi has an input for each input of the original2753assert( phi->req() == result->req(), "must have same number of inputs.");2754assert( result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match");2755#endif2756// Check if all new phi's inputs have specified alias index.2757// Otherwise use old phi.2758for (uint i = 1; i < phi->req(); i++) {2759Node* in = result->in(i);2760assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond.");2761}2762// we have finished processing a Phi, see if there are any more to do2763finished = (phi_list.length() == 0 );2764if (!finished) {2765phi = phi_list.pop();2766idx = cur_input.pop();2767PhiNode *prev_result = get_map_phi(phi->_idx);2768prev_result->set_req(idx++, result);2769result = prev_result;2770}2771}2772return result;2773}27742775//2776// The next methods are derived from methods in MemNode.2777//2778Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {2779Node *mem = mmem;2780// TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally2781// means an array I have not precisely typed yet. Do not do any2782// alias stuff with it any time soon.2783if (toop->base() != Type::AnyPtr &&2784!(toop->klass() != NULL &&2785toop->klass()->is_java_lang_Object() &&2786toop->offset() == Type::OffsetBot)) {2787mem = mmem->memory_at(alias_idx);2788// Update input if it is progress over what we have now2789}2790return mem;2791}27922793//2794// Move memory users to their memory slices.2795//2796void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis) {2797Compile* C = _compile;2798PhaseGVN* igvn = _igvn;2799const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr();2800assert(tp != NULL, "ptr type");2801int alias_idx = C->get_alias_index(tp);2802int general_idx = C->get_general_index(alias_idx);28032804// Move users first2805for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {2806Node* use = n->fast_out(i);2807if (use->is_MergeMem()) {2808MergeMemNode* mmem = use->as_MergeMem();2809assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice");2810if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) {2811continue; // Nothing to do2812}2813// Replace previous general reference to mem node.2814uint orig_uniq = C->unique();2815Node* m = find_inst_mem(n, general_idx, orig_phis);2816assert(orig_uniq == C->unique(), "no new nodes");2817mmem->set_memory_at(general_idx, m);2818--imax;2819--i;2820} else if (use->is_MemBar()) {2821assert(!use->is_Initialize(), "initializing stores should not be moved");2822if (use->req() > MemBarNode::Precedent &&2823use->in(MemBarNode::Precedent) == n) {2824// Don't move related membars.2825record_for_optimizer(use);2826continue;2827}2828tp = use->as_MemBar()->adr_type()->isa_ptr();2829if ((tp != NULL && C->get_alias_index(tp) == alias_idx) ||2830alias_idx == general_idx) {2831continue; // Nothing to do2832}2833// Move to general memory slice.2834uint orig_uniq = C->unique();2835Node* m = find_inst_mem(n, general_idx, orig_phis);2836assert(orig_uniq == C->unique(), "no new nodes");2837igvn->hash_delete(use);2838imax -= use->replace_edge(n, m, igvn);2839igvn->hash_insert(use);2840record_for_optimizer(use);2841--i;2842#ifdef ASSERT2843} else if (use->is_Mem()) {2844if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) {2845// Don't move related cardmark.2846continue;2847}2848// Memory nodes should have new memory input.2849tp = igvn->type(use->in(MemNode::Address))->isa_ptr();2850assert(tp != NULL, "ptr type");2851int idx = C->get_alias_index(tp);2852assert(get_map(use->_idx) != NULL || idx == alias_idx,2853"Following memory nodes should have new memory input or be on the same memory slice");2854} else if (use->is_Phi()) {2855// Phi nodes should be split and moved already.2856tp = use->as_Phi()->adr_type()->isa_ptr();2857assert(tp != NULL, "ptr type");2858int idx = C->get_alias_index(tp);2859assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");2860} else {2861use->dump();2862assert(false, "should not be here");2863#endif2864}2865}2866}28672868//2869// Search memory chain of "mem" to find a MemNode whose address2870// is the specified alias index.2871//2872Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis) {2873if (orig_mem == NULL) {2874return orig_mem;2875}2876Compile* C = _compile;2877PhaseGVN* igvn = _igvn;2878const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();2879bool is_instance = (toop != NULL) && toop->is_known_instance();2880Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);2881Node *prev = NULL;2882Node *result = orig_mem;2883while (prev != result) {2884prev = result;2885if (result == start_mem) {2886break; // hit one of our sentinels2887}2888if (result->is_Mem()) {2889const Type *at = igvn->type(result->in(MemNode::Address));2890if (at == Type::TOP) {2891break; // Dead2892}2893assert (at->isa_ptr() != NULL, "pointer type required.");2894int idx = C->get_alias_index(at->is_ptr());2895if (idx == alias_idx) {2896break; // Found2897}2898if (!is_instance && (at->isa_oopptr() == NULL ||2899!at->is_oopptr()->is_known_instance())) {2900break; // Do not skip store to general memory slice.2901}2902result = result->in(MemNode::Memory);2903}2904if (!is_instance) {2905continue; // don't search further for non-instance types2906}2907// skip over a call which does not affect this memory slice2908if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {2909Node *proj_in = result->in(0);2910if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {2911break; // hit one of our sentinels2912} else if (proj_in->is_Call()) {2913// ArrayCopy node processed here as well2914CallNode *call = proj_in->as_Call();2915if (!call->may_modify(toop, igvn)) {2916result = call->in(TypeFunc::Memory);2917}2918} else if (proj_in->is_Initialize()) {2919AllocateNode* alloc = proj_in->as_Initialize()->allocation();2920// Stop if this is the initialization for the object instance which2921// which contains this memory slice, otherwise skip over it.2922if (alloc == NULL || alloc->_idx != (uint)toop->instance_id()) {2923result = proj_in->in(TypeFunc::Memory);2924}2925} else if (proj_in->is_MemBar()) {2926// Check if there is an array copy for a clone2927// Step over GC barrier when ReduceInitialCardMarks is disabled2928BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();2929Node* control_proj_ac = bs->step_over_gc_barrier(proj_in->in(0));29302931if (control_proj_ac->is_Proj() && control_proj_ac->in(0)->is_ArrayCopy()) {2932// Stop if it is a clone2933ArrayCopyNode* ac = control_proj_ac->in(0)->as_ArrayCopy();2934if (ac->may_modify(toop, igvn)) {2935break;2936}2937}2938result = proj_in->in(TypeFunc::Memory);2939}2940} else if (result->is_MergeMem()) {2941MergeMemNode *mmem = result->as_MergeMem();2942result = step_through_mergemem(mmem, alias_idx, toop);2943if (result == mmem->base_memory()) {2944// Didn't find instance memory, search through general slice recursively.2945result = mmem->memory_at(C->get_general_index(alias_idx));2946result = find_inst_mem(result, alias_idx, orig_phis);2947if (C->failing()) {2948return NULL;2949}2950mmem->set_memory_at(alias_idx, result);2951}2952} else if (result->is_Phi() &&2953C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {2954Node *un = result->as_Phi()->unique_input(igvn);2955if (un != NULL) {2956orig_phis.append_if_missing(result->as_Phi());2957result = un;2958} else {2959break;2960}2961} else if (result->is_ClearArray()) {2962if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) {2963// Can not bypass initialization of the instance2964// we are looking for.2965break;2966}2967// Otherwise skip it (the call updated 'result' value).2968} else if (result->Opcode() == Op_SCMemProj) {2969Node* mem = result->in(0);2970Node* adr = NULL;2971if (mem->is_LoadStore()) {2972adr = mem->in(MemNode::Address);2973} else {2974assert(mem->Opcode() == Op_EncodeISOArray ||2975mem->Opcode() == Op_StrCompressedCopy, "sanity");2976adr = mem->in(3); // Memory edge corresponds to destination array2977}2978const Type *at = igvn->type(adr);2979if (at != Type::TOP) {2980assert(at->isa_ptr() != NULL, "pointer type required.");2981int idx = C->get_alias_index(at->is_ptr());2982if (idx == alias_idx) {2983// Assert in debug mode2984assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");2985break; // In product mode return SCMemProj node2986}2987}2988result = mem->in(MemNode::Memory);2989} else if (result->Opcode() == Op_StrInflatedCopy) {2990Node* adr = result->in(3); // Memory edge corresponds to destination array2991const Type *at = igvn->type(adr);2992if (at != Type::TOP) {2993assert(at->isa_ptr() != NULL, "pointer type required.");2994int idx = C->get_alias_index(at->is_ptr());2995if (idx == alias_idx) {2996// Assert in debug mode2997assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field");2998break; // In product mode return SCMemProj node2999}3000}3001result = result->in(MemNode::Memory);3002}3003}3004if (result->is_Phi()) {3005PhiNode *mphi = result->as_Phi();3006assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");3007const TypePtr *t = mphi->adr_type();3008if (!is_instance) {3009// Push all non-instance Phis on the orig_phis worklist to update inputs3010// during Phase 4 if needed.3011orig_phis.append_if_missing(mphi);3012} else if (C->get_alias_index(t) != alias_idx) {3013// Create a new Phi with the specified alias index type.3014result = split_memory_phi(mphi, alias_idx, orig_phis);3015}3016}3017// the result is either MemNode, PhiNode, InitializeNode.3018return result;3019}30203021//3022// Convert the types of non-escaped object to instance types where possible,3023// propagate the new type information through the graph, and update memory3024// edges and MergeMem inputs to reflect the new type.3025//3026// We start with allocations (and calls which may be allocations) on alloc_worklist.3027// The processing is done in 4 phases:3028//3029// Phase 1: Process possible allocations from alloc_worklist. Create instance3030// types for the CheckCastPP for allocations where possible.3031// Propagate the new types through users as follows:3032// casts and Phi: push users on alloc_worklist3033// AddP: cast Base and Address inputs to the instance type3034// push any AddP users on alloc_worklist and push any memnode3035// users onto memnode_worklist.3036// Phase 2: Process MemNode's from memnode_worklist. compute new address type and3037// search the Memory chain for a store with the appropriate type3038// address type. If a Phi is found, create a new version with3039// the appropriate memory slices from each of the Phi inputs.3040// For stores, process the users as follows:3041// MemNode: push on memnode_worklist3042// MergeMem: push on mergemem_worklist3043// Phase 3: Process MergeMem nodes from mergemem_worklist. Walk each memory slice3044// moving the first node encountered of each instance type to the3045// the input corresponding to its alias index.3046// appropriate memory slice.3047// Phase 4: Update the inputs of non-instance memory Phis and the Memory input of memnodes.3048//3049// In the following example, the CheckCastPP nodes are the cast of allocation3050// results and the allocation of node 29 is non-escaped and eligible to be an3051// instance type.3052//3053// We start with:3054//3055// 7 Parm #memory3056// 10 ConI "12"3057// 19 CheckCastPP "Foo"3058// 20 AddP _ 19 19 10 Foo+12 alias_index=43059// 29 CheckCastPP "Foo"3060// 30 AddP _ 29 29 10 Foo+12 alias_index=43061//3062// 40 StoreP 25 7 20 ... alias_index=43063// 50 StoreP 35 40 30 ... alias_index=43064// 60 StoreP 45 50 20 ... alias_index=43065// 70 LoadP _ 60 30 ... alias_index=43066// 80 Phi 75 50 60 Memory alias_index=43067// 90 LoadP _ 80 30 ... alias_index=43068// 100 LoadP _ 80 20 ... alias_index=43069//3070//3071// Phase 1 creates an instance type for node 29 assigning it an instance id of 243072// and creating a new alias index for node 30. This gives:3073//3074// 7 Parm #memory3075// 10 ConI "12"3076// 19 CheckCastPP "Foo"3077// 20 AddP _ 19 19 10 Foo+12 alias_index=43078// 29 CheckCastPP "Foo" iid=243079// 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=243080//3081// 40 StoreP 25 7 20 ... alias_index=43082// 50 StoreP 35 40 30 ... alias_index=63083// 60 StoreP 45 50 20 ... alias_index=43084// 70 LoadP _ 60 30 ... alias_index=63085// 80 Phi 75 50 60 Memory alias_index=43086// 90 LoadP _ 80 30 ... alias_index=63087// 100 LoadP _ 80 20 ... alias_index=43088//3089// In phase 2, new memory inputs are computed for the loads and stores,3090// And a new version of the phi is created. In phase 4, the inputs to3091// node 80 are updated and then the memory nodes are updated with the3092// values computed in phase 2. This results in:3093//3094// 7 Parm #memory3095// 10 ConI "12"3096// 19 CheckCastPP "Foo"3097// 20 AddP _ 19 19 10 Foo+12 alias_index=43098// 29 CheckCastPP "Foo" iid=243099// 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=243100//3101// 40 StoreP 25 7 20 ... alias_index=43102// 50 StoreP 35 7 30 ... alias_index=63103// 60 StoreP 45 40 20 ... alias_index=43104// 70 LoadP _ 50 30 ... alias_index=63105// 80 Phi 75 40 60 Memory alias_index=43106// 120 Phi 75 50 50 Memory alias_index=63107// 90 LoadP _ 120 30 ... alias_index=63108// 100 LoadP _ 80 20 ... alias_index=43109//3110void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist, GrowableArray<ArrayCopyNode*> &arraycopy_worklist) {3111GrowableArray<Node *> memnode_worklist;3112GrowableArray<PhiNode *> orig_phis;3113PhaseIterGVN *igvn = _igvn;3114uint new_index_start = (uint) _compile->num_alias_types();3115VectorSet visited;3116ideal_nodes.clear(); // Reset for use with set_map/get_map.3117uint unique_old = _compile->unique();31183119// Phase 1: Process possible allocations from alloc_worklist.3120// Create instance types for the CheckCastPP for allocations where possible.3121//3122// (Note: don't forget to change the order of the second AddP node on3123// the alloc_worklist if the order of the worklist processing is changed,3124// see the comment in find_second_addp().)3125//3126while (alloc_worklist.length() != 0) {3127Node *n = alloc_worklist.pop();3128uint ni = n->_idx;3129if (n->is_Call()) {3130CallNode *alloc = n->as_Call();3131// copy escape information to call node3132PointsToNode* ptn = ptnode_adr(alloc->_idx);3133PointsToNode::EscapeState es = ptn->escape_state();3134// We have an allocation or call which returns a Java object,3135// see if it is non-escaped.3136if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable()) {3137continue;3138}3139// Find CheckCastPP for the allocate or for the return value of a call3140n = alloc->result_cast();3141if (n == NULL) { // No uses except Initialize node3142if (alloc->is_Allocate()) {3143// Set the scalar_replaceable flag for allocation3144// so it could be eliminated if it has no uses.3145alloc->as_Allocate()->_is_scalar_replaceable = true;3146}3147continue;3148}3149if (!n->is_CheckCastPP()) { // not unique CheckCastPP.3150// we could reach here for allocate case if one init is associated with many allocs.3151if (alloc->is_Allocate()) {3152alloc->as_Allocate()->_is_scalar_replaceable = false;3153}3154continue;3155}31563157// The inline code for Object.clone() casts the allocation result to3158// java.lang.Object and then to the actual type of the allocated3159// object. Detect this case and use the second cast.3160// Also detect j.l.reflect.Array.newInstance(jobject, jint) case when3161// the allocation result is cast to java.lang.Object and then3162// to the actual Array type.3163if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL3164&& (alloc->is_AllocateArray() ||3165igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeKlassPtr::OBJECT)) {3166Node *cast2 = NULL;3167for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {3168Node *use = n->fast_out(i);3169if (use->is_CheckCastPP()) {3170cast2 = use;3171break;3172}3173}3174if (cast2 != NULL) {3175n = cast2;3176} else {3177// Non-scalar replaceable if the allocation type is unknown statically3178// (reflection allocation), the object can't be restored during3179// deoptimization without precise type.3180continue;3181}3182}31833184const TypeOopPtr *t = igvn->type(n)->isa_oopptr();3185if (t == NULL) {3186continue; // not a TypeOopPtr3187}3188if (!t->klass_is_exact()) {3189continue; // not an unique type3190}3191if (alloc->is_Allocate()) {3192// Set the scalar_replaceable flag for allocation3193// so it could be eliminated.3194alloc->as_Allocate()->_is_scalar_replaceable = true;3195}3196set_escape_state(ptnode_adr(n->_idx), es); // CheckCastPP escape state3197// in order for an object to be scalar-replaceable, it must be:3198// - a direct allocation (not a call returning an object)3199// - non-escaping3200// - eligible to be a unique type3201// - not determined to be ineligible by escape analysis3202set_map(alloc, n);3203set_map(n, alloc);3204const TypeOopPtr* tinst = t->cast_to_instance_id(ni);3205igvn->hash_delete(n);3206igvn->set_type(n, tinst);3207n->raise_bottom_type(tinst);3208igvn->hash_insert(n);3209record_for_optimizer(n);3210// Allocate an alias index for the header fields. Accesses to3211// the header emitted during macro expansion wouldn't have3212// correct memory state otherwise.3213_compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes()));3214_compile->get_alias_index(tinst->add_offset(oopDesc::klass_offset_in_bytes()));3215if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {32163217// First, put on the worklist all Field edges from Connection Graph3218// which is more accurate than putting immediate users from Ideal Graph.3219for (EdgeIterator e(ptn); e.has_next(); e.next()) {3220PointsToNode* tgt = e.get();3221if (tgt->is_Arraycopy()) {3222continue;3223}3224Node* use = tgt->ideal_node();3225assert(tgt->is_Field() && use->is_AddP(),3226"only AddP nodes are Field edges in CG");3227if (use->outcnt() > 0) { // Don't process dead nodes3228Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));3229if (addp2 != NULL) {3230assert(alloc->is_AllocateArray(),"array allocation was expected");3231alloc_worklist.append_if_missing(addp2);3232}3233alloc_worklist.append_if_missing(use);3234}3235}32363237// An allocation may have an Initialize which has raw stores. Scan3238// the users of the raw allocation result and push AddP users3239// on alloc_worklist.3240Node *raw_result = alloc->proj_out_or_null(TypeFunc::Parms);3241assert (raw_result != NULL, "must have an allocation result");3242for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {3243Node *use = raw_result->fast_out(i);3244if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes3245Node* addp2 = find_second_addp(use, raw_result);3246if (addp2 != NULL) {3247assert(alloc->is_AllocateArray(),"array allocation was expected");3248alloc_worklist.append_if_missing(addp2);3249}3250alloc_worklist.append_if_missing(use);3251} else if (use->is_MemBar()) {3252memnode_worklist.append_if_missing(use);3253}3254}3255}3256} else if (n->is_AddP()) {3257JavaObjectNode* jobj = unique_java_object(get_addp_base(n));3258if (jobj == NULL || jobj == phantom_obj) {3259#ifdef ASSERT3260ptnode_adr(get_addp_base(n)->_idx)->dump();3261ptnode_adr(n->_idx)->dump();3262assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");3263#endif3264_compile->record_failure(C2Compiler::retry_no_escape_analysis());3265return;3266}3267Node *base = get_map(jobj->idx()); // CheckCastPP node3268if (!split_AddP(n, base)) continue; // wrong type from dead path3269} else if (n->is_Phi() ||3270n->is_CheckCastPP() ||3271n->is_EncodeP() ||3272n->is_DecodeN() ||3273(n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {3274if (visited.test_set(n->_idx)) {3275assert(n->is_Phi(), "loops only through Phi's");3276continue; // already processed3277}3278JavaObjectNode* jobj = unique_java_object(n);3279if (jobj == NULL || jobj == phantom_obj) {3280#ifdef ASSERT3281ptnode_adr(n->_idx)->dump();3282assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");3283#endif3284_compile->record_failure(C2Compiler::retry_no_escape_analysis());3285return;3286} else {3287Node *val = get_map(jobj->idx()); // CheckCastPP node3288TypeNode *tn = n->as_Type();3289const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();3290assert(tinst != NULL && tinst->is_known_instance() &&3291tinst->instance_id() == jobj->idx() , "instance type expected.");32923293const Type *tn_type = igvn->type(tn);3294const TypeOopPtr *tn_t;3295if (tn_type->isa_narrowoop()) {3296tn_t = tn_type->make_ptr()->isa_oopptr();3297} else {3298tn_t = tn_type->isa_oopptr();3299}3300if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) {3301if (tn_type->isa_narrowoop()) {3302tn_type = tinst->make_narrowoop();3303} else {3304tn_type = tinst;3305}3306igvn->hash_delete(tn);3307igvn->set_type(tn, tn_type);3308tn->set_type(tn_type);3309igvn->hash_insert(tn);3310record_for_optimizer(n);3311} else {3312assert(tn_type == TypePtr::NULL_PTR ||3313tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass()),3314"unexpected type");3315continue; // Skip dead path with different type3316}3317}3318} else {3319debug_only(n->dump();)3320assert(false, "EA: unexpected node");3321continue;3322}3323// push allocation's users on appropriate worklist3324for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {3325Node *use = n->fast_out(i);3326if(use->is_Mem() && use->in(MemNode::Address) == n) {3327// Load/store to instance's field3328memnode_worklist.append_if_missing(use);3329} else if (use->is_MemBar()) {3330if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge3331memnode_worklist.append_if_missing(use);3332}3333} else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes3334Node* addp2 = find_second_addp(use, n);3335if (addp2 != NULL) {3336alloc_worklist.append_if_missing(addp2);3337}3338alloc_worklist.append_if_missing(use);3339} else if (use->is_Phi() ||3340use->is_CheckCastPP() ||3341use->is_EncodeNarrowPtr() ||3342use->is_DecodeNarrowPtr() ||3343(use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {3344alloc_worklist.append_if_missing(use);3345#ifdef ASSERT3346} else if (use->is_Mem()) {3347assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");3348} else if (use->is_MergeMem()) {3349assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");3350} else if (use->is_SafePoint()) {3351// Look for MergeMem nodes for calls which reference unique allocation3352// (through CheckCastPP nodes) even for debug info.3353Node* m = use->in(TypeFunc::Memory);3354if (m->is_MergeMem()) {3355assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");3356}3357} else if (use->Opcode() == Op_EncodeISOArray) {3358if (use->in(MemNode::Memory) == n || use->in(3) == n) {3359// EncodeISOArray overwrites destination array3360memnode_worklist.append_if_missing(use);3361}3362} else {3363uint op = use->Opcode();3364if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&3365(use->in(MemNode::Memory) == n)) {3366// They overwrite memory edge corresponding to destination array,3367memnode_worklist.append_if_missing(use);3368} else if (!(op == Op_CmpP || op == Op_Conv2B ||3369op == Op_CastP2X || op == Op_StoreCM ||3370op == Op_FastLock || op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||3371op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||3372op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar ||3373op == Op_SubTypeCheck ||3374BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {3375n->dump();3376use->dump();3377assert(false, "EA: missing allocation reference path");3378}3379#endif3380}3381}33823383}33843385// Go over all ArrayCopy nodes and if one of the inputs has a unique3386// type, record it in the ArrayCopy node so we know what memory this3387// node uses/modified.3388for (int next = 0; next < arraycopy_worklist.length(); next++) {3389ArrayCopyNode* ac = arraycopy_worklist.at(next);3390Node* dest = ac->in(ArrayCopyNode::Dest);3391if (dest->is_AddP()) {3392dest = get_addp_base(dest);3393}3394JavaObjectNode* jobj = unique_java_object(dest);3395if (jobj != NULL) {3396Node *base = get_map(jobj->idx());3397if (base != NULL) {3398const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();3399ac->_dest_type = base_t;3400}3401}3402Node* src = ac->in(ArrayCopyNode::Src);3403if (src->is_AddP()) {3404src = get_addp_base(src);3405}3406jobj = unique_java_object(src);3407if (jobj != NULL) {3408Node* base = get_map(jobj->idx());3409if (base != NULL) {3410const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();3411ac->_src_type = base_t;3412}3413}3414}34153416// New alias types were created in split_AddP().3417uint new_index_end = (uint) _compile->num_alias_types();3418assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1");34193420// Phase 2: Process MemNode's from memnode_worklist. compute new address type and3421// compute new values for Memory inputs (the Memory inputs are not3422// actually updated until phase 4.)3423if (memnode_worklist.length() == 0)3424return; // nothing to do3425while (memnode_worklist.length() != 0) {3426Node *n = memnode_worklist.pop();3427if (visited.test_set(n->_idx)) {3428continue;3429}3430if (n->is_Phi() || n->is_ClearArray()) {3431// we don't need to do anything, but the users must be pushed3432} else if (n->is_MemBar()) { // Initialize, MemBar nodes3433// we don't need to do anything, but the users must be pushed3434n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);3435if (n == NULL) {3436continue;3437}3438} else if (n->Opcode() == Op_StrCompressedCopy ||3439n->Opcode() == Op_EncodeISOArray) {3440// get the memory projection3441n = n->find_out_with(Op_SCMemProj);3442assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");3443} else {3444assert(n->is_Mem(), "memory node required.");3445Node *addr = n->in(MemNode::Address);3446const Type *addr_t = igvn->type(addr);3447if (addr_t == Type::TOP) {3448continue;3449}3450assert (addr_t->isa_ptr() != NULL, "pointer type required.");3451int alias_idx = _compile->get_alias_index(addr_t->is_ptr());3452assert ((uint)alias_idx < new_index_end, "wrong alias index");3453Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);3454if (_compile->failing()) {3455return;3456}3457if (mem != n->in(MemNode::Memory)) {3458// We delay the memory edge update since we need old one in3459// MergeMem code below when instances memory slices are separated.3460set_map(n, mem);3461}3462if (n->is_Load()) {3463continue; // don't push users3464} else if (n->is_LoadStore()) {3465// get the memory projection3466n = n->find_out_with(Op_SCMemProj);3467assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");3468}3469}3470// push user on appropriate worklist3471for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {3472Node *use = n->fast_out(i);3473if (use->is_Phi() || use->is_ClearArray()) {3474memnode_worklist.append_if_missing(use);3475} else if (use->is_Mem() && use->in(MemNode::Memory) == n) {3476if (use->Opcode() == Op_StoreCM) { // Ignore cardmark stores3477continue;3478}3479memnode_worklist.append_if_missing(use);3480} else if (use->is_MemBar()) {3481if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge3482memnode_worklist.append_if_missing(use);3483}3484#ifdef ASSERT3485} else if(use->is_Mem()) {3486assert(use->in(MemNode::Memory) != n, "EA: missing memory path");3487} else if (use->is_MergeMem()) {3488assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");3489} else if (use->Opcode() == Op_EncodeISOArray) {3490if (use->in(MemNode::Memory) == n || use->in(3) == n) {3491// EncodeISOArray overwrites destination array3492memnode_worklist.append_if_missing(use);3493}3494} else {3495uint op = use->Opcode();3496if ((use->in(MemNode::Memory) == n) &&3497(op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {3498// They overwrite memory edge corresponding to destination array,3499memnode_worklist.append_if_missing(use);3500} else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||3501op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||3502op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||3503op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {3504n->dump();3505use->dump();3506assert(false, "EA: missing memory path");3507}3508#endif3509}3510}3511}35123513// Phase 3: Process MergeMem nodes from mergemem_worklist.3514// Walk each memory slice moving the first node encountered of each3515// instance type to the the input corresponding to its alias index.3516uint length = _mergemem_worklist.length();3517for( uint next = 0; next < length; ++next ) {3518MergeMemNode* nmm = _mergemem_worklist.at(next);3519assert(!visited.test_set(nmm->_idx), "should not be visited before");3520// Note: we don't want to use MergeMemStream here because we only want to3521// scan inputs which exist at the start, not ones we add during processing.3522// Note 2: MergeMem may already contains instance memory slices added3523// during find_inst_mem() call when memory nodes were processed above.3524igvn->hash_delete(nmm);3525uint nslices = MIN2(nmm->req(), new_index_start);3526for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {3527Node* mem = nmm->in(i);3528Node* cur = NULL;3529if (mem == NULL || mem->is_top()) {3530continue;3531}3532// First, update mergemem by moving memory nodes to corresponding slices3533// if their type became more precise since this mergemem was created.3534while (mem->is_Mem()) {3535const Type *at = igvn->type(mem->in(MemNode::Address));3536if (at != Type::TOP) {3537assert (at->isa_ptr() != NULL, "pointer type required.");3538uint idx = (uint)_compile->get_alias_index(at->is_ptr());3539if (idx == i) {3540if (cur == NULL) {3541cur = mem;3542}3543} else {3544if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {3545nmm->set_memory_at(idx, mem);3546}3547}3548}3549mem = mem->in(MemNode::Memory);3550}3551nmm->set_memory_at(i, (cur != NULL) ? cur : mem);3552// Find any instance of the current type if we haven't encountered3553// already a memory slice of the instance along the memory chain.3554for (uint ni = new_index_start; ni < new_index_end; ni++) {3555if((uint)_compile->get_general_index(ni) == i) {3556Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);3557if (nmm->is_empty_memory(m)) {3558Node* result = find_inst_mem(mem, ni, orig_phis);3559if (_compile->failing()) {3560return;3561}3562nmm->set_memory_at(ni, result);3563}3564}3565}3566}3567// Find the rest of instances values3568for (uint ni = new_index_start; ni < new_index_end; ni++) {3569const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();3570Node* result = step_through_mergemem(nmm, ni, tinst);3571if (result == nmm->base_memory()) {3572// Didn't find instance memory, search through general slice recursively.3573result = nmm->memory_at(_compile->get_general_index(ni));3574result = find_inst_mem(result, ni, orig_phis);3575if (_compile->failing()) {3576return;3577}3578nmm->set_memory_at(ni, result);3579}3580}3581igvn->hash_insert(nmm);3582record_for_optimizer(nmm);3583}35843585// Phase 4: Update the inputs of non-instance memory Phis and3586// the Memory input of memnodes3587// First update the inputs of any non-instance Phi's from3588// which we split out an instance Phi. Note we don't have3589// to recursively process Phi's encountered on the input memory3590// chains as is done in split_memory_phi() since they will3591// also be processed here.3592for (int j = 0; j < orig_phis.length(); j++) {3593PhiNode *phi = orig_phis.at(j);3594int alias_idx = _compile->get_alias_index(phi->adr_type());3595igvn->hash_delete(phi);3596for (uint i = 1; i < phi->req(); i++) {3597Node *mem = phi->in(i);3598Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);3599if (_compile->failing()) {3600return;3601}3602if (mem != new_mem) {3603phi->set_req(i, new_mem);3604}3605}3606igvn->hash_insert(phi);3607record_for_optimizer(phi);3608}36093610// Update the memory inputs of MemNodes with the value we computed3611// in Phase 2 and move stores memory users to corresponding memory slices.3612// Disable memory split verification code until the fix for 6984348.3613// Currently it produces false negative results since it does not cover all cases.3614#if 0 // ifdef ASSERT3615visited.Reset();3616Node_Stack old_mems(arena, _compile->unique() >> 2);3617#endif3618for (uint i = 0; i < ideal_nodes.size(); i++) {3619Node* n = ideal_nodes.at(i);3620Node* nmem = get_map(n->_idx);3621assert(nmem != NULL, "sanity");3622if (n->is_Mem()) {3623#if 0 // ifdef ASSERT3624Node* old_mem = n->in(MemNode::Memory);3625if (!visited.test_set(old_mem->_idx)) {3626old_mems.push(old_mem, old_mem->outcnt());3627}3628#endif3629assert(n->in(MemNode::Memory) != nmem, "sanity");3630if (!n->is_Load()) {3631// Move memory users of a store first.3632move_inst_mem(n, orig_phis);3633}3634// Now update memory input3635igvn->hash_delete(n);3636n->set_req(MemNode::Memory, nmem);3637igvn->hash_insert(n);3638record_for_optimizer(n);3639} else {3640assert(n->is_Allocate() || n->is_CheckCastPP() ||3641n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");3642}3643}3644#if 0 // ifdef ASSERT3645// Verify that memory was split correctly3646while (old_mems.is_nonempty()) {3647Node* old_mem = old_mems.node();3648uint old_cnt = old_mems.index();3649old_mems.pop();3650assert(old_cnt == old_mem->outcnt(), "old mem could be lost");3651}3652#endif3653}36543655#ifndef PRODUCT3656static const char *node_type_names[] = {3657"UnknownType",3658"JavaObject",3659"LocalVar",3660"Field",3661"Arraycopy"3662};36633664static const char *esc_names[] = {3665"UnknownEscape",3666"NoEscape",3667"ArgEscape",3668"GlobalEscape"3669};36703671void PointsToNode::dump(bool print_state) const {3672NodeType nt = node_type();3673tty->print("%s ", node_type_names[(int) nt]);3674if (print_state) {3675EscapeState es = escape_state();3676EscapeState fields_es = fields_escape_state();3677tty->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]);3678if (nt == PointsToNode::JavaObject && !this->scalar_replaceable()) {3679tty->print("NSR ");3680}3681}3682if (is_Field()) {3683FieldNode* f = (FieldNode*)this;3684if (f->is_oop()) {3685tty->print("oop ");3686}3687if (f->offset() > 0) {3688tty->print("+%d ", f->offset());3689}3690tty->print("(");3691for (BaseIterator i(f); i.has_next(); i.next()) {3692PointsToNode* b = i.get();3693tty->print(" %d%s", b->idx(),(b->is_JavaObject() ? "P" : ""));3694}3695tty->print(" )");3696}3697tty->print("[");3698for (EdgeIterator i(this); i.has_next(); i.next()) {3699PointsToNode* e = i.get();3700tty->print(" %d%s%s", e->idx(),(e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : "");3701}3702tty->print(" [");3703for (UseIterator i(this); i.has_next(); i.next()) {3704PointsToNode* u = i.get();3705bool is_base = false;3706if (PointsToNode::is_base_use(u)) {3707is_base = true;3708u = PointsToNode::get_use_node(u)->as_Field();3709}3710tty->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : "");3711}3712tty->print(" ]] ");3713if (_node == NULL) {3714tty->print_cr("<null>");3715} else {3716_node->dump();3717}3718}37193720void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) {3721bool first = true;3722int ptnodes_length = ptnodes_worklist.length();3723for (int i = 0; i < ptnodes_length; i++) {3724PointsToNode *ptn = ptnodes_worklist.at(i);3725if (ptn == NULL || !ptn->is_JavaObject()) {3726continue;3727}3728PointsToNode::EscapeState es = ptn->escape_state();3729if ((es != PointsToNode::NoEscape) && !Verbose) {3730continue;3731}3732Node* n = ptn->ideal_node();3733if (n->is_Allocate() || (n->is_CallStaticJava() &&3734n->as_CallStaticJava()->is_boxing_method())) {3735if (first) {3736tty->cr();3737tty->print("======== Connection graph for ");3738_compile->method()->print_short_name();3739tty->cr();3740first = false;3741}3742ptn->dump();3743// Print all locals and fields which reference this allocation3744for (UseIterator j(ptn); j.has_next(); j.next()) {3745PointsToNode* use = j.get();3746if (use->is_LocalVar()) {3747use->dump(Verbose);3748} else if (Verbose) {3749use->dump();3750}3751}3752tty->cr();3753}3754}3755}3756#endif37573758void ConnectionGraph::record_for_optimizer(Node *n) {3759_igvn->_worklist.push(n);3760_igvn->add_users_to_worklist(n);3761}376237633764