Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/opto/callnode.hpp
32285 views
/*1* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.2* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.3*4* This code is free software; you can redistribute it and/or modify it5* under the terms of the GNU General Public License version 2 only, as6* published by the Free Software Foundation.7*8* This code is distributed in the hope that it will be useful, but WITHOUT9* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or10* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License11* version 2 for more details (a copy is included in the LICENSE file that12* accompanied this code).13*14* You should have received a copy of the GNU General Public License version15* 2 along with this work; if not, write to the Free Software Foundation,16* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.17*18* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA19* or visit www.oracle.com if you need additional information or have any20* questions.21*22*/2324#ifndef SHARE_VM_OPTO_CALLNODE_HPP25#define SHARE_VM_OPTO_CALLNODE_HPP2627#include "opto/connode.hpp"28#include "opto/mulnode.hpp"29#include "opto/multnode.hpp"30#include "opto/opcodes.hpp"31#include "opto/phaseX.hpp"32#include "opto/replacednodes.hpp"33#include "opto/type.hpp"3435// Portions of code courtesy of Clifford Click3637// Optimization - Graph Style3839class Chaitin;40class NamedCounter;41class MultiNode;42class SafePointNode;43class CallNode;44class CallJavaNode;45class CallStaticJavaNode;46class CallDynamicJavaNode;47class CallRuntimeNode;48class CallLeafNode;49class CallLeafNoFPNode;50class AllocateNode;51class AllocateArrayNode;52class BoxLockNode;53class LockNode;54class UnlockNode;55class JVMState;56class OopMap;57class State;58class StartNode;59class MachCallNode;60class FastLockNode;6162//------------------------------StartNode--------------------------------------63// The method start node64class StartNode : public MultiNode {65virtual uint cmp( const Node &n ) const;66virtual uint size_of() const; // Size is bigger67public:68const TypeTuple *_domain;69StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {70init_class_id(Class_Start);71init_req(0,this);72init_req(1,root);73}74virtual int Opcode() const;75virtual bool pinned() const { return true; };76virtual const Type *bottom_type() const;77virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }78virtual const Type *Value( PhaseTransform *phase ) const;79virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);80virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;81virtual const RegMask &in_RegMask(uint) const;82virtual Node *match( const ProjNode *proj, const Matcher *m );83virtual uint ideal_reg() const { return 0; }84#ifndef PRODUCT85virtual void dump_spec(outputStream *st) const;86#endif87};8889//------------------------------StartOSRNode-----------------------------------90// The method start node for on stack replacement code91class StartOSRNode : public StartNode {92public:93StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}94virtual int Opcode() const;95static const TypeTuple *osr_domain();96};979899//------------------------------ParmNode---------------------------------------100// Incoming parameters101class ParmNode : public ProjNode {102static const char * const names[TypeFunc::Parms+1];103public:104ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {105init_class_id(Class_Parm);106}107virtual int Opcode() const;108virtual bool is_CFG() const { return (_con == TypeFunc::Control); }109virtual uint ideal_reg() const;110#ifndef PRODUCT111virtual void dump_spec(outputStream *st) const;112#endif113};114115116//------------------------------ReturnNode-------------------------------------117// Return from subroutine node118class ReturnNode : public Node {119public:120ReturnNode( uint edges, Node *cntrl, Node *i_o, Node *memory, Node *retadr, Node *frameptr );121virtual int Opcode() const;122virtual bool is_CFG() const { return true; }123virtual uint hash() const { return NO_HASH; } // CFG nodes do not hash124virtual bool depends_only_on_test() const { return false; }125virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);126virtual const Type *Value( PhaseTransform *phase ) const;127virtual uint ideal_reg() const { return NotAMachineReg; }128virtual uint match_edge(uint idx) const;129#ifndef PRODUCT130virtual void dump_req(outputStream *st = tty) const;131#endif132};133134135//------------------------------RethrowNode------------------------------------136// Rethrow of exception at call site. Ends a procedure before rethrowing;137// ends the current basic block like a ReturnNode. Restores registers and138// unwinds stack. Rethrow happens in the caller's method.139class RethrowNode : public Node {140public:141RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );142virtual int Opcode() const;143virtual bool is_CFG() const { return true; }144virtual uint hash() const { return NO_HASH; } // CFG nodes do not hash145virtual bool depends_only_on_test() const { return false; }146virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);147virtual const Type *Value( PhaseTransform *phase ) const;148virtual uint match_edge(uint idx) const;149virtual uint ideal_reg() const { return NotAMachineReg; }150#ifndef PRODUCT151virtual void dump_req(outputStream *st = tty) const;152#endif153};154155156//------------------------------TailCallNode-----------------------------------157// Pop stack frame and jump indirect158class TailCallNode : public ReturnNode {159public:160TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )161: ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {162init_req(TypeFunc::Parms, target);163init_req(TypeFunc::Parms+1, moop);164}165166virtual int Opcode() const;167virtual uint match_edge(uint idx) const;168};169170//------------------------------TailJumpNode-----------------------------------171// Pop stack frame and jump indirect172class TailJumpNode : public ReturnNode {173public:174TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)175: ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {176init_req(TypeFunc::Parms, target);177init_req(TypeFunc::Parms+1, ex_oop);178}179180virtual int Opcode() const;181virtual uint match_edge(uint idx) const;182};183184//-------------------------------JVMState-------------------------------------185// A linked list of JVMState nodes captures the whole interpreter state,186// plus GC roots, for all active calls at some call site in this compilation187// unit. (If there is no inlining, then the list has exactly one link.)188// This provides a way to map the optimized program back into the interpreter,189// or to let the GC mark the stack.190class JVMState : public ResourceObj {191friend class VMStructs;192public:193typedef enum {194Reexecute_Undefined = -1, // not defined -- will be translated into false later195Reexecute_False = 0, // false -- do not reexecute196Reexecute_True = 1 // true -- reexecute the bytecode197} ReexecuteState; //Reexecute State198199private:200JVMState* _caller; // List pointer for forming scope chains201uint _depth; // One more than caller depth, or one.202uint _locoff; // Offset to locals in input edge mapping203uint _stkoff; // Offset to stack in input edge mapping204uint _monoff; // Offset to monitors in input edge mapping205uint _scloff; // Offset to fields of scalar objs in input edge mapping206uint _endoff; // Offset to end of input edge mapping207uint _sp; // Jave Expression Stack Pointer for this state208int _bci; // Byte Code Index of this JVM point209ReexecuteState _reexecute; // Whether this bytecode need to be re-executed210ciMethod* _method; // Method Pointer211SafePointNode* _map; // Map node associated with this scope212public:213friend class Compile;214friend class PreserveReexecuteState;215216// Because JVMState objects live over the entire lifetime of the217// Compile object, they are allocated into the comp_arena, which218// does not get resource marked or reset during the compile process219void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }220void operator delete( void * ) { } // fast deallocation221222// Create a new JVMState, ready for abstract interpretation.223JVMState(ciMethod* method, JVMState* caller);224JVMState(int stack_size); // root state; has a null method225226// Access functions for the JVM227// ... --|--- loc ---|--- stk ---|--- arg ---|--- mon ---|--- scl ---|228// \ locoff \ stkoff \ argoff \ monoff \ scloff \ endoff229uint locoff() const { return _locoff; }230uint stkoff() const { return _stkoff; }231uint argoff() const { return _stkoff + _sp; }232uint monoff() const { return _monoff; }233uint scloff() const { return _scloff; }234uint endoff() const { return _endoff; }235uint oopoff() const { return debug_end(); }236237int loc_size() const { return stkoff() - locoff(); }238int stk_size() const { return monoff() - stkoff(); }239int mon_size() const { return scloff() - monoff(); }240int scl_size() const { return endoff() - scloff(); }241242bool is_loc(uint i) const { return locoff() <= i && i < stkoff(); }243bool is_stk(uint i) const { return stkoff() <= i && i < monoff(); }244bool is_mon(uint i) const { return monoff() <= i && i < scloff(); }245bool is_scl(uint i) const { return scloff() <= i && i < endoff(); }246247uint sp() const { return _sp; }248int bci() const { return _bci; }249bool should_reexecute() const { return _reexecute==Reexecute_True; }250bool is_reexecute_undefined() const { return _reexecute==Reexecute_Undefined; }251bool has_method() const { return _method != NULL; }252ciMethod* method() const { assert(has_method(), ""); return _method; }253JVMState* caller() const { return _caller; }254SafePointNode* map() const { return _map; }255uint depth() const { return _depth; }256uint debug_start() const; // returns locoff of root caller257uint debug_end() const; // returns endoff of self258uint debug_size() const {259return loc_size() + sp() + mon_size() + scl_size();260}261uint debug_depth() const; // returns sum of debug_size values at all depths262263// Returns the JVM state at the desired depth (1 == root).264JVMState* of_depth(int d) const;265266// Tells if two JVM states have the same call chain (depth, methods, & bcis).267bool same_calls_as(const JVMState* that) const;268269// Monitors (monitors are stored as (boxNode, objNode) pairs270enum { logMonitorEdges = 1 };271int nof_monitors() const { return mon_size() >> logMonitorEdges; }272int monitor_depth() const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }273int monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }274int monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }275bool is_monitor_box(uint off) const {276assert(is_mon(off), "should be called only for monitor edge");277return (0 == bitfield(off - monoff(), 0, logMonitorEdges));278}279bool is_monitor_use(uint off) const { return (is_mon(off)280&& is_monitor_box(off))281|| (caller() && caller()->is_monitor_use(off)); }282283// Initialization functions for the JVM284void set_locoff(uint off) { _locoff = off; }285void set_stkoff(uint off) { _stkoff = off; }286void set_monoff(uint off) { _monoff = off; }287void set_scloff(uint off) { _scloff = off; }288void set_endoff(uint off) { _endoff = off; }289void set_offsets(uint off) {290_locoff = _stkoff = _monoff = _scloff = _endoff = off;291}292void set_map(SafePointNode *map) { _map = map; }293void set_sp(uint sp) { _sp = sp; }294// _reexecute is initialized to "undefined" for a new bci295void set_bci(int bci) {if(_bci != bci)_reexecute=Reexecute_Undefined; _bci = bci; }296void set_should_reexecute(bool reexec) {_reexecute = reexec ? Reexecute_True : Reexecute_False;}297298// Miscellaneous utility functions299JVMState* clone_deep(Compile* C) const; // recursively clones caller chain300JVMState* clone_shallow(Compile* C) const; // retains uncloned caller301void set_map_deep(SafePointNode *map);// reset map for all callers302void adapt_position(int delta); // Adapt offsets in in-array after adding an edge.303int interpreter_frame_size() const;304305#ifndef PRODUCT306void format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;307void dump_spec(outputStream *st) const;308void dump_on(outputStream* st) const;309void dump() const {310dump_on(tty);311}312#endif313};314315//------------------------------SafePointNode----------------------------------316// A SafePointNode is a subclass of a MultiNode for convenience (and317// potential code sharing) only - conceptually it is independent of318// the Node semantics.319class SafePointNode : public MultiNode {320virtual uint cmp( const Node &n ) const;321virtual uint size_of() const; // Size is bigger322323public:324SafePointNode(uint edges, JVMState* jvms,325// A plain safepoint advertises no memory effects (NULL):326const TypePtr* adr_type = NULL)327: MultiNode( edges ),328_jvms(jvms),329_oop_map(NULL),330_adr_type(adr_type)331{332init_class_id(Class_SafePoint);333}334335OopMap* _oop_map; // Array of OopMap info (8-bit char) for GC336JVMState* const _jvms; // Pointer to list of JVM State objects337const TypePtr* _adr_type; // What type of memory does this node produce?338ReplacedNodes _replaced_nodes; // During parsing: list of pair of nodes from calls to GraphKit::replace_in_map()339340// Many calls take *all* of memory as input,341// but some produce a limited subset of that memory as output.342// The adr_type reports the call's behavior as a store, not a load.343344virtual JVMState* jvms() const { return _jvms; }345void set_jvms(JVMState* s) {346*(JVMState**)&_jvms = s; // override const attribute in the accessor347}348OopMap *oop_map() const { return _oop_map; }349void set_oop_map(OopMap *om) { _oop_map = om; }350351private:352void verify_input(JVMState* jvms, uint idx) const {353assert(verify_jvms(jvms), "jvms must match");354Node* n = in(idx);355assert((!n->bottom_type()->isa_long() && !n->bottom_type()->isa_double()) ||356in(idx + 1)->is_top(), "2nd half of long/double");357}358359public:360// Functionality from old debug nodes which has changed361Node *local(JVMState* jvms, uint idx) const {362verify_input(jvms, jvms->locoff() + idx);363return in(jvms->locoff() + idx);364}365Node *stack(JVMState* jvms, uint idx) const {366verify_input(jvms, jvms->stkoff() + idx);367return in(jvms->stkoff() + idx);368}369Node *argument(JVMState* jvms, uint idx) const {370verify_input(jvms, jvms->argoff() + idx);371return in(jvms->argoff() + idx);372}373Node *monitor_box(JVMState* jvms, uint idx) const {374assert(verify_jvms(jvms), "jvms must match");375return in(jvms->monitor_box_offset(idx));376}377Node *monitor_obj(JVMState* jvms, uint idx) const {378assert(verify_jvms(jvms), "jvms must match");379return in(jvms->monitor_obj_offset(idx));380}381382void set_local(JVMState* jvms, uint idx, Node *c);383384void set_stack(JVMState* jvms, uint idx, Node *c) {385assert(verify_jvms(jvms), "jvms must match");386set_req(jvms->stkoff() + idx, c);387}388void set_argument(JVMState* jvms, uint idx, Node *c) {389assert(verify_jvms(jvms), "jvms must match");390set_req(jvms->argoff() + idx, c);391}392void ensure_stack(JVMState* jvms, uint stk_size) {393assert(verify_jvms(jvms), "jvms must match");394int grow_by = (int)stk_size - (int)jvms->stk_size();395if (grow_by > 0) grow_stack(jvms, grow_by);396}397void grow_stack(JVMState* jvms, uint grow_by);398// Handle monitor stack399void push_monitor( const FastLockNode *lock );400void pop_monitor ();401Node *peek_monitor_box() const;402Node *peek_monitor_obj() const;403404// Access functions for the JVM405Node *control () const { return in(TypeFunc::Control ); }406Node *i_o () const { return in(TypeFunc::I_O ); }407Node *memory () const { return in(TypeFunc::Memory ); }408Node *returnadr() const { return in(TypeFunc::ReturnAdr); }409Node *frameptr () const { return in(TypeFunc::FramePtr ); }410411void set_control ( Node *c ) { set_req(TypeFunc::Control,c); }412void set_i_o ( Node *c ) { set_req(TypeFunc::I_O ,c); }413void set_memory ( Node *c ) { set_req(TypeFunc::Memory ,c); }414415MergeMemNode* merged_memory() const {416return in(TypeFunc::Memory)->as_MergeMem();417}418419// The parser marks useless maps as dead when it's done with them:420bool is_killed() { return in(TypeFunc::Control) == NULL; }421422// Exception states bubbling out of subgraphs such as inlined calls423// are recorded here. (There might be more than one, hence the "next".)424// This feature is used only for safepoints which serve as "maps"425// for JVM states during parsing, intrinsic expansion, etc.426SafePointNode* next_exception() const;427void set_next_exception(SafePointNode* n);428bool has_exceptions() const { return next_exception() != NULL; }429430// Helper methods to operate on replaced nodes431ReplacedNodes replaced_nodes() const {432return _replaced_nodes;433}434435void set_replaced_nodes(ReplacedNodes replaced_nodes) {436_replaced_nodes = replaced_nodes;437}438439void clone_replaced_nodes() {440_replaced_nodes.clone();441}442void record_replaced_node(Node* initial, Node* improved) {443_replaced_nodes.record(initial, improved);444}445void transfer_replaced_nodes_from(SafePointNode* sfpt, uint idx = 0) {446_replaced_nodes.transfer_from(sfpt->_replaced_nodes, idx);447}448void delete_replaced_nodes() {449_replaced_nodes.reset();450}451void apply_replaced_nodes(uint idx) {452_replaced_nodes.apply(this, idx);453}454void merge_replaced_nodes_with(SafePointNode* sfpt) {455_replaced_nodes.merge_with(sfpt->_replaced_nodes);456}457bool has_replaced_nodes() const {458return !_replaced_nodes.is_empty();459}460461void disconnect_from_root(PhaseIterGVN *igvn);462463// Standard Node stuff464virtual int Opcode() const;465virtual bool pinned() const { return true; }466virtual const Type *Value( PhaseTransform *phase ) const;467virtual const Type *bottom_type() const { return Type::CONTROL; }468virtual const TypePtr *adr_type() const { return _adr_type; }469virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);470virtual Node *Identity( PhaseTransform *phase );471virtual uint ideal_reg() const { return 0; }472virtual const RegMask &in_RegMask(uint) const;473virtual const RegMask &out_RegMask() const;474virtual uint match_edge(uint idx) const;475476static bool needs_polling_address_input();477478#ifndef PRODUCT479virtual void dump_spec(outputStream *st) const;480#endif481};482483//------------------------------SafePointScalarObjectNode----------------------484// A SafePointScalarObjectNode represents the state of a scalarized object485// at a safepoint.486487class SafePointScalarObjectNode: public TypeNode {488uint _first_index; // First input edge relative index of a SafePoint node where489// states of the scalarized object fields are collected.490// It is relative to the last (youngest) jvms->_scloff.491uint _n_fields; // Number of non-static fields of the scalarized object.492DEBUG_ONLY(AllocateNode* _alloc;)493494virtual uint hash() const ; // { return NO_HASH; }495virtual uint cmp( const Node &n ) const;496497uint first_index() const { return _first_index; }498499public:500SafePointScalarObjectNode(const TypeOopPtr* tp,501#ifdef ASSERT502AllocateNode* alloc,503#endif504uint first_index, uint n_fields);505virtual int Opcode() const;506virtual uint ideal_reg() const;507virtual const RegMask &in_RegMask(uint) const;508virtual const RegMask &out_RegMask() const;509virtual uint match_edge(uint idx) const;510511uint first_index(JVMState* jvms) const {512assert(jvms != NULL, "missed JVMS");513return jvms->scloff() + _first_index;514}515uint n_fields() const { return _n_fields; }516517#ifdef ASSERT518AllocateNode* alloc() const { return _alloc; }519#endif520521virtual uint size_of() const { return sizeof(*this); }522523// Assumes that "this" is an argument to a safepoint node "s", and that524// "new_call" is being created to correspond to "s". But the difference525// between the start index of the jvmstates of "new_call" and "s" is526// "jvms_adj". Produce and return a SafePointScalarObjectNode that527// corresponds appropriately to "this" in "new_call". Assumes that528// "sosn_map" is a map, specific to the translation of "s" to "new_call",529// mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.530SafePointScalarObjectNode* clone(Dict* sosn_map) const;531532#ifndef PRODUCT533virtual void dump_spec(outputStream *st) const;534#endif535};536537538// Simple container for the outgoing projections of a call. Useful539// for serious surgery on calls.540class CallProjections : public StackObj {541public:542Node* fallthrough_proj;543Node* fallthrough_catchproj;544Node* fallthrough_memproj;545Node* fallthrough_ioproj;546Node* catchall_catchproj;547Node* catchall_memproj;548Node* catchall_ioproj;549Node* resproj;550Node* exobj;551};552553class CallGenerator;554555//------------------------------CallNode---------------------------------------556// Call nodes now subsume the function of debug nodes at callsites, so they557// contain the functionality of a full scope chain of debug nodes.558class CallNode : public SafePointNode {559friend class VMStructs;560public:561const TypeFunc *_tf; // Function type562address _entry_point; // Address of method being called563float _cnt; // Estimate of number of times called564CallGenerator* _generator; // corresponding CallGenerator for some late inline calls565566CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type)567: SafePointNode(tf->domain()->cnt(), NULL, adr_type),568_tf(tf),569_entry_point(addr),570_cnt(COUNT_UNKNOWN),571_generator(NULL)572{573init_class_id(Class_Call);574}575576const TypeFunc* tf() const { return _tf; }577const address entry_point() const { return _entry_point; }578const float cnt() const { return _cnt; }579CallGenerator* generator() const { return _generator; }580581void set_tf(const TypeFunc* tf) { _tf = tf; }582void set_entry_point(address p) { _entry_point = p; }583void set_cnt(float c) { _cnt = c; }584void set_generator(CallGenerator* cg) { _generator = cg; }585586virtual const Type *bottom_type() const;587virtual const Type *Value( PhaseTransform *phase ) const;588virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);589virtual Node *Identity( PhaseTransform *phase ) { return this; }590virtual uint cmp( const Node &n ) const;591virtual uint size_of() const = 0;592virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;593virtual Node *match( const ProjNode *proj, const Matcher *m );594virtual uint ideal_reg() const { return NotAMachineReg; }595// Are we guaranteed that this node is a safepoint? Not true for leaf calls and596// for some macro nodes whose expansion does not have a safepoint on the fast path.597virtual bool guaranteed_safepoint() { return true; }598// For macro nodes, the JVMState gets modified during expansion. If calls599// use MachConstantBase, it gets modified during matching. So when cloning600// the node the JVMState must be cloned. Default is not to clone.601virtual void clone_jvms(Compile* C) {602if (C->needs_clone_jvms() && jvms() != NULL) {603set_jvms(jvms()->clone_deep(C));604jvms()->set_map_deep(this);605}606}607608// Returns true if the call may modify n609virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase);610// Does this node have a use of n other than in debug information?611bool has_non_debug_use(Node *n);612// Returns the unique CheckCastPP of a call613// or result projection is there are several CheckCastPP614// or returns NULL if there is no one.615Node *result_cast();616// Does this node returns pointer?617bool returns_pointer() const {618const TypeTuple *r = tf()->range();619return (r->cnt() > TypeFunc::Parms &&620r->field_at(TypeFunc::Parms)->isa_ptr());621}622623// Collect all the interesting edges from a call for use in624// replacing the call by something else. Used by macro expansion625// and the late inlining support.626void extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts = true);627628virtual uint match_edge(uint idx) const;629630#ifndef PRODUCT631virtual void dump_req(outputStream *st = tty) const;632virtual void dump_spec(outputStream *st) const;633#endif634};635636637//------------------------------CallJavaNode-----------------------------------638// Make a static or dynamic subroutine call node using Java calling639// convention. (The "Java" calling convention is the compiler's calling640// convention, as opposed to the interpreter's or that of native C.)641class CallJavaNode : public CallNode {642friend class VMStructs;643protected:644virtual uint cmp( const Node &n ) const;645virtual uint size_of() const; // Size is bigger646647bool _optimized_virtual;648bool _method_handle_invoke;649ciMethod* _method; // Method being direct called650public:651const int _bci; // Byte Code Index of call byte code652CallJavaNode(const TypeFunc* tf , address addr, ciMethod* method, int bci)653: CallNode(tf, addr, TypePtr::BOTTOM),654_method(method), _bci(bci),655_optimized_virtual(false),656_method_handle_invoke(false)657{658init_class_id(Class_CallJava);659}660661virtual int Opcode() const;662ciMethod* method() const { return _method; }663void set_method(ciMethod *m) { _method = m; }664void set_optimized_virtual(bool f) { _optimized_virtual = f; }665bool is_optimized_virtual() const { return _optimized_virtual; }666void set_method_handle_invoke(bool f) { _method_handle_invoke = f; }667bool is_method_handle_invoke() const { return _method_handle_invoke; }668669#ifndef PRODUCT670virtual void dump_spec(outputStream *st) const;671#endif672};673674//------------------------------CallStaticJavaNode-----------------------------675// Make a direct subroutine call using Java calling convention (for static676// calls and optimized virtual calls, plus calls to wrappers for run-time677// routines); generates static stub.678class CallStaticJavaNode : public CallJavaNode {679virtual uint cmp( const Node &n ) const;680virtual uint size_of() const; // Size is bigger681public:682CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method, int bci)683: CallJavaNode(tf, addr, method, bci), _name(NULL) {684init_class_id(Class_CallStaticJava);685if (C->eliminate_boxing() && (method != NULL) && method->is_boxing_method()) {686init_flags(Flag_is_macro);687C->add_macro_node(this);688}689_is_scalar_replaceable = false;690_is_non_escaping = false;691}692CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, int bci,693const TypePtr* adr_type)694: CallJavaNode(tf, addr, NULL, bci), _name(name) {695init_class_id(Class_CallStaticJava);696// This node calls a runtime stub, which often has narrow memory effects.697_adr_type = adr_type;698_is_scalar_replaceable = false;699_is_non_escaping = false;700}701const char *_name; // Runtime wrapper name702703// Result of Escape Analysis704bool _is_scalar_replaceable;705bool _is_non_escaping;706707// If this is an uncommon trap, return the request code, else zero.708int uncommon_trap_request() const;709static int extract_uncommon_trap_request(const Node* call);710711bool is_boxing_method() const {712return is_macro() && (method() != NULL) && method()->is_boxing_method();713}714// Later inlining modifies the JVMState, so we need to clone it715// when the call node is cloned (because it is macro node).716virtual void clone_jvms(Compile* C) {717if ((jvms() != NULL) && is_boxing_method()) {718set_jvms(jvms()->clone_deep(C));719jvms()->set_map_deep(this);720}721}722723virtual int Opcode() const;724#ifndef PRODUCT725virtual void dump_spec(outputStream *st) const;726#endif727};728729//------------------------------CallDynamicJavaNode----------------------------730// Make a dispatched call using Java calling convention.731class CallDynamicJavaNode : public CallJavaNode {732virtual uint cmp( const Node &n ) const;733virtual uint size_of() const; // Size is bigger734public:735CallDynamicJavaNode( const TypeFunc *tf , address addr, ciMethod* method, int vtable_index, int bci ) : CallJavaNode(tf,addr,method,bci), _vtable_index(vtable_index) {736init_class_id(Class_CallDynamicJava);737}738739int _vtable_index;740virtual int Opcode() const;741#ifndef PRODUCT742virtual void dump_spec(outputStream *st) const;743#endif744};745746//------------------------------CallRuntimeNode--------------------------------747// Make a direct subroutine call node into compiled C++ code.748class CallRuntimeNode : public CallNode {749virtual uint cmp( const Node &n ) const;750virtual uint size_of() const; // Size is bigger751public:752CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,753const TypePtr* adr_type)754: CallNode(tf, addr, adr_type),755_name(name)756{757init_class_id(Class_CallRuntime);758}759760const char *_name; // Printable name, if _method is NULL761virtual int Opcode() const;762virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;763764bool is_call_to_arraycopystub() const;765766#ifndef PRODUCT767virtual void dump_spec(outputStream *st) const;768#endif769};770771//------------------------------CallLeafNode-----------------------------------772// Make a direct subroutine call node into compiled C++ code, without773// safepoints774class CallLeafNode : public CallRuntimeNode {775public:776CallLeafNode(const TypeFunc* tf, address addr, const char* name,777const TypePtr* adr_type)778: CallRuntimeNode(tf, addr, name, adr_type)779{780init_class_id(Class_CallLeaf);781}782virtual int Opcode() const;783virtual bool guaranteed_safepoint() { return false; }784virtual bool is_g1_wb_pre_call() const { return entry_point() == CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre); }785virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);786787static bool has_only_g1_wb_pre_uses(Node* n);788789#ifndef PRODUCT790virtual void dump_spec(outputStream *st) const;791#endif792};793794//------------------------------CallLeafNoFPNode-------------------------------795// CallLeafNode, not using floating point or using it in the same manner as796// the generated code797class CallLeafNoFPNode : public CallLeafNode {798public:799CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,800const TypePtr* adr_type)801: CallLeafNode(tf, addr, name, adr_type)802{803}804virtual int Opcode() const;805};806807808//------------------------------Allocate---------------------------------------809// High-level memory allocation810//811// AllocateNode and AllocateArrayNode are subclasses of CallNode because they will812// get expanded into a code sequence containing a call. Unlike other CallNodes,813// they have 2 memory projections and 2 i_o projections (which are distinguished by814// the _is_io_use flag in the projection.) This is needed when expanding the node in815// order to differentiate the uses of the projection on the normal control path from816// those on the exception return path.817//818class AllocateNode : public CallNode {819public:820enum {821// Output:822RawAddress = TypeFunc::Parms, // the newly-allocated raw address823// Inputs:824AllocSize = TypeFunc::Parms, // size (in bytes) of the new object825KlassNode, // type (maybe dynamic) of the obj.826InitialTest, // slow-path test (may be constant)827ALength, // array length (or TOP if none)828ParmLimit829};830831static const TypeFunc* alloc_type(const Type* t) {832const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);833fields[AllocSize] = TypeInt::POS;834fields[KlassNode] = TypeInstPtr::NOTNULL;835fields[InitialTest] = TypeInt::BOOL;836fields[ALength] = t; // length (can be a bad length)837838const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);839840// create result type (range)841fields = TypeTuple::fields(1);842fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop843844const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);845846return TypeFunc::make(domain, range);847}848849// Result of Escape Analysis850bool _is_scalar_replaceable;851bool _is_non_escaping;852853virtual uint size_of() const; // Size is bigger854AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,855Node *size, Node *klass_node, Node *initial_test);856// Expansion modifies the JVMState, so we need to clone it857virtual void clone_jvms(Compile* C) {858if (jvms() != NULL) {859set_jvms(jvms()->clone_deep(C));860jvms()->set_map_deep(this);861}862}863virtual int Opcode() const;864virtual uint ideal_reg() const { return Op_RegP; }865virtual bool guaranteed_safepoint() { return false; }866867// allocations do not modify their arguments868virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false;}869870// Pattern-match a possible usage of AllocateNode.871// Return null if no allocation is recognized.872// The operand is the pointer produced by the (possible) allocation.873// It must be a projection of the Allocate or its subsequent CastPP.874// (Note: This function is defined in file graphKit.cpp, near875// GraphKit::new_instance/new_array, whose output it recognizes.)876// The 'ptr' may not have an offset unless the 'offset' argument is given.877static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);878879// Fancy version which uses AddPNode::Ideal_base_and_offset to strip880// an offset, which is reported back to the caller.881// (Note: AllocateNode::Ideal_allocation is defined in graphKit.cpp.)882static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase,883intptr_t& offset);884885// Dig the klass operand out of a (possible) allocation site.886static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) {887AllocateNode* allo = Ideal_allocation(ptr, phase);888return (allo == NULL) ? NULL : allo->in(KlassNode);889}890891// Conservatively small estimate of offset of first non-header byte.892int minimum_header_size() {893return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :894instanceOopDesc::base_offset_in_bytes();895}896897// Return the corresponding initialization barrier (or null if none).898// Walks out edges to find it...899// (Note: Both InitializeNode::allocation and AllocateNode::initialization900// are defined in graphKit.cpp, which sets up the bidirectional relation.)901InitializeNode* initialization();902903// Convenience for initialization->maybe_set_complete(phase)904bool maybe_set_complete(PhaseGVN* phase);905906#ifdef AARCH64907// Return true if allocation doesn't escape thread, its escape state908// needs be noEscape or ArgEscape. InitializeNode._does_not_escape909// is true when its allocation's escape state is noEscape or910// ArgEscape. In case allocation's InitializeNode is NULL, check911// AlllocateNode._is_non_escaping flag.912// AlllocateNode._is_non_escaping is true when its escape state is913// noEscape.914bool does_not_escape_thread() {915InitializeNode* init = NULL;916return _is_non_escaping || (((init = initialization()) != NULL) && init->does_not_escape());917}918#endif919};920921//------------------------------AllocateArray---------------------------------922//923// High-level array allocation924//925class AllocateArrayNode : public AllocateNode {926public:927AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,928Node* size, Node* klass_node, Node* initial_test,929Node* count_val930)931: AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,932initial_test)933{934init_class_id(Class_AllocateArray);935set_req(AllocateNode::ALength, count_val);936}937virtual int Opcode() const;938virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);939940// Dig the length operand out of a array allocation site.941Node* Ideal_length() {942return in(AllocateNode::ALength);943}944945// Dig the length operand out of a array allocation site and narrow the946// type with a CastII, if necesssary947Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);948949// Pattern-match a possible usage of AllocateArrayNode.950// Return null if no allocation is recognized.951static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {952AllocateNode* allo = Ideal_allocation(ptr, phase);953return (allo == NULL || !allo->is_AllocateArray())954? NULL : allo->as_AllocateArray();955}956};957958//------------------------------AbstractLockNode-----------------------------------959class AbstractLockNode: public CallNode {960private:961enum {962Regular = 0, // Normal lock963NonEscObj, // Lock is used for non escaping object964Coarsened, // Lock was coarsened965Nested // Nested lock966} _kind;967#ifndef PRODUCT968NamedCounter* _counter;969#endif970971protected:972// helper functions for lock elimination973//974975bool find_matching_unlock(const Node* ctrl, LockNode* lock,976GrowableArray<AbstractLockNode*> &lock_ops);977bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,978GrowableArray<AbstractLockNode*> &lock_ops);979bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,980GrowableArray<AbstractLockNode*> &lock_ops);981LockNode *find_matching_lock(UnlockNode* unlock);982983// Update the counter to indicate that this lock was eliminated.984void set_eliminated_lock_counter() PRODUCT_RETURN;985986public:987AbstractLockNode(const TypeFunc *tf)988: CallNode(tf, NULL, TypeRawPtr::BOTTOM),989_kind(Regular)990{991#ifndef PRODUCT992_counter = NULL;993#endif994}995virtual int Opcode() const = 0;996Node * obj_node() const {return in(TypeFunc::Parms + 0); }997Node * box_node() const {return in(TypeFunc::Parms + 1); }998Node * fastlock_node() const {return in(TypeFunc::Parms + 2); }999void set_box_node(Node* box) { set_req(TypeFunc::Parms + 1, box); }10001001const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC;}10021003virtual uint size_of() const { return sizeof(*this); }10041005bool is_eliminated() const { return (_kind != Regular); }1006bool is_non_esc_obj() const { return (_kind == NonEscObj); }1007bool is_coarsened() const { return (_kind == Coarsened); }1008bool is_nested() const { return (_kind == Nested); }10091010const char * kind_as_string() const;1011void log_lock_optimization(Compile* c, const char * tag) const;10121013void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }1014void set_coarsened() { _kind = Coarsened; set_eliminated_lock_counter(); }1015void set_nested() { _kind = Nested; set_eliminated_lock_counter(); }10161017// locking does not modify its arguments1018virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase){ return false;}10191020#ifndef PRODUCT1021void create_lock_counter(JVMState* s);1022NamedCounter* counter() const { return _counter; }1023#endif1024};10251026//------------------------------Lock---------------------------------------1027// High-level lock operation1028//1029// This is a subclass of CallNode because it is a macro node which gets expanded1030// into a code sequence containing a call. This node takes 3 "parameters":1031// 0 - object to lock1032// 1 - a BoxLockNode1033// 2 - a FastLockNode1034//1035class LockNode : public AbstractLockNode {1036public:10371038static const TypeFunc *lock_type() {1039// create input type (domain)1040const Type **fields = TypeTuple::fields(3);1041fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked1042fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock1043fields[TypeFunc::Parms+2] = TypeInt::BOOL; // FastLock1044const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields);10451046// create result type (range)1047fields = TypeTuple::fields(0);10481049const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);10501051return TypeFunc::make(domain,range);1052}10531054virtual int Opcode() const;1055virtual uint size_of() const; // Size is bigger1056LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {1057init_class_id(Class_Lock);1058init_flags(Flag_is_macro);1059C->add_macro_node(this);1060}1061virtual bool guaranteed_safepoint() { return false; }10621063virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);1064// Expansion modifies the JVMState, so we need to clone it1065virtual void clone_jvms(Compile* C) {1066if (jvms() != NULL) {1067set_jvms(jvms()->clone_deep(C));1068jvms()->set_map_deep(this);1069}1070}10711072bool is_nested_lock_region(); // Is this Lock nested?1073bool is_nested_lock_region(Compile * c); // Why isn't this Lock nested?1074};10751076//------------------------------Unlock---------------------------------------1077// High-level unlock operation1078class UnlockNode : public AbstractLockNode {1079private:1080#ifdef ASSERT1081JVMState* const _dbg_jvms; // Pointer to list of JVM State objects1082#endif1083public:1084virtual int Opcode() const;1085virtual uint size_of() const; // Size is bigger1086UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf )1087#ifdef ASSERT1088, _dbg_jvms(NULL)1089#endif1090{1091init_class_id(Class_Unlock);1092init_flags(Flag_is_macro);1093C->add_macro_node(this);1094}1095virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);1096// unlock is never a safepoint1097virtual bool guaranteed_safepoint() { return false; }1098#ifdef ASSERT1099void set_dbg_jvms(JVMState* s) {1100*(JVMState**)&_dbg_jvms = s; // override const attribute in the accessor1101}1102JVMState* dbg_jvms() const { return _dbg_jvms; }1103#else1104JVMState* dbg_jvms() const { return NULL; }1105#endif1106};11071108#endif // SHARE_VM_OPTO_CALLNODE_HPP110911101111