GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/opto/callnode.hpp
/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_CALLNODE_HPP
#define SHARE_VM_OPTO_CALLNODE_HPP

#include "opto/connode.hpp"
#include "opto/mulnode.hpp"
#include "opto/multnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/replacednodes.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class Chaitin;
class NamedCounter;
class MultiNode;
class SafePointNode;
class CallNode;
class CallJavaNode;
class CallStaticJavaNode;
class CallDynamicJavaNode;
class CallRuntimeNode;
class CallLeafNode;
class CallLeafNoFPNode;
class AllocateNode;
class AllocateArrayNode;
class BoxLockNode;
class LockNode;
class UnlockNode;
class JVMState;
class OopMap;
class State;
class StartNode;
class MachCallNode;
class FastLockNode;

//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_req(0,this);
    init_req(1,root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; };
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int Opcode() const;
  static const TypeTuple *osr_domain();
};


//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
    init_class_id(Class_Parm);
  }
  virtual int Opcode() const;
  virtual bool is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};


//------------------------------ReturnNode-------------------------------------
// Return from subroutine node
class ReturnNode : public Node {
public:
  ReturnNode( uint edges, Node *cntrl, Node *i_o, Node *memory, Node *retadr, Node *frameptr );
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; } // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};


//------------------------------RethrowNode------------------------------------
// Rethrow of exception at call site. Ends a procedure before rethrowing;
// ends the current basic block like a ReturnNode. Restores registers and
// unwinds stack. Rethrow happens in the caller's method.
class RethrowNode : public Node {
public:
  RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; } // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};


//------------------------------TailCallNode-----------------------------------
// Pop stack frame and jump indirect
class TailCallNode : public ReturnNode {
public:
  TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
    : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, moop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//------------------------------TailJumpNode-----------------------------------
// Pop stack frame and jump indirect
class TailJumpNode : public ReturnNode {
public:
  TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
    : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, ex_oop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//-------------------------------JVMState-------------------------------------
// A linked list of JVMState nodes captures the whole interpreter state,
// plus GC roots, for all active calls at some call site in this compilation
// unit. (If there is no inlining, then the list has exactly one link.)
// This provides a way to map the optimized program back into the interpreter,
// or to let the GC mark the stack.
class JVMState : public ResourceObj {
  friend class VMStructs;
public:
  typedef enum {
    Reexecute_Undefined = -1, // not defined -- will be translated into false later
    Reexecute_False     =  0, // false -- do not reexecute
    Reexecute_True      =  1  // true -- reexecute the bytecode
  } ReexecuteState; // Reexecute State

private:
  JVMState* _caller;         // List pointer for forming scope chains
  uint _depth;               // One more than caller depth, or one.
  uint _locoff;              // Offset to locals in input edge mapping
  uint _stkoff;              // Offset to stack in input edge mapping
  uint _monoff;              // Offset to monitors in input edge mapping
  uint _scloff;              // Offset to fields of scalar objs in input edge mapping
  uint _endoff;              // Offset to end of input edge mapping
  uint _sp;                  // Java Expression Stack Pointer for this state
  int _bci;                  // Byte Code Index of this JVM point
  ReexecuteState _reexecute; // Whether this bytecode needs to be re-executed
  ciMethod* _method;         // Method Pointer
  SafePointNode* _map;       // Map node associated with this scope
public:
  friend class Compile;
  friend class PreserveReexecuteState;

  // Because JVMState objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { } // fast deallocation

  // Create a new JVMState, ready for abstract interpretation.
  JVMState(ciMethod* method, JVMState* caller);
  JVMState(int stack_size); // root state; has a null method

  // Access functions for the JVM
  // ... --|--- loc ---|--- stk ---|--- arg ---|--- mon ---|--- scl ---|
  //       \ locoff    \ stkoff    \ argoff    \ monoff    \ scloff    \ endoff
  uint locoff() const { return _locoff; }
  uint stkoff() const { return _stkoff; }
  uint argoff() const { return _stkoff + _sp; }
  uint monoff() const { return _monoff; }
  uint scloff() const { return _scloff; }
  uint endoff() const { return _endoff; }
  uint oopoff() const { return debug_end(); }

  int loc_size() const { return stkoff() - locoff(); }
  int stk_size() const { return monoff() - stkoff(); }
  int mon_size() const { return scloff() - monoff(); }
  int scl_size() const { return endoff() - scloff(); }

  bool is_loc(uint i) const { return locoff() <= i && i < stkoff(); }
  bool is_stk(uint i) const { return stkoff() <= i && i < monoff(); }
  bool is_mon(uint i) const { return monoff() <= i && i < scloff(); }
  bool is_scl(uint i) const { return scloff() <= i && i < endoff(); }

  uint sp() const { return _sp; }
  int bci() const { return _bci; }
  bool should_reexecute() const { return _reexecute==Reexecute_True; }
  bool is_reexecute_undefined() const { return _reexecute==Reexecute_Undefined; }
  bool has_method() const { return _method != NULL; }
  ciMethod* method() const { assert(has_method(), ""); return _method; }
  JVMState* caller() const { return _caller; }
  SafePointNode* map() const { return _map; }
  uint depth() const { return _depth; }
  uint debug_start() const; // returns locoff of root caller
  uint debug_end() const;   // returns endoff of self
  uint debug_size() const {
    return loc_size() + sp() + mon_size() + scl_size();
  }
  uint debug_depth() const; // returns sum of debug_size values at all depths

  // Returns the JVM state at the desired depth (1 == root).
  JVMState* of_depth(int d) const;

  // Tells if two JVM states have the same call chain (depth, methods, & bcis).
  bool same_calls_as(const JVMState* that) const;

  // Monitors (monitors are stored as (boxNode, objNode) pairs)
  enum { logMonitorEdges = 1 };
  int nof_monitors() const { return mon_size() >> logMonitorEdges; }
  int monitor_depth() const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
  int monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
  int monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
  bool is_monitor_box(uint off) const {
    assert(is_mon(off), "should be called only for monitor edge");
    return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
  }
  bool is_monitor_use(uint off) const { return (is_mon(off)
                                                && is_monitor_box(off))
                                             || (caller() && caller()->is_monitor_use(off)); }

  // Initialization functions for the JVM
  void set_locoff(uint off) { _locoff = off; }
  void set_stkoff(uint off) { _stkoff = off; }
  void set_monoff(uint off) { _monoff = off; }
  void set_scloff(uint off) { _scloff = off; }
  void set_endoff(uint off) { _endoff = off; }
  void set_offsets(uint off) {
    _locoff = _stkoff = _monoff = _scloff = _endoff = off;
  }
  void set_map(SafePointNode *map) { _map = map; }
  void set_sp(uint sp) { _sp = sp; }
  // _reexecute is initialized to "undefined" for a new bci
  void set_bci(int bci) { if (_bci != bci) _reexecute = Reexecute_Undefined; _bci = bci; }
  void set_should_reexecute(bool reexec) { _reexecute = reexec ? Reexecute_True : Reexecute_False; }

  // Miscellaneous utility functions
  JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
  JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
  void set_map_deep(SafePointNode *map);     // reset map for all callers
  void adapt_position(int delta);            // Adapt offsets in in-array after adding an edge.
  int interpreter_frame_size() const;

#ifndef PRODUCT
  void format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
  void dump_spec(outputStream *st) const;
  void dump_on(outputStream* st) const;
  void dump() const {
    dump_on(tty);
  }
#endif
};

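// Illustrative, non-normative sketch of how the offsets above partition a
// SafePointNode's debug-info inputs (assumed context: `jvms` is a valid
// JVMState* and `sfpt` is its map):
//
//   SafePointNode* sfpt = jvms->map();
//   for (int i = 0; i < jvms->loc_size(); i++) {
//     Node* l = sfpt->in(jvms->locoff() + i);          // i-th local
//   }
//   for (uint i = 0; i < jvms->sp(); i++) {
//     Node* s = sfpt->in(jvms->stkoff() + i);          // i-th expression stack slot
//   }
//   // Monitors are (box, obj) pairs, so each monitor occupies
//   // 1 << logMonitorEdges == 2 consecutive input edges:
//   for (int m = 0; m < jvms->nof_monitors(); m++) {
//     Node* box = sfpt->in(jvms->monitor_box_offset(m));
//     Node* obj = sfpt->in(jvms->monitor_obj_offset(m));
//   }
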
//------------------------------SafePointNode----------------------------------
// A SafePointNode is a subclass of a MultiNode for convenience (and
// potential code sharing) only - conceptually it is independent of
// the Node semantics.
class SafePointNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

public:
  SafePointNode(uint edges, JVMState* jvms,
                // A plain safepoint advertises no memory effects (NULL):
                const TypePtr* adr_type = NULL)
    : MultiNode( edges ),
      _jvms(jvms),
      _oop_map(NULL),
      _adr_type(adr_type)
  {
    init_class_id(Class_SafePoint);
  }

  OopMap* _oop_map;              // Array of OopMap info (8-bit char) for GC
  JVMState* const _jvms;         // Pointer to list of JVM State objects
  const TypePtr* _adr_type;      // What type of memory does this node produce?
  ReplacedNodes _replaced_nodes; // During parsing: list of pairs of nodes from calls to GraphKit::replace_in_map()

  // Many calls take *all* of memory as input,
  // but some produce a limited subset of that memory as output.
  // The adr_type reports the call's behavior as a store, not a load.

  virtual JVMState* jvms() const { return _jvms; }
  void set_jvms(JVMState* s) {
    *(JVMState**)&_jvms = s; // override const attribute in the accessor
  }
  OopMap *oop_map() const { return _oop_map; }
  void set_oop_map(OopMap *om) { _oop_map = om; }

private:
  void verify_input(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    Node* n = in(idx);
    assert((!n->bottom_type()->isa_long() && !n->bottom_type()->isa_double()) ||
           in(idx + 1)->is_top(), "2nd half of long/double");
  }

public:
  // Functionality from old debug nodes which has changed
  Node *local(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->locoff() + idx);
    return in(jvms->locoff() + idx);
  }
  Node *stack(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->stkoff() + idx);
    return in(jvms->stkoff() + idx);
  }
  Node *argument(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->argoff() + idx);
    return in(jvms->argoff() + idx);
  }
  Node *monitor_box(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_box_offset(idx));
  }
  Node *monitor_obj(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_obj_offset(idx));
  }

  void set_local(JVMState* jvms, uint idx, Node *c);

  void set_stack(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->stkoff() + idx, c);
  }
  void set_argument(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->argoff() + idx, c);
  }
  void ensure_stack(JVMState* jvms, uint stk_size) {
    assert(verify_jvms(jvms), "jvms must match");
    int grow_by = (int)stk_size - (int)jvms->stk_size();
    if (grow_by > 0) grow_stack(jvms, grow_by);
  }
  void grow_stack(JVMState* jvms, uint grow_by);
  // Handle monitor stack
  void push_monitor( const FastLockNode *lock );
  void pop_monitor ();
  Node *peek_monitor_box() const;
  Node *peek_monitor_obj() const;

  // Access functions for the JVM
  Node *control () const { return in(TypeFunc::Control ); }
  Node *i_o () const { return in(TypeFunc::I_O ); }
  Node *memory () const { return in(TypeFunc::Memory ); }
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr () const { return in(TypeFunc::FramePtr ); }

  void set_control ( Node *c ) { set_req(TypeFunc::Control,c); }
  void set_i_o ( Node *c ) { set_req(TypeFunc::I_O ,c); }
  void set_memory ( Node *c ) { set_req(TypeFunc::Memory ,c); }

  MergeMemNode* merged_memory() const {
    return in(TypeFunc::Memory)->as_MergeMem();
  }

  // The parser marks useless maps as dead when it's done with them:
  bool is_killed() { return in(TypeFunc::Control) == NULL; }

  // Exception states bubbling out of subgraphs such as inlined calls
  // are recorded here. (There might be more than one, hence the "next".)
  // This feature is used only for safepoints which serve as "maps"
  // for JVM states during parsing, intrinsic expansion, etc.
  SafePointNode* next_exception() const;
  void set_next_exception(SafePointNode* n);
  bool has_exceptions() const { return next_exception() != NULL; }

  // Helper methods to operate on replaced nodes
  ReplacedNodes replaced_nodes() const {
    return _replaced_nodes;
  }

  void set_replaced_nodes(ReplacedNodes replaced_nodes) {
    _replaced_nodes = replaced_nodes;
  }

  void clone_replaced_nodes() {
    _replaced_nodes.clone();
  }
  void record_replaced_node(Node* initial, Node* improved) {
    _replaced_nodes.record(initial, improved);
  }
  void transfer_replaced_nodes_from(SafePointNode* sfpt, uint idx = 0) {
    _replaced_nodes.transfer_from(sfpt->_replaced_nodes, idx);
  }
  void delete_replaced_nodes() {
    _replaced_nodes.reset();
  }
  void apply_replaced_nodes(uint idx) {
    _replaced_nodes.apply(this, idx);
  }
  void merge_replaced_nodes_with(SafePointNode* sfpt) {
    _replaced_nodes.merge_with(sfpt->_replaced_nodes);
  }
  bool has_replaced_nodes() const {
    return !_replaced_nodes.is_empty();
  }

  void disconnect_from_root(PhaseIterGVN *igvn);

  // Standard Node stuff
  virtual int Opcode() const;
  virtual bool pinned() const { return true; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual const Type *bottom_type() const { return Type::CONTROL; }
  virtual const TypePtr *adr_type() const { return _adr_type; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node *Identity( PhaseTransform *phase );
  virtual uint ideal_reg() const { return 0; }
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint match_edge(uint idx) const;

  static bool needs_polling_address_input();

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

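// Illustrative sketch of the typed accessors above (assumed context: `sfpt`
// is a SafePointNode* and `jvms` is its youngest JVMState*):
//
//   Node* recv = sfpt->argument(jvms, 0);     // first outgoing argument, when the
//                                             // map describes a call
//   Node* loc0 = sfpt->local(jvms, 0);        // first interpreter local
//   sfpt->set_stack(jvms, 0, recv);           // overwrite an expression stack slot
//   MergeMemNode* mm = sfpt->merged_memory(); // entire memory state at this point
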
//------------------------------SafePointScalarObjectNode----------------------
// A SafePointScalarObjectNode represents the state of a scalarized object
// at a safepoint.

class SafePointScalarObjectNode: public TypeNode {
  uint _first_index; // First input edge relative index of a SafePoint node where
                     // states of the scalarized object fields are collected.
                     // It is relative to the last (youngest) jvms->_scloff.
  uint _n_fields;    // Number of non-static fields of the scalarized object.
  DEBUG_ONLY(AllocateNode* _alloc;)

  virtual uint hash() const; // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const;

  uint first_index() const { return _first_index; }

public:
  SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                            AllocateNode* alloc,
#endif
                            uint first_index, uint n_fields);
  virtual int Opcode() const;
  virtual uint ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint match_edge(uint idx) const;

  uint first_index(JVMState* jvms) const {
    assert(jvms != NULL, "missed JVMS");
    return jvms->scloff() + _first_index;
  }
  uint n_fields() const { return _n_fields; }

#ifdef ASSERT
  AllocateNode* alloc() const { return _alloc; }
#endif

  virtual uint size_of() const { return sizeof(*this); }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s". But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj". Produce and return a SafePointScalarObjectNode that
  // corresponds appropriately to "this" in "new_call". Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
  SafePointScalarObjectNode* clone(Dict* sosn_map) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};


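// Illustrative sketch (assumed context: `sosn` is a SafePointScalarObjectNode*
// recorded in the scalar-object section of safepoint `sfpt`, whose youngest
// state is `jvms`; the indexing convention is taken from first_index() above):
//
//   uint base = sosn->first_index(jvms);      // == jvms->scloff() + relative index
//   for (uint k = 0; k < sosn->n_fields(); k++) {
//     Node* field_value = sfpt->in(base + k); // value of the k-th non-static field
//   }
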
// Simple container for the outgoing projections of a call. Useful
// for serious surgery on calls.
class CallProjections : public StackObj {
public:
  Node* fallthrough_proj;
  Node* fallthrough_catchproj;
  Node* fallthrough_memproj;
  Node* fallthrough_ioproj;
  Node* catchall_catchproj;
  Node* catchall_memproj;
  Node* catchall_ioproj;
  Node* resproj;
  Node* exobj;
};

class CallGenerator;

//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {
  friend class VMStructs;
public:
  const TypeFunc *_tf;       // Function type
  address _entry_point;      // Address of method being called
  float _cnt;                // Estimate of number of times called
  CallGenerator* _generator; // corresponding CallGenerator for some late inline calls

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type)
    : SafePointNode(tf->domain()->cnt(), NULL, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN),
      _generator(NULL)
  {
    init_class_id(Class_Call);
  }

  const TypeFunc* tf() const { return _tf; }
  const address entry_point() const { return _entry_point; }
  const float cnt() const { return _cnt; }
  CallGenerator* generator() const { return _generator; }

  void set_tf(const TypeFunc* tf) { _tf = tf; }
  void set_entry_point(address p) { _entry_point = p; }
  void set_cnt(float c) { _cnt = c; }
  void set_generator(CallGenerator* cg) { _generator = cg; }

  virtual const Type *bottom_type() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node *Identity( PhaseTransform *phase ) { return this; }
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const = 0;
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint? Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool guaranteed_safepoint() { return true; }
  // For macro nodes, the JVMState gets modified during expansion. If calls
  // use MachConstantBase, it gets modified during matching. So when cloning
  // the node the JVMState must be cloned. Default is not to clone.
  virtual void clone_jvms(Compile* C) {
    if (C->needs_clone_jvms() && jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  // Returns true if the call may modify n
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase);
  // Does this node have a use of n other than in debug information?
  bool has_non_debug_use(Node *n);
  // Returns the unique CheckCastPP of a call,
  // or the result projection if there are several CheckCastPPs,
  // or NULL if there is none.
  Node *result_cast();
  // Does this node return a pointer?
  bool returns_pointer() const {
    const TypeTuple *r = tf()->range();
    return (r->cnt() > TypeFunc::Parms &&
            r->field_at(TypeFunc::Parms)->isa_ptr());
  }

  // Collect all the interesting edges from a call for use in
  // replacing the call by something else. Used by macro expansion
  // and the late inlining support.
  void extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts = true);

  virtual uint match_edge(uint idx) const;

#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
  virtual void dump_spec(outputStream *st) const;
#endif
};


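// Illustrative sketch of extract_projections() (assumed context: `call` is a
// CallNode* that is about to be replaced, e.g. during macro expansion):
//
//   CallProjections projs;
//   call->extract_projections(&projs, true /* separate_io_proj */);
//   // projs.fallthrough_catchproj, projs.fallthrough_memproj and
//   // projs.fallthrough_ioproj are the control, memory and I/O users on the
//   // normal path; the catchall_* fields are their exception-path
//   // counterparts, and projs.resproj (if non-NULL) carries the result.
//   if (call->returns_pointer() && projs.resproj != NULL) {
//     Node* oop_result = projs.resproj;
//   }
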
//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention. (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
  friend class VMStructs;
protected:
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool _optimized_virtual;
  bool _method_handle_invoke;
  ciMethod* _method; // Method being directly called
public:
  const int _bci; // Byte Code Index of call byte code
  CallJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallNode(tf, addr, TypePtr::BOTTOM),
      _method(method), _bci(bci),
      _optimized_virtual(false),
      _method_handle_invoke(false)
  {
    init_class_id(Class_CallJava);
  }

  virtual int Opcode() const;
  ciMethod* method() const { return _method; }
  void set_method(ciMethod *m) { _method = m; }
  void set_optimized_virtual(bool f) { _optimized_virtual = f; }
  bool is_optimized_virtual() const { return _optimized_virtual; }
  void set_method_handle_invoke(bool f) { _method_handle_invoke = f; }
  bool is_method_handle_invoke() const { return _method_handle_invoke; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallJavaNode(tf, addr, method, bci), _name(NULL) {
    init_class_id(Class_CallStaticJava);
    if (C->eliminate_boxing() && (method != NULL) && method->is_boxing_method()) {
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }
    _is_scalar_replaceable = false;
    _is_non_escaping = false;
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, int bci,
                     const TypePtr* adr_type)
    : CallJavaNode(tf, addr, NULL, bci), _name(name) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
    _is_scalar_replaceable = false;
    _is_non_escaping = false;
  }
  const char *_name; // Runtime wrapper name

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  static int extract_uncommon_trap_request(const Node* call);

  bool is_boxing_method() const {
    return is_macro() && (method() != NULL) && method()->is_boxing_method();
  }
  // Later inlining modifies the JVMState, so we need to clone it
  // when the call node is cloned (because it is a macro node).
  virtual void clone_jvms(Compile* C) {
    if ((jvms() != NULL) && is_boxing_method()) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  virtual int Opcode() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

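// Illustrative sketch (assumed context: `n` is a Node* visited while walking
// the graph; is_CallStaticJava()/as_CallStaticJava() come from node.hpp):
//
//   if (n->is_CallStaticJava()) {
//     CallStaticJavaNode* csj = n->as_CallStaticJava();
//     if (csj->uncommon_trap_request() != 0) {
//       // csj is a call into the uncommon-trap blob; the request code encodes
//       // the deoptimization reason and action.
//     } else if (csj->is_boxing_method()) {
//       // a box-elimination candidate; note the clone_jvms() override above.
//     }
//   }
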
//------------------------------CallDynamicJavaNode----------------------------
// Make a dispatched call using Java calling convention.
class CallDynamicJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallDynamicJavaNode( const TypeFunc *tf, address addr, ciMethod* method, int vtable_index, int bci ) : CallJavaNode(tf,addr,method,bci), _vtable_index(vtable_index) {
    init_class_id(Class_CallDynamicJava);
  }

  int _vtable_index;
  virtual int Opcode() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallRuntimeNode--------------------------------
// Make a direct subroutine call node into compiled C++ code.
class CallRuntimeNode : public CallNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
                  const TypePtr* adr_type)
    : CallNode(tf, addr, adr_type),
      _name(name)
  {
    init_class_id(Class_CallRuntime);
  }

  const char *_name; // Printable name, if _method is NULL
  virtual int Opcode() const;
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;

  bool is_call_to_arraycopystub() const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNode-----------------------------------
// Make a direct subroutine call node into compiled C++ code, without
// safepoints
class CallLeafNode : public CallRuntimeNode {
public:
  CallLeafNode(const TypeFunc* tf, address addr, const char* name,
               const TypePtr* adr_type)
    : CallRuntimeNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeaf);
  }
  virtual int Opcode() const;
  virtual bool guaranteed_safepoint() { return false; }
  virtual bool is_g1_wb_pre_call() const { return entry_point() == CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre); }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  static bool has_only_g1_wb_pre_uses(Node* n);

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
  }
  virtual int Opcode() const;
};


//------------------------------Allocate---------------------------------------
// High-level memory allocation
//
// AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
// get expanded into a code sequence containing a call. Unlike other CallNodes,
// they have 2 memory projections and 2 i_o projections (which are distinguished by
// the _is_io_use flag in the projection.) This is needed when expanding the node in
// order to differentiate the uses of the projection on the normal control path from
// those on the exception return path.
//
class AllocateNode : public CallNode {
public:
  enum {
    // Output:
    RawAddress = TypeFunc::Parms, // the newly-allocated raw address
    // Inputs:
    AllocSize = TypeFunc::Parms,  // size (in bytes) of the new object
    KlassNode,                    // type (maybe dynamic) of the obj.
    InitialTest,                  // slow-path test (may be constant)
    ALength,                      // array length (or TOP if none)
    ParmLimit
  };

  static const TypeFunc* alloc_type(const Type* t) {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize] = TypeInt::POS;
    fields[KlassNode] = TypeInstPtr::NOTNULL;
    fields[InitialTest] = TypeInt::BOOL;
    fields[ALength] = t; // length (can be a bad length)

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void clone_jvms(Compile* C) {
    if (jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool guaranteed_safepoint() { return false; }

  // allocations do not modify their arguments
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false;}

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note: This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.
  // (Note: AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase,
                                        intptr_t& offset);

  // Dig the klass operand out of a (possible) allocation site.
  static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL) ? NULL : allo->in(KlassNode);
  }

  // Conservatively small estimate of offset of first non-header byte.
  int minimum_header_size() {
    return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
                                instanceOopDesc::base_offset_in_bytes();
  }

  // Return the corresponding initialization barrier (or null if none).
  // Walks out edges to find it...
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  InitializeNode* initialization();

  // Convenience for initialization->maybe_set_complete(phase)
  bool maybe_set_complete(PhaseGVN* phase);

#ifdef AARCH64
  // Return true if the allocation doesn't escape its thread, i.e. its escape
  // state is NoEscape or ArgEscape. InitializeNode._does_not_escape is true
  // when the allocation's escape state is NoEscape or ArgEscape. In case the
  // allocation's InitializeNode is NULL, check the
  // AllocateNode._is_non_escaping flag, which is true when the escape state
  // is NoEscape.
  bool does_not_escape_thread() {
    InitializeNode* init = NULL;
    return _is_non_escaping || (((init = initialization()) != NULL) && init->does_not_escape());
  }
#endif
};

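// Illustrative sketch of the pattern-matching helpers above (assumed context:
// `ptr` may be the oop produced by an allocation and `phase` is a
// PhaseTransform*):
//
//   AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr, phase);
//   if (alloc != NULL) {
//     Node* klass = AllocateNode::Ideal_klass(ptr, phase); // == alloc->in(KlassNode)
//     Node* size  = alloc->in(AllocateNode::AllocSize);    // size in bytes
//     InitializeNode* init = alloc->initialization();      // may be NULL
//   }
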
//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
                    Node* size, Node* klass_node, Node* initial_test,
                    Node* count_val
                    )
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
                   initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength, count_val);
  }
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Dig the length operand out of an array allocation site.
  Node* Ideal_length() {
    return in(AllocateNode::ALength);
  }

  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary
  Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL || !allo->is_AllocateArray())
           ? NULL : allo->as_AllocateArray();
  }
};

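// Illustrative sketch (assumed context as in the AllocateNode example above;
// `ary_type` is the TypeOopPtr* of the array being allocated):
//
//   AllocateArrayNode* ary_alloc = AllocateArrayNode::Ideal_array_allocation(ptr, phase);
//   if (ary_alloc != NULL) {
//     Node* len = ary_alloc->Ideal_length();               // raw ALength input
//     // or, narrowed by the array type's size bound when it is known:
//     Node* len2 = ary_alloc->make_ideal_length(ary_type, phase);
//   }
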
//------------------------------AbstractLockNode-----------------------------------
class AbstractLockNode: public CallNode {
private:
  enum {
    Regular = 0, // Normal lock
    NonEscObj,   // Lock is used for a non-escaping object
    Coarsened,   // Lock was coarsened
    Nested       // Nested lock
  } _kind;
#ifndef PRODUCT
  NamedCounter* _counter;
#endif

protected:
  // helper functions for lock elimination
  //

  bool find_matching_unlock(const Node* ctrl, LockNode* lock,
                            GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                       GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                               GrowableArray<AbstractLockNode*> &lock_ops);
  LockNode *find_matching_lock(UnlockNode* unlock);

  // Update the counter to indicate that this lock was eliminated.
  void set_eliminated_lock_counter() PRODUCT_RETURN;

public:
  AbstractLockNode(const TypeFunc *tf)
    : CallNode(tf, NULL, TypeRawPtr::BOTTOM),
      _kind(Regular)
  {
#ifndef PRODUCT
    _counter = NULL;
#endif
  }
  virtual int Opcode() const = 0;
  Node * obj_node() const {return in(TypeFunc::Parms + 0); }
  Node * box_node() const {return in(TypeFunc::Parms + 1); }
  Node * fastlock_node() const {return in(TypeFunc::Parms + 2); }
  void set_box_node(Node* box) { set_req(TypeFunc::Parms + 1, box); }

  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC;}

  virtual uint size_of() const { return sizeof(*this); }

  bool is_eliminated() const { return (_kind != Regular); }
  bool is_non_esc_obj() const { return (_kind == NonEscObj); }
  bool is_coarsened() const { return (_kind == Coarsened); }
  bool is_nested() const { return (_kind == Nested); }

  const char * kind_as_string() const;
  void log_lock_optimization(Compile* c, const char * tag) const;

  void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }
  void set_coarsened()   { _kind = Coarsened; set_eliminated_lock_counter(); }
  void set_nested()      { _kind = Nested; set_eliminated_lock_counter(); }

  // locking does not modify its arguments
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase){ return false;}

#ifndef PRODUCT
  void create_lock_counter(JVMState* s);
  NamedCounter* counter() const { return _counter; }
#endif
};

//------------------------------Lock---------------------------------------
// High-level lock operation
//
// This is a subclass of CallNode because it is a macro node which gets expanded
// into a code sequence containing a call. This node takes 3 "parameters":
//   0 - object to lock
//   1 - a BoxLockNode
//   2 - a FastLockNode
//
class LockNode : public AbstractLockNode {
public:

  static const TypeFunc *lock_type() {
    // create input type (domain)
    const Type **fields = TypeTuple::fields(3);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
    fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;   // Address of stack location for lock
    fields[TypeFunc::Parms+2] = TypeInt::BOOL;        // FastLock
    const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);

    return TypeFunc::make(domain,range);
  }

  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Lock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual bool guaranteed_safepoint() { return false; }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void clone_jvms(Compile* C) {
    if (jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  bool is_nested_lock_region();            // Is this Lock nested?
  bool is_nested_lock_region(Compile * c); // Why isn't this Lock nested?
};

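// Illustrative sketch of the parameter layout described above (assumed
// context: `lock` is an AbstractLockNode*):
//
//   Node* obj  = lock->obj_node();      // Parms + 0: object being locked
//   Node* box  = lock->box_node();      // Parms + 1: BoxLockNode (stack slot)
//   Node* flck = lock->fastlock_node(); // Parms + 2: FastLockNode result
//   if (lock->is_non_esc_obj()) {
//     // escape analysis proved the object thread-local, so this lock/unlock
//     // pair has been marked for elimination
//   }
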
//------------------------------Unlock---------------------------------------
// High-level unlock operation
class UnlockNode : public AbstractLockNode {
private:
#ifdef ASSERT
  JVMState* const _dbg_jvms; // Pointer to list of JVM State objects
#endif
public:
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf )
#ifdef ASSERT
    , _dbg_jvms(NULL)
#endif
  {
    init_class_id(Class_Unlock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // unlock is never a safepoint
  virtual bool guaranteed_safepoint() { return false; }
#ifdef ASSERT
  void set_dbg_jvms(JVMState* s) {
    *(JVMState**)&_dbg_jvms = s; // override const attribute in the accessor
  }
  JVMState* dbg_jvms() const { return _dbg_jvms; }
#else
  JVMState* dbg_jvms() const { return NULL; }
#endif
};

#endif // SHARE_VM_OPTO_CALLNODE_HPP