GitHub Repository: PojavLauncherTeam/openjdk-aarch32-jdk8u
Path: blob/jdk8u272-b10-aarch32-20201026/hotspot/src/share/vm/opto/callnode.cpp
1
/*
2
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
3
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
*
5
* This code is free software; you can redistribute it and/or modify it
6
* under the terms of the GNU General Public License version 2 only, as
7
* published by the Free Software Foundation.
8
*
9
* This code is distributed in the hope that it will be useful, but WITHOUT
10
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12
* version 2 for more details (a copy is included in the LICENSE file that
13
* accompanied this code).
14
*
15
* You should have received a copy of the GNU General Public License version
16
* 2 along with this work; if not, write to the Free Software Foundation,
17
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
*
19
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
* or visit www.oracle.com if you need additional information or have any
21
* questions.
22
*
23
*/
24
25
#include "precompiled.hpp"
26
#include "compiler/compileLog.hpp"
27
#include "ci/bcEscapeAnalyzer.hpp"
28
#include "compiler/oopMap.hpp"
29
#include "opto/callGenerator.hpp"
30
#include "opto/callnode.hpp"
31
#include "opto/escape.hpp"
32
#include "opto/locknode.hpp"
33
#include "opto/machnode.hpp"
34
#include "opto/matcher.hpp"
35
#include "opto/parse.hpp"
36
#include "opto/regalloc.hpp"
37
#include "opto/regmask.hpp"
38
#include "opto/rootnode.hpp"
39
#include "opto/runtime.hpp"
40
41
// Portions of code courtesy of Clifford Click
42
43
// Optimization - Graph Style
44
45
//=============================================================================
46
uint StartNode::size_of() const { return sizeof(*this); }
47
uint StartNode::cmp( const Node &n ) const
48
{ return _domain == ((StartNode&)n)._domain; }
49
const Type *StartNode::bottom_type() const { return _domain; }
50
const Type *StartNode::Value(PhaseTransform *phase) const { return _domain; }
51
#ifndef PRODUCT
52
void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
53
#endif
54
55
//------------------------------Ideal------------------------------------------
56
Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
57
return remove_dead_region(phase, can_reshape) ? this : NULL;
58
}
59
60
//------------------------------calling_convention-----------------------------
61
void StartNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
62
Matcher::calling_convention( sig_bt, parm_regs, argcnt, false );
63
}
64
65
//------------------------------Registers--------------------------------------
66
const RegMask &StartNode::in_RegMask(uint) const {
67
return RegMask::Empty;
68
}
69
70
//------------------------------match------------------------------------------
71
// Construct projections for incoming parameters, and their RegMask info
72
Node *StartNode::match( const ProjNode *proj, const Matcher *match ) {
73
switch (proj->_con) {
74
case TypeFunc::Control:
75
case TypeFunc::I_O:
76
case TypeFunc::Memory:
77
return new (match->C) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
78
case TypeFunc::FramePtr:
79
return new (match->C) MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
80
case TypeFunc::ReturnAdr:
81
return new (match->C) MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
82
case TypeFunc::Parms:
83
default: {
84
uint parm_num = proj->_con - TypeFunc::Parms;
85
const Type *t = _domain->field_at(proj->_con);
86
if (t->base() == Type::Half) // 2nd half of Longs and Doubles
87
return new (match->C) ConNode(Type::TOP);
88
uint ideal_reg = t->ideal_reg();
89
RegMask &rm = match->_calling_convention_mask[parm_num];
90
return new (match->C) MachProjNode(this,proj->_con,rm,ideal_reg);
91
}
92
}
93
return NULL;
94
}
95
96
//------------------------------StartOSRNode----------------------------------
97
// The method start node for an on stack replacement adapter
98
99
//------------------------------osr_domain-----------------------------
100
const TypeTuple *StartOSRNode::osr_domain() {
101
const Type **fields = TypeTuple::fields(2);
102
fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // address of osr buffer
103
104
return TypeTuple::make(TypeFunc::Parms+1, fields);
105
}
106
107
//=============================================================================
108
const char * const ParmNode::names[TypeFunc::Parms+1] = {
109
"Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
110
};
111
112
#ifndef PRODUCT
113
void ParmNode::dump_spec(outputStream *st) const {
114
if( _con < TypeFunc::Parms ) {
115
st->print("%s", names[_con]);
116
} else {
117
st->print("Parm%d: ",_con-TypeFunc::Parms);
118
// Verbose and WizardMode dump bottom_type for all nodes
119
if( !Verbose && !WizardMode ) bottom_type()->dump_on(st);
120
}
121
}
122
#endif
123
124
uint ParmNode::ideal_reg() const {
125
switch( _con ) {
126
case TypeFunc::Control : // fall through
127
case TypeFunc::I_O : // fall through
128
case TypeFunc::Memory : return 0;
129
case TypeFunc::FramePtr : // fall through
130
case TypeFunc::ReturnAdr: return Op_RegP;
131
default : assert( _con > TypeFunc::Parms, "" );
132
// fall through
133
case TypeFunc::Parms : {
134
// Type of argument being passed
135
const Type *t = in(0)->as_Start()->_domain->field_at(_con);
136
return t->ideal_reg();
137
}
138
}
139
ShouldNotReachHere();
140
return 0;
141
}
142
143
//=============================================================================
144
ReturnNode::ReturnNode(uint edges, Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr ) : Node(edges) {
145
init_req(TypeFunc::Control,cntrl);
146
init_req(TypeFunc::I_O,i_o);
147
init_req(TypeFunc::Memory,memory);
148
init_req(TypeFunc::FramePtr,frameptr);
149
init_req(TypeFunc::ReturnAdr,retadr);
150
}
151
152
Node *ReturnNode::Ideal(PhaseGVN *phase, bool can_reshape){
153
return remove_dead_region(phase, can_reshape) ? this : NULL;
154
}
155
156
const Type *ReturnNode::Value( PhaseTransform *phase ) const {
157
return ( phase->type(in(TypeFunc::Control)) == Type::TOP)
158
? Type::TOP
159
: Type::BOTTOM;
160
}
161
162
// Do we Match on this edge index or not? No edges on return nodes
163
uint ReturnNode::match_edge(uint idx) const {
164
return 0;
165
}
166
167
168
#ifndef PRODUCT
169
void ReturnNode::dump_req(outputStream *st) const {
170
// Dump the required inputs, enclosed in '(' and ')'
171
uint i; // Exit value of loop
172
for (i = 0; i < req(); i++) { // For all required inputs
173
if (i == TypeFunc::Parms) st->print("returns");
174
if (in(i)) st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
175
else st->print("_ ");
176
}
177
}
178
#endif
179
180
//=============================================================================
181
RethrowNode::RethrowNode(
182
Node* cntrl,
183
Node* i_o,
184
Node* memory,
185
Node* frameptr,
186
Node* ret_adr,
187
Node* exception
188
) : Node(TypeFunc::Parms + 1) {
189
init_req(TypeFunc::Control , cntrl );
190
init_req(TypeFunc::I_O , i_o );
191
init_req(TypeFunc::Memory , memory );
192
init_req(TypeFunc::FramePtr , frameptr );
193
init_req(TypeFunc::ReturnAdr, ret_adr);
194
init_req(TypeFunc::Parms , exception);
195
}
196
197
Node *RethrowNode::Ideal(PhaseGVN *phase, bool can_reshape){
198
return remove_dead_region(phase, can_reshape) ? this : NULL;
199
}
200
201
const Type *RethrowNode::Value( PhaseTransform *phase ) const {
202
return (phase->type(in(TypeFunc::Control)) == Type::TOP)
203
? Type::TOP
204
: Type::BOTTOM;
205
}
206
207
uint RethrowNode::match_edge(uint idx) const {
208
return 0;
209
}
210
211
#ifndef PRODUCT
212
void RethrowNode::dump_req(outputStream *st) const {
213
// Dump the required inputs, enclosed in '(' and ')'
214
uint i; // Exit value of loop
215
for (i = 0; i < req(); i++) { // For all required inputs
216
if (i == TypeFunc::Parms) st->print("exception");
217
if (in(i)) st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
218
else st->print("_ ");
219
}
220
}
221
#endif
222
223
//=============================================================================
224
// Do we Match on this edge index or not? Match only target address & method
225
uint TailCallNode::match_edge(uint idx) const {
226
return TypeFunc::Parms <= idx && idx <= TypeFunc::Parms+1;
227
}
228
229
//=============================================================================
230
// Do we Match on this edge index or not? Match only target address & oop
231
uint TailJumpNode::match_edge(uint idx) const {
232
return TypeFunc::Parms <= idx && idx <= TypeFunc::Parms+1;
233
}
234
235
//=============================================================================
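// A JVMState records the interpreter state of one activation, for use in
// debug info at safepoints.  The debug-info edges of the owning map are
// partitioned per frame by the offsets kept here (established by the
// constructors below and shifted by grow_stack()/push_monitor()):
//
//   [locoff, stkoff)  locals            (max_locals() slots)
//   [stkoff, monoff)  expression stack  (max_stack() slots)
//   [monoff, scloff)  monitors          (box/obj pairs)
//   [scloff, endoff)  scalar-replaced object fields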
236
JVMState::JVMState(ciMethod* method, JVMState* caller) :
237
_method(method) {
238
assert(method != NULL, "must be valid call site");
239
_reexecute = Reexecute_Undefined;
240
debug_only(_bci = -99); // random garbage value
241
debug_only(_map = (SafePointNode*)-1);
242
_caller = caller;
243
_depth = 1 + (caller == NULL ? 0 : caller->depth());
244
_locoff = TypeFunc::Parms;
245
_stkoff = _locoff + _method->max_locals();
246
_monoff = _stkoff + _method->max_stack();
247
_scloff = _monoff;
248
_endoff = _monoff;
249
_sp = 0;
250
}
251
JVMState::JVMState(int stack_size) :
252
_method(NULL) {
253
_bci = InvocationEntryBci;
254
_reexecute = Reexecute_Undefined;
255
debug_only(_map = (SafePointNode*)-1);
256
_caller = NULL;
257
_depth = 1;
258
_locoff = TypeFunc::Parms;
259
_stkoff = _locoff;
260
_monoff = _stkoff + stack_size;
261
_scloff = _monoff;
262
_endoff = _monoff;
263
_sp = 0;
264
}
265
266
//--------------------------------of_depth-------------------------------------
267
JVMState* JVMState::of_depth(int d) const {
268
const JVMState* jvmp = this;
269
assert(0 < d && (uint)d <= depth(), "oob");
270
for (int skip = depth() - d; skip > 0; skip--) {
271
jvmp = jvmp->caller();
272
}
273
assert(jvmp->depth() == (uint)d, "found the right one");
274
return (JVMState*)jvmp;
275
}
276
277
//-----------------------------same_calls_as-----------------------------------
278
bool JVMState::same_calls_as(const JVMState* that) const {
279
if (this == that) return true;
280
if (this->depth() != that->depth()) return false;
281
const JVMState* p = this;
282
const JVMState* q = that;
283
for (;;) {
284
if (p->_method != q->_method) return false;
285
if (p->_method == NULL) return true; // bci is irrelevant
286
if (p->_bci != q->_bci) return false;
287
if (p->_reexecute != q->_reexecute) return false;
288
p = p->caller();
289
q = q->caller();
290
if (p == q) return true;
291
assert(p != NULL && q != NULL, "depth check ensures we don't run off end");
292
}
293
}
294
295
//------------------------------debug_start------------------------------------
296
uint JVMState::debug_start() const {
297
debug_only(JVMState* jvmroot = of_depth(1));
298
assert(jvmroot->locoff() <= this->locoff(), "youngest JVMState must be last");
299
return of_depth(1)->locoff();
300
}
301
302
//-------------------------------debug_end-------------------------------------
303
uint JVMState::debug_end() const {
304
debug_only(JVMState* jvmroot = of_depth(1));
305
assert(jvmroot->endoff() <= this->endoff(), "youngest JVMState must be last");
306
return endoff();
307
}
308
309
//------------------------------debug_depth------------------------------------
310
uint JVMState::debug_depth() const {
311
uint total = 0;
312
for (const JVMState* jvmp = this; jvmp != NULL; jvmp = jvmp->caller()) {
313
total += jvmp->debug_size();
314
}
315
return total;
316
}
317
318
#ifndef PRODUCT
319
320
//------------------------------format_helper----------------------------------
321
// Given an allocation (a Chaitin object) and a Node decide if the Node carries
322
// any defined value or not. If it does, print out the register or constant.
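// Scalar-replaced inputs (SafePointScalarObjectNode) are not printed inline:
// they are recorded in 'scobjs' and shown as "#ScObj<n>" so that the caller
// (JVMState::format below) can print each object's fields once afterwards.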
323
static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, const char *msg, uint i, GrowableArray<SafePointScalarObjectNode*> *scobjs ) {
324
if (n == NULL) { st->print(" NULL"); return; }
325
if (n->is_SafePointScalarObject()) {
326
// Scalar replacement.
327
SafePointScalarObjectNode* spobj = n->as_SafePointScalarObject();
328
scobjs->append_if_missing(spobj);
329
int sco_n = scobjs->find(spobj);
330
assert(sco_n >= 0, "");
331
st->print(" %s%d]=#ScObj" INT32_FORMAT, msg, i, sco_n);
332
return;
333
}
334
if (regalloc->node_regs_max_index() > 0 &&
335
OptoReg::is_valid(regalloc->get_reg_first(n))) { // Check for undefined
336
char buf[50];
337
regalloc->dump_register(n,buf);
338
st->print(" %s%d]=%s",msg,i,buf);
339
} else { // No register, but might be constant
340
const Type *t = n->bottom_type();
341
switch (t->base()) {
342
case Type::Int:
343
st->print(" %s%d]=#" INT32_FORMAT,msg,i,t->is_int()->get_con());
344
break;
345
case Type::AnyPtr:
346
assert( t == TypePtr::NULL_PTR || n->in_dump(), "" );
347
st->print(" %s%d]=#NULL",msg,i);
348
break;
349
case Type::AryPtr:
350
case Type::InstPtr:
351
st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->isa_oopptr()->const_oop()));
352
break;
353
case Type::KlassPtr:
354
st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_klassptr()->klass()));
355
break;
356
case Type::MetadataPtr:
357
st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_metadataptr()->metadata()));
358
break;
359
case Type::NarrowOop:
360
st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_oopptr()->const_oop()));
361
break;
362
case Type::RawPtr:
363
st->print(" %s%d]=#Raw" INTPTR_FORMAT,msg,i,p2i(t->is_rawptr()));
364
break;
365
case Type::DoubleCon:
366
st->print(" %s%d]=#%fD",msg,i,t->is_double_constant()->_d);
367
break;
368
case Type::FloatCon:
369
st->print(" %s%d]=#%fF",msg,i,t->is_float_constant()->_f);
370
break;
371
case Type::Long:
372
st->print(" %s%d]=#" INT64_FORMAT,msg,i,(int64_t)(t->is_long()->get_con()));
373
break;
374
case Type::Half:
375
case Type::Top:
376
st->print(" %s%d]=_",msg,i);
377
break;
378
default: ShouldNotReachHere();
379
}
380
}
381
}
382
383
//------------------------------format-----------------------------------------
384
void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const {
385
st->print(" #");
386
if (_method) {
387
_method->print_short_name(st);
388
st->print(" @ bci:%d ",_bci);
389
} else {
390
st->print_cr(" runtime stub ");
391
return;
392
}
393
if (n->is_MachSafePoint()) {
394
GrowableArray<SafePointScalarObjectNode*> scobjs;
395
MachSafePointNode *mcall = n->as_MachSafePoint();
396
uint i;
397
// Print locals
398
for (i = 0; i < (uint)loc_size(); i++)
399
format_helper(regalloc, st, mcall->local(this, i), "L[", i, &scobjs);
400
// Print stack
401
for (i = 0; i < (uint)stk_size(); i++) {
402
if ((uint)(_stkoff + i) >= mcall->len())
403
st->print(" oob ");
404
else
405
format_helper(regalloc, st, mcall->stack(this, i), "STK[", i, &scobjs);
406
}
407
for (i = 0; (int)i < nof_monitors(); i++) {
408
Node *box = mcall->monitor_box(this, i);
409
Node *obj = mcall->monitor_obj(this, i);
410
if (regalloc->node_regs_max_index() > 0 &&
411
OptoReg::is_valid(regalloc->get_reg_first(box))) {
412
box = BoxLockNode::box_node(box);
413
format_helper(regalloc, st, box, "MON-BOX[", i, &scobjs);
414
} else {
415
OptoReg::Name box_reg = BoxLockNode::reg(box);
416
st->print(" MON-BOX%d=%s+%d",
417
i,
418
OptoReg::regname(OptoReg::c_frame_pointer),
419
regalloc->reg2offset(box_reg));
420
}
421
const char* obj_msg = "MON-OBJ[";
422
if (EliminateLocks) {
423
if (BoxLockNode::box_node(box)->is_eliminated())
424
obj_msg = "MON-OBJ(LOCK ELIMINATED)[";
425
}
426
format_helper(regalloc, st, obj, obj_msg, i, &scobjs);
427
}
428
429
for (i = 0; i < (uint)scobjs.length(); i++) {
430
// Scalar replaced objects.
431
st->cr();
432
st->print(" # ScObj" INT32_FORMAT " ", i);
433
SafePointScalarObjectNode* spobj = scobjs.at(i);
434
ciKlass* cik = spobj->bottom_type()->is_oopptr()->klass();
435
assert(cik->is_instance_klass() ||
436
cik->is_array_klass(), "Not supported allocation.");
437
ciInstanceKlass *iklass = NULL;
438
if (cik->is_instance_klass()) {
439
cik->print_name_on(st);
440
iklass = cik->as_instance_klass();
441
} else if (cik->is_type_array_klass()) {
442
cik->as_array_klass()->base_element_type()->print_name_on(st);
443
st->print("[%d]", spobj->n_fields());
444
} else if (cik->is_obj_array_klass()) {
445
ciKlass* cie = cik->as_obj_array_klass()->base_element_klass();
446
if (cie->is_instance_klass()) {
447
cie->print_name_on(st);
448
} else if (cie->is_type_array_klass()) {
449
cie->as_array_klass()->base_element_type()->print_name_on(st);
450
} else {
451
ShouldNotReachHere();
452
}
453
st->print("[%d]", spobj->n_fields());
454
int ndim = cik->as_array_klass()->dimension() - 1;
455
while (ndim-- > 0) {
456
st->print("[]");
457
}
458
}
459
st->print("={");
460
uint nf = spobj->n_fields();
461
if (nf > 0) {
462
uint first_ind = spobj->first_index(mcall->jvms());
463
Node* fld_node = mcall->in(first_ind);
464
ciField* cifield;
465
if (iklass != NULL) {
466
st->print(" [");
467
cifield = iklass->nonstatic_field_at(0);
468
cifield->print_name_on(st);
469
format_helper(regalloc, st, fld_node, ":", 0, &scobjs);
470
} else {
471
format_helper(regalloc, st, fld_node, "[", 0, &scobjs);
472
}
473
for (uint j = 1; j < nf; j++) {
474
fld_node = mcall->in(first_ind+j);
475
if (iklass != NULL) {
476
st->print(", [");
477
cifield = iklass->nonstatic_field_at(j);
478
cifield->print_name_on(st);
479
format_helper(regalloc, st, fld_node, ":", j, &scobjs);
480
} else {
481
format_helper(regalloc, st, fld_node, ", [", j, &scobjs);
482
}
483
}
484
}
485
st->print(" }");
486
}
487
}
488
st->cr();
489
if (caller() != NULL) caller()->format(regalloc, n, st);
490
}
491
492
493
void JVMState::dump_spec(outputStream *st) const {
494
if (_method != NULL) {
495
bool printed = false;
496
if (!Verbose) {
497
// The JVMS dumps make really, really long lines.
498
// Take out the most boring parts, which are the package prefixes.
499
char buf[500];
500
stringStream namest(buf, sizeof(buf));
501
_method->print_short_name(&namest);
502
if (namest.count() < sizeof(buf)) {
503
const char* name = namest.base();
504
if (name[0] == ' ') ++name;
505
const char* endcn = strchr(name, ':'); // end of class name
506
if (endcn == NULL) endcn = strchr(name, '(');
507
if (endcn == NULL) endcn = name + strlen(name);
508
while (endcn > name && endcn[-1] != '.' && endcn[-1] != '/')
509
--endcn;
510
st->print(" %s", endcn);
511
printed = true;
512
}
513
}
514
if (!printed)
515
_method->print_short_name(st);
516
st->print(" @ bci:%d",_bci);
517
if(_reexecute == Reexecute_True)
518
st->print(" reexecute");
519
} else {
520
st->print(" runtime stub");
521
}
522
if (caller() != NULL) caller()->dump_spec(st);
523
}
524
525
526
void JVMState::dump_on(outputStream* st) const {
527
bool print_map = _map && !((uintptr_t)_map & 1) &&
528
((caller() == NULL) || (caller()->map() != _map));
529
if (print_map) {
530
if (_map->len() > _map->req()) { // _map->has_exceptions()
531
Node* ex = _map->in(_map->req()); // _map->next_exception()
532
// skip the first one; it's already being printed
533
while (ex != NULL && ex->len() > ex->req()) {
534
ex = ex->in(ex->req()); // ex->next_exception()
535
ex->dump(1);
536
}
537
}
538
_map->dump(Verbose ? 2 : 1);
539
}
540
if (caller() != NULL) {
541
caller()->dump_on(st);
542
}
543
st->print("JVMS depth=%d loc=%d stk=%d arg=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=",
544
depth(), locoff(), stkoff(), argoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true":"false");
545
if (_method == NULL) {
546
st->print_cr("(none)");
547
} else {
548
_method->print_name(st);
549
st->cr();
550
if (bci() >= 0 && bci() < _method->code_size()) {
551
st->print(" bc: ");
552
_method->print_codes_on(bci(), bci()+1, st);
553
}
554
}
555
}
556
557
// Extra way to dump a jvms from the debugger,
558
// to avoid a bug with C++ member function calls.
559
void dump_jvms(JVMState* jvms) {
560
jvms->dump();
561
}
562
#endif
563
564
//--------------------------clone_shallow--------------------------------------
565
JVMState* JVMState::clone_shallow(Compile* C) const {
566
JVMState* n = has_method() ? new (C) JVMState(_method, _caller) : new (C) JVMState(0);
567
n->set_bci(_bci);
568
n->_reexecute = _reexecute;
569
n->set_locoff(_locoff);
570
n->set_stkoff(_stkoff);
571
n->set_monoff(_monoff);
572
n->set_scloff(_scloff);
573
n->set_endoff(_endoff);
574
n->set_sp(_sp);
575
n->set_map(_map);
576
return n;
577
}
578
579
//---------------------------clone_deep----------------------------------------
580
JVMState* JVMState::clone_deep(Compile* C) const {
581
JVMState* n = clone_shallow(C);
582
for (JVMState* p = n; p->_caller != NULL; p = p->_caller) {
583
p->_caller = p->_caller->clone_shallow(C);
584
}
585
assert(n->depth() == depth(), "sanity");
586
assert(n->debug_depth() == debug_depth(), "sanity");
587
return n;
588
}
589
590
/**
591
* Reset map for all callers
592
*/
593
void JVMState::set_map_deep(SafePointNode* map) {
594
for (JVMState* p = this; p->_caller != NULL; p = p->_caller) {
595
p->set_map(map);
596
}
597
}
598
599
// Adapt offsets in in-array after adding or removing an edge.
600
// Prerequisite is that the JVMState is used by only one node.
601
void JVMState::adapt_position(int delta) {
602
for (JVMState* jvms = this; jvms != NULL; jvms = jvms->caller()) {
603
jvms->set_locoff(jvms->locoff() + delta);
604
jvms->set_stkoff(jvms->stkoff() + delta);
605
jvms->set_monoff(jvms->monoff() + delta);
606
jvms->set_scloff(jvms->scloff() + delta);
607
jvms->set_endoff(jvms->endoff() + delta);
608
}
609
}
610
611
// Mirror the stack size calculation in the deopt code
612
// How much stack space would we need at this point in the program in
613
// case of deoptimization?
614
int JVMState::interpreter_frame_size() const {
615
const JVMState* jvms = this;
616
int size = 0;
617
int callee_parameters = 0;
618
int callee_locals = 0;
619
int extra_args = method()->max_stack() - stk_size();
620
621
while (jvms != NULL) {
622
int locks = jvms->nof_monitors();
623
int temps = jvms->stk_size();
624
bool is_top_frame = (jvms == this);
625
ciMethod* method = jvms->method();
626
627
int frame_size = BytesPerWord * Interpreter::size_activation(method->max_stack(),
628
temps + callee_parameters,
629
extra_args,
630
locks,
631
callee_parameters,
632
callee_locals,
633
is_top_frame);
634
size += frame_size;
635
636
callee_parameters = method->size_of_parameters();
637
callee_locals = method->max_locals();
638
extra_args = 0;
639
jvms = jvms->caller();
640
}
641
return size + Deoptimization::last_frame_adjust(0, callee_locals) * BytesPerWord;
642
}
643
644
//=============================================================================
645
uint CallNode::cmp( const Node &n ) const
646
{ return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; }
647
#ifndef PRODUCT
648
void CallNode::dump_req(outputStream *st) const {
649
// Dump the required inputs, enclosed in '(' and ')'
650
uint i; // Exit value of loop
651
for (i = 0; i < req(); i++) { // For all required inputs
652
if (i == TypeFunc::Parms) st->print("(");
653
if (in(i)) st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
654
else st->print("_ ");
655
}
656
st->print(")");
657
}
658
659
void CallNode::dump_spec(outputStream *st) const {
660
st->print(" ");
661
tf()->dump_on(st);
662
if (_cnt != COUNT_UNKNOWN) st->print(" C=%f",_cnt);
663
if (jvms() != NULL) jvms()->dump_spec(st);
664
}
665
#endif
666
667
const Type *CallNode::bottom_type() const { return tf()->range(); }
668
const Type *CallNode::Value(PhaseTransform *phase) const {
669
if (phase->type(in(0)) == Type::TOP) return Type::TOP;
670
return tf()->range();
671
}
672
673
//------------------------------calling_convention-----------------------------
674
void CallNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
675
// Use the standard compiler calling convention
676
Matcher::calling_convention( sig_bt, parm_regs, argcnt, true );
677
}
678
679
680
//------------------------------match------------------------------------------
681
// Construct projections for control, I/O, memory-fields, ..., and
682
// return result(s) along with their RegMask info
683
Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
684
switch (proj->_con) {
685
case TypeFunc::Control:
686
case TypeFunc::I_O:
687
case TypeFunc::Memory:
688
return new (match->C) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
689
690
case TypeFunc::Parms+1: // For LONG & DOUBLE returns
691
assert(tf()->_range->field_at(TypeFunc::Parms+1) == Type::HALF, "");
692
// 2nd half of doubles and longs
693
return new (match->C) MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad);
694
695
case TypeFunc::Parms: { // Normal returns
696
uint ideal_reg = tf()->range()->field_at(TypeFunc::Parms)->ideal_reg();
697
OptoRegPair regs = is_CallRuntime()
698
? match->c_return_value(ideal_reg,true) // Calls into C runtime
699
: match-> return_value(ideal_reg,true); // Calls into compiled Java code
700
RegMask rm = RegMask(regs.first());
701
if( OptoReg::is_valid(regs.second()) )
702
rm.Insert( regs.second() );
703
return new (match->C) MachProjNode(this,proj->_con,rm,ideal_reg);
704
}
705
706
case TypeFunc::ReturnAdr:
707
case TypeFunc::FramePtr:
708
default:
709
ShouldNotReachHere();
710
}
711
return NULL;
712
}
713
714
// Do we Match on this edge index or not? Match no edges
715
uint CallNode::match_edge(uint idx) const {
716
return 0;
717
}
718
719
//
720
// Determine whether the call could modify the field of the specified
721
// instance at the specified offset.
722
//
723
bool CallNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
724
assert((t_oop != NULL), "sanity");
725
if (t_oop->is_known_instance()) {
726
// The instance_id is set only for scalar-replaceable allocations which
727
// are not passed as arguments according to Escape Analysis.
728
return false;
729
}
730
if (t_oop->is_ptr_to_boxed_value()) {
731
ciKlass* boxing_klass = t_oop->klass();
732
if (is_CallStaticJava() && as_CallStaticJava()->is_boxing_method()) {
733
// Skip unrelated boxing methods.
734
Node* proj = proj_out(TypeFunc::Parms);
735
if ((proj == NULL) || (phase->type(proj)->is_instptr()->klass() != boxing_klass)) {
736
return false;
737
}
738
}
739
if (is_CallJava() && as_CallJava()->method() != NULL) {
740
ciMethod* meth = as_CallJava()->method();
741
if (meth->is_accessor()) {
742
return false;
743
}
744
// May modify (by reflection) if a boxing object is passed
745
// as argument or returned.
746
Node* proj = returns_pointer() ? proj_out(TypeFunc::Parms) : NULL;
747
if (proj != NULL) {
748
const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
749
if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
750
(inst_t->klass() == boxing_klass))) {
751
return true;
752
}
753
}
754
const TypeTuple* d = tf()->domain();
755
for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
756
const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
757
if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
758
(inst_t->klass() == boxing_klass))) {
759
return true;
760
}
761
}
762
return false;
763
}
764
}
765
return true;
766
}
767
768
// Does this call have a direct reference to n other than debug information?
769
bool CallNode::has_non_debug_use(Node *n) {
770
const TypeTuple * d = tf()->domain();
771
for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
772
Node *arg = in(i);
773
if (arg == n) {
774
return true;
775
}
776
}
777
return false;
778
}
779
780
// Returns the unique CheckCastPP of a call
781
// or 'this' if there are several CheckCastPP or unexpected uses
782
// or NULL if there is none.
783
Node *CallNode::result_cast() {
784
Node *cast = NULL;
785
786
Node *p = proj_out(TypeFunc::Parms);
787
if (p == NULL)
788
return NULL;
789
790
for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
791
Node *use = p->fast_out(i);
792
if (use->is_CheckCastPP()) {
793
if (cast != NULL) {
794
return this; // more than 1 CheckCastPP
795
}
796
cast = use;
797
} else if (!use->is_Initialize() &&
798
!use->is_AddP()) {
799
// Expected uses are restricted to a CheckCastPP, an Initialize
800
// node, and AddP nodes. If we encounter any other use (a Phi
801
// node can be seen in rare cases) return this to prevent
802
// incorrect optimizations.
803
return this;
804
}
805
}
806
return cast;
807
}
808
809
810
void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj) {
811
projs->fallthrough_proj = NULL;
812
projs->fallthrough_catchproj = NULL;
813
projs->fallthrough_ioproj = NULL;
814
projs->catchall_ioproj = NULL;
815
projs->catchall_catchproj = NULL;
816
projs->fallthrough_memproj = NULL;
817
projs->catchall_memproj = NULL;
818
projs->resproj = NULL;
819
projs->exobj = NULL;
820
821
for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
822
ProjNode *pn = fast_out(i)->as_Proj();
823
if (pn->outcnt() == 0) continue;
824
switch (pn->_con) {
825
case TypeFunc::Control:
826
{
827
// For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
828
projs->fallthrough_proj = pn;
829
DUIterator_Fast jmax, j = pn->fast_outs(jmax);
830
const Node *cn = pn->fast_out(j);
831
if (cn->is_Catch()) {
832
ProjNode *cpn = NULL;
833
for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
834
cpn = cn->fast_out(k)->as_Proj();
835
assert(cpn->is_CatchProj(), "must be a CatchProjNode");
836
if (cpn->_con == CatchProjNode::fall_through_index)
837
projs->fallthrough_catchproj = cpn;
838
else {
839
assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
840
projs->catchall_catchproj = cpn;
841
}
842
}
843
}
844
break;
845
}
846
case TypeFunc::I_O:
847
if (pn->_is_io_use)
848
projs->catchall_ioproj = pn;
849
else
850
projs->fallthrough_ioproj = pn;
851
for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
852
Node* e = pn->out(j);
853
if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
854
assert(projs->exobj == NULL, "only one");
855
projs->exobj = e;
856
}
857
}
858
break;
859
case TypeFunc::Memory:
860
if (pn->_is_io_use)
861
projs->catchall_memproj = pn;
862
else
863
projs->fallthrough_memproj = pn;
864
break;
865
case TypeFunc::Parms:
866
projs->resproj = pn;
867
break;
868
default:
869
assert(false, "unexpected projection from allocation node.");
870
}
871
}
872
873
// The resproj may not exist because the result could be ignored
874
// and the exception object may not exist if an exception handler
875
// swallows the exception, but all the others must exist and be found.
876
assert(projs->fallthrough_proj != NULL, "must be found");
877
assert(Compile::current()->inlining_incrementally() || projs->fallthrough_catchproj != NULL, "must be found");
878
assert(Compile::current()->inlining_incrementally() || projs->fallthrough_memproj != NULL, "must be found");
879
assert(Compile::current()->inlining_incrementally() || projs->fallthrough_ioproj != NULL, "must be found");
880
assert(Compile::current()->inlining_incrementally() || projs->catchall_catchproj != NULL, "must be found");
881
if (separate_io_proj) {
882
assert(Compile::current()->inlining_incrementally() || projs->catchall_memproj != NULL, "must be found");
883
assert(Compile::current()->inlining_incrementally() || projs->catchall_ioproj != NULL, "must be found");
884
}
885
}
886
887
Node *CallNode::Ideal(PhaseGVN *phase, bool can_reshape) {
888
CallGenerator* cg = generator();
889
if (can_reshape && cg != NULL && cg->is_mh_late_inline() && !cg->already_attempted()) {
890
// Check whether this MethodHandle call becomes a candidate for inlining
891
ciMethod* callee = cg->method();
892
vmIntrinsics::ID iid = callee->intrinsic_id();
893
if (iid == vmIntrinsics::_invokeBasic) {
894
if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
895
phase->C->prepend_late_inline(cg);
896
set_generator(NULL);
897
}
898
} else {
899
assert(callee->has_member_arg(), "wrong type of call?");
900
if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
901
phase->C->prepend_late_inline(cg);
902
set_generator(NULL);
903
}
904
}
905
}
906
return SafePointNode::Ideal(phase, can_reshape);
907
}
908
909
910
//=============================================================================
911
uint CallJavaNode::size_of() const { return sizeof(*this); }
912
uint CallJavaNode::cmp( const Node &n ) const {
913
CallJavaNode &call = (CallJavaNode&)n;
914
return CallNode::cmp(call) && _method == call._method;
915
}
916
#ifndef PRODUCT
917
void CallJavaNode::dump_spec(outputStream *st) const {
918
if( _method ) _method->print_short_name(st);
919
CallNode::dump_spec(st);
920
}
921
#endif
922
923
//=============================================================================
924
uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
925
uint CallStaticJavaNode::cmp( const Node &n ) const {
926
CallStaticJavaNode &call = (CallStaticJavaNode&)n;
927
return CallJavaNode::cmp(call);
928
}
929
930
//----------------------------uncommon_trap_request----------------------------
931
// If this is an uncommon trap, return the request code, else zero.
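// The request code is the constant integer passed as the first argument
// (TypeFunc::Parms) of the "uncommon_trap" runtime call; dump_spec() below
// decodes it with Deoptimization::format_trap_request.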
932
int CallStaticJavaNode::uncommon_trap_request() const {
933
if (_name != NULL && !strcmp(_name, "uncommon_trap")) {
934
return extract_uncommon_trap_request(this);
935
}
936
return 0;
937
}
938
int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
939
#ifndef PRODUCT
940
if (!(call->req() > TypeFunc::Parms &&
941
call->in(TypeFunc::Parms) != NULL &&
942
call->in(TypeFunc::Parms)->is_Con())) {
943
assert(in_dump() != 0, "OK if dumping");
944
tty->print("[bad uncommon trap]");
945
return 0;
946
}
947
#endif
948
return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
949
}
950
951
#ifndef PRODUCT
952
void CallStaticJavaNode::dump_spec(outputStream *st) const {
953
st->print("# Static ");
954
if (_name != NULL) {
955
st->print("%s", _name);
956
int trap_req = uncommon_trap_request();
957
if (trap_req != 0) {
958
char buf[100];
959
st->print("(%s)",
960
Deoptimization::format_trap_request(buf, sizeof(buf),
961
trap_req));
962
}
963
st->print(" ");
964
}
965
CallJavaNode::dump_spec(st);
966
}
967
#endif
968
969
//=============================================================================
970
uint CallDynamicJavaNode::size_of() const { return sizeof(*this); }
971
uint CallDynamicJavaNode::cmp( const Node &n ) const {
972
CallDynamicJavaNode &call = (CallDynamicJavaNode&)n;
973
return CallJavaNode::cmp(call);
974
}
975
#ifndef PRODUCT
976
void CallDynamicJavaNode::dump_spec(outputStream *st) const {
977
st->print("# Dynamic ");
978
CallJavaNode::dump_spec(st);
979
}
980
#endif
981
982
//=============================================================================
983
uint CallRuntimeNode::size_of() const { return sizeof(*this); }
984
uint CallRuntimeNode::cmp( const Node &n ) const {
985
CallRuntimeNode &call = (CallRuntimeNode&)n;
986
return CallNode::cmp(call) && !strcmp(_name,call._name);
987
}
988
#ifndef PRODUCT
989
void CallRuntimeNode::dump_spec(outputStream *st) const {
990
st->print("# ");
991
st->print("%s", _name);
992
CallNode::dump_spec(st);
993
}
994
#endif
995
996
//------------------------------calling_convention-----------------------------
997
void CallRuntimeNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
998
Matcher::c_calling_convention( sig_bt, parm_regs, argcnt );
999
}
1000
1001
//=============================================================================
1002
//------------------------------calling_convention-----------------------------
1003
1004
1005
//=============================================================================
1006
#ifndef PRODUCT
1007
void CallLeafNode::dump_spec(outputStream *st) const {
1008
st->print("# ");
1009
st->print("%s", _name);
1010
CallNode::dump_spec(st);
1011
}
1012
#endif
1013
1014
//=============================================================================
1015
1016
void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
1017
assert(verify_jvms(jvms), "jvms must match");
1018
int loc = jvms->locoff() + idx;
1019
if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
1020
// If current local idx is top then local idx - 1 could
1021
// be a long/double that needs to be killed since top could
1022
// represent the 2nd half of the long/double.
1023
uint ideal = in(loc -1)->ideal_reg();
1024
if (ideal == Op_RegD || ideal == Op_RegL) {
1025
// set other (low index) half to top
1026
set_req(loc - 1, in(loc));
1027
}
1028
}
1029
set_req(loc, c);
1030
}
1031
1032
uint SafePointNode::size_of() const { return sizeof(*this); }
1033
uint SafePointNode::cmp( const Node &n ) const {
1034
return (&n == this); // Always fail except on self
1035
}
1036
1037
//-------------------------set_next_exception----------------------------------
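// Exception states are chained off a map through a single extra precedence
// edge at index req(): len() == req() means no pending exception state,
// otherwise in(req()) is the next SafePointNode in the chain (this is the
// list that JVMState::dump_on walks above).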
1038
void SafePointNode::set_next_exception(SafePointNode* n) {
1039
assert(n == NULL || n->Opcode() == Op_SafePoint, "correct value for next_exception");
1040
if (len() == req()) {
1041
if (n != NULL) add_prec(n);
1042
} else {
1043
set_prec(req(), n);
1044
}
1045
}
1046
1047
1048
//----------------------------next_exception-----------------------------------
1049
SafePointNode* SafePointNode::next_exception() const {
1050
if (len() == req()) {
1051
return NULL;
1052
} else {
1053
Node* n = in(req());
1054
assert(n == NULL || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
1055
return (SafePointNode*) n;
1056
}
1057
}
1058
1059
1060
//------------------------------Ideal------------------------------------------
1061
// Skip over any collapsed Regions
1062
Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1063
return remove_dead_region(phase, can_reshape) ? this : NULL;
1064
}
1065
1066
//------------------------------Identity---------------------------------------
1067
// Remove obviously duplicate safepoints
1068
Node *SafePointNode::Identity( PhaseTransform *phase ) {
1069
1070
// If you have back to back safepoints, remove one
1071
if( in(TypeFunc::Control)->is_SafePoint() )
1072
return in(TypeFunc::Control);
1073
1074
if( in(0)->is_Proj() ) {
1075
Node *n0 = in(0)->in(0);
1076
// Check if it is a call projection (except Leaf Call)
1077
if( n0->is_Catch() ) {
1078
n0 = n0->in(0)->in(0);
1079
assert( n0->is_Call(), "expect a call here" );
1080
}
1081
if( n0->is_Call() && n0->as_Call()->guaranteed_safepoint() ) {
1082
// Useless Safepoint, so remove it
1083
return in(TypeFunc::Control);
1084
}
1085
}
1086
1087
return this;
1088
}
1089
1090
//------------------------------Value------------------------------------------
1091
const Type *SafePointNode::Value( PhaseTransform *phase ) const {
1092
if( phase->type(in(0)) == Type::TOP ) return Type::TOP;
1093
if( phase->eqv( in(0), this ) ) return Type::TOP; // Dead infinite loop
1094
return Type::CONTROL;
1095
}
1096
1097
#ifndef PRODUCT
1098
void SafePointNode::dump_spec(outputStream *st) const {
1099
st->print(" SafePoint ");
1100
_replaced_nodes.dump(st);
1101
}
1102
#endif
1103
1104
const RegMask &SafePointNode::in_RegMask(uint idx) const {
1105
if( idx < TypeFunc::Parms ) return RegMask::Empty;
1106
// Values outside the domain represent debug info
1107
return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
1108
}
1109
const RegMask &SafePointNode::out_RegMask() const {
1110
return RegMask::Empty;
1111
}
1112
1113
1114
void SafePointNode::grow_stack(JVMState* jvms, uint grow_by) {
1115
assert((int)grow_by > 0, "sanity");
1116
int monoff = jvms->monoff();
1117
int scloff = jvms->scloff();
1118
int endoff = jvms->endoff();
1119
assert(endoff == (int)req(), "no other states or debug info after me");
1120
Node* top = Compile::current()->top();
1121
for (uint i = 0; i < grow_by; i++) {
1122
ins_req(monoff, top);
1123
}
1124
jvms->set_monoff(monoff + grow_by);
1125
jvms->set_scloff(scloff + grow_by);
1126
jvms->set_endoff(endoff + grow_by);
1127
}
1128
1129
void SafePointNode::push_monitor(const FastLockNode *lock) {
1130
// Add a LockNode, which points to both the original BoxLockNode (the
1131
// stack space for the monitor) and the Object being locked.
1132
const int MonitorEdges = 2;
1133
assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
1134
assert(req() == jvms()->endoff(), "correct sizing");
1135
int nextmon = jvms()->scloff();
1136
if (GenerateSynchronizationCode) {
1137
ins_req(nextmon, lock->box_node());
1138
ins_req(nextmon+1, lock->obj_node());
1139
} else {
1140
Node* top = Compile::current()->top();
1141
ins_req(nextmon, top);
1142
ins_req(nextmon, top);
1143
}
1144
jvms()->set_scloff(nextmon + MonitorEdges);
1145
jvms()->set_endoff(req());
1146
}
1147
1148
void SafePointNode::pop_monitor() {
1149
// Delete last monitor from debug info
1150
debug_only(int num_before_pop = jvms()->nof_monitors());
1151
const int MonitorEdges = 2;
1152
assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
1153
int scloff = jvms()->scloff();
1154
int endoff = jvms()->endoff();
1155
int new_scloff = scloff - MonitorEdges;
1156
int new_endoff = endoff - MonitorEdges;
1157
jvms()->set_scloff(new_scloff);
1158
jvms()->set_endoff(new_endoff);
1159
while (scloff > new_scloff) del_req_ordered(--scloff);
1160
assert(jvms()->nof_monitors() == num_before_pop-1, "");
1161
}
1162
1163
Node *SafePointNode::peek_monitor_box() const {
1164
int mon = jvms()->nof_monitors() - 1;
1165
assert(mon >= 0, "most have a monitor");
1166
return monitor_box(jvms(), mon);
1167
}
1168
1169
Node *SafePointNode::peek_monitor_obj() const {
1170
int mon = jvms()->nof_monitors() - 1;
1171
assert(mon >= 0, "most have a monitor");
1172
return monitor_obj(jvms(), mon);
1173
}
1174
1175
// Do we Match on this edge index or not? Match no edges
1176
uint SafePointNode::match_edge(uint idx) const {
1177
if( !needs_polling_address_input() )
1178
return 0;
1179
1180
return (TypeFunc::Parms == idx);
1181
}
1182
1183
void SafePointNode::disconnect_from_root(PhaseIterGVN *igvn) {
1184
assert(Opcode() == Op_SafePoint, "only value for safepoint in loops");
1185
int nb = igvn->C->root()->find_prec_edge(this);
1186
if (nb != -1) {
1187
igvn->C->root()->rm_prec(nb);
1188
}
1189
}
1190
1191
//============== SafePointScalarObjectNode ==============
1192
1193
SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp,
1194
#ifdef ASSERT
1195
AllocateNode* alloc,
1196
#endif
1197
uint first_index,
1198
uint n_fields) :
1199
TypeNode(tp, 1), // 1 control input -- seems required. Get from root.
1200
#ifdef ASSERT
1201
_alloc(alloc),
1202
#endif
1203
_first_index(first_index),
1204
_n_fields(n_fields)
1205
{
1206
init_class_id(Class_SafePointScalarObject);
1207
}
1208
1209
// Do not allow value-numbering for SafePointScalarObject node.
1210
uint SafePointScalarObjectNode::hash() const { return NO_HASH; }
1211
uint SafePointScalarObjectNode::cmp( const Node &n ) const {
1212
return (&n == this); // Always fail except on self
1213
}
1214
1215
uint SafePointScalarObjectNode::ideal_reg() const {
1216
return 0; // No matching to machine instruction
1217
}
1218
1219
const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {
1220
return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
1221
}
1222
1223
const RegMask &SafePointScalarObjectNode::out_RegMask() const {
1224
return RegMask::Empty;
1225
}
1226
1227
uint SafePointScalarObjectNode::match_edge(uint idx) const {
1228
return 0;
1229
}
1230
1231
SafePointScalarObjectNode*
1232
SafePointScalarObjectNode::clone(Dict* sosn_map) const {
1233
void* cached = (*sosn_map)[(void*)this];
1234
if (cached != NULL) {
1235
return (SafePointScalarObjectNode*)cached;
1236
}
1237
SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
1238
sosn_map->Insert((void*)this, (void*)res);
1239
return res;
1240
}
1241
1242
1243
#ifndef PRODUCT
1244
void SafePointScalarObjectNode::dump_spec(outputStream *st) const {
1245
st->print(" # fields@[%d..%d]", first_index(),
1246
first_index() + n_fields() - 1);
1247
}
1248
1249
#endif
1250
1251
//=============================================================================
1252
uint AllocateNode::size_of() const { return sizeof(*this); }
1253
1254
AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
1255
Node *ctrl, Node *mem, Node *abio,
1256
Node *size, Node *klass_node, Node *initial_test)
1257
: CallNode(atype, NULL, TypeRawPtr::BOTTOM)
1258
{
1259
init_class_id(Class_Allocate);
1260
init_flags(Flag_is_macro);
1261
_is_scalar_replaceable = false;
1262
_is_non_escaping = false;
1263
Node *topnode = C->top();
1264
1265
init_req( TypeFunc::Control , ctrl );
1266
init_req( TypeFunc::I_O , abio );
1267
init_req( TypeFunc::Memory , mem );
1268
init_req( TypeFunc::ReturnAdr, topnode );
1269
init_req( TypeFunc::FramePtr , topnode );
1270
init_req( AllocSize , size);
1271
init_req( KlassNode , klass_node);
1272
init_req( InitialTest , initial_test);
1273
init_req( ALength , topnode);
1274
C->add_macro_node(this);
1275
}
1276
1277
//=============================================================================
1278
Node* AllocateArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1279
if (remove_dead_region(phase, can_reshape)) return this;
1280
// Don't bother trying to transform a dead node
1281
if (in(0) && in(0)->is_top()) return NULL;
1282
1283
const Type* type = phase->type(Ideal_length());
1284
if (type->isa_int() && type->is_int()->_hi < 0) {
1285
if (can_reshape) {
1286
PhaseIterGVN *igvn = phase->is_IterGVN();
1287
// Unreachable fall through path (negative array length),
1288
// the allocation can only throw so disconnect it.
1289
Node* proj = proj_out(TypeFunc::Control);
1290
Node* catchproj = NULL;
1291
if (proj != NULL) {
1292
for (DUIterator_Fast imax, i = proj->fast_outs(imax); i < imax; i++) {
1293
Node *cn = proj->fast_out(i);
1294
if (cn->is_Catch()) {
1295
catchproj = cn->as_Multi()->proj_out(CatchProjNode::fall_through_index);
1296
break;
1297
}
1298
}
1299
}
1300
if (catchproj != NULL && catchproj->outcnt() > 0 &&
1301
(catchproj->outcnt() > 1 ||
1302
catchproj->unique_out()->Opcode() != Op_Halt)) {
1303
assert(catchproj->is_CatchProj(), "must be a CatchProjNode");
1304
Node* nproj = catchproj->clone();
1305
igvn->register_new_node_with_optimizer(nproj);
1306
1307
Node *frame = new (phase->C) ParmNode( phase->C->start(), TypeFunc::FramePtr );
1308
frame = phase->transform(frame);
1309
// Halt & Catch Fire
1310
Node *halt = new (phase->C) HaltNode( nproj, frame );
1311
phase->C->root()->add_req(halt);
1312
phase->transform(halt);
1313
1314
igvn->replace_node(catchproj, phase->C->top());
1315
return this;
1316
}
1317
} else {
1318
// Can't correct it during regular GVN so register for IGVN
1319
phase->C->record_for_igvn(this);
1320
}
1321
}
1322
return NULL;
1323
}
1324
1325
// Retrieve the length from the AllocateArrayNode. Narrow the type with a
1326
// CastII, if appropriate. If we are not allowed to create new nodes, and
1327
// a CastII is appropriate, return NULL.
1328
Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseTransform *phase, bool allow_new_nodes) {
1329
Node *length = in(AllocateNode::ALength);
1330
assert(length != NULL, "length is not null");
1331
1332
const TypeInt* length_type = phase->find_int_type(length);
1333
const TypeAryPtr* ary_type = oop_type->isa_aryptr();
1334
1335
if (ary_type != NULL && length_type != NULL) {
1336
const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
1337
if (narrow_length_type != length_type) {
1338
// Assert one of:
1339
// - the narrow_length is 0
1340
// - the narrow_length is not wider than length
1341
assert(narrow_length_type == TypeInt::ZERO ||
1342
length_type->is_con() && narrow_length_type->is_con() &&
1343
(narrow_length_type->_hi <= length_type->_lo) ||
1344
(narrow_length_type->_hi <= length_type->_hi &&
1345
narrow_length_type->_lo >= length_type->_lo),
1346
"narrow type must be narrower than length type");
1347
1348
// Return NULL if new nodes are not allowed
1349
if (!allow_new_nodes) return NULL;
1350
// Create a cast which is control dependent on the initialization to
1351
// propagate the fact that the array length must be positive.
1352
length = new (phase->C) CastIINode(length, narrow_length_type);
1353
length->set_req(0, initialization()->proj_out(0));
1354
}
1355
}
1356
1357
return length;
1358
}
1359
1360
//=============================================================================
1361
uint LockNode::size_of() const { return sizeof(*this); }
1362
1363
// Redundant lock elimination
1364
//
1365
// There are various patterns of locking where we release and
1366
// immediately reacquire a lock in a piece of code where no operations
1367
// occur in between that would be observable. In those cases we can
1368
// skip releasing and reacquiring the lock without violating any
1369
// fairness requirements. Doing this around a loop could cause a lock
1370
// to be held for a very long time so we concentrate on non-looping
1371
// control flow. We also require that the operations are fully
1372
// redundant meaning that we don't introduce new lock operations on
1373
// some paths merely to be able to eliminate it on others, a la PRE. This
1374
// would probably require some more extensive graph manipulation to
1375
// guarantee that the memory edges were all handled correctly.
1376
//
1377
// Assuming p is a simple predicate which can't trap in any way and s
1378
// is a synchronized method consider this code:
1379
//
1380
// s();
1381
// if (p)
1382
// s();
1383
// else
1384
// s();
1385
// s();
1386
//
1387
// 1. The unlocks of the first call to s can be eliminated if the
1388
// locks inside the then and else branches are eliminated.
1389
//
1390
// 2. The unlocks of the then and else branches can be eliminated if
1391
// the lock of the final call to s is eliminated.
1392
//
1393
// Either of these cases subsumes the simple case of sequential control flow
1394
//
1395
// Additionally we can eliminate versions without the else case:
1396
//
1397
// s();
1398
// if (p)
1399
// s();
1400
// s();
1401
//
1402
// 3. In this case we eliminate the unlock of the first s, the lock
1403
// and unlock in the then case and the lock in the final s.
1404
//
1405
// Note also that in all these cases the then/else pieces don't have
1406
// to be trivial as long as they begin and end with synchronization
1407
// operations.
1408
//
1409
// s();
1410
// if (p)
1411
// s();
1412
// f();
1413
// s();
1414
// s();
1415
//
1416
// The code will work properly for this case, leaving in the unlock
1417
// before the call to f and the relock after it.
1418
//
1419
// A potentially interesting case which isn't handled here is when the
1420
// locking is partially redundant.
1421
//
1422
// s();
1423
// if (p)
1424
// s();
1425
//
1426
// This could be eliminated by putting unlocking on the else case and
1427
// eliminating the first unlock and the lock in the then side.
1428
// Alternatively the unlock could be moved out of the then side so it
1429
// was after the merge and the first unlock and second lock
1430
// eliminated. This might require less manipulation of the memory
1431
// state to get correct.
1432
//
1433
// Additionally we might allow work between an unlock and a lock before
1434
// giving up eliminating the locks. The current code disallows any
1435
// conditional control flow between these operations. A formulation
1436
// similar to partial redundancy elimination computing the
1437
// availability of unlocking and the anticipatability of locking at a
1438
// program point would allow detection of fully redundant locking with
1439
// some amount of work in between. I'm not sure how often I really
1440
// think that would occur though. Most of the cases I've seen
1441
// indicate it's likely non-trivial work would occur in between.
1442
// There may be other more complicated constructs where we could
1443
// eliminate locking but I haven't seen any others appear as hot or
1444
// interesting.
1445
//
1446
// Locking and unlocking have a canonical form in ideal that looks
1447
// roughly like this:
1448
//
1449
// <obj>
1450
// | \\------+
1451
// | \ \
1452
// | BoxLock \
1453
// | | | \
1454
// | | \ \
1455
// | | FastLock
1456
// | | /
1457
// | | /
1458
// | | |
1459
//
1460
// Lock
1461
// |
1462
// Proj #0
1463
// |
1464
// MembarAcquire
1465
// |
1466
// Proj #0
1467
//
1468
// MembarRelease
1469
// |
1470
// Proj #0
1471
// |
1472
// Unlock
1473
// |
1474
// Proj #0
1475
//
1476
//
1477
// This code proceeds by processing Lock nodes during PhaseIterGVN
1478
// and searching back through its control for the proper code
1479
// patterns. Once it finds a set of lock and unlock operations to
1480
// eliminate they are marked as eliminatable which causes the
1481
// expansion of the Lock and Unlock macro nodes to make the operation a NOP
1482
//
1483
//=============================================================================
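// The helpers below implement that search: next_control() skips copy regions
// and already-eliminated lock nodes, find_matching_unlock() recognizes an
// Unlock of the same object and box slot feeding a control edge,
// find_lock_and_unlock_through_if() handles case 3 above (the if-without-else
// shape), and find_unlocks_for_region() applies those checks to every control
// predecessor of a merge point.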
1484
1485
//
1486
// Utility function to skip over uninteresting control nodes. Nodes skipped are:
1487
// - copy regions. (These may not have been optimized away yet.)
1488
// - eliminated locking nodes
1489
//
1490
static Node *next_control(Node *ctrl) {
1491
if (ctrl == NULL)
1492
return NULL;
1493
while (1) {
1494
if (ctrl->is_Region()) {
1495
RegionNode *r = ctrl->as_Region();
1496
Node *n = r->is_copy();
1497
if (n == NULL)
1498
break; // hit a region, return it
1499
else
1500
ctrl = n;
1501
} else if (ctrl->is_Proj()) {
1502
Node *in0 = ctrl->in(0);
1503
if (in0->is_AbstractLock() && in0->as_AbstractLock()->is_eliminated()) {
1504
ctrl = in0->in(0);
1505
} else {
1506
break;
1507
}
1508
} else {
1509
break; // found an interesting control
1510
}
1511
}
1512
return ctrl;
1513
}
1514
//
1515
// Given a control, see if it's the control projection of an Unlock which
1516
// operates on the same object as lock.
1517
//
1518
bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock,
1519
GrowableArray<AbstractLockNode*> &lock_ops) {
1520
ProjNode *ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : NULL;
1521
if (ctrl_proj != NULL && ctrl_proj->_con == TypeFunc::Control) {
1522
Node *n = ctrl_proj->in(0);
1523
if (n != NULL && n->is_Unlock()) {
1524
UnlockNode *unlock = n->as_Unlock();
1525
if (lock->obj_node()->eqv_uncast(unlock->obj_node()) &&
1526
BoxLockNode::same_slot(lock->box_node(), unlock->box_node()) &&
1527
!unlock->is_eliminated()) {
1528
lock_ops.append(unlock);
1529
return true;
1530
}
1531
}
1532
}
1533
return false;
1534
}
1535
1536
//
1537
// Find the lock matching an unlock. Returns null if a safepoint
1538
// or complicated control is encountered first.
1539
LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) {
  LockNode *lock_result = NULL;
  // find the matching lock, or an intervening safepoint
  Node *ctrl = next_control(unlock->in(0));
  while (1) {
    assert(ctrl != NULL, "invalid control graph");
    assert(!ctrl->is_Start(), "missing lock for unlock");
    if (ctrl->is_top()) break;  // dead control path
    if (ctrl->is_Proj()) ctrl = ctrl->in(0);
    if (ctrl->is_SafePoint()) {
      break;  // found a safepoint (may be the lock we are searching for)
    } else if (ctrl->is_Region()) {
      // Check for a simple diamond pattern.  Punt on anything more complicated
      if (ctrl->req() == 3 && ctrl->in(1) != NULL && ctrl->in(2) != NULL) {
        Node *in1 = next_control(ctrl->in(1));
        Node *in2 = next_control(ctrl->in(2));
        if (((in1->is_IfTrue() && in2->is_IfFalse()) ||
             (in2->is_IfTrue() && in1->is_IfFalse())) && (in1->in(0) == in2->in(0))) {
          ctrl = next_control(in1->in(0)->in(0));
        } else {
          break;
        }
      } else {
        break;
      }
    } else {
      ctrl = next_control(ctrl->in(0));  // keep searching
    }
  }
  if (ctrl->is_Lock()) {
    LockNode *lock = ctrl->as_Lock();
    if (lock->obj_node()->eqv_uncast(unlock->obj_node()) &&
        BoxLockNode::same_slot(lock->box_node(), unlock->box_node())) {
      lock_result = lock;
    }
  }
  return lock_result;
}

// This code corresponds to case 3 above.

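// (Sketch of the shape matched below, inferred from the code: 'node' is one
// projection of an If; the control feeding that If must be preceded by an
// Unlock of the same object and monitor slot, and the opposite projection's
// unique user must be a not-yet-eliminated Lock of the same object and slot.
// Both operations are then appended to lock_ops.)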
bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                                       GrowableArray<AbstractLockNode*> &lock_ops) {
  Node* if_node = node->in(0);
  bool  if_true = node->is_IfTrue();

  if (if_node->is_If() && if_node->outcnt() == 2 && (if_true || node->is_IfFalse())) {
    Node *lock_ctrl = next_control(if_node->in(0));
    if (find_matching_unlock(lock_ctrl, lock, lock_ops)) {
      Node* lock1_node = NULL;
      ProjNode* proj = if_node->as_If()->proj_out(!if_true);
      if (if_true) {
        if (proj->is_IfFalse() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      } else {
        if (proj->is_IfTrue() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      }
      if (lock1_node != NULL && lock1_node->is_Lock()) {
        LockNode *lock1 = lock1_node->as_Lock();
        if (lock->obj_node()->eqv_uncast(lock1->obj_node()) &&
            BoxLockNode::same_slot(lock->box_node(), lock1->box_node()) &&
            !lock1->is_eliminated()) {
          lock_ops.append(lock1);
          return true;
        }
      }
    }
  }

  lock_ops.trunc_to(0);
  return false;
}

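// (Summary inferred from the caller in LockNode::Ideal: the Lock being
// coarsened is control dependent on 'region'; every non-NULL control merged
// by that region must end in a matching Unlock, either directly or through
// the if-shaped case handled above, otherwise the attempt is abandoned and
// lock_ops is cleared.)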
bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                                               GrowableArray<AbstractLockNode*> &lock_ops) {
  // check each control merging at this point for a matching unlock.
  // in(0) should be self edge so skip it.
  for (int i = 1; i < (int)region->req(); i++) {
    Node *in_node = next_control(region->in(i));
    if (in_node != NULL) {
      if (find_matching_unlock(in_node, lock, lock_ops)) {
        // found a match so keep on checking.
        continue;
      } else if (find_lock_and_unlock_through_if(in_node, lock, lock_ops)) {
        continue;
      }

      // If we fall through to here then it was some kind of node we
      // don't understand or there wasn't a matching unlock, so give
      // up trying to merge locks.
      lock_ops.trunc_to(0);
      return false;
    }
  }
  return true;

}

#ifndef PRODUCT
//
// Create a counter which counts the number of times this lock is acquired
//
void AbstractLockNode::create_lock_counter(JVMState* state) {
  _counter = OptoRuntime::new_named_counter(state, NamedCounter::LockCounter);
}

void AbstractLockNode::set_eliminated_lock_counter() {
  if (_counter) {
    // Update the counter to indicate that this lock was eliminated.
    // The counter update code will stay around even though the
    // optimizer will eliminate the lock operation itself.
    _counter->set_tag(NamedCounter::EliminatedLockCounter);
  }
}
#endif

//=============================================================================
Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first (returns 'this' or NULL)
  Node *result = SafePointNode::Ideal(phase, can_reshape);
  if (result != NULL)  return result;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top())  return NULL;

  // Now see if we can optimize away this lock.  We don't actually
  // remove the locking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the lock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
    //
    // If we are locking an unescaped object, the lock/unlock is unnecessary
    //
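    // (For example, a lock on an object that never escapes its allocating
    // thread -- such as a locally allocated builder used only inside one
    // method -- cannot be contended, so the Lock/Unlock pair can be dropped
    // once escape analysis proves this.)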
    ConnectionGraph *cgr = phase->C->congraph();
    if (cgr != NULL && cgr->not_global_escape(obj_node())) {
      assert(!is_eliminated() || is_coarsened(), "sanity");
      // The lock could be marked eliminated by lock coarsening
      // code during first IGVN before EA. Replace coarsened flag
      // to eliminate all associated locks/unlocks.
#ifdef ASSERT
      this->log_lock_optimization(phase->C,"eliminate_lock_set_non_esc1");
#endif
      this->set_non_esc_obj();
      return result;
    }

    //
    // Try lock coarsening
    //
    PhaseIterGVN* iter = phase->is_IterGVN();
    if (iter != NULL && !is_eliminated()) {

      GrowableArray<AbstractLockNode*> lock_ops;

      Node *ctrl = next_control(in(0));

      // now search back for a matching Unlock
      if (find_matching_unlock(ctrl, this, lock_ops)) {
        // found an unlock directly preceding this lock.  This is the
        // case of single unlock directly control dependent on a
        // single lock which is the trivial version of case 1 or 2.
      } else if (ctrl->is_Region()) {
        if (find_unlocks_for_region(ctrl->as_Region(), this, lock_ops)) {
          // found lock preceded by multiple unlocks along all paths
          // joining at this point which is case 3 in description above.
        }
      } else {
        // see if this lock comes from either half of an if; the
        // predecessor merges unlocks and the other half of the if
        // performs a lock.
        if (find_lock_and_unlock_through_if(ctrl, this, lock_ops)) {
          // found unlock splitting to an if with locks on both branches.
        }
      }

      if (lock_ops.length() > 0) {
        // add ourselves to the list of locks to be eliminated.
        lock_ops.append(this);

#ifndef PRODUCT
        if (PrintEliminateLocks) {
          int locks = 0;
          int unlocks = 0;
          for (int i = 0; i < lock_ops.length(); i++) {
            AbstractLockNode* lock = lock_ops.at(i);
            if (lock->Opcode() == Op_Lock)
              locks++;
            else
              unlocks++;
            if (Verbose) {
              lock->dump(1);
            }
          }
          tty->print_cr("***Eliminated %d unlocks and %d locks", unlocks, locks);
        }
#endif

        // for each of the identified locks, mark them
        // as eliminable
        for (int i = 0; i < lock_ops.length(); i++) {
          AbstractLockNode* lock = lock_ops.at(i);

          // Mark it eliminated by coarsening and update any counters
#ifdef ASSERT
          lock->log_lock_optimization(phase->C, "eliminate_lock_set_coarsened");
#endif
          lock->set_coarsened();
        }
      } else if (ctrl->is_Region() &&
                 iter->_worklist.member(ctrl)) {
        // We weren't able to find any opportunities but the region this
        // lock is control dependent on hasn't been processed yet so put
        // this lock back on the worklist so we can check again once any
        // region simplification has occurred.
        iter->_worklist.push(this);
      }
    }
  }

  return result;
}

//=============================================================================
bool LockNode::is_nested_lock_region() {
  return is_nested_lock_region(NULL);
}

// 'c' is used for access to the compilation log; no logging if NULL
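// (Illustration: a lock is "nested" when an enclosing scope already holds a
// lock on the same object, e.g.
//     synchronized (obj) { ... synchronized (obj) { ... } ... }
// typically after inlining; the inner Lock/Unlock pair is then redundant and
// can be eliminated.)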
bool LockNode::is_nested_lock_region(Compile * c) {
  BoxLockNode* box = box_node()->as_BoxLock();
  int stk_slot = box->stack_slot();
  if (stk_slot <= 0) {
#ifdef ASSERT
    this->log_lock_optimization(c, "eliminate_lock_INLR_1");
#endif
    return false; // External lock or it is not Box (Phi node).
  }

  // Ignore complex cases: merged locks or multiple locks.
  Node* obj = obj_node();
  LockNode* unique_lock = NULL;
  if (!box->is_simple_lock_region(&unique_lock, obj)) {
#ifdef ASSERT
    this->log_lock_optimization(c, "eliminate_lock_INLR_2a");
#endif
    return false;
  }
  if (unique_lock != this) {
#ifdef ASSERT
    this->log_lock_optimization(c, "eliminate_lock_INLR_2b");
#endif
    return false;
  }

  // Look for external lock for the same object.
  SafePointNode* sfn = this->as_SafePoint();
  JVMState* youngest_jvms = sfn->jvms();
  int max_depth = youngest_jvms->depth();
  for (int depth = 1; depth <= max_depth; depth++) {
    JVMState* jvms = youngest_jvms->of_depth(depth);
    int num_mon  = jvms->nof_monitors();
    // Loop over monitors
    for (int idx = 0; idx < num_mon; idx++) {
      Node* obj_node = sfn->monitor_obj(jvms, idx);
      BoxLockNode* box_node = sfn->monitor_box(jvms, idx)->as_BoxLock();
      if ((box_node->stack_slot() < stk_slot) && obj_node->eqv_uncast(obj)) {
        return true;
      }
    }
  }
#ifdef ASSERT
  this->log_lock_optimization(c, "eliminate_lock_INLR_3");
#endif
  return false;
}

//=============================================================================
uint UnlockNode::size_of() const { return sizeof(*this); }

//=============================================================================
Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first (returns 'this' or NULL)
  Node *result = SafePointNode::Ideal(phase, can_reshape);
  if (result != NULL)  return result;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top())  return NULL;

  // Now see if we can optimize away this unlock.  We don't actually
  // remove the unlocking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the unlock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  // Escape state is defined after Parse phase.
  if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
    //
    // If we are unlocking an unescaped object, the lock/unlock is unnecessary.
    //
    ConnectionGraph *cgr = phase->C->congraph();
    if (cgr != NULL && cgr->not_global_escape(obj_node())) {
      assert(!is_eliminated() || is_coarsened(), "sanity");
      // The lock could be marked eliminated by lock coarsening
      // code during first IGVN before EA. Replace coarsened flag
      // to eliminate all associated locks/unlocks.
#ifdef ASSERT
      this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc2");
#endif
      this->set_non_esc_obj();
    }
  }
  return result;
}

const char * AbstractLockNode::kind_as_string() const {
  return is_coarsened()   ? "coarsened" :
         is_nested()      ? "nested" :
         is_non_esc_obj() ? "non_escaping" :
         "?";
}

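// (Illustration, with example values: begin_head() opens an XML-style element
// in the compilation log, so a call with tag "eliminate_lock_set_coarsened"
// on a coarsened Lock would render roughly as
//   <eliminate_lock_set_coarsened lock='1' compile_id='42' class_id='lock' kind='coarsened' ...>
// followed by one <jvms .../> element per inlining level and the closing tag.)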
void AbstractLockNode::log_lock_optimization(Compile *C, const char * tag)  const {
  if (C == NULL) {
    return;
  }
  CompileLog* log = C->log();
  if (log != NULL) {
    log->begin_head("%s lock='%d' compile_id='%d' class_id='%s' kind='%s'",
          tag, is_Lock(), C->compile_id(),
          is_Unlock() ? "unlock" : is_Lock() ? "lock" : "?",
          kind_as_string());
    log->stamp();
    log->end_head();
    JVMState* p = is_Unlock() ? (as_Unlock()->dbg_jvms()) : jvms();
    while (p != NULL) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail(tag);
  }
}
