GitHub Repository: PojavLauncherTeam/jdk17u
Path: blob/master/src/hotspot/share/opto/escape.cpp
1
/*
2
* Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved.
3
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
*
5
* This code is free software; you can redistribute it and/or modify it
6
* under the terms of the GNU General Public License version 2 only, as
7
* published by the Free Software Foundation.
8
*
9
* This code is distributed in the hope that it will be useful, but WITHOUT
10
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12
* version 2 for more details (a copy is included in the LICENSE file that
13
* accompanied this code).
14
*
15
* You should have received a copy of the GNU General Public License version
16
* 2 along with this work; if not, write to the Free Software Foundation,
17
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
*
19
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
* or visit www.oracle.com if you need additional information or have any
21
* questions.
22
*
23
*/
24
25
#include "precompiled.hpp"
26
#include "ci/bcEscapeAnalyzer.hpp"
27
#include "compiler/compileLog.hpp"
28
#include "gc/shared/barrierSet.hpp"
29
#include "gc/shared/c2/barrierSetC2.hpp"
30
#include "libadt/vectset.hpp"
31
#include "memory/allocation.hpp"
32
#include "memory/resourceArea.hpp"
33
#include "opto/c2compiler.hpp"
34
#include "opto/arraycopynode.hpp"
35
#include "opto/callnode.hpp"
36
#include "opto/cfgnode.hpp"
37
#include "opto/compile.hpp"
38
#include "opto/escape.hpp"
39
#include "opto/phaseX.hpp"
40
#include "opto/movenode.hpp"
41
#include "opto/rootnode.hpp"
42
#include "utilities/macros.hpp"
43
44
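// Construct an empty connection graph for this compilation and pre-register
// PointsTo nodes for the predefined objects: the phantom (unknown) object,
// which globally escapes, and the null constant, which never escapes.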
ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
45
_nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
46
_in_worklist(C->comp_arena()),
47
_next_pidx(0),
48
_collecting(true),
49
_verify(false),
50
_compile(C),
51
_igvn(igvn),
52
_node_map(C->comp_arena()) {
53
// Add unknown java object.
54
add_java_object(C->top(), PointsToNode::GlobalEscape);
55
phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
56
// Add ConP(#NULL) and ConN(#NULL) nodes.
57
Node* oop_null = igvn->zerocon(T_OBJECT);
58
assert(oop_null->_idx < nodes_size(), "should be created already");
59
add_java_object(oop_null, PointsToNode::NoEscape);
60
null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
61
if (UseCompressedOops) {
62
Node* noop_null = igvn->zerocon(T_NARROWOOP);
63
assert(noop_null->_idx < nodes_size(), "should be created already");
64
map_ideal_node(noop_null, null_obj);
65
}
66
}
67
68
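// Quick test for whether escape analysis can help this compilation: look for
// allocations, locks on non-constant/non-parameter objects, or boxing calls.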
bool ConnectionGraph::has_candidates(Compile *C) {
69
// EA brings benefits only when the code has allocations and/or locks which
70
// are represented by ideal Macro nodes.
71
int cnt = C->macro_count();
72
for (int i = 0; i < cnt; i++) {
73
Node *n = C->macro_node(i);
74
if (n->is_Allocate()) {
75
return true;
76
}
77
if (n->is_Lock()) {
78
Node* obj = n->as_Lock()->obj_node()->uncast();
79
if (!(obj->is_Parm() || obj->is_Con())) {
80
return true;
81
}
82
}
83
if (n->is_CallStaticJava() &&
84
n->as_CallStaticJava()->is_boxing_method()) {
85
return true;
86
}
87
}
88
return false;
89
}
90
91
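// Driver for escape analysis: construct the connection graph and, if some
// objects do not escape, attach the graph to the compilation for later use
// by the optimizer.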
void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
92
Compile::TracePhase tp("escapeAnalysis", &Phase::timers[Phase::_t_escapeAnalysis]);
93
ResourceMark rm;
94
95
// Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
96
// to create space for them in ConnectionGraph::_nodes[].
97
Node* oop_null = igvn->zerocon(T_OBJECT);
98
Node* noop_null = igvn->zerocon(T_NARROWOOP);
99
ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn);
100
// Perform escape analysis
101
if (congraph->compute_escape()) {
102
// There are non-escaping objects.
103
C->set_congraph(congraph);
104
}
105
// Cleanup.
106
if (oop_null->outcnt() == 0) {
107
igvn->hash_delete(oop_null);
108
}
109
if (noop_null->outcnt() == 0) {
110
igvn->hash_delete(noop_null);
111
}
112
}
113
114
bool ConnectionGraph::compute_escape() {
115
Compile* C = _compile;
116
PhaseGVN* igvn = _igvn;
117
118
// Worklists used by EA.
119
Unique_Node_List delayed_worklist;
120
GrowableArray<Node*> alloc_worklist;
121
GrowableArray<Node*> ptr_cmp_worklist;
122
GrowableArray<Node*> storestore_worklist;
123
GrowableArray<ArrayCopyNode*> arraycopy_worklist;
124
GrowableArray<PointsToNode*> ptnodes_worklist;
125
GrowableArray<JavaObjectNode*> java_objects_worklist;
126
GrowableArray<JavaObjectNode*> non_escaped_allocs_worklist;
127
GrowableArray<FieldNode*> oop_fields_worklist;
128
GrowableArray<SafePointNode*> sfn_worklist;
129
DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )
130
131
{ Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);
132
133
// 1. Populate Connection Graph (CG) with PointsTo nodes.
134
ideal_nodes.map(C->live_nodes(), NULL); // preallocate space
135
// Initialize worklist
136
if (C->root() != NULL) {
137
ideal_nodes.push(C->root());
138
}
139
// Processed ideal nodes are unique on ideal_nodes list
140
// but several ideal nodes are mapped to the phantom_obj.
141
// To avoid duplicated entries on the following worklists
142
// add the phantom_obj only once to them.
143
ptnodes_worklist.append(phantom_obj);
144
java_objects_worklist.append(phantom_obj);
145
for (uint next = 0; next < ideal_nodes.size(); ++next) {
146
Node* n = ideal_nodes.at(next);
147
// Create PointsTo nodes and add them to Connection Graph. Called
148
// only once per ideal node since ideal_nodes is Unique_Node list.
149
add_node_to_connection_graph(n, &delayed_worklist);
150
PointsToNode* ptn = ptnode_adr(n->_idx);
151
if (ptn != NULL && ptn != phantom_obj) {
152
ptnodes_worklist.append(ptn);
153
if (ptn->is_JavaObject()) {
154
java_objects_worklist.append(ptn->as_JavaObject());
155
if ((n->is_Allocate() || n->is_CallStaticJava()) &&
156
(ptn->escape_state() < PointsToNode::GlobalEscape)) {
157
// Only the results of allocations and Java static calls are interesting.
158
non_escaped_allocs_worklist.append(ptn->as_JavaObject());
159
}
160
} else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
161
oop_fields_worklist.append(ptn->as_Field());
162
}
163
}
164
if (n->is_MergeMem()) {
165
// Collect all MergeMem nodes to add memory slices for
166
// scalar replaceable objects in split_unique_types().
167
_mergemem_worklist.append(n->as_MergeMem());
168
} else if (OptimizePtrCompare && n->is_Cmp() &&
169
(n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {
170
// Collect compare pointers nodes.
171
ptr_cmp_worklist.append(n);
172
} else if (n->is_MemBarStoreStore()) {
173
// Collect all MemBarStoreStore nodes so that depending on the
174
// escape status of the associated Allocate node some of them
175
// may be eliminated.
176
storestore_worklist.append(n);
177
} else if (n->is_MemBar() && (n->Opcode() == Op_MemBarRelease) &&
178
(n->req() > MemBarNode::Precedent)) {
179
record_for_optimizer(n);
180
#ifdef ASSERT
181
} else if (n->is_AddP()) {
182
// Collect address nodes for graph verification.
183
addp_worklist.append(n);
184
#endif
185
} else if (n->is_ArrayCopy()) {
186
// Keep a list of ArrayCopy nodes so that if one of its inputs is non-
187
// escaping, we can record a unique type
188
arraycopy_worklist.append(n->as_ArrayCopy());
189
}
190
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
191
Node* m = n->fast_out(i); // Get user
192
ideal_nodes.push(m);
193
}
194
if (n->is_SafePoint()) {
195
sfn_worklist.append(n->as_SafePoint());
196
}
197
}
198
if (non_escaped_allocs_worklist.length() == 0) {
199
_collecting = false;
200
return false; // Nothing to do.
201
}
202
// Add final simple edges to graph.
203
while (delayed_worklist.size() > 0) {
204
Node* n = delayed_worklist.pop();
205
add_final_edges(n);
206
}
207
208
#ifdef ASSERT
209
if (VerifyConnectionGraph) {
210
// Verify that no new simple edges could be created and all
211
// local vars have edges.
212
_verify = true;
213
int ptnodes_length = ptnodes_worklist.length();
214
for (int next = 0; next < ptnodes_length; ++next) {
215
PointsToNode* ptn = ptnodes_worklist.at(next);
216
add_final_edges(ptn->ideal_node());
217
if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
218
ptn->dump();
219
assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
220
}
221
}
222
_verify = false;
223
}
224
#endif
225
// Bytecode analyzer BCEscapeAnalyzer, used for Call nodes
226
// processing, calls to CI to resolve symbols (types, fields, methods)
227
// referenced in bytecode. During symbol resolution VM may throw
228
// an exception which CI cleans and converts to compilation failure.
229
if (C->failing()) return false;
230
231
// 2. Finish Graph construction by propagating references to all
232
// java objects through graph.
233
if (!complete_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
234
java_objects_worklist, oop_fields_worklist)) {
235
// All objects escaped, or we hit the time or iteration limit.
236
_collecting = false;
237
return false;
238
}
239
240
// 3. Adjust scalar_replaceable state of nonescaping objects and push
241
// scalar replaceable allocations on alloc_worklist for processing
242
// in split_unique_types().
243
int non_escaped_length = non_escaped_allocs_worklist.length();
244
for (int next = 0; next < non_escaped_length; next++) {
245
JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
246
bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
247
Node* n = ptn->ideal_node();
248
if (n->is_Allocate()) {
249
n->as_Allocate()->_is_non_escaping = noescape;
250
}
251
if (noescape && ptn->scalar_replaceable()) {
252
adjust_scalar_replaceable_state(ptn);
253
if (ptn->scalar_replaceable()) {
254
alloc_worklist.append(ptn->ideal_node());
255
}
256
}
257
}
258
259
#ifdef ASSERT
260
if (VerifyConnectionGraph) {
261
// Verify that graph is complete - no new edges could be added or needed.
262
verify_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
263
java_objects_worklist, addp_worklist);
264
}
265
assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
266
assert(null_obj->escape_state() == PointsToNode::NoEscape &&
267
null_obj->edge_count() == 0 &&
268
!null_obj->arraycopy_src() &&
269
!null_obj->arraycopy_dst(), "sanity");
270
#endif
271
272
_collecting = false;
273
274
} // TracePhase t3("connectionGraph")
275
276
// 4. Optimize ideal graph based on EA information.
277
bool has_non_escaping_obj = (non_escaped_allocs_worklist.length() > 0);
278
if (has_non_escaping_obj) {
279
optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
280
}
281
282
#ifndef PRODUCT
283
if (PrintEscapeAnalysis) {
284
dump(ptnodes_worklist); // Dump ConnectionGraph
285
}
286
#endif
287
288
#ifdef ASSERT
289
if (VerifyConnectionGraph) {
290
int alloc_length = alloc_worklist.length();
291
for (int next = 0; next < alloc_length; ++next) {
292
Node* n = alloc_worklist.at(next);
293
PointsToNode* ptn = ptnode_adr(n->_idx);
294
assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
295
}
296
}
297
#endif
298
299
// 5. Separate memory graph for scalar replaceable allocations.
300
bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
301
if (has_scalar_replaceable_candidates &&
302
C->AliasLevel() >= 3 && EliminateAllocations) {
303
// Now use the escape information to create unique types for
304
// scalar replaceable objects.
305
split_unique_types(alloc_worklist, arraycopy_worklist);
306
if (C->failing()) return false;
307
C->print_method(PHASE_AFTER_EA, 2);
308
309
#ifdef ASSERT
310
} else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
311
tty->print("=== No allocations eliminated for ");
312
C->method()->print_short_name();
313
if (!EliminateAllocations) {
314
tty->print(" since EliminateAllocations is off ===");
315
} else if (!has_scalar_replaceable_candidates) {
316
tty->print(" since there are no scalar replaceable candidates ===");
317
} else if (C->AliasLevel() < 3) {
318
tty->print(" since AliasLevel < 3 ===");
319
}
320
tty->cr();
321
#endif
322
}
323
324
// Annotate safepoints if they have <= ArgEscape objects in their scope, and annotate
325
// java calls if they pass ArgEscape objects as parameters.
326
if (has_non_escaping_obj &&
327
(C->env()->should_retain_local_variables() ||
328
C->env()->jvmti_can_get_owned_monitor_info() ||
329
C->env()->jvmti_can_walk_any_space() ||
330
DeoptimizeObjectsALot)) {
331
int sfn_length = sfn_worklist.length();
332
for (int next = 0; next < sfn_length; next++) {
333
SafePointNode* sfn = sfn_worklist.at(next);
334
sfn->set_has_ea_local_in_scope(has_ea_local_in_scope(sfn));
335
if (sfn->is_CallJava()) {
336
CallJavaNode* call = sfn->as_CallJava();
337
call->set_arg_escape(has_arg_escape(call));
338
}
339
}
340
}
341
342
return has_non_escaping_obj;
343
}
344
345
// Returns true if there is an object in the scope of sfn that does not escape globally.
346
bool ConnectionGraph::has_ea_local_in_scope(SafePointNode* sfn) {
347
Compile* C = _compile;
348
for (JVMState* jvms = sfn->jvms(); jvms != NULL; jvms = jvms->caller()) {
349
if (C->env()->should_retain_local_variables() || C->env()->jvmti_can_walk_any_space() ||
350
DeoptimizeObjectsALot) {
351
// Jvmti agents can access locals. Must provide info about local objects at runtime.
352
int num_locs = jvms->loc_size();
353
for (int idx = 0; idx < num_locs; idx++) {
354
Node* l = sfn->local(jvms, idx);
355
if (not_global_escape(l)) {
356
return true;
357
}
358
}
359
}
360
if (C->env()->jvmti_can_get_owned_monitor_info() ||
361
C->env()->jvmti_can_walk_any_space() || DeoptimizeObjectsALot) {
362
// Jvmti agents can read monitors. Must provide info about locked objects at runtime.
363
int num_mon = jvms->nof_monitors();
364
for (int idx = 0; idx < num_mon; idx++) {
365
Node* m = sfn->monitor_obj(jvms, idx);
366
if (m != NULL && not_global_escape(m)) {
367
return true;
368
}
369
}
370
}
371
}
372
return false;
373
}
374
375
// Returns true if at least one of the arguments to the call is an object
376
// that does not escape globally.
377
bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
378
if (call->method() != NULL) {
379
uint max_idx = TypeFunc::Parms + call->method()->arg_size();
380
for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
381
Node* p = call->in(idx);
382
if (not_global_escape(p)) {
383
return true;
384
}
385
}
386
} else {
387
const char* name = call->as_CallStaticJava()->_name;
388
assert(name != NULL, "no name");
389
// no arg escapes through uncommon traps
390
if (strcmp(name, "uncommon_trap") != 0) {
391
// process_call_arguments() assumes that all arguments escape globally
392
const TypeTuple* d = call->tf()->domain();
393
for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
394
const Type* at = d->field_at(i);
395
if (at->isa_oopptr() != NULL) {
396
return true;
397
}
398
}
399
}
400
}
401
return false;
402
}
403
404
405
406
// Utility function for nodes that load an object
407
void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
408
// Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
409
// ThreadLocal has RawPtr type.
410
const Type* t = _igvn->type(n);
411
if (t->make_ptr() != NULL) {
412
Node* adr = n->in(MemNode::Address);
413
#ifdef ASSERT
414
if (!adr->is_AddP()) {
415
assert(_igvn->type(adr)->isa_rawptr(), "sanity");
416
} else {
417
assert((ptnode_adr(adr->_idx) == NULL ||
418
ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
419
}
420
#endif
421
add_local_var_and_edge(n, PointsToNode::NoEscape,
422
adr, delayed_worklist);
423
}
424
}
425
426
// Populate Connection Graph with PointsTo nodes and create simple
427
// connection graph edges.
428
void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
429
assert(!_verify, "this method should not be called for verification");
430
PhaseGVN* igvn = _igvn;
431
uint n_idx = n->_idx;
432
PointsToNode* n_ptn = ptnode_adr(n_idx);
433
if (n_ptn != NULL) {
434
return; // No need to redefine PointsTo node during first iteration.
435
}
436
int opcode = n->Opcode();
437
bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_to_con_graph(this, igvn, delayed_worklist, n, opcode);
438
if (gc_handled) {
439
return; // Ignore node if already handled by GC.
440
}
441
442
if (n->is_Call()) {
443
// Arguments to allocation and locking don't escape.
444
if (n->is_AbstractLock()) {
445
// Put Lock and Unlock nodes on IGVN worklist to process them during
446
// first IGVN optimization when escape information is still available.
447
record_for_optimizer(n);
448
} else if (n->is_Allocate()) {
449
add_call_node(n->as_Call());
450
record_for_optimizer(n);
451
} else {
452
if (n->is_CallStaticJava()) {
453
const char* name = n->as_CallStaticJava()->_name;
454
if (name != NULL && strcmp(name, "uncommon_trap") == 0) {
455
return; // Skip uncommon traps
456
}
457
}
458
// Don't mark as processed since call's arguments have to be processed.
459
delayed_worklist->push(n);
460
// Check if a call returns an object.
461
if ((n->as_Call()->returns_pointer() &&
462
n->as_Call()->proj_out_or_null(TypeFunc::Parms) != NULL) ||
463
(n->is_CallStaticJava() &&
464
n->as_CallStaticJava()->is_boxing_method())) {
465
add_call_node(n->as_Call());
466
}
467
}
468
return;
469
}
470
// Put this check here to process call arguments since some call nodes
471
// point to phantom_obj.
472
if (n_ptn == phantom_obj || n_ptn == null_obj) {
473
return; // Skip predefined nodes.
474
}
475
switch (opcode) {
476
case Op_AddP: {
477
Node* base = get_addp_base(n);
478
PointsToNode* ptn_base = ptnode_adr(base->_idx);
479
// Field nodes are created for all field types. They are used in
480
// adjust_scalar_replaceable_state() and split_unique_types().
481
// Note, non-oop fields will have only base edges in Connection
482
// Graph because such fields are not used for oop loads and stores.
483
int offset = address_offset(n, igvn);
484
add_field(n, PointsToNode::NoEscape, offset);
485
if (ptn_base == NULL) {
486
delayed_worklist->push(n); // Process it later.
487
} else {
488
n_ptn = ptnode_adr(n_idx);
489
add_base(n_ptn->as_Field(), ptn_base);
490
}
491
break;
492
}
493
case Op_CastX2P: {
494
map_ideal_node(n, phantom_obj);
495
break;
496
}
497
case Op_CastPP:
498
case Op_CheckCastPP:
499
case Op_EncodeP:
500
case Op_DecodeN:
501
case Op_EncodePKlass:
502
case Op_DecodeNKlass: {
503
add_local_var_and_edge(n, PointsToNode::NoEscape,
504
n->in(1), delayed_worklist);
505
break;
506
}
507
case Op_CMoveP: {
508
add_local_var(n, PointsToNode::NoEscape);
509
// Do not add edges during the first iteration because some inputs
510
// might not be defined yet.
511
delayed_worklist->push(n);
512
break;
513
}
514
case Op_ConP:
515
case Op_ConN:
516
case Op_ConNKlass: {
517
// assume all oop constants globally escape except for null
518
PointsToNode::EscapeState es;
519
const Type* t = igvn->type(n);
520
if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
521
es = PointsToNode::NoEscape;
522
} else {
523
es = PointsToNode::GlobalEscape;
524
}
525
add_java_object(n, es);
526
break;
527
}
528
case Op_CreateEx: {
529
// assume that all exception objects globally escape
530
map_ideal_node(n, phantom_obj);
531
break;
532
}
533
case Op_LoadKlass:
534
case Op_LoadNKlass: {
535
// Unknown class is loaded
536
map_ideal_node(n, phantom_obj);
537
break;
538
}
539
case Op_LoadP:
540
case Op_LoadN:
541
case Op_LoadPLocked: {
542
add_objload_to_connection_graph(n, delayed_worklist);
543
break;
544
}
545
case Op_Parm: {
546
map_ideal_node(n, phantom_obj);
547
break;
548
}
549
case Op_PartialSubtypeCheck: {
550
// Produces Null or notNull and is used only in CmpP, so
551
// phantom_obj could be used.
552
map_ideal_node(n, phantom_obj); // Result is unknown
553
break;
554
}
555
case Op_Phi: {
556
// Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
557
// ThreadLocal has RawPtr type.
558
const Type* t = n->as_Phi()->type();
559
if (t->make_ptr() != NULL) {
560
add_local_var(n, PointsToNode::NoEscape);
561
// Do not add edges during the first iteration because some inputs
562
// might not be defined yet.
563
delayed_worklist->push(n);
564
}
565
break;
566
}
567
case Op_Proj: {
568
// we are only interested in the oop result projection from a call
569
if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
570
n->in(0)->as_Call()->returns_pointer()) {
571
add_local_var_and_edge(n, PointsToNode::NoEscape,
572
n->in(0), delayed_worklist);
573
}
574
break;
575
}
576
case Op_Rethrow: // Exception object escapes
577
case Op_Return: {
578
if (n->req() > TypeFunc::Parms &&
579
igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
580
// Treat Return value as LocalVar with GlobalEscape escape state.
581
add_local_var_and_edge(n, PointsToNode::GlobalEscape,
582
n->in(TypeFunc::Parms), delayed_worklist);
583
}
584
break;
585
}
586
case Op_CompareAndExchangeP:
587
case Op_CompareAndExchangeN:
588
case Op_GetAndSetP:
589
case Op_GetAndSetN: {
590
add_objload_to_connection_graph(n, delayed_worklist);
591
// fall-through
592
}
593
case Op_StoreP:
594
case Op_StoreN:
595
case Op_StoreNKlass:
596
case Op_StorePConditional:
597
case Op_WeakCompareAndSwapP:
598
case Op_WeakCompareAndSwapN:
599
case Op_CompareAndSwapP:
600
case Op_CompareAndSwapN: {
601
add_to_congraph_unsafe_access(n, opcode, delayed_worklist);
602
break;
603
}
604
case Op_AryEq:
605
case Op_HasNegatives:
606
case Op_StrComp:
607
case Op_StrEquals:
608
case Op_StrIndexOf:
609
case Op_StrIndexOfChar:
610
case Op_StrInflatedCopy:
611
case Op_StrCompressedCopy:
612
case Op_EncodeISOArray: {
613
add_local_var(n, PointsToNode::ArgEscape);
614
delayed_worklist->push(n); // Process it later.
615
break;
616
}
617
case Op_ThreadLocal: {
618
add_java_object(n, PointsToNode::ArgEscape);
619
break;
620
}
621
case Op_Blackhole: {
622
// All blackhole pointer arguments are globally escaping.
623
// Only do this if there is at least one pointer argument.
624
// Do not add edges during the first iteration because some inputs
625
// might not be defined yet; defer to the final step.
626
for (uint i = 0; i < n->req(); i++) {
627
Node* in = n->in(i);
628
if (in != nullptr) {
629
const Type* at = _igvn->type(in);
630
if (!at->isa_ptr()) continue;
631
632
add_local_var(n, PointsToNode::GlobalEscape);
633
delayed_worklist->push(n);
634
break;
635
}
636
}
637
break;
638
}
639
default:
640
; // Do nothing for nodes not related to EA.
641
}
642
return;
643
}
644
645
#ifdef ASSERT
646
#define ELSE_FAIL(name) \
647
/* Should not be called for non-pointer type. */ \
648
n->dump(1); \
649
assert(false, name); \
650
break;
651
#else
652
#define ELSE_FAIL(name) \
653
break;
654
#endif
655
656
// Add final simple edges to graph.
657
void ConnectionGraph::add_final_edges(Node *n) {
658
PointsToNode* n_ptn = ptnode_adr(n->_idx);
659
#ifdef ASSERT
660
if (_verify && n_ptn->is_JavaObject())
661
return; // This method does not change graph for JavaObject.
662
#endif
663
664
if (n->is_Call()) {
665
process_call_arguments(n->as_Call());
666
return;
667
}
668
assert(n->is_Store() || n->is_LoadStore() ||
669
(n_ptn != NULL) && (n_ptn->ideal_node() != NULL),
670
"node should be registered already");
671
int opcode = n->Opcode();
672
bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
673
if (gc_handled) {
674
return; // Ignore node if already handled by GC.
675
}
676
switch (opcode) {
677
case Op_AddP: {
678
Node* base = get_addp_base(n);
679
PointsToNode* ptn_base = ptnode_adr(base->_idx);
680
assert(ptn_base != NULL, "field's base should be registered");
681
add_base(n_ptn->as_Field(), ptn_base);
682
break;
683
}
684
case Op_CastPP:
685
case Op_CheckCastPP:
686
case Op_EncodeP:
687
case Op_DecodeN:
688
case Op_EncodePKlass:
689
case Op_DecodeNKlass: {
690
add_local_var_and_edge(n, PointsToNode::NoEscape,
691
n->in(1), NULL);
692
break;
693
}
694
case Op_CMoveP: {
695
for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
696
Node* in = n->in(i);
697
if (in == NULL) {
698
continue; // ignore NULL
699
}
700
Node* uncast_in = in->uncast();
701
if (uncast_in->is_top() || uncast_in == n) {
702
continue; // ignore top or inputs which go back this node
703
}
704
PointsToNode* ptn = ptnode_adr(in->_idx);
705
assert(ptn != NULL, "node should be registered");
706
add_edge(n_ptn, ptn);
707
}
708
break;
709
}
710
case Op_LoadP:
711
case Op_LoadN:
712
case Op_LoadPLocked: {
713
// Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
714
// ThreadLocal has RawPtr type.
715
const Type* t = _igvn->type(n);
716
if (t->make_ptr() != NULL) {
717
Node* adr = n->in(MemNode::Address);
718
add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
719
break;
720
}
721
ELSE_FAIL("Op_LoadP");
722
}
723
case Op_Phi: {
724
// Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
725
// ThreadLocal has RawPtr type.
726
const Type* t = n->as_Phi()->type();
727
if (t->make_ptr() != NULL) {
728
for (uint i = 1; i < n->req(); i++) {
729
Node* in = n->in(i);
730
if (in == NULL) {
731
continue; // ignore NULL
732
}
733
Node* uncast_in = in->uncast();
734
if (uncast_in->is_top() || uncast_in == n) {
735
continue; // ignore top or inputs which go back this node
736
}
737
PointsToNode* ptn = ptnode_adr(in->_idx);
738
assert(ptn != NULL, "node should be registered");
739
add_edge(n_ptn, ptn);
740
}
741
break;
742
}
743
ELSE_FAIL("Op_Phi");
744
}
745
case Op_Proj: {
746
// we are only interested in the oop result projection from a call
747
if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
748
n->in(0)->as_Call()->returns_pointer()) {
749
add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
750
break;
751
}
752
ELSE_FAIL("Op_Proj");
753
}
754
case Op_Rethrow: // Exception object escapes
755
case Op_Return: {
756
if (n->req() > TypeFunc::Parms &&
757
_igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
758
// Treat Return value as LocalVar with GlobalEscape escape state.
759
add_local_var_and_edge(n, PointsToNode::GlobalEscape,
760
n->in(TypeFunc::Parms), NULL);
761
break;
762
}
763
ELSE_FAIL("Op_Return");
764
}
765
case Op_StoreP:
766
case Op_StoreN:
767
case Op_StoreNKlass:
768
case Op_StorePConditional:
769
case Op_CompareAndExchangeP:
770
case Op_CompareAndExchangeN:
771
case Op_CompareAndSwapP:
772
case Op_CompareAndSwapN:
773
case Op_WeakCompareAndSwapP:
774
case Op_WeakCompareAndSwapN:
775
case Op_GetAndSetP:
776
case Op_GetAndSetN: {
777
if (add_final_edges_unsafe_access(n, opcode)) {
778
break;
779
}
780
ELSE_FAIL("Op_StoreP");
781
}
782
case Op_AryEq:
783
case Op_HasNegatives:
784
case Op_StrComp:
785
case Op_StrEquals:
786
case Op_StrIndexOf:
787
case Op_StrIndexOfChar:
788
case Op_StrInflatedCopy:
789
case Op_StrCompressedCopy:
790
case Op_EncodeISOArray: {
791
// char[]/byte[] arrays passed to string intrinsics do not escape but
792
// they are not scalar replaceable. Adjust escape state for them.
793
// Start from in(2) edge since in(1) is memory edge.
794
for (uint i = 2; i < n->req(); i++) {
795
Node* adr = n->in(i);
796
const Type* at = _igvn->type(adr);
797
if (!adr->is_top() && at->isa_ptr()) {
798
assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
799
at->isa_ptr() != NULL, "expecting a pointer");
800
if (adr->is_AddP()) {
801
adr = get_addp_base(adr);
802
}
803
PointsToNode* ptn = ptnode_adr(adr->_idx);
804
assert(ptn != NULL, "node should be registered");
805
add_edge(n_ptn, ptn);
806
}
807
}
808
break;
809
}
810
case Op_Blackhole: {
811
// All blackhole pointer arguments are globally escaping.
812
for (uint i = 0; i < n->req(); i++) {
813
Node* in = n->in(i);
814
if (in != nullptr) {
815
const Type* at = _igvn->type(in);
816
if (!at->isa_ptr()) continue;
817
818
if (in->is_AddP()) {
819
in = get_addp_base(in);
820
}
821
822
PointsToNode* ptn = ptnode_adr(in->_idx);
823
assert(ptn != nullptr, "should be defined already");
824
set_escape_state(ptn, PointsToNode::GlobalEscape);
825
add_edge(n_ptn, ptn);
826
}
827
}
828
break;
829
}
830
default: {
831
// This method should be called only for EA specific nodes which may
832
// miss some edges when they were created.
833
#ifdef ASSERT
834
n->dump(1);
835
#endif
836
guarantee(false, "unknown node");
837
}
838
}
839
return;
840
}
841
842
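// First-pass handling of stores and atomic/unsafe accesses: decide whether the
// access is relevant for the connection graph and, if so, queue it on the
// delayed worklist so its edges can be added once all inputs are registered.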
void ConnectionGraph::add_to_congraph_unsafe_access(Node* n, uint opcode, Unique_Node_List* delayed_worklist) {
843
Node* adr = n->in(MemNode::Address);
844
const Type* adr_type = _igvn->type(adr);
845
adr_type = adr_type->make_ptr();
846
if (adr_type == NULL) {
847
return; // skip dead nodes
848
}
849
if (adr_type->isa_oopptr()
850
|| ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
851
&& adr_type == TypeRawPtr::NOTNULL
852
&& is_captured_store_address(adr))) {
853
delayed_worklist->push(n); // Process it later.
854
#ifdef ASSERT
855
assert (adr->is_AddP(), "expecting an AddP");
856
if (adr_type == TypeRawPtr::NOTNULL) {
857
// Verify a raw address for a store captured by Initialize node.
858
int offs = (int) _igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
859
assert(offs != Type::OffsetBot, "offset must be a constant");
860
}
861
#endif
862
} else {
863
// Ignore copying of the displaced header to the BoxNode (OSR compilation).
864
if (adr->is_BoxLock()) {
865
return;
866
}
867
// Stored value escapes in unsafe access.
868
if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
869
delayed_worklist->push(n); // Process unsafe access later.
870
return;
871
}
872
#ifdef ASSERT
873
n->dump(1);
874
assert(false, "not unsafe");
875
#endif
876
}
877
}
878
879
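// Final-pass handling of stores and atomic/unsafe accesses: add the edge from
// the address field to the stored value, or mark the value as globally escaping
// for raw unsafe stores. Returns false if the access is not recognized.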
bool ConnectionGraph::add_final_edges_unsafe_access(Node* n, uint opcode) {
880
Node* adr = n->in(MemNode::Address);
881
const Type *adr_type = _igvn->type(adr);
882
adr_type = adr_type->make_ptr();
883
#ifdef ASSERT
884
if (adr_type == NULL) {
885
n->dump(1);
886
assert(adr_type != NULL, "dead node should not be on list");
887
return true;
888
}
889
#endif
890
891
if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN ||
892
opcode == Op_CompareAndExchangeN || opcode == Op_CompareAndExchangeP) {
893
add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
894
}
895
896
if (adr_type->isa_oopptr()
897
|| ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
898
&& adr_type == TypeRawPtr::NOTNULL
899
&& is_captured_store_address(adr))) {
900
// Point Address to Value
901
PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
902
assert(adr_ptn != NULL &&
903
adr_ptn->as_Field()->is_oop(), "node should be registered");
904
Node* val = n->in(MemNode::ValueIn);
905
PointsToNode* ptn = ptnode_adr(val->_idx);
906
assert(ptn != NULL, "node should be registered");
907
add_edge(adr_ptn, ptn);
908
return true;
909
} else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
910
// Stored value escapes in unsafe access.
911
Node* val = n->in(MemNode::ValueIn);
912
PointsToNode* ptn = ptnode_adr(val->_idx);
913
assert(ptn != NULL, "node should be registered");
914
set_escape_state(ptn, PointsToNode::GlobalEscape);
915
// Add edge to object for unsafe access with offset.
916
PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
917
assert(adr_ptn != NULL, "node should be registered");
918
if (adr_ptn->is_Field()) {
919
assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
920
add_edge(adr_ptn, ptn);
921
}
922
return true;
923
}
924
return false;
925
}
926
927
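// Create the PointsTo node describing the value returned by a call
// (allocation, boxing method, analyzable static Java call, or unknown call).
// Used only for calls which return a pointer.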
void ConnectionGraph::add_call_node(CallNode* call) {
928
assert(call->returns_pointer(), "only for call which returns pointer");
929
uint call_idx = call->_idx;
930
if (call->is_Allocate()) {
931
Node* k = call->in(AllocateNode::KlassNode);
932
const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
933
assert(kt != NULL, "TypeKlassPtr required.");
934
ciKlass* cik = kt->klass();
935
PointsToNode::EscapeState es = PointsToNode::NoEscape;
936
bool scalar_replaceable = true;
937
if (call->is_AllocateArray()) {
938
if (!cik->is_array_klass()) { // StressReflectiveCode
939
es = PointsToNode::GlobalEscape;
940
} else {
941
int length = call->in(AllocateNode::ALength)->find_int_con(-1);
942
if (length < 0 || length > EliminateAllocationArraySizeLimit) {
943
// Not scalar replaceable if the length is not constant or too big.
944
scalar_replaceable = false;
945
}
946
}
947
} else { // Allocate instance
948
if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
949
cik->is_subclass_of(_compile->env()->Reference_klass()) ||
950
!cik->is_instance_klass() || // StressReflectiveCode
951
!cik->as_instance_klass()->can_be_instantiated() ||
952
cik->as_instance_klass()->has_finalizer()) {
953
es = PointsToNode::GlobalEscape;
954
} else {
955
int nfields = cik->as_instance_klass()->nof_nonstatic_fields();
956
if (nfields > EliminateAllocationFieldsLimit) {
957
// Not scalar replaceable if there are too many fields.
958
scalar_replaceable = false;
959
}
960
}
961
}
962
add_java_object(call, es);
963
PointsToNode* ptn = ptnode_adr(call_idx);
964
if (!scalar_replaceable && ptn->scalar_replaceable()) {
965
ptn->set_scalar_replaceable(false);
966
}
967
} else if (call->is_CallStaticJava()) {
968
// Call nodes could be different types:
969
//
970
// 1. CallDynamicJavaNode (what happened during call is unknown):
971
//
972
// - mapped to GlobalEscape JavaObject node if oop is returned;
973
//
974
// - all oop arguments are escaping globally;
975
//
976
// 2. CallStaticJavaNode (execute bytecode analysis if possible):
977
//
978
// - the same as CallDynamicJavaNode if can't do bytecode analysis;
979
//
980
// - mapped to GlobalEscape JavaObject node if unknown oop is returned;
981
// - mapped to NoEscape JavaObject node if non-escaping object allocated
982
// during call is returned;
983
// - mapped to ArgEscape LocalVar node pointing to object arguments
984
// which are returned and do not escape during the call;
985
//
986
// - oop arguments escaping status is defined by bytecode analysis;
987
//
988
// For a static call, we know exactly what method is being called.
989
// Use bytecode estimator to record whether the call's return value escapes.
990
ciMethod* meth = call->as_CallJava()->method();
991
if (meth == NULL) {
992
const char* name = call->as_CallStaticJava()->_name;
993
assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");
994
// Returns a newly allocated non-escaped object.
995
add_java_object(call, PointsToNode::NoEscape);
996
ptnode_adr(call_idx)->set_scalar_replaceable(false);
997
} else if (meth->is_boxing_method()) {
998
// Returns boxing object
999
PointsToNode::EscapeState es;
1000
vmIntrinsics::ID intr = meth->intrinsic_id();
1001
if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
1002
// It does not escape if object is always allocated.
1003
es = PointsToNode::NoEscape;
1004
} else {
1005
// It escapes globally if object could be loaded from cache.
1006
es = PointsToNode::GlobalEscape;
1007
}
1008
add_java_object(call, es);
1009
} else {
1010
BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
1011
call_analyzer->copy_dependencies(_compile->dependencies());
1012
if (call_analyzer->is_return_allocated()) {
1013
// Returns a newly allocated non-escaped object, simply
1014
// update dependency information.
1015
// Mark it as NoEscape so that objects referenced by
1016
// its fields will be marked as NoEscape at least.
1017
add_java_object(call, PointsToNode::NoEscape);
1018
ptnode_adr(call_idx)->set_scalar_replaceable(false);
1019
} else {
1020
// Determine whether any arguments are returned.
1021
const TypeTuple* d = call->tf()->domain();
1022
bool ret_arg = false;
1023
for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1024
if (d->field_at(i)->isa_ptr() != NULL &&
1025
call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
1026
ret_arg = true;
1027
break;
1028
}
1029
}
1030
if (ret_arg) {
1031
add_local_var(call, PointsToNode::ArgEscape);
1032
} else {
1033
// Returns unknown object.
1034
map_ideal_node(call, phantom_obj);
1035
}
1036
}
1037
}
1038
} else {
1039
// Another type of call; assume the worst case:
1040
// returned value is unknown and globally escapes.
1041
assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
1042
map_ideal_node(call, phantom_obj);
1043
}
1044
}
1045
1046
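// Set the escape state of each pointer argument of a call depending on the
// kind of call: arraycopy and leaf stubs, static Java calls with bytecode
// escape information, or unknown calls whose arguments escape globally.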
void ConnectionGraph::process_call_arguments(CallNode *call) {
1047
bool is_arraycopy = false;
1048
switch (call->Opcode()) {
1049
#ifdef ASSERT
1050
case Op_Allocate:
1051
case Op_AllocateArray:
1052
case Op_Lock:
1053
case Op_Unlock:
1054
assert(false, "should be done already");
1055
break;
1056
#endif
1057
case Op_ArrayCopy:
1058
case Op_CallLeafNoFP:
1059
// Most array copies are ArrayCopy nodes at this point but there
1060
// are still a few direct calls to the copy subroutines (See
1061
// PhaseStringOpts::copy_string())
1062
is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
1063
call->as_CallLeaf()->is_call_to_arraycopystub();
1064
// fall through
1065
case Op_CallLeafVector:
1066
case Op_CallLeaf: {
1067
// Stub calls: objects do not escape, but they are not scalar replaceable.
1068
// Adjust escape state for outgoing arguments.
1069
const TypeTuple * d = call->tf()->domain();
1070
bool src_has_oops = false;
1071
for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1072
const Type* at = d->field_at(i);
1073
Node *arg = call->in(i);
1074
if (arg == NULL) {
1075
continue;
1076
}
1077
const Type *aat = _igvn->type(arg);
1078
if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
1079
continue;
1080
}
1081
if (arg->is_AddP()) {
1082
//
1083
// The inline_native_clone() case when the arraycopy stub is called
1084
// after the allocation before Initialize and CheckCastPP nodes.
1085
// Or normal arraycopy for object arrays case.
1086
//
1087
// Set AddP's base (Allocate) as not scalar replaceable since
1088
// pointer to the base (with offset) is passed as argument.
1089
//
1090
arg = get_addp_base(arg);
1091
}
1092
PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1093
assert(arg_ptn != NULL, "should be registered");
1094
PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
1095
if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
1096
assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
1097
aat->isa_ptr() != NULL, "expecting an Ptr");
1098
bool arg_has_oops = aat->isa_oopptr() &&
1099
(aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||
1100
(aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()));
1101
if (i == TypeFunc::Parms) {
1102
src_has_oops = arg_has_oops;
1103
}
1104
//
1105
// src or dst could be j.l.Object when other is basic type array:
1106
//
1107
// arraycopy(char[],0,Object*,0,size);
1108
// arraycopy(Object*,0,char[],0,size);
1109
//
1110
// Don't add edges in such cases.
1111
//
1112
bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
1113
arg_has_oops && (i > TypeFunc::Parms);
1114
#ifdef ASSERT
1115
if (!(is_arraycopy ||
1116
BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
1117
(call->as_CallLeaf()->_name != NULL &&
1118
(strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
1119
strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
1120
strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||
1121
strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||
1122
strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
1123
strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
1124
strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||
1125
strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_encryptAESCrypt") == 0 ||
1126
strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_decryptAESCrypt") == 0 ||
1127
strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 ||
1128
strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
1129
strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
1130
strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
1131
strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
1132
strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
1133
strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
1134
strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
1135
strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
1136
strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
1137
strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
1138
strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
1139
strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
1140
strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
1141
strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
1142
strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
1143
strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
1144
strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
1145
strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
1146
strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
1147
strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
1148
strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
1149
strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0)
1150
))) {
1151
call->dump();
1152
fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
1153
}
1154
#endif
1155
// Always process arraycopy's destination object since
1156
// we need to add all possible edges to references in
1157
// source object.
1158
if (arg_esc >= PointsToNode::ArgEscape &&
1159
!arg_is_arraycopy_dest) {
1160
continue;
1161
}
1162
PointsToNode::EscapeState es = PointsToNode::ArgEscape;
1163
if (call->is_ArrayCopy()) {
1164
ArrayCopyNode* ac = call->as_ArrayCopy();
1165
if (ac->is_clonebasic() ||
1166
ac->is_arraycopy_validated() ||
1167
ac->is_copyof_validated() ||
1168
ac->is_copyofrange_validated()) {
1169
es = PointsToNode::NoEscape;
1170
}
1171
}
1172
set_escape_state(arg_ptn, es);
1173
if (arg_is_arraycopy_dest) {
1174
Node* src = call->in(TypeFunc::Parms);
1175
if (src->is_AddP()) {
1176
src = get_addp_base(src);
1177
}
1178
PointsToNode* src_ptn = ptnode_adr(src->_idx);
1179
assert(src_ptn != NULL, "should be registered");
1180
if (arg_ptn != src_ptn) {
1181
// Special arraycopy edge:
1182
// A destination object's field can't have the source object
1183
// as base since the objects' escape states are not related.
1184
// Only escape state of destination object's fields affects
1185
// escape state of fields in source object.
1186
add_arraycopy(call, es, src_ptn, arg_ptn);
1187
}
1188
}
1189
}
1190
}
1191
break;
1192
}
1193
case Op_CallStaticJava: {
1194
// For a static call, we know exactly what method is being called.
1195
// Use bytecode estimator to record the call's escape effects
1196
#ifdef ASSERT
1197
const char* name = call->as_CallStaticJava()->_name;
1198
assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
1199
#endif
1200
ciMethod* meth = call->as_CallJava()->method();
1201
if ((meth != NULL) && meth->is_boxing_method()) {
1202
break; // Boxing methods do not modify any oops.
1203
}
1204
BCEscapeAnalyzer* call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL;
1205
// fall-through if not a Java method or no analyzer information
1206
if (call_analyzer != NULL) {
1207
PointsToNode* call_ptn = ptnode_adr(call->_idx);
1208
const TypeTuple* d = call->tf()->domain();
1209
for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1210
const Type* at = d->field_at(i);
1211
int k = i - TypeFunc::Parms;
1212
Node* arg = call->in(i);
1213
PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1214
if (at->isa_ptr() != NULL &&
1215
call_analyzer->is_arg_returned(k)) {
1216
// The call returns arguments.
1217
if (call_ptn != NULL) { // Is call's result used?
1218
assert(call_ptn->is_LocalVar(), "node should be registered");
1219
assert(arg_ptn != NULL, "node should be registered");
1220
add_edge(call_ptn, arg_ptn);
1221
}
1222
}
1223
if (at->isa_oopptr() != NULL &&
1224
arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
1225
if (!call_analyzer->is_arg_stack(k)) {
1226
// The argument globally escapes
1227
set_escape_state(arg_ptn, PointsToNode::GlobalEscape);
1228
} else {
1229
set_escape_state(arg_ptn, PointsToNode::ArgEscape);
1230
if (!call_analyzer->is_arg_local(k)) {
1231
// The argument itself doesn't escape, but any fields might
1232
set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape);
1233
}
1234
}
1235
}
1236
}
1237
if (call_ptn != NULL && call_ptn->is_LocalVar()) {
1238
// The call returns arguments.
1239
assert(call_ptn->edge_count() > 0, "sanity");
1240
if (!call_analyzer->is_return_local()) {
1241
// Also returns an unknown object.
1242
add_edge(call_ptn, phantom_obj);
1243
}
1244
}
1245
break;
1246
}
1247
}
1248
default: {
1249
// Fall-through here if not a Java method or no analyzer information
1250
// or some other type of call, assume the worst case: all arguments
1251
// globally escape.
1252
const TypeTuple* d = call->tf()->domain();
1253
for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1254
const Type* at = d->field_at(i);
1255
if (at->isa_oopptr() != NULL) {
1256
Node* arg = call->in(i);
1257
if (arg->is_AddP()) {
1258
arg = get_addp_base(arg);
1259
}
1260
assert(ptnode_adr(arg->_idx) != NULL, "should be defined already");
1261
set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape);
1262
}
1263
}
1264
}
1265
}
1266
}
1267
1268
1269
// Finish Graph construction.
1270
bool ConnectionGraph::complete_connection_graph(
1271
GrowableArray<PointsToNode*>& ptnodes_worklist,
1272
GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
1273
GrowableArray<JavaObjectNode*>& java_objects_worklist,
1274
GrowableArray<FieldNode*>& oop_fields_worklist) {
1275
// Normally only 1-3 passes needed to build Connection Graph depending
1276
// on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.
1277
// Set the limit to 20 to catch situations when something goes wrong and
1278
// bail out of Escape Analysis.
1279
// Also limit build time to 20 sec (60 in debug VM), EscapeAnalysisTimeout flag.
1280
#define GRAPH_BUILD_ITER_LIMIT 20
1281
1282
// Propagate GlobalEscape and ArgEscape escape states and check that
1283
// we still have non-escaping objects. The method pushes onto _worklist
1284
// Field nodes which reference phantom_object.
1285
if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
1286
return false; // Nothing to do.
1287
}
1288
// Now propagate references to all JavaObject nodes.
1289
int java_objects_length = java_objects_worklist.length();
1290
elapsedTimer build_time;
1291
build_time.start();
1292
elapsedTimer time;
1293
bool timeout = false;
1294
int new_edges = 1;
1295
int iterations = 0;
1296
do {
1297
while ((new_edges > 0) &&
1298
(iterations++ < GRAPH_BUILD_ITER_LIMIT)) {
1299
double start_time = time.seconds();
1300
time.start();
1301
new_edges = 0;
1302
// Propagate references to phantom_object for nodes pushed on _worklist
1303
// by find_non_escaped_objects() and find_field_value().
1304
new_edges += add_java_object_edges(phantom_obj, false);
1305
for (int next = 0; next < java_objects_length; ++next) {
1306
JavaObjectNode* ptn = java_objects_worklist.at(next);
1307
new_edges += add_java_object_edges(ptn, true);
1308
1309
#define SAMPLE_SIZE 4
1310
if ((next % SAMPLE_SIZE) == 0) {
1311
// Every 4 iterations, estimate how much time it will take
1312
// to complete graph construction.
1313
time.stop();
1314
// Poll for requests from shutdown mechanism to quiesce compiler
1315
// because Connection graph construction may take a long time.
1316
CompileBroker::maybe_block();
1317
double stop_time = time.seconds();
1318
double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
1319
double time_until_end = time_per_iter * (double)(java_objects_length - next);
1320
if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
1321
timeout = true;
1322
break; // Timeout
1323
}
1324
start_time = stop_time;
1325
time.start();
1326
}
1327
#undef SAMPLE_SIZE
1328
1329
}
1330
if (timeout) break;
1331
if (new_edges > 0) {
1332
// Update escape states on each iteration if graph was updated.
1333
if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
1334
return false; // Nothing to do.
1335
}
1336
}
1337
time.stop();
1338
if (time.seconds() >= EscapeAnalysisTimeout) {
1339
timeout = true;
1340
break;
1341
}
1342
}
1343
if ((iterations < GRAPH_BUILD_ITER_LIMIT) && !timeout) {
1344
time.start();
1345
// Find fields which have unknown value.
1346
int fields_length = oop_fields_worklist.length();
1347
for (int next = 0; next < fields_length; next++) {
1348
FieldNode* field = oop_fields_worklist.at(next);
1349
if (field->edge_count() == 0) {
1350
new_edges += find_field_value(field);
1351
// This code may have added new edges to phantom_object.
1352
// Need another cycle to propagate references to phantom_object.
1353
}
1354
}
1355
time.stop();
1356
if (time.seconds() >= EscapeAnalysisTimeout) {
1357
timeout = true;
1358
break;
1359
}
1360
} else {
1361
new_edges = 0; // Bailout
1362
}
1363
} while (new_edges > 0);
1364
1365
build_time.stop();
1366
_build_time = build_time.seconds();
1367
_build_iterations = iterations;
1368
1369
// Bailout if passed limits.
1370
if ((iterations >= GRAPH_BUILD_ITER_LIMIT) || timeout) {
1371
Compile* C = _compile;
1372
if (C->log() != NULL) {
1373
C->log()->begin_elem("connectionGraph_bailout reason='reached ");
1374
C->log()->text("%s", timeout ? "time" : "iterations");
1375
C->log()->end_elem(" limit'");
1376
}
1377
assert(ExitEscapeAnalysisOnTimeout, "infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d",
1378
_build_time, _build_iterations, nodes_size(), ptnodes_worklist.length());
1379
// Possible infinite build_connection_graph loop,
1380
// bailout (no changes to ideal graph were made).
1381
return false;
1382
}
1383
#ifdef ASSERT
1384
if (Verbose && PrintEscapeAnalysis) {
1385
tty->print_cr("EA: %d iterations and %f sec to build connection graph with %d nodes and worklist size %d",
1386
_build_iterations, _build_time, nodes_size(), ptnodes_worklist.length());
1387
}
1388
#endif
1389
1390
#undef GRAPH_BUILD_ITER_LIMIT
1391
1392
// Find fields initialized by NULL for non-escaping Allocations.
1393
int non_escaped_length = non_escaped_allocs_worklist.length();
1394
for (int next = 0; next < non_escaped_length; next++) {
1395
JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
1396
PointsToNode::EscapeState es = ptn->escape_state();
1397
assert(es <= PointsToNode::ArgEscape, "sanity");
1398
if (es == PointsToNode::NoEscape) {
1399
if (find_init_values_null(ptn, _igvn) > 0) {
1400
// Adding references to NULL object does not change escape states
1401
// since it does not escape. Also no fields are added to NULL object.
1402
add_java_object_edges(null_obj, false);
1403
}
1404
}
1405
Node* n = ptn->ideal_node();
1406
if (n->is_Allocate()) {
1407
// The object allocated by this Allocate node will never be
1408
// seen by another thread. Mark it so that when it is
1409
// expanded no MemBarStoreStore is added.
1410
InitializeNode* ini = n->as_Allocate()->initialization();
1411
if (ini != NULL)
1412
ini->set_does_not_escape();
1413
}
1414
}
1415
return true; // Finished graph construction.
1416
}
1417
1418
// Propagate GlobalEscape and ArgEscape escape states to all nodes
1419
// and check that we still have non-escaping java objects.
1420
bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
1421
GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist) {
1422
GrowableArray<PointsToNode*> escape_worklist;
1423
// First, put all nodes with GlobalEscape and ArgEscape states on worklist.
1424
int ptnodes_length = ptnodes_worklist.length();
1425
for (int next = 0; next < ptnodes_length; ++next) {
1426
PointsToNode* ptn = ptnodes_worklist.at(next);
1427
if (ptn->escape_state() >= PointsToNode::ArgEscape ||
1428
ptn->fields_escape_state() >= PointsToNode::ArgEscape) {
1429
escape_worklist.push(ptn);
1430
}
1431
}
1432
// Set escape states to referenced nodes (edges list).
1433
while (escape_worklist.length() > 0) {
1434
PointsToNode* ptn = escape_worklist.pop();
1435
PointsToNode::EscapeState es = ptn->escape_state();
1436
PointsToNode::EscapeState field_es = ptn->fields_escape_state();
1437
if (ptn->is_Field() && ptn->as_Field()->is_oop() &&
1438
es >= PointsToNode::ArgEscape) {
1439
// GlobalEscape or ArgEscape state of field means it has unknown value.
1440
if (add_edge(ptn, phantom_obj)) {
1441
// New edge was added
1442
add_field_uses_to_worklist(ptn->as_Field());
1443
}
1444
}
1445
for (EdgeIterator i(ptn); i.has_next(); i.next()) {
1446
PointsToNode* e = i.get();
1447
if (e->is_Arraycopy()) {
1448
assert(ptn->arraycopy_dst(), "sanity");
1449
// Propagate only fields escape state through arraycopy edge.
1450
if (e->fields_escape_state() < field_es) {
1451
set_fields_escape_state(e, field_es);
1452
escape_worklist.push(e);
1453
}
1454
} else if (es >= field_es) {
1455
// fields_escape_state is also set to 'es' if it is less than 'es'.
1456
if (e->escape_state() < es) {
1457
set_escape_state(e, es);
1458
escape_worklist.push(e);
1459
}
1460
} else {
1461
// Propagate field escape state.
1462
bool es_changed = false;
1463
if (e->fields_escape_state() < field_es) {
1464
set_fields_escape_state(e, field_es);
1465
es_changed = true;
1466
}
1467
if ((e->escape_state() < field_es) &&
1468
e->is_Field() && ptn->is_JavaObject() &&
1469
e->as_Field()->is_oop()) {
1470
// Change escape state of referenced fields.
1471
set_escape_state(e, field_es);
1472
es_changed = true;
1473
} else if (e->escape_state() < es) {
1474
set_escape_state(e, es);
1475
es_changed = true;
1476
}
1477
if (es_changed) {
1478
escape_worklist.push(e);
1479
}
1480
}
1481
}
1482
}
1483
// Remove escaped objects from non_escaped list.
1484
for (int next = non_escaped_allocs_worklist.length()-1; next >= 0 ; --next) {
1485
JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
1486
if (ptn->escape_state() >= PointsToNode::GlobalEscape) {
1487
non_escaped_allocs_worklist.delete_at(next);
1488
}
1489
if (ptn->escape_state() == PointsToNode::NoEscape) {
1490
// Find fields in non-escaped allocations which have unknown value.
1491
find_init_values_phantom(ptn);
1492
}
1493
}
1494
return (non_escaped_allocs_worklist.length() > 0);
1495
}
1496
1497
// Add all references to JavaObject node by walking over all uses.
1498
int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) {
1499
int new_edges = 0;
1500
if (populate_worklist) {
1501
// Populate _worklist by uses of jobj's uses.
1502
for (UseIterator i(jobj); i.has_next(); i.next()) {
1503
PointsToNode* use = i.get();
1504
if (use->is_Arraycopy()) {
1505
continue;
1506
}
1507
add_uses_to_worklist(use);
1508
if (use->is_Field() && use->as_Field()->is_oop()) {
1509
// Put on worklist all field's uses (loads) and
1510
// related field nodes (same base and offset).
1511
add_field_uses_to_worklist(use->as_Field());
1512
}
1513
}
1514
}
1515
for (int l = 0; l < _worklist.length(); l++) {
1516
PointsToNode* use = _worklist.at(l);
1517
if (PointsToNode::is_base_use(use)) {
1518
// Add reference from jobj to field and from field to jobj (field's base).
1519
use = PointsToNode::get_use_node(use)->as_Field();
1520
if (add_base(use->as_Field(), jobj)) {
1521
new_edges++;
1522
}
1523
continue;
1524
}
1525
assert(!use->is_JavaObject(), "sanity");
1526
if (use->is_Arraycopy()) {
1527
if (jobj == null_obj) { // NULL object does not have field edges
1528
continue;
1529
}
1530
// Added edge from Arraycopy node to arraycopy's source java object
1531
if (add_edge(use, jobj)) {
1532
jobj->set_arraycopy_src();
1533
new_edges++;
1534
}
1535
// and stop here.
1536
continue;
1537
}
1538
if (!add_edge(use, jobj)) {
1539
continue; // No new edge added, there was such edge already.
1540
}
1541
new_edges++;
1542
if (use->is_LocalVar()) {
1543
add_uses_to_worklist(use);
1544
if (use->arraycopy_dst()) {
1545
for (EdgeIterator i(use); i.has_next(); i.next()) {
1546
PointsToNode* e = i.get();
1547
if (e->is_Arraycopy()) {
1548
if (jobj == null_obj) { // NULL object does not have field edges
1549
continue;
1550
}
1551
// Add edge from arraycopy's destination java object to Arraycopy node.
1552
if (add_edge(jobj, e)) {
1553
new_edges++;
1554
jobj->set_arraycopy_dst();
1555
}
1556
}
1557
}
1558
}
1559
} else {
1560
// Added a new edge to the values stored in the field.
1561
// Put on worklist all field's uses (loads) and
1562
// related field nodes (same base and offset).
1563
add_field_uses_to_worklist(use->as_Field());
1564
}
1565
}
1566
_worklist.clear();
1567
_in_worklist.reset();
1568
return new_edges;
1569
}
1570
1571
// Put on worklist all related field nodes.
1572
void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {
1573
assert(field->is_oop(), "sanity");
1574
int offset = field->offset();
1575
add_uses_to_worklist(field);
1576
// Loop over all bases of this field and push on worklist Field nodes
1577
// with the same offset and base (since they may reference the same field).
1578
for (BaseIterator i(field); i.has_next(); i.next()) {
1579
PointsToNode* base = i.get();
1580
add_fields_to_worklist(field, base);
1581
// Check if the base was the source object of an arraycopy and go over the arraycopy's
1582
// destination objects since values stored to a field of the source object are
1583
// accessible by uses (loads) of fields of the destination objects.
1584
if (base->arraycopy_src()) {
1585
for (UseIterator j(base); j.has_next(); j.next()) {
1586
PointsToNode* arycp = j.get();
1587
if (arycp->is_Arraycopy()) {
1588
for (UseIterator k(arycp); k.has_next(); k.next()) {
1589
PointsToNode* abase = k.get();
1590
if (abase->arraycopy_dst() && abase != base) {
1591
// Look for the same arraycopy reference.
1592
add_fields_to_worklist(field, abase);
1593
}
1594
}
1595
}
1596
}
1597
}
1598
}
1599
}
1600
1601
// Put on worklist all related field nodes.
1602
void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {
1603
int offset = field->offset();
1604
if (base->is_LocalVar()) {
1605
for (UseIterator j(base); j.has_next(); j.next()) {
1606
PointsToNode* f = j.get();
1607
if (PointsToNode::is_base_use(f)) { // Field
1608
f = PointsToNode::get_use_node(f);
1609
if (f == field || !f->as_Field()->is_oop()) {
1610
continue;
1611
}
1612
int offs = f->as_Field()->offset();
1613
if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
1614
add_to_worklist(f);
1615
}
1616
}
1617
}
1618
} else {
1619
assert(base->is_JavaObject(), "sanity");
1620
if (// Skip phantom_object since it is only used to indicate that
1621
// this field's content globally escapes.
1622
(base != phantom_obj) &&
1623
// NULL object node does not have fields.
1624
(base != null_obj)) {
1625
for (EdgeIterator i(base); i.has_next(); i.next()) {
1626
PointsToNode* f = i.get();
1627
// Skip arraycopy edge since store to destination object field
1628
// does not update value in source object field.
1629
if (f->is_Arraycopy()) {
1630
assert(base->arraycopy_dst(), "sanity");
1631
continue;
1632
}
1633
if (f == field || !f->as_Field()->is_oop()) {
1634
continue;
1635
}
1636
int offs = f->as_Field()->offset();
1637
if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
1638
add_to_worklist(f);
1639
}
1640
}
1641
}
1642
}
1643
}
1644
1645
// Find fields which have unknown value.
1646
int ConnectionGraph::find_field_value(FieldNode* field) {
1647
// Escaped fields should have init value already.
1648
assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
1649
int new_edges = 0;
1650
for (BaseIterator i(field); i.has_next(); i.next()) {
1651
PointsToNode* base = i.get();
1652
if (base->is_JavaObject()) {
1653
// Skip Allocate's fields which will be processed later.
1654
if (base->ideal_node()->is_Allocate()) {
1655
return 0;
1656
}
1657
assert(base == null_obj, "only NULL ptr base expected here");
1658
}
1659
}
1660
if (add_edge(field, phantom_obj)) {
1661
// New edge was added
1662
new_edges++;
1663
add_field_uses_to_worklist(field);
1664
}
1665
return new_edges;
1666
}
1667
1668
// Find fields initializing values for allocations.
1669
int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
1670
assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
1671
Node* alloc = pta->ideal_node();
1672
1673
// Do nothing for Allocate nodes since their field values are
1674
// "known" unless they are initialized by arraycopy/clone.
1675
if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
1676
return 0;
1677
}
1678
assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity");
1679
#ifdef ASSERT
1680
if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == NULL) {
1681
const char* name = alloc->as_CallStaticJava()->_name;
1682
assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");
1683
}
1684
#endif
1685
// Non-escaped allocations returned from Java or runtime calls have unknown values in their fields.
1686
int new_edges = 0;
1687
for (EdgeIterator i(pta); i.has_next(); i.next()) {
1688
PointsToNode* field = i.get();
1689
if (field->is_Field() && field->as_Field()->is_oop()) {
1690
if (add_edge(field, phantom_obj)) {
1691
// New edge was added
1692
new_edges++;
1693
add_field_uses_to_worklist(field->as_Field());
1694
}
1695
}
1696
}
1697
return new_edges;
1698
}
1699
1700
// Find fields initializing values for allocations.
1701
int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseTransform* phase) {
1702
assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
1703
Node* alloc = pta->ideal_node();
1704
// Do nothing for Call nodes since their field values are unknown.
1705
if (!alloc->is_Allocate()) {
1706
return 0;
1707
}
1708
InitializeNode* ini = alloc->as_Allocate()->initialization();
1709
bool visited_bottom_offset = false;
1710
GrowableArray<int> offsets_worklist;
1711
int new_edges = 0;
1712
1713
// Check if an oop field's initializing value is recorded and add
1714
// a corresponding NULL edge if the field's value is not recorded.
1715
// Connection Graph does not record a default initialization by NULL
1716
// captured by Initialize node.
1717
//
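// An illustrative example (hypothetical Java snippet, not from the original comment): in
//
//   class A { Object f; A() { } }
//
// no store to 'f' is captured by the Initialize node, so the loop below adds
// an edge from the field to null_obj to model the default null value.
//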
1718
for (EdgeIterator i(pta); i.has_next(); i.next()) {
1719
PointsToNode* field = i.get(); // Field (AddP)
1720
if (!field->is_Field() || !field->as_Field()->is_oop()) {
1721
continue; // Not oop field
1722
}
1723
int offset = field->as_Field()->offset();
1724
if (offset == Type::OffsetBot) {
1725
if (!visited_bottom_offset) {
1726
// OffsetBot is used to reference array's element,
1727
// always add reference to NULL to all Field nodes since we don't
1728
// know which element is referenced.
1729
if (add_edge(field, null_obj)) {
1730
// New edge was added
1731
new_edges++;
1732
add_field_uses_to_worklist(field->as_Field());
1733
visited_bottom_offset = true;
1734
}
1735
}
1736
} else {
1737
// Check only oop fields.
1738
const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type();
1739
if (adr_type->isa_rawptr()) {
1740
#ifdef ASSERT
1741
// Raw pointers are used for initializing stores so skip it
1742
// since it should be recorded already
1743
Node* base = get_addp_base(field->ideal_node());
1744
assert(adr_type->isa_rawptr() && is_captured_store_address(field->ideal_node()), "unexpected pointer type");
1745
#endif
1746
continue;
1747
}
1748
if (!offsets_worklist.contains(offset)) {
1749
offsets_worklist.append(offset);
1750
Node* value = NULL;
1751
if (ini != NULL) {
1752
// StoreP::memory_type() == T_ADDRESS
1753
BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS;
1754
Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase);
1755
// Make sure initializing store has the same type as this AddP.
1756
// This AddP may reference a non-existing field because it is on a
1757
// dead branch of a bimorphic call which is not eliminated yet.
1758
if (store != NULL && store->is_Store() &&
1759
store->as_Store()->memory_type() == ft) {
1760
value = store->in(MemNode::ValueIn);
1761
#ifdef ASSERT
1762
if (VerifyConnectionGraph) {
1763
// Verify that AddP already points to all objects the value points to.
1764
PointsToNode* val = ptnode_adr(value->_idx);
1765
assert((val != NULL), "should be processed already");
1766
PointsToNode* missed_obj = NULL;
1767
if (val->is_JavaObject()) {
1768
if (!field->points_to(val->as_JavaObject())) {
1769
missed_obj = val;
1770
}
1771
} else {
1772
if (!val->is_LocalVar() || (val->edge_count() == 0)) {
1773
tty->print_cr("----------init store has invalid value -----");
1774
store->dump();
1775
val->dump();
1776
assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
1777
}
1778
for (EdgeIterator j(val); j.has_next(); j.next()) {
1779
PointsToNode* obj = j.get();
1780
if (obj->is_JavaObject()) {
1781
if (!field->points_to(obj->as_JavaObject())) {
1782
missed_obj = obj;
1783
break;
1784
}
1785
}
1786
}
1787
}
1788
if (missed_obj != NULL) {
1789
tty->print_cr("----------field---------------------------------");
1790
field->dump();
1791
tty->print_cr("----------missed referernce to object-----------");
1792
missed_obj->dump();
1793
tty->print_cr("----------object referernced by init store -----");
1794
store->dump();
1795
val->dump();
1796
assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
1797
}
1798
}
1799
#endif
1800
} else {
1801
// There could be initializing stores which follow the allocation.
1802
// For example, a volatile field store is not collected
1803
// by the Initialize node.
1804
//
1805
// Need to check for dependent loads to separate such stores from
1806
// stores which follow loads. For now, add initial value NULL so
1807
// that the pointer compare optimization works correctly.
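//
// An illustrative example of such a store (hypothetical Java snippet, not from
// the original comment):
//
//   class A { volatile Object f; A(Object o) { f = o; } }
//
// The volatile store to 'f' is not captured by the Initialize node, so
// find_captured_store() above does not return it and a NULL initial value
// is added below instead.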
1808
}
1809
}
1810
if (value == NULL) {
1811
// A field's initializing value was not recorded. Add NULL.
1812
if (add_edge(field, null_obj)) {
1813
// New edge was added
1814
new_edges++;
1815
add_field_uses_to_worklist(field->as_Field());
1816
}
1817
}
1818
}
1819
}
1820
}
1821
return new_edges;
1822
}
1823
1824
// Adjust scalar_replaceable state after Connection Graph is built.
1825
void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj) {
1826
// Search for non-escaping objects which are not scalar replaceable
1827
// and mark them to propagate the state to referenced objects.
1828
1829
for (UseIterator i(jobj); i.has_next(); i.next()) {
1830
PointsToNode* use = i.get();
1831
if (use->is_Arraycopy()) {
1832
continue;
1833
}
1834
if (use->is_Field()) {
1835
FieldNode* field = use->as_Field();
1836
assert(field->is_oop() && field->scalar_replaceable(), "sanity");
1837
// 1. An object is not scalar replaceable if the field into which it is
1838
// stored has unknown offset (stored into unknown element of an array).
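// An illustrative example (hypothetical Java snippet, not from the original comment):
//
//   arr[i] = new Point();   // 'i' is not a compile-time constant
//
// The store goes to an unknown array element (offset == OffsetBot), so the
// new Point cannot be scalar replaced.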
1839
if (field->offset() == Type::OffsetBot) {
1840
jobj->set_scalar_replaceable(false);
1841
return;
1842
}
1843
// 2. An object is not scalar replaceable if the field into which it is
1844
// stored has multiple bases one of which is null.
1845
if (field->base_count() > 1) {
1846
for (BaseIterator i(field); i.has_next(); i.next()) {
1847
PointsToNode* base = i.get();
1848
if (base == null_obj) {
1849
jobj->set_scalar_replaceable(false);
1850
return;
1851
}
1852
}
1853
}
1854
}
1855
assert(use->is_Field() || use->is_LocalVar(), "sanity");
1856
// 3. An object is not scalar replaceable if it is merged with other objects.
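// An illustrative example (hypothetical Java snippet, not from the original comment):
//
//   Point p = cond ? new Point() : new Point();
//
// Both allocations are merged by the Phi for 'p' (a LocalVar with two
// JavaObject edges), so the loop below marks both of them as not
// scalar replaceable.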
1857
for (EdgeIterator j(use); j.has_next(); j.next()) {
1858
PointsToNode* ptn = j.get();
1859
if (ptn->is_JavaObject() && ptn != jobj) {
1860
// Mark all objects.
1861
jobj->set_scalar_replaceable(false);
1862
ptn->set_scalar_replaceable(false);
1863
}
1864
}
1865
if (!jobj->scalar_replaceable()) {
1866
return;
1867
}
1868
}
1869
1870
for (EdgeIterator j(jobj); j.has_next(); j.next()) {
1871
if (j.get()->is_Arraycopy()) {
1872
continue;
1873
}
1874
1875
// Non-escaping object node should point only to field nodes.
1876
FieldNode* field = j.get()->as_Field();
1877
int offset = field->as_Field()->offset();
1878
1879
// 4. An object is not scalar replaceable if it has a field with unknown
1880
// offset (array's element is accessed in loop).
1881
if (offset == Type::OffsetBot) {
1882
jobj->set_scalar_replaceable(false);
1883
return;
1884
}
1885
// 5. Currently an object is not scalar replaceable if a LoadStore node
1886
// accesses its field, since the field value is unknown after it.
1887
//
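// An illustrative example (hypothetical Java snippet with a hypothetical
// UPDATER field, not from the original comment):
//
//   Box b = new Box();
//   UPDATER.compareAndSet(b, null, x);   // intrinsified CAS becomes a LoadStore node
//
// The CAS leaves the field's value unknown, so 'b' is kept out of scalar
// replacement by the check below.
//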
1888
Node* n = field->ideal_node();
1889
1890
// Test for an unsafe access that was parsed as maybe off heap
1891
// (with a CheckCastPP to raw memory).
1892
assert(n->is_AddP(), "expect an address computation");
1893
if (n->in(AddPNode::Base)->is_top() &&
1894
n->in(AddPNode::Address)->Opcode() == Op_CheckCastPP) {
1895
assert(n->in(AddPNode::Address)->bottom_type()->isa_rawptr(), "raw address so raw cast expected");
1896
assert(_igvn->type(n->in(AddPNode::Address)->in(1))->isa_oopptr(), "cast pattern at unsafe access expected");
1897
jobj->set_scalar_replaceable(false);
1898
return;
1899
}
1900
1901
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1902
Node* u = n->fast_out(i);
1903
if (u->is_LoadStore() || (u->is_Mem() && u->as_Mem()->is_mismatched_access())) {
1904
jobj->set_scalar_replaceable(false);
1905
return;
1906
}
1907
}
1908
1909
// 6. Or the address may point to more than one object. This may produce
1910
// a false positive result (the object is marked not scalar replaceable)
1911
// since the flow-insensitive escape analysis can't separate
1912
// the case when stores overwrite the field's value from the case
1913
// when stores happen on different control branches.
1914
//
1915
// Note: it will disable scalar replacement in some cases:
1916
//
1917
// Point p[] = new Point[1];
1918
// p[0] = new Point(); // Will not be scalar replaced
1919
//
1920
// but it will save us from incorrect optimizations in cases like the next one:
1921
//
1922
// Point p[] = new Point[1];
1923
// if ( x ) p[0] = new Point(); // Will not be scalar replaced
1924
//
1925
if (field->base_count() > 1) {
1926
for (BaseIterator i(field); i.has_next(); i.next()) {
1927
PointsToNode* base = i.get();
1928
// Don't take into account LocalVar nodes which
1929
// may point to only one object which should also be
1930
// this field's base by now.
1931
if (base->is_JavaObject() && base != jobj) {
1932
// Mark all bases.
1933
jobj->set_scalar_replaceable(false);
1934
base->set_scalar_replaceable(false);
1935
}
1936
}
1937
}
1938
}
1939
}
1940
1941
#ifdef ASSERT
1942
void ConnectionGraph::verify_connection_graph(
1943
GrowableArray<PointsToNode*>& ptnodes_worklist,
1944
GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
1945
GrowableArray<JavaObjectNode*>& java_objects_worklist,
1946
GrowableArray<Node*>& addp_worklist) {
1947
// Verify that graph is complete - no new edges could be added.
1948
int java_objects_length = java_objects_worklist.length();
1949
int non_escaped_length = non_escaped_allocs_worklist.length();
1950
int new_edges = 0;
1951
for (int next = 0; next < java_objects_length; ++next) {
1952
JavaObjectNode* ptn = java_objects_worklist.at(next);
1953
new_edges += add_java_object_edges(ptn, true);
1954
}
1955
assert(new_edges == 0, "graph was not complete");
1956
// Verify that escape state is final.
1957
int length = non_escaped_allocs_worklist.length();
1958
find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist);
1959
assert((non_escaped_length == non_escaped_allocs_worklist.length()) &&
1960
(non_escaped_length == length) &&
1961
(_worklist.length() == 0), "escape state was not final");
1962
1963
// Verify fields information.
1964
int addp_length = addp_worklist.length();
1965
for (int next = 0; next < addp_length; ++next ) {
1966
Node* n = addp_worklist.at(next);
1967
FieldNode* field = ptnode_adr(n->_idx)->as_Field();
1968
if (field->is_oop()) {
1969
// Verify that field has all bases
1970
Node* base = get_addp_base(n);
1971
PointsToNode* ptn = ptnode_adr(base->_idx);
1972
if (ptn->is_JavaObject()) {
1973
assert(field->has_base(ptn->as_JavaObject()), "sanity");
1974
} else {
1975
assert(ptn->is_LocalVar(), "sanity");
1976
for (EdgeIterator i(ptn); i.has_next(); i.next()) {
1977
PointsToNode* e = i.get();
1978
if (e->is_JavaObject()) {
1979
assert(field->has_base(e->as_JavaObject()), "sanity");
1980
}
1981
}
1982
}
1983
// Verify that all fields have initializing values.
1984
if (field->edge_count() == 0) {
1985
tty->print_cr("----------field does not have references----------");
1986
field->dump();
1987
for (BaseIterator i(field); i.has_next(); i.next()) {
1988
PointsToNode* base = i.get();
1989
tty->print_cr("----------field has next base---------------------");
1990
base->dump();
1991
if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) {
1992
tty->print_cr("----------base has fields-------------------------");
1993
for (EdgeIterator j(base); j.has_next(); j.next()) {
1994
j.get()->dump();
1995
}
1996
tty->print_cr("----------base has references---------------------");
1997
for (UseIterator j(base); j.has_next(); j.next()) {
1998
j.get()->dump();
1999
}
2000
}
2001
}
2002
for (UseIterator i(field); i.has_next(); i.next()) {
2003
i.get()->dump();
2004
}
2005
assert(field->edge_count() > 0, "sanity");
2006
}
2007
}
2008
}
2009
}
2010
#endif
2011
2012
// Optimize ideal graph.
2013
void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
2014
GrowableArray<Node*>& storestore_worklist) {
2015
Compile* C = _compile;
2016
PhaseIterGVN* igvn = _igvn;
2017
if (EliminateLocks) {
2018
// Mark locks before changing ideal graph.
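// An illustrative example (hypothetical Java snippet, not from the original comment):
//
//   Object lock = new Object();
//   synchronized (lock) { ... }
//
// 'lock' never escapes the method, so the corresponding Lock/Unlock nodes
// are marked below and can later be eliminated.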
2019
int cnt = C->macro_count();
2020
for (int i = 0; i < cnt; i++) {
2021
Node *n = C->macro_node(i);
2022
if (n->is_AbstractLock()) { // Lock and Unlock nodes
2023
AbstractLockNode* alock = n->as_AbstractLock();
2024
if (!alock->is_non_esc_obj()) {
2025
if (not_global_escape(alock->obj_node())) {
2026
assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
2027
// The lock could be marked eliminated by lock coarsening
2028
// code during first IGVN before EA. Replace coarsened flag
2029
// to eliminate all associated locks/unlocks.
2030
#ifdef ASSERT
2031
alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
2032
#endif
2033
alock->set_non_esc_obj();
2034
}
2035
}
2036
}
2037
}
2038
}
2039
2040
if (OptimizePtrCompare) {
2041
for (int i = 0; i < ptr_cmp_worklist.length(); i++) {
2042
Node *n = ptr_cmp_worklist.at(i);
2043
const TypeInt* tcmp = optimize_ptr_compare(n);
2044
if (tcmp->singleton()) {
2045
Node* cmp = igvn->makecon(tcmp);
2046
#ifndef PRODUCT
2047
if (PrintOptimizePtrCompare) {
2048
tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));
2049
if (Verbose) {
2050
n->dump(1);
2051
}
2052
}
2053
#endif
2054
igvn->replace_node(n, cmp);
2055
}
2056
}
2057
}
2058
2059
// For MemBarStoreStore nodes added in library_call.cpp, check
2060
// the escape status of the associated AllocateNode and optimize out the
2061
// MemBarStoreStore node if the allocated object never escapes.
2062
for (int i = 0; i < storestore_worklist.length(); i++) {
2063
Node* storestore = storestore_worklist.at(i);
2064
assert(storestore->is_MemBarStoreStore(), "");
2065
Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
2066
if (alloc->is_Allocate() && not_global_escape(alloc)) {
2067
MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
2068
mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));
2069
mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
2070
igvn->register_new_node_with_optimizer(mb);
2071
igvn->replace_node(storestore, mb);
2072
}
2073
}
2074
}
2075
2076
// Optimize comparisons of object pointers.
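// An illustrative example (hypothetical Java snippet, not from the original comment):
//
//   Object o = new Object();   // non-escaping allocation
//   if (o == p) { ... }
//
// If 'o' does not escape and 'p' cannot point to it, the CmpP folds to
// NotEQ; the same reasoning also covers the implicit 'o != null' case.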
2077
const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* n) {
2078
assert(OptimizePtrCompare, "sanity");
2079
const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO
2080
const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE
2081
const TypeInt* UNKNOWN = TypeInt::CC; // [-1, 0,1]
2082
2083
PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx);
2084
PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx);
2085
JavaObjectNode* jobj1 = unique_java_object(n->in(1));
2086
JavaObjectNode* jobj2 = unique_java_object(n->in(2));
2087
assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
2088
assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");
2089
2090
// Check simple cases first.
2091
if (jobj1 != NULL) {
2092
if (jobj1->escape_state() == PointsToNode::NoEscape) {
2093
if (jobj1 == jobj2) {
2094
// Comparing the same not escaping object.
2095
return EQ;
2096
}
2097
Node* obj = jobj1->ideal_node();
2098
// Comparing not escaping allocation.
2099
if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
2100
!ptn2->points_to(jobj1)) {
2101
return NE; // This includes nullness check.
2102
}
2103
}
2104
}
2105
if (jobj2 != NULL) {
2106
if (jobj2->escape_state() == PointsToNode::NoEscape) {
2107
Node* obj = jobj2->ideal_node();
2108
// Comparing not escaping allocation.
2109
if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
2110
!ptn1->points_to(jobj2)) {
2111
return NE; // This includes nullness check.
2112
}
2113
}
2114
}
2115
if (jobj1 != NULL && jobj1 != phantom_obj &&
2116
jobj2 != NULL && jobj2 != phantom_obj &&
2117
jobj1->ideal_node()->is_Con() &&
2118
jobj2->ideal_node()->is_Con()) {
2119
// Klass or String constants compare. Need to be careful with
2120
// compressed pointers - compare types of ConN and ConP instead of nodes.
2121
const Type* t1 = jobj1->ideal_node()->get_ptr_type();
2122
const Type* t2 = jobj2->ideal_node()->get_ptr_type();
2123
if (t1->make_ptr() == t2->make_ptr()) {
2124
return EQ;
2125
} else {
2126
return NE;
2127
}
2128
}
2129
if (ptn1->meet(ptn2)) {
2130
return UNKNOWN; // Sets are not disjoint
2131
}
2132
2133
// Sets are disjoint.
2134
bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj);
2135
bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj);
2136
bool set1_has_null_ptr = ptn1->points_to(null_obj);
2137
bool set2_has_null_ptr = ptn2->points_to(null_obj);
2138
if ((set1_has_unknown_ptr && set2_has_null_ptr) ||
2139
(set2_has_unknown_ptr && set1_has_null_ptr)) {
2140
// Check nullness of unknown object.
2141
return UNKNOWN;
2142
}
2143
2144
// Disjointness by itself is not sufficient since
2145
// alias analysis is not complete for escaped objects.
2146
// Disjoint sets are definitely unrelated only when
2147
// at least one set has only not escaping allocations.
2148
if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
2149
if (ptn1->non_escaping_allocation()) {
2150
return NE;
2151
}
2152
}
2153
if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
2154
if (ptn2->non_escaping_allocation()) {
2155
return NE;
2156
}
2157
}
2158
return UNKNOWN;
2159
}
2160
2161
// Connection Graph construction functions.
2162
2163
void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) {
2164
PointsToNode* ptadr = _nodes.at(n->_idx);
2165
if (ptadr != NULL) {
2166
assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity");
2167
return;
2168
}
2169
Compile* C = _compile;
2170
ptadr = new (C->comp_arena()) LocalVarNode(this, n, es);
2171
map_ideal_node(n, ptadr);
2172
}
2173
2174
void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) {
2175
PointsToNode* ptadr = _nodes.at(n->_idx);
2176
if (ptadr != NULL) {
2177
assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity");
2178
return;
2179
}
2180
Compile* C = _compile;
2181
ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es);
2182
map_ideal_node(n, ptadr);
2183
}
2184
2185
void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) {
2186
PointsToNode* ptadr = _nodes.at(n->_idx);
2187
if (ptadr != NULL) {
2188
assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity");
2189
return;
2190
}
2191
bool unsafe = false;
2192
bool is_oop = is_oop_field(n, offset, &unsafe);
2193
if (unsafe) {
2194
es = PointsToNode::GlobalEscape;
2195
}
2196
Compile* C = _compile;
2197
FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop);
2198
map_ideal_node(n, field);
2199
}
2200
2201
void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es,
2202
PointsToNode* src, PointsToNode* dst) {
2203
assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
2204
assert((src != null_obj) && (dst != null_obj), "not for ConP NULL");
2205
PointsToNode* ptadr = _nodes.at(n->_idx);
2206
if (ptadr != NULL) {
2207
assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
2208
return;
2209
}
2210
Compile* C = _compile;
2211
ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
2212
map_ideal_node(n, ptadr);
2213
// Add edge from arraycopy node to source object.
2214
(void)add_edge(ptadr, src);
2215
src->set_arraycopy_src();
2216
// Add edge from destination object to arraycopy node.
2217
(void)add_edge(dst, ptadr);
2218
dst->set_arraycopy_dst();
2219
}
2220
2221
bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
2222
const Type* adr_type = n->as_AddP()->bottom_type();
2223
BasicType bt = T_INT;
2224
if (offset == Type::OffsetBot) {
2225
// Check only oop fields.
2226
if (!adr_type->isa_aryptr() ||
2227
(adr_type->isa_aryptr()->klass() == NULL) ||
2228
adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {
2229
// OffsetBot is used to reference array's element. Ignore first AddP.
2230
if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) {
2231
bt = T_OBJECT;
2232
}
2233
}
2234
} else if (offset != oopDesc::klass_offset_in_bytes()) {
2235
if (adr_type->isa_instptr()) {
2236
ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
2237
if (field != NULL) {
2238
bt = field->layout_type();
2239
} else {
2240
// Check for unsafe oop field access
2241
if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2242
n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2243
n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
2244
BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
2245
bt = T_OBJECT;
2246
(*unsafe) = true;
2247
}
2248
}
2249
} else if (adr_type->isa_aryptr()) {
2250
if (offset == arrayOopDesc::length_offset_in_bytes()) {
2251
// Ignore array length load.
2252
} else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {
2253
// Ignore first AddP.
2254
} else {
2255
const Type* elemtype = adr_type->isa_aryptr()->elem();
2256
bt = elemtype->array_element_basic_type();
2257
}
2258
} else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
2259
// Allocation initialization, ThreadLocal field access, unsafe access
2260
if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2261
n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2262
n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
2263
BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
2264
bt = T_OBJECT;
2265
}
2266
}
2267
}
2268
// Note: T_NARROWOOP is not classed as a real reference type
2269
return (is_reference_type(bt) || bt == T_NARROWOOP);
2270
}
2271
2272
// Returns the unique java object this node points to, or NULL.
2273
JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {
2274
assert(!_collecting, "should not call when constructed graph");
2275
// If the node was created after the escape computation we can't answer.
2276
uint idx = n->_idx;
2277
if (idx >= nodes_size()) {
2278
return NULL;
2279
}
2280
PointsToNode* ptn = ptnode_adr(idx);
2281
if (ptn == NULL) {
2282
return NULL;
2283
}
2284
if (ptn->is_JavaObject()) {
2285
return ptn->as_JavaObject();
2286
}
2287
assert(ptn->is_LocalVar(), "sanity");
2288
// Check all java objects it points to.
2289
JavaObjectNode* jobj = NULL;
2290
for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2291
PointsToNode* e = i.get();
2292
if (e->is_JavaObject()) {
2293
if (jobj == NULL) {
2294
jobj = e->as_JavaObject();
2295
} else if (jobj != e) {
2296
return NULL;
2297
}
2298
}
2299
}
2300
return jobj;
2301
}
2302
2303
// Return true if this node points only to non-escaping allocations.
2304
bool PointsToNode::non_escaping_allocation() {
2305
if (is_JavaObject()) {
2306
Node* n = ideal_node();
2307
if (n->is_Allocate() || n->is_CallStaticJava()) {
2308
return (escape_state() == PointsToNode::NoEscape);
2309
} else {
2310
return false;
2311
}
2312
}
2313
assert(is_LocalVar(), "sanity");
2314
// Check all java objects it points to.
2315
for (EdgeIterator i(this); i.has_next(); i.next()) {
2316
PointsToNode* e = i.get();
2317
if (e->is_JavaObject()) {
2318
Node* n = e->ideal_node();
2319
if ((e->escape_state() != PointsToNode::NoEscape) ||
2320
!(n->is_Allocate() || n->is_CallStaticJava())) {
2321
return false;
2322
}
2323
}
2324
}
2325
return true;
2326
}
2327
2328
// Return true if we know the node does not escape globally.
2329
bool ConnectionGraph::not_global_escape(Node *n) {
2330
assert(!_collecting, "should not call during graph construction");
2331
// If the node was created after the escape computation we can't answer.
2332
uint idx = n->_idx;
2333
if (idx >= nodes_size()) {
2334
return false;
2335
}
2336
PointsToNode* ptn = ptnode_adr(idx);
2337
if (ptn == NULL) {
2338
return false; // not in congraph (e.g. ConI)
2339
}
2340
PointsToNode::EscapeState es = ptn->escape_state();
2341
// If we have already computed a value, return it.
2342
if (es >= PointsToNode::GlobalEscape) {
2343
return false;
2344
}
2345
if (ptn->is_JavaObject()) {
2346
return true; // (es < PointsToNode::GlobalEscape);
2347
}
2348
assert(ptn->is_LocalVar(), "sanity");
2349
// Check all java objects it points to.
2350
for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2351
if (i.get()->escape_state() >= PointsToNode::GlobalEscape) {
2352
return false;
2353
}
2354
}
2355
return true;
2356
}
2357
2358
2359
// Helper functions
2360
2361
// Return true if this node is the specified JavaObject node or points to it.
2362
bool PointsToNode::points_to(JavaObjectNode* ptn) const {
2363
if (is_JavaObject()) {
2364
return (this == ptn);
2365
}
2366
assert(is_LocalVar() || is_Field(), "sanity");
2367
for (EdgeIterator i(this); i.has_next(); i.next()) {
2368
if (i.get() == ptn) {
2369
return true;
2370
}
2371
}
2372
return false;
2373
}
2374
2375
// Return true if one node points to the other or their points-to sets intersect.
2376
bool PointsToNode::meet(PointsToNode* ptn) {
2377
if (this == ptn) {
2378
return true;
2379
} else if (ptn->is_JavaObject()) {
2380
return this->points_to(ptn->as_JavaObject());
2381
} else if (this->is_JavaObject()) {
2382
return ptn->points_to(this->as_JavaObject());
2383
}
2384
assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");
2385
int ptn_count = ptn->edge_count();
2386
for (EdgeIterator i(this); i.has_next(); i.next()) {
2387
PointsToNode* this_e = i.get();
2388
for (int j = 0; j < ptn_count; j++) {
2389
if (this_e == ptn->edge(j)) {
2390
return true;
2391
}
2392
}
2393
}
2394
return false;
2395
}
2396
2397
#ifdef ASSERT
2398
// Return true if the given java object is one of this field's bases.
2399
bool FieldNode::has_base(JavaObjectNode* jobj) const {
2400
for (BaseIterator i(this); i.has_next(); i.next()) {
2401
if (i.get() == jobj) {
2402
return true;
2403
}
2404
}
2405
return false;
2406
}
2407
#endif
2408
2409
bool ConnectionGraph::is_captured_store_address(Node* addp) {
2410
// Handle simple case first.
2411
assert(_igvn->type(addp)->isa_oopptr() == NULL, "should be raw access");
2412
if (addp->in(AddPNode::Address)->is_Proj() && addp->in(AddPNode::Address)->in(0)->is_Allocate()) {
2413
return true;
2414
} else if (addp->in(AddPNode::Address)->is_Phi()) {
2415
for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
2416
Node* addp_use = addp->fast_out(i);
2417
if (addp_use->is_Store()) {
2418
for (DUIterator_Fast jmax, j = addp_use->fast_outs(jmax); j < jmax; j++) {
2419
if (addp_use->fast_out(j)->is_Initialize()) {
2420
return true;
2421
}
2422
}
2423
}
2424
}
2425
}
2426
return false;
2427
}
2428
2429
int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
2430
const Type *adr_type = phase->type(adr);
2431
if (adr->is_AddP() && adr_type->isa_oopptr() == NULL && is_captured_store_address(adr)) {
2432
// We are computing a raw address for a store captured by an Initialize node;
2433
// compute an appropriate address type. AddP cases #3 and #5 (see below).
2434
int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
2435
assert(offs != Type::OffsetBot ||
2436
adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
2437
"offset must be a constant or it is initialization of array");
2438
return offs;
2439
}
2440
const TypePtr *t_ptr = adr_type->isa_ptr();
2441
assert(t_ptr != NULL, "must be a pointer type");
2442
return t_ptr->offset();
2443
}
2444
2445
Node* ConnectionGraph::get_addp_base(Node *addp) {
2446
assert(addp->is_AddP(), "must be AddP");
2447
//
2448
// AddP cases for Base and Address inputs:
2449
// case #1. Direct object's field reference:
2450
// Allocate
2451
// |
2452
// Proj #5 ( oop result )
2453
// |
2454
// CheckCastPP (cast to instance type)
2455
// | |
2456
// AddP ( base == address )
2457
//
2458
// case #2. Indirect object's field reference:
2459
// Phi
2460
// |
2461
// CastPP (cast to instance type)
2462
// | |
2463
// AddP ( base == address )
2464
//
2465
// case #3. Raw object's field reference for Initialize node:
2466
// Allocate
2467
// |
2468
// Proj #5 ( oop result )
2469
// top |
2470
// \ |
2471
// AddP ( base == top )
2472
//
2473
// case #4. Array's element reference:
2474
// {CheckCastPP | CastPP}
2475
// | | |
2476
// | AddP ( array's element offset )
2477
// | |
2478
// AddP ( array's offset )
2479
//
2480
// case #5. Raw object's field reference for arraycopy stub call:
2481
// The inline_native_clone() case when the arraycopy stub is called
2482
// after the allocation before Initialize and CheckCastPP nodes.
2483
// Allocate
2484
// |
2485
// Proj #5 ( oop result )
2486
// | |
2487
// AddP ( base == address )
2488
//
2489
// case #6. Constant Pool, ThreadLocal, CastX2P or
2490
// Raw object's field reference:
2491
// {ConP, ThreadLocal, CastX2P, raw Load}
2492
// top |
2493
// \ |
2494
// AddP ( base == top )
2495
//
2496
// case #7. Klass's field reference.
2497
// LoadKlass
2498
// | |
2499
// AddP ( base == address )
2500
//
2501
// case #8. narrow Klass's field reference.
2502
// LoadNKlass
2503
// |
2504
// DecodeN
2505
// | |
2506
// AddP ( base == address )
2507
//
2508
// case #9. Mixed unsafe access
2509
// {instance}
2510
// |
2511
// CheckCastPP (raw)
2512
// top |
2513
// \ |
2514
// AddP ( base == top )
2515
//
2516
Node *base = addp->in(AddPNode::Base);
2517
if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9.
2518
base = addp->in(AddPNode::Address);
2519
while (base->is_AddP()) {
2520
// Case #6 (unsafe access) may have several chained AddP nodes.
2521
assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
2522
base = base->in(AddPNode::Address);
2523
}
2524
if (base->Opcode() == Op_CheckCastPP &&
2525
base->bottom_type()->isa_rawptr() &&
2526
_igvn->type(base->in(1))->isa_oopptr()) {
2527
base = base->in(1); // Case #9
2528
} else {
2529
Node* uncast_base = base->uncast();
2530
int opcode = uncast_base->Opcode();
2531
assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
2532
opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
2533
(uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||
2534
is_captured_store_address(addp), "sanity");
2535
}
2536
}
2537
return base;
2538
}
2539
2540
Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
2541
assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
2542
Node* addp2 = addp->raw_out(0);
2543
if (addp->outcnt() == 1 && addp2->is_AddP() &&
2544
addp2->in(AddPNode::Base) == n &&
2545
addp2->in(AddPNode::Address) == addp) {
2546
assert(addp->in(AddPNode::Base) == n, "expecting the same base");
2547
//
2548
// Find the array's offset to push it on the worklist first and
2549
// as a result process the array's element offset first (pushed second)
2550
// to avoid a CastPP for the array's offset.
2551
// Otherwise the inserted CastPP (LocalVar) will point to what
2552
// the AddP (Field) points to, which would be wrong since
2553
// the algorithm expects the CastPP to point to the same thing
2554
// as the AddP's base CheckCastPP (LocalVar).
2555
//
2556
// ArrayAllocation
2557
// |
2558
// CheckCastPP
2559
// |
2560
// memProj (from ArrayAllocation CheckCastPP)
2561
// | ||
2562
// | || Int (element index)
2563
// | || | ConI (log(element size))
2564
// | || | /
2565
// | || LShift
2566
// | || /
2567
// | AddP (array's element offset)
2568
// | |
2569
// | | ConI (array's offset: #12(32-bits) or #24(64-bits))
2570
// | / /
2571
// AddP (array's offset)
2572
// |
2573
// Load/Store (memory operation on array's element)
2574
//
2575
return addp2;
2576
}
2577
return NULL;
2578
}
2579
2580
//
2581
// Adjust the type and inputs of an AddP which computes the
2582
// address of a field of an instance
2583
//
2584
bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
2585
PhaseGVN* igvn = _igvn;
2586
const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
2587
assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
2588
const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
2589
if (t == NULL) {
2590
// We are computing a raw address for a store captured by an Initialize node;
2591
// compute an appropriate address type (cases #3 and #5).
2592
assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
2593
assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
2594
intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
2595
assert(offs != Type::OffsetBot, "offset must be a constant");
2596
t = base_t->add_offset(offs)->is_oopptr();
2597
}
2598
int inst_id = base_t->instance_id();
2599
assert(!t->is_known_instance() || t->instance_id() == inst_id,
2600
"old type must be non-instance or match new type");
2601
2602
// The type 't' could be a subclass of 'base_t'.
2603
// As a result t->offset() could be larger than base_t's size and it will
2604
// cause a failure in add_offset() with narrow oops since the TypeOopPtr()
2605
// constructor verifies correctness of the offset.
2606
//
2607
// It could happen on a subclass's branch (from the type profiling
2608
// inlining) which was not eliminated during parsing since the exactness
2609
// of the allocation type was not propagated to the subclass type check.
2610
//
2611
// Or the type 't' could be unrelated to 'base_t' at all.
2612
// It could happen when the CHA type is different from the MDO type on a dead path
2613
// (for example, from an instanceof check) which is not collapsed during parsing.
2614
//
2615
// Do nothing for such AddP node and don't process its users since
2616
// this code branch will go away.
2617
//
2618
if (!t->is_known_instance() &&
2619
!base_t->klass()->is_subtype_of(t->klass())) {
2620
return false; // bail out
2621
}
2622
const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
2623
// Do NOT remove the next line: ensure a new alias index is allocated
2624
// for the instance type. Note: C++ will not remove it since the call
2625
// has side effect.
2626
int alias_idx = _compile->get_alias_index(tinst);
2627
igvn->set_type(addp, tinst);
2628
// record the allocation in the node map
2629
set_map(addp, get_map(base->_idx));
2630
// Set addp's Base and Address to 'base'.
2631
Node *abase = addp->in(AddPNode::Base);
2632
Node *adr = addp->in(AddPNode::Address);
2633
if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
2634
adr->in(0)->_idx == (uint)inst_id) {
2635
// Skip AddP cases #3 and #5.
2636
} else {
2637
assert(!abase->is_top(), "sanity"); // AddP case #3
2638
if (abase != base) {
2639
igvn->hash_delete(addp);
2640
addp->set_req(AddPNode::Base, base);
2641
if (abase == adr) {
2642
addp->set_req(AddPNode::Address, base);
2643
} else {
2644
// AddP case #4 (adr is array's element offset AddP node)
2645
#ifdef ASSERT
2646
const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
2647
assert(adr->is_AddP() && atype != NULL &&
2648
atype->instance_id() == inst_id, "array's element offset should be processed first");
2649
#endif
2650
}
2651
igvn->hash_insert(addp);
2652
}
2653
}
2654
// Put on IGVN worklist since at least addp's type was changed above.
2655
record_for_optimizer(addp);
2656
return true;
2657
}
2658
2659
//
2660
// Create a new version of orig_phi if necessary. Returns either the newly
2661
// created phi or an existing phi. Sets create_new to indicate whether a new
2662
// phi was created. Cache the last newly created phi in the node map.
2663
//
2664
PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, bool &new_created) {
2665
Compile *C = _compile;
2666
PhaseGVN* igvn = _igvn;
2667
new_created = false;
2668
int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
2669
// nothing to do if orig_phi is bottom memory or matches alias_idx
2670
if (phi_alias_idx == alias_idx) {
2671
return orig_phi;
2672
}
2673
// Have we recently created a Phi for this alias index?
2674
PhiNode *result = get_map_phi(orig_phi->_idx);
2675
if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) {
2676
return result;
2677
}
2678
// Previous check may fail when the same wide memory Phi was split into Phis
2679
// for different memory slices. Search all Phis for this region.
2680
if (result != NULL) {
2681
Node* region = orig_phi->in(0);
2682
for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
2683
Node* phi = region->fast_out(i);
2684
if (phi->is_Phi() &&
2685
C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
2686
assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
2687
return phi->as_Phi();
2688
}
2689
}
2690
}
2691
if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) {
2692
if (C->do_escape_analysis() == true && !C->failing()) {
2693
// Retry compilation without escape analysis.
2694
// If this is the first failure, the sentinel string will "stick"
2695
// to the Compile object, and the C2Compiler will see it and retry.
2696
C->record_failure(C2Compiler::retry_no_escape_analysis());
2697
}
2698
return NULL;
2699
}
2700
orig_phi_worklist.append_if_missing(orig_phi);
2701
const TypePtr *atype = C->get_adr_type(alias_idx);
2702
result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);
2703
C->copy_node_notes_to(result, orig_phi);
2704
igvn->set_type(result, result->bottom_type());
2705
record_for_optimizer(result);
2706
set_map(orig_phi, result);
2707
new_created = true;
2708
return result;
2709
}
2710
2711
//
2712
// Return a new version of Memory Phi "orig_phi" with the inputs having the
2713
// specified alias index.
2714
//
2715
PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist) {
2716
assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
2717
Compile *C = _compile;
2718
PhaseGVN* igvn = _igvn;
2719
bool new_phi_created;
2720
PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);
2721
if (!new_phi_created) {
2722
return result;
2723
}
2724
GrowableArray<PhiNode *> phi_list;
2725
GrowableArray<uint> cur_input;
2726
PhiNode *phi = orig_phi;
2727
uint idx = 1;
2728
bool finished = false;
2729
while(!finished) {
2730
while (idx < phi->req()) {
2731
Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist);
2732
if (mem != NULL && mem->is_Phi()) {
2733
PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);
2734
if (new_phi_created) {
2735
// found a Phi for which we created a new split; push the current one on the worklist
2736
// and begin processing the new one
2737
phi_list.push(phi);
2738
cur_input.push(idx);
2739
phi = mem->as_Phi();
2740
result = newphi;
2741
idx = 1;
2742
continue;
2743
} else {
2744
mem = newphi;
2745
}
2746
}
2747
if (C->failing()) {
2748
return NULL;
2749
}
2750
result->set_req(idx++, mem);
2751
}
2752
#ifdef ASSERT
2753
// verify that the new Phi has an input for each input of the original
2754
assert( phi->req() == result->req(), "must have same number of inputs.");
2755
assert( result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match");
2756
#endif
2757
// Check if all new phi's inputs have specified alias index.
2758
// Otherwise use old phi.
2759
for (uint i = 1; i < phi->req(); i++) {
2760
Node* in = result->in(i);
2761
assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond.");
2762
}
2763
// we have finished processing a Phi, see if there are any more to do
2764
finished = (phi_list.length() == 0 );
2765
if (!finished) {
2766
phi = phi_list.pop();
2767
idx = cur_input.pop();
2768
PhiNode *prev_result = get_map_phi(phi->_idx);
2769
prev_result->set_req(idx++, result);
2770
result = prev_result;
2771
}
2772
}
2773
return result;
2774
}
2775
2776
//
2777
// The next methods are derived from methods in MemNode.
2778
//
2779
Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {
2780
Node *mem = mmem;
2781
// TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally
2782
// means an array I have not precisely typed yet. Do not do any
2783
// alias stuff with it any time soon.
2784
if (toop->base() != Type::AnyPtr &&
2785
!(toop->klass() != NULL &&
2786
toop->klass()->is_java_lang_Object() &&
2787
toop->offset() == Type::OffsetBot)) {
2788
mem = mmem->memory_at(alias_idx);
2789
// Update input if it is progress over what we have now
2790
}
2791
return mem;
2792
}
2793
2794
//
2795
// Move memory users to their memory slices.
2796
//
2797
void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis) {
2798
Compile* C = _compile;
2799
PhaseGVN* igvn = _igvn;
2800
const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr();
2801
assert(tp != NULL, "ptr type");
2802
int alias_idx = C->get_alias_index(tp);
2803
int general_idx = C->get_general_index(alias_idx);
2804
2805
// Move users first
2806
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2807
Node* use = n->fast_out(i);
2808
if (use->is_MergeMem()) {
2809
MergeMemNode* mmem = use->as_MergeMem();
2810
assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice");
2811
if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) {
2812
continue; // Nothing to do
2813
}
2814
// Replace previous general reference to mem node.
2815
uint orig_uniq = C->unique();
2816
Node* m = find_inst_mem(n, general_idx, orig_phis);
2817
assert(orig_uniq == C->unique(), "no new nodes");
2818
mmem->set_memory_at(general_idx, m);
2819
--imax;
2820
--i;
2821
} else if (use->is_MemBar()) {
2822
assert(!use->is_Initialize(), "initializing stores should not be moved");
2823
if (use->req() > MemBarNode::Precedent &&
2824
use->in(MemBarNode::Precedent) == n) {
2825
// Don't move related membars.
2826
record_for_optimizer(use);
2827
continue;
2828
}
2829
tp = use->as_MemBar()->adr_type()->isa_ptr();
2830
if ((tp != NULL && C->get_alias_index(tp) == alias_idx) ||
2831
alias_idx == general_idx) {
2832
continue; // Nothing to do
2833
}
2834
// Move to general memory slice.
2835
uint orig_uniq = C->unique();
2836
Node* m = find_inst_mem(n, general_idx, orig_phis);
2837
assert(orig_uniq == C->unique(), "no new nodes");
2838
igvn->hash_delete(use);
2839
imax -= use->replace_edge(n, m, igvn);
2840
igvn->hash_insert(use);
2841
record_for_optimizer(use);
2842
--i;
2843
#ifdef ASSERT
2844
} else if (use->is_Mem()) {
2845
if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) {
2846
// Don't move related cardmark.
2847
continue;
2848
}
2849
// Memory nodes should have new memory input.
2850
tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
2851
assert(tp != NULL, "ptr type");
2852
int idx = C->get_alias_index(tp);
2853
assert(get_map(use->_idx) != NULL || idx == alias_idx,
2854
"Following memory nodes should have new memory input or be on the same memory slice");
2855
} else if (use->is_Phi()) {
2856
// Phi nodes should be split and moved already.
2857
tp = use->as_Phi()->adr_type()->isa_ptr();
2858
assert(tp != NULL, "ptr type");
2859
int idx = C->get_alias_index(tp);
2860
assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
2861
} else {
2862
use->dump();
2863
assert(false, "should not be here");
2864
#endif
2865
}
2866
}
2867
}
2868
2869
//
2870
// Search the memory chain of "mem" to find a MemNode whose address
2871
// type corresponds to the specified alias index.
2872
//
2873
Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis) {
2874
if (orig_mem == NULL) {
2875
return orig_mem;
2876
}
2877
Compile* C = _compile;
2878
PhaseGVN* igvn = _igvn;
2879
const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
2880
bool is_instance = (toop != NULL) && toop->is_known_instance();
2881
Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
2882
Node *prev = NULL;
2883
Node *result = orig_mem;
2884
while (prev != result) {
2885
prev = result;
2886
if (result == start_mem) {
2887
break; // hit one of our sentinels
2888
}
2889
if (result->is_Mem()) {
2890
const Type *at = igvn->type(result->in(MemNode::Address));
2891
if (at == Type::TOP) {
2892
break; // Dead
2893
}
2894
assert (at->isa_ptr() != NULL, "pointer type required.");
2895
int idx = C->get_alias_index(at->is_ptr());
2896
if (idx == alias_idx) {
2897
break; // Found
2898
}
2899
if (!is_instance && (at->isa_oopptr() == NULL ||
2900
!at->is_oopptr()->is_known_instance())) {
2901
break; // Do not skip store to general memory slice.
2902
}
2903
result = result->in(MemNode::Memory);
2904
}
2905
if (!is_instance) {
2906
continue; // don't search further for non-instance types
2907
}
2908
// skip over a call which does not affect this memory slice
2909
if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
2910
Node *proj_in = result->in(0);
2911
if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
2912
break; // hit one of our sentinels
2913
} else if (proj_in->is_Call()) {
2914
// ArrayCopy node processed here as well
2915
CallNode *call = proj_in->as_Call();
2916
if (!call->may_modify(toop, igvn)) {
2917
result = call->in(TypeFunc::Memory);
2918
}
2919
} else if (proj_in->is_Initialize()) {
2920
AllocateNode* alloc = proj_in->as_Initialize()->allocation();
2921
// Stop if this is the initialization for the object instance
2922
// which contains this memory slice, otherwise skip over it.
2923
if (alloc == NULL || alloc->_idx != (uint)toop->instance_id()) {
2924
result = proj_in->in(TypeFunc::Memory);
2925
}
2926
} else if (proj_in->is_MemBar()) {
2927
// Check if there is an array copy for a clone
2928
// Step over GC barrier when ReduceInitialCardMarks is disabled
2929
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
2930
Node* control_proj_ac = bs->step_over_gc_barrier(proj_in->in(0));
2931
2932
if (control_proj_ac->is_Proj() && control_proj_ac->in(0)->is_ArrayCopy()) {
2933
// Stop if it is a clone
2934
ArrayCopyNode* ac = control_proj_ac->in(0)->as_ArrayCopy();
2935
if (ac->may_modify(toop, igvn)) {
2936
break;
2937
}
2938
}
2939
result = proj_in->in(TypeFunc::Memory);
2940
}
2941
} else if (result->is_MergeMem()) {
2942
MergeMemNode *mmem = result->as_MergeMem();
2943
result = step_through_mergemem(mmem, alias_idx, toop);
2944
if (result == mmem->base_memory()) {
2945
// Didn't find instance memory, search through general slice recursively.
2946
result = mmem->memory_at(C->get_general_index(alias_idx));
2947
result = find_inst_mem(result, alias_idx, orig_phis);
2948
if (C->failing()) {
2949
return NULL;
2950
}
2951
mmem->set_memory_at(alias_idx, result);
2952
}
2953
} else if (result->is_Phi() &&
2954
C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
2955
Node *un = result->as_Phi()->unique_input(igvn);
2956
if (un != NULL) {
2957
orig_phis.append_if_missing(result->as_Phi());
2958
result = un;
2959
} else {
2960
break;
2961
}
2962
} else if (result->is_ClearArray()) {
2963
if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) {
2964
// Can not bypass initialization of the instance
2965
// we are looking for.
2966
break;
2967
}
2968
// Otherwise skip it (the call updated 'result' value).
2969
} else if (result->Opcode() == Op_SCMemProj) {
2970
Node* mem = result->in(0);
2971
Node* adr = NULL;
2972
if (mem->is_LoadStore()) {
2973
adr = mem->in(MemNode::Address);
2974
} else {
2975
assert(mem->Opcode() == Op_EncodeISOArray ||
2976
mem->Opcode() == Op_StrCompressedCopy, "sanity");
2977
adr = mem->in(3); // Memory edge corresponds to destination array
2978
}
2979
const Type *at = igvn->type(adr);
2980
if (at != Type::TOP) {
2981
assert(at->isa_ptr() != NULL, "pointer type required.");
2982
int idx = C->get_alias_index(at->is_ptr());
2983
if (idx == alias_idx) {
2984
// Assert in debug mode
2985
assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
2986
break; // In product mode return SCMemProj node
2987
}
2988
}
2989
result = mem->in(MemNode::Memory);
2990
} else if (result->Opcode() == Op_StrInflatedCopy) {
2991
Node* adr = result->in(3); // Memory edge corresponds to destination array
2992
const Type *at = igvn->type(adr);
2993
if (at != Type::TOP) {
2994
assert(at->isa_ptr() != NULL, "pointer type required.");
2995
int idx = C->get_alias_index(at->is_ptr());
2996
if (idx == alias_idx) {
2997
// Assert in debug mode
2998
assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field");
2999
break; // In product mode return SCMemProj node
3000
}
3001
}
3002
result = result->in(MemNode::Memory);
3003
}
3004
}
3005
if (result->is_Phi()) {
3006
PhiNode *mphi = result->as_Phi();
3007
assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
3008
const TypePtr *t = mphi->adr_type();
3009
if (!is_instance) {
3010
// Push all non-instance Phis on the orig_phis worklist to update inputs
3011
// during Phase 4 if needed.
3012
orig_phis.append_if_missing(mphi);
3013
} else if (C->get_alias_index(t) != alias_idx) {
3014
// Create a new Phi with the specified alias index type.
3015
result = split_memory_phi(mphi, alias_idx, orig_phis);
3016
}
3017
}
3018
// The result is either a MemNode, a PhiNode, or an InitializeNode.
3019
return result;
3020
}
3021
3022
//
3023
// Convert the types of non-escaped object to instance types where possible,
3024
// propagate the new type information through the graph, and update memory
3025
// edges and MergeMem inputs to reflect the new type.
3026
//
3027
// We start with allocations (and calls which may be allocations) on alloc_worklist.
3028
// The processing is done in 4 phases:
3029
//
3030
// Phase 1: Process possible allocations from alloc_worklist. Create instance
3031
// types for the CheckCastPP for allocations where possible.
3032
// Propagate the new types through users as follows:
3033
// casts and Phi: push users on alloc_worklist
3034
// AddP: cast Base and Address inputs to the instance type
3035
// push any AddP users on alloc_worklist and push any memnode
3036
// users onto memnode_worklist.
3037
// Phase 2: Process MemNode's from memnode_worklist. Compute a new address type and
3038
// search the Memory chain for a store with the appropriate
3039
// address type. If a Phi is found, create a new version with
3040
// the appropriate memory slices from each of the Phi inputs.
3041
// For stores, process the users as follows:
3042
// MemNode: push on memnode_worklist
3043
// MergeMem: push on mergemem_worklist
3044
// Phase 3: Process MergeMem nodes from mergemem_worklist. Walk each memory slice
3045
// moving the first node encountered of each instance type to the
3046
// input corresponding to its alias index (i.e. the appropriate
3047
// memory slice).
3048
// Phase 4: Update the inputs of non-instance memory Phis and the Memory input of memnodes.
3049
//
3050
// In the following example, the CheckCastPP nodes are the cast of allocation
// results and the allocation of node 29 is non-escaped and eligible to be an
// instance type.
//
// We start with:
//
// 7 Parm #memory
// 10 ConI "12"
// 19 CheckCastPP "Foo"
// 20 AddP _ 19 19 10 Foo+12 alias_index=4
// 29 CheckCastPP "Foo"
// 30 AddP _ 29 29 10 Foo+12 alias_index=4
//
// 40 StoreP 25 7 20 ... alias_index=4
// 50 StoreP 35 40 30 ... alias_index=4
// 60 StoreP 45 50 20 ... alias_index=4
// 70 LoadP _ 60 30 ... alias_index=4
// 80 Phi 75 50 60 Memory alias_index=4
// 90 LoadP _ 80 30 ... alias_index=4
// 100 LoadP _ 80 20 ... alias_index=4
//
//
// Phase 1 creates an instance type for node 29 assigning it an instance id of 24
// and creating a new alias index for node 30. This gives:
//
// 7 Parm #memory
// 10 ConI "12"
// 19 CheckCastPP "Foo"
// 20 AddP _ 19 19 10 Foo+12 alias_index=4
// 29 CheckCastPP "Foo" iid=24
// 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24
//
// 40 StoreP 25 7 20 ... alias_index=4
// 50 StoreP 35 40 30 ... alias_index=6
// 60 StoreP 45 50 20 ... alias_index=4
// 70 LoadP _ 60 30 ... alias_index=6
// 80 Phi 75 50 60 Memory alias_index=4
// 90 LoadP _ 80 30 ... alias_index=6
// 100 LoadP _ 80 20 ... alias_index=4
//
// In phase 2, new memory inputs are computed for the loads and stores,
// and a new version of the phi is created. In phase 4, the inputs to
// node 80 are updated and then the memory nodes are updated with the
// values computed in phase 2. This results in:
//
// 7 Parm #memory
// 10 ConI "12"
// 19 CheckCastPP "Foo"
// 20 AddP _ 19 19 10 Foo+12 alias_index=4
// 29 CheckCastPP "Foo" iid=24
// 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24
//
// 40 StoreP 25 7 20 ... alias_index=4
// 50 StoreP 35 7 30 ... alias_index=6
// 60 StoreP 45 40 20 ... alias_index=4
// 70 LoadP _ 50 30 ... alias_index=6
// 80 Phi 75 40 60 Memory alias_index=4
// 120 Phi 75 50 50 Memory alias_index=6
// 90 LoadP _ 120 30 ... alias_index=6
// 100 LoadP _ 80 20 ... alias_index=4
//
void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist, GrowableArray<ArrayCopyNode*> &arraycopy_worklist) {
GrowableArray<Node *> memnode_worklist;
GrowableArray<PhiNode *> orig_phis;
PhaseIterGVN *igvn = _igvn;
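// Alias indexes created from here on (>= new_index_start) correspond to the
// new instance types introduced by this pass.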
uint new_index_start = (uint) _compile->num_alias_types();
VectorSet visited;
ideal_nodes.clear(); // Reset for use with set_map/get_map.
uint unique_old = _compile->unique();

// Phase 1: Process possible allocations from alloc_worklist.
// Create instance types for the CheckCastPP for allocations where possible.
//
// (Note: don't forget to change the order of the second AddP node on
// the alloc_worklist if the order of the worklist processing is changed,
// see the comment in find_second_addp().)
//
while (alloc_worklist.length() != 0) {
Node *n = alloc_worklist.pop();
uint ni = n->_idx;
if (n->is_Call()) {
CallNode *alloc = n->as_Call();
// copy escape information to call node
PointsToNode* ptn = ptnode_adr(alloc->_idx);
PointsToNode::EscapeState es = ptn->escape_state();
// We have an allocation or call which returns a Java object,
// see if it is non-escaped.
if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable()) {
continue;
}
// Find CheckCastPP for the allocate or for the return value of a call
n = alloc->result_cast();
if (n == NULL) { // No uses except Initialize node
if (alloc->is_Allocate()) {
// Set the scalar_replaceable flag for allocation
// so it could be eliminated if it has no uses.
alloc->as_Allocate()->_is_scalar_replaceable = true;
}
continue;
}
if (!n->is_CheckCastPP()) { // not unique CheckCastPP.
// We could reach here for the allocate case if one Initialize is associated with many allocations.
if (alloc->is_Allocate()) {
alloc->as_Allocate()->_is_scalar_replaceable = false;
}
continue;
}

// The inline code for Object.clone() casts the allocation result to
// java.lang.Object and then to the actual type of the allocated
// object. Detect this case and use the second cast.
// Also detect the j.l.reflect.Array.newInstance(jobject, jint) case when
// the allocation result is cast to java.lang.Object and then
// to the actual Array type.
if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL
&& (alloc->is_AllocateArray() ||
igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeKlassPtr::OBJECT)) {
Node *cast2 = NULL;
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
Node *use = n->fast_out(i);
if (use->is_CheckCastPP()) {
cast2 = use;
break;
}
}
if (cast2 != NULL) {
n = cast2;
} else {
// Non-scalar replaceable if the allocation type is unknown statically
// (reflection allocation); the object can't be restored during
// deoptimization without a precise type.
continue;
}
}

const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
if (t == NULL) {
continue; // not a TypeOopPtr
}
if (!t->klass_is_exact()) {
continue; // not a unique type
}
if (alloc->is_Allocate()) {
// Set the scalar_replaceable flag for allocation
// so it could be eliminated.
alloc->as_Allocate()->_is_scalar_replaceable = true;
}
set_escape_state(ptnode_adr(n->_idx), es); // CheckCastPP escape state
// In order for an object to be scalar-replaceable, it must be:
// - a direct allocation (not a call returning an object)
// - non-escaping
// - eligible to be a unique type
// - not determined to be ineligible by escape analysis
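// Keep a two-way mapping between the allocation and its unique CheckCastPP
// so later phases can look up either node from the other via get_map().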
set_map(alloc, n);
set_map(n, alloc);
const TypeOopPtr* tinst = t->cast_to_instance_id(ni);
igvn->hash_delete(n);
igvn->set_type(n, tinst);
n->raise_bottom_type(tinst);
igvn->hash_insert(n);
record_for_optimizer(n);
// Allocate an alias index for the header fields. Accesses to
// the header emitted during macro expansion wouldn't have
// correct memory state otherwise.
_compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes()));
_compile->get_alias_index(tinst->add_offset(oopDesc::klass_offset_in_bytes()));
if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {

// First, put on the worklist all Field edges from Connection Graph
// which is more accurate than putting immediate users from Ideal Graph.
for (EdgeIterator e(ptn); e.has_next(); e.next()) {
PointsToNode* tgt = e.get();
if (tgt->is_Arraycopy()) {
continue;
}
Node* use = tgt->ideal_node();
assert(tgt->is_Field() && use->is_AddP(),
"only AddP nodes are Field edges in CG");
if (use->outcnt() > 0) { // Don't process dead nodes
Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
if (addp2 != NULL) {
assert(alloc->is_AllocateArray(),"array allocation was expected");
alloc_worklist.append_if_missing(addp2);
}
alloc_worklist.append_if_missing(use);
}
}

// An allocation may have an Initialize which has raw stores. Scan
// the users of the raw allocation result and push AddP users
// on alloc_worklist.
Node *raw_result = alloc->proj_out_or_null(TypeFunc::Parms);
assert (raw_result != NULL, "must have an allocation result");
for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
Node *use = raw_result->fast_out(i);
if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
Node* addp2 = find_second_addp(use, raw_result);
if (addp2 != NULL) {
assert(alloc->is_AllocateArray(),"array allocation was expected");
alloc_worklist.append_if_missing(addp2);
}
alloc_worklist.append_if_missing(use);
} else if (use->is_MemBar()) {
memnode_worklist.append_if_missing(use);
}
}
}
} else if (n->is_AddP()) {
JavaObjectNode* jobj = unique_java_object(get_addp_base(n));
if (jobj == NULL || jobj == phantom_obj) {
#ifdef ASSERT
ptnode_adr(get_addp_base(n)->_idx)->dump();
ptnode_adr(n->_idx)->dump();
assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
#endif
_compile->record_failure(C2Compiler::retry_no_escape_analysis());
return;
}
Node *base = get_map(jobj->idx()); // CheckCastPP node
if (!split_AddP(n, base)) continue; // wrong type from dead path
} else if (n->is_Phi() ||
n->is_CheckCastPP() ||
n->is_EncodeP() ||
n->is_DecodeN() ||
(n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
if (visited.test_set(n->_idx)) {
assert(n->is_Phi(), "loops only through Phi's");
continue; // already processed
}
JavaObjectNode* jobj = unique_java_object(n);
if (jobj == NULL || jobj == phantom_obj) {
#ifdef ASSERT
ptnode_adr(n->_idx)->dump();
assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
#endif
_compile->record_failure(C2Compiler::retry_no_escape_analysis());
return;
} else {
Node *val = get_map(jobj->idx()); // CheckCastPP node
TypeNode *tn = n->as_Type();
const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
assert(tinst != NULL && tinst->is_known_instance() &&
tinst->instance_id() == jobj->idx() , "instance type expected.");

const Type *tn_type = igvn->type(tn);
const TypeOopPtr *tn_t;
if (tn_type->isa_narrowoop()) {
tn_t = tn_type->make_ptr()->isa_oopptr();
} else {
tn_t = tn_type->isa_oopptr();
}
if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) {
if (tn_type->isa_narrowoop()) {
tn_type = tinst->make_narrowoop();
} else {
tn_type = tinst;
}
igvn->hash_delete(tn);
igvn->set_type(tn, tn_type);
tn->set_type(tn_type);
igvn->hash_insert(tn);
record_for_optimizer(n);
} else {
assert(tn_type == TypePtr::NULL_PTR ||
tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass()),
"unexpected type");
continue; // Skip dead path with different type
}
}
} else {
debug_only(n->dump();)
assert(false, "EA: unexpected node");
continue;
}
// push allocation's users on appropriate worklist
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
Node *use = n->fast_out(i);
if(use->is_Mem() && use->in(MemNode::Address) == n) {
// Load/store to instance's field
memnode_worklist.append_if_missing(use);
} else if (use->is_MemBar()) {
if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
memnode_worklist.append_if_missing(use);
}
} else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
Node* addp2 = find_second_addp(use, n);
if (addp2 != NULL) {
alloc_worklist.append_if_missing(addp2);
}
alloc_worklist.append_if_missing(use);
} else if (use->is_Phi() ||
use->is_CheckCastPP() ||
use->is_EncodeNarrowPtr() ||
use->is_DecodeNarrowPtr() ||
(use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
alloc_worklist.append_if_missing(use);
#ifdef ASSERT
} else if (use->is_Mem()) {
assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
} else if (use->is_MergeMem()) {
assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
} else if (use->is_SafePoint()) {
// Look for MergeMem nodes for calls which reference unique allocation
// (through CheckCastPP nodes) even for debug info.
Node* m = use->in(TypeFunc::Memory);
if (m->is_MergeMem()) {
assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
}
} else if (use->Opcode() == Op_EncodeISOArray) {
if (use->in(MemNode::Memory) == n || use->in(3) == n) {
// EncodeISOArray overwrites destination array
memnode_worklist.append_if_missing(use);
}
} else {
uint op = use->Opcode();
if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
(use->in(MemNode::Memory) == n)) {
// They overwrite the memory edge corresponding to the destination array.
memnode_worklist.append_if_missing(use);
} else if (!(op == Op_CmpP || op == Op_Conv2B ||
op == Op_CastP2X || op == Op_StoreCM ||
op == Op_FastLock || op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
op == Op_SubTypeCheck ||
BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
n->dump();
use->dump();
assert(false, "EA: missing allocation reference path");
}
#endif
}
}

}

// Go over all ArrayCopy nodes and if one of the inputs has a unique
// type, record it in the ArrayCopy node so we know what memory this
// node uses/modifies.
for (int next = 0; next < arraycopy_worklist.length(); next++) {
ArrayCopyNode* ac = arraycopy_worklist.at(next);
Node* dest = ac->in(ArrayCopyNode::Dest);
if (dest->is_AddP()) {
dest = get_addp_base(dest);
}
JavaObjectNode* jobj = unique_java_object(dest);
if (jobj != NULL) {
Node *base = get_map(jobj->idx());
if (base != NULL) {
const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
ac->_dest_type = base_t;
}
}
Node* src = ac->in(ArrayCopyNode::Src);
if (src->is_AddP()) {
src = get_addp_base(src);
}
jobj = unique_java_object(src);
if (jobj != NULL) {
Node* base = get_map(jobj->idx());
if (base != NULL) {
const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
ac->_src_type = base_t;
}
}
}

// New alias types were created in split_AddP().
uint new_index_end = (uint) _compile->num_alias_types();
assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1");

// Phase 2: Process MemNode's from memnode_worklist. Compute new address types and
// new values for the Memory inputs (the Memory inputs are not
// actually updated until Phase 4).
if (memnode_worklist.length() == 0)
return; // nothing to do
while (memnode_worklist.length() != 0) {
Node *n = memnode_worklist.pop();
if (visited.test_set(n->_idx)) {
continue;
}
if (n->is_Phi() || n->is_ClearArray()) {
// we don't need to do anything, but the users must be pushed
} else if (n->is_MemBar()) { // Initialize, MemBar nodes
// we don't need to do anything, but the users must be pushed
n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
if (n == NULL) {
continue;
}
} else if (n->Opcode() == Op_StrCompressedCopy ||
n->Opcode() == Op_EncodeISOArray) {
// get the memory projection
n = n->find_out_with(Op_SCMemProj);
assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
} else {
assert(n->is_Mem(), "memory node required.");
Node *addr = n->in(MemNode::Address);
const Type *addr_t = igvn->type(addr);
if (addr_t == Type::TOP) {
continue;
}
assert (addr_t->isa_ptr() != NULL, "pointer type required.");
int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
assert ((uint)alias_idx < new_index_end, "wrong alias index");
Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
if (_compile->failing()) {
return;
}
if (mem != n->in(MemNode::Memory)) {
// We delay the memory edge update since we need the old one in
// the MergeMem code below when instance memory slices are separated.
set_map(n, mem);
}
if (n->is_Load()) {
continue; // don't push users
} else if (n->is_LoadStore()) {
// get the memory projection
n = n->find_out_with(Op_SCMemProj);
assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
}
}
// push user on appropriate worklist
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
Node *use = n->fast_out(i);
if (use->is_Phi() || use->is_ClearArray()) {
memnode_worklist.append_if_missing(use);
} else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
if (use->Opcode() == Op_StoreCM) { // Ignore cardmark stores
continue;
}
memnode_worklist.append_if_missing(use);
} else if (use->is_MemBar()) {
if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
memnode_worklist.append_if_missing(use);
}
#ifdef ASSERT
} else if(use->is_Mem()) {
assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
} else if (use->is_MergeMem()) {
assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
} else if (use->Opcode() == Op_EncodeISOArray) {
if (use->in(MemNode::Memory) == n || use->in(3) == n) {
// EncodeISOArray overwrites destination array
memnode_worklist.append_if_missing(use);
}
} else {
uint op = use->Opcode();
if ((use->in(MemNode::Memory) == n) &&
(op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
// They overwrite the memory edge corresponding to the destination array.
memnode_worklist.append_if_missing(use);
} else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
n->dump();
use->dump();
assert(false, "EA: missing memory path");
}
#endif
}
}
}

// Phase 3: Process MergeMem nodes from mergemem_worklist.
// Walk each memory slice moving the first node encountered of each
// instance type to the input corresponding to its alias index.
uint length = _mergemem_worklist.length();
for( uint next = 0; next < length; ++next ) {
MergeMemNode* nmm = _mergemem_worklist.at(next);
assert(!visited.test_set(nmm->_idx), "should not be visited before");
// Note: we don't want to use MergeMemStream here because we only want to
// scan inputs which exist at the start, not ones we add during processing.
// Note 2: MergeMem may already contain instance memory slices added
// during find_inst_mem() call when memory nodes were processed above.
igvn->hash_delete(nmm);
uint nslices = MIN2(nmm->req(), new_index_start);
for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
Node* mem = nmm->in(i);
Node* cur = NULL;
if (mem == NULL || mem->is_top()) {
continue;
}
// First, update mergemem by moving memory nodes to corresponding slices
// if their type became more precise since this mergemem was created.
while (mem->is_Mem()) {
const Type *at = igvn->type(mem->in(MemNode::Address));
if (at != Type::TOP) {
assert (at->isa_ptr() != NULL, "pointer type required.");
uint idx = (uint)_compile->get_alias_index(at->is_ptr());
if (idx == i) {
if (cur == NULL) {
cur = mem;
}
} else {
if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
nmm->set_memory_at(idx, mem);
}
}
}
mem = mem->in(MemNode::Memory);
}
nmm->set_memory_at(i, (cur != NULL) ? cur : mem);
// Find any instance of the current type if we haven't already
// encountered a memory slice of the instance along the memory chain.
for (uint ni = new_index_start; ni < new_index_end; ni++) {
if((uint)_compile->get_general_index(ni) == i) {
Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
if (nmm->is_empty_memory(m)) {
Node* result = find_inst_mem(mem, ni, orig_phis);
if (_compile->failing()) {
return;
}
nmm->set_memory_at(ni, result);
}
}
}
}
// Find the values for the rest of the instance types.
for (uint ni = new_index_start; ni < new_index_end; ni++) {
const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
Node* result = step_through_mergemem(nmm, ni, tinst);
if (result == nmm->base_memory()) {
// Didn't find instance memory, search through general slice recursively.
result = nmm->memory_at(_compile->get_general_index(ni));
result = find_inst_mem(result, ni, orig_phis);
if (_compile->failing()) {
return;
}
nmm->set_memory_at(ni, result);
}
}
igvn->hash_insert(nmm);
record_for_optimizer(nmm);
}

// Phase 4: Update the inputs of non-instance memory Phis and
// the Memory input of memnodes
// First update the inputs of any non-instance Phi's from
// which we split out an instance Phi. Note we don't have
// to recursively process Phi's encountered on the input memory
// chains as is done in split_memory_phi() since they will
// also be processed here.
for (int j = 0; j < orig_phis.length(); j++) {
PhiNode *phi = orig_phis.at(j);
int alias_idx = _compile->get_alias_index(phi->adr_type());
igvn->hash_delete(phi);
for (uint i = 1; i < phi->req(); i++) {
Node *mem = phi->in(i);
Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
if (_compile->failing()) {
return;
}
if (mem != new_mem) {
phi->set_req(i, new_mem);
}
}
igvn->hash_insert(phi);
record_for_optimizer(phi);
}

// Update the memory inputs of MemNodes with the value we computed
// in Phase 2 and move stores' memory users to the corresponding memory slices.
// Disable memory split verification code until the fix for 6984348.
// Currently it produces false negative results since it does not cover all cases.
#if 0 // ifdef ASSERT
visited.Reset();
Node_Stack old_mems(arena, _compile->unique() >> 2);
#endif
for (uint i = 0; i < ideal_nodes.size(); i++) {
Node* n = ideal_nodes.at(i);
Node* nmem = get_map(n->_idx);
assert(nmem != NULL, "sanity");
if (n->is_Mem()) {
#if 0 // ifdef ASSERT
Node* old_mem = n->in(MemNode::Memory);
if (!visited.test_set(old_mem->_idx)) {
old_mems.push(old_mem, old_mem->outcnt());
}
#endif
assert(n->in(MemNode::Memory) != nmem, "sanity");
if (!n->is_Load()) {
// Move memory users of a store first.
move_inst_mem(n, orig_phis);
}
// Now update memory input
igvn->hash_delete(n);
n->set_req(MemNode::Memory, nmem);
igvn->hash_insert(n);
record_for_optimizer(n);
} else {
assert(n->is_Allocate() || n->is_CheckCastPP() ||
n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
}
}
#if 0 // ifdef ASSERT
// Verify that memory was split correctly
while (old_mems.is_nonempty()) {
Node* old_mem = old_mems.node();
uint old_cnt = old_mems.index();
old_mems.pop();
assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
}
#endif
}

#ifndef PRODUCT
static const char *node_type_names[] = {
"UnknownType",
"JavaObject",
"LocalVar",
"Field",
"Arraycopy"
};

static const char *esc_names[] = {
"UnknownEscape",
"NoEscape",
"ArgEscape",
"GlobalEscape"
};

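// Dump this connection graph node: its type, optional escape states, the
// field's bases (for Field nodes), its edges and uses, and the ideal node it maps to.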
void PointsToNode::dump(bool print_state) const {
NodeType nt = node_type();
tty->print("%s ", node_type_names[(int) nt]);
if (print_state) {
EscapeState es = escape_state();
EscapeState fields_es = fields_escape_state();
tty->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]);
if (nt == PointsToNode::JavaObject && !this->scalar_replaceable()) {
tty->print("NSR ");
}
}
if (is_Field()) {
FieldNode* f = (FieldNode*)this;
if (f->is_oop()) {
tty->print("oop ");
}
if (f->offset() > 0) {
tty->print("+%d ", f->offset());
}
tty->print("(");
for (BaseIterator i(f); i.has_next(); i.next()) {
PointsToNode* b = i.get();
tty->print(" %d%s", b->idx(),(b->is_JavaObject() ? "P" : ""));
}
tty->print(" )");
}
tty->print("[");
for (EdgeIterator i(this); i.has_next(); i.next()) {
PointsToNode* e = i.get();
tty->print(" %d%s%s", e->idx(),(e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : "");
}
tty->print(" [");
for (UseIterator i(this); i.has_next(); i.next()) {
PointsToNode* u = i.get();
bool is_base = false;
if (PointsToNode::is_base_use(u)) {
is_base = true;
u = PointsToNode::get_use_node(u)->as_Field();
}
tty->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : "");
}
tty->print(" ]] ");
if (_node == NULL) {
tty->print_cr("<null>");
} else {
_node->dump();
}
}

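// Print the connection graph nodes for non-escaping allocations and boxing
// method calls (all JavaObject nodes when Verbose is set), together with the
// locals and fields that reference them.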
void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) {
bool first = true;
int ptnodes_length = ptnodes_worklist.length();
for (int i = 0; i < ptnodes_length; i++) {
PointsToNode *ptn = ptnodes_worklist.at(i);
if (ptn == NULL || !ptn->is_JavaObject()) {
continue;
}
PointsToNode::EscapeState es = ptn->escape_state();
if ((es != PointsToNode::NoEscape) && !Verbose) {
continue;
}
Node* n = ptn->ideal_node();
if (n->is_Allocate() || (n->is_CallStaticJava() &&
n->as_CallStaticJava()->is_boxing_method())) {
if (first) {
tty->cr();
tty->print("======== Connection graph for ");
_compile->method()->print_short_name();
tty->cr();
first = false;
}
ptn->dump();
// Print all locals and fields which reference this allocation
for (UseIterator j(ptn); j.has_next(); j.next()) {
PointsToNode* use = j.get();
if (use->is_LocalVar()) {
use->dump(Verbose);
} else if (Verbose) {
use->dump();
}
}
tty->cr();
}
}
}
#endif

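// Enqueue a node and all of its users for another round of IGVN so the
// updated types and memory edges are propagated.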
void ConnectionGraph::record_for_optimizer(Node *n) {
_igvn->_worklist.push(n);
_igvn->add_users_to_worklist(n);
}