/*
 * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/sharedRuntime.hpp"
#include "ci/ciNativeEntryPoint.hpp"
#include "utilities/debug.hpp"

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

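// The three overloads below answer the same question at different levels of
// context: whether a call site whose symbolic reference is a method handle
// intrinsic (MH.invokeBasic/linkTo*) has been resolved to an ordinary method,
// i.e. the method handle linker was inlined through at this bci.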
bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
  return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
  ciMethod* symbolic_info = caller->get_method_at_bci(bci);
  return is_inlined_method_handle_intrinsic(symbolic_info, m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* symbolic_info, ciMethod* m) {
  return symbolic_info->is_method_handle_intrinsic() && !m->is_method_handle_intrinsic();
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
private:
  bool _is_osr;
  float _expected_uses;

public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr = is_osr;
    _expected_uses = expected_uses;
    assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
  }

  virtual bool is_parse() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
  int is_osr() { return _is_osr; }

};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL; // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool _separate_io_proj;

protected:
  void set_call_node(CallStaticJavaNode* call) { _call_node = call; }

public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _separate_io_proj(separate_io_proj)
  {
  }
  virtual JVMState* generate(JVMState* jvms);

  virtual CallNode* call_node() const { return _call_node; }
  virtual CallGenerator* with_call_node(CallNode* call) {
    DirectCallGenerator* dcg = new DirectCallGenerator(method(), _separate_io_proj);
    dcg->set_call_node(call->as_CallStaticJava());
    return dcg;
  }
};

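// Emits an out-of-line CallStaticJavaNode to the method. For non-static
// targets the receiver is explicitly null-checked first and the call is
// marked as an optimized virtual call.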
JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode* call = new CallStaticJavaNode(kit.C, tf(), target, method());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
    // additional information about the method being invoked should be attached
    // to the call site to make resolution logic work
    // (see SharedRuntime::resolve_static_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call; // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_intrinsic() ||
        method()->is_compiled_lambda_form()) {
      call->set_method_handle_invoke(true);
    }
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
private:
  int _vtable_index;
  bool _separate_io_proj;
  CallDynamicJavaNode* _call_node;

protected:
  void set_call_node(CallDynamicJavaNode* call) { _call_node = call; }

public:
  VirtualCallGenerator(ciMethod* method, int vtable_index, bool separate_io_proj)
    : CallGenerator(method), _vtable_index(vtable_index), _separate_io_proj(separate_io_proj), _call_node(NULL)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool is_virtual() const { return true; }
  virtual JVMState* generate(JVMState* jvms);

  virtual CallNode* call_node() const { return _call_node; }
  int vtable_index() const { return _vtable_index; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    VirtualCallGenerator* cg = new VirtualCallGenerator(method(), _vtable_index, _separate_io_proj);
    cg->set_call_node(call->as_CallDynamicJava());
    return cg;
  }
};

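// Emits an out-of-line CallDynamicJavaNode (inline cache or vtable stub).
// A constantly-null receiver becomes an uncommon trap; otherwise the receiver
// is null-checked explicitly or left to the implicit null check machinery.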
JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  kit.C->print_inlining_update(this);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it. The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
    ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
    int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
    kit.inc_sp(arg_size); // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod *caller = kit.method();
  ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
      ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
        >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches ");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode* call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index);
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call (optimized virtual or virtual)
    // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
    // about the method being invoked should be attached to the call site to
    // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call; // Save the call node in case we need it later

  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false /*must_throw*/, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call. Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != NULL) return NULL;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller. Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != NULL) return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index, false /*separate_io_projs*/);
}

// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
private:
  jlong _unique_id;   // unique id for log compilation
  bool _is_pure_call; // a hint that the call doesn't have important side effects to care about

protected:
  CallGenerator* _inline_cg;
  virtual bool do_late_inline_check(Compile* C, JVMState* jvms) { return true; }
  virtual CallGenerator* inline_cg() const { return _inline_cg; }
  virtual bool is_pure_call() const { return _is_pure_call; }

public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg, bool is_pure_call = false) :
    DirectCallGenerator(method, true), _unique_id(0), _is_pure_call(is_pure_call), _inline_cg(inline_cg) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_assert_ready();
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
    C->print_inlining_move_to(this);
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineCallGenerator* cg = new LateInlineCallGenerator(method(), _inline_cg, _is_pure_call);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  bool _input_not_const;

  virtual bool do_late_inline_check(Compile* C, JVMState* jvms);

public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, NULL), _caller(caller), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);

    Compile* C = Compile::current();
    if (_input_not_const) {
      // inlining won't be possible so no need to enqueue right now.
      call_node()->set_generator(this);
    } else {
      C->add_late_inline(this);
    }
    return new_jvms;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineMHCallGenerator* cg = new LateInlineMHCallGenerator(_caller, method(), _input_not_const);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
  // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
  bool allow_inline = C->inlining_incrementally();
  bool input_not_const = true;
  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, input_not_const);
  assert(!input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place

  if (cg != NULL) {
    assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline, "we're doing late inlining");
    _inline_cg = cg;
    C->dec_number_of_mh_late_inlines();
    return true;
  } else {
    // Method handle call which has a constant appendix argument should be either inlined or replaced with a direct call
    // unless there's a signature mismatch between caller and callee. If the failure occurs, there's not much to be improved later,
    // so don't reinstall the generator to avoid pushing the generator between IGVN and incremental inlining indefinitely.
    return false;
  }
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  assert(IncrementalInlineMH, "required");
  Compile::current()->inc_number_of_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}

// Allow inlining decisions to be delayed
class LateInlineVirtualCallGenerator : public VirtualCallGenerator {
private:
  jlong _unique_id; // unique id for log compilation
  CallGenerator* _inline_cg;
  ciMethod* _callee;
  bool _is_pure_call;
  float _prof_factor;

protected:
  virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
  virtual CallGenerator* inline_cg() const { return _inline_cg; }
  virtual bool is_pure_call() const { return _is_pure_call; }

public:
  LateInlineVirtualCallGenerator(ciMethod* method, int vtable_index, float prof_factor)
    : VirtualCallGenerator(method, vtable_index, true /*separate_io_projs*/),
      _unique_id(0), _inline_cg(NULL), _callee(NULL), _is_pure_call(false), _prof_factor(prof_factor) {}

  virtual bool is_late_inline() const { return true; }

  virtual bool is_virtual_late_inline() const { return true; }

  // Convert the CallDynamicJava into an inline
  virtual void do_late_inline();

  virtual void set_callee_method(ciMethod* m) {
    assert(_callee == NULL, "repeated inlining attempt");
    _callee = m;
  }

  virtual JVMState* generate(JVMState* jvms) {
    // Emit the CallDynamicJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    JVMState* new_jvms = VirtualCallGenerator::generate(jvms);
    if (call_node() != NULL) {
      call_node()->set_generator(this);
    }
    return new_jvms;
  }

  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_assert_ready();
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
    C->print_inlining_move_to(this);
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineVirtualCallGenerator* cg = new LateInlineVirtualCallGenerator(method(), vtable_index(), _prof_factor);
    cg->set_call_node(call->as_CallDynamicJava());
    return cg;
  }
};

bool LateInlineVirtualCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
  // Method handle linker case is handled in CallDynamicJavaNode::Ideal().
  // Unless inlining is performed, _override_symbolic_info bit will be set in DirectCallGenerator::generate().

  // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
  bool allow_inline = C->inlining_incrementally();
  CallGenerator* cg = C->call_generator(_callee,
                                        vtable_index(),
                                        false /*call_does_dispatch*/,
                                        jvms,
                                        allow_inline,
                                        _prof_factor,
                                        NULL /*speculative_receiver_type*/,
                                        true /*allow_intrinsics*/);

  if (cg != NULL) {
    assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline, "we're doing late inlining");
    _inline_cg = cg;
    return true;
  } else {
    // Virtual call which provably doesn't dispatch should be either inlined or replaced with a direct call.
    assert(false, "no progress");
    return false;
  }
}

CallGenerator* CallGenerator::for_late_inline_virtual(ciMethod* m, int vtable_index, float prof_factor) {
  assert(IncrementalInlineVirtual, "required");
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new LateInlineVirtualCallGenerator(m, vtable_index, prof_factor);
}

void LateInlineCallGenerator::do_late_inline() {
  CallGenerator::do_late_inline_helper();
}

void LateInlineMHCallGenerator::do_late_inline() {
  CallGenerator::do_late_inline_helper();
}

void LateInlineVirtualCallGenerator::do_late_inline() {
  assert(_callee != NULL, "required"); // set up in CallDynamicJavaNode::Ideal
  CallGenerator::do_late_inline_helper();
}

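// Returns true if n has a use that is not plain debug info on a safepoint,
// e.g. a call that consumes n as an actual argument.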
static bool has_non_debug_usages(Node* n) {
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* m = n->fast_out(i);
    if (!m->is_SafePoint()
        || (m->is_Call() && m->as_Call()->has_non_debug_use(n))) {
      return true;
    }
  }
  return false;
}

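// True when ciInstanceKlass reports that the autobox cache of the boxing
// method's holder class is still valid, so the box can be scalarized safely.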
static bool is_box_cache_valid(CallNode* call) {
  ciInstanceKlass* klass = call->as_CallStaticJava()->method()->holder();
  return klass->is_box_cache_valid();
}

// Delay boxing to runtime; treat the box as a scalarized object in debug info.
static void scalarize_debug_usages(CallNode* call, Node* resproj) {
  GraphKit kit(call->jvms());
  PhaseGVN& gvn = kit.gvn();

  ProjNode* res = resproj->as_Proj();
  ciInstanceKlass* klass = call->as_CallStaticJava()->method()->holder();
  int n_fields = klass->nof_nonstatic_fields();
  assert(n_fields == 1, "the klass must be an auto-boxing klass");

  for (DUIterator_Last imin, i = res->last_outs(imin); i >= imin;) {
    SafePointNode* sfpt = res->last_out(i)->as_SafePoint();
    uint first_ind = sfpt->req() - sfpt->jvms()->scloff();
    Node* sobj = new SafePointScalarObjectNode(gvn.type(res)->isa_oopptr(),
#ifdef ASSERT
                                               call,
#endif // ASSERT
                                               first_ind, n_fields, true);
    sobj->init_req(0, kit.root());
    sfpt->add_req(call->in(TypeFunc::Parms));
    sobj = gvn.transform(sobj);
    JVMState* jvms = sfpt->jvms();
    jvms->set_endoff(sfpt->req());
    int start = jvms->debug_start();
    int end = jvms->debug_end();
    int num_edges = sfpt->replace_edges_in_range(res, sobj, start, end, &gvn);
    i -= num_edges;
  }

  assert(res->outcnt() == 0, "the box must have no use after replace");

#ifndef PRODUCT
  if (PrintEliminateAllocations) {
    tty->print("++++ Eliminated: %d ", call->_idx);
    call->as_CallStaticJava()->method()->print_short_name(tty);
    tty->cr();
  }
#endif
}

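// Shared driver for all late-inline generators: validates the saved call node,
// rebuilds a JVMState and map from the call's inputs, runs the subclass's
// do_late_inline_check(), and, if that succeeds, generates the inlined graph
// (or simply removes a pure call whose result is unused) and splices it in
// via GraphKit::replace_call().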
void CallGenerator::do_late_inline_helper() {
  assert(is_late_inline(), "only late inline allowed");

  // Can't inline it
  CallNode* call = call_node();
  if (call == NULL || call->outcnt() == 0 ||
      call->in(0) == NULL || call->in(0)->is_top()) {
    return;
  }

  const TypeTuple *r = call->tf()->domain();
  for (int i1 = 0; i1 < method()->arg_size(); i1++) {
    if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }
  if (call->in(TypeFunc::Memory)->is_MergeMem()) {
    MergeMemNode* merge_mem = call->in(TypeFunc::Memory)->as_MergeMem();
    if (merge_mem->base_memory() == merge_mem->empty_memory()) {
      return; // dead path
    }
  }

  // check for unreachable loop
  CallProjections callprojs;
  call->extract_projections(&callprojs, true);
  if ((callprojs.fallthrough_catchproj == call->in(0)) ||
      (callprojs.catchall_catchproj == call->in(0)) ||
      (callprojs.fallthrough_memproj == call->in(TypeFunc::Memory)) ||
      (callprojs.catchall_memproj == call->in(TypeFunc::Memory)) ||
      (callprojs.fallthrough_ioproj == call->in(TypeFunc::I_O)) ||
      (callprojs.catchall_ioproj == call->in(TypeFunc::I_O)) ||
      (callprojs.resproj != NULL && call->find_edge(callprojs.resproj) != -1) ||
      (callprojs.exobj != NULL && call->find_edge(callprojs.exobj) != -1)) {
    return;
  }

  Compile* C = Compile::current();
  // Remove inlined methods from Compiler's lists.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  bool result_not_used = false;

  if (is_pure_call()) {
    if (is_boxing_late_inline() && callprojs.resproj != nullptr) {
      // Replace the box node with a scalar node only when it is referenced exclusively by debug info.
      assert(call->as_CallStaticJava()->is_boxing_method(), "sanity");
      if (!has_non_debug_usages(callprojs.resproj) && is_box_cache_valid(call)) {
        scalarize_debug_usages(call, callprojs.resproj);
      }
    }

    // The call is marked as pure (no important side effects); if its result
    // is not used, it is safe to remove the call.
    result_not_used = (callprojs.resproj == NULL || callprojs.resproj->outcnt() == 0);
  }

  if (result_not_used) {
    GraphKit kit(call->jvms());
    kit.replace_call(call, C->top(), true);
  } else {
    // Make a clone of the JVMState that is appropriate to use for driving a parse
    JVMState* old_jvms = call->jvms();
    JVMState* jvms = old_jvms->clone_shallow(C);
    uint size = call->req();
    SafePointNode* map = new SafePointNode(size, jvms);
    for (uint i1 = 0; i1 < size; i1++) {
      map->init_req(i1, call->in(i1));
    }

    // Make sure the state is a MergeMem for parsing.
    if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
      Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
      C->initial_gvn()->set_type_bottom(mem);
      map->set_req(TypeFunc::Memory, mem);
    }

    uint nargs = method()->arg_size();
    // blow away old call arguments
    Node* top = C->top();
    for (uint i1 = 0; i1 < nargs; i1++) {
      map->set_req(TypeFunc::Parms + i1, top);
    }
    jvms->set_map(map);

    // Make enough space in the expression stack to transfer
    // the incoming arguments and return value.
    map->ensure_stack(jvms, jvms->method()->max_stack());
    for (uint i1 = 0; i1 < nargs; i1++) {
      map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
    }

    C->print_inlining_assert_ready();

    C->print_inlining_move_to(this);

    C->log_late_inline(this);

    // JVMState is ready, so time to perform some checks and prepare for inlining attempt.
    if (!do_late_inline_check(C, jvms)) {
      map->disconnect_inputs(C);
      C->print_inlining_update_delayed(this);
      return;
    }

    // Setup default node notes to be picked up by the inlining
    Node_Notes* old_nn = C->node_notes_at(call->_idx);
    if (old_nn != NULL) {
      Node_Notes* entry_nn = old_nn->clone(C);
      entry_nn->set_jvms(jvms);
      C->set_default_node_notes(entry_nn);
    }

    // A virtual call involves a receiver null check which can be made implicit.
    if (is_virtual_late_inline()) {
      GraphKit kit(jvms);
      kit.null_check_receiver();
      jvms = kit.transfer_exceptions_into_jvms();
    }

    // Now perform the inlining using the synthesized JVMState
    JVMState* new_jvms = inline_cg()->generate(jvms);
    if (new_jvms == NULL) return; // no change
    if (C->failing()) return;

    // Capture any exceptional control flow
    GraphKit kit(new_jvms);

    // Find the result object
    Node* result = C->top();
    int result_size = method()->return_type()->size();
    if (result_size != 0 && !kit.stopped()) {
      result = (result_size == 1) ? kit.pop() : kit.pop_pair();
    }

    if (inline_cg()->is_inline()) {
      C->set_has_loops(C->has_loops() || inline_cg()->method()->has_loops());
      C->env()->notice_inlined_method(inline_cg()->method());
    }
    C->set_inlining_progress(true);
    C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup
    kit.replace_call(call, result, true);
  }
}

class LateInlineStringCallGenerator : public LateInlineCallGenerator {

public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_string_late_inline() const { return true; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineStringCallGenerator* cg = new LateInlineStringCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineStringCallGenerator(method, inline_cg);
}

class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {

public:
  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_boxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_boxing_late_inline() const { return true; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineBoxingCallGenerator* cg = new LateInlineBoxingCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineBoxingCallGenerator(method, inline_cg);
}

class LateInlineVectorReboxingCallGenerator : public LateInlineCallGenerator {

public:
  LateInlineVectorReboxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_vector_reboxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineVectorReboxingCallGenerator* cg = new LateInlineVectorReboxingCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

//   static CallGenerator* for_vector_reboxing_late_inline(ciMethod* m, CallGenerator* inline_cg);
CallGenerator* CallGenerator::for_vector_reboxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineVectorReboxingCallGenerator(method, inline_cg);
}

//------------------------PredictedCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class PredictedCallGenerator : public CallGenerator {
  ciKlass* _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float _hit_prob;
  bool _exact_check;

public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, bool exact_check,
                         float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
    if (hit_prob > PROB_MAX) hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN) hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed = if_missed;
    _if_hit = if_hit;
    _hit_prob = hit_prob;
    _exact_check = exact_check;
  }

  virtual bool is_virtual() const { return true; }
  virtual bool is_inline() const { return _if_hit->is_inline(); }
  virtual bool is_deferred() const { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit,
                                    /*exact_check=*/true, hit_prob);
}

CallGenerator* CallGenerator::for_guarded_call(ciKlass* guarded_receiver,
                                               CallGenerator* if_missed,
                                               CallGenerator* if_hit) {
  return new PredictedCallGenerator(guarded_receiver, if_missed, if_hit,
                                    /*exact_check=*/false, PROB_ALWAYS);
}

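// Null-checks the receiver, then emits a receiver type (or subtype) check:
// the hit path runs _if_hit (normally an inline), the miss path runs
// _if_missed, and the two resulting states are merged through a region.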
JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);
  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' exact='%d' klass='%d'",
              jvms->bci(), (_exact_check ? 1 : 0), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  // Make a copy of the replaced nodes in case we need to restore them
  ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
  replaced_nodes.clone();

  Node* casted_receiver = receiver; // will get updated in place...
  Node* slow_ctl = NULL;
  if (_exact_check) {
    slow_ctl = kit.type_check_receiver(receiver, _predicted_receiver, _hit_prob,
                                       &casted_receiver);
  } else {
    slow_ctl = kit.subtype_check_receiver(receiver, _predicted_receiver,
                                          &casted_receiver);
  }

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms = NULL;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (kit.failing())
        return NULL; // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // The instance does not exactly match the desired type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // fall through if the instance exactly matches the desired type
  kit.replace_in_map(receiver, casted_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // There are 2 branches and the replaced nodes are only valid on
  // one: restore the replaced nodes to what they were before the
  // branch.
  kit.map()->set_replaced_nodes(replaced_nodes);

  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  // Merge memory
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if( i >= limit ) break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


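// Entry point for MethodHandle intrinsic call sites (invokeBasic/linkTo*).
// Tries to resolve the call to a concrete target right away; if that fails,
// it either defers the decision via a late-inline MH generator or falls back
// to an out-of-line direct call.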
CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline) {
  assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const);
  Compile* C = Compile::current();
  if (cg != NULL) {
    if (AlwaysIncrementalInline) {
      return CallGenerator::for_late_inline(callee, cg);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

  if (IncrementalInlineMH && call_site_count > 0 &&
      (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
    return CallGenerator::for_direct_call(callee);
  }
}

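// Internal class which emits a call to a native entry point resolved from a
// constant NativeEntryPoint (used for the linkToNative intrinsic below).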
class NativeCallGenerator : public CallGenerator {
private:
  address _call_addr;
  ciNativeEntryPoint* _nep;
public:
  NativeCallGenerator(ciMethod* m, address call_addr, ciNativeEntryPoint* nep)
    : CallGenerator(m), _call_addr(call_addr), _nep(nep) {}

  virtual JVMState* generate(JVMState* jvms);
};

JVMState* NativeCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);

  Node* call = kit.make_native_call(_call_addr, tf(), method()->arg_size(), _nep); // -fallback, - nep
  if (call == NULL) return NULL;

  kit.C->print_inlining_update(this);
  if (kit.C->log() != NULL) {
    kit.C->log()->elem("l2n_intrinsification_success bci='%d' entry_point='" INTPTR_FORMAT "'", jvms->bci(), p2i(_call_addr));
  }

  return kit.transfer_exceptions_into_jvms();
}

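// Tries to turn a MethodHandle intrinsic call site into a call generator for
// the actual target. This only works when the MethodHandle (invokeBasic) or
// MemberName/NativeEntryPoint (linkTo*) argument is a compile-time constant;
// otherwise input_not_const remains true and NULL is returned.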
CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  if (StressMethodHandleLinkerInlining) {
    allow_inline = false;
  }
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
        const int vtable_index = Method::invalid_vtable_index;

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        CallGenerator* cg = C->call_generator(target, vtable_index,
                                              false /* call_does_dispatch */,
                                              jvms,
                                              allow_inline,
                                              PROB_ALWAYS);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "receiver not constant");
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      // Get MemberName argument:
      Node* member_name = kit.argument(callee->arg_size() - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        // In lambda forms we erase signature types to avoid resolving issues
        // involving class loaders. When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to their
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          Node* arg = kit.argument(0);
          const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
          const Type* sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
          if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
            const Type* recv_type = arg_type->join_speculative(sig_type); // keep speculative part
            Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, recv_type));
            kit.set_argument(0, cast_obj);
          }
        }
        // Cast reference arguments to their types.
        for (int i = 0, j = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            Node* arg = kit.argument(receiver_skip + j);
            const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
            const Type* sig_type = TypeOopPtr::make_from_klass(t->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              const Type* narrowed_arg_type = arg_type->join_speculative(sig_type); // keep speculative part
              Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, narrowed_arg_type));
              kit.set_argument(receiver_skip + j, cast_obj);
            }
          }
          j += t->size(); // long and double take two slots
        }

        // Try to get the most accurate receiver type
        const bool is_virtual = (iid == vmIntrinsics::_linkToVirtual);
        const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
        int vtable_index = Method::invalid_vtable_index;
        bool call_does_dispatch = false;

        ciKlass* speculative_receiver_type = NULL;
        if (is_virtual_or_interface) {
          ciInstanceKlass* klass = target->holder();
          Node* receiver_node = kit.argument(0);
          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters. They might be changed.
          // optimize_virtual_call() takes 2 different holder
          // arguments for a corner case that doesn't apply here (see
          // Parse::do_call())
          target = C->optimize_virtual_call(caller, klass, klass,
                                            target, receiver_type, is_virtual,
                                            call_does_dispatch, vtable_index, // out-parameters
                                            false /* check_access */);
          // We lack profiling at this call but type speculation may
          // provide us with a type
          speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
        }
        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
                                              allow_inline,
                                              PROB_ALWAYS,
                                              speculative_receiver_type);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "member_name not constant");
      }
    }
    break;

  case vmIntrinsics::_linkToNative:
    {
      Node* addr_n = kit.argument(1); // target address
      Node* nep_n = kit.argument(callee->arg_size() - 1); // NativeEntryPoint
      // This check needs to be kept in sync with the one in CallStaticJavaNode::Ideal
      if (addr_n->Opcode() == Op_ConL && nep_n->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeLong* addr_t = addr_n->bottom_type()->is_long();
        const TypeOopPtr* nep_t = nep_n->bottom_type()->is_oopptr();
        address addr = (address) addr_t->get_con();
        ciNativeEntryPoint* nep = nep_t->const_oop()->as_native_entry_point();
        return new NativeCallGenerator(callee, addr, nep);
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "NativeEntryPoint not constant");
      }
    }
    break;

  default:
    fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid));
    break;
  }
  return NULL;
}


//------------------------PredicatedIntrinsicGenerator------------------------------
// Internal class which handles all predicated Intrinsic calls.
class PredicatedIntrinsicGenerator : public CallGenerator {
  CallGenerator* _intrinsic;
  CallGenerator* _cg;

public:
  PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
                               CallGenerator* cg)
    : CallGenerator(cg->method())
  {
    _intrinsic = intrinsic;
    _cg = cg;
  }

  virtual bool is_virtual() const { return true; }
  virtual bool is_inline() const { return true; }
  virtual bool is_intrinsic() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
                                                       CallGenerator* cg) {
  return new PredicatedIntrinsicGenerator(intrinsic, cg);
}


JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
  // The code we want to generate here is:
  //    if (receiver == NULL)
  //      uncommon_Trap
  //    if (predicate(0))
  //      do_intrinsic(0)
  //    else
  //      if (predicate(1))
  //        do_intrinsic(1)
  //    ...
  //    else
  //      do_java_comp

  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicated_intrinsic bci='%d' method='%d'",
              jvms->bci(), log->identify(method()));
  }

  if (!method()->is_static()) {
    // We need an explicit receiver null_check before checking its type in predicate.
    // We share a map with the caller, so his JVMS gets adjusted.
    Node* receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      return kit.transfer_exceptions_into_jvms();
    }
  }

  int n_predicates = _intrinsic->predicates_count();
  assert(n_predicates > 0, "sanity");

  JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));

  // Region for normal compilation code if intrinsic failed.
  Node* slow_region = new RegionNode(1);

  int results = 0;
  for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
#ifdef ASSERT
    JVMState* old_jvms = kit.jvms();
    SafePointNode* old_map = kit.map();
    Node* old_io = old_map->i_o();
    Node* old_mem = old_map->memory();
    Node* old_exc = old_map->next_exception();
#endif
    Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
#ifdef ASSERT
    // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
    assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
    SafePointNode* new_map = kit.map();
    assert(old_io == new_map->i_o(), "generate_predicate should not change i_o");
    assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
    assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
#endif
    if (!kit.stopped()) {
      PreserveJVMState pjvms(&kit);
      // Generate intrinsic code:
      JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
      if (new_jvms == NULL) {
        // Intrinsic failed, use normal compilation path for this predicate.
        slow_region->add_req(kit.control());
      } else {
        kit.add_exception_states_from(new_jvms);
        kit.set_jvms(new_jvms);
        if (!kit.stopped()) {
          result_jvms[results++] = kit.jvms();
        }
      }
    }
    if (else_ctrl == NULL) {
      else_ctrl = kit.C->top();
    }
    kit.set_control(else_ctrl);
  }
  if (!kit.stopped()) {
    // Final 'else' after predicates.
    slow_region->add_req(kit.control());
  }
  if (slow_region->req() > 1) {
    PreserveJVMState pjvms(&kit);
    // Generate normal compilation code:
    kit.set_control(gvn.transform(slow_region));
    JVMState* new_jvms = _cg->generate(kit.sync_jvms());
    if (kit.failing())
      return NULL; // might happen because of NodeCountInliningCutoff
    assert(new_jvms != NULL, "must be");
    kit.add_exception_states_from(new_jvms);
    kit.set_jvms(new_jvms);
    if (!kit.stopped()) {
      result_jvms[results++] = kit.jvms();
    }
  }

  if (results == 0) {
    // All paths ended in uncommon traps.
    (void) kit.stop();
    return kit.transfer_exceptions_into_jvms();
  }

  if (results == 1) { // Only one path
    kit.set_jvms(result_jvms[0]);
    return kit.transfer_exceptions_into_jvms();
  }

  // Merge all paths.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(results + 1);
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  for (int i = 0; i < results; i++) {
    JVMState* jvms = result_jvms[i];
    int path = i + 1;
    SafePointNode* map = jvms->map();
    region->init_req(path, map->control());
    iophi->set_req(path, map->i_o());
    if (i == 0) {
      kit.set_jvms(jvms);
    } else {
      kit.merge_memory(map->merged_memory(), region, path);
    }
  }
  kit.set_control(gvn.transform(region));
  kit.set_i_o(gvn.transform(iophi));
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }

  // Merge debug info.
  Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  Node* map = kit.map();
  uint limit = map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if( i >= limit ) break;
    }
    Node* n = map->in(i);
    ins[0] = n;
    const Type* t = gvn.type(n);
    bool needs_phi = false;
    for (int j = 1; j < results; j++) {
      JVMState* jvms = result_jvms[j];
      Node* jmap = jvms->map();
      Node* m = NULL;
      if (jmap->req() > i) {
        m = jmap->in(i);
        if (m != n) {
          needs_phi = true;
          t = t->meet_speculative(gvn.type(m));
        }
      }
      ins[j] = m;
    }
    if (needs_phi) {
      Node* phi = PhiNode::make(region, n, t);
      for (int j = 1; j < results; j++) {
        phi->set_req(j + 1, ins[j]);
      }
      map->set_req(i, gvn.transform(phi));
    }
  }

  return kit.transfer_exceptions_into_jvms();
}

//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which replaces the call site with an unconditional uncommon trap.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool is_virtual() const { ShouldNotReachHere(); return false; }
  virtual bool is_trap() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}


JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  // Take the trap with arguments pushed on the stack. (Cf. null_check_receiver).
  // Callsite signature can be different from actual method being called (i.e. _linkTo* sites).
  // Use callsite signature always.
  ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
  int nargs = declared_method->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}

// (Note: Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note: Merged hook_up_exits into ParseGenerator::generate.)