GitHub Repository: PojavLauncherTeam/jdk17u
Path: blob/master/src/hotspot/share/opto/callGenerator.cpp
1
/*
2
* Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
3
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
*
5
* This code is free software; you can redistribute it and/or modify it
6
* under the terms of the GNU General Public License version 2 only, as
7
* published by the Free Software Foundation.
8
*
9
* This code is distributed in the hope that it will be useful, but WITHOUT
10
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12
* version 2 for more details (a copy is included in the LICENSE file that
13
* accompanied this code).
14
*
15
* You should have received a copy of the GNU General Public License version
16
* 2 along with this work; if not, write to the Free Software Foundation,
17
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
*
19
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
* or visit www.oracle.com if you need additional information or have any
21
* questions.
22
*
23
*/
24
25
#include "precompiled.hpp"
26
#include "ci/bcEscapeAnalyzer.hpp"
27
#include "ci/ciCallSite.hpp"
28
#include "ci/ciObjArray.hpp"
29
#include "ci/ciMemberName.hpp"
30
#include "ci/ciMethodHandle.hpp"
31
#include "classfile/javaClasses.hpp"
32
#include "compiler/compileLog.hpp"
33
#include "opto/addnode.hpp"
34
#include "opto/callGenerator.hpp"
35
#include "opto/callnode.hpp"
36
#include "opto/castnode.hpp"
37
#include "opto/cfgnode.hpp"
38
#include "opto/parse.hpp"
39
#include "opto/rootnode.hpp"
40
#include "opto/runtime.hpp"
41
#include "opto/subnode.hpp"
42
#include "runtime/sharedRuntime.hpp"
43
#include "ci/ciNativeEntryPoint.hpp"
44
#include "utilities/debug.hpp"
45
46
// Utility function.
47
const TypeFunc* CallGenerator::tf() const {
48
return TypeFunc::make(method());
49
}
50
51
bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
52
return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
53
}
54
55
bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
56
ciMethod* symbolic_info = caller->get_method_at_bci(bci);
57
return is_inlined_method_handle_intrinsic(symbolic_info, m);
58
}
59
60
bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* symbolic_info, ciMethod* m) {
61
return symbolic_info->is_method_handle_intrinsic() && !m->is_method_handle_intrinsic();
62
}
63
64
//-----------------------------ParseGenerator---------------------------------
65
// Internal class which handles all direct bytecode traversal.
66
class ParseGenerator : public InlineCallGenerator {
67
private:
68
bool _is_osr;
69
float _expected_uses;
70
71
public:
72
ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
73
: InlineCallGenerator(method)
74
{
75
_is_osr = is_osr;
76
_expected_uses = expected_uses;
77
assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
78
}
79
80
virtual bool is_parse() const { return true; }
81
virtual JVMState* generate(JVMState* jvms);
82
int is_osr() { return _is_osr; }
83
84
};
85
86
JVMState* ParseGenerator::generate(JVMState* jvms) {
87
Compile* C = Compile::current();
88
C->print_inlining_update(this);
89
90
if (is_osr()) {
91
// The JVMS for an OSR has a single argument (see its TypeFunc).
92
assert(jvms->depth() == 1, "no inline OSR");
93
}
94
95
if (C->failing()) {
96
return NULL; // bailing out of the compile; do not try to parse
97
}
98
99
Parse parser(jvms, method(), _expected_uses);
100
// Grab signature for matching/allocation
101
GraphKit& exits = parser.exits();
102
103
if (C->failing()) {
104
while (exits.pop_exception_state() != NULL) ;
105
return NULL;
106
}
107
108
assert(exits.jvms()->same_calls_as(jvms), "sanity");
109
110
// Simply return the exit state of the parser,
111
// augmented by any exceptional states.
112
return exits.transfer_exceptions_into_jvms();
113
}
114
115
//---------------------------DirectCallGenerator------------------------------
116
// Internal class which handles all out-of-line calls w/o receiver type checks.
117
class DirectCallGenerator : public CallGenerator {
118
private:
119
CallStaticJavaNode* _call_node;
120
// Force separate memory and I/O projections for the exceptional
121
// paths to facilitate late inlining.
122
bool _separate_io_proj;
123
124
protected:
125
void set_call_node(CallStaticJavaNode* call) { _call_node = call; }
126
127
public:
128
DirectCallGenerator(ciMethod* method, bool separate_io_proj)
129
: CallGenerator(method),
130
_separate_io_proj(separate_io_proj)
131
{
132
}
133
virtual JVMState* generate(JVMState* jvms);
134
135
virtual CallNode* call_node() const { return _call_node; }
136
virtual CallGenerator* with_call_node(CallNode* call) {
137
DirectCallGenerator* dcg = new DirectCallGenerator(method(), _separate_io_proj);
138
dcg->set_call_node(call->as_CallStaticJava());
139
return dcg;
140
}
141
};
142
143
JVMState* DirectCallGenerator::generate(JVMState* jvms) {
144
GraphKit kit(jvms);
145
kit.C->print_inlining_update(this);
146
bool is_static = method()->is_static();
147
address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
148
: SharedRuntime::get_resolve_opt_virtual_call_stub();
149
150
if (kit.C->log() != NULL) {
151
kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
152
}
153
154
CallStaticJavaNode* call = new CallStaticJavaNode(kit.C, tf(), target, method());
155
if (is_inlined_method_handle_intrinsic(jvms, method())) {
156
// To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
157
// additional information about the method being invoked should be attached
158
// to the call site to make resolution logic work
159
// (see SharedRuntime::resolve_static_call_C).
160
call->set_override_symbolic_info(true);
161
}
162
_call_node = call; // Save the call node in case we need it later
163
if (!is_static) {
164
// Make an explicit receiver null_check as part of this call.
165
// Since we share a map with the caller, his JVMS gets adjusted.
166
kit.null_check_receiver_before_call(method());
167
if (kit.stopped()) {
168
// And dump it back to the caller, decorated with any exceptions:
169
return kit.transfer_exceptions_into_jvms();
170
}
171
// Mark the call node as virtual, sort of:
172
call->set_optimized_virtual(true);
173
if (method()->is_method_handle_intrinsic() ||
174
method()->is_compiled_lambda_form()) {
175
call->set_method_handle_invoke(true);
176
}
177
}
178
kit.set_arguments_for_java_call(call);
179
kit.set_edges_for_java_call(call, false, _separate_io_proj);
180
Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
181
kit.push_node(method()->return_type()->basic_type(), ret);
182
return kit.transfer_exceptions_into_jvms();
183
}
184
185
//--------------------------VirtualCallGenerator------------------------------
186
// Internal class which handles all out-of-line calls checking receiver type.
187
class VirtualCallGenerator : public CallGenerator {
188
private:
189
int _vtable_index;
190
bool _separate_io_proj;
191
CallDynamicJavaNode* _call_node;
192
193
protected:
194
void set_call_node(CallDynamicJavaNode* call) { _call_node = call; }
195
196
public:
197
VirtualCallGenerator(ciMethod* method, int vtable_index, bool separate_io_proj)
198
: CallGenerator(method), _vtable_index(vtable_index), _separate_io_proj(separate_io_proj), _call_node(NULL)
199
{
200
assert(vtable_index == Method::invalid_vtable_index ||
201
vtable_index >= 0, "either invalid or usable");
202
}
203
virtual bool is_virtual() const { return true; }
204
virtual JVMState* generate(JVMState* jvms);
205
206
virtual CallNode* call_node() const { return _call_node; }
207
int vtable_index() const { return _vtable_index; }
208
209
virtual CallGenerator* with_call_node(CallNode* call) {
210
VirtualCallGenerator* cg = new VirtualCallGenerator(method(), _vtable_index, _separate_io_proj);
211
cg->set_call_node(call->as_CallDynamicJava());
212
return cg;
213
}
214
};
215
216
JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
217
GraphKit kit(jvms);
218
Node* receiver = kit.argument(0);
219
220
kit.C->print_inlining_update(this);
221
222
if (kit.C->log() != NULL) {
223
kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
224
}
225
226
// If the receiver is a constant null, do not torture the system
227
// by attempting to call through it. The compile will proceed
228
// correctly, but may bail out in final_graph_reshaping, because
229
// the call instruction will have a seemingly deficient out-count.
230
// (The bailout says something misleading about an "infinite loop".)
231
if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
232
assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
233
ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
234
int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
235
kit.inc_sp(arg_size); // restore arguments
236
kit.uncommon_trap(Deoptimization::Reason_null_check,
237
Deoptimization::Action_none,
238
NULL, "null receiver");
239
return kit.transfer_exceptions_into_jvms();
240
}
241
242
// Ideally we would unconditionally do a null check here and let it
243
// be converted to an implicit check based on profile information.
244
// However currently the conversion to implicit null checks in
245
// Block::implicit_null_check() only looks for loads and stores, not calls.
246
ciMethod *caller = kit.method();
247
ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
248
if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
249
((ImplicitNullCheckThreshold > 0) && caller_md &&
250
(caller_md->trap_count(Deoptimization::Reason_null_check)
251
>= (uint)ImplicitNullCheckThreshold))) {
252
// Make an explicit receiver null_check as part of this call.
253
// Since we share a map with the caller, his JVMS gets adjusted.
254
receiver = kit.null_check_receiver_before_call(method());
255
if (kit.stopped()) {
256
// And dump it back to the caller, decorated with any exceptions:
257
return kit.transfer_exceptions_into_jvms();
258
}
259
}
260
261
assert(!method()->is_static(), "virtual call must not be to static");
262
assert(!method()->is_final(), "virtual call should not be to final");
263
assert(!method()->is_private(), "virtual call should not be to private");
264
assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
265
"no vtable calls if +UseInlineCaches ");
266
address target = SharedRuntime::get_resolve_virtual_call_stub();
267
// Normal inline cache used for call
268
CallDynamicJavaNode* call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index);
269
if (is_inlined_method_handle_intrinsic(jvms, method())) {
270
// To be able to issue a direct call (optimized virtual or virtual)
271
// and skip a call to MH.linkTo*/invokeBasic adapter, additional information
272
// about the method being invoked should be attached to the call site to
273
// make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
274
call->set_override_symbolic_info(true);
275
}
276
_call_node = call; // Save the call node in case we need it later
277
278
kit.set_arguments_for_java_call(call);
279
kit.set_edges_for_java_call(call, false /*must_throw*/, _separate_io_proj);
280
Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
281
kit.push_node(method()->return_type()->basic_type(), ret);
282
283
// Represent the effect of an implicit receiver null_check
284
// as part of this call. Since we share a map with the caller,
285
// his JVMS gets adjusted.
286
kit.cast_not_null(receiver);
287
return kit.transfer_exceptions_into_jvms();
288
}
289
290
CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
291
if (InlineTree::check_can_parse(m) != NULL) return NULL;
292
return new ParseGenerator(m, expected_uses);
293
}
294
295
// As a special case, the JVMS passed to this CallGenerator is
296
// for the method execution already in progress, not just the JVMS
297
// of the caller. Thus, this CallGenerator cannot be mixed with others!
298
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
299
if (InlineTree::check_can_parse(m) != NULL) return NULL;
300
float past_uses = m->interpreter_invocation_count();
301
float expected_uses = past_uses;
302
return new ParseGenerator(m, expected_uses, true);
303
}
304
305
CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
306
assert(!m->is_abstract(), "for_direct_call mismatch");
307
return new DirectCallGenerator(m, separate_io_proj);
308
}
309
310
CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
311
assert(!m->is_static(), "for_virtual_call mismatch");
312
assert(!m->is_method_handle_intrinsic(), "should be a direct call");
313
return new VirtualCallGenerator(m, vtable_index, false /*separate_io_projs*/);
314
}
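// A minimal usage sketch (illustrative only, not part of this file; names such
// as callee, allow_inline, call_does_dispatch, expected_uses and vtable_index
// are assumed): a client like Compile::call_generator() picks one of the
// factories above and then drives code generation through generate():
//
//   CallGenerator* cg = NULL;
//   if (allow_inline && InlineTree::check_can_parse(callee) == NULL) {
//     cg = CallGenerator::for_inline(callee, expected_uses);      // parse bytecodes inline
//   } else if (call_does_dispatch) {
//     cg = CallGenerator::for_virtual_call(callee, vtable_index); // out-of-line virtual call
//   } else {
//     cg = CallGenerator::for_direct_call(callee);                // out-of-line static/special call
//   }
//   JVMState* new_jvms = cg->generate(jvms);  // emits nodes into the current graph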
315
316
// Allow inlining decisions to be delayed
317
class LateInlineCallGenerator : public DirectCallGenerator {
318
private:
319
jlong _unique_id; // unique id for log compilation
320
bool _is_pure_call; // a hint that the call doesn't have important side effects to care about
321
322
protected:
323
CallGenerator* _inline_cg;
324
virtual bool do_late_inline_check(Compile* C, JVMState* jvms) { return true; }
325
virtual CallGenerator* inline_cg() const { return _inline_cg; }
326
virtual bool is_pure_call() const { return _is_pure_call; }
327
328
public:
329
LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg, bool is_pure_call = false) :
330
DirectCallGenerator(method, true), _unique_id(0), _is_pure_call(is_pure_call), _inline_cg(inline_cg) {}
331
332
virtual bool is_late_inline() const { return true; }
333
334
// Convert the CallStaticJava into an inline
335
virtual void do_late_inline();
336
337
virtual JVMState* generate(JVMState* jvms) {
338
Compile *C = Compile::current();
339
340
C->log_inline_id(this);
341
342
// Record that this call site should be revisited once the main
343
// parse is finished.
344
if (!is_mh_late_inline()) {
345
C->add_late_inline(this);
346
}
347
348
// Emit the CallStaticJava and request separate projections so
349
// that the late inlining logic can distinguish between fall
350
// through and exceptional uses of the memory and io projections
351
// as is done for allocations and macro expansion.
352
return DirectCallGenerator::generate(jvms);
353
}
354
355
virtual void print_inlining_late(const char* msg) {
356
CallNode* call = call_node();
357
Compile* C = Compile::current();
358
C->print_inlining_assert_ready();
359
C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
360
C->print_inlining_move_to(this);
361
C->print_inlining_update_delayed(this);
362
}
363
364
virtual void set_unique_id(jlong id) {
365
_unique_id = id;
366
}
367
368
virtual jlong unique_id() const {
369
return _unique_id;
370
}
371
372
virtual CallGenerator* with_call_node(CallNode* call) {
373
LateInlineCallGenerator* cg = new LateInlineCallGenerator(method(), _inline_cg, _is_pure_call);
374
cg->set_call_node(call->as_CallStaticJava());
375
return cg;
376
}
377
};
378
379
CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
380
return new LateInlineCallGenerator(method, inline_cg);
381
}
382
383
class LateInlineMHCallGenerator : public LateInlineCallGenerator {
384
ciMethod* _caller;
385
bool _input_not_const;
386
387
virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
388
389
public:
390
LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
391
LateInlineCallGenerator(callee, NULL), _caller(caller), _input_not_const(input_not_const) {}
392
393
virtual bool is_mh_late_inline() const { return true; }
394
395
// Convert the CallStaticJava into an inline
396
virtual void do_late_inline();
397
398
virtual JVMState* generate(JVMState* jvms) {
399
JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);
400
401
Compile* C = Compile::current();
402
if (_input_not_const) {
403
// inlining won't be possible so no need to enqueue right now.
404
call_node()->set_generator(this);
405
} else {
406
C->add_late_inline(this);
407
}
408
return new_jvms;
409
}
410
411
virtual CallGenerator* with_call_node(CallNode* call) {
412
LateInlineMHCallGenerator* cg = new LateInlineMHCallGenerator(_caller, method(), _input_not_const);
413
cg->set_call_node(call->as_CallStaticJava());
414
return cg;
415
}
416
};
417
418
bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
419
// When inlining a virtual call, the null check at the call and the call itself can throw. These 2 paths have different
420
// expression stacks which causes late inlining to break. The MH invoker is not expected to be called from a method wih
421
// exception handlers. When there is no exception handler, GraphKit::builtin_throw() pops the stack which solves the issue
422
// of late inlining with exceptions.
423
assert(!jvms->method()->has_exception_handlers() ||
424
(method()->intrinsic_id() != vmIntrinsics::_linkToVirtual &&
425
method()->intrinsic_id() != vmIntrinsics::_linkToInterface), "no exception handler expected");
426
// Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
427
bool allow_inline = C->inlining_incrementally();
428
bool input_not_const = true;
429
CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, input_not_const);
430
assert(!input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place
431
432
if (cg != NULL) {
433
assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline, "we're doing late inlining");
434
_inline_cg = cg;
435
C->dec_number_of_mh_late_inlines();
436
return true;
437
} else {
438
// Method handle call which has a constant appendix argument should be either inlined or replaced with a direct call
439
// unless there's a signature mismatch between caller and callee. If the failure occurs, there's not much to be improved later,
440
// so don't reinstall the generator to avoid pushing the generator between IGVN and incremental inlining indefinitely.
441
return false;
442
}
443
}
444
445
CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
446
assert(IncrementalInlineMH, "required");
447
Compile::current()->inc_number_of_mh_late_inlines();
448
CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
449
return cg;
450
}
451
452
// Allow inlining decisions to be delayed
453
class LateInlineVirtualCallGenerator : public VirtualCallGenerator {
454
private:
455
jlong _unique_id; // unique id for log compilation
456
CallGenerator* _inline_cg;
457
ciMethod* _callee;
458
bool _is_pure_call;
459
float _prof_factor;
460
461
protected:
462
virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
463
virtual CallGenerator* inline_cg() const { return _inline_cg; }
464
virtual bool is_pure_call() const { return _is_pure_call; }
465
466
public:
467
LateInlineVirtualCallGenerator(ciMethod* method, int vtable_index, float prof_factor)
468
: VirtualCallGenerator(method, vtable_index, true /*separate_io_projs*/),
469
_unique_id(0), _inline_cg(NULL), _callee(NULL), _is_pure_call(false), _prof_factor(prof_factor) {
470
assert(IncrementalInlineVirtual, "required");
471
}
472
473
virtual bool is_late_inline() const { return true; }
474
475
virtual bool is_virtual_late_inline() const { return true; }
476
477
// Convert the CallDynamicJava into an inline
478
virtual void do_late_inline();
479
480
virtual void set_callee_method(ciMethod* m) {
481
assert(_callee == NULL, "repeated inlining attempt");
482
_callee = m;
483
}
484
485
virtual JVMState* generate(JVMState* jvms) {
486
// Emit the CallDynamicJava and request separate projections so
487
// that the late inlining logic can distinguish between fall
488
// through and exceptional uses of the memory and io projections
489
// as is done for allocations and macro expansion.
490
JVMState* new_jvms = VirtualCallGenerator::generate(jvms);
491
if (call_node() != NULL) {
492
call_node()->set_generator(this);
493
}
494
return new_jvms;
495
}
496
497
virtual void print_inlining_late(const char* msg) {
498
CallNode* call = call_node();
499
Compile* C = Compile::current();
500
C->print_inlining_assert_ready();
501
C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
502
C->print_inlining_move_to(this);
503
C->print_inlining_update_delayed(this);
504
}
505
506
virtual void set_unique_id(jlong id) {
507
_unique_id = id;
508
}
509
510
virtual jlong unique_id() const {
511
return _unique_id;
512
}
513
514
virtual CallGenerator* with_call_node(CallNode* call) {
515
LateInlineVirtualCallGenerator* cg = new LateInlineVirtualCallGenerator(method(), vtable_index(), _prof_factor);
516
cg->set_call_node(call->as_CallDynamicJava());
517
return cg;
518
}
519
};
520
521
bool LateInlineVirtualCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
522
// Method handle linker case is handled in CallDynamicJavaNode::Ideal().
523
// Unless inlining is performed, _override_symbolic_info bit will be set in DirectCallGenerator::generate().
524
525
// Implicit receiver null checks introduce problems when exception states are combined.
526
Node* receiver = jvms->map()->argument(jvms, 0);
527
const Type* recv_type = C->initial_gvn()->type(receiver);
528
if (recv_type->maybe_null()) {
529
if (C->print_inlining() || C->print_intrinsics()) {
530
C->print_inlining(method(), jvms->depth()-1, call_node()->jvms()->bci(),
531
"late call devirtualization failed (receiver may be null)");
532
}
533
return false;
534
}
535
// Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
536
bool allow_inline = C->inlining_incrementally();
537
if (!allow_inline && _callee->holder()->is_interface()) {
538
// Don't convert the interface call to a direct call guarded by an interface subtype check.
539
if (C->print_inlining() || C->print_intrinsics()) {
540
C->print_inlining(method(), jvms->depth()-1, call_node()->jvms()->bci(),
541
"late call devirtualization failed (interface call)");
542
}
543
return false;
544
}
545
CallGenerator* cg = C->call_generator(_callee,
546
vtable_index(),
547
false /*call_does_dispatch*/,
548
jvms,
549
allow_inline,
550
_prof_factor,
551
NULL /*speculative_receiver_type*/,
552
true /*allow_intrinsics*/);
553
554
if (cg != NULL) {
555
assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline, "we're doing late inlining");
556
_inline_cg = cg;
557
return true;
558
} else {
559
// Virtual call which provably doesn't dispatch should be either inlined or replaced with a direct call.
560
assert(false, "no progress");
561
return false;
562
}
563
}
564
565
CallGenerator* CallGenerator::for_late_inline_virtual(ciMethod* m, int vtable_index, float prof_factor) {
566
assert(IncrementalInlineVirtual, "required");
567
assert(!m->is_static(), "for_virtual_call mismatch");
568
assert(!m->is_method_handle_intrinsic(), "should be a direct call");
569
return new LateInlineVirtualCallGenerator(m, vtable_index, prof_factor);
570
}
571
572
void LateInlineCallGenerator::do_late_inline() {
573
CallGenerator::do_late_inline_helper();
574
}
575
576
void LateInlineMHCallGenerator::do_late_inline() {
577
CallGenerator::do_late_inline_helper();
578
}
579
580
void LateInlineVirtualCallGenerator::do_late_inline() {
581
assert(_callee != NULL, "required"); // set up in CallDynamicJavaNode::Ideal
582
CallGenerator::do_late_inline_helper();
583
}
584
585
static bool has_non_debug_usages(Node* n) {
586
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
587
Node* m = n->fast_out(i);
588
if (!m->is_SafePoint()
589
|| (m->is_Call() && m->as_Call()->has_non_debug_use(n))) {
590
return true;
591
}
592
}
593
return false;
594
}
595
596
static bool is_box_cache_valid(CallNode* call) {
597
ciInstanceKlass* klass = call->as_CallStaticJava()->method()->holder();
598
return klass->is_box_cache_valid();
599
}
600
601
// delay box in runtime, treat box as a scalarized object
602
static void scalarize_debug_usages(CallNode* call, Node* resproj) {
603
GraphKit kit(call->jvms());
604
PhaseGVN& gvn = kit.gvn();
605
606
ProjNode* res = resproj->as_Proj();
607
ciInstanceKlass* klass = call->as_CallStaticJava()->method()->holder();
608
int n_fields = klass->nof_nonstatic_fields();
609
assert(n_fields == 1, "the klass must be an auto-boxing klass");
610
611
for (DUIterator_Last imin, i = res->last_outs(imin); i >= imin;) {
612
SafePointNode* sfpt = res->last_out(i)->as_SafePoint();
613
uint first_ind = sfpt->req() - sfpt->jvms()->scloff();
614
Node* sobj = new SafePointScalarObjectNode(gvn.type(res)->isa_oopptr(),
615
#ifdef ASSERT
616
call,
617
#endif // ASSERT
618
first_ind, n_fields, true);
619
sobj->init_req(0, kit.root());
620
sfpt->add_req(call->in(TypeFunc::Parms));
621
sobj = gvn.transform(sobj);
622
JVMState* jvms = sfpt->jvms();
623
jvms->set_endoff(sfpt->req());
624
int start = jvms->debug_start();
625
int end = jvms->debug_end();
626
int num_edges = sfpt->replace_edges_in_range(res, sobj, start, end, &gvn);
627
i -= num_edges;
628
}
629
630
assert(res->outcnt() == 0, "the box must have no use after replace");
631
632
#ifndef PRODUCT
633
if (PrintEliminateAllocations) {
634
tty->print("++++ Eliminated: %d ", call->_idx);
635
call->as_CallStaticJava()->method()->print_short_name(tty);
636
tty->cr();
637
}
638
#endif
639
}
640
641
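// Shared driver for all late-inline generators (a summary of the logic below):
// take the CallStaticJava/CallDynamicJava node recorded during parsing, bail
// out if the call or its inputs are dead, rebuild a fresh SafePointNode and
// JVMState from the call's arguments, run do_late_inline_check(), and, if the
// delayed inline_cg() produces a graph, splice it in via GraphKit::replace_call().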
void CallGenerator::do_late_inline_helper() {
642
assert(is_late_inline(), "only late inline allowed");
643
644
// Can't inline it
645
CallNode* call = call_node();
646
if (call == NULL || call->outcnt() == 0 ||
647
call->in(0) == NULL || call->in(0)->is_top()) {
648
return;
649
}
650
651
const TypeTuple *r = call->tf()->domain();
652
for (int i1 = 0; i1 < method()->arg_size(); i1++) {
653
if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
654
assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
655
return;
656
}
657
}
658
659
if (call->in(TypeFunc::Memory)->is_top()) {
660
assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
661
return;
662
}
663
if (call->in(TypeFunc::Memory)->is_MergeMem()) {
664
MergeMemNode* merge_mem = call->in(TypeFunc::Memory)->as_MergeMem();
665
if (merge_mem->base_memory() == merge_mem->empty_memory()) {
666
return; // dead path
667
}
668
}
669
670
// check for unreachable loop
671
CallProjections callprojs;
672
call->extract_projections(&callprojs, true);
673
if ((callprojs.fallthrough_catchproj == call->in(0)) ||
674
(callprojs.catchall_catchproj == call->in(0)) ||
675
(callprojs.fallthrough_memproj == call->in(TypeFunc::Memory)) ||
676
(callprojs.catchall_memproj == call->in(TypeFunc::Memory)) ||
677
(callprojs.fallthrough_ioproj == call->in(TypeFunc::I_O)) ||
678
(callprojs.catchall_ioproj == call->in(TypeFunc::I_O)) ||
679
(callprojs.resproj != NULL && call->find_edge(callprojs.resproj) != -1) ||
680
(callprojs.exobj != NULL && call->find_edge(callprojs.exobj) != -1)) {
681
return;
682
}
683
684
Compile* C = Compile::current();
685
// Remove inlined methods from Compiler's lists.
686
if (call->is_macro()) {
687
C->remove_macro_node(call);
688
}
689
690
bool result_not_used = false;
691
692
if (is_pure_call()) {
693
// Disabled due to JDK-8276112
694
if (false && is_boxing_late_inline() && callprojs.resproj != nullptr) {
695
// replace the box node with a scalar node only if it is directly referenced by debug info
696
assert(call->as_CallStaticJava()->is_boxing_method(), "sanity");
697
if (!has_non_debug_usages(callprojs.resproj) && is_box_cache_valid(call)) {
698
scalarize_debug_usages(call, callprojs.resproj);
699
}
700
}
701
702
// The call is marked as pure (no important side effects), but the result isn't used.
703
// It's safe to remove the call.
704
result_not_used = (callprojs.resproj == NULL || callprojs.resproj->outcnt() == 0);
705
}
706
707
if (result_not_used) {
708
GraphKit kit(call->jvms());
709
kit.replace_call(call, C->top(), true);
710
} else {
711
// Make a clone of the JVMState that is appropriate to use for driving a parse
712
JVMState* old_jvms = call->jvms();
713
JVMState* jvms = old_jvms->clone_shallow(C);
714
uint size = call->req();
715
SafePointNode* map = new SafePointNode(size, jvms);
716
for (uint i1 = 0; i1 < size; i1++) {
717
map->init_req(i1, call->in(i1));
718
}
719
720
// Make sure the state is a MergeMem for parsing.
721
if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
722
Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
723
C->initial_gvn()->set_type_bottom(mem);
724
map->set_req(TypeFunc::Memory, mem);
725
}
726
727
uint nargs = method()->arg_size();
728
// blow away old call arguments
729
Node* top = C->top();
730
for (uint i1 = 0; i1 < nargs; i1++) {
731
map->set_req(TypeFunc::Parms + i1, top);
732
}
733
jvms->set_map(map);
734
735
// Make enough space in the expression stack to transfer
736
// the incoming arguments and return value.
737
map->ensure_stack(jvms, jvms->method()->max_stack());
738
for (uint i1 = 0; i1 < nargs; i1++) {
739
map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
740
}
741
742
C->print_inlining_assert_ready();
743
744
C->print_inlining_move_to(this);
745
746
C->log_late_inline(this);
747
748
// JVMState is ready, so time to perform some checks and prepare for inlining attempt.
749
if (!do_late_inline_check(C, jvms)) {
750
map->disconnect_inputs(C);
751
C->print_inlining_update_delayed(this);
752
return;
753
}
754
755
// Setup default node notes to be picked up by the inlining
756
Node_Notes* old_nn = C->node_notes_at(call->_idx);
757
if (old_nn != NULL) {
758
Node_Notes* entry_nn = old_nn->clone(C);
759
entry_nn->set_jvms(jvms);
760
C->set_default_node_notes(entry_nn);
761
}
762
763
// Now perform the inlining using the synthesized JVMState
764
JVMState* new_jvms = inline_cg()->generate(jvms);
765
if (new_jvms == NULL) return; // no change
766
if (C->failing()) return;
767
768
// Capture any exceptional control flow
769
GraphKit kit(new_jvms);
770
771
// Find the result object
772
Node* result = C->top();
773
int result_size = method()->return_type()->size();
774
if (result_size != 0 && !kit.stopped()) {
775
result = (result_size == 1) ? kit.pop() : kit.pop_pair();
776
}
777
778
if (inline_cg()->is_inline()) {
779
C->set_has_loops(C->has_loops() || inline_cg()->method()->has_loops());
780
C->env()->notice_inlined_method(inline_cg()->method());
781
}
782
C->set_inlining_progress(true);
783
C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup
784
kit.replace_call(call, result, true);
785
}
786
}
787
788
class LateInlineStringCallGenerator : public LateInlineCallGenerator {
789
790
public:
791
LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
792
LateInlineCallGenerator(method, inline_cg) {}
793
794
virtual JVMState* generate(JVMState* jvms) {
795
Compile *C = Compile::current();
796
797
C->log_inline_id(this);
798
799
C->add_string_late_inline(this);
800
801
JVMState* new_jvms = DirectCallGenerator::generate(jvms);
802
return new_jvms;
803
}
804
805
virtual bool is_string_late_inline() const { return true; }
806
807
virtual CallGenerator* with_call_node(CallNode* call) {
808
LateInlineStringCallGenerator* cg = new LateInlineStringCallGenerator(method(), _inline_cg);
809
cg->set_call_node(call->as_CallStaticJava());
810
return cg;
811
}
812
};
813
814
CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
815
return new LateInlineStringCallGenerator(method, inline_cg);
816
}
817
818
class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {
819
820
public:
821
LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
822
LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}
823
824
virtual JVMState* generate(JVMState* jvms) {
825
Compile *C = Compile::current();
826
827
C->log_inline_id(this);
828
829
C->add_boxing_late_inline(this);
830
831
JVMState* new_jvms = DirectCallGenerator::generate(jvms);
832
return new_jvms;
833
}
834
835
virtual bool is_boxing_late_inline() const { return true; }
836
837
virtual CallGenerator* with_call_node(CallNode* call) {
838
LateInlineBoxingCallGenerator* cg = new LateInlineBoxingCallGenerator(method(), _inline_cg);
839
cg->set_call_node(call->as_CallStaticJava());
840
return cg;
841
}
842
};
843
844
CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
845
return new LateInlineBoxingCallGenerator(method, inline_cg);
846
}
847
848
class LateInlineVectorReboxingCallGenerator : public LateInlineCallGenerator {
849
850
public:
851
LateInlineVectorReboxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
852
LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}
853
854
virtual JVMState* generate(JVMState* jvms) {
855
Compile *C = Compile::current();
856
857
C->log_inline_id(this);
858
859
C->add_vector_reboxing_late_inline(this);
860
861
JVMState* new_jvms = DirectCallGenerator::generate(jvms);
862
return new_jvms;
863
}
864
865
virtual CallGenerator* with_call_node(CallNode* call) {
866
LateInlineVectorReboxingCallGenerator* cg = new LateInlineVectorReboxingCallGenerator(method(), _inline_cg);
867
cg->set_call_node(call->as_CallStaticJava());
868
return cg;
869
}
870
};
871
872
// static CallGenerator* for_vector_reboxing_late_inline(ciMethod* m, CallGenerator* inline_cg);
873
CallGenerator* CallGenerator::for_vector_reboxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
874
return new LateInlineVectorReboxingCallGenerator(method, inline_cg);
875
}
876
877
//------------------------PredictedCallGenerator------------------------------
878
// Internal class which handles calls guarded by a receiver type prediction:
// a hot path (usually inlined) and a slow out-of-line fallback path.
879
class PredictedCallGenerator : public CallGenerator {
880
ciKlass* _predicted_receiver;
881
CallGenerator* _if_missed;
882
CallGenerator* _if_hit;
883
float _hit_prob;
884
bool _exact_check;
885
886
public:
887
PredictedCallGenerator(ciKlass* predicted_receiver,
888
CallGenerator* if_missed,
889
CallGenerator* if_hit, bool exact_check,
890
float hit_prob)
891
: CallGenerator(if_missed->method())
892
{
893
// The call profile data may predict the hit_prob as extreme as 0 or 1.
894
// Remove the extreme values from the range.
895
if (hit_prob > PROB_MAX) hit_prob = PROB_MAX;
896
if (hit_prob < PROB_MIN) hit_prob = PROB_MIN;
897
898
_predicted_receiver = predicted_receiver;
899
_if_missed = if_missed;
900
_if_hit = if_hit;
901
_hit_prob = hit_prob;
902
_exact_check = exact_check;
903
}
904
905
virtual bool is_virtual() const { return true; }
906
virtual bool is_inline() const { return _if_hit->is_inline(); }
907
virtual bool is_deferred() const { return _if_hit->is_deferred(); }
908
909
virtual JVMState* generate(JVMState* jvms);
910
};
911
912
913
CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
914
CallGenerator* if_missed,
915
CallGenerator* if_hit,
916
float hit_prob) {
917
return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit,
918
/*exact_check=*/true, hit_prob);
919
}
920
921
CallGenerator* CallGenerator::for_guarded_call(ciKlass* guarded_receiver,
922
CallGenerator* if_missed,
923
CallGenerator* if_hit) {
924
return new PredictedCallGenerator(guarded_receiver, if_missed, if_hit,
925
/*exact_check=*/false, PROB_ALWAYS);
926
}
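// The code emitted by PredictedCallGenerator::generate() below is, informally,
// a receiver type-check diamond (sketch only, not literal IR):
//
//   if (receiver has the predicted klass)   // subtype check when !_exact_check
//     result = _if_hit->generate(...);      // typically an inlined body
//   else
//     result = _if_missed->generate(...);   // typically an out-of-line virtual call
//
// Control, i/o, memory and live debug state are then merged at a RegionNode.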
927
928
JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
929
GraphKit kit(jvms);
930
kit.C->print_inlining_update(this);
931
PhaseGVN& gvn = kit.gvn();
932
// We need an explicit receiver null_check before checking its type.
933
// We share a map with the caller, so his JVMS gets adjusted.
934
Node* receiver = kit.argument(0);
935
CompileLog* log = kit.C->log();
936
if (log != NULL) {
937
log->elem("predicted_call bci='%d' exact='%d' klass='%d'",
938
jvms->bci(), (_exact_check ? 1 : 0), log->identify(_predicted_receiver));
939
}
940
941
receiver = kit.null_check_receiver_before_call(method());
942
if (kit.stopped()) {
943
return kit.transfer_exceptions_into_jvms();
944
}
945
946
// Make a copy of the replaced nodes in case we need to restore them
947
ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
948
replaced_nodes.clone();
949
950
Node* casted_receiver = receiver; // will get updated in place...
951
Node* slow_ctl = NULL;
952
if (_exact_check) {
953
slow_ctl = kit.type_check_receiver(receiver, _predicted_receiver, _hit_prob,
954
&casted_receiver);
955
} else {
956
slow_ctl = kit.subtype_check_receiver(receiver, _predicted_receiver,
957
&casted_receiver);
958
}
959
960
SafePointNode* slow_map = NULL;
961
JVMState* slow_jvms = NULL;
962
{ PreserveJVMState pjvms(&kit);
963
kit.set_control(slow_ctl);
964
if (!kit.stopped()) {
965
slow_jvms = _if_missed->generate(kit.sync_jvms());
966
if (kit.failing())
967
return NULL; // might happen because of NodeCountInliningCutoff
968
assert(slow_jvms != NULL, "must be");
969
kit.add_exception_states_from(slow_jvms);
970
kit.set_map(slow_jvms->map());
971
if (!kit.stopped())
972
slow_map = kit.stop();
973
}
974
}
975
976
if (kit.stopped()) {
977
// Instance does not match the predicted type.
978
kit.set_jvms(slow_jvms);
979
return kit.transfer_exceptions_into_jvms();
980
}
981
982
// Fall through if the instance matches the desired type.
983
kit.replace_in_map(receiver, casted_receiver);
984
985
// Make the hot call:
986
JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
987
if (new_jvms == NULL) {
988
// Inline failed, so make a direct call.
989
assert(_if_hit->is_inline(), "must have been a failed inline");
990
CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
991
new_jvms = cg->generate(kit.sync_jvms());
992
}
993
kit.add_exception_states_from(new_jvms);
994
kit.set_jvms(new_jvms);
995
996
// Need to merge slow and fast?
997
if (slow_map == NULL) {
998
// The fast path is the only path remaining.
999
return kit.transfer_exceptions_into_jvms();
1000
}
1001
1002
if (kit.stopped()) {
1003
// Inlined method threw an exception, so it's just the slow path after all.
1004
kit.set_jvms(slow_jvms);
1005
return kit.transfer_exceptions_into_jvms();
1006
}
1007
1008
// There are 2 branches and the replaced nodes are only valid on
1009
// one: restore the replaced nodes to what they were before the
1010
// branch.
1011
kit.map()->set_replaced_nodes(replaced_nodes);
1012
1013
// Finish the diamond.
1014
kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
1015
RegionNode* region = new RegionNode(3);
1016
region->init_req(1, kit.control());
1017
region->init_req(2, slow_map->control());
1018
kit.set_control(gvn.transform(region));
1019
Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
1020
iophi->set_req(2, slow_map->i_o());
1021
kit.set_i_o(gvn.transform(iophi));
1022
// Merge memory
1023
kit.merge_memory(slow_map->merged_memory(), region, 2);
1024
// Transform new memory Phis.
1025
for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
1026
Node* phi = mms.memory();
1027
if (phi->is_Phi() && phi->in(0) == region) {
1028
mms.set_memory(gvn.transform(phi));
1029
}
1030
}
1031
uint tos = kit.jvms()->stkoff() + kit.sp();
1032
uint limit = slow_map->req();
1033
for (uint i = TypeFunc::Parms; i < limit; i++) {
1034
// Skip unused stack slots; fast forward to monoff();
1035
if (i == tos) {
1036
i = kit.jvms()->monoff();
1037
if( i >= limit ) break;
1038
}
1039
Node* m = kit.map()->in(i);
1040
Node* n = slow_map->in(i);
1041
if (m != n) {
1042
const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
1043
Node* phi = PhiNode::make(region, m, t);
1044
phi->set_req(2, n);
1045
kit.map()->set_req(i, gvn.transform(phi));
1046
}
1047
}
1048
return kit.transfer_exceptions_into_jvms();
1049
}
1050
1051
1052
CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline) {
1053
assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
1054
bool input_not_const;
1055
CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const);
1056
Compile* C = Compile::current();
1057
if (cg != NULL) {
1058
if (AlwaysIncrementalInline) {
1059
return CallGenerator::for_late_inline(callee, cg);
1060
} else {
1061
return cg;
1062
}
1063
}
1064
int bci = jvms->bci();
1065
ciCallProfile profile = caller->call_profile_at_bci(bci);
1066
int call_site_count = caller->scale_count(profile.count());
1067
1068
if (IncrementalInlineMH && call_site_count > 0 &&
1069
(input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
1070
return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
1071
} else {
1072
// Out-of-line call.
1073
return CallGenerator::for_direct_call(callee);
1074
}
1075
}
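// In short: prefer immediate inlining through for_method_handle_inline(); if
// that fails, queue a LateInlineMHCallGenerator when IncrementalInlineMH is
// enabled and the profiled call-site count is positive (subject to the
// incremental-inlining conditions above); otherwise emit an ordinary
// out-of-line direct call.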
1076
1077
class NativeCallGenerator : public CallGenerator {
1078
private:
1079
address _call_addr;
1080
ciNativeEntryPoint* _nep;
1081
public:
1082
NativeCallGenerator(ciMethod* m, address call_addr, ciNativeEntryPoint* nep)
1083
: CallGenerator(m), _call_addr(call_addr), _nep(nep) {}
1084
1085
virtual JVMState* generate(JVMState* jvms);
1086
};
1087
1088
JVMState* NativeCallGenerator::generate(JVMState* jvms) {
1089
GraphKit kit(jvms);
1090
1091
Node* call = kit.make_native_call(_call_addr, tf(), method()->arg_size(), _nep); // -fallback, - nep
1092
if (call == NULL) return NULL;
1093
1094
kit.C->print_inlining_update(this);
1095
if (kit.C->log() != NULL) {
1096
kit.C->log()->elem("l2n_intrinsification_success bci='%d' entry_point='" INTPTR_FORMAT "'", jvms->bci(), p2i(_call_addr));
1097
}
1098
1099
return kit.transfer_exceptions_into_jvms();
1100
}
1101
1102
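// for_method_handle_inline() handles the MethodHandle linker intrinsics seen
// at a call site: _invokeBasic (constant MethodHandle receiver),
// _linkToVirtual/_linkToStatic/_linkToSpecial/_linkToInterface (constant
// MemberName as the trailing argument) and _linkToNative (constant target
// address and NativeEntryPoint). In each case the constant is used to resolve
// the actual target and build a suitable CallGenerator; if the relevant
// argument is not a constant, input_not_const stays true and NULL is returned
// so callers can fall back to a direct or late-inlined call.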
CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline, bool& input_not_const) {
1103
GraphKit kit(jvms);
1104
PhaseGVN& gvn = kit.gvn();
1105
Compile* C = kit.C;
1106
vmIntrinsics::ID iid = callee->intrinsic_id();
1107
input_not_const = true;
1108
if (StressMethodHandleLinkerInlining) {
1109
allow_inline = false;
1110
}
1111
switch (iid) {
1112
case vmIntrinsics::_invokeBasic:
1113
{
1114
// Get MethodHandle receiver:
1115
Node* receiver = kit.argument(0);
1116
if (receiver->Opcode() == Op_ConP) {
1117
input_not_const = false;
1118
const TypeOopPtr* recv_toop = receiver->bottom_type()->isa_oopptr();
1119
if (recv_toop != NULL) {
1120
ciMethod* target = recv_toop->const_oop()->as_method_handle()->get_vmtarget();
1121
const int vtable_index = Method::invalid_vtable_index;
1122
1123
if (!ciMethod::is_consistent_info(callee, target)) {
1124
print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1125
"signatures mismatch");
1126
return NULL;
1127
}
1128
1129
CallGenerator *cg = C->call_generator(target, vtable_index,
1130
false /* call_does_dispatch */,
1131
jvms,
1132
allow_inline,
1133
PROB_ALWAYS);
1134
return cg;
1135
} else {
1136
assert(receiver->bottom_type() == TypePtr::NULL_PTR, "not a null: %s",
1137
Type::str(receiver->bottom_type()));
1138
print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1139
"receiver is always null");
1140
}
1141
} else {
1142
print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1143
"receiver not constant");
1144
}
1145
}
1146
break;
1147
1148
case vmIntrinsics::_linkToVirtual:
1149
case vmIntrinsics::_linkToStatic:
1150
case vmIntrinsics::_linkToSpecial:
1151
case vmIntrinsics::_linkToInterface:
1152
{
1153
// Get MemberName argument:
1154
Node* member_name = kit.argument(callee->arg_size() - 1);
1155
if (member_name->Opcode() == Op_ConP) {
1156
input_not_const = false;
1157
const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
1158
ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
1159
1160
if (!ciMethod::is_consistent_info(callee, target)) {
1161
print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1162
"signatures mismatch");
1163
return NULL;
1164
}
1165
1166
// In lambda forms we erase signature types to avoid resolving issues
1167
// involving class loaders. When we optimize a method handle invoke
1168
// to a direct call we must cast the receiver and arguments to their
1169
// actual types.
1170
ciSignature* signature = target->signature();
1171
const int receiver_skip = target->is_static() ? 0 : 1;
1172
// Cast receiver to its type.
1173
if (!target->is_static()) {
1174
Node* arg = kit.argument(0);
1175
const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
1176
const Type* sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
1177
if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
1178
const Type* recv_type = arg_type->filter_speculative(sig_type); // keep speculative part
1179
Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, recv_type));
1180
kit.set_argument(0, cast_obj);
1181
}
1182
}
1183
// Cast reference arguments to their types.
1184
for (int i = 0, j = 0; i < signature->count(); i++) {
1185
ciType* t = signature->type_at(i);
1186
if (t->is_klass()) {
1187
Node* arg = kit.argument(receiver_skip + j);
1188
const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
1189
const Type* sig_type = TypeOopPtr::make_from_klass(t->as_klass());
1190
if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
1191
const Type* narrowed_arg_type = arg_type->filter_speculative(sig_type); // keep speculative part
1192
Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, narrowed_arg_type));
1193
kit.set_argument(receiver_skip + j, cast_obj);
1194
}
1195
}
1196
j += t->size(); // long and double take two slots
1197
}
1198
1199
// Try to get the most accurate receiver type
1200
const bool is_virtual = (iid == vmIntrinsics::_linkToVirtual);
1201
const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
1202
int vtable_index = Method::invalid_vtable_index;
1203
bool call_does_dispatch = false;
1204
1205
ciKlass* speculative_receiver_type = NULL;
1206
if (is_virtual_or_interface) {
1207
ciInstanceKlass* klass = target->holder();
1208
Node* receiver_node = kit.argument(0);
1209
const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
1210
// call_does_dispatch and vtable_index are out-parameters. They might be changed.
1211
// optimize_virtual_call() takes 2 different holder
1212
// arguments for a corner case that doesn't apply here (see
1213
// Parse::do_call())
1214
target = C->optimize_virtual_call(caller, klass, klass,
1215
target, receiver_type, is_virtual,
1216
call_does_dispatch, vtable_index, // out-parameters
1217
false /* check_access */);
1218
// We lack profiling at this call but type speculation may
1219
// provide us with a type
1220
speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
1221
}
1222
CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
1223
allow_inline,
1224
PROB_ALWAYS,
1225
speculative_receiver_type);
1226
return cg;
1227
} else {
1228
print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1229
"member_name not constant");
1230
}
1231
}
1232
break;
1233
1234
case vmIntrinsics::_linkToNative:
1235
{
1236
Node* addr_n = kit.argument(1); // target address
1237
Node* nep_n = kit.argument(callee->arg_size() - 1); // NativeEntryPoint
1238
// This check needs to be kept in sync with the one in CallStaticJavaNode::Ideal
1239
if (addr_n->Opcode() == Op_ConL && nep_n->Opcode() == Op_ConP) {
1240
input_not_const = false;
1241
const TypeLong* addr_t = addr_n->bottom_type()->is_long();
1242
const TypeOopPtr* nep_t = nep_n->bottom_type()->is_oopptr();
1243
address addr = (address) addr_t->get_con();
1244
ciNativeEntryPoint* nep = nep_t->const_oop()->as_native_entry_point();
1245
return new NativeCallGenerator(callee, addr, nep);
1246
} else {
1247
print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1248
"NativeEntryPoint not constant");
1249
}
1250
}
1251
break;
1252
1253
default:
1254
fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid));
1255
break;
1256
}
1257
return NULL;
1258
}
1259
1260
1261
//------------------------PredicatedIntrinsicGenerator------------------------------
1262
// Internal class which handles all predicated Intrinsic calls.
1263
class PredicatedIntrinsicGenerator : public CallGenerator {
1264
CallGenerator* _intrinsic;
1265
CallGenerator* _cg;
1266
1267
public:
1268
PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
1269
CallGenerator* cg)
1270
: CallGenerator(cg->method())
1271
{
1272
_intrinsic = intrinsic;
1273
_cg = cg;
1274
}
1275
1276
virtual bool is_virtual() const { return true; }
1277
virtual bool is_inline() const { return true; }
1278
virtual bool is_intrinsic() const { return true; }
1279
1280
virtual JVMState* generate(JVMState* jvms);
1281
};
1282
1283
1284
CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
1285
CallGenerator* cg) {
1286
return new PredicatedIntrinsicGenerator(intrinsic, cg);
1287
}
1288
1289
1290
JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
1291
// The code we want to generate here is:
1292
// if (receiver == NULL)
1293
// uncommon_trap
1294
// if (predicate(0))
1295
// do_intrinsic(0)
1296
// else
1297
// if (predicate(1))
1298
// do_intrinsic(1)
1299
// ...
1300
// else
1301
// do_java_comp
1302
1303
GraphKit kit(jvms);
1304
PhaseGVN& gvn = kit.gvn();
1305
1306
CompileLog* log = kit.C->log();
1307
if (log != NULL) {
1308
log->elem("predicated_intrinsic bci='%d' method='%d'",
1309
jvms->bci(), log->identify(method()));
1310
}
1311
1312
if (!method()->is_static()) {
1313
// We need an explicit receiver null_check before checking its type in predicate.
1314
// We share a map with the caller, so his JVMS gets adjusted.
1315
Node* receiver = kit.null_check_receiver_before_call(method());
1316
if (kit.stopped()) {
1317
return kit.transfer_exceptions_into_jvms();
1318
}
1319
}
1320
1321
int n_predicates = _intrinsic->predicates_count();
1322
assert(n_predicates > 0, "sanity");
1323
1324
JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));
1325
1326
// Region for normal compilation code if intrinsic failed.
1327
Node* slow_region = new RegionNode(1);
1328
1329
int results = 0;
1330
for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
1331
#ifdef ASSERT
1332
JVMState* old_jvms = kit.jvms();
1333
SafePointNode* old_map = kit.map();
1334
Node* old_io = old_map->i_o();
1335
Node* old_mem = old_map->memory();
1336
Node* old_exc = old_map->next_exception();
1337
#endif
1338
Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
1339
#ifdef ASSERT
1340
// Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
1341
assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
1342
SafePointNode* new_map = kit.map();
1343
assert(old_io == new_map->i_o(), "generate_predicate should not change i_o");
1344
assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
1345
assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
1346
#endif
1347
if (!kit.stopped()) {
1348
PreserveJVMState pjvms(&kit);
1349
// Generate intrinsic code:
1350
JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
1351
if (new_jvms == NULL) {
1352
// Intrinsic failed, use normal compilation path for this predicate.
1353
slow_region->add_req(kit.control());
1354
} else {
1355
kit.add_exception_states_from(new_jvms);
1356
kit.set_jvms(new_jvms);
1357
if (!kit.stopped()) {
1358
result_jvms[results++] = kit.jvms();
1359
}
1360
}
1361
}
1362
if (else_ctrl == NULL) {
1363
else_ctrl = kit.C->top();
1364
}
1365
kit.set_control(else_ctrl);
1366
}
1367
if (!kit.stopped()) {
1368
// Final 'else' after predicates.
1369
slow_region->add_req(kit.control());
1370
}
1371
if (slow_region->req() > 1) {
1372
PreserveJVMState pjvms(&kit);
1373
// Generate normal compilation code:
1374
kit.set_control(gvn.transform(slow_region));
1375
JVMState* new_jvms = _cg->generate(kit.sync_jvms());
1376
if (kit.failing())
1377
return NULL; // might happen because of NodeCountInliningCutoff
1378
assert(new_jvms != NULL, "must be");
1379
kit.add_exception_states_from(new_jvms);
1380
kit.set_jvms(new_jvms);
1381
if (!kit.stopped()) {
1382
result_jvms[results++] = kit.jvms();
1383
}
1384
}
1385
1386
if (results == 0) {
1387
// All paths ended in uncommon traps.
1388
(void) kit.stop();
1389
return kit.transfer_exceptions_into_jvms();
1390
}
1391
1392
if (results == 1) { // Only one path
1393
kit.set_jvms(result_jvms[0]);
1394
return kit.transfer_exceptions_into_jvms();
1395
}
1396
1397
// Merge all paths.
1398
kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
1399
RegionNode* region = new RegionNode(results + 1);
1400
Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
1401
for (int i = 0; i < results; i++) {
1402
JVMState* jvms = result_jvms[i];
1403
int path = i + 1;
1404
SafePointNode* map = jvms->map();
1405
region->init_req(path, map->control());
1406
iophi->set_req(path, map->i_o());
1407
if (i == 0) {
1408
kit.set_jvms(jvms);
1409
} else {
1410
kit.merge_memory(map->merged_memory(), region, path);
1411
}
1412
}
1413
kit.set_control(gvn.transform(region));
1414
kit.set_i_o(gvn.transform(iophi));
1415
// Transform new memory Phis.
1416
for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
1417
Node* phi = mms.memory();
1418
if (phi->is_Phi() && phi->in(0) == region) {
1419
mms.set_memory(gvn.transform(phi));
1420
}
1421
}
1422
1423
// Merge debug info.
1424
Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
1425
uint tos = kit.jvms()->stkoff() + kit.sp();
1426
Node* map = kit.map();
1427
uint limit = map->req();
1428
for (uint i = TypeFunc::Parms; i < limit; i++) {
1429
// Skip unused stack slots; fast forward to monoff();
1430
if (i == tos) {
1431
i = kit.jvms()->monoff();
1432
if( i >= limit ) break;
1433
}
1434
Node* n = map->in(i);
1435
ins[0] = n;
1436
const Type* t = gvn.type(n);
1437
bool needs_phi = false;
1438
for (int j = 1; j < results; j++) {
1439
JVMState* jvms = result_jvms[j];
1440
Node* jmap = jvms->map();
1441
Node* m = NULL;
1442
if (jmap->req() > i) {
1443
m = jmap->in(i);
1444
if (m != n) {
1445
needs_phi = true;
1446
t = t->meet_speculative(gvn.type(m));
1447
}
1448
}
1449
ins[j] = m;
1450
}
1451
if (needs_phi) {
1452
Node* phi = PhiNode::make(region, n, t);
1453
for (int j = 1; j < results; j++) {
1454
phi->set_req(j + 1, ins[j]);
1455
}
1456
map->set_req(i, gvn.transform(phi));
1457
}
1458
}
1459
1460
return kit.transfer_exceptions_into_jvms();
1461
}
1462
1463
//-------------------------UncommonTrapCallGenerator-----------------------------
1464
// Internal class which replaces the call site with an uncommon trap.
1465
class UncommonTrapCallGenerator : public CallGenerator {
1466
Deoptimization::DeoptReason _reason;
1467
Deoptimization::DeoptAction _action;
1468
1469
public:
1470
UncommonTrapCallGenerator(ciMethod* m,
1471
Deoptimization::DeoptReason reason,
1472
Deoptimization::DeoptAction action)
1473
: CallGenerator(m)
1474
{
1475
_reason = reason;
1476
_action = action;
1477
}
1478
1479
virtual bool is_virtual() const { ShouldNotReachHere(); return false; }
1480
virtual bool is_trap() const { return true; }
1481
1482
virtual JVMState* generate(JVMState* jvms);
1483
};
1484
1485
1486
CallGenerator*
1487
CallGenerator::for_uncommon_trap(ciMethod* m,
1488
Deoptimization::DeoptReason reason,
1489
Deoptimization::DeoptAction action) {
1490
return new UncommonTrapCallGenerator(m, reason, action);
1491
}
1492
1493
1494
JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
1495
GraphKit kit(jvms);
1496
kit.C->print_inlining_update(this);
1497
// Take the trap with arguments pushed on the stack. (Cf. null_check_receiver).
1498
// Callsite signature can be different from the actual method being called (i.e. _linkTo* sites).
1499
// Use callsite signature always.
1500
ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
1501
int nargs = declared_method->arg_size();
1502
kit.inc_sp(nargs);
1503
assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
1504
if (_reason == Deoptimization::Reason_class_check &&
1505
_action == Deoptimization::Action_maybe_recompile) {
1506
// Temp fix for 6529811
1507
// Don't allow uncommon_trap to override our decision to recompile in the event
1508
// of a class cast failure for a monomorphic call as it will never let us convert
1509
// the call to either bi-morphic or megamorphic and can lead to unc-trap loops
1510
bool keep_exact_action = true;
1511
kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
1512
} else {
1513
kit.uncommon_trap(_reason, _action);
1514
}
1515
return kit.transfer_exceptions_into_jvms();
1516
}
1517
1518
// (Note: Moved hook_up_call to GraphKit::set_edges_for_java_call.)
1519
1520
// (Note: Merged hook_up_exits into ParseGenerator::generate.)
1521
1522