Path: blob/master/src/hotspot/share/opto/callGenerator.cpp
/*
 * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/sharedRuntime.hpp"
#include "ci/ciNativeEntryPoint.hpp"
#include "utilities/debug.hpp"

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
  return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
  ciMethod* symbolic_info = caller->get_method_at_bci(bci);
  return is_inlined_method_handle_intrinsic(symbolic_info, m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* symbolic_info, ciMethod* m) {
  return symbolic_info->is_method_handle_intrinsic() && !m->is_method_handle_intrinsic();
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
 private:
  bool  _is_osr;
  float _expected_uses;

 public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
  }

  virtual bool      is_parse() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
  int is_osr() { return _is_osr; }
};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

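// A minimal usage sketch (illustrative only; caller_jvms/expected_uses are
// placeholders): clients such as Compile::call_generator() obtain a generator
// from one of the factories defined below and drive it with the caller's
// JVMState. A NULL result from generate() means the attempt was abandoned
// (for example, the compile is bailing out).
//
//   CallGenerator* cg = CallGenerator::for_inline(callee, expected_uses);
//   if (cg != NULL) {
//     JVMState* new_jvms = cg->generate(caller_jvms);  // NULL => no code emitted
//   }
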
//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

 protected:
  void set_call_node(CallStaticJavaNode* call) { _call_node = call; }

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _separate_io_proj(separate_io_proj)
  {
  }
  virtual JVMState* generate(JVMState* jvms);

  virtual CallNode* call_node() const { return _call_node; }
  virtual CallGenerator* with_call_node(CallNode* call) {
    DirectCallGenerator* dcg = new DirectCallGenerator(method(), _separate_io_proj);
    dcg->set_call_node(call->as_CallStaticJava());
    return dcg;
  }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode* call = new CallStaticJavaNode(kit.C, tf(), target, method());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
    // additional information about the method being invoked should be attached
    // to the call site to make resolution logic work
    // (see SharedRuntime::resolve_static_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_intrinsic() ||
        method()->is_compiled_lambda_form()) {
      call->set_method_handle_invoke(true);
    }
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
 private:
  int                  _vtable_index;
  bool                 _separate_io_proj;
  CallDynamicJavaNode* _call_node;

 protected:
  void set_call_node(CallDynamicJavaNode* call) { _call_node = call; }

 public:
  VirtualCallGenerator(ciMethod* method, int vtable_index, bool separate_io_proj)
    : CallGenerator(method), _vtable_index(vtable_index), _separate_io_proj(separate_io_proj),
      _call_node(NULL)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const { return true; }
  virtual JVMState* generate(JVMState* jvms);

  virtual CallNode* call_node() const { return _call_node; }
  int vtable_index() const { return _vtable_index; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    VirtualCallGenerator* cg = new VirtualCallGenerator(method(), _vtable_index, _separate_io_proj);
    cg->set_call_node(call->as_CallDynamicJava());
    return cg;
  }
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  kit.C->print_inlining_update(this);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it.  The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
    ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
    int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
    kit.inc_sp(arg_size);  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod* caller = kit.method();
  ciMethodData* caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
      ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
        >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode* call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index);
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call (optimized virtual or virtual)
    // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
    // about the method being invoked should be attached to the call site to
    // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later

  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false /*must_throw*/, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call.  Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

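// Note on the explicit null check in VirtualCallGenerator::generate() above:
// the receiver is null-checked eagerly whenever the runtime cannot rely on an
// implicit check instead -- inline caches or implicit null checks are
// disabled, the zero page is not read-protected, or this caller has already
// trapped on Reason_null_check at least ImplicitNullCheckThreshold times.
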
CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller.  Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index, false /*separate_io_projs*/);
}

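// Illustrative (hypothetical) selection between the two out-of-line factories
// above -- the real policy lives in Compile::call_generator() and weighs much
// more (profiles, intrinsics, inlining heuristics):
//
//   CallGenerator* cg = callee->can_be_statically_bound()
//       ? CallGenerator::for_direct_call(callee)
//       : CallGenerator::for_virtual_call(callee, vtable_index);
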
// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
 private:
  jlong _unique_id;   // unique id for log compilation
  bool  _is_pure_call; // a hint that the call doesn't have important side effects to care about

 protected:
  CallGenerator* _inline_cg;
  virtual bool do_late_inline_check(Compile* C, JVMState* jvms) { return true; }
  virtual CallGenerator* inline_cg() const { return _inline_cg; }
  virtual bool is_pure_call() const { return _is_pure_call; }

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg, bool is_pure_call = false) :
    DirectCallGenerator(method, true), _unique_id(0), _is_pure_call(is_pure_call), _inline_cg(inline_cg) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_assert_ready();
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
    C->print_inlining_move_to(this);
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineCallGenerator* cg = new LateInlineCallGenerator(method(), _inline_cg, _is_pure_call);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

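// Late-inline life cycle, as implemented above: generate() emits an ordinary
// CallStaticJavaNode (with separate exceptional projections) and, except for
// the method handle subclass below which does its own enqueueing, registers
// this generator via Compile::add_late_inline(). After the main parse has
// finished, do_late_inline() revisits the recorded call node and either
// replaces it with the body produced by _inline_cg or leaves the call as is.
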
class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  bool      _input_not_const;

  virtual bool do_late_inline_check(Compile* C, JVMState* jvms);

 public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, NULL), _caller(caller), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);

    Compile* C = Compile::current();
    if (_input_not_const) {
      // inlining won't be possible so no need to enqueue right now.
      call_node()->set_generator(this);
    } else {
      C->add_late_inline(this);
    }
    return new_jvms;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineMHCallGenerator* cg = new LateInlineMHCallGenerator(_caller, method(), _input_not_const);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
  // When inlining a virtual call, the null check at the call and the call itself can throw. These 2 paths have different
  // expression stacks which causes late inlining to break. The MH invoker is not expected to be called from a method with
  // exception handlers. When there is no exception handler, GraphKit::builtin_throw() pops the stack which solves the issue
  // of late inlining with exceptions.
  assert(!jvms->method()->has_exception_handlers() ||
         (method()->intrinsic_id() != vmIntrinsics::_linkToVirtual &&
          method()->intrinsic_id() != vmIntrinsics::_linkToInterface), "no exception handler expected");
  // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
  bool allow_inline = C->inlining_incrementally();
  bool input_not_const = true;
  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, input_not_const);
  assert(!input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place

  if (cg != NULL) {
    assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline, "we're doing late inlining");
    _inline_cg = cg;
    C->dec_number_of_mh_late_inlines();
    return true;
  } else {
    // Method handle call which has a constant appendix argument should be either inlined or replaced with a direct call
    // unless there's a signature mismatch between caller and callee. If the failure occurs, there's not much to be improved later,
    // so don't reinstall the generator to avoid pushing the generator between IGVN and incremental inlining indefinitely.
    return false;
  }
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  assert(IncrementalInlineMH, "required");
  Compile::current()->inc_number_of_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}

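// Bookkeeping note: for_mh_late_inline() above increments the compile-wide
// count of pending method handle late inlines, and
// LateInlineMHCallGenerator::do_late_inline_check() decrements it once a
// concrete generator has been produced, so the counter reflects call sites
// that still await a decision.
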
// Allow inlining decisions to be delayed
class LateInlineVirtualCallGenerator : public VirtualCallGenerator {
 private:
  jlong          _unique_id;   // unique id for log compilation
  CallGenerator* _inline_cg;
  ciMethod*      _callee;
  bool           _is_pure_call;
  float          _prof_factor;

 protected:
  virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
  virtual CallGenerator* inline_cg() const { return _inline_cg; }
  virtual bool is_pure_call() const { return _is_pure_call; }

 public:
  LateInlineVirtualCallGenerator(ciMethod* method, int vtable_index, float prof_factor)
  : VirtualCallGenerator(method, vtable_index, true /*separate_io_projs*/),
    _unique_id(0), _inline_cg(NULL), _callee(NULL), _is_pure_call(false), _prof_factor(prof_factor) {
    assert(IncrementalInlineVirtual, "required");
  }

  virtual bool is_late_inline() const { return true; }

  virtual bool is_virtual_late_inline() const { return true; }

  // Convert the CallDynamicJava into an inline
  virtual void do_late_inline();

  virtual void set_callee_method(ciMethod* m) {
    assert(_callee == NULL, "repeated inlining attempt");
    _callee = m;
  }

  virtual JVMState* generate(JVMState* jvms) {
    // Emit the CallDynamicJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    JVMState* new_jvms = VirtualCallGenerator::generate(jvms);
    if (call_node() != NULL) {
      call_node()->set_generator(this);
    }
    return new_jvms;
  }

  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_assert_ready();
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
    C->print_inlining_move_to(this);
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineVirtualCallGenerator* cg = new LateInlineVirtualCallGenerator(method(), vtable_index(), _prof_factor);
    cg->set_call_node(call->as_CallDynamicJava());
    return cg;
  }
};

bool LateInlineVirtualCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
  // Method handle linker case is handled in CallDynamicJavaNode::Ideal().
  // Unless inlining is performed, _override_symbolic_info bit will be set in DirectCallGenerator::generate().

  // Implicit receiver null checks introduce problems when exception states are combined.
  Node* receiver = jvms->map()->argument(jvms, 0);
  const Type* recv_type = C->initial_gvn()->type(receiver);
  if (recv_type->maybe_null()) {
    if (C->print_inlining() || C->print_intrinsics()) {
      C->print_inlining(method(), jvms->depth()-1, call_node()->jvms()->bci(),
                        "late call devirtualization failed (receiver may be null)");
    }
    return false;
  }
  // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
  bool allow_inline = C->inlining_incrementally();
  if (!allow_inline && _callee->holder()->is_interface()) {
    // Don't convert the interface call to a direct call guarded by an interface subtype check.
    if (C->print_inlining() || C->print_intrinsics()) {
      C->print_inlining(method(), jvms->depth()-1, call_node()->jvms()->bci(),
                        "late call devirtualization failed (interface call)");
    }
    return false;
  }
  CallGenerator* cg = C->call_generator(_callee,
                                        vtable_index(),
                                        false /*call_does_dispatch*/,
                                        jvms,
                                        allow_inline,
                                        _prof_factor,
                                        NULL /*speculative_receiver_type*/,
                                        true /*allow_intrinsics*/);

  if (cg != NULL) {
    assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline, "we're doing late inlining");
    _inline_cg = cg;
    return true;
  } else {
    // Virtual call which provably doesn't dispatch should be either inlined or replaced with a direct call.
    assert(false, "no progress");
    return false;
  }
}

CallGenerator* CallGenerator::for_late_inline_virtual(ciMethod* m, int vtable_index, float prof_factor) {
  assert(IncrementalInlineVirtual, "required");
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new LateInlineVirtualCallGenerator(m, vtable_index, prof_factor);
}

void LateInlineCallGenerator::do_late_inline() {
  CallGenerator::do_late_inline_helper();
}

void LateInlineMHCallGenerator::do_late_inline() {
  CallGenerator::do_late_inline_helper();
}

void LateInlineVirtualCallGenerator::do_late_inline() {
  assert(_callee != NULL, "required"); // set up in CallDynamicJavaNode::Ideal
  CallGenerator::do_late_inline_helper();
}

static bool has_non_debug_usages(Node* n) {
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* m = n->fast_out(i);
    if (!m->is_SafePoint()
        || (m->is_Call() && m->as_Call()->has_non_debug_use(n))) {
      return true;
    }
  }
  return false;
}

static bool is_box_cache_valid(CallNode* call) {
  ciInstanceKlass* klass = call->as_CallStaticJava()->method()->holder();
  return klass->is_box_cache_valid();
}

// delay box in runtime, treat box as a scalarized object
static void scalarize_debug_usages(CallNode* call, Node* resproj) {
  GraphKit kit(call->jvms());
  PhaseGVN& gvn = kit.gvn();

  ProjNode* res = resproj->as_Proj();
  ciInstanceKlass* klass = call->as_CallStaticJava()->method()->holder();
  int n_fields = klass->nof_nonstatic_fields();
  assert(n_fields == 1, "the klass must be an auto-boxing klass");

  for (DUIterator_Last imin, i = res->last_outs(imin); i >= imin;) {
    SafePointNode* sfpt = res->last_out(i)->as_SafePoint();
    uint first_ind = sfpt->req() - sfpt->jvms()->scloff();
    Node* sobj = new SafePointScalarObjectNode(gvn.type(res)->isa_oopptr(),
#ifdef ASSERT
                                               call,
#endif // ASSERT
                                               first_ind, n_fields, true);
    sobj->init_req(0, kit.root());
    sfpt->add_req(call->in(TypeFunc::Parms));
    sobj = gvn.transform(sobj);
    JVMState* jvms = sfpt->jvms();
    jvms->set_endoff(sfpt->req());
    int start = jvms->debug_start();
    int end   = jvms->debug_end();
    int num_edges = sfpt->replace_edges_in_range(res, sobj, start, end, &gvn);
    i -= num_edges;
  }

  assert(res->outcnt() == 0, "the box must have no use after replace");

#ifndef PRODUCT
  if (PrintEliminateAllocations) {
    tty->print("++++ Eliminated: %d ", call->_idx);
    call->as_CallStaticJava()->method()->print_short_name(tty);
    tty->cr();
  }
#endif
}

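// Overview of do_late_inline_helper() below: it gives up quietly if the call
// node has died or its inputs have degenerated to top, and likewise for a call
// caught in an unreachable loop (one whose own projections feed its inputs).
// A pure call whose result is unused is simply deleted. Otherwise a fresh
// SafePointNode/JVMState pair is synthesized from the call's inputs,
// do_late_inline_check() vets it, and on success the delegate generator
// (inline_cg()) parses the callee in place of the call, whose uses are then
// rewired by GraphKit::replace_call().
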
void CallGenerator::do_late_inline_helper() {
  assert(is_late_inline(), "only late inline allowed");

  // Can't inline it
  CallNode* call = call_node();
  if (call == NULL || call->outcnt() == 0 ||
      call->in(0) == NULL || call->in(0)->is_top()) {
    return;
  }

  const TypeTuple* r = call->tf()->domain();
  for (int i1 = 0; i1 < method()->arg_size(); i1++) {
    if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }
  if (call->in(TypeFunc::Memory)->is_MergeMem()) {
    MergeMemNode* merge_mem = call->in(TypeFunc::Memory)->as_MergeMem();
    if (merge_mem->base_memory() == merge_mem->empty_memory()) {
      return; // dead path
    }
  }

  // check for unreachable loop
  CallProjections callprojs;
  call->extract_projections(&callprojs, true);
  if ((callprojs.fallthrough_catchproj == call->in(0)) ||
      (callprojs.catchall_catchproj    == call->in(0)) ||
      (callprojs.fallthrough_memproj   == call->in(TypeFunc::Memory)) ||
      (callprojs.catchall_memproj      == call->in(TypeFunc::Memory)) ||
      (callprojs.fallthrough_ioproj    == call->in(TypeFunc::I_O)) ||
      (callprojs.catchall_ioproj       == call->in(TypeFunc::I_O)) ||
      (callprojs.resproj != NULL && call->find_edge(callprojs.resproj) != -1) ||
      (callprojs.exobj   != NULL && call->find_edge(callprojs.exobj)   != -1)) {
    return;
  }

  Compile* C = Compile::current();
  // Remove inlined methods from Compiler's lists.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  bool result_not_used = false;

  if (is_pure_call()) {
    // Disabled due to JDK-8276112
    if (false && is_boxing_late_inline() && callprojs.resproj != nullptr) {
      // replace box node to scalar node only in case it is directly referenced by debug info
      assert(call->as_CallStaticJava()->is_boxing_method(), "sanity");
      if (!has_non_debug_usages(callprojs.resproj) && is_box_cache_valid(call)) {
        scalarize_debug_usages(call, callprojs.resproj);
      }
    }

    // The call is marked as pure (no important side effects), but result isn't used.
    // It's safe to remove the call.
    result_not_used = (callprojs.resproj == NULL || callprojs.resproj->outcnt() == 0);
  }

  if (result_not_used) {
    GraphKit kit(call->jvms());
    kit.replace_call(call, C->top(), true);
  } else {
    // Make a clone of the JVMState that is appropriate to use for driving a parse
    JVMState* old_jvms = call->jvms();
    JVMState* jvms = old_jvms->clone_shallow(C);
    uint size = call->req();
    SafePointNode* map = new SafePointNode(size, jvms);
    for (uint i1 = 0; i1 < size; i1++) {
      map->init_req(i1, call->in(i1));
    }

    // Make sure the state is a MergeMem for parsing.
    if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
      Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
      C->initial_gvn()->set_type_bottom(mem);
      map->set_req(TypeFunc::Memory, mem);
    }

    uint nargs = method()->arg_size();
    // blow away old call arguments
    Node* top = C->top();
    for (uint i1 = 0; i1 < nargs; i1++) {
      map->set_req(TypeFunc::Parms + i1, top);
    }
    jvms->set_map(map);

    // Make enough space in the expression stack to transfer
    // the incoming arguments and return value.
    map->ensure_stack(jvms, jvms->method()->max_stack());
    for (uint i1 = 0; i1 < nargs; i1++) {
      map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
    }

    C->print_inlining_assert_ready();

    C->print_inlining_move_to(this);

    C->log_late_inline(this);

    // JVMState is ready, so time to perform some checks and prepare for inlining attempt.
    if (!do_late_inline_check(C, jvms)) {
      map->disconnect_inputs(C);
      C->print_inlining_update_delayed(this);
      return;
    }

    // Setup default node notes to be picked up by the inlining
    Node_Notes* old_nn = C->node_notes_at(call->_idx);
    if (old_nn != NULL) {
      Node_Notes* entry_nn = old_nn->clone(C);
      entry_nn->set_jvms(jvms);
      C->set_default_node_notes(entry_nn);
    }

    // Now perform the inlining using the synthesized JVMState
    JVMState* new_jvms = inline_cg()->generate(jvms);
    if (new_jvms == NULL)  return;  // no change
    if (C->failing())      return;

    // Capture any exceptional control flow
    GraphKit kit(new_jvms);

    // Find the result object
    Node* result = C->top();
    int result_size = method()->return_type()->size();
    if (result_size != 0 && !kit.stopped()) {
      result = (result_size == 1) ? kit.pop() : kit.pop_pair();
    }

    if (inline_cg()->is_inline()) {
      C->set_has_loops(C->has_loops() || inline_cg()->method()->has_loops());
      C->env()->notice_inlined_method(inline_cg()->method());
    }
    C->set_inlining_progress(true);
    C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup
    kit.replace_call(call, result, true);
  }
}

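// The three subclasses that follow reuse the late-inline machinery but enqueue
// themselves on dedicated worklists (Compile::add_string_late_inline,
// add_boxing_late_inline, add_vector_reboxing_late_inline) so that each
// optimization phase can process its own batch of deferred call sites.
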
class LateInlineStringCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_string_late_inline() const { return true; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineStringCallGenerator* cg = new LateInlineStringCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineStringCallGenerator(method, inline_cg);
}

class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    C->add_boxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_boxing_late_inline() const { return true; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineBoxingCallGenerator* cg = new LateInlineBoxingCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineBoxingCallGenerator(method, inline_cg);
}

class LateInlineVectorReboxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineVectorReboxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    C->add_vector_reboxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineVectorReboxingCallGenerator* cg = new LateInlineVectorReboxingCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

//   static CallGenerator* for_vector_reboxing_late_inline(ciMethod* m, CallGenerator* inline_cg);
CallGenerator* CallGenerator::for_vector_reboxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineVectorReboxingCallGenerator(method, inline_cg);
}

//------------------------PredictedCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;
  bool           _exact_check;

 public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, bool exact_check,
                         float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
    if (hit_prob > PROB_MAX)  hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN)  hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
    _exact_check        = exact_check;
  }

  virtual bool is_virtual()  const { return true; }
  virtual bool is_inline()   const { return _if_hit->is_inline(); }
  virtual bool is_deferred() const { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit,
                                    /*exact_check=*/true, hit_prob);
}

CallGenerator* CallGenerator::for_guarded_call(ciKlass* guarded_receiver,
                                               CallGenerator* if_missed,
                                               CallGenerator* if_hit) {
  return new PredictedCallGenerator(guarded_receiver, if_missed, if_hit,
                                    /*exact_check=*/false, PROB_ALWAYS);
}

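// Rough shape of the code emitted by generate() below (an illustrative sketch,
// not literal IR):
//
//   if (receiver == NULL)  deoptimize;                 // explicit null check
//   if (klass(receiver) matches _predicted_receiver) {
//     ... hot path: _if_hit, usually inlined ...
//   } else {
//     ... slow path: _if_missed ...
//   }
//   // a Region/Phi diamond then merges control, i/o, memory and stack state
//
// With _exact_check the guard compares the receiver's klass directly
// (type_check_receiver); for_guarded_call() instead requests a subtype check.
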
JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);
  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' exact='%d' klass='%d'",
              jvms->bci(), (_exact_check ? 1 : 0), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  // Make a copy of the replaced nodes in case we need to restore them
  ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
  replaced_nodes.clone();

  Node* casted_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = NULL;
  if (_exact_check) {
    slow_ctl = kit.type_check_receiver(receiver, _predicted_receiver, _hit_prob,
                                       &casted_receiver);
  } else {
    slow_ctl = kit.subtype_check_receiver(receiver, _predicted_receiver,
                                          &casted_receiver);
  }

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms = NULL;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // Instance does not match the predicted type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Fall through if the instance matches the desired type.
  kit.replace_in_map(receiver, casted_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // There are 2 branches and the replaced nodes are only valid on
  // one: restore the replaced nodes to what they were before the
  // branch.
  kit.map()->set_replaced_nodes(replaced_nodes);

  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  // Merge memory
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline) {
  assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const);
  Compile* C = Compile::current();
  if (cg != NULL) {
    if (AlwaysIncrementalInline) {
      return CallGenerator::for_late_inline(callee, cg);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

  if (IncrementalInlineMH && call_site_count > 0 &&
      (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
    return CallGenerator::for_direct_call(callee);
  }
}

class NativeCallGenerator : public CallGenerator {
 private:
  address _call_addr;
  ciNativeEntryPoint* _nep;
 public:
  NativeCallGenerator(ciMethod* m, address call_addr, ciNativeEntryPoint* nep)
    : CallGenerator(m), _call_addr(call_addr), _nep(nep) {}

  virtual JVMState* generate(JVMState* jvms);
};

JVMState* NativeCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);

  Node* call = kit.make_native_call(_call_addr, tf(), method()->arg_size(), _nep); // -fallback, - nep
  if (call == NULL)  return NULL;

  kit.C->print_inlining_update(this);
  if (kit.C->log() != NULL) {
    kit.C->log()->elem("l2n_intrinsification_success bci='%d' entry_point='" INTPTR_FORMAT "'", jvms->bci(), p2i(_call_addr));
  }

  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  if (StressMethodHandleLinkerInlining) {
    allow_inline = false;
  }
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* recv_toop = receiver->bottom_type()->isa_oopptr();
        if (recv_toop != NULL) {
          ciMethod* target = recv_toop->const_oop()->as_method_handle()->get_vmtarget();
          const int vtable_index = Method::invalid_vtable_index;

          if (!ciMethod::is_consistent_info(callee, target)) {
            print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                   "signatures mismatch");
            return NULL;
          }

          CallGenerator* cg = C->call_generator(target, vtable_index,
                                                false /* call_does_dispatch */,
                                                jvms,
                                                allow_inline,
                                                PROB_ALWAYS);
          return cg;
        } else {
          assert(receiver->bottom_type() == TypePtr::NULL_PTR, "not a null: %s",
                 Type::str(receiver->bottom_type()));
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "receiver is always null");
        }
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "receiver not constant");
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      // Get MemberName argument:
      Node* member_name = kit.argument(callee->arg_size() - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        // In lambda forms we erase signature types to avoid resolving issues
        // involving class loaders.  When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to their
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          Node* arg = kit.argument(0);
          const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
          const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
          if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
            const Type* recv_type = arg_type->filter_speculative(sig_type); // keep speculative part
            Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, recv_type));
            kit.set_argument(0, cast_obj);
          }
        }
        // Cast reference arguments to their types.
        for (int i = 0, j = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            Node* arg = kit.argument(receiver_skip + j);
            const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
            const Type*       sig_type = TypeOopPtr::make_from_klass(t->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              const Type* narrowed_arg_type = arg_type->filter_speculative(sig_type); // keep speculative part
              Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, narrowed_arg_type));
              kit.set_argument(receiver_skip + j, cast_obj);
            }
          }
          j += t->size();  // long and double take two slots
        }

        // Try to get the most accurate receiver type
        const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
        const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
        int  vtable_index       = Method::invalid_vtable_index;
        bool call_does_dispatch = false;

        ciKlass* speculative_receiver_type = NULL;
        if (is_virtual_or_interface) {
          ciInstanceKlass* klass = target->holder();
          Node*             receiver_node = kit.argument(0);
          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
          // optimize_virtual_call() takes 2 different holder
          // arguments for a corner case that doesn't apply here (see
          // Parse::do_call())
          target = C->optimize_virtual_call(caller, klass, klass,
                                            target, receiver_type, is_virtual,
                                            call_does_dispatch, vtable_index,  // out-parameters
                                            false /* check_access */);
          // We lack profiling at this call but type speculation may
          // provide us with a type
          speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
        }
        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
                                              allow_inline,
                                              PROB_ALWAYS,
                                              speculative_receiver_type);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "member_name not constant");
      }
    }
    break;

  case vmIntrinsics::_linkToNative:
    {
      Node* addr_n = kit.argument(1); // target address
      Node* nep_n  = kit.argument(callee->arg_size() - 1); // NativeEntryPoint
      // This check needs to be kept in sync with the one in CallStaticJavaNode::Ideal
      if (addr_n->Opcode() == Op_ConL && nep_n->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeLong*   addr_t = addr_n->bottom_type()->is_long();
        const TypeOopPtr* nep_t  = nep_n->bottom_type()->is_oopptr();
        address addr = (address) addr_t->get_con();
        ciNativeEntryPoint* nep = nep_t->const_oop()->as_native_entry_point();
        return new NativeCallGenerator(callee, addr, nep);
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "NativeEntryPoint not constant");
      }
    }
    break;

  default:
    fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid));
    break;
  }
  return NULL;
}

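// Common thread in the cases above: a constant MethodHandle (for _invokeBasic)
// or MemberName (for the _linkTo* linkers) lets the compiler resolve the real
// target (vmtarget) and replace the linker intrinsic with an ordinary call
// generator for it; a non-constant input sets input_not_const and leaves the
// decision to the (late-inline) caller.
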
//------------------------PredicatedIntrinsicGenerator------------------------------
// Internal class which handles all predicated Intrinsic calls.
class PredicatedIntrinsicGenerator : public CallGenerator {
  CallGenerator* _intrinsic;
  CallGenerator* _cg;

 public:
  PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
                               CallGenerator* cg)
    : CallGenerator(cg->method())
  {
    _intrinsic = intrinsic;
    _cg        = cg;
  }

  virtual bool is_virtual()   const { return true; }
  virtual bool is_inline()    const { return true; }
  virtual bool is_intrinsic() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
                                                       CallGenerator* cg) {
  return new PredicatedIntrinsicGenerator(intrinsic, cg);
}

JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
  // The code we want to generate here is:
  //    if (receiver == NULL)
  //      uncommon_trap
  //    if (predicate(0))
  //      do_intrinsic(0)
  //    else
  //      if (predicate(1))
  //        do_intrinsic(1)
  //    ...
  //    else
  //      do_java_comp

  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicated_intrinsic bci='%d' method='%d'",
              jvms->bci(), log->identify(method()));
  }

  if (!method()->is_static()) {
    // We need an explicit receiver null_check before checking its type in predicate.
    // We share a map with the caller, so his JVMS gets adjusted.
    Node* receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      return kit.transfer_exceptions_into_jvms();
    }
  }

  int n_predicates = _intrinsic->predicates_count();
  assert(n_predicates > 0, "sanity");

  JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));

  // Region for normal compilation code if intrinsic failed.
  Node* slow_region = new RegionNode(1);

  int results = 0;
  for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
#ifdef ASSERT
    JVMState* old_jvms = kit.jvms();
    SafePointNode* old_map = kit.map();
    Node* old_io  = old_map->i_o();
    Node* old_mem = old_map->memory();
    Node* old_exc = old_map->next_exception();
#endif
    Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
#ifdef ASSERT
    // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
    assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
    SafePointNode* new_map = kit.map();
    assert(old_io  == new_map->i_o(), "generate_predicate should not change i_o");
    assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
    assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
#endif
    if (!kit.stopped()) {
      PreserveJVMState pjvms(&kit);
      // Generate intrinsic code:
      JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
      if (new_jvms == NULL) {
        // Intrinsic failed, use normal compilation path for this predicate.
        slow_region->add_req(kit.control());
      } else {
        kit.add_exception_states_from(new_jvms);
        kit.set_jvms(new_jvms);
        if (!kit.stopped()) {
          result_jvms[results++] = kit.jvms();
        }
      }
    }
    if (else_ctrl == NULL) {
      else_ctrl = kit.C->top();
    }
    kit.set_control(else_ctrl);
  }
  if (!kit.stopped()) {
    // Final 'else' after predicates.
    slow_region->add_req(kit.control());
  }
  if (slow_region->req() > 1) {
    PreserveJVMState pjvms(&kit);
    // Generate normal compilation code:
    kit.set_control(gvn.transform(slow_region));
    JVMState* new_jvms = _cg->generate(kit.sync_jvms());
    if (kit.failing())
      return NULL;  // might happen because of NodeCountInliningCutoff
    assert(new_jvms != NULL, "must be");
    kit.add_exception_states_from(new_jvms);
    kit.set_jvms(new_jvms);
    if (!kit.stopped()) {
      result_jvms[results++] = kit.jvms();
    }
  }

  if (results == 0) {
    // All paths ended in uncommon traps.
    (void) kit.stop();
    return kit.transfer_exceptions_into_jvms();
  }

  if (results == 1) { // Only one path
    kit.set_jvms(result_jvms[0]);
    return kit.transfer_exceptions_into_jvms();
  }

  // Merge all paths.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(results + 1);
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  for (int i = 0; i < results; i++) {
    JVMState* jvms = result_jvms[i];
    int path = i + 1;
    SafePointNode* map = jvms->map();
    region->init_req(path, map->control());
    iophi->set_req(path, map->i_o());
    if (i == 0) {
      kit.set_jvms(jvms);
    } else {
      kit.merge_memory(map->merged_memory(), region, path);
    }
  }
  kit.set_control(gvn.transform(region));
  kit.set_i_o(gvn.transform(iophi));
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }

  // Merge debug info.
  Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  Node* map = kit.map();
  uint limit = map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* n = map->in(i);
    ins[0] = n;
    const Type* t = gvn.type(n);
    bool needs_phi = false;
    for (int j = 1; j < results; j++) {
      JVMState* jvms = result_jvms[j];
      Node* jmap = jvms->map();
      Node* m = NULL;
      if (jmap->req() > i) {
        m = jmap->in(i);
        if (m != n) {
          needs_phi = true;
          t = t->meet_speculative(gvn.type(m));
        }
      }
      ins[j] = m;
    }
    if (needs_phi) {
      Node* phi = PhiNode::make(region, n, t);
      for (int j = 1; j < results; j++) {
        phi->set_req(j + 1, ins[j]);
      }
      map->set_req(i, gvn.transform(phi));
    }
  }

  return kit.transfer_exceptions_into_jvms();
}

//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which handles call sites that are unconditionally replaced
// by an uncommon trap.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

 public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool is_virtual() const { ShouldNotReachHere(); return false; }
  virtual bool is_trap()    const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}


JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
  // Callsite signature can be different from actual method being called (i.e. _linkTo* sites).
  // Use callsite signature always.
  ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
  int nargs = declared_method->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}

// (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note:  Merged hook_up_exits into ParseGenerator::generate.)