Path: src/hotspot/share/opto/callGenerator.cpp
/*
 * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/sharedRuntime.hpp"
#include "ci/ciNativeEntryPoint.hpp"
#include "utilities/debug.hpp"

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
  return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
  ciMethod* symbolic_info = caller->get_method_at_bci(bci);
  return is_inlined_method_handle_intrinsic(symbolic_info, m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* symbolic_info, ciMethod* m) {
  return symbolic_info->is_method_handle_intrinsic() && !m->is_method_handle_intrinsic();
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
private:
  bool  _is_osr;
  float _expected_uses;

public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
  }

  virtual bool      is_parse() const           { return true; }
  virtual JVMState* generate(JVMState* jvms);
  int is_osr() { return _is_osr; }

};
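
// A minimal, illustrative sketch of the contract implemented below (the real
// driver is Parse::do_call() in doCall.cpp): generate() receives the caller's
// JVMState at the call site and returns the state after the call, or NULL if
// the attempt must be abandoned.
//
//   CallGenerator* cg = CallGenerator::for_inline(callee, expected_uses);
//   if (cg != NULL) {
//     JVMState* new_jvms = cg->generate(jvms);   // NULL => bailed out
//   }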
JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

protected:
  void set_call_node(CallStaticJavaNode* call) { _call_node = call; }

public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _separate_io_proj(separate_io_proj)
  {
  }
  virtual JVMState* generate(JVMState* jvms);

  virtual CallNode* call_node() const { return _call_node; }
  virtual CallGenerator* with_call_node(CallNode* call) {
    DirectCallGenerator* dcg = new DirectCallGenerator(method(), _separate_io_proj);
    dcg->set_call_node(call->as_CallStaticJava());
    return dcg;
  }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode* call = new CallStaticJavaNode(kit.C, tf(), target, method());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
    // additional information about the method being invoked should be attached
    // to the call site to make resolution logic work
    // (see SharedRuntime::resolve_static_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_intrinsic() ||
        method()->is_compiled_lambda_form()) {
      call->set_method_handle_invoke(true);
    }
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}
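
// Note on the code above: the resolve stub is chosen by whether the callee is
// static; non-static callees reached without dispatch go through the
// "optimized virtual" resolve stub instead, and the call node is marked with
// set_optimized_virtual(true) so the runtime resolution logic treats it
// accordingly.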

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
private:
  int _vtable_index;
  bool _separate_io_proj;
  CallDynamicJavaNode* _call_node;

protected:
  void set_call_node(CallDynamicJavaNode* call) { _call_node = call; }

public:
  VirtualCallGenerator(ciMethod* method, int vtable_index, bool separate_io_proj)
    : CallGenerator(method), _vtable_index(vtable_index), _separate_io_proj(separate_io_proj), _call_node(NULL)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const { return true; }
  virtual JVMState* generate(JVMState* jvms);

  virtual CallNode* call_node() const { return _call_node; }
  int vtable_index() const { return _vtable_index; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    VirtualCallGenerator* cg = new VirtualCallGenerator(method(), _vtable_index, _separate_io_proj);
    cg->set_call_node(call->as_CallDynamicJava());
    return cg;
  }
};
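
// For the generator above, vtable_index is either a usable index (>= 0) for a
// true vtable dispatch, or Method::invalid_vtable_index when resolution goes
// through an inline cache; the assert in generate() below checks that vtable
// calls are only emitted when inline caches are disabled.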

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  kit.C->print_inlining_update(this);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it.  The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
    ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
    int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
    kit.inc_sp(arg_size);  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod *caller = kit.method();
  ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
      ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
        >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches ");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode* call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index);
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call (optimized virtual or virtual)
    // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
    // about the method being invoked should be attached to the call site to
    // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later

  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false /*must_throw*/, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call.  Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller.  Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index, false /*separate_io_projs*/);
}
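
// A minimal, illustrative sketch of choosing between the factories above
// (the real policy lives in Compile::call_generator() in doCall.cpp):
//
//   CallGenerator* cg = call_does_dispatch
//       ? CallGenerator::for_virtual_call(callee, vtable_index)
//       : CallGenerator::for_direct_call(callee);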

// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
 private:
  jlong _unique_id;   // unique id for log compilation
  bool _is_pure_call; // a hint that the call doesn't have important side effects to care about

 protected:
  CallGenerator* _inline_cg;
  virtual bool do_late_inline_check(Compile* C, JVMState* jvms) { return true; }
  virtual CallGenerator* inline_cg() const { return _inline_cg; }
  virtual bool is_pure_call() const { return _is_pure_call; }

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg, bool is_pure_call = false) :
    DirectCallGenerator(method, true), _unique_id(0), _is_pure_call(is_pure_call), _inline_cg(inline_cg) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_assert_ready();
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
    C->print_inlining_move_to(this);
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineCallGenerator* cg = new LateInlineCallGenerator(method(), _inline_cg, _is_pure_call);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}
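
// Lifecycle of the late-inline generator above: generate() emits an ordinary
// CallStaticJavaNode and (unless it is a method handle late inline) registers
// itself via Compile::add_late_inline(); once the main parse is over,
// do_late_inline() revisits the call and replaces it with the inlined body.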

class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  bool _input_not_const;

  virtual bool do_late_inline_check(Compile* C, JVMState* jvms);

 public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, NULL), _caller(caller), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);

    Compile* C = Compile::current();
    if (_input_not_const) {
      // inlining won't be possible so no need to enqueue right now.
      call_node()->set_generator(this);
    } else {
      C->add_late_inline(this);
    }
    return new_jvms;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineMHCallGenerator* cg = new LateInlineMHCallGenerator(_caller, method(), _input_not_const);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
  // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
  bool allow_inline = C->inlining_incrementally();
  bool input_not_const = true;
  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, input_not_const);
  assert(!input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place

  if (cg != NULL) {
    assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline, "we're doing late inlining");
    _inline_cg = cg;
    C->dec_number_of_mh_late_inlines();
    return true;
  } else {
    // Method handle call which has a constant appendix argument should be either inlined or replaced with a direct call
    // unless there's a signature mismatch between caller and callee. If the failure occurs, there's not much to be improved later,
    // so don't reinstall the generator to avoid pushing the generator between IGVN and incremental inlining indefinitely.
    return false;
  }
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  assert(IncrementalInlineMH, "required");
  Compile::current()->inc_number_of_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}
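
// The virtual flavor below applies the same delayed decision to
// CallDynamicJava call sites: even when inlining is not allowed,
// do_late_inline_check() may still strength-reduce a virtual call that
// provably does not dispatch into a direct call.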

// Allow inlining decisions to be delayed
class LateInlineVirtualCallGenerator : public VirtualCallGenerator {
 private:
  jlong          _unique_id; // unique id for log compilation
  CallGenerator* _inline_cg;
  ciMethod*      _callee;
  bool           _is_pure_call;
  float          _prof_factor;

 protected:
  virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
  virtual CallGenerator* inline_cg() const { return _inline_cg; }
  virtual bool is_pure_call() const { return _is_pure_call; }

 public:
  LateInlineVirtualCallGenerator(ciMethod* method, int vtable_index, float prof_factor)
  : VirtualCallGenerator(method, vtable_index, true /*separate_io_projs*/),
    _unique_id(0), _inline_cg(NULL), _callee(NULL), _is_pure_call(false), _prof_factor(prof_factor) {}

  virtual bool is_late_inline() const { return true; }

  virtual bool is_virtual_late_inline() const { return true; }

  // Convert the CallDynamicJava into an inline
  virtual void do_late_inline();

  virtual void set_callee_method(ciMethod* m) {
    assert(_callee == NULL, "repeated inlining attempt");
    _callee = m;
  }

  virtual JVMState* generate(JVMState* jvms) {
    // Emit the CallDynamicJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    JVMState* new_jvms = VirtualCallGenerator::generate(jvms);
    if (call_node() != NULL) {
      call_node()->set_generator(this);
    }
    return new_jvms;
  }

  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_assert_ready();
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
    C->print_inlining_move_to(this);
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineVirtualCallGenerator* cg = new LateInlineVirtualCallGenerator(method(), vtable_index(), _prof_factor);
    cg->set_call_node(call->as_CallDynamicJava());
    return cg;
  }
};

bool LateInlineVirtualCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
  // Method handle linker case is handled in CallDynamicJavaNode::Ideal().
  // Unless inlining is performed, _override_symbolic_info bit will be set in DirectCallGenerator::generate().

  // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
  bool allow_inline = C->inlining_incrementally();
  CallGenerator* cg = C->call_generator(_callee,
                                        vtable_index(),
                                        false /*call_does_dispatch*/,
                                        jvms,
                                        allow_inline,
                                        _prof_factor,
                                        NULL /*speculative_receiver_type*/,
                                        true /*allow_intrinsics*/);

  if (cg != NULL) {
    assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline, "we're doing late inlining");
    _inline_cg = cg;
    return true;
  } else {
    // Virtual call which provably doesn't dispatch should be either inlined or replaced with a direct call.
    assert(false, "no progress");
    return false;
  }
}

CallGenerator* CallGenerator::for_late_inline_virtual(ciMethod* m, int vtable_index, float prof_factor) {
  assert(IncrementalInlineVirtual, "required");
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new LateInlineVirtualCallGenerator(m, vtable_index, prof_factor);
}

void LateInlineCallGenerator::do_late_inline() {
  CallGenerator::do_late_inline_helper();
}

void LateInlineMHCallGenerator::do_late_inline() {
  CallGenerator::do_late_inline_helper();
}

void LateInlineVirtualCallGenerator::do_late_inline() {
  assert(_callee != NULL, "required"); // set up in CallDynamicJavaNode::Ideal
  CallGenerator::do_late_inline_helper();
}

static bool has_non_debug_usages(Node* n) {
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* m = n->fast_out(i);
    if (!m->is_SafePoint()
        || (m->is_Call() && m->as_Call()->has_non_debug_use(n))) {
      return true;
    }
  }
  return false;
}

static bool is_box_cache_valid(CallNode* call) {
  ciInstanceKlass* klass = call->as_CallStaticJava()->method()->holder();
  return klass->is_box_cache_valid();
}
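
// When the boxed value is referenced only by debug info, the helper below
// replaces each such use with a SafePointScalarObjectNode, so the box can be
// rematerialized from its single non-static field at deoptimization time
// rather than allocated eagerly.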

// delay box in runtime, treat box as a scalarized object
static void scalarize_debug_usages(CallNode* call, Node* resproj) {
  GraphKit kit(call->jvms());
  PhaseGVN& gvn = kit.gvn();

  ProjNode* res = resproj->as_Proj();
  ciInstanceKlass* klass = call->as_CallStaticJava()->method()->holder();
  int n_fields = klass->nof_nonstatic_fields();
  assert(n_fields == 1, "the klass must be an auto-boxing klass");

  for (DUIterator_Last imin, i = res->last_outs(imin); i >= imin;) {
    SafePointNode* sfpt = res->last_out(i)->as_SafePoint();
    uint first_ind = sfpt->req() - sfpt->jvms()->scloff();
    Node* sobj = new SafePointScalarObjectNode(gvn.type(res)->isa_oopptr(),
#ifdef ASSERT
                                               call,
#endif // ASSERT
                                               first_ind, n_fields, true);
    sobj->init_req(0, kit.root());
    sfpt->add_req(call->in(TypeFunc::Parms));
    sobj = gvn.transform(sobj);
    JVMState* jvms = sfpt->jvms();
    jvms->set_endoff(sfpt->req());
    int start = jvms->debug_start();
    int end   = jvms->debug_end();
    int num_edges = sfpt->replace_edges_in_range(res, sobj, start, end, &gvn);
    i -= num_edges;
  }

  assert(res->outcnt() == 0, "the box must have no use after replace");

#ifndef PRODUCT
  if (PrintEliminateAllocations) {
    tty->print("++++ Eliminated: %d ", call->_idx);
    call->as_CallStaticJava()->method()->print_short_name(tty);
    tty->cr();
  }
#endif
}
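
// Shared driver for the do_late_inline() implementations. In outline: verify
// that the call node is still live and well-formed, synthesize a JVMState and
// SafePointNode map describing the state at the call, run
// do_late_inline_check(), then drive inline_cg()->generate() and splice the
// result back in with replace_call().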

void CallGenerator::do_late_inline_helper() {
  assert(is_late_inline(), "only late inline allowed");

  // Can't inline it
  CallNode* call = call_node();
  if (call == NULL || call->outcnt() == 0 ||
      call->in(0) == NULL || call->in(0)->is_top()) {
    return;
  }

  const TypeTuple *r = call->tf()->domain();
  for (int i1 = 0; i1 < method()->arg_size(); i1++) {
    if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }
  if (call->in(TypeFunc::Memory)->is_MergeMem()) {
    MergeMemNode* merge_mem = call->in(TypeFunc::Memory)->as_MergeMem();
    if (merge_mem->base_memory() == merge_mem->empty_memory()) {
      return; // dead path
    }
  }

  // check for unreachable loop
  CallProjections callprojs;
  call->extract_projections(&callprojs, true);
  if ((callprojs.fallthrough_catchproj == call->in(0)) ||
      (callprojs.catchall_catchproj    == call->in(0)) ||
      (callprojs.fallthrough_memproj   == call->in(TypeFunc::Memory)) ||
      (callprojs.catchall_memproj      == call->in(TypeFunc::Memory)) ||
      (callprojs.fallthrough_ioproj    == call->in(TypeFunc::I_O)) ||
      (callprojs.catchall_ioproj       == call->in(TypeFunc::I_O)) ||
      (callprojs.resproj != NULL && call->find_edge(callprojs.resproj) != -1) ||
      (callprojs.exobj   != NULL && call->find_edge(callprojs.exobj)   != -1)) {
    return;
  }

  Compile* C = Compile::current();
  // Remove inlined methods from Compiler's lists.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  bool result_not_used = false;

  if (is_pure_call()) {
    if (is_boxing_late_inline() && callprojs.resproj != nullptr) {
      // replace the box node with a scalar node only when it is directly referenced by debug info
      assert(call->as_CallStaticJava()->is_boxing_method(), "sanity");
      if (!has_non_debug_usages(callprojs.resproj) && is_box_cache_valid(call)) {
        scalarize_debug_usages(call, callprojs.resproj);
      }
    }

    // The call is marked as pure (no important side effects), but result isn't used.
    // It's safe to remove the call.
    result_not_used = (callprojs.resproj == NULL || callprojs.resproj->outcnt() == 0);
  }

  if (result_not_used) {
    GraphKit kit(call->jvms());
    kit.replace_call(call, C->top(), true);
  } else {
    // Make a clone of the JVMState that is appropriate to use for driving a parse
    JVMState* old_jvms = call->jvms();
    JVMState* jvms = old_jvms->clone_shallow(C);
    uint size = call->req();
    SafePointNode* map = new SafePointNode(size, jvms);
    for (uint i1 = 0; i1 < size; i1++) {
      map->init_req(i1, call->in(i1));
    }

    // Make sure the state is a MergeMem for parsing.
    if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
      Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
      C->initial_gvn()->set_type_bottom(mem);
      map->set_req(TypeFunc::Memory, mem);
    }

    uint nargs = method()->arg_size();
    // blow away old call arguments
    Node* top = C->top();
    for (uint i1 = 0; i1 < nargs; i1++) {
      map->set_req(TypeFunc::Parms + i1, top);
    }
    jvms->set_map(map);

    // Make enough space in the expression stack to transfer
    // the incoming arguments and return value.
    map->ensure_stack(jvms, jvms->method()->max_stack());
    for (uint i1 = 0; i1 < nargs; i1++) {
      map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
    }

    C->print_inlining_assert_ready();

    C->print_inlining_move_to(this);

    C->log_late_inline(this);

    // JVMState is ready, so time to perform some checks and prepare for inlining attempt.
    if (!do_late_inline_check(C, jvms)) {
      map->disconnect_inputs(C);
      C->print_inlining_update_delayed(this);
      return;
    }

    // Setup default node notes to be picked up by the inlining
    Node_Notes* old_nn = C->node_notes_at(call->_idx);
    if (old_nn != NULL) {
      Node_Notes* entry_nn = old_nn->clone(C);
      entry_nn->set_jvms(jvms);
      C->set_default_node_notes(entry_nn);
    }

    // Virtual call involves a receiver null check which can be made implicit.
    if (is_virtual_late_inline()) {
      GraphKit kit(jvms);
      kit.null_check_receiver();
      jvms = kit.transfer_exceptions_into_jvms();
    }

    // Now perform the inlining using the synthesized JVMState
    JVMState* new_jvms = inline_cg()->generate(jvms);
    if (new_jvms == NULL)  return; // no change
    if (C->failing())      return;

    // Capture any exceptional control flow
    GraphKit kit(new_jvms);

    // Find the result object
    Node* result = C->top();
    int   result_size = method()->return_type()->size();
    if (result_size != 0 && !kit.stopped()) {
      result = (result_size == 1) ? kit.pop() : kit.pop_pair();
    }

    if (inline_cg()->is_inline()) {
      C->set_has_loops(C->has_loops() || inline_cg()->method()->has_loops());
      C->env()->notice_inlined_method(inline_cg()->method());
    }
    C->set_inlining_progress(true);
    C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup
    kit.replace_call(call, result, true);
  }
}

class LateInlineStringCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_string_late_inline() const { return true; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineStringCallGenerator* cg = new LateInlineStringCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineStringCallGenerator(method, inline_cg);
}

class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_boxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_boxing_late_inline() const { return true; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineBoxingCallGenerator* cg = new LateInlineBoxingCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineBoxingCallGenerator(method, inline_cg);
}

class LateInlineVectorReboxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineVectorReboxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_vector_reboxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineVectorReboxingCallGenerator* cg = new LateInlineVectorReboxingCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

//   static CallGenerator* for_vector_reboxing_late_inline(ciMethod* m, CallGenerator* inline_cg);
CallGenerator* CallGenerator::for_vector_reboxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineVectorReboxingCallGenerator(method, inline_cg);
}
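
// The string, boxing and vector-reboxing generators above differ from the
// plain LateInlineCallGenerator mainly in which worklist they enqueue on
// (add_string_late_inline, add_boxing_late_inline,
// add_vector_reboxing_late_inline), letting each family be processed by its
// own pass.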

//------------------------PredictedCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;
  bool           _exact_check;

public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, bool exact_check,
                         float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
    if (hit_prob > PROB_MAX)  hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN)  hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
    _exact_check        = exact_check;
  }

  virtual bool      is_virtual()  const { return true; }
  virtual bool      is_inline()   const { return _if_hit->is_inline(); }
  virtual bool      is_deferred() const { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit,
                                    /*exact_check=*/true, hit_prob);
}

CallGenerator* CallGenerator::for_guarded_call(ciKlass* guarded_receiver,
                                               CallGenerator* if_missed,
                                               CallGenerator* if_hit) {
  return new PredictedCallGenerator(guarded_receiver, if_missed, if_hit,
                                    /*exact_check=*/false, PROB_ALWAYS);
}
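
// generate() below builds a type-guard diamond: after a receiver null check,
// the receiver is tested against _predicted_receiver (exactly, or as a
// subtype for guarded calls); the hit path runs _if_hit, the miss path runs
// _if_missed, and the two are merged through a RegionNode with I/O, memory
// and value Phis.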

JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);
  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' exact='%d' klass='%d'",
              jvms->bci(), (_exact_check ? 1 : 0), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  // Make a copy of the replaced nodes in case we need to restore them
  ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
  replaced_nodes.clone();

  Node* casted_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = NULL;
  if (_exact_check) {
    slow_ctl = kit.type_check_receiver(receiver, _predicted_receiver, _hit_prob,
                                       &casted_receiver);
  } else {
    slow_ctl = kit.subtype_check_receiver(receiver, _predicted_receiver,
                                          &casted_receiver);
  }

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms = NULL;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // Instance does not exactly match the desired type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // fall through if the instance exactly matches the desired type
  kit.replace_in_map(receiver, casted_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // There are 2 branches and the replaced nodes are only valid on
  // one: restore the replaced nodes to what they were before the
  // branch.
  kit.map()->set_replaced_nodes(replaced_nodes);

  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  // Merge memory
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if( i >= limit ) break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}
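
// Strategy for method handle call sites, as encoded below: first try to
// resolve the intrinsic into a concrete CallGenerator immediately; failing
// that, either schedule a method handle late inline (when the site is warm
// and IncrementalInlineMH permits) or fall back to an out-of-line direct
// call.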

CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline) {
  assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const);
  Compile* C = Compile::current();
  if (cg != NULL) {
    if (AlwaysIncrementalInline) {
      return CallGenerator::for_late_inline(callee, cg);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

  if (IncrementalInlineMH && call_site_count > 0 &&
      (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
    return CallGenerator::for_direct_call(callee);
  }
}

class NativeCallGenerator : public CallGenerator {
private:
  address _call_addr;
  ciNativeEntryPoint* _nep;
public:
  NativeCallGenerator(ciMethod* m, address call_addr, ciNativeEntryPoint* nep)
   : CallGenerator(m), _call_addr(call_addr), _nep(nep) {}

  virtual JVMState* generate(JVMState* jvms);
};

JVMState* NativeCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);

  Node* call = kit.make_native_call(_call_addr, tf(), method()->arg_size(), _nep); // -fallback, - nep
  if (call == NULL) return NULL;

  kit.C->print_inlining_update(this);
  if (kit.C->log() != NULL) {
    kit.C->log()->elem("l2n_intrinsification_success bci='%d' entry_point='" INTPTR_FORMAT "'", jvms->bci(), p2i(_call_addr));
  }

  return kit.transfer_exceptions_into_jvms();
}
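
// for_method_handle_inline() below strength-reduces the MH linker intrinsics
// when their decisive argument is a compile-time constant: a constant
// MethodHandle receiver for _invokeBasic, a constant MemberName for the
// _linkTo* intrinsics, and a constant address/NativeEntryPoint pair for
// _linkToNative. Otherwise it reports input_not_const and returns NULL.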

CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  if (StressMethodHandleLinkerInlining) {
    allow_inline = false;
  }
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
        const int vtable_index = Method::invalid_vtable_index;

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        CallGenerator* cg = C->call_generator(target, vtable_index,
                                              false /* call_does_dispatch */,
                                              jvms,
                                              allow_inline,
                                              PROB_ALWAYS);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "receiver not constant");
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      // Get MemberName argument:
      Node* member_name = kit.argument(callee->arg_size() - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        // In lambda forms we erase signature types to avoid resolving issues
        // involving class loaders.  When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to their
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          Node* arg = kit.argument(0);
          const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
          const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
          if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
            const Type* recv_type = arg_type->join_speculative(sig_type); // keep speculative part
            Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, recv_type));
            kit.set_argument(0, cast_obj);
          }
        }
        // Cast reference arguments to their types.
        for (int i = 0, j = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            Node* arg = kit.argument(receiver_skip + j);
            const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
            const Type*       sig_type = TypeOopPtr::make_from_klass(t->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              const Type* narrowed_arg_type = arg_type->join_speculative(sig_type); // keep speculative part
              Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, narrowed_arg_type));
              kit.set_argument(receiver_skip + j, cast_obj);
            }
          }
          j += t->size();  // long and double take two slots
        }

        // Try to get the most accurate receiver type
        const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
        const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
        int  vtable_index       = Method::invalid_vtable_index;
        bool call_does_dispatch = false;

        ciKlass* speculative_receiver_type = NULL;
        if (is_virtual_or_interface) {
          ciInstanceKlass* klass = target->holder();
          Node*             receiver_node = kit.argument(0);
          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
          // optimize_virtual_call() takes 2 different holder
          // arguments for a corner case that doesn't apply here (see
          // Parse::do_call())
          target = C->optimize_virtual_call(caller, klass, klass,
                                            target, receiver_type, is_virtual,
                                            call_does_dispatch, vtable_index,  // out-parameters
                                            false /* check_access */);
          // We lack profiling at this call but type speculation may
          // provide us with a type
          speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
        }
        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
                                              allow_inline,
                                              PROB_ALWAYS,
                                              speculative_receiver_type);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "member_name not constant");
      }
    }
    break;

  case vmIntrinsics::_linkToNative:
    {
      Node* addr_n = kit.argument(1); // target address
      Node* nep_n = kit.argument(callee->arg_size() - 1); // NativeEntryPoint
      // This check needs to be kept in sync with the one in CallStaticJavaNode::Ideal
      if (addr_n->Opcode() == Op_ConL && nep_n->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeLong* addr_t = addr_n->bottom_type()->is_long();
        const TypeOopPtr* nep_t = nep_n->bottom_type()->is_oopptr();
        address addr = (address) addr_t->get_con();
        ciNativeEntryPoint* nep = nep_t->const_oop()->as_native_entry_point();
        return new NativeCallGenerator(callee, addr, nep);
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "NativeEntryPoint not constant");
      }
    }
    break;

  default:
    fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid));
    break;
  }
  return NULL;
}
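
// Predicated intrinsics: an intrinsic that is only valid under a runtime
// predicate gets one generate_predicate() branch per variant, with the plain
// Java version (_cg) as the final else; the pseudocode at the top of
// generate() below spells out the resulting control flow.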

//------------------------PredicatedIntrinsicGenerator------------------------------
// Internal class which handles all predicated Intrinsic calls.
class PredicatedIntrinsicGenerator : public CallGenerator {
  CallGenerator* _intrinsic;
  CallGenerator* _cg;

public:
  PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
                               CallGenerator* cg)
    : CallGenerator(cg->method())
  {
    _intrinsic = intrinsic;
    _cg        = cg;
  }

  virtual bool      is_virtual()   const { return true; }
  virtual bool      is_inline()    const { return true; }
  virtual bool      is_intrinsic() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
                                                       CallGenerator* cg) {
  return new PredicatedIntrinsicGenerator(intrinsic, cg);
}


JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
  // The code we want to generate here is:
  //    if (receiver == NULL)
  //      uncommon_trap
  //    if (predicate(0))
  //      do_intrinsic(0)
  //    else
  //      if (predicate(1))
  //        do_intrinsic(1)
  //      ...
  //      else
  //        do_java_comp

  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicated_intrinsic bci='%d' method='%d'",
              jvms->bci(), log->identify(method()));
  }

  if (!method()->is_static()) {
    // We need an explicit receiver null_check before checking its type in predicate.
    // We share a map with the caller, so his JVMS gets adjusted.
    Node* receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      return kit.transfer_exceptions_into_jvms();
    }
  }

  int n_predicates = _intrinsic->predicates_count();
  assert(n_predicates > 0, "sanity");

  JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));

  // Region for normal compilation code if intrinsic failed.
  Node* slow_region = new RegionNode(1);

  int results = 0;
  for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
#ifdef ASSERT
    JVMState* old_jvms = kit.jvms();
    SafePointNode* old_map = kit.map();
    Node* old_io  = old_map->i_o();
    Node* old_mem = old_map->memory();
    Node* old_exc = old_map->next_exception();
#endif
    Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
#ifdef ASSERT
    // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
    assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
    SafePointNode* new_map = kit.map();
    assert(old_io  == new_map->i_o(), "generate_predicate should not change i_o");
    assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
    assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
#endif
    if (!kit.stopped()) {
      PreserveJVMState pjvms(&kit);
      // Generate intrinsic code:
      JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
      if (new_jvms == NULL) {
        // Intrinsic failed, use normal compilation path for this predicate.
        slow_region->add_req(kit.control());
      } else {
        kit.add_exception_states_from(new_jvms);
        kit.set_jvms(new_jvms);
        if (!kit.stopped()) {
          result_jvms[results++] = kit.jvms();
        }
      }
    }
    if (else_ctrl == NULL) {
      else_ctrl = kit.C->top();
    }
    kit.set_control(else_ctrl);
  }
  if (!kit.stopped()) {
    // Final 'else' after predicates.
    slow_region->add_req(kit.control());
  }
  if (slow_region->req() > 1) {
    PreserveJVMState pjvms(&kit);
    // Generate normal compilation code:
    kit.set_control(gvn.transform(slow_region));
    JVMState* new_jvms = _cg->generate(kit.sync_jvms());
    if (kit.failing())
      return NULL;  // might happen because of NodeCountInliningCutoff
    assert(new_jvms != NULL, "must be");
    kit.add_exception_states_from(new_jvms);
    kit.set_jvms(new_jvms);
    if (!kit.stopped()) {
      result_jvms[results++] = kit.jvms();
    }
  }

  if (results == 0) {
    // All paths ended in uncommon traps.
    (void) kit.stop();
    return kit.transfer_exceptions_into_jvms();
  }

  if (results == 1) { // Only one path
    kit.set_jvms(result_jvms[0]);
    return kit.transfer_exceptions_into_jvms();
  }

  // Merge all paths.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(results + 1);
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  for (int i = 0; i < results; i++) {
    JVMState* jvms = result_jvms[i];
    int path = i + 1;
    SafePointNode* map = jvms->map();
    region->init_req(path, map->control());
    iophi->set_req(path, map->i_o());
    if (i == 0) {
      kit.set_jvms(jvms);
    } else {
      kit.merge_memory(map->merged_memory(), region, path);
    }
  }
  kit.set_control(gvn.transform(region));
  kit.set_i_o(gvn.transform(iophi));
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }

  // Merge debug info.
  Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  Node* map = kit.map();
  uint limit = map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if( i >= limit ) break;
    }
    Node* n = map->in(i);
    ins[0] = n;
    const Type* t = gvn.type(n);
    bool needs_phi = false;
    for (int j = 1; j < results; j++) {
      JVMState* jvms = result_jvms[j];
      Node* jmap = jvms->map();
      Node* m = NULL;
      if (jmap->req() > i) {
        m = jmap->in(i);
        if (m != n) {
          needs_phi = true;
          t = t->meet_speculative(gvn.type(m));
        }
      }
      ins[j] = m;
    }
    if (needs_phi) {
      Node* phi = PhiNode::make(region, n, t);
      for (int j = 1; j < results; j++) {
        phi->set_req(j + 1, ins[j]);
      }
      map->set_req(i, gvn.transform(phi));
    }
  }

  return kit.transfer_exceptions_into_jvms();
}

//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which replaces an out-of-line call with an uncommon trap.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool      is_virtual() const { ShouldNotReachHere(); return false; }
  virtual bool      is_trap() const    { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}


JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
  // Callsite signature can be different from actual method being called (i.e. _linkTo* sites).
  // Use callsite signature always.
  ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
  int nargs = declared_method->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}

// (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note:  Merged hook_up_exits into ParseGenerator::generate.)
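
// A minimal, illustrative sketch of using the uncommon-trap generator above
// (hypothetical call site; the real decisions live in doCall.cpp):
//
//   CallGenerator* cg = CallGenerator::for_uncommon_trap(
//       callee, Deoptimization::Reason_unreached,
//       Deoptimization::Action_reinterpret);
//   jvms = cg->generate(jvms);   // deoptimizes instead of calling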