Path: blob/master/src/hotspot/share/opto/doCall.cpp

/*
 * Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciMethodHandle.hpp"
#include "ci/ciSymbols.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"

void trace_type_profile(Compile* C, ciMethod* method, int depth, int bci, ciMethod* prof_method, ciKlass* prof_klass, int site_count, int receiver_count) {
  if (TraceTypeProfile || C->print_inlining()) {
    outputStream* out = tty;
    if (!C->print_inlining()) {
      if (!PrintOpto && !PrintCompilation) {
        method->print_short_name();
        tty->cr();
      }
      CompileTask::print_inlining_tty(prof_method, depth, bci);
    } else {
      out = C->print_inlining_stream();
    }
    CompileTask::print_inline_indent(depth, out);
    out->print(" \\-> TypeProfile (%d/%d counts) = ", receiver_count, site_count);
    stringStream ss;
    prof_klass->name()->print_symbol_on(&ss);
    out->print("%s", ss.as_string());
    out->cr();
  }
}

CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool call_does_dispatch,
                                       JVMState* jvms, bool allow_inline,
                                       float prof_factor, ciKlass* speculative_receiver_type,
                                       bool allow_intrinsics) {
  ciMethod*       caller   = jvms->method();
  int             bci      = jvms->bci();
  Bytecodes::Code bytecode = caller->java_code_at_bci(bci);
  guarantee(callee != NULL, "failed method resolution");

  // Dtrace currently doesn't work unless all calls are vanilla
  if (env()->dtrace_method_probes()) {
    allow_inline = false;
  }

  // Note: When we get profiling during stage-1 compiles, we want to pull
  // from more specific profile data which pertains to this inlining.
  // Right now, ignore the information in jvms->caller(), and do method[bci].
  ciCallProfile profile = caller->call_profile_at_bci(bci);

  // See how many times this site has been invoked.
  int site_count = profile.count();
  int receiver_count = -1;
  if (call_does_dispatch && UseTypeProfile && profile.has_receiver(0)) {
    // Receivers in the profile structure are ordered by call counts
    // so that the most called (major) receiver is profile.receiver(0).
    receiver_count = profile.receiver_count(0);
  }

  CompileLog* log = this->log();
  if (log != NULL) {
    int rid = (receiver_count >= 0)? log->identify(profile.receiver(0)): -1;
    int r2id = (rid != -1 && profile.has_receiver(1))? log->identify(profile.receiver(1)):-1;
    log->begin_elem("call method='%d' count='%d' prof_factor='%f'",
                    log->identify(callee), site_count, prof_factor);
    if (call_does_dispatch)  log->print(" virtual='1'");
    if (allow_inline)        log->print(" inline='1'");
    if (receiver_count >= 0) {
      log->print(" receiver='%d' receiver_count='%d'", rid, receiver_count);
      if (profile.has_receiver(1)) {
        log->print(" receiver2='%d' receiver2_count='%d'", r2id, profile.receiver_count(1));
      }
    }
    if (callee->is_method_handle_intrinsic()) {
      log->print(" method_handle_intrinsic='1'");
    }
    log->end_elem();
  }

  // Special case the handling of certain common, profitable library
  // methods.  If these methods are replaced with specialized code,
  // then we return it as the inlined version of the call.
  CallGenerator* cg_intrinsic = NULL;
  if (allow_inline && allow_intrinsics) {
    CallGenerator* cg = find_intrinsic(callee, call_does_dispatch);
    if (cg != NULL) {
      if (cg->is_predicated()) {
        // Code without the intrinsic but, hopefully, inlined.
        CallGenerator* inline_cg = this->call_generator(callee,
              vtable_index, call_does_dispatch, jvms, allow_inline, prof_factor, speculative_receiver_type, false);
        if (inline_cg != NULL) {
          cg = CallGenerator::for_predicated_intrinsic(cg, inline_cg);
        }
      }

      // If the intrinsic does the virtual dispatch, we try to use the type
      // profile first, and hopefully inline it as the regular virtual call
      // below. We will retry the intrinsic if nothing has claimed it afterwards.
      if (cg->does_virtual_dispatch()) {
        cg_intrinsic = cg;
        cg = NULL;
      } else if (should_delay_vector_inlining(callee, jvms)) {
        return CallGenerator::for_late_inline(callee, cg);
      } else {
        return cg;
      }
    }
  }
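
  // Editorial note (an addition, not from the original sources): a
  // "predicated" intrinsic is one that is only legal under some dynamic
  // condition (certain crypto intrinsics, for example, apply only to
  // particular receiver configurations), so it is paired above with a
  // regular inlined fallback. Conceptually the emitted shape is:
  //
  //   if (predicate holds)  { intrinsic version of the callee }
  //   else                  { ordinary (hopefully inlined) callee }
  //
  // which is why call_generator() recurses with allow_intrinsics == false
  // to build the fallback path.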

  // Do method handle calls.
  // NOTE: This must happen before normal inlining logic below since
  // MethodHandle.invoke* are native methods which obviously don't
  // have bytecodes and so normal inlining fails.
  if (callee->is_method_handle_intrinsic()) {
    CallGenerator* cg = CallGenerator::for_method_handle_call(jvms, caller, callee, allow_inline);
    return cg;
  }

  // Attempt to inline...
  if (allow_inline) {
    // The profile data is only partly attributable to this caller,
    // scale back the call site information.
    float past_uses = jvms->method()->scale_count(site_count, prof_factor);
    // This is the number of times we expect the call code to be used.
    float expected_uses = past_uses;

    // Try inlining a bytecoded method:
    if (!call_does_dispatch) {
      InlineTree* ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method());
      bool should_delay = false;
      if (ilt->ok_to_inline(callee, jvms, profile, should_delay)) {
        CallGenerator* cg = CallGenerator::for_inline(callee, expected_uses);
        if (cg != NULL) {
          // Delay the inlining of this method to give us the
          // opportunity to perform some high level optimizations
          // first.
          if (should_delay_string_inlining(callee, jvms)) {
            return CallGenerator::for_string_late_inline(callee, cg);
          } else if (should_delay_boxing_inlining(callee, jvms)) {
            return CallGenerator::for_boxing_late_inline(callee, cg);
          } else if (should_delay_vector_reboxing_inlining(callee, jvms)) {
            return CallGenerator::for_vector_reboxing_late_inline(callee, cg);
          } else if (should_delay || AlwaysIncrementalInline) {
            return CallGenerator::for_late_inline(callee, cg);
          } else {
            return cg;
          }
        }
      }
    }

    // Try using the type profile.
    if (call_does_dispatch && site_count > 0 && UseTypeProfile) {
      // The major receiver's count >= TypeProfileMajorReceiverPercent of site_count.
      bool have_major_receiver = profile.has_receiver(0) && (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent);
      ciMethod* receiver_method = NULL;

      int morphism = profile.morphism();
      if (speculative_receiver_type != NULL) {
        if (!too_many_traps_or_recompiles(caller, bci, Deoptimization::Reason_speculate_class_check)) {
          // We have a speculative type, we should be able to resolve
          // the call. We do that before looking at the profiling at
          // this invoke because it may lead to bimorphic inlining which
          // a speculative type should help us avoid.
          receiver_method = callee->resolve_invoke(jvms->method()->holder(),
                                                   speculative_receiver_type);
          if (receiver_method == NULL) {
            speculative_receiver_type = NULL;
          } else {
            morphism = 1;
          }
        } else {
          // Speculation failed before. Use profiling at the call
          // (could allow bimorphic inlining for instance).
          speculative_receiver_type = NULL;
        }
      }
      if (receiver_method == NULL &&
          (have_major_receiver || morphism == 1 ||
           (morphism == 2 && UseBimorphicInlining))) {
        // receiver_method = profile.method();
        // Profiles do not suggest methods now.  Look it up in the major receiver.
        receiver_method = callee->resolve_invoke(jvms->method()->holder(),
                                                 profile.receiver(0));
      }
      if (receiver_method != NULL) {
        // The single majority receiver sufficiently outweighs the minority.
        CallGenerator* hit_cg = this->call_generator(receiver_method,
              vtable_index, !call_does_dispatch, jvms, allow_inline, prof_factor);
        if (hit_cg != NULL) {
          // Look up the second receiver.
          CallGenerator* next_hit_cg = NULL;
          ciMethod* next_receiver_method = NULL;
          if (morphism == 2 && UseBimorphicInlining) {
            next_receiver_method = callee->resolve_invoke(jvms->method()->holder(),
                                                          profile.receiver(1));
            if (next_receiver_method != NULL) {
              next_hit_cg = this->call_generator(next_receiver_method,
                                vtable_index, !call_does_dispatch, jvms,
                                allow_inline, prof_factor);
              if (next_hit_cg != NULL && !next_hit_cg->is_inline() &&
                  have_major_receiver && UseOnlyInlinedBimorphic) {
                // Skip if we can't inline the second receiver's method.
                next_hit_cg = NULL;
              }
            }
          }
          CallGenerator* miss_cg;
          Deoptimization::DeoptReason reason = (morphism == 2
                                                ? Deoptimization::Reason_bimorphic
                                                : Deoptimization::reason_class_check(speculative_receiver_type != NULL));
          if ((morphism == 1 || (morphism == 2 && next_hit_cg != NULL)) &&
              !too_many_traps_or_recompiles(caller, bci, reason)) {
            // Generate an uncommon trap for the class check failure path
            // in case of a monomorphic or bimorphic virtual call site.
            miss_cg = CallGenerator::for_uncommon_trap(callee, reason,
                                                       Deoptimization::Action_maybe_recompile);
          } else {
            // Generate a virtual call for the class check failure path
            // in case of a polymorphic virtual call site.
            miss_cg = (IncrementalInlineVirtual ? CallGenerator::for_late_inline_virtual(callee, vtable_index, prof_factor)
                                                : CallGenerator::for_virtual_call(callee, vtable_index));
          }
          if (miss_cg != NULL) {
            if (next_hit_cg != NULL) {
              assert(speculative_receiver_type == NULL, "shouldn't end up here if we used speculation");
              trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1));
              // We don't need to record a dependency on the receiver here and below.
              // Whenever we inline, the dependency is added by Parse::Parse().
              miss_cg = CallGenerator::for_predicted_call(profile.receiver(1), miss_cg, next_hit_cg, PROB_MAX);
            }
            if (miss_cg != NULL) {
              ciKlass* k = speculative_receiver_type != NULL ? speculative_receiver_type : profile.receiver(0);
              trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), receiver_method, k, site_count, receiver_count);
              float hit_prob = speculative_receiver_type != NULL ? 1.0 : profile.receiver_prob(0);
              CallGenerator* cg = CallGenerator::for_predicted_call(k, miss_cg, hit_cg, hit_prob);
              if (cg != NULL)  return cg;
            }
          }
        }
      }
    }
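
    // Editorial sketch (an addition, not from the original sources): for a
    // site whose profile is dominated by one or two receiver classes, the
    // generators built above compose into roughly:
    //
    //   if (recv->klass() == K0)       { inlined K0 target }    // hit_cg
    //   else if (recv->klass() == K1)  { inlined K1 target }    // next_hit_cg
    //   else                           { trap or virtual call } // miss_cg
    //
    // with the receiver-class guards supplied by
    // CallGenerator::for_predicted_call().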

    // If there is only one implementor of this interface then we
    // may be able to bind this invoke directly to the implementing
    // klass but we need both a dependence on the single interface
    // and on the method we bind to. Additionally since all we know
    // about the receiver type is that it's supposed to implement the
    // interface we have to insert a check that it's the class we
    // expect.  Interface types are not checked by the verifier so
    // they are roughly equivalent to Object.
    // The number of implementors for declared_interface is less than or
    // equal to the number of implementors for target->holder(), so if
    // the number of implementors of target->holder() == 1 then the
    // number of implementors for decl_interface is 0 or 1. If
    // it's 0 then no class implements decl_interface and there's
    // no point in inlining.
    if (call_does_dispatch && bytecode == Bytecodes::_invokeinterface) {
      ciInstanceKlass* declared_interface =
          caller->get_declared_method_holder_at_bci(bci)->as_instance_klass();
      ciInstanceKlass* singleton = declared_interface->unique_implementor();

      if (singleton != NULL) {
        assert(singleton != declared_interface, "not a unique implementor");

        ciMethod* cha_monomorphic_target =
            callee->find_monomorphic_target(caller->holder(), declared_interface, singleton);

        if (cha_monomorphic_target != NULL &&
            cha_monomorphic_target->holder() != env()->Object_klass()) { // subtype check against Object is useless
          ciKlass* holder = cha_monomorphic_target->holder();

          // Try to inline the method found by CHA. The inlined method is guarded by the type check.
          CallGenerator* hit_cg = call_generator(cha_monomorphic_target,
              vtable_index, !call_does_dispatch, jvms, allow_inline, prof_factor);

          // Deoptimize on type check failure.  The interpreter will throw ICCE for us.
          CallGenerator* miss_cg = CallGenerator::for_uncommon_trap(callee,
              Deoptimization::Reason_class_check, Deoptimization::Action_none);

          CallGenerator* cg = CallGenerator::for_guarded_call(holder, miss_cg, hit_cg);
          if (hit_cg != NULL && cg != NULL) {
            dependencies()->assert_unique_concrete_method(declared_interface, cha_monomorphic_target, declared_interface, callee);
            return cg;
          }
        }
      }
    } // call_does_dispatch && bytecode == Bytecodes::_invokeinterface
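
    // Editorial note (an addition, not from the original sources): the
    // recorded unique-concrete-method dependency is what keeps the guarded
    // direct call above safe. If class loading ever introduces a second
    // implementor of declared_interface, the dependency is violated and the
    // nmethod is deoptimized, so execution falls back to regular dispatch.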

    // Nothing claimed the intrinsic; go with straightforward inlining
    // for the already discovered intrinsic.
    if (allow_intrinsics && cg_intrinsic != NULL) {
      assert(cg_intrinsic->does_virtual_dispatch(), "sanity");
      return cg_intrinsic;
    }
  } // allow_inline

  // There was no special inlining tactic, or it bailed out.
  // Use a more generic tactic, like a simple call.
  if (call_does_dispatch) {
    const char* msg = "virtual call";
    if (C->print_inlining()) {
      print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
    }
    C->log_inline_failure(msg);
    if (IncrementalInlineVirtual && allow_inline) {
      return CallGenerator::for_late_inline_virtual(callee, vtable_index, prof_factor); // attempt to inline through virtual call later
    } else {
      return CallGenerator::for_virtual_call(callee, vtable_index);
    }
  } else {
    // Class Hierarchy Analysis or Type Profile reveals a unique target,
    // or it is a static or special call.
    return CallGenerator::for_direct_call(callee, should_delay_inlining(callee, jvms));
  }
}

// Return true for methods that shouldn't be inlined early so that
// they are easier to analyze and optimize as intrinsics.
bool Compile::should_delay_string_inlining(ciMethod* call_method, JVMState* jvms) {
  if (has_stringbuilder()) {

    if ((call_method->holder() == C->env()->StringBuilder_klass() ||
         call_method->holder() == C->env()->StringBuffer_klass()) &&
        (jvms->method()->holder() == C->env()->StringBuilder_klass() ||
         jvms->method()->holder() == C->env()->StringBuffer_klass())) {
      // Delay SB calls only when called from non-SB code;
      // here the caller is itself SB code, so don't delay.
      return false;
    }

    switch (call_method->intrinsic_id()) {
    case vmIntrinsics::_StringBuilder_void:
    case vmIntrinsics::_StringBuilder_int:
    case vmIntrinsics::_StringBuilder_String:
    case vmIntrinsics::_StringBuilder_append_char:
    case vmIntrinsics::_StringBuilder_append_int:
    case vmIntrinsics::_StringBuilder_append_String:
    case vmIntrinsics::_StringBuilder_toString:
    case vmIntrinsics::_StringBuffer_void:
    case vmIntrinsics::_StringBuffer_int:
    case vmIntrinsics::_StringBuffer_String:
    case vmIntrinsics::_StringBuffer_append_char:
    case vmIntrinsics::_StringBuffer_append_int:
    case vmIntrinsics::_StringBuffer_append_String:
    case vmIntrinsics::_StringBuffer_toString:
    case vmIntrinsics::_Integer_toString:
      return true;

    case vmIntrinsics::_String_String:
      {
        Node* receiver = jvms->map()->in(jvms->argoff() + 1);
        if (receiver->is_Proj() && receiver->in(0)->is_CallStaticJava()) {
          CallStaticJavaNode* csj = receiver->in(0)->as_CallStaticJava();
          ciMethod* m = csj->method();
          if (m != NULL &&
              (m->intrinsic_id() == vmIntrinsics::_StringBuffer_toString ||
               m->intrinsic_id() == vmIntrinsics::_StringBuilder_toString)) {
            // Delay String.<init>(new SB())
            return true;
          }
        }
        return false;
      }

    default:
      return false;
    }
  }
  return false;
}
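
// Editorial example (an addition, not from the original sources): the shape
// being protected here is the javac translation of string concatenation,
// e.g. for "x = " + x:
//
//   String s = new StringBuilder().append("x = ").append(x).toString();
//
// Delaying inlining of the StringBuilder/StringBuffer methods keeps this
// call chain recognizable so PhaseStringOpts can later fuse it into a
// single buffer fill.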

bool Compile::should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms) {
  if (eliminate_boxing() && call_method->is_boxing_method()) {
    set_has_boxed_value(true);
    return aggressive_unboxing();
  }
  return false;
}
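
// Editorial example (an addition, not from the original sources): delaying
// here serves box elimination. A pattern such as
//
//   Integer boxed = Integer.valueOf(x);  // boxing method
//   int y = boxed.intValue();            // value is unboxed right away
//
// is easier to collapse while the boxing call is still a single call node
// at the time the boxing-elimination / late-inlining pass runs.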

bool Compile::should_delay_vector_inlining(ciMethod* call_method, JVMState* jvms) {
  return EnableVectorSupport && call_method->is_vector_method();
}

bool Compile::should_delay_vector_reboxing_inlining(ciMethod* call_method, JVMState* jvms) {
  return EnableVectorSupport && (call_method->intrinsic_id() == vmIntrinsics::_VectorRebox);
}

// uncommon-trap call-sites where the callee is unloaded, uninitialized or will not link
bool Parse::can_not_compile_call_site(ciMethod* dest_method, ciInstanceKlass* klass) {
  // Additional inputs to consider...
  // bc     = bc()
  // caller = method()
  // iter().get_method_holder_index()
  assert(dest_method->is_loaded(), "ciTypeFlow should not let us get here");
  // Interface classes can be loaded & linked and never get around to
  // being initialized. Uncommon-trap for not-initialized static or
  // v-calls. Let interface calls happen.
  ciInstanceKlass* holder_klass = dest_method->holder();
  if (!holder_klass->is_being_initialized() &&
      !holder_klass->is_initialized() &&
      !holder_klass->is_interface()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  holder_klass);
    return true;
  }

  assert(dest_method->is_loaded(), "dest_method: typeflow responsibility");
  return false;
}

#ifdef ASSERT
static bool check_call_consistency(JVMState* jvms, CallGenerator* cg) {
  ciMethod* symbolic_info = jvms->method()->get_method_at_bci(jvms->bci());
  ciMethod* resolved_method = cg->method();
  if (!ciMethod::is_consistent_info(symbolic_info, resolved_method)) {
    tty->print_cr("JVMS:");
    jvms->dump();
    tty->print_cr("Bytecode info:");
    jvms->method()->get_method_at_bci(jvms->bci())->print(); tty->cr();
    tty->print_cr("Resolved method:");
    cg->method()->print(); tty->cr();
    return false;
  }
  return true;
}
#endif // ASSERT

//------------------------------do_call----------------------------------------
// Handle your basic call.  Inline if we can & want to, else just set up a call.
void Parse::do_call() {
  // It's likely we are going to add debug info soon.
  // Also, if we inline a method that eventually needs debug info for this JVMS,
  // our contribution to it is cleaned up right here.
  kill_dead_locals();

  C->print_inlining_assert_ready();

  // Set frequently used booleans
  const bool is_virtual = bc() == Bytecodes::_invokevirtual;
  const bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;
  const bool has_receiver = Bytecodes::has_receiver(bc());

  // Find the target being called
  bool             will_link;
  ciSignature*     declared_signature = NULL;
  ciMethod*        orig_callee  = iter().get_method(will_link, &declared_signature);  // callee in the bytecode
  ciInstanceKlass* holder_klass = orig_callee->holder();
  ciKlass*         holder       = iter().get_declared_method_holder();
  ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);
  assert(declared_signature != NULL, "cannot be null");

  // Bump the max node limit for JSR292 users
  if (bc() == Bytecodes::_invokedynamic || orig_callee->is_method_handle_intrinsic()) {
    C->set_max_node_limit(3*MaxNodeLimit);
  }

  // uncommon-trap when the callee is unloaded, uninitialized or will not link;
  // bail out when there are too many arguments for the register representation
  if (!will_link || can_not_compile_call_site(orig_callee, klass)) {
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" can not compile call at bci %d to:", bci());
      orig_callee->print_name(); tty->cr();
    }
    return;
  }
  assert(holder_klass->is_loaded(), "");
  //assert((bc_callee->is_static() || is_invokedynamic) == !has_receiver , "must match bc");  // XXX invokehandle (cur_bc_raw)
  // Note: this takes into account invokeinterface of methods declared in java/lang/Object,
  // which should be invokevirtuals but according to the VM spec may be invokeinterfaces
  assert(holder_klass->is_interface() || holder_klass->super() == NULL || (bc() != Bytecodes::_invokeinterface), "must match bc");
  // Note: In the absence of miranda methods, an abstract class K can perform
  // an invokevirtual directly on an interface method I.m if K implements I.

  // orig_callee is the resolved callee whose signature includes the
  // appendix argument.
  const int nargs = orig_callee->arg_size();
  const bool is_signature_polymorphic = MethodHandles::is_signature_polymorphic(orig_callee->intrinsic_id());

  // Push the appendix argument (MethodType, CallSite, etc.), if there is one.
  if (iter().has_appendix()) {
    ciObject* appendix_arg = iter().get_appendix();
    const TypeOopPtr* appendix_arg_type = TypeOopPtr::make_from_constant(appendix_arg, /* require_const= */ true);
    Node* appendix_arg_node = _gvn.makecon(appendix_arg_type);
    push(appendix_arg_node);
  }
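
  // Editorial note (an addition, not from the original sources): the
  // appendix is the extra constant argument that linkage of an
  // invokedynamic or invokehandle site produces (e.g. a bound CallSite or
  // a MethodType). Pushing it here makes the argument list match the
  // resolved callee's signature, which already accounts for it (see nargs
  // above).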

  // ---------------------
  // Does Class Hierarchy Analysis reveal only a single target of a v-call?
  // Then we may inline or make a static call, but become dependent on there being only 1 target.
  // Does the call-site type profile reveal only one receiver?
  // Then we may introduce a run-time check and inline on the path where it succeeds.
  // The other path may uncommon_trap, check for another receiver, or do a v-call.

  // Try to get the most accurate receiver type
  ciMethod* callee             = orig_callee;
  int       vtable_index       = Method::invalid_vtable_index;
  bool      call_does_dispatch = false;

  // Speculative type of the receiver if any
  ciKlass* speculative_receiver_type = NULL;
  if (is_virtual_or_interface) {
    Node* receiver_node             = stack(sp() - nargs);
    const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
    // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
    // For arrays, klass below is Object. When vtable calls are used,
    // resolving the call with Object would allow an illegal call to
    // finalize() on an array. We use holder instead: illegal calls to
    // finalize() won't be compiled as vtable calls (IC call
    // resolution will catch the illegal call) and the few legal calls
    // on array types won't be either.
    callee = C->optimize_virtual_call(method(), klass, holder, orig_callee,
                                      receiver_type, is_virtual,
                                      call_does_dispatch, vtable_index);  // out-parameters
    speculative_receiver_type = receiver_type != NULL ? receiver_type->speculative_type() : NULL;
  }

  // Additional receiver subtype checks for interface calls via invokespecial or invokeinterface.
  ciKlass* receiver_constraint = NULL;
  if (iter().cur_bc_raw() == Bytecodes::_invokespecial && !orig_callee->is_object_initializer()) {
    ciInstanceKlass* calling_klass = method()->holder();
    ciInstanceKlass* sender_klass = calling_klass;
    if (sender_klass->is_interface()) {
      receiver_constraint = sender_klass;
    }
  } else if (iter().cur_bc_raw() == Bytecodes::_invokeinterface && orig_callee->is_private()) {
    assert(holder->is_interface(), "How did we get a non-interface method here!");
    receiver_constraint = holder;
  }

  if (receiver_constraint != NULL) {
    Node* receiver_node = stack(sp() - nargs);
    Node* cls_node = makecon(TypeKlassPtr::make(receiver_constraint));
    Node* bad_type_ctrl = NULL;
    Node* casted_receiver = gen_checkcast(receiver_node, cls_node, &bad_type_ctrl);
    if (bad_type_ctrl != NULL) {
      PreserveJVMState pjvms(this);
      set_control(bad_type_ctrl);
      uncommon_trap(Deoptimization::Reason_class_check,
                    Deoptimization::Action_none);
    }
    if (stopped()) {
      return; // MUST uncommon-trap?
    }
    set_stack(sp() - nargs, casted_receiver);
  }
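
  // Editorial example (an addition, not from the original sources): the
  // invokeinterface arm above covers direct calls to private interface
  // methods, e.g.
  //
  //   interface I { private int f(); default int g() { return f(); } }
  //
  // where the receiver must really be an I, so a receiver subtype check is
  // materialized before the call rather than trusting the verifier.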

  // Note:  It's OK to try to inline a virtual call.
  // The call generator will not attempt to inline a polymorphic call
  // unless it knows how to optimize the receiver dispatch.
  bool try_inline = (C->do_inlining() || InlineAccessors);

  // ---------------------
  dec_sp(nargs);              // Temporarily pop args for JVM state of call
  JVMState* jvms = sync_jvms();

  // ---------------------
  // Decide call tactic.
  // This call checks with CHA, the interpreter profile, the intrinsics table, etc.
  // It decides whether inlining is desirable or not.
  CallGenerator* cg = C->call_generator(callee, vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type);

  // NOTE:  Don't use orig_callee and callee after this point!  Use cg->method() instead.
  orig_callee = callee = NULL;

  // ---------------------
  // Round double arguments before the call
  round_double_arguments(cg->method());

  // Feed profiling data for arguments to the type system so it can
  // propagate it as speculative types
  record_profiled_arguments_for_speculation(cg->method(), bc());

#ifndef PRODUCT
  // bump global counters for calls
  count_compiled_calls(/*at_method_entry*/ false, cg->is_inline());

  // Record the first part of parsing work for this call
  parse_histogram()->record_change();
#endif // not PRODUCT

  assert(jvms == this->jvms(), "still operating on the right JVMS");
  assert(jvms_in_sync(),       "jvms must carry full info into CG");

  // save across call, for a subsequent cast_not_null.
  Node* receiver = has_receiver ? argument(0) : NULL;

  // The extra CheckCastPPs for speculative types mess with PhaseStringOpts
  if (receiver != NULL && !call_does_dispatch && !cg->is_string_late_inline()) {
    // Feed profiling data for a single receiver to the type system so
    // it can propagate it as a speculative type
    receiver = record_profiled_receiver_for_speculation(receiver);
  }

  JVMState* new_jvms = cg->generate(jvms);
  if (new_jvms == NULL) {
    // When an inlining attempt fails (e.g., too many arguments),
    // it may contaminate the current compile state, making it
    // impossible to pull back and try again.  Once we call
    // cg->generate(), we are committed.  If it fails, the whole
    // compilation task is compromised.
    if (failing())  return;

    // This can happen if a library intrinsic is available, but refuses
    // the call site, perhaps because it did not match a pattern the
    // intrinsic was expecting to optimize. It should always be possible
    // to get a normal Java call that may inline in that case.
    cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type, /* allow_intrinsics= */ false);
    new_jvms = cg->generate(jvms);
    if (new_jvms == NULL) {
      guarantee(failing(), "call failed to generate: calls should work");
      return;
    }
  }

  if (cg->is_inline()) {
    // Accumulate has_loops estimate
    C->env()->notice_inlined_method(cg->method());
  }

  // Reset parser state from [new_]jvms, which now carries results of the call.
  // Return value (if any) is already pushed on the stack by the cg.
  add_exception_states_from(new_jvms);
  if (new_jvms->map()->control() == top()) {
    stop_and_kill_map();
  } else {
    assert(new_jvms->same_calls_as(jvms), "method/bci left unchanged");
    set_jvms(new_jvms);
  }

  assert(check_call_consistency(jvms, cg), "inconsistent info");

  if (!stopped()) {
    // This was some sort of virtual call, which did a null check for us.
    // Now we can assert receiver-not-null, on the normal return path.
    if (receiver != NULL && cg->is_virtual()) {
      Node* cast = cast_not_null(receiver);
      // %%% assert(receiver == cast, "should already have cast the receiver");
    }

    ciType* rtype = cg->method()->return_type();
    ciType* ctype = declared_signature->return_type();

    if (Bytecodes::has_optional_appendix(iter().cur_bc_raw()) || is_signature_polymorphic) {
      // Be careful here with return types.
      if (ctype != rtype) {
        BasicType rt = rtype->basic_type();
        BasicType ct = ctype->basic_type();
        if (ct == T_VOID) {
          // It's OK for a method to return a value that is discarded.
          // The discarding does not require any special action from the caller.
          // The Java code knows this, at VerifyType.isNullConversion.
          pop_node(rt);  // whatever it was, pop it
        } else if (rt == T_INT || is_subword_type(rt)) {
          // Nothing.  These cases are handled in lambda form bytecode.
          assert(ct == T_INT || is_subword_type(ct), "must match: rt=%s, ct=%s", type2name(rt), type2name(ct));
        } else if (is_reference_type(rt)) {
          assert(is_reference_type(ct), "rt=%s, ct=%s", type2name(rt), type2name(ct));
          if (ctype->is_loaded()) {
            const TypeOopPtr* arg_type = TypeOopPtr::make_from_klass(rtype->as_klass());
            const Type*       sig_type = TypeOopPtr::make_from_klass(ctype->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              Node* retnode = pop();
              Node* cast_obj = _gvn.transform(new CheckCastPPNode(control(), retnode, sig_type));
              push(cast_obj);
            }
          }
        } else {
          assert(rt == ct, "unexpected mismatch: rt=%s, ct=%s", type2name(rt), type2name(ct));
          // push a zero; it's better than getting an oop/int mismatch
          pop_node(rt);
          Node* retnode = zerocon(ct);
          push_node(ct, retnode);
        }
        // Now that the value is well-behaved, continue with the call-site type.
        rtype = ctype;
      }
    } else {
      // Symbolic resolution enforces the types to be the same.
      // NOTE: We must relax the assert for unloaded types because two
      // different ciType instances of the same unloaded class type
      // can appear to be "loaded" by different loaders (depending on
      // the accessing class).
      assert(!rtype->is_loaded() || !ctype->is_loaded() || rtype == ctype,
             "mismatched return types: rtype=%s, ctype=%s", rtype->name(), ctype->name());
    }

    // If the return type of the method is not loaded, assert that the
    // value we got is a null.  Otherwise, we need to recompile.
    if (!rtype->is_loaded()) {
      if (PrintOpto && (Verbose || WizardMode)) {
        method()->print_name(); tty->print_cr(" asserting nullness of result at bci: %d", bci());
        cg->method()->print_name(); tty->cr();
      }
      if (C->log() != NULL) {
        C->log()->elem("assert_null reason='return' klass='%d'",
                       C->log()->identify(rtype));
      }
      // If there is going to be a trap, put it at the next bytecode:
      set_bci(iter().next_bci());
      null_assert(peek());
      set_bci(iter().cur_bci()); // put it back
    }
    BasicType ct = ctype->basic_type();
    if (is_reference_type(ct)) {
      record_profiled_return_for_speculation();
    }
  }

  // Restart record of parsing work after possible inlining of call
#ifndef PRODUCT
  parse_histogram()->set_initial_state(bc());
#endif
}
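
// Editorial example (an addition, not from the original sources): the
// rtype/ctype reconciliation above matters for signature-polymorphic calls,
// where declared and resolved return types may differ legitimately, e.g.
//
//   MethodHandle mh = ...;     // target method returns String
//   Object r = mh.invoke();    // call site is declared to return Object
//
// The parser then adapts the pushed return value to the call-site type.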

//---------------------------catch_call_exceptions-----------------------------
// Put Catch and CatchProj nodes behind a just-created call.
// Send their caught exceptions to the proper handler.
// This may be used after a call to the rethrow VM stub,
// when it is needed to process unloaded exception classes.
void Parse::catch_call_exceptions(ciExceptionHandlerStream& handlers) {
  // Exceptions are delivered through this channel:
  Node* i_o = this->i_o();

  // Add a CatchNode.
  GrowableArray<int>* bcis = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, -1);
  GrowableArray<const Type*>* extypes = new (C->node_arena()) GrowableArray<const Type*>(C->node_arena(), 8, 0, NULL);
  GrowableArray<int>* saw_unloaded = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, 0);

  bool default_handler = false;
  for (; !handlers.is_done(); handlers.next()) {
    ciExceptionHandler* h       = handlers.handler();
    int                 h_bci   = h->handler_bci();
    ciInstanceKlass*    h_klass = h->is_catch_all() ? env()->Throwable_klass() : h->catch_klass();
    // Do not introduce unloaded exception types into the graph:
    if (!h_klass->is_loaded()) {
      if (saw_unloaded->contains(h_bci)) {
        /* We've already seen an unloaded exception with h_bci,
           so don't duplicate. Duplication will cause the CatchNode to be
           unnecessarily large. See 4713716. */
        continue;
      } else {
        saw_unloaded->append(h_bci);
      }
    }
    const Type* h_extype = TypeOopPtr::make_from_klass(h_klass);
    // (We use make_from_klass because it respects UseUniqueSubclasses.)
    h_extype = h_extype->join(TypeInstPtr::NOTNULL);
    assert(!h_extype->empty(), "sanity");
    // Note:  It's OK if the BCIs repeat themselves.
    bcis->append(h_bci);
    extypes->append(h_extype);
    if (h_bci == -1) {
      default_handler = true;
    }
  }

  if (!default_handler) {
    bcis->append(-1);
    extypes->append(TypeOopPtr::make_from_klass(env()->Throwable_klass())->is_instptr());
  }

  int len = bcis->length();
  CatchNode* cn = new CatchNode(control(), i_o, len+1);
  Node* catch_ = _gvn.transform(cn);
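
  // Editorial note (an addition, not from the original sources): the
  // CatchNode built above fans control out into len+1 projections: one
  // fall-through projection for the normal return path and one CatchProj
  // per collected handler bci, with -1 standing for the appended
  // rethrow/default handler.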

  // now branch with the exception state to each of the (potential)
  // handlers
  for (int i = 0; i < len; i++) {
    // Setup JVM state to enter the handler.
    PreserveJVMState pjvms(this);
    // Locals are just copied from before the call.
    // Get control from the CatchNode.
    int handler_bci = bcis->at(i);
    Node* ctrl = _gvn.transform( new CatchProjNode(catch_, i+1, handler_bci));
    // This handler cannot happen?
    if (ctrl == top())  continue;
    set_control(ctrl);

    // Create exception oop
    const TypeInstPtr* extype = extypes->at(i)->is_instptr();
    Node* ex_oop = _gvn.transform(new CreateExNode(extypes->at(i), ctrl, i_o));

    // Handle unloaded exception classes.
    if (saw_unloaded->contains(handler_bci)) {
      // An unloaded exception type is coming here.  Do an uncommon trap.
#ifndef PRODUCT
      // We do not expect the same handler bci to take both cold unloaded
      // and hot loaded exceptions.  But, watch for it.
      if ((Verbose || WizardMode) && extype->is_loaded()) {
        tty->print("Warning: Handler @%d takes mixed loaded/unloaded exceptions in ", bci());
        method()->print_name(); tty->cr();
      } else if (PrintOpto && (Verbose || WizardMode)) {
        tty->print("Bailing out on unloaded exception type ");
        extype->klass()->print_name();
        tty->print(" at bci:%d in ", bci());
        method()->print_name(); tty->cr();
      }
#endif
      // Emit an uncommon trap instead of processing the block.
      set_bci(handler_bci);
      push_ex_oop(ex_oop);
      uncommon_trap(Deoptimization::Reason_unloaded,
                    Deoptimization::Action_reinterpret,
                    extype->klass(), "!loaded exception");
      set_bci(iter().cur_bci()); // put it back
      continue;
    }

    // go to the exception handler
    if (handler_bci < 0) {        // merge with corresponding rethrow node
      throw_to_exit(make_exception_state(ex_oop));
    } else {                      // Else jump to the corresponding handler
      push_ex_oop(ex_oop);        // Clear stack and push just the oop.
      merge_exception(handler_bci);
    }
  }

  // The first CatchProj is for the normal return.
  // (Note:  If this is a call to rethrow_Java, this node goes dead.)
  set_control(_gvn.transform( new CatchProjNode(catch_, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci)));
}


//----------------------------catch_inline_exceptions--------------------------
// Handle all exceptions thrown by an inlined method or individual bytecode.
// Common case 1: we have no handler, so all exceptions merge right into
// the rethrow case.
// Case 2: we have some handlers, with loaded exception klasses that have
// no subklasses.  We do a Deutsch-Shiffman style type-check on the incoming
// exception oop and branch to the handler directly.
// Case 3: We have some handlers with subklasses, or with klasses that are
// not loaded at compile-time.  We have to call the runtime to resolve the
// exception.  So we insert a RethrowCall and all the logic that goes with it.
void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
  // Caller is responsible for saving away the map for normal control flow!
  assert(stopped(), "call set_map(NULL) first");
  assert(method()->has_exception_handlers(), "don't come here w/o work to do");

  Node* ex_node = saved_ex_oop(ex_map);
  if (ex_node == top()) {
    // No action needed.
    return;
  }
  const TypeInstPtr* ex_type = _gvn.type(ex_node)->isa_instptr();
  NOT_PRODUCT(if (ex_type == NULL) tty->print_cr("*** Exception not InstPtr"));
  if (ex_type == NULL)
    ex_type = TypeOopPtr::make_from_klass(env()->Throwable_klass())->is_instptr();

  // determine potential exception handlers
  ciExceptionHandlerStream handlers(method(), bci(),
                                    ex_type->klass()->as_instance_klass(),
                                    ex_type->klass_is_exact());

  // Start executing from the given throw state.  (Keep its stack, for now.)
  // Get the exception oop as known at compile time.
  ex_node = use_exception_state(ex_map);

  // Get the exception oop klass from its header
  Node* ex_klass_node = NULL;
  if (has_ex_handler() && !ex_type->klass_is_exact()) {
    Node* p = basic_plus_adr(ex_node, ex_node, oopDesc::klass_offset_in_bytes());
    ex_klass_node = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT));

    // Compute the exception klass a little more cleverly.
    // The obvious solution is to simply do a LoadKlass from the 'ex_node'.
    // However, if the ex_node is a PhiNode, I'm going to do a LoadKlass for
    // each arm of the Phi.  If I know something clever about the exceptions
    // I'm loading the class from, I can replace the LoadKlass with the
    // klass constant for the exception oop.
    if (ex_node->is_Phi()) {
      ex_klass_node = new PhiNode(ex_node->in(0), TypeKlassPtr::OBJECT);
      for (uint i = 1; i < ex_node->req(); i++) {
        Node* ex_in = ex_node->in(i);
        if (ex_in == top() || ex_in == NULL) {
          // This path was not taken.
          ex_klass_node->init_req(i, top());
          continue;
        }
        Node* p = basic_plus_adr(ex_in, ex_in, oopDesc::klass_offset_in_bytes());
        Node* k = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT));
        ex_klass_node->init_req(i, k);
      }
      _gvn.set_type(ex_klass_node, TypeKlassPtr::OBJECT);
    }
  }

  // Scan the exception table for applicable handlers.
  // If none, we can call rethrow() and be done!
  // If precise (loaded with no subklasses), insert a D.S. style
  // pointer compare to the correct handler and loop back.
  // If imprecise, switch to the Rethrow VM-call style handling.

  int remaining = handlers.count_remaining();

  // iterate through all entries sequentially
  for (; !handlers.is_done(); handlers.next()) {
    ciExceptionHandler* handler = handlers.handler();

    if (handler->is_rethrow()) {
      // If we fell off the end of the table without finding an imprecise
      // exception klass (and without finding a generic handler) then we
      // know this exception is not handled in this method.  We just rethrow
      // the exception into the caller.
      throw_to_exit(make_exception_state(ex_node));
      return;
    }

    // exception handler bci range covers throw_bci => investigate further
    int handler_bci = handler->handler_bci();

    if (remaining == 1) {
      push_ex_oop(ex_node);         // Push exception oop for handler
      if (PrintOpto && WizardMode) {
        tty->print_cr("  Catching every inline exception bci:%d -> handler_bci:%d", bci(), handler_bci);
      }
      merge_exception(handler_bci); // jump to handler
      return;                       // No more handling to be done here!
    }

    // Get the handler's klass
    ciInstanceKlass* klass = handler->catch_klass();

    if (!klass->is_loaded()) {  // klass is not loaded?
      // fall through into catch_call_exceptions which will emit a
      // handler with an uncommon trap.
      break;
    }

    if (klass->is_interface())  // should not happen, but...
      break;                    // bail out

    // Check the type of the exception against the catch type
    const TypeKlassPtr* tk = TypeKlassPtr::make(klass);
    Node* con = _gvn.makecon(tk);
    Node* not_subtype_ctrl = gen_subtype_check(ex_klass_node, con);
    if (!stopped()) {
      PreserveJVMState pjvms(this);
      const TypeInstPtr* tinst = TypeOopPtr::make_from_klass_unique(klass)->cast_to_ptr_type(TypePtr::NotNull)->is_instptr();
      assert(klass->has_subklass() || tinst->klass_is_exact(), "lost exactness");
      Node* ex_oop = _gvn.transform(new CheckCastPPNode(control(), ex_node, tinst));
      push_ex_oop(ex_oop);      // Push exception oop for handler
      if (PrintOpto && WizardMode) {
        tty->print("  Catching inline exception bci:%d -> handler_bci:%d -- ", bci(), handler_bci);
        klass->print_name();
        tty->cr();
      }
      merge_exception(handler_bci);
    }
    set_control(not_subtype_ctrl);

    // Come here if the exception does not match the handler.
    // Carry on with more handler checks.
    --remaining;
  }

  assert(!stopped(), "you should return if you finish the chain");
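
  // Editorial note (an addition, not from the original sources): the loop
  // above builds the Deutsch-Shiffman style chain of subtype checks, e.g.
  //
  //   if (ex.klass is-a Catch1) goto handler1;
  //   if (ex.klass is-a Catch2) goto handler2;
  //   // otherwise fall through to the runtime rethrow below
  //
  // and it is only attempted for loaded, non-interface catch types.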

  // Oops, need to call into the VM to resolve the klasses at runtime.
  // Note: This call must not deoptimize, since it is not a real call at this bci!
  kill_dead_locals();

  make_runtime_call(RC_NO_LEAF | RC_MUST_THROW,
                    OptoRuntime::rethrow_Type(),
                    OptoRuntime::rethrow_stub(),
                    NULL, NULL,
                    ex_node);

  // Rethrow is a pure call, no side effects, only a result.
  // The result cannot be allocated, so we use I_O

  // Catch exceptions from the rethrow
  catch_call_exceptions(handlers);
}


// (Note:  Moved add_debug_info into GraphKit::add_safepoint_edges.)


#ifndef PRODUCT
void Parse::count_compiled_calls(bool at_method_entry, bool is_inline) {
  if (CountCompiledCalls) {
    if (at_method_entry) {
      // bump invocation counter if top method (for statistics)
      if (CountCompiledCalls && depth() == 1) {
        const TypePtr* addr_type = TypeMetadataPtr::make(method());
        Node* adr1 = makecon(addr_type);
        Node* adr2 = basic_plus_adr(adr1, adr1, in_bytes(Method::compiled_invocation_counter_offset()));
        increment_counter(adr2);
      }
    } else if (is_inline) {
      switch (bc()) {
      case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_inlined_calls_addr()); break;
      case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_inlined_interface_calls_addr()); break;
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokedynamic:
      case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_inlined_static_calls_addr()); break;
      default: fatal("unexpected call bytecode");
      }
    } else {
      switch (bc()) {
      case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_normal_calls_addr()); break;
      case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_interface_calls_addr()); break;
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokedynamic:
      case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_static_calls_addr()); break;
      default: fatal("unexpected call bytecode");
      }
    }
  }
}
#endif // PRODUCT


ciMethod* Compile::optimize_virtual_call(ciMethod* caller, ciInstanceKlass* klass,
                                         ciKlass* holder, ciMethod* callee,
                                         const TypeOopPtr* receiver_type, bool is_virtual,
                                         bool& call_does_dispatch, int& vtable_index,
                                         bool check_access) {
  // Set default values for out-parameters.
  call_does_dispatch = true;
  vtable_index       = Method::invalid_vtable_index;

  // Choose call strategy.
  ciMethod* optimized_virtual_method = optimize_inlining(caller, klass, holder, callee,
                                                         receiver_type, check_access);

  // Has the call been sufficiently improved such that it is no longer virtual?
  if (optimized_virtual_method != NULL) {
    callee             = optimized_virtual_method;
    call_does_dispatch = false;
  } else if (!UseInlineCaches && is_virtual && callee->is_loaded()) {
    // We can make a vtable call at this site
    vtable_index = callee->resolve_vtable_index(caller->holder(), holder);
  }
  return callee;
}
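
// Editorial note (an addition, not from the original sources): when
// optimize_inlining() cannot devirtualize and inline caches are disabled,
// publishing a real vtable_index lets the backend emit a vtable-indexed
// dispatch instead of the default inline-cache call stub.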
Since we do our own class loading checks1082// in this module, we may confidently bind to any method.1083if (callee->can_be_statically_bound()) {1084return callee;1085}10861087if (receiver_type == NULL) {1088return NULL; // no receiver type info1089}10901091// Attempt to improve the receiver1092bool actual_receiver_is_exact = false;1093ciInstanceKlass* actual_receiver = klass;1094// Array methods are all inherited from Object, and are monomorphic.1095// finalize() call on array is not allowed.1096if (receiver_type->isa_aryptr() &&1097callee->holder() == env()->Object_klass() &&1098callee->name() != ciSymbols::finalize_method_name()) {1099return callee;1100}11011102// All other interesting cases are instance klasses.1103if (!receiver_type->isa_instptr()) {1104return NULL;1105}11061107ciInstanceKlass *ikl = receiver_type->klass()->as_instance_klass();1108if (ikl->is_loaded() && ikl->is_initialized() && !ikl->is_interface() &&1109(ikl == actual_receiver || ikl->is_subtype_of(actual_receiver))) {1110// ikl is a same or better type than the original actual_receiver,1111// e.g. static receiver from bytecodes.1112actual_receiver = ikl;1113// Is the actual_receiver exact?1114actual_receiver_is_exact = receiver_type->klass_is_exact();1115}11161117ciInstanceKlass* calling_klass = caller->holder();1118ciMethod* cha_monomorphic_target = callee->find_monomorphic_target(calling_klass, klass, actual_receiver, check_access);11191120// Validate receiver info against target method.1121if (cha_monomorphic_target != NULL) {1122bool has_receiver = !cha_monomorphic_target->is_static();1123bool is_interface_holder = cha_monomorphic_target->holder()->is_interface();1124if (has_receiver && !is_interface_holder) {1125if (!cha_monomorphic_target->holder()->is_subtype_of(receiver_type->klass())) {1126cha_monomorphic_target = NULL; // not a subtype1127}1128}1129}11301131if (cha_monomorphic_target != NULL) {1132// Hardwiring a virtual.1133assert(!callee->can_be_statically_bound(), "should have been handled earlier");1134assert(!cha_monomorphic_target->is_abstract(), "");1135if (!cha_monomorphic_target->can_be_statically_bound(actual_receiver)) {1136// If we inlined because CHA revealed only a single target method,1137// then we are dependent on that target method not getting overridden1138// by dynamic class loading. Be sure to test the "static" receiver1139// dest_method here, as opposed to the actual receiver, which may1140// falsely lead us to believe that the receiver is final or private.1141dependencies()->assert_unique_concrete_method(actual_receiver, cha_monomorphic_target, holder, callee);1142}1143return cha_monomorphic_target;1144}11451146// If the type is exact, we can still bind the method w/o a vcall.1147// (This case comes after CHA so we can see how much extra work it does.)1148if (actual_receiver_is_exact) {1149// In case of evolution, there is a dependence on every inlined method, since each1150// such method can be changed when its class is redefined.1151ciMethod* exact_method = callee->resolve_invoke(calling_klass, actual_receiver);1152if (exact_method != NULL) {1153return exact_method;1154}1155}11561157return NULL;1158}115911601161