Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/opto/doCall.cpp
/*
 * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/cfgnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/sharedRuntime.hpp"

void trace_type_profile(Compile* C, ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) {
  if (TraceTypeProfile || C->print_inlining()) {
    outputStream* out = tty;
    if (!C->print_inlining()) {
      if (NOT_PRODUCT(!PrintOpto &&) !PrintCompilation) {
        method->print_short_name();
        tty->cr();
      }
      CompileTask::print_inlining(prof_method, depth, bci);
    } else {
      out = C->print_inlining_stream();
    }
    CompileTask::print_inline_indent(depth, out);
    out->print(" \\-> TypeProfile (%d/%d counts) = ", receiver_count, site_count);
    stringStream ss;
    prof_klass->name()->print_symbol_on(&ss);
    out->print("%s", ss.as_string());
    out->cr();
  }
}

CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool call_does_dispatch,
                                       JVMState* jvms, bool allow_inline,
                                       float prof_factor, ciKlass* speculative_receiver_type,
                                       bool allow_intrinsics, bool delayed_forbidden) {
  ciMethod*       caller   = jvms->method();
  int             bci      = jvms->bci();
  Bytecodes::Code bytecode = caller->java_code_at_bci(bci);
  guarantee(callee != NULL, "failed method resolution");

  // Dtrace currently doesn't work unless all calls are vanilla
  if (env()->dtrace_method_probes()) {
    allow_inline = false;
  }

  // Note: When we get profiling during stage-1 compiles, we want to pull
  // from more specific profile data which pertains to this inlining.
  // Right now, ignore the information in jvms->caller(), and do method[bci].
  ciCallProfile profile = caller->call_profile_at_bci(bci);

  // See how many times this site has been invoked.
  int site_count = profile.count();
  int receiver_count = -1;
  if (call_does_dispatch && UseTypeProfile && profile.has_receiver(0)) {
    // Receivers in the profile structure are ordered by call counts
    // so that the most called (major) receiver is profile.receiver(0).
    receiver_count = profile.receiver_count(0);
  }
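
  // When this compile is logging (-XX:+LogCompilation), emit a <call .../>
  // element describing this call site: the resolved callee, invocation
  // count, profile scaling factor, and up to two profiled receiver types.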
  CompileLog* log = this->log();
  if (log != NULL) {
    int rid = (receiver_count >= 0)? log->identify(profile.receiver(0)): -1;
    int r2id = (rid != -1 && profile.has_receiver(1))? log->identify(profile.receiver(1)):-1;
    log->begin_elem("call method='%d' count='%d' prof_factor='%g'",
                    log->identify(callee), site_count, prof_factor);
    if (call_does_dispatch)  log->print(" virtual='1'");
    if (allow_inline)        log->print(" inline='1'");
    if (receiver_count >= 0) {
      log->print(" receiver='%d' receiver_count='%d'", rid, receiver_count);
      if (profile.has_receiver(1)) {
        log->print(" receiver2='%d' receiver2_count='%d'", r2id, profile.receiver_count(1));
      }
    }
    log->end_elem();
  }

  // Special case the handling of certain common, profitable library
  // methods.  If these methods are replaced with specialized code,
  // then we return it as the inlined version of the call.
  // We do this before the strict f.p. check below because the
  // intrinsics handle strict f.p. correctly.
  CallGenerator* cg_intrinsic = NULL;
  if (allow_inline && allow_intrinsics) {
    CallGenerator* cg = find_intrinsic(callee, call_does_dispatch);
    if (cg != NULL) {
      if (cg->is_predicated()) {
        // Code without intrinsic but, hopefully, inlined.
        CallGenerator* inline_cg = this->call_generator(callee,
              vtable_index, call_does_dispatch, jvms, allow_inline, prof_factor, speculative_receiver_type, false);
        if (inline_cg != NULL) {
          cg = CallGenerator::for_predicated_intrinsic(cg, inline_cg);
        }
      }

      // If intrinsic does the virtual dispatch, we try to use the type profile
      // first, and hopefully inline it as the regular virtual call below.
      // We will retry the intrinsic if nothing had claimed it afterwards.
      if (cg->does_virtual_dispatch()) {
        cg_intrinsic = cg;
        cg = NULL;
      } else {
        return cg;
      }
    }
  }

  // Do method handle calls.
  // NOTE: This must happen before normal inlining logic below since
  // MethodHandle.invoke* are native methods which obviously don't
  // have bytecodes and so normal inlining fails.
  if (callee->is_method_handle_intrinsic()) {
    CallGenerator* cg = CallGenerator::for_method_handle_call(jvms, caller, callee, delayed_forbidden);
    assert(cg == NULL || !delayed_forbidden || !cg->is_late_inline() || cg->is_mh_late_inline(), "unexpected CallGenerator");
    return cg;
  }

  // Do not inline strict fp into non-strict code, or the reverse
  if (caller->is_strict() ^ callee->is_strict()) {
    allow_inline = false;
  }

  // Attempt to inline...
  if (allow_inline) {
    // The profile data is only partly attributable to this caller,
    // scale back the call site information.
    float past_uses = jvms->method()->scale_count(site_count, prof_factor);
    // This is the number of times we expect the call code to be used.
    float expected_uses = past_uses;

    // Try inlining a bytecoded method:
    if (!call_does_dispatch) {
      InlineTree* ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method());
      WarmCallInfo scratch_ci;
      bool should_delay = false;
      WarmCallInfo* ci = ilt->ok_to_inline(callee, jvms, profile, &scratch_ci, should_delay);
      assert(ci != &scratch_ci, "do not let this pointer escape");
      bool allow_inline   = (ci != NULL && !ci->is_cold());
      bool require_inline = (allow_inline && ci->is_hot());

      if (allow_inline) {
        CallGenerator* cg = CallGenerator::for_inline(callee, expected_uses);

        if (require_inline && cg != NULL) {
          // Delay the inlining of this method to give us the
          // opportunity to perform some high level optimizations
          // first.
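          // Three flavors of late inlining: string-concat methods are
          // deferred for PhaseStringOpts, box/unbox methods for boxing
          // elimination, and other calls for incremental inlining.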
          if (should_delay_string_inlining(callee, jvms)) {
            assert(!delayed_forbidden, "strange");
            return CallGenerator::for_string_late_inline(callee, cg);
          } else if (should_delay_boxing_inlining(callee, jvms)) {
            assert(!delayed_forbidden, "strange");
            return CallGenerator::for_boxing_late_inline(callee, cg);
          } else if ((should_delay || AlwaysIncrementalInline) && !delayed_forbidden) {
            return CallGenerator::for_late_inline(callee, cg);
          }
        }
        if (cg == NULL || should_delay) {
          // Fall through.
        } else if (require_inline || !InlineWarmCalls) {
          return cg;
        } else {
          CallGenerator* cold_cg = call_generator(callee, vtable_index, call_does_dispatch, jvms, false, prof_factor);
          return CallGenerator::for_warm_call(ci, cold_cg, cg);
        }
      }
    }

    // Try using the type profile.
    if (call_does_dispatch && site_count > 0 && receiver_count > 0) {
      // The major receiver's count >= TypeProfileMajorReceiverPercent of site_count.
      bool have_major_receiver = (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent);
      ciMethod* receiver_method = NULL;

      int morphism = profile.morphism();
      if (speculative_receiver_type != NULL) {
        if (!too_many_traps(caller, bci, Deoptimization::Reason_speculate_class_check)) {
          // We have a speculative type, we should be able to resolve
          // the call. We do that before looking at the profiling at
          // this invoke because it may lead to bimorphic inlining which
          // a speculative type should help us avoid.
          receiver_method = callee->resolve_invoke(jvms->method()->holder(),
                                                   speculative_receiver_type);
          if (receiver_method == NULL) {
            speculative_receiver_type = NULL;
          } else {
            morphism = 1;
          }
        } else {
          // speculation failed before. Use profiling at the call
          // (could allow bimorphic inlining for instance).
          speculative_receiver_type = NULL;
        }
      }
      if (receiver_method == NULL &&
          (have_major_receiver || morphism == 1 ||
           (morphism == 2 && UseBimorphicInlining))) {
        // receiver_method = profile.method();
        // Profiles do not suggest methods now.  Look it up in the major receiver.
        receiver_method = callee->resolve_invoke(jvms->method()->holder(),
                                                 profile.receiver(0));
      }
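      // If a likely target was found, guard it with a receiver type check:
      // the matching path runs hit_cg, while the mismatch path either
      // deoptimizes or falls back to a true virtual call (miss_cg below).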
      if (receiver_method != NULL) {
        // The single majority receiver sufficiently outweighs the minority.
        CallGenerator* hit_cg = this->call_generator(receiver_method,
              vtable_index, !call_does_dispatch, jvms, allow_inline, prof_factor);
        if (hit_cg != NULL) {
          // Look up second receiver.
          CallGenerator* next_hit_cg = NULL;
          ciMethod* next_receiver_method = NULL;
          if (morphism == 2 && UseBimorphicInlining) {
            next_receiver_method = callee->resolve_invoke(jvms->method()->holder(),
                                                          profile.receiver(1));
            if (next_receiver_method != NULL) {
              next_hit_cg = this->call_generator(next_receiver_method,
                                  vtable_index, !call_does_dispatch, jvms,
                                  allow_inline, prof_factor);
              if (next_hit_cg != NULL && !next_hit_cg->is_inline() &&
                  have_major_receiver && UseOnlyInlinedBimorphic) {
                // Skip if we can't inline second receiver's method
                next_hit_cg = NULL;
              }
            }
          }
          CallGenerator* miss_cg;
          Deoptimization::DeoptReason reason = morphism == 2 ?
            Deoptimization::Reason_bimorphic :
            (speculative_receiver_type == NULL ? Deoptimization::Reason_class_check : Deoptimization::Reason_speculate_class_check);
          if ((morphism == 1 || (morphism == 2 && next_hit_cg != NULL)) &&
              !too_many_traps(caller, bci, reason)
             ) {
            // Generate uncommon trap for class check failure path
            // in case of monomorphic or bimorphic virtual call site.
            miss_cg = CallGenerator::for_uncommon_trap(callee, reason,
                        Deoptimization::Action_maybe_recompile);
          } else {
            // Generate virtual call for class check failure path
            // in case of polymorphic virtual call site.
            miss_cg = CallGenerator::for_virtual_call(callee, vtable_index);
          }
          if (miss_cg != NULL) {
            if (next_hit_cg != NULL) {
              assert(speculative_receiver_type == NULL, "shouldn't end up here if we used speculation");
              trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1));
              // We don't need to record dependency on a receiver here and below.
              // Whenever we inline, the dependency is added by Parse::Parse().
              miss_cg = CallGenerator::for_predicted_call(profile.receiver(1), miss_cg, next_hit_cg, PROB_MAX);
            }
            if (miss_cg != NULL) {
              trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), receiver_method, profile.receiver(0), site_count, receiver_count);
              ciKlass* k = speculative_receiver_type != NULL ? speculative_receiver_type : profile.receiver(0);
              float hit_prob = speculative_receiver_type != NULL ? 1.0 : profile.receiver_prob(0);
              CallGenerator* cg = CallGenerator::for_predicted_call(k, miss_cg, hit_cg, hit_prob);
              if (cg != NULL)  return cg;
            }
          }
        }
      }
    }
  }

  // Nothing claimed the intrinsic, we go with straight-forward inlining
  // for already discovered intrinsic.
  if (allow_inline && allow_intrinsics && cg_intrinsic != NULL) {
    assert(cg_intrinsic->does_virtual_dispatch(), "sanity");
    return cg_intrinsic;
  }

  // There was no special inlining tactic, or it bailed out.
  // Use a more generic tactic, like a simple call.
  if (call_does_dispatch) {
    return CallGenerator::for_virtual_call(callee, vtable_index);
  } else {
    // Class Hierarchy Analysis or Type Profile reveals a unique target,
    // or it is a static or special call.
    return CallGenerator::for_direct_call(callee, should_delay_inlining(callee, jvms));
  }
}

// Return true for methods that shouldn't be inlined early so that
// they are easier to analyze and optimize as intrinsics.
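// For example, a chain like
//   String s = new StringBuilder().append(a).append(b).toString();
// is deferred so the entire chain remains visible to PhaseStringOpts.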
bool Compile::should_delay_string_inlining(ciMethod* call_method, JVMState* jvms) {
  if (has_stringbuilder()) {

    if ((call_method->holder() == C->env()->StringBuilder_klass() ||
         call_method->holder() == C->env()->StringBuffer_klass()) &&
        (jvms->method()->holder() == C->env()->StringBuilder_klass() ||
         jvms->method()->holder() == C->env()->StringBuffer_klass())) {
      // Delay SB calls only when called from non-SB code
      return false;
    }

    switch (call_method->intrinsic_id()) {
      case vmIntrinsics::_StringBuilder_void:
      case vmIntrinsics::_StringBuilder_int:
      case vmIntrinsics::_StringBuilder_String:
      case vmIntrinsics::_StringBuilder_append_char:
      case vmIntrinsics::_StringBuilder_append_int:
      case vmIntrinsics::_StringBuilder_append_String:
      case vmIntrinsics::_StringBuilder_toString:
      case vmIntrinsics::_StringBuffer_void:
      case vmIntrinsics::_StringBuffer_int:
      case vmIntrinsics::_StringBuffer_String:
      case vmIntrinsics::_StringBuffer_append_char:
      case vmIntrinsics::_StringBuffer_append_int:
      case vmIntrinsics::_StringBuffer_append_String:
      case vmIntrinsics::_StringBuffer_toString:
      case vmIntrinsics::_Integer_toString:
        return true;

      case vmIntrinsics::_String_String:
        {
          Node* receiver = jvms->map()->in(jvms->argoff() + 1);
          if (receiver->is_Proj() && receiver->in(0)->is_CallStaticJava()) {
            CallStaticJavaNode* csj = receiver->in(0)->as_CallStaticJava();
            ciMethod* m = csj->method();
            if (m != NULL &&
                (m->intrinsic_id() == vmIntrinsics::_StringBuffer_toString ||
                 m->intrinsic_id() == vmIntrinsics::_StringBuilder_toString))
              // Delay String.<init>(new SB())
              return true;
          }
          return false;
        }

      default:
        return false;
    }
  }
  return false;
}

bool Compile::should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms) {
  if (eliminate_boxing() && call_method->is_boxing_method()) {
    set_has_boxed_value(true);
    return aggressive_unboxing();
  }
  return false;
}

// uncommon-trap call-sites where callee is unloaded, uninitialized or will not link
bool Parse::can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass* klass) {
  // Additional inputs to consider...
  // bc      = bc()
  // caller  = method()
  // iter().get_method_holder_index()
  assert( dest_method->is_loaded(), "ciTypeFlow should not let us get here" );
  // Interface classes can be loaded & linked and never get around to
  // being initialized.  Uncommon-trap for not-initialized static or
  // v-calls.  Let interface calls happen.
  ciInstanceKlass* holder_klass = dest_method->holder();
  if (!holder_klass->is_being_initialized() &&
      !holder_klass->is_initialized() &&
      !holder_klass->is_interface()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  holder_klass);
    return true;
  }

  assert(dest_method->is_loaded(), "dest_method: typeflow responsibility");
  return false;
}

//------------------------------do_call----------------------------------------
// Handle your basic call.  Inline if we can & want to, else just setup call.
void Parse::do_call() {
  // It's likely we are going to add debug info soon.
  // Also, if we inline a guy who eventually needs debug info for this JVMS,
  // our contribution to it is cleaned up right here.
  kill_dead_locals();

  // Set frequently used booleans
  const bool is_virtual = bc() == Bytecodes::_invokevirtual;
  const bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;
  const bool has_receiver = Bytecodes::has_receiver(bc());

  // Find target being called
  bool             will_link;
  ciSignature*     declared_signature = NULL;
  ciMethod*        orig_callee  = iter().get_method(will_link, &declared_signature);  // callee in the bytecode
  ciInstanceKlass* holder_klass = orig_callee->holder();
  ciKlass*         holder       = iter().get_declared_method_holder();
  ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);
  assert(declared_signature != NULL, "cannot be null");

  // Bump max node limit for JSR292 users
  if (bc() == Bytecodes::_invokedynamic || orig_callee->is_method_handle_intrinsic()) {
    C->set_max_node_limit(3*MaxNodeLimit);
  }

  // uncommon-trap when callee is unloaded, uninitialized or will not link
  // bailout when too many arguments for register representation
  if (!will_link || can_not_compile_call_site(orig_callee, klass)) {
#ifndef PRODUCT
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" can not compile call at bci %d to:", bci());
      orig_callee->print_name(); tty->cr();
    }
#endif
    return;
  }
  assert(holder_klass->is_loaded(), "");
  //assert((bc_callee->is_static() || is_invokedynamic) == !has_receiver , "must match bc");  // XXX invokehandle (cur_bc_raw)
  // Note: this takes into account invokeinterface of methods declared in java/lang/Object,
  // which should be invokevirtuals but according to the VM spec may be invokeinterfaces
  assert(holder_klass->is_interface() || holder_klass->super() == NULL || (bc() != Bytecodes::_invokeinterface), "must match bc");
  // Note:  In the absence of miranda methods, an abstract class K can perform
  // an invokevirtual directly on an interface method I.m if K implements I.

  // orig_callee is the resolved callee whose signature includes the
  // appendix argument.
  const int nargs = orig_callee->arg_size();
  const bool is_signature_polymorphic = MethodHandles::is_signature_polymorphic(orig_callee->intrinsic_id());

  // Push appendix argument (MethodType, CallSite, etc.), if one.
  if (iter().has_appendix()) {
    ciObject* appendix_arg = iter().get_appendix();
    const TypeOopPtr* appendix_arg_type = TypeOopPtr::make_from_constant(appendix_arg);
    Node* appendix_arg_node = _gvn.makecon(appendix_arg_type);
    push(appendix_arg_node);
  }

  // ---------------------
  // Does Class Hierarchy Analysis reveal only a single target of a v-call?
  // Then we may inline or make a static call, but become dependent on there being only 1 target.
  // Does the call-site type profile reveal only one receiver?
  // Then we may introduce a run-time check and inline on the path where it succeeds.
  // The other path may uncommon_trap, check for another receiver, or do a v-call.

  // Try to get the most accurate receiver type
  ciMethod* callee             = orig_callee;
  int       vtable_index       = Method::invalid_vtable_index;
  bool      call_does_dispatch = false;
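
  // For virtual and interface calls, inspect the GVN type of the receiver
  // on the stack: a sharpened (or speculative) receiver type may let
  // optimize_virtual_call() turn the dispatch into a direct call.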
  // Speculative type of the receiver if any
  ciKlass* speculative_receiver_type = NULL;
  if (is_virtual_or_interface) {
    Node* receiver_node             = stack(sp() - nargs);
    const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
    // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
    // For arrays, klass below is Object. When vtable calls are used,
    // resolving the call with Object would allow an illegal call to
    // finalize() on an array. We use holder instead: illegal calls to
    // finalize() won't be compiled as vtable calls (IC call
    // resolution will catch the illegal call) and the few legal calls
    // on array types won't be either.
    callee = C->optimize_virtual_call(method(), bci(), klass, holder, orig_callee,
                                      receiver_type, is_virtual,
                                      call_does_dispatch, vtable_index);  // out-parameters
    speculative_receiver_type = receiver_type != NULL ? receiver_type->speculative_type() : NULL;
  }

  // invoke-super-special
  if (iter().cur_bc_raw() == Bytecodes::_invokespecial && !orig_callee->is_object_initializer()) {
    ciInstanceKlass* calling_klass = method()->holder();
    ciInstanceKlass* sender_klass =
      calling_klass->is_anonymous() ? calling_klass->host_klass() :
                                      calling_klass;
    if (sender_klass->is_interface()) {
      Node* receiver_node = stack(sp() - nargs);
      Node* cls_node = makecon(TypeKlassPtr::make(sender_klass));
      Node* bad_type_ctrl = NULL;
      Node* casted_receiver = gen_checkcast(receiver_node, cls_node, &bad_type_ctrl);
      if (bad_type_ctrl != NULL) {
        PreserveJVMState pjvms(this);
        set_control(bad_type_ctrl);
        uncommon_trap(Deoptimization::Reason_class_check,
                      Deoptimization::Action_none);
      }
      if (stopped()) {
        return; // MUST uncommon-trap?
      }
      set_stack(sp() - nargs, casted_receiver);
    }
  }
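  // (The checkcast above enforces the JVMS rule that, for an invokespecial
  // of an interface method, the receiver must be a subtype of the sender.)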

  // Note:  It's OK to try to inline a virtual call.
  // The call generator will not attempt to inline a polymorphic call
  // unless it knows how to optimize the receiver dispatch.
  bool try_inline = (C->do_inlining() || InlineAccessors);

  // ---------------------
  dec_sp(nargs);              // Temporarily pop args for JVM state of call
  JVMState* jvms = sync_jvms();

  // ---------------------
  // Decide call tactic.
  // This call checks with CHA, the interpreter profile, intrinsics table, etc.
  // It decides whether inlining is desirable or not.
  CallGenerator* cg = C->call_generator(callee, vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type);

  // NOTE:  Don't use orig_callee and callee after this point!  Use cg->method() instead.
  orig_callee = callee = NULL;

  // ---------------------
  // Round double arguments before call
  round_double_arguments(cg->method());

  // Feed profiling data for arguments to the type system so it can
  // propagate it as speculative types
  record_profiled_arguments_for_speculation(cg->method(), bc());

#ifndef PRODUCT
  // bump global counters for calls
  count_compiled_calls(/*at_method_entry*/ false, cg->is_inline());

  // Record first part of parsing work for this call
  parse_histogram()->record_change();
#endif // not PRODUCT

  assert(jvms == this->jvms(), "still operating on the right JVMS");
  assert(jvms_in_sync(),       "jvms must carry full info into CG");

  // save across call, for a subsequent cast_not_null.
  Node* receiver = has_receiver ? argument(0) : NULL;

  // The extra CheckCastPP for speculative types messes with PhaseStringOpts
  if (receiver != NULL && !call_does_dispatch && !cg->is_string_late_inline()) {
    // Feed profiling data for a single receiver to the type system so
    // it can propagate it as a speculative type
    receiver = record_profiled_receiver_for_speculation(receiver);
  }

  // Bump method data counters (We profile *before* the call is made
  // because exceptions don't return to the call site.)
  profile_call(receiver);

  JVMState* new_jvms = cg->generate(jvms);
  if (new_jvms == NULL) {
    // When inlining attempt fails (e.g., too many arguments),
    // it may contaminate the current compile state, making it
    // impossible to pull back and try again.  Once we call
    // cg->generate(), we are committed.  If it fails, the whole
    // compilation task is compromised.
    if (failing())  return;

    // This can happen if a library intrinsic is available, but refuses
    // the call site, perhaps because it did not match a pattern the
    // intrinsic was expecting to optimize. Should always be possible to
    // get a normal java call that may inline in that case
    cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type, /* allow_intrinsics= */ false);
    if ((new_jvms = cg->generate(jvms)) == NULL) {
      guarantee(failing(), "call failed to generate: calls should work");
      return;
    }
  }
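  // At this point new_jvms is non-NULL: either the chosen call generator or
  // the plain-call retry above produced a graph for this call.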

  if (cg->is_inline()) {
    // Accumulate has_loops estimate
    C->set_has_loops(C->has_loops() || cg->method()->has_loops());
    C->env()->notice_inlined_method(cg->method());
  }

  // Reset parser state from [new_]jvms, which now carries results of the call.
  // Return value (if any) is already pushed on the stack by the cg.
  add_exception_states_from(new_jvms);
  if (new_jvms->map()->control() == top()) {
    stop_and_kill_map();
  } else {
    assert(new_jvms->same_calls_as(jvms), "method/bci left unchanged");
    set_jvms(new_jvms);
  }

  if (!stopped()) {
    // This was some sort of virtual call, which did a null check for us.
    // Now we can assert receiver-not-null, on the normal return path.
    if (receiver != NULL && cg->is_virtual()) {
      Node* cast = cast_not_null(receiver);
      // %%% assert(receiver == cast, "should already have cast the receiver");
    }

    // Round double result after a call from strict to non-strict code
    round_double_result(cg->method());

    ciType* rtype = cg->method()->return_type();
    ciType* ctype = declared_signature->return_type();

    if (Bytecodes::has_optional_appendix(iter().cur_bc_raw()) || is_signature_polymorphic) {
      // Be careful here with return types.
      if (ctype != rtype) {
        BasicType rt = rtype->basic_type();
        BasicType ct = ctype->basic_type();
        if (ct == T_VOID) {
          // It's OK for a method to return a value that is discarded.
          // The discarding does not require any special action from the caller.
          // The Java code knows this, at VerifyType.isNullConversion.
          pop_node(rt);  // whatever it was, pop it
        } else if (rt == T_INT || is_subword_type(rt)) {
          // Nothing.  These cases are handled in lambda form bytecode.
          assert(ct == T_INT || is_subword_type(ct), err_msg_res("must match: rt=%s, ct=%s", type2name(rt), type2name(ct)));
        } else if (rt == T_OBJECT || rt == T_ARRAY) {
          assert(ct == T_OBJECT || ct == T_ARRAY, err_msg_res("rt=%s, ct=%s", type2name(rt), type2name(ct)));
          if (ctype->is_loaded()) {
            const TypeOopPtr* arg_type = TypeOopPtr::make_from_klass(rtype->as_klass());
            const Type*       sig_type = TypeOopPtr::make_from_klass(ctype->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              Node* retnode = pop();
              Node* cast_obj = _gvn.transform(new (C) CheckCastPPNode(control(), retnode, sig_type));
              push(cast_obj);
            }
          }
        } else {
          assert(rt == ct, err_msg_res("unexpected mismatch: rt=%s, ct=%s", type2name(rt), type2name(ct)));
          // push a zero; it's better than getting an oop/int mismatch
          pop_node(rt);
          Node* retnode = zerocon(ct);
          push_node(ct, retnode);
        }
        // Now that the value is well-behaved, continue with the call-site type.
        rtype = ctype;
      }
    } else {
      // Symbolic resolution enforces the types to be the same.
      // NOTE: We must relax the assert for unloaded types because two
      // different ciType instances of the same unloaded class type
      // can appear to be "loaded" by different loaders (depending on
      // the accessing class).
      assert(!rtype->is_loaded() || !ctype->is_loaded() || rtype == ctype,
             err_msg_res("mismatched return types: rtype=%s, ctype=%s", rtype->name(), ctype->name()));
    }
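    // For example, a signature-polymorphic call whose callee returns Object
    // while the call site's declared type is String gets a CheckCastPP above.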

    // If the return type of the method is not loaded, assert that the
    // value we got is a null.  Otherwise, we need to recompile.
    if (!rtype->is_loaded()) {
#ifndef PRODUCT
      if (PrintOpto && (Verbose || WizardMode)) {
        method()->print_name(); tty->print_cr(" asserting nullness of result at bci: %d", bci());
        cg->method()->print_name(); tty->cr();
      }
#endif
      if (C->log() != NULL) {
        C->log()->elem("assert_null reason='return' klass='%d'",
                       C->log()->identify(rtype));
      }
      // If there is going to be a trap, put it at the next bytecode:
      set_bci(iter().next_bci());
      null_assert(peek());
      set_bci(iter().cur_bci()); // put it back
    }
    BasicType ct = ctype->basic_type();
    if (ct == T_OBJECT || ct == T_ARRAY) {
      ciKlass* better_type = method()->return_profiled_type(bci());
      if (UseTypeSpeculation && better_type != NULL) {
        // If profiling reports a single type for the return value,
        // feed it to the type system so it can propagate it as a
        // speculative type
        record_profile_for_speculation(stack(sp()-1), better_type);
      }
    }
  }

  // Restart record of parsing work after possible inlining of call
#ifndef PRODUCT
  parse_histogram()->set_initial_state(bc());
#endif
}

//---------------------------catch_call_exceptions-----------------------------
// Put Catch and CatchProj nodes behind a just-created call.
// Send their caught exceptions to the proper handler.
// This may be used after a call to the rethrow VM stub,
// when it is needed to process unloaded exception classes.
void Parse::catch_call_exceptions(ciExceptionHandlerStream& handlers) {
  // Exceptions are delivered through this channel:
  Node* i_o = this->i_o();

  // Add a CatchNode.
  GrowableArray<int>* bcis = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, -1);
  GrowableArray<const Type*>* extypes = new (C->node_arena()) GrowableArray<const Type*>(C->node_arena(), 8, 0, NULL);
  GrowableArray<int>* saw_unloaded = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, 0);

  for (; !handlers.is_done(); handlers.next()) {
    ciExceptionHandler* h        = handlers.handler();
    int                 h_bci    = h->handler_bci();
    ciInstanceKlass*    h_klass  = h->is_catch_all() ? env()->Throwable_klass() : h->catch_klass();
    // Do not introduce unloaded exception types into the graph:
    if (!h_klass->is_loaded()) {
      if (saw_unloaded->contains(h_bci)) {
        /* We've already seen an unloaded exception with h_bci,
           so don't duplicate. Duplication will cause the CatchNode to be
           unnecessarily large. See 4713716. */
        continue;
      } else {
        saw_unloaded->append(h_bci);
      }
    }
    const Type* h_extype = TypeOopPtr::make_from_klass(h_klass);
    // (We use make_from_klass because it respects UseUniqueSubclasses.)
    h_extype = h_extype->join(TypeInstPtr::NOTNULL);
    assert(!h_extype->empty(), "sanity");
    // Note:  It's OK if the BCIs repeat themselves.
    bcis->append(h_bci);
    extypes->append(h_extype);
  }

  int len = bcis->length();
  CatchNode *cn = new (C) CatchNode(control(), i_o, len+1);
  Node *catch_ = _gvn.transform(cn);
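
  // Handler projections 1..len are wired up in the loop below; the
  // fall-through projection for the normal return is created at the end
  // of this method.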

  // now branch with the exception state to each of the (potential)
  // handlers
  for(int i=0; i < len; i++) {
    // Setup JVM state to enter the handler.
    PreserveJVMState pjvms(this);
    // Locals are just copied from before the call.
    // Get control from the CatchNode.
    int handler_bci = bcis->at(i);
    Node* ctrl = _gvn.transform( new (C) CatchProjNode(catch_, i+1, handler_bci));
    // This handler cannot happen?
    if (ctrl == top())  continue;
    set_control(ctrl);

    // Create exception oop
    const TypeInstPtr* extype = extypes->at(i)->is_instptr();
    Node *ex_oop = _gvn.transform(new (C) CreateExNode(extypes->at(i), ctrl, i_o));

    // Handle unloaded exception classes.
    if (saw_unloaded->contains(handler_bci)) {
      // An unloaded exception type is coming here.  Do an uncommon trap.
#ifndef PRODUCT
      // We do not expect the same handler bci to take both cold unloaded
      // and hot loaded exceptions.  But, watch for it.
      if ((Verbose || WizardMode) && extype->is_loaded()) {
        tty->print("Warning: Handler @%d takes mixed loaded/unloaded exceptions in ", bci());
        method()->print_name(); tty->cr();
      } else if (PrintOpto && (Verbose || WizardMode)) {
        tty->print("Bailing out on unloaded exception type ");
        extype->klass()->print_name();
        tty->print(" at bci:%d in ", bci());
        method()->print_name(); tty->cr();
      }
#endif
      // Emit an uncommon trap instead of processing the block.
      set_bci(handler_bci);
      push_ex_oop(ex_oop);
      uncommon_trap(Deoptimization::Reason_unloaded,
                    Deoptimization::Action_reinterpret,
                    extype->klass(), "!loaded exception");
      set_bci(iter().cur_bci()); // put it back
      continue;
    }

    // go to the exception handler
    if (handler_bci < 0) {        // merge with corresponding rethrow node
      throw_to_exit(make_exception_state(ex_oop));
    } else {                      // Else jump to corresponding handler
      push_ex_oop(ex_oop);        // Clear stack and push just the oop.
      merge_exception(handler_bci);
    }
  }

  // The first CatchProj is for the normal return.
  // (Note:  If this is a call to rethrow_Java, this node goes dead.)
  set_control(_gvn.transform( new (C) CatchProjNode(catch_, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci)));
}


//----------------------------catch_inline_exceptions--------------------------
// Handle all exceptions thrown by an inlined method or individual bytecode.
// Common case 1: we have no handler, so all exceptions merge right into
// the rethrow case.
// Case 2: we have some handlers, with loaded exception klasses that have
// no subklasses.  We do a Deutsch-Shiffman style type-check on the incoming
// exception oop and branch to the handler directly.
// Case 3: We have some handlers with subklasses or are not loaded at
// compile-time.  We have to call the runtime to resolve the exception.
// So we insert a RethrowCall and all the logic that goes with it.
void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
  // Caller is responsible for saving away the map for normal control flow!
  assert(stopped(), "call set_map(NULL) first");
  assert(method()->has_exception_handlers(), "don't come here w/o work to do");

  Node* ex_node = saved_ex_oop(ex_map);
  if (ex_node == top()) {
    // No action needed.
    return;
  }
  const TypeInstPtr* ex_type = _gvn.type(ex_node)->isa_instptr();
  NOT_PRODUCT(if (ex_type==NULL) tty->print_cr("*** Exception not InstPtr"));
  if (ex_type == NULL)
    ex_type = TypeOopPtr::make_from_klass(env()->Throwable_klass())->is_instptr();

  // determine potential exception handlers
  ciExceptionHandlerStream handlers(method(), bci(),
                                    ex_type->klass()->as_instance_klass(),
                                    ex_type->klass_is_exact());

  // Start executing from the given throw state.  (Keep its stack, for now.)
  // Get the exception oop as known at compile time.
  ex_node = use_exception_state(ex_map);

  // Get the exception oop klass from its header
  Node* ex_klass_node = NULL;
  if (has_ex_handler() && !ex_type->klass_is_exact()) {
    Node* p = basic_plus_adr( ex_node, ex_node, oopDesc::klass_offset_in_bytes());
    ex_klass_node = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT));

    // Compute the exception klass a little more cleverly.
    // Obvious solution is to simply do a LoadKlass from the 'ex_node'.
    // However, if the ex_node is a PhiNode, I'm going to do a LoadKlass for
    // each arm of the Phi.  If I know something clever about the exceptions
    // I'm loading the class from, I can replace the LoadKlass with the
    // klass constant for the exception oop.
    if (ex_node->is_Phi()) {
      ex_klass_node = new (C) PhiNode(ex_node->in(0), TypeKlassPtr::OBJECT);
      for (uint i = 1; i < ex_node->req(); i++) {
        Node* ex_in = ex_node->in(i);
        if (ex_in == top() || ex_in == NULL) {
          // This path was not taken.
          ex_klass_node->init_req(i, top());
          continue;
        }
        Node* p = basic_plus_adr(ex_in, ex_in, oopDesc::klass_offset_in_bytes());
        Node* k = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT));
        ex_klass_node->init_req( i, k );
      }
      _gvn.set_type(ex_klass_node, TypeKlassPtr::OBJECT);

    }
  }

  // Scan the exception table for applicable handlers.
  // If none, we can call rethrow() and be done!
  // If precise (loaded with no subklasses), insert a D.S. style
  // pointer compare to the correct handler and loop back.
  // If imprecise, switch to the Rethrow VM-call style handling.
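  // ("D.S." = Deutsch-Shiffman: an inlined pointer compare of the loaded
  // exception klass against each precise handler's catch klass.)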

  int remaining = handlers.count_remaining();

  // iterate through all entries sequentially
  for (;!handlers.is_done(); handlers.next()) {
    ciExceptionHandler* handler = handlers.handler();

    if (handler->is_rethrow()) {
      // If we fell off the end of the table without finding an imprecise
      // exception klass (and without finding a generic handler) then we
      // know this exception is not handled in this method.  We just rethrow
      // the exception into the caller.
      throw_to_exit(make_exception_state(ex_node));
      return;
    }

    // exception handler bci range covers throw_bci => investigate further
    int handler_bci = handler->handler_bci();

    if (remaining == 1) {
      push_ex_oop(ex_node);        // Push exception oop for handler
#ifndef PRODUCT
      if (PrintOpto && WizardMode) {
        tty->print_cr("  Catching every inline exception bci:%d -> handler_bci:%d", bci(), handler_bci);
      }
#endif
      merge_exception(handler_bci); // jump to handler
      return;                       // No more handling to be done here!
    }

    // Get the handler's klass
    ciInstanceKlass* klass = handler->catch_klass();

    if (!klass->is_loaded()) {  // klass is not loaded?
      // fall through into catch_call_exceptions which will emit a
      // handler with an uncommon trap.
      break;
    }

    if (klass->is_interface())  // should not happen, but...
      break;                    // bail out

    // Check the type of the exception against the catch type
    const TypeKlassPtr *tk = TypeKlassPtr::make(klass);
    Node* con = _gvn.makecon(tk);
    Node* not_subtype_ctrl = gen_subtype_check(ex_klass_node, con);
    if (!stopped()) {
      PreserveJVMState pjvms(this);
      const TypeInstPtr* tinst = TypeOopPtr::make_from_klass_unique(klass)->cast_to_ptr_type(TypePtr::NotNull)->is_instptr();
      assert(klass->has_subklass() || tinst->klass_is_exact(), "lost exactness");
      Node* ex_oop = _gvn.transform(new (C) CheckCastPPNode(control(), ex_node, tinst));
      push_ex_oop(ex_oop);      // Push exception oop for handler
#ifndef PRODUCT
      if (PrintOpto && WizardMode) {
        tty->print("  Catching inline exception bci:%d -> handler_bci:%d -- ", bci(), handler_bci);
        klass->print_name();
        tty->cr();
      }
#endif
      merge_exception(handler_bci);
    }
    set_control(not_subtype_ctrl);

    // Come here if exception does not match handler.
    // Carry on with more handler checks.
    --remaining;
  }

  assert(!stopped(), "you should return if you finish the chain");

  // Oops, need to call into the VM to resolve the klasses at runtime.
  // Note:  This call must not deoptimize, since it is not a real call at this bci!
  kill_dead_locals();

  make_runtime_call(RC_NO_LEAF | RC_MUST_THROW,
                    OptoRuntime::rethrow_Type(),
                    OptoRuntime::rethrow_stub(),
                    NULL, NULL,
                    ex_node);

  // Rethrow is a pure call, no side effects, only a result.
  // The result cannot be allocated, so we use I_O

  // Catch exceptions from the rethrow
  catch_call_exceptions(handlers);
}


// (Note:  Moved add_debug_info into GraphKit::add_safepoint_edges.)


#ifndef PRODUCT
void Parse::count_compiled_calls(bool at_method_entry, bool is_inline) {
  if( CountCompiledCalls ) {
    if( at_method_entry ) {
      // bump invocation counter if top method (for statistics)
      if (CountCompiledCalls && depth() == 1) {
        const TypePtr* addr_type = TypeMetadataPtr::make(method());
        Node* adr1 = makecon(addr_type);
        Node* adr2 = basic_plus_adr(adr1, adr1, in_bytes(Method::compiled_invocation_counter_offset()));
        increment_counter(adr2);
      }
    } else if (is_inline) {
      switch (bc()) {
      case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_inlined_calls_addr()); break;
      case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_inlined_interface_calls_addr()); break;
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokedynamic:
      case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_inlined_static_calls_addr()); break;
      default: fatal("unexpected call bytecode");
      }
    } else {
      switch (bc()) {
      case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_normal_calls_addr()); break;
      case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_interface_calls_addr()); break;
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokedynamic:
      case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_static_calls_addr()); break;
      default: fatal("unexpected call bytecode");
      }
    }
  }
}
#endif //PRODUCT

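
// Devirtualize the call if possible.  Returns the (possibly improved) callee
// and sets the call_does_dispatch / vtable_index out-parameters consumed by
// Parse::do_call().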
ciMethod* Compile::optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass,
                                         ciKlass* holder, ciMethod* callee,
                                         const TypeOopPtr* receiver_type, bool is_virtual,
                                         bool& call_does_dispatch, int& vtable_index,
                                         bool check_access) {
  // Set default values for out-parameters.
  call_does_dispatch = true;
  vtable_index       = Method::invalid_vtable_index;

  // Choose call strategy.
  ciMethod* optimized_virtual_method = optimize_inlining(caller, bci, klass, callee,
                                                         receiver_type, check_access);

  // Has the call been sufficiently improved such that it is no longer virtual?
  if (optimized_virtual_method != NULL) {
    callee             = optimized_virtual_method;
    call_does_dispatch = false;
  } else if (!UseInlineCaches && is_virtual && callee->is_loaded()) {
    // We can make a vtable call at this site
    vtable_index = callee->resolve_vtable_index(caller->holder(), holder);
  }
  return callee;
}

// Identify possible target method and inlining style
ciMethod* Compile::optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
                                     ciMethod* callee, const TypeOopPtr* receiver_type,
                                     bool check_access) {
  // only use for virtual or interface calls

  // If it is obviously final, do not bother to call find_monomorphic_target,
  // because the class hierarchy checks are not needed, and may fail due to
  // incompletely loaded classes.  Since we do our own class loading checks
  // in this module, we may confidently bind to any method.
  if (callee->can_be_statically_bound()) {
    return callee;
  }

  // Attempt to improve the receiver
  bool actual_receiver_is_exact = false;
  ciInstanceKlass* actual_receiver = klass;
  if (receiver_type != NULL) {
    // Array methods are all inherited from Object, and are monomorphic.
    // finalize() call on array is not allowed.
    if (receiver_type->isa_aryptr() &&
        callee->holder() == env()->Object_klass() &&
        callee->name() != ciSymbol::finalize_method_name()) {
      return callee;
    }

    // All other interesting cases are instance klasses.
    if (!receiver_type->isa_instptr()) {
      return NULL;
    }

    ciInstanceKlass *ikl = receiver_type->klass()->as_instance_klass();
    if (ikl->is_loaded() && ikl->is_initialized() && !ikl->is_interface() &&
        (ikl == actual_receiver || ikl->is_subtype_of(actual_receiver))) {
      // ikl is a same or better type than the original actual_receiver,
      // e.g. static receiver from bytecodes.
      actual_receiver = ikl;
      // Is the actual_receiver exact?
      actual_receiver_is_exact = receiver_type->klass_is_exact();
    }
  }
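
  // Ask CHA (Class Hierarchy Analysis) whether, among the classes loaded so
  // far, exactly one concrete method can be the target of this call.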
  ciInstanceKlass*   calling_klass = caller->holder();
  ciMethod* cha_monomorphic_target = callee->find_monomorphic_target(calling_klass, klass, actual_receiver, check_access);
  if (cha_monomorphic_target != NULL) {
    assert(!cha_monomorphic_target->is_abstract(), "");
    // Look at the method-receiver type.  Does it add "too much information"?
    ciKlass*    mr_klass = cha_monomorphic_target->holder();
    const Type* mr_type  = TypeInstPtr::make(TypePtr::BotPTR, mr_klass);
    if (receiver_type == NULL || !receiver_type->higher_equal(mr_type)) {
      // Calling this method would include an implicit cast to its holder.
      // %%% Not yet implemented.  Would throw minor asserts at present.
      // %%% The most common wins are already gained by +UseUniqueSubclasses.
      // To fix, put the higher_equal check at the call of this routine,
      // and add a CheckCastPP to the receiver.
      if (TraceDependencies) {
        tty->print_cr("found unique CHA method, but could not cast up");
        tty->print("  method  = ");
        cha_monomorphic_target->print();
        tty->cr();
      }
      if (log() != NULL) {
        log()->elem("missed_CHA_opportunity klass='%d' method='%d'",
                    log()->identify(klass),
                    log()->identify(cha_monomorphic_target));
      }
      cha_monomorphic_target = NULL;
    }
  }
  if (cha_monomorphic_target != NULL) {
    // Hardwiring a virtual.
    // If we inlined because CHA revealed only a single target method,
    // then we are dependent on that target method not getting overridden
    // by dynamic class loading.  Be sure to test the "static" receiver
    // dest_method here, as opposed to the actual receiver, which may
    // falsely lead us to believe that the receiver is final or private.
    dependencies()->assert_unique_concrete_method(actual_receiver, cha_monomorphic_target);
    return cha_monomorphic_target;
  }

  // If the type is exact, we can still bind the method w/o a vcall.
  // (This case comes after CHA so we can see how much extra work it does.)
  if (actual_receiver_is_exact) {
    // In case of evolution, there is a dependence on every inlined method, since each
    // such method can be changed when its class is redefined.
    ciMethod* exact_method = callee->resolve_invoke(calling_klass, actual_receiver);
    if (exact_method != NULL) {
#ifndef PRODUCT
      if (PrintOpto) {
        tty->print("  Calling method via exact type @%d --- ", bci);
        exact_method->print_name();
        tty->cr();
      }
#endif
      return exact_method;
    }
  }

  return NULL;
}