Path: blob/master/src/hotspot/share/opto/graphKit.cpp
/*
 * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciUtilities.hpp"
#include "classfile/javaClasses.hpp"
#include "ci/ciNativeEntryPoint.hpp"
#include "ci/ciObjArray.hpp"
#include "asm/register.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subtypenode.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/powerOfTwo.hpp"
#include "utilities/growableArray.hpp"

//----------------------------GraphKit-----------------------------------------
// Main utility constructor.
GraphKit::GraphKit(JVMState* jvms)
  : Phase(Phase::Parser),
    _env(C->env()),
    _gvn(*C->initial_gvn()),
    _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
{
  _exceptions = jvms->map()->next_exception();
  if (_exceptions != NULL) jvms->map()->set_next_exception(NULL);
  set_jvms(jvms);
}

// Private constructor for parser.
GraphKit::GraphKit()
  : Phase(Phase::Parser),
    _env(C->env()),
    _gvn(*C->initial_gvn()),
    _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
{
  _exceptions = NULL;
  set_map(NULL);
  debug_only(_sp = -99);
  debug_only(set_bci(-99));
}



//---------------------------clean_stack---------------------------------------
// Clear away rubbish from the stack area of the JVM state.
// This destroys any arguments that may be waiting on the stack.
void GraphKit::clean_stack(int from_sp) {
  SafePointNode* map = this->map();
  JVMState* jvms = this->jvms();
  int stk_size = jvms->stk_size();
  int stkoff = jvms->stkoff();
  Node* top = this->top();
  for (int i = from_sp; i < stk_size; i++) {
    if (map->in(stkoff + i) != top) {
      map->set_req(stkoff + i, top);
    }
  }
}


//--------------------------------sync_jvms-----------------------------------
// Make sure our current jvms agrees with our parse state.
JVMState* GraphKit::sync_jvms() const {
  JVMState* jvms = this->jvms();
  jvms->set_bci(bci());  // Record the new bci in the JVMState
  jvms->set_sp(sp());    // Record the new sp in the JVMState
  assert(jvms_in_sync(), "jvms is now in sync");
  return jvms;
}

//--------------------------------sync_jvms_for_reexecute---------------------
// Make sure our current jvms agrees with our parse state.  This version
// uses the reexecute_sp for reexecuting bytecodes.
JVMState* GraphKit::sync_jvms_for_reexecute() {
  JVMState* jvms = this->jvms();
  jvms->set_bci(bci());          // Record the new bci in the JVMState
  jvms->set_sp(reexecute_sp());  // Record the new sp in the JVMState
  return jvms;
}

#ifdef ASSERT
bool GraphKit::jvms_in_sync() const {
  Parse* parse = is_Parse();
  if (parse == NULL) {
    if (bci() != jvms()->bci()) return false;
    if (sp() != (int)jvms()->sp()) return false;
    return true;
  }
  if (jvms()->method() != parse->method()) return false;
  if (jvms()->bci() != parse->bci()) return false;
  int jvms_sp = jvms()->sp();
  if (jvms_sp != parse->sp()) return false;
  int jvms_depth = jvms()->depth();
  if (jvms_depth != parse->depth()) return false;
  return true;
}

// Local helper checks for special internal merge points
// used to accumulate and merge exception states.
// They are marked by the region's in(0) edge being the map itself.
// Such merge points must never "escape" into the parser at large,
// until they have been handed to gvn.transform.
static bool is_hidden_merge(Node* reg) {
  if (reg == NULL) return false;
  if (reg->is_Phi()) {
    reg = reg->in(0);
    if (reg == NULL) return false;
  }
  return reg->is_Region() && reg->in(0) != NULL && reg->in(0)->is_Root();
}

void GraphKit::verify_map() const {
  if (map() == NULL) return;  // null map is OK
  assert(map()->req() <= jvms()->endoff(), "no extra garbage on map");
  assert(!map()->has_exceptions(), "call add_exception_states_from 1st");
  assert(!is_hidden_merge(control()), "call use_exception_state, not set_map");
}

void GraphKit::verify_exception_state(SafePointNode* ex_map) {
  assert(ex_map->next_exception() == NULL, "not already part of a chain");
  assert(has_saved_ex_oop(ex_map), "every exception state has an ex_oop");
}
#endif

//---------------------------stop_and_kill_map---------------------------------
// Set _map to NULL, signalling a stop to further bytecode execution.
// First smash the current map's control to a constant, to mark it dead.
void GraphKit::stop_and_kill_map() {
  SafePointNode* dead_map = stop();
  if (dead_map != NULL) {
    dead_map->disconnect_inputs(C);  // Mark the map as killed.
    assert(dead_map->is_killed(), "must be so marked");
  }
}


//--------------------------------stopped--------------------------------------
// Tell if _map is NULL, or control is top.
bool GraphKit::stopped() {
  if (map() == NULL) return true;
  else if (control() == top()) return true;
  else return false;
}


//-----------------------------has_ex_handler----------------------------------
// Tell if this method or any caller method has exception handlers.
bool GraphKit::has_ex_handler() {
  for (JVMState* jvmsp = jvms(); jvmsp != NULL; jvmsp = jvmsp->caller()) {
    if (jvmsp->has_method() && jvmsp->method()->has_exception_handlers()) {
      return true;
    }
  }
  return false;
}

//------------------------------save_ex_oop------------------------------------
// Save an exception without blowing stack contents or other JVM state.
void GraphKit::set_saved_ex_oop(SafePointNode* ex_map, Node* ex_oop) {
  assert(!has_saved_ex_oop(ex_map), "clear ex-oop before setting again");
"clear ex-oop before setting again");199ex_map->add_req(ex_oop);200debug_only(verify_exception_state(ex_map));201}202203inline static Node* common_saved_ex_oop(SafePointNode* ex_map, bool clear_it) {204assert(GraphKit::has_saved_ex_oop(ex_map), "ex_oop must be there");205Node* ex_oop = ex_map->in(ex_map->req()-1);206if (clear_it) ex_map->del_req(ex_map->req()-1);207return ex_oop;208}209210//-----------------------------saved_ex_oop------------------------------------211// Recover a saved exception from its map.212Node* GraphKit::saved_ex_oop(SafePointNode* ex_map) {213return common_saved_ex_oop(ex_map, false);214}215216//--------------------------clear_saved_ex_oop---------------------------------217// Erase a previously saved exception from its map.218Node* GraphKit::clear_saved_ex_oop(SafePointNode* ex_map) {219return common_saved_ex_oop(ex_map, true);220}221222#ifdef ASSERT223//---------------------------has_saved_ex_oop----------------------------------224// Erase a previously saved exception from its map.225bool GraphKit::has_saved_ex_oop(SafePointNode* ex_map) {226return ex_map->req() == ex_map->jvms()->endoff()+1;227}228#endif229230//-------------------------make_exception_state--------------------------------231// Turn the current JVM state into an exception state, appending the ex_oop.232SafePointNode* GraphKit::make_exception_state(Node* ex_oop) {233sync_jvms();234SafePointNode* ex_map = stop(); // do not manipulate this map any more235set_saved_ex_oop(ex_map, ex_oop);236return ex_map;237}238239240//--------------------------add_exception_state--------------------------------241// Add an exception to my list of exceptions.242void GraphKit::add_exception_state(SafePointNode* ex_map) {243if (ex_map == NULL || ex_map->control() == top()) {244return;245}246#ifdef ASSERT247verify_exception_state(ex_map);248if (has_exceptions()) {249assert(ex_map->jvms()->same_calls_as(_exceptions->jvms()), "all collected exceptions must come from the same place");250}251#endif252253// If there is already an exception of exactly this type, merge with it.254// In particular, null-checks and other low-level exceptions common up here.255Node* ex_oop = saved_ex_oop(ex_map);256const Type* ex_type = _gvn.type(ex_oop);257if (ex_oop == top()) {258// No action needed.259return;260}261assert(ex_type->isa_instptr(), "exception must be an instance");262for (SafePointNode* e2 = _exceptions; e2 != NULL; e2 = e2->next_exception()) {263const Type* ex_type2 = _gvn.type(saved_ex_oop(e2));264// We check sp also because call bytecodes can generate exceptions265// both before and after arguments are popped!266if (ex_type2 == ex_type267&& e2->_jvms->sp() == ex_map->_jvms->sp()) {268combine_exception_states(ex_map, e2);269return;270}271}272273// No pre-existing exception of the same type. 
  push_exception_state(ex_map);
}

//-----------------------add_exception_states_from-----------------------------
void GraphKit::add_exception_states_from(JVMState* jvms) {
  SafePointNode* ex_map = jvms->map()->next_exception();
  if (ex_map != NULL) {
    jvms->map()->set_next_exception(NULL);
    for (SafePointNode* next_map; ex_map != NULL; ex_map = next_map) {
      next_map = ex_map->next_exception();
      ex_map->set_next_exception(NULL);
      add_exception_state(ex_map);
    }
  }
}

//-----------------------transfer_exceptions_into_jvms-------------------------
JVMState* GraphKit::transfer_exceptions_into_jvms() {
  if (map() == NULL) {
    // We need a JVMS to carry the exceptions, but the map has gone away.
    // Create a scratch JVMS, cloned from any of the exception states...
    if (has_exceptions()) {
      _map = _exceptions;
      _map = clone_map();
      _map->set_next_exception(NULL);
      clear_saved_ex_oop(_map);
      debug_only(verify_map());
    } else {
      // ...or created from scratch
      JVMState* jvms = new (C) JVMState(_method, NULL);
      jvms->set_bci(_bci);
      jvms->set_sp(_sp);
      jvms->set_map(new SafePointNode(TypeFunc::Parms, jvms));
      set_jvms(jvms);
      for (uint i = 0; i < map()->req(); i++) map()->init_req(i, top());
      set_all_memory(top());
      while (map()->req() < jvms->endoff()) map()->add_req(top());
    }
    // (This is a kludge, in case you didn't notice.)
    set_control(top());
  }
  JVMState* jvms = sync_jvms();
  assert(!jvms->map()->has_exceptions(), "no exceptions on this map yet");
  jvms->map()->set_next_exception(_exceptions);
  _exceptions = NULL;  // done with this set of exceptions
  return jvms;
}

static inline void add_n_reqs(Node* dstphi, Node* srcphi) {
  assert(is_hidden_merge(dstphi), "must be a special merge node");
  assert(is_hidden_merge(srcphi), "must be a special merge node");
  uint limit = srcphi->req();
  for (uint i = PhiNode::Input; i < limit; i++) {
    dstphi->add_req(srcphi->in(i));
  }
}
static inline void add_one_req(Node* dstphi, Node* src) {
  assert(is_hidden_merge(dstphi), "must be a special merge node");
  assert(!is_hidden_merge(src), "must not be a special merge node");
  dstphi->add_req(src);
}

//-----------------------combine_exception_states------------------------------
// This helper function combines exception states by building phis on a
// specially marked state-merging region.  These regions and phis are
// untransformed, and can build up gradually.  The region is marked by
// having a control input of its exception map, rather than NULL.  Such
// regions do not appear except in this function, and in use_exception_state.
void GraphKit::combine_exception_states(SafePointNode* ex_map, SafePointNode* phi_map) {
  if (failing()) return;  // dying anyway...
  JVMState* ex_jvms = ex_map->_jvms;
  assert(ex_jvms->same_calls_as(phi_map->_jvms), "consistent call chains");
  assert(ex_jvms->stkoff() == phi_map->_jvms->stkoff(), "matching locals");
  assert(ex_jvms->sp() == phi_map->_jvms->sp(), "matching stack sizes");
  assert(ex_jvms->monoff() == phi_map->_jvms->monoff(), "matching JVMS");
  assert(ex_jvms->scloff() == phi_map->_jvms->scloff(), "matching scalar replaced objects");
  assert(ex_map->req() == phi_map->req(), "matching maps");
  uint tos = ex_jvms->stkoff() + ex_jvms->sp();
  Node* hidden_merge_mark = root();
  Node* region = phi_map->control();
  MergeMemNode* phi_mem = phi_map->merged_memory();
  MergeMemNode* ex_mem = ex_map->merged_memory();
  if (region->in(0) != hidden_merge_mark) {
    // The control input is not (yet) a specially-marked region in phi_map.
    // Make it so, and build some phis.
    region = new RegionNode(2);
    _gvn.set_type(region, Type::CONTROL);
    region->set_req(0, hidden_merge_mark);  // marks an internal ex-state
    region->init_req(1, phi_map->control());
    phi_map->set_control(region);
    Node* io_phi = PhiNode::make(region, phi_map->i_o(), Type::ABIO);
    record_for_igvn(io_phi);
    _gvn.set_type(io_phi, Type::ABIO);
    phi_map->set_i_o(io_phi);
    for (MergeMemStream mms(phi_mem); mms.next_non_empty(); ) {
      Node* m = mms.memory();
      Node* m_phi = PhiNode::make(region, m, Type::MEMORY, mms.adr_type(C));
      record_for_igvn(m_phi);
      _gvn.set_type(m_phi, Type::MEMORY);
      mms.set_memory(m_phi);
    }
  }

  // Either or both of phi_map and ex_map might already be converted into phis.
  Node* ex_control = ex_map->control();
  // if there is special marking on ex_map also, we add multiple edges from src
  bool add_multiple = (ex_control->in(0) == hidden_merge_mark);
  // how wide was the destination phi_map, originally?
  uint orig_width = region->req();

  if (add_multiple) {
    add_n_reqs(region, ex_control);
    add_n_reqs(phi_map->i_o(), ex_map->i_o());
  } else {
    // ex_map has no merges, so we just add single edges everywhere
    add_one_req(region, ex_control);
    add_one_req(phi_map->i_o(), ex_map->i_o());
  }
  for (MergeMemStream mms(phi_mem, ex_mem); mms.next_non_empty2(); ) {
    if (mms.is_empty()) {
      // get a copy of the base memory, and patch some inputs into it
      const TypePtr* adr_type = mms.adr_type(C);
      Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
      assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
      mms.set_memory(phi);
      // Prepare to append interesting stuff onto the newly sliced phi:
      while (phi->req() > orig_width) phi->del_req(phi->req()-1);
    }
    // Append stuff from ex_map:
    if (add_multiple) {
      add_n_reqs(mms.memory(), mms.memory2());
    } else {
      add_one_req(mms.memory(), mms.memory2());
    }
  }
  uint limit = ex_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip everything in the JVMS after tos.  (The ex_oop follows.)
    if (i == tos) i = ex_jvms->monoff();
    Node* src = ex_map->in(i);
    Node* dst = phi_map->in(i);
    if (src != dst) {
      PhiNode* phi;
      if (dst->in(0) != region) {
        dst = phi = PhiNode::make(region, dst, _gvn.type(dst));
        record_for_igvn(phi);
        _gvn.set_type(phi, phi->type());
        phi_map->set_req(i, dst);
        // Prepare to append interesting stuff onto the new phi:
        while (dst->req() > orig_width) dst->del_req(dst->req()-1);
      } else {
        assert(dst->is_Phi(), "nobody else uses a hidden region");
        phi = dst->as_Phi();
      }
      if (add_multiple && src->in(0) == ex_control) {
        // Both are phis.
        add_n_reqs(dst, src);
      } else {
        while (dst->req() < region->req()) add_one_req(dst, src);
      }
      const Type* srctype = _gvn.type(src);
      if (phi->type() != srctype) {
        const Type* dsttype = phi->type()->meet_speculative(srctype);
        if (phi->type() != dsttype) {
          phi->set_type(dsttype);
          _gvn.set_type(phi, dsttype);
        }
      }
    }
  }
  phi_map->merge_replaced_nodes_with(ex_map);
}

//--------------------------use_exception_state--------------------------------
Node* GraphKit::use_exception_state(SafePointNode* phi_map) {
  if (failing()) { stop(); return top(); }
  Node* region = phi_map->control();
  Node* hidden_merge_mark = root();
  assert(phi_map->jvms()->map() == phi_map, "sanity: 1-1 relation");
  Node* ex_oop = clear_saved_ex_oop(phi_map);
  if (region->in(0) == hidden_merge_mark) {
    // Special marking for internal ex-states.  Process the phis now.
    region->set_req(0, region);  // now it's an ordinary region
    set_jvms(phi_map->jvms());   // ...so now we can use it as a map
    // Note: Setting the jvms also sets the bci and sp.
    set_control(_gvn.transform(region));
    uint tos = jvms()->stkoff() + sp();
    for (uint i = 1; i < tos; i++) {
      Node* x = phi_map->in(i);
      if (x->in(0) == region) {
        assert(x->is_Phi(), "expected a special phi");
        phi_map->set_req(i, _gvn.transform(x));
      }
    }
    for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
      Node* x = mms.memory();
      if (x->in(0) == region) {
        assert(x->is_Phi(), "nobody else uses a hidden region");
        mms.set_memory(_gvn.transform(x));
      }
    }
    if (ex_oop->in(0) == region) {
      assert(ex_oop->is_Phi(), "expected a special phi");
      ex_oop = _gvn.transform(ex_oop);
    }
  } else {
    set_jvms(phi_map->jvms());
  }

  assert(!is_hidden_merge(phi_map->control()), "hidden ex. states cleared");
  assert(!is_hidden_merge(phi_map->i_o()), "hidden ex. states cleared");
  return ex_oop;
}

//---------------------------------java_bc-------------------------------------
Bytecodes::Code GraphKit::java_bc() const {
  ciMethod* method = this->method();
  int bci = this->bci();
  if (method != NULL && bci != InvocationEntryBci)
    return method->java_code_at_bci(bci);
  else
    return Bytecodes::_illegal;
}

void GraphKit::uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason reason,
                                                          bool must_throw) {
  // if the exception capability is set, then we will generate code
  // to check the JavaThread.should_post_on_exceptions flag to see
  // if we actually need to report exception events (for this
  // thread).  If we don't need to report exception events, we will
  // take the normal fast path provided by add_exception_events.  If
  // exception event reporting is enabled for this thread, we will
  // take the uncommon_trap in the BuildCutout below.

  // first must access the should_post_on_exceptions_flag in this thread's JavaThread
  Node* jthread = _gvn.transform(new ThreadLocalNode());
  Node* adr = basic_plus_adr(top(), jthread, in_bytes(JavaThread::should_post_on_exceptions_flag_offset()));
  Node* should_post_flag = make_load(control(), adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw, MemNode::unordered);

  // Test the should_post_on_exceptions_flag vs. 0
  Node* chk = _gvn.transform( new CmpINode(should_post_flag, intcon(0)) );
  Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );

  // Branch to slow_path if should_post_on_exceptions_flag was true
  { BuildCutout unless(this, tst, PROB_MAX);
    // Do not try anything fancy if we're notifying the VM on every throw.
    // Cf. case Bytecodes::_athrow in parse2.cpp.
    uncommon_trap(reason, Deoptimization::Action_none,
                  (ciKlass*)NULL, (char*)NULL, must_throw);
  }

}

//------------------------------builtin_throw----------------------------------
void GraphKit::builtin_throw(Deoptimization::DeoptReason reason, Node* arg) {
  bool must_throw = true;

  if (env()->jvmti_can_post_on_exceptions()) {
    // check if we must post exception events, take uncommon trap if so
    uncommon_trap_if_should_post_on_exceptions(reason, must_throw);
    // here if should_post_on_exceptions is false
    // continue on with the normal codegen
  }

  // If this particular condition has not yet happened at this
  // bytecode, then use the uncommon trap mechanism, and allow for
  // a future recompilation if several traps occur here.
  // If the throw is hot, try to use a more complicated inline mechanism
  // which keeps execution inside the compiled code.
  bool treat_throw_as_hot = false;
  ciMethodData* md = method()->method_data();

  if (ProfileTraps) {
    if (too_many_traps(reason)) {
      treat_throw_as_hot = true;
    }
    // (If there is no MDO at all, assume it is early in
    // execution, and that any deopts are part of the
    // startup transient, and don't need to be remembered.)

    // Also, if there is a local exception handler, treat all throws
    // as hot if there has been at least one in this method.
    if (C->trap_count(reason) != 0
        && method()->method_data()->trap_count(reason) != 0
        && has_ex_handler()) {
      treat_throw_as_hot = true;
    }
  }

  // If this throw happens frequently, an uncommon trap might cause
  // a performance pothole.  If there is a local exception handler,
  // and if this particular bytecode appears to be deoptimizing often,
  // let us handle the throw inline, with a preconstructed instance.
  // Note: If the deopt count has blown up, the uncommon trap
  // runtime is going to flush this nmethod, no matter what.
  if (treat_throw_as_hot
      && (!StackTraceInThrowable || OmitStackTraceInFastThrow)) {
    // If the throw is local, we use a pre-existing instance and
    // punt on the backtrace.  This would lead to a missing backtrace
    // (a repeat of 4292742) if the backtrace object is ever asked
    // for its backtrace.
    // Fixing this remaining case of 4292742 requires some flavor of
    // escape analysis.  Leave that for the future.
    ciInstance* ex_obj = NULL;
    switch (reason) {
    case Deoptimization::Reason_null_check:
      ex_obj = env()->NullPointerException_instance();
      break;
    case Deoptimization::Reason_div0_check:
      ex_obj = env()->ArithmeticException_instance();
      break;
    case Deoptimization::Reason_range_check:
      ex_obj = env()->ArrayIndexOutOfBoundsException_instance();
      break;
    case Deoptimization::Reason_class_check:
      if (java_bc() == Bytecodes::_aastore) {
        ex_obj = env()->ArrayStoreException_instance();
      } else {
        ex_obj = env()->ClassCastException_instance();
      }
      break;
    default:
      break;
    }
    if (failing()) { stop(); return; }  // exception allocation might fail
    if (ex_obj != NULL) {
      // Cheat with a preallocated exception object.
      if (C->log() != NULL)
        C->log()->elem("hot_throw preallocated='1' reason='%s'",
                       Deoptimization::trap_reason_name(reason));
      const TypeInstPtr* ex_con = TypeInstPtr::make(ex_obj);
      Node* ex_node = _gvn.transform(ConNode::make(ex_con));

      // Clear the detail message of the preallocated exception object.
      // Weblogic sometimes mutates the detail message of exceptions
      // using reflection.
      int offset = java_lang_Throwable::get_detailMessage_offset();
      const TypePtr* adr_typ = ex_con->add_offset(offset);

      Node *adr = basic_plus_adr(ex_node, ex_node, offset);
      const TypeOopPtr* val_type = TypeOopPtr::make_from_klass(env()->String_klass());
      Node *store = access_store_at(ex_node, adr, adr_typ, null(), val_type, T_OBJECT, IN_HEAP);

      add_exception_state(make_exception_state(ex_node));
      return;
    }
  }

  // %%% Maybe add entry to OptoRuntime which directly throws the exc.?
  // It won't be much cheaper than bailing to the interp., since we'll
  // have to pass up all the debug-info, and the runtime will have to
  // create the stack trace.

  // Usual case: Bail to interpreter.
  // Reserve the right to recompile if we haven't seen anything yet.

  ciMethod* m = Deoptimization::reason_is_speculate(reason) ? C->method() : NULL;
  Deoptimization::DeoptAction action = Deoptimization::Action_maybe_recompile;
  if (treat_throw_as_hot
      && (method()->method_data()->trap_recompiled_at(bci(), m)
          || C->too_many_traps(reason))) {
    // We cannot afford to take more traps here.  Suffer in the interpreter.
    if (C->log() != NULL)
      C->log()->elem("hot_throw preallocated='0' reason='%s' mcount='%d'",
                     Deoptimization::trap_reason_name(reason),
                     C->trap_count(reason));
    action = Deoptimization::Action_none;
  }

  // "must_throw" prunes the JVM state to include only the stack, if there
  // are no local exception handlers.  This should cut down on register
  // allocation time and code size, by drastically reducing the number
  // of in-edges on the call to the uncommon trap.

  uncommon_trap(reason, action, (ciKlass*)NULL, (char*)NULL, must_throw);
}


//----------------------------PreserveJVMState---------------------------------
PreserveJVMState::PreserveJVMState(GraphKit* kit, bool clone_map) {
  debug_only(kit->verify_map());
  _kit = kit;
  _map = kit->map();  // preserve the map
  _sp = kit->sp();
  kit->set_map(clone_map ? kit->clone_map() : NULL);
#ifdef ASSERT
  _bci = kit->bci();
  Parse* parser = kit->is_Parse();
  int block = (parser == NULL || parser->block() == NULL) ? -1 : parser->block()->rpo();
  _block = block;
#endif
}
PreserveJVMState::~PreserveJVMState() {
  GraphKit* kit = _kit;
#ifdef ASSERT
  assert(kit->bci() == _bci, "bci must not shift");
  Parse* parser = kit->is_Parse();
  int block = (parser == NULL || parser->block() == NULL) ? -1 : parser->block()->rpo();
  assert(block == _block, "block must not shift");
#endif
  kit->set_map(_map);
  kit->set_sp(_sp);
}


//-----------------------------BuildCutout-------------------------------------
BuildCutout::BuildCutout(GraphKit* kit, Node* p, float prob, float cnt)
  : PreserveJVMState(kit)
{
  assert(p->is_Con() || p->is_Bool(), "test must be a bool");
  SafePointNode* outer_map = _map;  // preserved map is caller's
  SafePointNode* inner_map = kit->map();
  IfNode* iff = kit->create_and_map_if(outer_map->control(), p, prob, cnt);
  outer_map->set_control(kit->gvn().transform( new IfTrueNode(iff) ));
  inner_map->set_control(kit->gvn().transform( new IfFalseNode(iff) ));
}
BuildCutout::~BuildCutout() {
  GraphKit* kit = _kit;
  assert(kit->stopped(), "cutout code must stop, throw, return, etc.");
}

//---------------------------PreserveReexecuteState----------------------------
PreserveReexecuteState::PreserveReexecuteState(GraphKit* kit) {
  assert(!kit->stopped(), "must call stopped() before");
  _kit = kit;
  _sp = kit->sp();
  _reexecute = kit->jvms()->_reexecute;
}
PreserveReexecuteState::~PreserveReexecuteState() {
  if (_kit->stopped()) return;
  _kit->jvms()->_reexecute = _reexecute;
  _kit->set_sp(_sp);
}

//------------------------------clone_map--------------------------------------
// Implementation of PreserveJVMState
//
// Only clone_map(...) here. If this function is only used in the
// PreserveJVMState class we may want to get rid of this extra
// function eventually and do it all there.

SafePointNode* GraphKit::clone_map() {
  if (map() == NULL) return NULL;

  // Clone the memory edge first
  Node* mem = MergeMemNode::make(map()->memory());
  gvn().set_type_bottom(mem);

  SafePointNode *clonemap = (SafePointNode*)map()->clone();
  JVMState* jvms = this->jvms();
  JVMState* clonejvms = jvms->clone_shallow(C);
  clonemap->set_memory(mem);
  clonemap->set_jvms(clonejvms);
  clonejvms->set_map(clonemap);
  record_for_igvn(clonemap);
  gvn().set_type_bottom(clonemap);
  return clonemap;
}


//-----------------------------set_map_clone-----------------------------------
void GraphKit::set_map_clone(SafePointNode* m) {
  _map = m;
  _map = clone_map();
  _map->set_next_exception(NULL);
  debug_only(verify_map());
}


//----------------------------kill_dead_locals---------------------------------
// Detect any locals which are known to be dead, and force them to top.
void GraphKit::kill_dead_locals() {
  // Consult the liveness information for the locals.  If any
  // of them are unused, then they can be replaced by top().  This
  // should help register allocation time and cut down on the size
  // of the deoptimization information.

  // This call is made from many of the bytecode handling
  // subroutines called from the Big Switch in do_one_bytecode.
  // Every bytecode which might include a slow path is responsible
  // for killing its dead locals.  The more consistent we
  // are about killing deads, the fewer useless phis will be
  // constructed for them at various merge points.

  // bci can be -1 (InvocationEntryBci).  We return the entry
  // liveness for the method.

  if (method() == NULL || method()->code_size() == 0) {
    // We are building a graph for a call to a native method.
    // All locals are live.
    return;
  }

  ResourceMark rm;

  // Consult the liveness information for the locals.  If any
  // of them are unused, then they can be replaced by top().  This
  // should help register allocation time and cut down on the size
  // of the deoptimization information.
  MethodLivenessResult live_locals = method()->liveness_at_bci(bci());

  int len = (int)live_locals.size();
  assert(len <= jvms()->loc_size(), "too many live locals");
  for (int local = 0; local < len; local++) {
    if (!live_locals.at(local)) {
      set_local(local, top());
    }
  }
}

#ifdef ASSERT
//-------------------------dead_locals_are_killed------------------------------
// Return true if all dead locals are set to top in the map.
// Used to assert "clean" debug info at various points.
bool GraphKit::dead_locals_are_killed() {
  if (method() == NULL || method()->code_size() == 0) {
    // No locals need to be dead, so all is as it should be.
    return true;
  }

  // Make sure somebody called kill_dead_locals upstream.
  ResourceMark rm;
  for (JVMState* jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) {
    if (jvms->loc_size() == 0) continue;  // no locals to consult
    SafePointNode* map = jvms->map();
    ciMethod* method = jvms->method();
    int bci = jvms->bci();
    if (jvms == this->jvms()) {
      bci = this->bci();  // it might not yet be synched
    }
    MethodLivenessResult live_locals = method->liveness_at_bci(bci);
    int len = (int)live_locals.size();
    if (!live_locals.is_valid() || len == 0)
      // This method is trivial, or is poisoned by a breakpoint.
      return true;
    assert(len == jvms->loc_size(), "live map consistent with locals map");
    for (int local = 0; local < len; local++) {
      if (!live_locals.at(local) && map->local(jvms, local) != top()) {
        if (PrintMiscellaneous && (Verbose || WizardMode)) {
          tty->print_cr("Zombie local %d: ", local);
          jvms->dump();
        }
        return false;
      }
    }
  }
  return true;
}

#endif //ASSERT

// Helper function for enforcing certain bytecodes to reexecute if deoptimization happens.
static bool should_reexecute_implied_by_bytecode(JVMState *jvms, bool is_anewarray) {
  ciMethod* cur_method = jvms->method();
  int cur_bci = jvms->bci();
  if (cur_method != NULL && cur_bci != InvocationEntryBci) {
    Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
    return Interpreter::bytecode_should_reexecute(code) ||
           (is_anewarray && code == Bytecodes::_multianewarray);
    // Reexecute _multianewarray bytecode which was replaced with
    // sequence of [a]newarray. See Parse::do_multianewarray().
    //
    // Note: interpreter should not have it set since this optimization
    // is limited by dimensions and guarded by flag so in some cases
    // multianewarray() runtime calls will be generated and
    // the bytecode should not be reexecuted (stack will not be reset).
  } else {
    return false;
  }
}

// Helper function for adding JVMState and debug information to node
void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) {
  // Add the safepoint edges to the call (or other safepoint).

  // Make sure dead locals are set to top.  This
  // should help register allocation time and cut down on the size
  // of the deoptimization information.
  assert(dead_locals_are_killed(), "garbage in debug info before safepoint");

  // Walk the inline list to fill in the correct set of JVMState's
  // Also fill in the associated edges for each JVMState.

  // If the bytecode needs to be reexecuted we need to put
  // the arguments back on the stack.
  const bool should_reexecute = jvms()->should_reexecute();
  JVMState* youngest_jvms = should_reexecute ? sync_jvms_for_reexecute() : sync_jvms();

  // NOTE: set_bci (called from sync_jvms) might reset the reexecute bit to
  // undefined if the bci is different.  This is normal for Parse but it
  // should not happen for LibraryCallKit because only one bci is processed.
  assert(!is_LibraryCallKit() || (jvms()->should_reexecute() == should_reexecute),
         "in LibraryCallKit the reexecute bit should not change");

  // If we are guaranteed to throw, we can prune everything but the
  // input to the current bytecode.
  bool can_prune_locals = false;
  uint stack_slots_not_pruned = 0;
  int inputs = 0, depth = 0;
  if (must_throw) {
    assert(method() == youngest_jvms->method(), "sanity");
    if (compute_stack_effects(inputs, depth)) {
      can_prune_locals = true;
      stack_slots_not_pruned = inputs;
    }
  }

  if (env()->should_retain_local_variables()) {
    // At any safepoint, this method can get breakpointed, which would
    // then require an immediate deoptimization.
    can_prune_locals = false;  // do not prune locals
    stack_slots_not_pruned = 0;
  }

  // do not scribble on the input jvms
  JVMState* out_jvms = youngest_jvms->clone_deep(C);
  call->set_jvms(out_jvms);  // Start jvms list for call node

  // For a known set of bytecodes, the interpreter should reexecute them if
  // deoptimization happens. We set the reexecute state for them here
  if (out_jvms->is_reexecute_undefined() &&  // don't change if already specified
      should_reexecute_implied_by_bytecode(out_jvms, call->is_AllocateArray())) {
#ifdef ASSERT
    int inputs = 0, not_used;  // initialized by GraphKit::compute_stack_effects()
    assert(method() == youngest_jvms->method(), "sanity");
    assert(compute_stack_effects(inputs, not_used), "unknown bytecode: %s", Bytecodes::name(java_bc()));
    assert(out_jvms->sp() >= (uint)inputs, "not enough operands for reexecution");
#endif // ASSERT
    out_jvms->set_should_reexecute(true);  // NOTE: youngest_jvms not changed
  }

  // Presize the call:
  DEBUG_ONLY(uint non_debug_edges = call->req());
  call->add_req_batch(top(), youngest_jvms->debug_depth());
  assert(call->req() == non_debug_edges + youngest_jvms->debug_depth(), "");

  // Set up edges so that the call looks like this:
  //  Call [state:] ctl io mem fptr retadr
  //       [parms:] parm0 ... parmN
  //       [root:]  loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
  //    [...mid:]   loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN [...]
  //       [young:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
  // Note that caller debug info precedes callee debug info.

  // Fill pointer walks backwards from "young:" to "root:" in the diagram above:
  uint debug_ptr = call->req();

  // Loop over the map input edges associated with jvms, add them
  // to the call node, & reset all offsets to match call node array.
  for (JVMState* in_jvms = youngest_jvms; in_jvms != NULL; ) {
    uint debug_end = debug_ptr;
    uint debug_start = debug_ptr - in_jvms->debug_size();
    debug_ptr = debug_start;  // back up the ptr

    uint p = debug_start;  // walks forward in [debug_start, debug_end)
    uint j, k, l;
    SafePointNode* in_map = in_jvms->map();
    out_jvms->set_map(call);

    if (can_prune_locals) {
      assert(in_jvms->method() == out_jvms->method(), "sanity");
      // If the current throw can reach an exception handler in this JVMS,
      // then we must keep everything live that can reach that handler.
      // As a quick and dirty approximation, we look for any handlers at all.
      if (in_jvms->method()->has_exception_handlers()) {
        can_prune_locals = false;
      }
    }

    // Add the Locals
    k = in_jvms->locoff();
    l = in_jvms->loc_size();
    out_jvms->set_locoff(p);
    if (!can_prune_locals) {
      for (j = 0; j < l; j++)
        call->set_req(p++, in_map->in(k+j));
    } else {
      p += l;  // already set to top above by add_req_batch
    }

    // Add the Expression Stack
    k = in_jvms->stkoff();
    l = in_jvms->sp();
    out_jvms->set_stkoff(p);
    if (!can_prune_locals) {
      for (j = 0; j < l; j++)
        call->set_req(p++, in_map->in(k+j));
    } else if (can_prune_locals && stack_slots_not_pruned != 0) {
      // Divide stack into {S0,...,S1}, where S0 is set to top.
      uint s1 = stack_slots_not_pruned;
      stack_slots_not_pruned = 0;  // for next iteration
      if (s1 > l) s1 = l;
      uint s0 = l - s1;
      p += s0;  // skip the tops preinstalled by add_req_batch
      for (j = s0; j < l; j++)
        call->set_req(p++, in_map->in(k+j));
    } else {
      p += l;  // already set to top above by add_req_batch
    }

    // Add the Monitors
    k = in_jvms->monoff();
    l = in_jvms->mon_size();
    out_jvms->set_monoff(p);
    for (j = 0; j < l; j++)
      call->set_req(p++, in_map->in(k+j));

    // Copy any scalar object fields.
    k = in_jvms->scloff();
    l = in_jvms->scl_size();
    out_jvms->set_scloff(p);
    for (j = 0; j < l; j++)
      call->set_req(p++, in_map->in(k+j));

    // Finish the new jvms.
    out_jvms->set_endoff(p);

    assert(out_jvms->endoff() == debug_end, "fill ptr must match");
    assert(out_jvms->depth() == in_jvms->depth(), "depth must match");
    assert(out_jvms->loc_size() == in_jvms->loc_size(), "size must match");
    assert(out_jvms->mon_size() == in_jvms->mon_size(), "size must match");
    assert(out_jvms->scl_size() == in_jvms->scl_size(), "size must match");
    assert(out_jvms->debug_size() == in_jvms->debug_size(), "size must match");

    // Update the two tail pointers in parallel.
    out_jvms = out_jvms->caller();
    in_jvms = in_jvms->caller();
  }

  assert(debug_ptr == non_debug_edges, "debug info must fit exactly");

  // Test the correctness of JVMState::debug_xxx accessors:
  assert(call->jvms()->debug_start() == non_debug_edges, "");
  assert(call->jvms()->debug_end() == call->req(), "");
  assert(call->jvms()->debug_depth() == call->req() - non_debug_edges, "");
}

bool GraphKit::compute_stack_effects(int& inputs, int& depth) {
  Bytecodes::Code code = java_bc();
  if (code == Bytecodes::_wide) {
    code = method()->java_code_at_bci(bci() + 1);
  }

  BasicType rtype = T_ILLEGAL;
  int rsize = 0;

  if (code != Bytecodes::_illegal) {
    depth = Bytecodes::depth(code);        // checkcast=0, athrow=-1
    rtype = Bytecodes::result_type(code);  // checkcast=P, athrow=V
    if (rtype < T_CONFLICT)
      rsize = type2size[rtype];
  }

  switch (code) {
  case Bytecodes::_illegal:
    return false;

  case Bytecodes::_ldc:
  case Bytecodes::_ldc_w:
  case Bytecodes::_ldc2_w:
    inputs = 0;
    break;

  case Bytecodes::_dup:         inputs = 1;  break;
  case Bytecodes::_dup_x1:      inputs = 2;  break;
  case Bytecodes::_dup_x2:      inputs = 3;  break;
  case Bytecodes::_dup2:        inputs = 2;  break;
  case Bytecodes::_dup2_x1:     inputs = 3;  break;
  case Bytecodes::_dup2_x2:     inputs = 4;  break;
  case Bytecodes::_swap:        inputs = 2;  break;
  case Bytecodes::_arraylength: inputs = 1;  break;

  case Bytecodes::_getstatic:
  case Bytecodes::_putstatic:
  case Bytecodes::_getfield:
  case Bytecodes::_putfield:
    {
      bool ignored_will_link;
      ciField* field = method()->get_field_at_bci(bci(), ignored_will_link);
      int size = field->type()->size();
      bool is_get = (depth >= 0), is_static = (depth & 1);
      inputs = (is_static ? 0 : 1);
      if (is_get) {
        depth = size - inputs;
      } else {
        inputs += size;  // putxxx pops the value from the stack
        depth = - inputs;
      }
    }
    break;

  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic:
  case Bytecodes::_invokedynamic:
  case Bytecodes::_invokeinterface:
    {
      bool ignored_will_link;
      ciSignature* declared_signature = NULL;
      ciMethod* ignored_callee = method()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
      assert(declared_signature != NULL, "cannot be null");
      inputs = declared_signature->arg_size_for_bc(code);
      int size = declared_signature->return_type()->size();
      depth = size - inputs;
    }
    break;

  case Bytecodes::_multianewarray:
    {
      ciBytecodeStream iter(method());
      iter.reset_to_bci(bci());
      iter.next();
      inputs = iter.get_dimensions();
      assert(rsize == 1, "");
      depth = rsize - inputs;
    }
    break;

  case Bytecodes::_ireturn:
  case Bytecodes::_lreturn:
  case Bytecodes::_freturn:
  case Bytecodes::_dreturn:
  case Bytecodes::_areturn:
    assert(rsize == -depth, "");
    inputs = rsize;
    break;

  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    inputs = 0;
    depth  = 1;  // S.B. depth=1, not zero
    break;

  default:
    // bytecode produces a typed result
    inputs = rsize - depth;
    assert(inputs >= 0, "");
    break;
  }

#ifdef ASSERT
  // spot check
  int outputs = depth + inputs;
  assert(outputs >= 0, "sanity");
  switch (code) {
  case Bytecodes::_checkcast: assert(inputs == 1 && outputs == 1, ""); break;
  case Bytecodes::_athrow:    assert(inputs == 1 && outputs == 0, ""); break;
  case Bytecodes::_aload_0:   assert(inputs == 0 && outputs == 1, ""); break;
  case Bytecodes::_return:    assert(inputs == 0 && outputs == 0, ""); break;
  case Bytecodes::_drem:      assert(inputs == 4 && outputs == 2, ""); break;
  default:                    break;
  }
#endif //ASSERT

  return true;
}



//------------------------------basic_plus_adr---------------------------------
Node* GraphKit::basic_plus_adr(Node* base, Node* ptr, Node* offset) {
  // short-circuit a common case
  if (offset == intcon(0)) return ptr;
  return _gvn.transform( new AddPNode(base, ptr, offset) );
}

Node* GraphKit::ConvI2L(Node* offset) {
  // short-circuit a common case
  jint offset_con = find_int_con(offset, Type::OffsetBot);
  if (offset_con != Type::OffsetBot) {
    return longcon((jlong) offset_con);
  }
  return _gvn.transform( new ConvI2LNode(offset));
}

Node* GraphKit::ConvI2UL(Node* offset) {
  juint offset_con = (juint) find_int_con(offset, Type::OffsetBot);
  if (offset_con != (juint) Type::OffsetBot) {
    return longcon((julong) offset_con);
  }
  Node* conv = _gvn.transform( new ConvI2LNode(offset));
  Node* mask = _gvn.transform(ConLNode::make((julong) max_juint));
  return _gvn.transform( new AndLNode(conv, mask) );
}

Node* GraphKit::ConvL2I(Node* offset) {
  // short-circuit a common case
  jlong offset_con = find_long_con(offset, (jlong)Type::OffsetBot);
  if (offset_con != (jlong)Type::OffsetBot) {
    return intcon((int) offset_con);
  }
  return _gvn.transform( new ConvL2INode(offset));
}

//-------------------------load_object_klass-----------------------------------
Node* GraphKit::load_object_klass(Node* obj) {
  // Special-case a fresh allocation to avoid building nodes:
  Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
  if (akls != NULL) return akls;
  Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
  return _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS));
}

//-------------------------load_array_length-----------------------------------
Node* GraphKit::load_array_length(Node* array) {
  // Special-case a fresh allocation to avoid building nodes:
  AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(array, &_gvn);
  Node *alen;
  if (alloc == NULL) {
    Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes());
    alen = _gvn.transform( new LoadRangeNode(0, immutable_memory(), r_adr, TypeInt::POS));
  } else {
    alen = alloc->Ideal_length();
    Node* ccast = alloc->make_ideal_length(_gvn.type(array)->is_oopptr(), &_gvn);
    if (ccast != alen) {
      alen = _gvn.transform(ccast);
    }
  }
  return alen;
}

//------------------------------do_null_check----------------------------------
// Helper function to do a NULL pointer check.  Returned value is
// the incoming address with NULL casted away.  You are allowed to use the
// not-null value only if you are control dependent on the test.
#ifndef PRODUCT
extern int explicit_null_checks_inserted,
           explicit_null_checks_elided;
#endif
Node* GraphKit::null_check_common(Node* value, BasicType type,
                                  // optional arguments for variations:
                                  bool assert_null,
                                  Node* *null_control,
                                  bool speculative) {
  assert(!assert_null || null_control == NULL, "not both at once");
  if (stopped()) return top();
  NOT_PRODUCT(explicit_null_checks_inserted++);

  // Construct NULL check
  Node *chk = NULL;
  switch(type) {
    case T_LONG   : chk = new CmpLNode(value, _gvn.zerocon(T_LONG)); break;
    case T_INT    : chk = new CmpINode(value, _gvn.intcon(0)); break;
    case T_ARRAY  : // fall through
      type = T_OBJECT;  // simplify further tests
    case T_OBJECT : {
      const Type *t = _gvn.type( value );

      const TypeOopPtr* tp = t->isa_oopptr();
      if (tp != NULL && tp->klass() != NULL && !tp->klass()->is_loaded()
          // Only for do_null_check, not any of its siblings:
          && !assert_null && null_control == NULL) {
        // Usually, any field access or invocation on an unloaded oop type
        // will simply fail to link, since the statically linked class is
        // likely also to be unloaded. However, in -Xcomp mode, sometimes
        // the static class is loaded but the sharper oop type is not.
        // Rather than checking for this obscure case in lots of places,
        // we simply observe that a null check on an unloaded class
        // will always be followed by a nonsense operation, so we
        // can just issue the uncommon trap here.
        // Our access to the unloaded class will only be correct
        // after it has been loaded and initialized, which requires
        // a trip through the interpreter.
#ifndef PRODUCT
        if (WizardMode) { tty->print("Null check of unloaded "); tp->klass()->print(); tty->cr(); }
#endif
        uncommon_trap(Deoptimization::Reason_unloaded,
                      Deoptimization::Action_reinterpret,
                      tp->klass(), "!loaded");
        return top();
      }

      if (assert_null) {
        // See if the type is contained in NULL_PTR.
        // If so, then the value is already null.
        if (t->higher_equal(TypePtr::NULL_PTR)) {
          NOT_PRODUCT(explicit_null_checks_elided++);
          return value;  // Elided null assert quickly!
        }
      } else {
        // See if mixing in the NULL pointer changes type.
        // If so, then the NULL pointer was not allowed in the original
        // type.  In other words, "value" was not-null.
        if (t->meet(TypePtr::NULL_PTR) != t->remove_speculative()) {
          // same as: if (!TypePtr::NULL_PTR->higher_equal(t)) ...
          NOT_PRODUCT(explicit_null_checks_elided++);
          return value;  // Elided null check quickly!
        }
      }
      chk = new CmpPNode( value, null() );
      break;
    }

    default:
      fatal("unexpected type: %s", type2name(type));
  }
  assert(chk != NULL, "sanity check");
  chk = _gvn.transform(chk);

  BoolTest::mask btest = assert_null ? BoolTest::eq : BoolTest::ne;
  BoolNode *btst = new BoolNode( chk, btest);
  Node *tst = _gvn.transform( btst );

  //-----------
  // if peephole optimizations occurred, a prior test existed.
  // If a prior test existed, maybe it dominates as we can avoid this test.
  if (tst != btst && type == T_OBJECT) {
    // At this point we want to scan up the CFG to see if we can
    // find an identical test (and so avoid this test altogether).
    Node *cfg = control();
    int depth = 0;
    while( depth < 16 ) {  // Limit search depth for speed
      if( cfg->Opcode() == Op_IfTrue &&
          cfg->in(0)->in(1) == tst ) {
        // Found prior test.  Use "cast_not_null" to construct an identical
        // CastPP (and hence hash to) as already exists for the prior test.
        // Return that casted value.
        if (assert_null) {
          replace_in_map(value, null());
          return null();  // do not issue the redundant test
        }
        Node *oldcontrol = control();
        set_control(cfg);
        Node *res = cast_not_null(value);
        set_control(oldcontrol);
        NOT_PRODUCT(explicit_null_checks_elided++);
        return res;
      }
      cfg = IfNode::up_one_dom(cfg, /*linear_only=*/ true);
      if (cfg == NULL) break;  // Quit at region nodes
      depth++;
    }
  }

  //-----------
  // Branch to failure if null
  float ok_prob = PROB_MAX;  // a priori estimate: nulls never happen
  Deoptimization::DeoptReason reason;
  if (assert_null) {
    reason = Deoptimization::reason_null_assert(speculative);
  } else if (type == T_OBJECT) {
    reason = Deoptimization::reason_null_check(speculative);
  } else {
    reason = Deoptimization::Reason_div0_check;
  }
  // %%% Since Reason_unhandled is not recorded on a per-bytecode basis,
  // ciMethodData::has_trap_at will return a conservative -1 if any
  // must-be-null assertion has failed.  This could cause performance
  // problems for a method after its first do_null_assert failure.
  // Consider using 'Reason_class_check' instead?

  // To cause an implicit null check, we set the not-null probability
  // to the maximum (PROB_MAX).  For an explicit check the probability
  // is set to a smaller value.
  if (null_control != NULL || too_many_traps(reason)) {
    // probability is less likely
    ok_prob = PROB_LIKELY_MAG(3);
  } else if (!assert_null &&
             (ImplicitNullCheckThreshold > 0) &&
             method() != NULL &&
             (method()->method_data()->trap_count(reason)
              >= (uint)ImplicitNullCheckThreshold)) {
    ok_prob = PROB_LIKELY_MAG(3);
  }

  if (null_control != NULL) {
    IfNode* iff = create_and_map_if(control(), tst, ok_prob, COUNT_UNKNOWN);
    Node* null_true = _gvn.transform( new IfFalseNode(iff));
    set_control(      _gvn.transform( new IfTrueNode(iff)));
#ifndef PRODUCT
    if (null_true == top()) {
      explicit_null_checks_elided++;
    }
#endif
    (*null_control) = null_true;
  } else {
    BuildCutout unless(this, tst, ok_prob);
    // Check for optimizer eliding test at parse time
    if (stopped()) {
      // Failure not possible; do not bother making uncommon trap.
      NOT_PRODUCT(explicit_null_checks_elided++);
    } else if (assert_null) {
      uncommon_trap(reason,
                    Deoptimization::Action_make_not_entrant,
                    NULL, "assert_null");
    } else {
      replace_in_map(value, zerocon(type));
      builtin_throw(reason);
    }
  }

  // Must throw exception, fall-thru not possible?
  if (stopped()) {
    return top();  // No result
  }

  if (assert_null) {
    // Cast obj to null on this path.
    replace_in_map(value, zerocon(type));
    return zerocon(type);
  }

  // Cast obj to not-null on this path, if there is no null_control.
  // (If there is a null_control, a non-null value may come back to haunt us.)
  if (type == T_OBJECT) {
    Node* cast = cast_not_null(value, false);
    if (null_control == NULL || (*null_control) == top())
      replace_in_map(value, cast);
    value = cast;
  }

  return value;
}
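
// Usage sketch (illustrative only, not taken from the upstream sources):
// parser code normally reaches null_check_common() through thin wrappers such
// as null_check() declared in graphKit.hpp, along the lines of
//   Node* obj = null_check(peek());  // traps, or casts NULL away, right here
//   if (stopped())  return;          // the not-null path died; abandon parse
// As the comment above notes, the returned value may only be used while
// control dependent on the generated test.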

//------------------------------cast_not_null----------------------------------
// Cast obj to not-null on this path
Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {
  const Type *t = _gvn.type(obj);
  const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL);
  // Object is already not-null?
  if( t == t_not_null ) return obj;

  Node *cast = new CastPPNode(obj,t_not_null);
  cast->init_req(0, control());
  cast = _gvn.transform( cast );

  // Scan for instances of 'obj' in the current JVM mapping.
  // These instances are known to be not-null after the test.
  if (do_replace_in_map)
    replace_in_map(obj, cast);

  return cast;  // Return casted value
}

// Sometimes in intrinsics, we implicitly know an object is not null
// (there's no actual null check) so we can cast it to not null. In
// the course of optimizations, the input to the cast can become null.
// In that case that data path will die and we need the control path
// to become dead as well to keep the graph consistent. So we have to
// add a check for null for which one branch can't be taken. It uses
// an Opaque4 node that will cause the check to be removed after loop
// opts so the test goes away and the compiled code doesn't execute a
// useless check.
Node* GraphKit::must_be_not_null(Node* value, bool do_replace_in_map) {
  if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(value))) {
    return value;
  }
  Node* chk = _gvn.transform(new CmpPNode(value, null()));
  Node *tst = _gvn.transform(new BoolNode(chk, BoolTest::ne));
  Node* opaq = _gvn.transform(new Opaque4Node(C, tst, intcon(1)));
  IfNode *iff = new IfNode(control(), opaq, PROB_MAX, COUNT_UNKNOWN);
  _gvn.set_type(iff, iff->Value(&_gvn));
  Node *if_f = _gvn.transform(new IfFalseNode(iff));
  Node *frame = _gvn.transform(new ParmNode(C->start(), TypeFunc::FramePtr));
  Node* halt = _gvn.transform(new HaltNode(if_f, frame, "unexpected null in intrinsic"));
  C->root()->add_req(halt);
  Node *if_t = _gvn.transform(new IfTrueNode(iff));
  set_control(if_t);
  return cast_not_null(value, do_replace_in_map);
}


//--------------------------replace_in_map-------------------------------------
void GraphKit::replace_in_map(Node* old, Node* neww) {
  if (old == neww) {
    return;
  }

  map()->replace_edge(old, neww);

  // Note: This operation potentially replaces any edge
  // on the map.  This includes locals, stack, and monitors
  // of the current (innermost) JVM state.

  // don't let inconsistent types from profiling escape this
  // method

  const Type* told = _gvn.type(old);
  const Type* tnew = _gvn.type(neww);

  if (!tnew->higher_equal(told)) {
    return;
  }

  map()->record_replaced_node(old, neww);
}


//=============================================================================
//--------------------------------memory---------------------------------------
Node* GraphKit::memory(uint alias_idx) {
  MergeMemNode* mem = merged_memory();
  Node* p = mem->memory_at(alias_idx);
  assert(p != mem->empty_memory(), "empty");
  _gvn.set_type(p, Type::MEMORY);  // must be mapped
  return p;
}

//-----------------------------reset_memory------------------------------------
Node* GraphKit::reset_memory() {
  Node* mem = map()->memory();
  // do not use this node for any more parsing!
  debug_only( map()->set_memory((Node*)NULL) );
  return _gvn.transform( mem );
}

//------------------------------set_all_memory---------------------------------
void GraphKit::set_all_memory(Node* newmem) {
  Node* mergemem = MergeMemNode::make(newmem);
  gvn().set_type_bottom(mergemem);
  map()->set_memory(mergemem);
}

//------------------------------set_all_memory_call----------------------------
void GraphKit::set_all_memory_call(Node* call, bool separate_io_proj) {
  Node* newmem = _gvn.transform( new ProjNode(call, TypeFunc::Memory, separate_io_proj) );
  set_all_memory(newmem);
}
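
// Illustrative sketch (assumed caller-side code, not part of this file): the
// raw-memory factories defined below are normally paired with basic_plus_adr(),
// as uncommon_trap_if_should_post_on_exceptions() does above, e.g.
//   Node* adr = basic_plus_adr(top(), base_ptr, offset_in_bytes);  // raw address
//   Node* val = make_load(control(), adr, TypeInt::INT, T_INT,
//                         Compile::AliasIdxRaw, MemNode::unordered);
//   store_to_memory(control(), adr, new_val, T_INT,
//                   Compile::AliasIdxRaw, MemNode::unordered);
// where base_ptr, offset_in_bytes and new_val stand for caller-supplied nodes.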

//=============================================================================
//
// parser factory methods for MemNodes
//
// These are layered on top of the factory methods in LoadNode and StoreNode,
// and integrate with the parser's memory state and _gvn engine.
//

// factory methods in "int adr_idx"
Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                          int adr_idx,
                          MemNode::MemOrd mo,
                          LoadNode::ControlDependency control_dependency,
                          bool require_atomic_access,
                          bool unaligned,
                          bool mismatched,
                          bool unsafe,
                          uint8_t barrier_data) {
  assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
  const TypePtr* adr_type = NULL;  // debug-mode-only argument
  debug_only(adr_type = C->get_adr_type(adr_idx));
  Node* mem = memory(adr_idx);
  Node* ld;
  if (require_atomic_access && bt == T_LONG) {
    ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data);
  } else if (require_atomic_access && bt == T_DOUBLE) {
    ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data);
  } else {
    ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data);
  }
  ld = _gvn.transform(ld);
  if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
    // Improve graph before escape analysis and boxing elimination.
    record_for_igvn(ld);
  }
  return ld;
}

Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
                                int adr_idx,
                                MemNode::MemOrd mo,
                                bool require_atomic_access,
                                bool unaligned,
                                bool mismatched,
                                bool unsafe) {
  assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
  const TypePtr* adr_type = NULL;
  debug_only(adr_type = C->get_adr_type(adr_idx));
  Node *mem = memory(adr_idx);
  Node* st;
  if (require_atomic_access && bt == T_LONG) {
    st = StoreLNode::make_atomic(ctl, mem, adr, adr_type, val, mo);
  } else if (require_atomic_access && bt == T_DOUBLE) {
    st = StoreDNode::make_atomic(ctl, mem, adr, adr_type, val, mo);
  } else {
    st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
  }
  if (unaligned) {
    st->as_Store()->set_unaligned_access();
  }
  if (mismatched) {
    st->as_Store()->set_mismatched_access();
  }
  if (unsafe) {
    st->as_Store()->set_unsafe_access();
  }
  st = _gvn.transform(st);
  set_memory(st, adr_idx);
  // Back-to-back stores can only remove intermediate store with DU info
  // so push on worklist for optimizer.
  if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
    record_for_igvn(st);

  return st;
}

Node* GraphKit::access_store_at(Node* obj,
                                Node* adr,
                                const TypePtr* adr_type,
                                Node* val,
                                const Type* val_type,
                                BasicType bt,
                                DecoratorSet decorators) {
  // Transformation of a value which could be NULL pointer (CastPP #NULL)
  // could be delayed during Parse (for example, in adjust_map_after_if()).
  // Execute transformation here to avoid barrier generation in such case.
  if (_gvn.type(val) == TypePtr::NULL_PTR) {
    val = _gvn.makecon(TypePtr::NULL_PTR);
  }

  if (stopped()) {
    return top();  // Dead path ?
  }

  assert(val != NULL, "not dead path");

  C2AccessValuePtr addr(adr, adr_type);
  C2AccessValue value(val, val_type);
  C2ParseAccess access(this, decorators | C2_WRITE_ACCESS, bt, obj, addr);
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC2::store_at(access, value);
  } else {
    return _barrier_set->store_at(access, value);
  }
}

Node* GraphKit::access_load_at(Node* obj,   // containing obj
                               Node* adr,   // actual address to load val at
                               const TypePtr* adr_type,
                               const Type* val_type,
                               BasicType bt,
                               DecoratorSet decorators) {
  if (stopped()) {
    return top();  // Dead path ?
  }

  C2AccessValuePtr addr(adr, adr_type);
  C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, obj, addr);
  if (access.is_raw()) {
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC2::load_at(access, val_type);
  } else {
    return _barrier_set->load_at(access, val_type);
  }
}

Node* GraphKit::access_load(Node* adr,   // actual address to load val from
                            const Type* val_type,
                            BasicType bt,
                            DecoratorSet decorators) {
  if (stopped()) {
    return top(); // Dead path ?
  }

  C2AccessValuePtr addr(adr, adr->bottom_type()->is_ptr());
  C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, NULL, addr);
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC2::load_at(access, val_type);
  } else {
    return _barrier_set->load_at(access, val_type);
  }
}

Node* GraphKit::access_atomic_cmpxchg_val_at(Node* obj,
                                             Node* adr,
                                             const TypePtr* adr_type,
                                             int alias_idx,
                                             Node* expected_val,
                                             Node* new_val,
                                             const Type* value_type,
                                             BasicType bt,
                                             DecoratorSet decorators) {
  C2AccessValuePtr addr(adr, adr_type);
  C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS,
                             bt, obj, addr, alias_idx);
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC2::atomic_cmpxchg_val_at(access, expected_val, new_val, value_type);
  } else {
    return _barrier_set->atomic_cmpxchg_val_at(access, expected_val, new_val, value_type);
  }
}

Node* GraphKit::access_atomic_cmpxchg_bool_at(Node* obj,
                                              Node* adr,
                                              const TypePtr* adr_type,
                                              int alias_idx,
                                              Node* expected_val,
                                              Node* new_val,
                                              const Type* value_type,
                                              BasicType bt,
                                              DecoratorSet decorators) {
  C2AccessValuePtr addr(adr, adr_type);
  C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS,
                             bt, obj, addr, alias_idx);
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC2::atomic_cmpxchg_bool_at(access, expected_val, new_val, value_type);
  } else {
    return _barrier_set->atomic_cmpxchg_bool_at(access, expected_val, new_val, value_type);
  }
}

Node* GraphKit::access_atomic_xchg_at(Node* obj,
                                      Node* adr,
                                      const TypePtr* adr_type,
                                      int alias_idx,
                                      Node* new_val,
                                      const Type* value_type,
                                      BasicType bt,
                                      DecoratorSet decorators) {
  C2AccessValuePtr addr(adr, adr_type);
  C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS,
                             bt, obj, addr, alias_idx);
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC2::atomic_xchg_at(access, new_val, value_type);
  } else {
    return _barrier_set->atomic_xchg_at(access, new_val, value_type);
  }
}

Node* GraphKit::access_atomic_add_at(Node* obj,
                                     Node* adr,
                                     const TypePtr* adr_type,
                                     int alias_idx,
                                     Node* new_val,
                                     const Type* value_type,
                                     BasicType bt,
                                     DecoratorSet decorators) {
  C2AccessValuePtr addr(adr, adr_type);
  C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS, bt, obj, addr, alias_idx);
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC2::atomic_add_at(access, new_val, value_type);
  } else {
    return _barrier_set->atomic_add_at(access, new_val, value_type);
  }
}

void GraphKit::access_clone(Node* src, Node* dst, Node* size, bool is_array) {
  return _barrier_set->clone(this, src, dst, size, is_array);
}

//-------------------------array_element_address-------------------------
Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
                                      const TypeInt* sizetype, Node* ctrl) {
  uint shift  = exact_log2(type2aelembytes(elembt));
  uint header = arrayOopDesc::base_offset_in_bytes(elembt);

  // short-circuit 
a common case (saves lots of confusing waste motion)1720jint idx_con = find_int_con(idx, -1);1721if (idx_con >= 0) {1722intptr_t offset = header + ((intptr_t)idx_con << shift);1723return basic_plus_adr(ary, offset);1724}17251726// must be correct type for alignment purposes1727Node* base = basic_plus_adr(ary, header);1728idx = Compile::conv_I2X_index(&_gvn, idx, sizetype, ctrl);1729Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );1730return basic_plus_adr(ary, base, scale);1731}17321733//-------------------------load_array_element-------------------------1734Node* GraphKit::load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype) {1735const Type* elemtype = arytype->elem();1736BasicType elembt = elemtype->array_element_basic_type();1737Node* adr = array_element_address(ary, idx, elembt, arytype->size());1738if (elembt == T_NARROWOOP) {1739elembt = T_OBJECT; // To satisfy switch in LoadNode::make()1740}1741Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered);1742return ld;1743}17441745//-------------------------set_arguments_for_java_call-------------------------1746// Arguments (pre-popped from the stack) are taken from the JVMS.1747void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {1748// Add the call arguments:1749uint nargs = call->method()->arg_size();1750for (uint i = 0; i < nargs; i++) {1751Node* arg = argument(i);1752call->init_req(i + TypeFunc::Parms, arg);1753}1754}17551756//---------------------------set_edges_for_java_call---------------------------1757// Connect a newly created call into the current JVMS.1758// A return value node (if any) is returned from set_edges_for_java_call.1759void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {17601761// Add the predefined inputs:1762call->init_req( TypeFunc::Control, control() );1763call->init_req( TypeFunc::I_O , i_o() );1764call->init_req( TypeFunc::Memory , reset_memory() );1765call->init_req( TypeFunc::FramePtr, frameptr() );1766call->init_req( TypeFunc::ReturnAdr, top() );17671768add_safepoint_edges(call, must_throw);17691770Node* xcall = _gvn.transform(call);17711772if (xcall == top()) {1773set_control(top());1774return;1775}1776assert(xcall == call, "call identity is stable");17771778// Re-use the current map to produce the result.17791780set_control(_gvn.transform(new ProjNode(call, TypeFunc::Control)));1781set_i_o( _gvn.transform(new ProjNode(call, TypeFunc::I_O , separate_io_proj)));1782set_all_memory_call(xcall, separate_io_proj);17831784//return xcall; // no need, caller already has it1785}17861787Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj, bool deoptimize) {1788if (stopped()) return top(); // maybe the call folded up?17891790// Capture the return value, if any.1791Node* ret;1792if (call->method() == NULL ||1793call->method()->return_type()->basic_type() == T_VOID)1794ret = top();1795else ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));17961797// Note: Since any out-of-line call can produce an exception,1798// we always insert an I_O projection from the call into the result.17991800make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj, deoptimize);18011802if (separate_io_proj) {1803// The caller requested separate projections be used by the fall1804// through and exceptional paths, so replace the projections for1805// the fall through path.1806set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) 
));1807set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) ));1808}1809return ret;1810}18111812//--------------------set_predefined_input_for_runtime_call--------------------1813// Reading and setting the memory state is way conservative here.1814// The real problem is that I am not doing real Type analysis on memory,1815// so I cannot distinguish card mark stores from other stores. Across a GC1816// point the Store Barrier and the card mark memory has to agree. I cannot1817// have a card mark store and its barrier split across the GC point from1818// either above or below. Here I get that to happen by reading ALL of memory.1819// A better answer would be to separate out card marks from other memory.1820// For now, return the input memory state, so that it can be reused1821// after the call, if this call has restricted memory effects.1822Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem) {1823// Set fixed predefined input arguments1824Node* memory = reset_memory();1825Node* m = narrow_mem == NULL ? memory : narrow_mem;1826call->init_req( TypeFunc::Control, control() );1827call->init_req( TypeFunc::I_O, top() ); // does no i/o1828call->init_req( TypeFunc::Memory, m ); // may gc ptrs1829call->init_req( TypeFunc::FramePtr, frameptr() );1830call->init_req( TypeFunc::ReturnAdr, top() );1831return memory;1832}18331834//-------------------set_predefined_output_for_runtime_call--------------------1835// Set control and memory (not i_o) from the call.1836// If keep_mem is not NULL, use it for the output state,1837// except for the RawPtr output of the call, if hook_mem is TypeRawPtr::BOTTOM.1838// If hook_mem is NULL, this call produces no memory effects at all.1839// If hook_mem is a Java-visible memory slice (such as arraycopy operands),1840// then only that memory slice is taken from the call.1841// In the last case, we must put an appropriate memory barrier before1842// the call, so as to create the correct anti-dependencies on loads1843// preceding the call.1844void GraphKit::set_predefined_output_for_runtime_call(Node* call,1845Node* keep_mem,1846const TypePtr* hook_mem) {1847// no i/o1848set_control(_gvn.transform( new ProjNode(call,TypeFunc::Control) ));1849if (keep_mem) {1850// First clone the existing memory state1851set_all_memory(keep_mem);1852if (hook_mem != NULL) {1853// Make memory for the call1854Node* mem = _gvn.transform( new ProjNode(call, TypeFunc::Memory) );1855// Set the RawPtr memory state only. 
This covers all the heap top/GC stuff1856// We also use hook_mem to extract specific effects from arraycopy stubs.1857set_memory(mem, hook_mem);1858}1859// ...else the call has NO memory effects.18601861// Make sure the call advertises its memory effects precisely.1862// This lets us build accurate anti-dependences in gcm.cpp.1863assert(C->alias_type(call->adr_type()) == C->alias_type(hook_mem),1864"call node must be constructed correctly");1865} else {1866assert(hook_mem == NULL, "");1867// This is not a "slow path" call; all memory comes from the call.1868set_all_memory_call(call);1869}1870}18711872// Keep track of MergeMems feeding into other MergeMems1873static void add_mergemem_users_to_worklist(Unique_Node_List& wl, Node* mem) {1874if (!mem->is_MergeMem()) {1875return;1876}1877for (SimpleDUIterator i(mem); i.has_next(); i.next()) {1878Node* use = i.get();1879if (use->is_MergeMem()) {1880wl.push(use);1881}1882}1883}18841885// Replace the call with the current state of the kit.1886void GraphKit::replace_call(CallNode* call, Node* result, bool do_replaced_nodes) {1887JVMState* ejvms = NULL;1888if (has_exceptions()) {1889ejvms = transfer_exceptions_into_jvms();1890}18911892ReplacedNodes replaced_nodes = map()->replaced_nodes();1893ReplacedNodes replaced_nodes_exception;1894Node* ex_ctl = top();18951896SafePointNode* final_state = stop();18971898// Find all the needed outputs of this call1899CallProjections callprojs;1900call->extract_projections(&callprojs, true);19011902Unique_Node_List wl;1903Node* init_mem = call->in(TypeFunc::Memory);1904Node* final_mem = final_state->in(TypeFunc::Memory);1905Node* final_ctl = final_state->in(TypeFunc::Control);1906Node* final_io = final_state->in(TypeFunc::I_O);19071908// Replace all the old call edges with the edges from the inlining result1909if (callprojs.fallthrough_catchproj != NULL) {1910C->gvn_replace_by(callprojs.fallthrough_catchproj, final_ctl);1911}1912if (callprojs.fallthrough_memproj != NULL) {1913if (final_mem->is_MergeMem()) {1914// Parser's exits MergeMem was not transformed but may be optimized1915final_mem = _gvn.transform(final_mem);1916}1917C->gvn_replace_by(callprojs.fallthrough_memproj, final_mem);1918add_mergemem_users_to_worklist(wl, final_mem);1919}1920if (callprojs.fallthrough_ioproj != NULL) {1921C->gvn_replace_by(callprojs.fallthrough_ioproj, final_io);1922}19231924// Replace the result with the new result if it exists and is used1925if (callprojs.resproj != NULL && result != NULL) {1926C->gvn_replace_by(callprojs.resproj, result);1927}19281929if (ejvms == NULL) {1930// No exception edges to simply kill off those paths1931if (callprojs.catchall_catchproj != NULL) {1932C->gvn_replace_by(callprojs.catchall_catchproj, C->top());1933}1934if (callprojs.catchall_memproj != NULL) {1935C->gvn_replace_by(callprojs.catchall_memproj, C->top());1936}1937if (callprojs.catchall_ioproj != NULL) {1938C->gvn_replace_by(callprojs.catchall_ioproj, C->top());1939}1940// Replace the old exception object with top1941if (callprojs.exobj != NULL) {1942C->gvn_replace_by(callprojs.exobj, C->top());1943}1944} else {1945GraphKit ekit(ejvms);19461947// Load my combined exception state into the kit, with all phis transformed:1948SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states();1949replaced_nodes_exception = ex_map->replaced_nodes();19501951Node* ex_oop = ekit.use_exception_state(ex_map);19521953if (callprojs.catchall_catchproj != NULL) {1954C->gvn_replace_by(callprojs.catchall_catchproj, ekit.control());1955ex_ctl = 
ekit.control();1956}1957if (callprojs.catchall_memproj != NULL) {1958Node* ex_mem = ekit.reset_memory();1959C->gvn_replace_by(callprojs.catchall_memproj, ex_mem);1960add_mergemem_users_to_worklist(wl, ex_mem);1961}1962if (callprojs.catchall_ioproj != NULL) {1963C->gvn_replace_by(callprojs.catchall_ioproj, ekit.i_o());1964}19651966// Replace the old exception object with the newly created one1967if (callprojs.exobj != NULL) {1968C->gvn_replace_by(callprojs.exobj, ex_oop);1969}1970}19711972// Disconnect the call from the graph1973call->disconnect_inputs(C);1974C->gvn_replace_by(call, C->top());19751976// Clean up any MergeMems that feed other MergeMems since the1977// optimizer doesn't like that.1978while (wl.size() > 0) {1979_gvn.transform(wl.pop());1980}19811982if (callprojs.fallthrough_catchproj != NULL && !final_ctl->is_top() && do_replaced_nodes) {1983replaced_nodes.apply(C, final_ctl);1984}1985if (!ex_ctl->is_top() && do_replaced_nodes) {1986replaced_nodes_exception.apply(C, ex_ctl);1987}1988}198919901991//------------------------------increment_counter------------------------------1992// for statistics: increment a VM counter by 119931994void GraphKit::increment_counter(address counter_addr) {1995Node* adr1 = makecon(TypeRawPtr::make(counter_addr));1996increment_counter(adr1);1997}19981999void GraphKit::increment_counter(Node* counter_addr) {2000int adr_type = Compile::AliasIdxRaw;2001Node* ctrl = control();2002Node* cnt = make_load(ctrl, counter_addr, TypeLong::LONG, T_LONG, adr_type, MemNode::unordered);2003Node* incr = _gvn.transform(new AddLNode(cnt, _gvn.longcon(1)));2004store_to_memory(ctrl, counter_addr, incr, T_LONG, adr_type, MemNode::unordered);2005}200620072008//------------------------------uncommon_trap----------------------------------2009// Bail out to the interpreter in mid-method. Implemented by calling the2010// uncommon_trap blob. This helper function inserts a runtime call with the2011// right debug info.2012void GraphKit::uncommon_trap(int trap_request,2013ciKlass* klass, const char* comment,2014bool must_throw,2015bool keep_exact_action) {2016if (failing()) stop();2017if (stopped()) return; // trap reachable?20182019// Note: If ProfileTraps is true, and if a deopt. actually2020// occurs here, the runtime will make sure an MDO exists. 
There is2021// no need to call method()->ensure_method_data() at this point.20222023// Set the stack pointer to the right value for reexecution:2024set_sp(reexecute_sp());20252026#ifdef ASSERT2027if (!must_throw) {2028// Make sure the stack has at least enough depth to execute2029// the current bytecode.2030int inputs, ignored_depth;2031if (compute_stack_effects(inputs, ignored_depth)) {2032assert(sp() >= inputs, "must have enough JVMS stack to execute %s: sp=%d, inputs=%d",2033Bytecodes::name(java_bc()), sp(), inputs);2034}2035}2036#endif20372038Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);2039Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);20402041switch (action) {2042case Deoptimization::Action_maybe_recompile:2043case Deoptimization::Action_reinterpret:2044// Temporary fix for 6529811 to allow virtual calls to be sure they2045// get the chance to go from mono->bi->mega2046if (!keep_exact_action &&2047Deoptimization::trap_request_index(trap_request) < 0 &&2048too_many_recompiles(reason)) {2049// This BCI is causing too many recompilations.2050if (C->log() != NULL) {2051C->log()->elem("observe that='trap_action_change' reason='%s' from='%s' to='none'",2052Deoptimization::trap_reason_name(reason),2053Deoptimization::trap_action_name(action));2054}2055action = Deoptimization::Action_none;2056trap_request = Deoptimization::make_trap_request(reason, action);2057} else {2058C->set_trap_can_recompile(true);2059}2060break;2061case Deoptimization::Action_make_not_entrant:2062C->set_trap_can_recompile(true);2063break;2064case Deoptimization::Action_none:2065case Deoptimization::Action_make_not_compilable:2066break;2067default:2068#ifdef ASSERT2069fatal("unknown action %d: %s", action, Deoptimization::trap_action_name(action));2070#endif2071break;2072}20732074if (TraceOptoParse) {2075char buf[100];2076tty->print_cr("Uncommon trap %s at bci:%d",2077Deoptimization::format_trap_request(buf, sizeof(buf),2078trap_request), bci());2079}20802081CompileLog* log = C->log();2082if (log != NULL) {2083int kid = (klass == NULL)? -1: log->identify(klass);2084log->begin_elem("uncommon_trap bci='%d'", bci());2085char buf[100];2086log->print(" %s", Deoptimization::format_trap_request(buf, sizeof(buf),2087trap_request));2088if (kid >= 0) log->print(" klass='%d'", kid);2089if (comment != NULL) log->print(" comment='%s'", comment);2090log->end_elem();2091}20922093// Make sure any guarding test views this path as very unlikely2094Node *i0 = control()->in(0);2095if (i0 != NULL && i0->is_If()) { // Found a guarding if test?2096IfNode *iff = i0->as_If();2097float f = iff->_prob; // Get prob2098if (control()->Opcode() == Op_IfTrue) {2099if (f > PROB_UNLIKELY_MAG(4))2100iff->_prob = PROB_MIN;2101} else {2102if (f < PROB_LIKELY_MAG(4))2103iff->_prob = PROB_MAX;2104}2105}21062107// Clear out dead values from the debug info.2108kill_dead_locals();21092110// Now insert the uncommon trap subroutine call2111address call_addr = SharedRuntime::uncommon_trap_blob()->entry_point();2112const TypePtr* no_memory_effects = NULL;2113// Pass the index of the class to be loaded2114Node* call = make_runtime_call(RC_NO_LEAF | RC_UNCOMMON |2115(must_throw ? 
RC_MUST_THROW : 0),2116OptoRuntime::uncommon_trap_Type(),2117call_addr, "uncommon_trap", no_memory_effects,2118intcon(trap_request));2119assert(call->as_CallStaticJava()->uncommon_trap_request() == trap_request,2120"must extract request correctly from the graph");2121assert(trap_request != 0, "zero value reserved by uncommon_trap_request");21222123call->set_req(TypeFunc::ReturnAdr, returnadr());2124// The debug info is the only real input to this call.21252126// Halt-and-catch fire here. The above call should never return!2127HaltNode* halt = new HaltNode(control(), frameptr(), "uncommon trap returned which should never happen"2128PRODUCT_ONLY(COMMA /*reachable*/false));2129_gvn.set_type_bottom(halt);2130root()->add_req(halt);21312132stop_and_kill_map();2133}213421352136//--------------------------just_allocated_object------------------------------2137// Report the object that was just allocated.2138// It must be the case that there are no intervening safepoints.2139// We use this to determine if an object is so "fresh" that2140// it does not require card marks.2141Node* GraphKit::just_allocated_object(Node* current_control) {2142Node* ctrl = current_control;2143// Object::<init> is invoked after allocation, most of invoke nodes2144// will be reduced, but a region node is kept in parse time, we check2145// the pattern and skip the region node if it degraded to a copy.2146if (ctrl != NULL && ctrl->is_Region() && ctrl->req() == 2 &&2147ctrl->as_Region()->is_copy()) {2148ctrl = ctrl->as_Region()->is_copy();2149}2150if (C->recent_alloc_ctl() == ctrl) {2151return C->recent_alloc_obj();2152}2153return NULL;2154}215521562157/**2158* Record profiling data exact_kls for Node n with the type system so2159* that it can propagate it (speculation)2160*2161* @param n node that the type applies to2162* @param exact_kls type from profiling2163* @param maybe_null did profiling see null?2164*2165* @return node with improved type2166*/2167Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls, ProfilePtrKind ptr_kind) {2168const Type* current_type = _gvn.type(n);2169assert(UseTypeSpeculation, "type speculation must be on");21702171const TypePtr* speculative = current_type->speculative();21722173// Should the klass from the profile be recorded in the speculative type?2174if (current_type->would_improve_type(exact_kls, jvms()->depth())) {2175const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls);2176const TypeOopPtr* xtype = tklass->as_instance_type();2177assert(xtype->klass_is_exact(), "Should be exact");2178// Any reason to believe n is not null (from this profiling or a previous one)?2179assert(ptr_kind != ProfileAlwaysNull, "impossible here");2180const TypePtr* ptr = (ptr_kind == ProfileMaybeNull && current_type->speculative_maybe_null()) ? 
                       TypePtr::BOTTOM : TypePtr::NOTNULL;
    // record the new speculative type's depth
    speculative = xtype->cast_to_ptr_type(ptr->ptr())->is_ptr();
    speculative = speculative->with_inline_depth(jvms()->depth());
  } else if (current_type->would_improve_ptr(ptr_kind)) {
    // Profiling reports that null was never seen so we can change the
    // speculative type to a non-null ptr.
    if (ptr_kind == ProfileAlwaysNull) {
      speculative = TypePtr::NULL_PTR;
    } else {
      assert(ptr_kind == ProfileNeverNull, "nothing else is an improvement");
      const TypePtr* ptr = TypePtr::NOTNULL;
      if (speculative != NULL) {
        speculative = speculative->cast_to_ptr_type(ptr->ptr())->is_ptr();
      } else {
        speculative = ptr;
      }
    }
  }

  if (speculative != current_type->speculative()) {
    // Build a type with a speculative type (what we think we know
    // about the type but will need a guard when we use it)
    const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::OffsetBot, TypeOopPtr::InstanceBot, speculative);
    // We're changing the type, we need a new CheckCast node to carry
    // the new type. The new type depends on the control: what
    // profiling tells us is only valid from here as far as we can
    // tell.
    Node* cast = new CheckCastPPNode(control(), n, current_type->remove_speculative()->join_speculative(spec_type));
    cast = _gvn.transform(cast);
    replace_in_map(n, cast);
    n = cast;
  }

  return n;
}

/**
 * Record profiling data from receiver profiling at an invoke with the
 * type system so that it can propagate it (speculation)
 *
 * @param n  receiver node
 *
 * @return   node with improved type
 */
Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) {
  if (!UseTypeSpeculation) {
    return n;
  }
  ciKlass* exact_kls = profile_has_unique_klass();
  ProfilePtrKind ptr_kind = ProfileMaybeNull;
  if ((java_bc() == Bytecodes::_checkcast ||
       java_bc() == Bytecodes::_instanceof ||
       java_bc() == Bytecodes::_aastore) &&
      method()->method_data()->is_mature()) {
    ciProfileData* data = method()->method_data()->bci_to_data(bci());
    if (data != NULL) {
      if (!data->as_BitData()->null_seen()) {
        ptr_kind = ProfileNeverNull;
      } else {
        assert(data->is_ReceiverTypeData(), "bad profile data type");
        ciReceiverTypeData* call = (ciReceiverTypeData*)data->as_ReceiverTypeData();
        uint i = 0;
        for (; i < call->row_limit(); i++) {
          ciKlass* receiver = call->receiver(i);
          if (receiver != NULL) {
            break;
          }
        }
        ptr_kind = (i == call->row_limit()) ? ProfileAlwaysNull : ProfileMaybeNull;
      }
    }
  }
  return record_profile_for_speculation(n, exact_kls, ptr_kind);
}

/**
 * Record profiling data from argument profiling at an invoke with the
 * type system so that it can propagate it (speculation)
 *
 * @param dest_method  target method for the call
 * @param bc           what invoke bytecode is this?
 */
void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc) {
  if (!UseTypeSpeculation) {
    return;
  }
  const TypeFunc* tf = TypeFunc::make(dest_method);
  int nargs = tf->domain()->cnt() - TypeFunc::Parms;
  int skip = Bytecodes::has_receiver(bc) ? 
1 : 0;2270for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) {2271const Type *targ = tf->domain()->field_at(j + TypeFunc::Parms);2272if (is_reference_type(targ->basic_type())) {2273ProfilePtrKind ptr_kind = ProfileMaybeNull;2274ciKlass* better_type = NULL;2275if (method()->argument_profiled_type(bci(), i, better_type, ptr_kind)) {2276record_profile_for_speculation(argument(j), better_type, ptr_kind);2277}2278i++;2279}2280}2281}22822283/**2284* Record profiling data from parameter profiling at an invoke with2285* the type system so that it can propagate it (speculation)2286*/2287void GraphKit::record_profiled_parameters_for_speculation() {2288if (!UseTypeSpeculation) {2289return;2290}2291for (int i = 0, j = 0; i < method()->arg_size() ; i++) {2292if (_gvn.type(local(i))->isa_oopptr()) {2293ProfilePtrKind ptr_kind = ProfileMaybeNull;2294ciKlass* better_type = NULL;2295if (method()->parameter_profiled_type(j, better_type, ptr_kind)) {2296record_profile_for_speculation(local(i), better_type, ptr_kind);2297}2298j++;2299}2300}2301}23022303/**2304* Record profiling data from return value profiling at an invoke with2305* the type system so that it can propagate it (speculation)2306*/2307void GraphKit::record_profiled_return_for_speculation() {2308if (!UseTypeSpeculation) {2309return;2310}2311ProfilePtrKind ptr_kind = ProfileMaybeNull;2312ciKlass* better_type = NULL;2313if (method()->return_profiled_type(bci(), better_type, ptr_kind)) {2314// If profiling reports a single type for the return value,2315// feed it to the type system so it can propagate it as a2316// speculative type2317record_profile_for_speculation(stack(sp()-1), better_type, ptr_kind);2318}2319}23202321void GraphKit::round_double_arguments(ciMethod* dest_method) {2322if (Matcher::strict_fp_requires_explicit_rounding) {2323// (Note: TypeFunc::make has a cache that makes this fast.)2324const TypeFunc* tf = TypeFunc::make(dest_method);2325int nargs = tf->domain()->cnt() - TypeFunc::Parms;2326for (int j = 0; j < nargs; j++) {2327const Type *targ = tf->domain()->field_at(j + TypeFunc::Parms);2328if (targ->basic_type() == T_DOUBLE) {2329// If any parameters are doubles, they must be rounded before2330// the call, dstore_rounding does gvn.transform2331Node *arg = argument(j);2332arg = dstore_rounding(arg);2333set_argument(j, arg);2334}2335}2336}2337}23382339// rounding for strict float precision conformance2340Node* GraphKit::precision_rounding(Node* n) {2341if (Matcher::strict_fp_requires_explicit_rounding) {2342#ifdef IA322343if (UseSSE == 0) {2344return _gvn.transform(new RoundFloatNode(0, n));2345}2346#else2347Unimplemented();2348#endif // IA322349}2350return n;2351}23522353// rounding for strict double precision conformance2354Node* GraphKit::dprecision_rounding(Node *n) {2355if (Matcher::strict_fp_requires_explicit_rounding) {2356#ifdef IA322357if (UseSSE < 2) {2358return _gvn.transform(new RoundDoubleNode(0, n));2359}2360#else2361Unimplemented();2362#endif // IA322363}2364return n;2365}23662367// rounding for non-strict double stores2368Node* GraphKit::dstore_rounding(Node* n) {2369if (Matcher::strict_fp_requires_explicit_rounding) {2370#ifdef IA322371if (UseSSE < 2) {2372return _gvn.transform(new RoundDoubleNode(0, n));2373}2374#else2375Unimplemented();2376#endif // IA322377}2378return n;2379}23802381//=============================================================================2382// Generate a fast path/slow path idiom. 
Graph looks like:2383// [foo] indicates that 'foo' is a parameter2384//2385// [in] NULL2386// \ /2387// CmpP2388// Bool ne2389// If2390// / \2391// True False-<2>2392// / |2393// / cast_not_null2394// Load | | ^2395// [fast_test] | |2396// gvn to opt_test | |2397// / \ | <1>2398// True False |2399// | \\ |2400// [slow_call] \[fast_result]2401// Ctl Val \ \2402// | \ \2403// Catch <1> \ \2404// / \ ^ \ \2405// Ex No_Ex | \ \2406// | \ \ | \ <2> \2407// ... \ [slow_res] | | \ [null_result]2408// \ \--+--+--- | |2409// \ | / \ | /2410// --------Region Phi2411//2412//=============================================================================2413// Code is structured as a series of driver functions all called 'do_XXX' that2414// call a set of helper functions. Helper functions first, then drivers.24152416//------------------------------null_check_oop---------------------------------2417// Null check oop. Set null-path control into Region in slot 3.2418// Make a cast-not-nullness use the other not-null control. Return cast.2419Node* GraphKit::null_check_oop(Node* value, Node* *null_control,2420bool never_see_null,2421bool safe_for_replace,2422bool speculative) {2423// Initial NULL check taken path2424(*null_control) = top();2425Node* cast = null_check_common(value, T_OBJECT, false, null_control, speculative);24262427// Generate uncommon_trap:2428if (never_see_null && (*null_control) != top()) {2429// If we see an unexpected null at a check-cast we record it and force a2430// recompile; the offending check-cast will be compiled to handle NULLs.2431// If we see more than one offending BCI, then all checkcasts in the2432// method will be compiled to handle NULLs.2433PreserveJVMState pjvms(this);2434set_control(*null_control);2435replace_in_map(value, null());2436Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculative);2437uncommon_trap(reason,2438Deoptimization::Action_make_not_entrant);2439(*null_control) = top(); // NULL path is dead2440}2441if ((*null_control) == top() && safe_for_replace) {2442replace_in_map(value, cast);2443}24442445// Cast away null-ness on the result2446return cast;2447}24482449//------------------------------opt_iff----------------------------------------2450// Optimize the fast-check IfNode. Set the fast-path region slot 2.2451// Return slow-path control.2452Node* GraphKit::opt_iff(Node* region, Node* iff) {2453IfNode *opt_iff = _gvn.transform(iff)->as_If();24542455// Fast path taken; set region slot 22456Node *fast_taken = _gvn.transform( new IfFalseNode(opt_iff) );2457region->init_req(2,fast_taken); // Capture fast-control24582459// Fast path not-taken, i.e. 
slow path2460Node *slow_taken = _gvn.transform( new IfTrueNode(opt_iff) );2461return slow_taken;2462}24632464//-----------------------------make_runtime_call-------------------------------2465Node* GraphKit::make_runtime_call(int flags,2466const TypeFunc* call_type, address call_addr,2467const char* call_name,2468const TypePtr* adr_type,2469// The following parms are all optional.2470// The first NULL ends the list.2471Node* parm0, Node* parm1,2472Node* parm2, Node* parm3,2473Node* parm4, Node* parm5,2474Node* parm6, Node* parm7) {2475assert(call_addr != NULL, "must not call NULL targets");24762477// Slow-path call2478bool is_leaf = !(flags & RC_NO_LEAF);2479bool has_io = (!is_leaf && !(flags & RC_NO_IO));2480if (call_name == NULL) {2481assert(!is_leaf, "must supply name for leaf");2482call_name = OptoRuntime::stub_name(call_addr);2483}2484CallNode* call;2485if (!is_leaf) {2486call = new CallStaticJavaNode(call_type, call_addr, call_name, adr_type);2487} else if (flags & RC_NO_FP) {2488call = new CallLeafNoFPNode(call_type, call_addr, call_name, adr_type);2489} else if (flags & RC_VECTOR){2490uint num_bits = call_type->range()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte;2491call = new CallLeafVectorNode(call_type, call_addr, call_name, adr_type, num_bits);2492} else {2493call = new CallLeafNode(call_type, call_addr, call_name, adr_type);2494}24952496// The following is similar to set_edges_for_java_call,2497// except that the memory effects of the call are restricted to AliasIdxRaw.24982499// Slow path call has no side-effects, uses few values2500bool wide_in = !(flags & RC_NARROW_MEM);2501bool wide_out = (C->get_alias_index(adr_type) == Compile::AliasIdxBot);25022503Node* prev_mem = NULL;2504if (wide_in) {2505prev_mem = set_predefined_input_for_runtime_call(call);2506} else {2507assert(!wide_out, "narrow in => narrow out");2508Node* narrow_mem = memory(adr_type);2509prev_mem = set_predefined_input_for_runtime_call(call, narrow_mem);2510}25112512// Hook each parm in order. Stop looking at the first NULL.2513if (parm0 != NULL) { call->init_req(TypeFunc::Parms+0, parm0);2514if (parm1 != NULL) { call->init_req(TypeFunc::Parms+1, parm1);2515if (parm2 != NULL) { call->init_req(TypeFunc::Parms+2, parm2);2516if (parm3 != NULL) { call->init_req(TypeFunc::Parms+3, parm3);2517if (parm4 != NULL) { call->init_req(TypeFunc::Parms+4, parm4);2518if (parm5 != NULL) { call->init_req(TypeFunc::Parms+5, parm5);2519if (parm6 != NULL) { call->init_req(TypeFunc::Parms+6, parm6);2520if (parm7 != NULL) { call->init_req(TypeFunc::Parms+7, parm7);2521/* close each nested if ===> */ } } } } } } } }2522assert(call->in(call->req()-1) != NULL, "must initialize all parms");25232524if (!is_leaf) {2525// Non-leaves can block and take safepoints:2526add_safepoint_edges(call, ((flags & RC_MUST_THROW) != 0));2527}2528// Non-leaves can throw exceptions:2529if (has_io) {2530call->set_req(TypeFunc::I_O, i_o());2531}25322533if (flags & RC_UNCOMMON) {2534// Set the count to a tiny probability. Cf. 
Estimate_Block_Frequency.2535// (An "if" probability corresponds roughly to an unconditional count.2536// Sort of.)2537call->set_cnt(PROB_UNLIKELY_MAG(4));2538}25392540Node* c = _gvn.transform(call);2541assert(c == call, "cannot disappear");25422543if (wide_out) {2544// Slow path call has full side-effects.2545set_predefined_output_for_runtime_call(call);2546} else {2547// Slow path call has few side-effects, and/or sets few values.2548set_predefined_output_for_runtime_call(call, prev_mem, adr_type);2549}25502551if (has_io) {2552set_i_o(_gvn.transform(new ProjNode(call, TypeFunc::I_O)));2553}2554return call;25552556}25572558// i2b2559Node* GraphKit::sign_extend_byte(Node* in) {2560Node* tmp = _gvn.transform(new LShiftINode(in, _gvn.intcon(24)));2561return _gvn.transform(new RShiftINode(tmp, _gvn.intcon(24)));2562}25632564// i2s2565Node* GraphKit::sign_extend_short(Node* in) {2566Node* tmp = _gvn.transform(new LShiftINode(in, _gvn.intcon(16)));2567return _gvn.transform(new RShiftINode(tmp, _gvn.intcon(16)));2568}25692570//-----------------------------make_native_call-------------------------------2571Node* GraphKit::make_native_call(address call_addr, const TypeFunc* call_type, uint nargs, ciNativeEntryPoint* nep) {2572// Select just the actual call args to pass on2573// [MethodHandle fallback, long addr, HALF addr, ... args , NativeEntryPoint nep]2574// | |2575// V V2576// [ ... args ]2577uint n_filtered_args = nargs - 4; // -fallback, -addr (2), -nep;2578ResourceMark rm;2579Node** argument_nodes = NEW_RESOURCE_ARRAY(Node*, n_filtered_args);2580const Type** arg_types = TypeTuple::fields(n_filtered_args);2581GrowableArray<VMReg> arg_regs(C->comp_arena(), n_filtered_args, n_filtered_args, VMRegImpl::Bad());25822583VMReg* argRegs = nep->argMoves();2584{2585for (uint vm_arg_pos = 0, java_arg_read_pos = 0;2586vm_arg_pos < n_filtered_args; vm_arg_pos++) {2587uint vm_unfiltered_arg_pos = vm_arg_pos + 3; // +3 to skip fallback handle argument and addr (2 since long)2588Node* node = argument(vm_unfiltered_arg_pos);2589const Type* type = call_type->domain()->field_at(TypeFunc::Parms + vm_unfiltered_arg_pos);2590VMReg reg = type == Type::HALF2591? VMRegImpl::Bad()2592: argRegs[java_arg_read_pos++];25932594argument_nodes[vm_arg_pos] = node;2595arg_types[TypeFunc::Parms + vm_arg_pos] = type;2596arg_regs.at_put(vm_arg_pos, reg);2597}2598}25992600uint n_returns = call_type->range()->cnt() - TypeFunc::Parms;2601GrowableArray<VMReg> ret_regs(C->comp_arena(), n_returns, n_returns, VMRegImpl::Bad());2602const Type** ret_types = TypeTuple::fields(n_returns);26032604VMReg* retRegs = nep->returnMoves();2605{2606for (uint vm_ret_pos = 0, java_ret_read_pos = 0;2607vm_ret_pos < n_returns; vm_ret_pos++) { // 0 or 12608const Type* type = call_type->range()->field_at(TypeFunc::Parms + vm_ret_pos);2609VMReg reg = type == Type::HALF2610? 
VMRegImpl::Bad()2611: retRegs[java_ret_read_pos++];26122613ret_regs.at_put(vm_ret_pos, reg);2614ret_types[TypeFunc::Parms + vm_ret_pos] = type;2615}2616}26172618const TypeFunc* new_call_type = TypeFunc::make(2619TypeTuple::make(TypeFunc::Parms + n_filtered_args, arg_types),2620TypeTuple::make(TypeFunc::Parms + n_returns, ret_types)2621);26222623if (nep->need_transition()) {2624RuntimeStub* invoker = SharedRuntime::make_native_invoker(call_addr,2625nep->shadow_space(),2626arg_regs, ret_regs);2627if (invoker == NULL) {2628C->record_failure("native invoker not implemented on this platform");2629return NULL;2630}2631C->add_native_invoker(invoker);2632call_addr = invoker->code_begin();2633}2634assert(call_addr != NULL, "sanity");26352636CallNativeNode* call = new CallNativeNode(new_call_type, call_addr, nep->name(), TypePtr::BOTTOM,2637arg_regs,2638ret_regs,2639nep->shadow_space(),2640nep->need_transition());26412642if (call->_need_transition) {2643add_safepoint_edges(call);2644}26452646set_predefined_input_for_runtime_call(call);26472648for (uint i = 0; i < n_filtered_args; i++) {2649call->init_req(i + TypeFunc::Parms, argument_nodes[i]);2650}26512652Node* c = gvn().transform(call);2653assert(c == call, "cannot disappear");26542655set_predefined_output_for_runtime_call(call);26562657Node* ret;2658if (method() == NULL || method()->return_type()->basic_type() == T_VOID) {2659ret = top();2660} else {2661ret = gvn().transform(new ProjNode(call, TypeFunc::Parms));2662// Unpack native results if needed2663// Need this method type since it's unerased2664switch (nep->method_type()->rtype()->basic_type()) {2665case T_CHAR:2666ret = _gvn.transform(new AndINode(ret, _gvn.intcon(0xFFFF)));2667break;2668case T_BYTE:2669ret = sign_extend_byte(ret);2670break;2671case T_SHORT:2672ret = sign_extend_short(ret);2673break;2674default: // do nothing2675break;2676}2677}26782679push_node(method()->return_type()->basic_type(), ret);26802681return call;2682}26832684//------------------------------merge_memory-----------------------------------2685// Merge memory from one path into the current memory state.2686void GraphKit::merge_memory(Node* new_mem, Node* region, int new_path) {2687for (MergeMemStream mms(merged_memory(), new_mem->as_MergeMem()); mms.next_non_empty2(); ) {2688Node* old_slice = mms.force_memory();2689Node* new_slice = mms.memory2();2690if (old_slice != new_slice) {2691PhiNode* phi;2692if (old_slice->is_Phi() && old_slice->as_Phi()->region() == region) {2693if (mms.is_empty()) {2694// clone base memory Phi's inputs for this memory slice2695assert(old_slice == mms.base_memory(), "sanity");2696phi = PhiNode::make(region, NULL, Type::MEMORY, mms.adr_type(C));2697_gvn.set_type(phi, Type::MEMORY);2698for (uint i = 1; i < phi->req(); i++) {2699phi->init_req(i, old_slice->in(i));2700}2701} else {2702phi = old_slice->as_Phi(); // Phi was generated already2703}2704} else {2705phi = PhiNode::make(region, old_slice, Type::MEMORY, mms.adr_type(C));2706_gvn.set_type(phi, Type::MEMORY);2707}2708phi->set_req(new_path, new_slice);2709mms.set_memory(phi);2710}2711}2712}27132714//------------------------------make_slow_call_ex------------------------------2715// Make the exception handler hookups for the slow call2716void GraphKit::make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj, bool deoptimize) {2717if (stopped()) return;27182719// Make a catch node with just two handlers: fall-through and catch-all2720Node* i_o = _gvn.transform( new ProjNode(call, TypeFunc::I_O, separate_io_proj) 
);2721Node* catc = _gvn.transform( new CatchNode(control(), i_o, 2) );2722Node* norm = _gvn.transform( new CatchProjNode(catc, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci) );2723Node* excp = _gvn.transform( new CatchProjNode(catc, CatchProjNode::catch_all_index, CatchProjNode::no_handler_bci) );27242725{ PreserveJVMState pjvms(this);2726set_control(excp);2727set_i_o(i_o);27282729if (excp != top()) {2730if (deoptimize) {2731// Deoptimize if an exception is caught. Don't construct exception state in this case.2732uncommon_trap(Deoptimization::Reason_unhandled,2733Deoptimization::Action_none);2734} else {2735// Create an exception state also.2736// Use an exact type if the caller has a specific exception.2737const Type* ex_type = TypeOopPtr::make_from_klass_unique(ex_klass)->cast_to_ptr_type(TypePtr::NotNull);2738Node* ex_oop = new CreateExNode(ex_type, control(), i_o);2739add_exception_state(make_exception_state(_gvn.transform(ex_oop)));2740}2741}2742}27432744// Get the no-exception control from the CatchNode.2745set_control(norm);2746}27472748static IfNode* gen_subtype_check_compare(Node* ctrl, Node* in1, Node* in2, BoolTest::mask test, float p, PhaseGVN& gvn, BasicType bt) {2749Node* cmp = NULL;2750switch(bt) {2751case T_INT: cmp = new CmpINode(in1, in2); break;2752case T_ADDRESS: cmp = new CmpPNode(in1, in2); break;2753default: fatal("unexpected comparison type %s", type2name(bt));2754}2755gvn.transform(cmp);2756Node* bol = gvn.transform(new BoolNode(cmp, test));2757IfNode* iff = new IfNode(ctrl, bol, p, COUNT_UNKNOWN);2758gvn.transform(iff);2759if (!bol->is_Con()) gvn.record_for_igvn(iff);2760return iff;2761}27622763//-------------------------------gen_subtype_check-----------------------------2764// Generate a subtyping check. Takes as input the subtype and supertype.2765// Returns 2 values: sets the default control() to the true path and returns2766// the false path. Only reads invariant memory; sets no (visible) memory.2767// The PartialSubtypeCheckNode sets the hidden 1-word cache in the encoding2768// but that's not exposed to the optimizer. This call also doesn't take in an2769// Object; if you wish to check an Object you need to load the Object's class2770// prior to coming here.2771Node* Phase::gen_subtype_check(Node* subklass, Node* superklass, Node** ctrl, Node* mem, PhaseGVN& gvn) {2772Compile* C = gvn.C;2773if ((*ctrl)->is_top()) {2774return C->top();2775}27762777// Fast check for identical types, perhaps identical constants.2778// The types can even be identical non-constants, in cases2779// involving Array.newInstance, Object.clone, etc.2780if (subklass == superklass)2781return C->top(); // false path is dead; no test needed.27822783if (gvn.type(superklass)->singleton()) {2784ciKlass* superk = gvn.type(superklass)->is_klassptr()->klass();2785ciKlass* subk = gvn.type(subklass)->is_klassptr()->klass();27862787// In the common case of an exact superklass, try to fold up the2788// test before generating code. You may ask, why not just generate2789// the code and then let it fold up? The answer is that the generated2790// code will necessarily include null checks, which do not always2791// completely fold away. If they are also needless, then they turn2792// into a performance loss. 
    // Example:
    // Foo[] fa = blah(); Foo x = fa[0]; fa[1] = x;
    // Here, the type of 'fa' is often exact, so the store check
    // of fa[1]=x will fold up, without testing the nullness of x.
    switch (C->static_subtype_check(superk, subk)) {
    case Compile::SSC_always_false:
      {
        Node* always_fail = *ctrl;
        *ctrl = gvn.C->top();
        return always_fail;
      }
    case Compile::SSC_always_true:
      return C->top();
    case Compile::SSC_easy_test:
      {
        // Just do a direct pointer compare and be done.
        IfNode* iff = gen_subtype_check_compare(*ctrl, subklass, superklass, BoolTest::eq, PROB_STATIC_FREQUENT, gvn, T_ADDRESS);
        *ctrl = gvn.transform(new IfTrueNode(iff));
        return gvn.transform(new IfFalseNode(iff));
      }
    case Compile::SSC_full_test:
      break;
    default:
      ShouldNotReachHere();
    }
  }

  // %%% Possible further optimization: Even if the superklass is not exact,
  // if the subklass is the unique subtype of the superklass, the check
  // will always succeed. We could leave a dependency behind to ensure this.

  // First load the super-klass's check-offset
  Node *p1 = gvn.transform(new AddPNode(superklass, superklass, gvn.MakeConX(in_bytes(Klass::super_check_offset_offset()))));
  Node* m = C->immutable_memory();
  Node *chk_off = gvn.transform(new LoadINode(NULL, m, p1, gvn.type(p1)->is_ptr(), TypeInt::INT, MemNode::unordered));
  int cacheoff_con = in_bytes(Klass::secondary_super_cache_offset());
  bool might_be_cache = (gvn.find_int_con(chk_off, cacheoff_con) == cacheoff_con);

  // Load from the sub-klass's super-class display list, or a 1-word cache of
  // the secondary superclass list, or a failing value with a sentinel offset
  // if the super-klass is an interface or exceptionally deep in the Java
  // hierarchy and we have to scan the secondary superclass list the hard way.
  // Worst-case type is a little odd: NULL is allowed as a result (usually
  // klass loads can never produce a NULL).
  Node *chk_off_X = chk_off;
#ifdef _LP64
  chk_off_X = gvn.transform(new ConvI2LNode(chk_off_X));
#endif
  Node *p2 = gvn.transform(new AddPNode(subklass,subklass,chk_off_X));
  // For some types like interfaces the following loadKlass is from a 1-word
  // cache which is mutable so can't use immutable memory. Other
  // types load from the super-class display table which is immutable.
  Node *kmem = C->immutable_memory();
  // secondary_super_cache is not immutable but can be treated as such because:
  // - no ideal node writes to it in a way that could cause an
  //   incorrect/missed optimization of the following Load.
  // - it's a cache so, worst case, not reading the latest value
  //   wouldn't cause incorrect execution
  if (might_be_cache && mem != NULL) {
    kmem = mem->is_MergeMem() ? mem->as_MergeMem()->memory_at(C->get_alias_index(gvn.type(p2)->is_ptr())) : mem;
  }
  Node *nkls = gvn.transform(LoadKlassNode::make(gvn, NULL, kmem, p2, gvn.type(p2)->is_ptr(), TypeKlassPtr::OBJECT_OR_NULL));

  // Compile speed common case: ARE a subtype and we canNOT fail
  if( superklass == nkls )
    return C->top();             // false path is dead; no test needed.

  // See if we get an immediate positive hit. Happens roughly 83% of the
  // time. 
Test to see if the value loaded just previously from the subklass2861// is exactly the superklass.2862IfNode *iff1 = gen_subtype_check_compare(*ctrl, superklass, nkls, BoolTest::eq, PROB_LIKELY(0.83f), gvn, T_ADDRESS);2863Node *iftrue1 = gvn.transform( new IfTrueNode (iff1));2864*ctrl = gvn.transform(new IfFalseNode(iff1));28652866// Compile speed common case: Check for being deterministic right now. If2867// chk_off is a constant and not equal to cacheoff then we are NOT a2868// subklass. In this case we need exactly the 1 test above and we can2869// return those results immediately.2870if (!might_be_cache) {2871Node* not_subtype_ctrl = *ctrl;2872*ctrl = iftrue1; // We need exactly the 1 test above2873return not_subtype_ctrl;2874}28752876// Gather the various success & failures here2877RegionNode *r_ok_subtype = new RegionNode(4);2878gvn.record_for_igvn(r_ok_subtype);2879RegionNode *r_not_subtype = new RegionNode(3);2880gvn.record_for_igvn(r_not_subtype);28812882r_ok_subtype->init_req(1, iftrue1);28832884// Check for immediate negative hit. Happens roughly 11% of the time (which2885// is roughly 63% of the remaining cases). Test to see if the loaded2886// check-offset points into the subklass display list or the 1-element2887// cache. If it points to the display (and NOT the cache) and the display2888// missed then it's not a subtype.2889Node *cacheoff = gvn.intcon(cacheoff_con);2890IfNode *iff2 = gen_subtype_check_compare(*ctrl, chk_off, cacheoff, BoolTest::ne, PROB_LIKELY(0.63f), gvn, T_INT);2891r_not_subtype->init_req(1, gvn.transform(new IfTrueNode (iff2)));2892*ctrl = gvn.transform(new IfFalseNode(iff2));28932894// Check for self. Very rare to get here, but it is taken 1/3 the time.2895// No performance impact (too rare) but allows sharing of secondary arrays2896// which has some footprint reduction.2897IfNode *iff3 = gen_subtype_check_compare(*ctrl, subklass, superklass, BoolTest::eq, PROB_LIKELY(0.36f), gvn, T_ADDRESS);2898r_ok_subtype->init_req(2, gvn.transform(new IfTrueNode(iff3)));2899*ctrl = gvn.transform(new IfFalseNode(iff3));29002901// -- Roads not taken here: --2902// We could also have chosen to perform the self-check at the beginning2903// of this code sequence, as the assembler does. This would not pay off2904// the same way, since the optimizer, unlike the assembler, can perform2905// static type analysis to fold away many successful self-checks.2906// Non-foldable self checks work better here in second position, because2907// the initial primary superclass check subsumes a self-check for most2908// types. An exception would be a secondary type like array-of-interface,2909// which does not appear in its own primary supertype display.2910// Finally, we could have chosen to move the self-check into the2911// PartialSubtypeCheckNode, and from there out-of-line in a platform2912// dependent manner. But it is worthwhile to have the check here,2913// where it can be perhaps be optimized. The cost in code space is2914// small (register compare, branch).29152916// Now do a linear scan of the secondary super-klass array. 
Again, no real2917// performance impact (too rare) but it's gotta be done.2918// Since the code is rarely used, there is no penalty for moving it2919// out of line, and it can only improve I-cache density.2920// The decision to inline or out-of-line this final check is platform2921// dependent, and is found in the AD file definition of PartialSubtypeCheck.2922Node* psc = gvn.transform(2923new PartialSubtypeCheckNode(*ctrl, subklass, superklass));29242925IfNode *iff4 = gen_subtype_check_compare(*ctrl, psc, gvn.zerocon(T_OBJECT), BoolTest::ne, PROB_FAIR, gvn, T_ADDRESS);2926r_not_subtype->init_req(2, gvn.transform(new IfTrueNode (iff4)));2927r_ok_subtype ->init_req(3, gvn.transform(new IfFalseNode(iff4)));29282929// Return false path; set default control to true path.2930*ctrl = gvn.transform(r_ok_subtype);2931return gvn.transform(r_not_subtype);2932}29332934Node* GraphKit::gen_subtype_check(Node* obj_or_subklass, Node* superklass) {2935if (ExpandSubTypeCheckAtParseTime) {2936MergeMemNode* mem = merged_memory();2937Node* ctrl = control();2938Node* subklass = obj_or_subklass;2939if (!_gvn.type(obj_or_subklass)->isa_klassptr()) {2940subklass = load_object_klass(obj_or_subklass);2941}29422943Node* n = Phase::gen_subtype_check(subklass, superklass, &ctrl, mem, _gvn);2944set_control(ctrl);2945return n;2946}29472948const TypePtr* adr_type = TypeKlassPtr::make(TypePtr::NotNull, C->env()->Object_klass(), Type::OffsetBot);2949Node* check = _gvn.transform(new SubTypeCheckNode(C, obj_or_subklass, superklass));2950Node* bol = _gvn.transform(new BoolNode(check, BoolTest::eq));2951IfNode* iff = create_and_xform_if(control(), bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);2952set_control(_gvn.transform(new IfTrueNode(iff)));2953return _gvn.transform(new IfFalseNode(iff));2954}29552956// Profile-driven exact type check:2957Node* GraphKit::type_check_receiver(Node* receiver, ciKlass* klass,2958float prob,2959Node* *casted_receiver) {2960const TypeKlassPtr* tklass = TypeKlassPtr::make(klass);2961Node* recv_klass = load_object_klass(receiver);2962Node* want_klass = makecon(tklass);2963Node* cmp = _gvn.transform( new CmpPNode(recv_klass, want_klass) );2964Node* bol = _gvn.transform( new BoolNode(cmp, BoolTest::eq) );2965IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN);2966set_control( _gvn.transform( new IfTrueNode (iff) ));2967Node* fail = _gvn.transform( new IfFalseNode(iff) );29682969const TypeOopPtr* recv_xtype = tklass->as_instance_type();2970assert(recv_xtype->klass_is_exact(), "");29712972// Subsume downstream occurrences of receiver with a cast to2973// recv_xtype, since now we know what the type will be.2974Node* cast = new CheckCastPPNode(control(), receiver, recv_xtype);2975(*casted_receiver) = _gvn.transform(cast);2976// (User must make the replace_in_map call.)29772978return fail;2979}29802981//------------------------------subtype_check_receiver-------------------------2982Node* GraphKit::subtype_check_receiver(Node* receiver, ciKlass* klass,2983Node** casted_receiver) {2984const TypeKlassPtr* tklass = TypeKlassPtr::make(klass);2985Node* want_klass = makecon(tklass);29862987Node* slow_ctl = gen_subtype_check(receiver, want_klass);29882989// Cast receiver after successful check2990const TypeOopPtr* recv_type = tklass->cast_to_exactness(false)->is_klassptr()->as_instance_type();2991Node* cast = new CheckCastPPNode(control(), receiver, recv_type);2992(*casted_receiver) = _gvn.transform(cast);29932994return 
slow_ctl;2995}29962997//------------------------------seems_never_null-------------------------------2998// Use null_seen information if it is available from the profile.2999// If we see an unexpected null at a type check we record it and force a3000// recompile; the offending check will be recompiled to handle NULLs.3001// If we see several offending BCIs, then all checks in the3002// method will be recompiled.3003bool GraphKit::seems_never_null(Node* obj, ciProfileData* data, bool& speculating) {3004speculating = !_gvn.type(obj)->speculative_maybe_null();3005Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculating);3006if (UncommonNullCast // Cutout for this technique3007&& obj != null() // And not the -Xcomp stupid case?3008&& !too_many_traps(reason)3009) {3010if (speculating) {3011return true;3012}3013if (data == NULL)3014// Edge case: no mature data. Be optimistic here.3015return true;3016// If the profile has not seen a null, assume it won't happen.3017assert(java_bc() == Bytecodes::_checkcast ||3018java_bc() == Bytecodes::_instanceof ||3019java_bc() == Bytecodes::_aastore, "MDO must collect null_seen bit here");3020return !data->as_BitData()->null_seen();3021}3022speculating = false;3023return false;3024}30253026void GraphKit::guard_klass_being_initialized(Node* klass) {3027int init_state_off = in_bytes(InstanceKlass::init_state_offset());3028Node* adr = basic_plus_adr(top(), klass, init_state_off);3029Node* init_state = LoadNode::make(_gvn, NULL, immutable_memory(), adr,3030adr->bottom_type()->is_ptr(), TypeInt::BYTE,3031T_BYTE, MemNode::unordered);3032init_state = _gvn.transform(init_state);30333034Node* being_initialized_state = makecon(TypeInt::make(InstanceKlass::being_initialized));30353036Node* chk = _gvn.transform(new CmpINode(being_initialized_state, init_state));3037Node* tst = _gvn.transform(new BoolNode(chk, BoolTest::eq));30383039{ BuildCutout unless(this, tst, PROB_MAX);3040uncommon_trap(Deoptimization::Reason_initialized, Deoptimization::Action_reinterpret);3041}3042}30433044void GraphKit::guard_init_thread(Node* klass) {3045int init_thread_off = in_bytes(InstanceKlass::init_thread_offset());3046Node* adr = basic_plus_adr(top(), klass, init_thread_off);30473048Node* init_thread = LoadNode::make(_gvn, NULL, immutable_memory(), adr,3049adr->bottom_type()->is_ptr(), TypePtr::NOTNULL,3050T_ADDRESS, MemNode::unordered);3051init_thread = _gvn.transform(init_thread);30523053Node* cur_thread = _gvn.transform(new ThreadLocalNode());30543055Node* chk = _gvn.transform(new CmpPNode(cur_thread, init_thread));3056Node* tst = _gvn.transform(new BoolNode(chk, BoolTest::eq));30573058{ BuildCutout unless(this, tst, PROB_MAX);3059uncommon_trap(Deoptimization::Reason_uninitialized, Deoptimization::Action_none);3060}3061}30623063void GraphKit::clinit_barrier(ciInstanceKlass* ik, ciMethod* context) {3064if (ik->is_being_initialized()) {3065if (C->needs_clinit_barrier(ik, context)) {3066Node* klass = makecon(TypeKlassPtr::make(ik));3067guard_klass_being_initialized(klass);3068guard_init_thread(klass);3069insert_mem_bar(Op_MemBarCPUOrder);3070}3071} else if (ik->is_initialized()) {3072return; // no barrier needed3073} else {3074uncommon_trap(Deoptimization::Reason_uninitialized,3075Deoptimization::Action_reinterpret,3076NULL);3077}3078}30793080//------------------------maybe_cast_profiled_receiver-------------------------3081// If the profile has seen exactly one type, narrow to exactly that type.3082// Subsequent type checks will always fold up.3083Node* 

//------------------------maybe_cast_profiled_receiver-------------------------
// If the profile has seen exactly one type, narrow to exactly that type.
// Subsequent type checks will always fold up.
Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
                                             ciKlass* require_klass,
                                             ciKlass* spec_klass,
                                             bool safe_for_replace) {
  if (!UseTypeProfile || !TypeProfileCasts) return NULL;

  Deoptimization::DeoptReason reason = Deoptimization::reason_class_check(spec_klass != NULL);

  // Make sure we haven't already deoptimized from this tactic.
  if (too_many_traps_or_recompiles(reason))
    return NULL;

  // (No, this isn't a call, but it's enough like a virtual call
  // to use the same ciMethod accessor to get the profile info...)
  // If we have a speculative type use it instead of profiling (which
  // may not help us)
  ciKlass* exact_kls = spec_klass == NULL ? profile_has_unique_klass() : spec_klass;
  if (exact_kls != NULL) {// no cast failures here
    if (require_klass == NULL ||
        C->static_subtype_check(require_klass, exact_kls) == Compile::SSC_always_true) {
      // If we narrow the type to match what the type profile sees or
      // the speculative type, we can then remove the rest of the
      // cast.
      // This is a win, even if the exact_kls is very specific,
      // because downstream operations, such as method calls,
      // will often benefit from the sharper type.
      Node* exact_obj = not_null_obj; // will get updated in place...
      Node* slow_ctl = type_check_receiver(exact_obj, exact_kls, 1.0,
                                           &exact_obj);
      { PreserveJVMState pjvms(this);
        set_control(slow_ctl);
        uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
      }
      if (safe_for_replace) {
        replace_in_map(not_null_obj, exact_obj);
      }
      return exact_obj;
    }
    // assert(ssc == Compile::SSC_always_true)... except maybe the profile lied to us.
  }

  return NULL;
}

/**
 * Cast obj to type and emit guard unless we had too many traps here
 * already
 *
 * @param obj node being casted
 * @param type type to cast the node to
 * @param not_null true if we know node cannot be null
 */
Node* GraphKit::maybe_cast_profiled_obj(Node* obj,
                                        ciKlass* type,
                                        bool not_null) {
  if (stopped()) {
    return obj;
  }

  // type == NULL if profiling tells us this object is always null
  if (type != NULL) {
    Deoptimization::DeoptReason class_reason = Deoptimization::Reason_speculate_class_check;
    Deoptimization::DeoptReason null_reason = Deoptimization::Reason_speculate_null_check;

    if (!too_many_traps_or_recompiles(null_reason) &&
        !too_many_traps_or_recompiles(class_reason)) {
      Node* not_null_obj = NULL;
      // not_null is true if we know the object is not null and
      // there's no need for a null check
      if (!not_null) {
        Node* null_ctl = top();
        not_null_obj = null_check_oop(obj, &null_ctl, true, true, true);
        assert(null_ctl->is_top(), "no null control here");
      } else {
        not_null_obj = obj;
      }

      Node* exact_obj = not_null_obj;
      ciKlass* exact_kls = type;
      Node* slow_ctl = type_check_receiver(exact_obj, exact_kls, 1.0,
                                           &exact_obj);
      {
        PreserveJVMState pjvms(this);
        set_control(slow_ctl);
        uncommon_trap_exact(class_reason, Deoptimization::Action_maybe_recompile);
      }
      replace_in_map(not_null_obj, exact_obj);
      obj = exact_obj;
    }
  } else {
    if (!too_many_traps_or_recompiles(Deoptimization::Reason_null_assert)) {
      Node* exact_obj = null_assert(obj);
      replace_in_map(obj, exact_obj);
      obj = exact_obj;
    }
  }
  return obj;
}

//-------------------------------gen_instanceof--------------------------------
// Generate an instance-of idiom.  Used by both the instance-of bytecode
// and the reflective instance-of call.
Node* GraphKit::gen_instanceof(Node* obj, Node* superklass, bool safe_for_replace) {
  kill_dead_locals();           // Benefit all the uncommon traps
  assert( !stopped(), "dead parse path should be checked in callers" );
  assert(!TypePtr::NULL_PTR->higher_equal(_gvn.type(superklass)->is_klassptr()),
         "must check for not-null not-dead klass in callers");

  // Make the merge point
  enum { _obj_path = 1, _fail_path, _null_path, PATH_LIMIT };
  RegionNode* region = new RegionNode(PATH_LIMIT);
  Node*       phi    = new PhiNode(region, TypeInt::BOOL);
  C->set_has_split_ifs(true); // Has chance for split-if optimization

  ciProfileData* data = NULL;
  if (java_bc() == Bytecodes::_instanceof) {  // Only for the bytecode
    data = method()->method_data()->bci_to_data(bci());
  }
  bool speculative_not_null = false;
  bool never_see_null = (ProfileDynamicTypes  // aggressive use of profile
                         && seems_never_null(obj, data, speculative_not_null));

  // Null check; get casted pointer; set region slot 3
  Node* null_ctl = top();
  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);

  // If not_null_obj is dead, only null-path is taken
  if (stopped()) {              // Doing instance-of on a NULL?
    set_control(null_ctl);
    return intcon(0);
  }
  region->init_req(_null_path, null_ctl);
  phi   ->init_req(_null_path, intcon(0)); // Set null path value
  if (null_ctl == top()) {
    // Do this eagerly, so that pattern matches like is_diamond_phi
    // will work even during parsing.
    assert(_null_path == PATH_LIMIT-1, "delete last");
    region->del_req(_null_path);
    phi   ->del_req(_null_path);
  }

  // Do we know the type check always succeeds?
  bool known_statically = false;
  if (_gvn.type(superklass)->singleton()) {
    ciKlass* superk = _gvn.type(superklass)->is_klassptr()->klass();
    ciKlass* subk   = _gvn.type(obj)->is_oopptr()->klass();
    if (subk != NULL && subk->is_loaded()) {
      int static_res = C->static_subtype_check(superk, subk);
      known_statically = (static_res == Compile::SSC_always_true || static_res == Compile::SSC_always_false);
    }
  }

  if (!known_statically) {
    const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
    // We may not have profiling here or it may not help us. If we
    // have a speculative type use it to perform an exact cast.
    ciKlass* spec_obj_type = obj_type->speculative_type();
    if (spec_obj_type != NULL || (ProfileDynamicTypes && data != NULL)) {
      Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, NULL, spec_obj_type, safe_for_replace);
      if (stopped()) {            // Profile disagrees with this path.
        set_control(null_ctl);    // Null is the only remaining possibility.
        return intcon(0);
      }
      if (cast_obj != NULL) {
        not_null_obj = cast_obj;
      }
    }
  }

  // Generate the subtype check
  Node* not_subtype_ctrl = gen_subtype_check(not_null_obj, superklass);

  // Plug in the success path to the general merge in slot 1.
  region->init_req(_obj_path, control());
  phi   ->init_req(_obj_path, intcon(1));

  // Plug in the failing path to the general merge in slot 2.
  region->init_req(_fail_path, not_subtype_ctrl);
  phi   ->init_req(_fail_path, intcon(0));

  // Return final merged results
  set_control( _gvn.transform(region) );
  record_for_igvn(region);

  // If we know the type check always succeeds then we don't use the
  // profiling data at this bytecode. Don't lose it, feed it to the
  // type system as a speculative type.
  if (safe_for_replace) {
    Node* casted_obj = record_profiled_receiver_for_speculation(obj);
    replace_in_map(obj, casted_obj);
  }

  return _gvn.transform(phi);
}

//-------------------------------gen_checkcast---------------------------------
// Generate a checkcast idiom.  Used by both the checkcast bytecode and the
// array store bytecode.  Stack must be as-if BEFORE doing the bytecode so the
// uncommon-trap paths work.  Adjust stack after this call.
// If failure_control is supplied and not null, it is filled in with
// the control edge for the cast failure.  Otherwise, an appropriate
// uncommon trap or exception is thrown.
Node* GraphKit::gen_checkcast(Node *obj, Node* superklass,
                              Node* *failure_control) {
  kill_dead_locals();           // Benefit all the uncommon traps
  const TypeKlassPtr *tk = _gvn.type(superklass)->is_klassptr();
  const Type *toop = TypeOopPtr::make_from_klass(tk->klass());

  // Fast cutout:  Check the case that the cast is vacuously true.
  // This detects the common cases where the test will short-circuit
  // away completely.  We do this before we perform the null check,
  // because if the test is going to turn into zero code, we don't
  // want a residual null check left around.  (Causes a slowdown,
  // for example, in some objArray manipulations, such as a[i]=a[j].)
  if (tk->singleton()) {
    const TypeOopPtr* objtp = _gvn.type(obj)->isa_oopptr();
    if (objtp != NULL && objtp->klass() != NULL) {
      switch (C->static_subtype_check(tk->klass(), objtp->klass())) {
      case Compile::SSC_always_true:
        // If we know the type check always succeeds then we don't use
        // the profiling data at this bytecode. Don't lose it, feed it
        // to the type system as a speculative type.
        return record_profiled_receiver_for_speculation(obj);
      case Compile::SSC_always_false:
        // It needs a null check because a null will *pass* the cast check.
        // A non-null value will always produce an exception.
        if (!objtp->maybe_null()) {
          builtin_throw(Deoptimization::Reason_class_check, makecon(TypeKlassPtr::make(objtp->klass())));
          return top();
        } else if (!too_many_traps_or_recompiles(Deoptimization::Reason_null_assert)) {
          return null_assert(obj);
        }
        break; // Fall through to full check
      }
    }
  }

  ciProfileData* data = NULL;
  bool safe_for_replace = false;
  if (failure_control == NULL) {        // use MDO in regular case only
    assert(java_bc() == Bytecodes::_aastore ||
           java_bc() == Bytecodes::_checkcast,
           "interpreter profiles type checks only for these BCs");
    data = method()->method_data()->bci_to_data(bci());
    safe_for_replace = true;
  }

  // Make the merge point
  enum { _obj_path = 1, _null_path, PATH_LIMIT };
  RegionNode* region = new RegionNode(PATH_LIMIT);
  Node*       phi    = new PhiNode(region, toop);
  C->set_has_split_ifs(true); // Has chance for split-if optimization

  // Use null-cast information if it is available
  bool speculative_not_null = false;
  bool never_see_null = ((failure_control == NULL)  // regular case only
                         && seems_never_null(obj, data, speculative_not_null));

  // Null check; get casted pointer; set region slot 3
  Node* null_ctl = top();
  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);

  // If not_null_obj is dead, only null-path is taken
  if (stopped()) {              // Doing checkcast on a NULL?
    set_control(null_ctl);
    return null();
  }
  region->init_req(_null_path, null_ctl);
  phi   ->init_req(_null_path, null());  // Set null path value
  if (null_ctl == top()) {
    // Do this eagerly, so that pattern matches like is_diamond_phi
    // will work even during parsing.
    assert(_null_path == PATH_LIMIT-1, "delete last");
    region->del_req(_null_path);
    phi   ->del_req(_null_path);
  }

  Node* cast_obj = NULL;
  if (tk->klass_is_exact()) {
    // The following optimization tries to statically cast the speculative type of the object
    // (for example obtained during profiling) to the type of the superklass and then do a
    // dynamic check that the type of the object is what we expect. To work correctly
    // for checkcast and aastore the type of superklass should be exact.
    const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
    // We may not have profiling here or it may not help us. If we have
    // a speculative type use it to perform an exact cast.
    ciKlass* spec_obj_type = obj_type->speculative_type();
    if (spec_obj_type != NULL || data != NULL) {
      cast_obj = maybe_cast_profiled_receiver(not_null_obj, tk->klass(), spec_obj_type, safe_for_replace);
      if (cast_obj != NULL) {
        if (failure_control != NULL) // failure is now impossible
          (*failure_control) = top();
        // adjust the type of the phi to the exact klass:
        phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR));
      }
    }
  }

  if (cast_obj == NULL) {
    // Generate the subtype check
    Node* not_subtype_ctrl = gen_subtype_check(not_null_obj, superklass );

    // Plug in success path into the merge
    cast_obj = _gvn.transform(new CheckCastPPNode(control(), not_null_obj, toop));
    // Failure path ends in uncommon trap (or may be dead - failure impossible)
    if (failure_control == NULL) {
      if (not_subtype_ctrl != top()) { // If failure is possible
        PreserveJVMState pjvms(this);
        set_control(not_subtype_ctrl);
        builtin_throw(Deoptimization::Reason_class_check, load_object_klass(not_null_obj));
      }
    } else {
      (*failure_control) = not_subtype_ctrl;
    }
  }

  region->init_req(_obj_path, control());
  phi   ->init_req(_obj_path, cast_obj);

  // A merge of NULL or Casted-NotNull obj
  Node* res = _gvn.transform(phi);

  // Note I do NOT always 'replace_in_map(obj,result)' here.
  //  if( tk->klass()->can_be_primary_super()  )
  // This means that if I successfully store an Object into an array-of-String
  // I 'forget' that the Object is really now known to be a String.  I have to
  // do this because we don't have true union types for interfaces - if I store
  // a Baz into an array-of-Interface and then tell the optimizer it's an
  // Interface, I forget that it's also a Baz and cannot do Baz-like field
  // references to it.  FIX THIS WHEN UNION TYPES APPEAR!
  //   replace_in_map( obj, res );

  // Return final merged results
  set_control( _gvn.transform(region) );
  record_for_igvn(region);

  return record_profiled_receiver_for_speculation(res);
}
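
// When failure_control is NULL (the normal bytecode case), the failing path
// above ends in builtin_throw(Reason_class_check), which materializes the
// appropriate exception (a ClassCastException, or an ArrayStoreException at an
// aastore); when a failure_control is supplied, the caller wires up the
// failure path itself.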

//------------------------------next_monitor-----------------------------------
// What number should be given to the next monitor?
int GraphKit::next_monitor() {
  int current = jvms()->monitor_depth()* C->sync_stack_slots();
  int next = current + C->sync_stack_slots();
  // Keep the toplevel high water mark current:
  if (C->fixed_slots() < next)  C->set_fixed_slots(next);
  return current;
}
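
// For example, assuming C->sync_stack_slots() == 1: with one monitor already
// held, monitor_depth() == 1, so the new monitor is assigned slot 1 and
// fixed_slots() is raised to at least 2 to reserve room for it.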

//------------------------------insert_mem_bar---------------------------------
// Memory barrier to avoid floating things around
// The membar serves as a pinch point between both control and all memory slices.
Node* GraphKit::insert_mem_bar(int opcode, Node* precedent) {
  MemBarNode* mb = MemBarNode::make(C, opcode, Compile::AliasIdxBot, precedent);
  mb->init_req(TypeFunc::Control, control());
  mb->init_req(TypeFunc::Memory,  reset_memory());
  Node* membar = _gvn.transform(mb);
  set_control(_gvn.transform(new ProjNode(membar, TypeFunc::Control)));
  set_all_memory_call(membar);
  return membar;
}

//-------------------------insert_mem_bar_volatile----------------------------
// Memory barrier to avoid floating things around
// The membar serves as a pinch point between both control and memory(alias_idx).
// If you want to make a pinch point on all memory slices, do not use this
// function (even with AliasIdxBot); use insert_mem_bar() instead.
Node* GraphKit::insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent) {
  // When Parse::do_put_xxx updates a volatile field, it appends a series
  // of MemBarVolatile nodes, one for *each* volatile field alias category.
  // The first membar is on the same memory slice as the field store opcode.
  // This forces the membar to follow the store.  (Bug 6500685 broke this.)
  // All the other membars (for other volatile slices, including AliasIdxBot,
  // which stands for all unknown volatile slices) are control-dependent
  // on the first membar.  This prevents later volatile loads or stores
  // from sliding up past the just-emitted store.

  MemBarNode* mb = MemBarNode::make(C, opcode, alias_idx, precedent);
  mb->set_req(TypeFunc::Control,control());
  if (alias_idx == Compile::AliasIdxBot) {
    mb->set_req(TypeFunc::Memory, merged_memory()->base_memory());
  } else {
    assert(!(opcode == Op_Initialize && alias_idx != Compile::AliasIdxRaw), "fix caller");
    mb->set_req(TypeFunc::Memory, memory(alias_idx));
  }
  Node* membar = _gvn.transform(mb);
  set_control(_gvn.transform(new ProjNode(membar, TypeFunc::Control)));
  if (alias_idx == Compile::AliasIdxBot) {
    merged_memory()->set_base_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)));
  } else {
    set_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)),alias_idx);
  }
  return membar;
}

//------------------------------shared_lock------------------------------------
// Emit locking code.
FastLockNode* GraphKit::shared_lock(Node* obj) {
  // bci is either a monitorenter bc or InvocationEntryBci
  // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
  assert(SynchronizationEntryBCI == InvocationEntryBci, "");

  if( !GenerateSynchronizationCode )
    return NULL;                // Not locking things?
  if (stopped())                // Dead monitor?
    return NULL;

  assert(dead_locals_are_killed(), "should kill locals before sync. point");

  // Box the stack location
  Node* box = _gvn.transform(new BoxLockNode(next_monitor()));
  Node* mem = reset_memory();

  FastLockNode * flock = _gvn.transform(new FastLockNode(0, obj, box) )->as_FastLock();
  if (UseBiasedLocking && PrintPreciseBiasedLockingStatistics) {
    // Create the counters for this fast lock.
    flock->create_lock_counter(sync_jvms()); // sync_jvms used to get current bci
  }

  // Create the rtm counters for this fast lock if needed.
  flock->create_rtm_lock_counter(sync_jvms()); // sync_jvms used to get current bci

  // Add monitor to debug info for the slow path.  If we block inside the
  // slow path and de-opt, we need the monitor hanging around
  map()->push_monitor( flock );

  const TypeFunc *tf = LockNode::lock_type();
  LockNode *lock = new LockNode(C, tf);

  lock->init_req( TypeFunc::Control, control() );
  lock->init_req( TypeFunc::Memory , mem );
  lock->init_req( TypeFunc::I_O    , top() )     ;   // does no i/o
  lock->init_req( TypeFunc::FramePtr, frameptr() );
  lock->init_req( TypeFunc::ReturnAdr, top() );

  lock->init_req(TypeFunc::Parms + 0, obj);
  lock->init_req(TypeFunc::Parms + 1, box);
  lock->init_req(TypeFunc::Parms + 2, flock);
  add_safepoint_edges(lock);

  lock = _gvn.transform( lock )->as_Lock();

  // lock has no side-effects, sets few values
  set_predefined_output_for_runtime_call(lock, mem, TypeRawPtr::BOTTOM);

  insert_mem_bar(Op_MemBarAcquireLock);

  // Add this to the worklist so that the lock can be eliminated
  record_for_igvn(lock);

#ifndef PRODUCT
  if (PrintLockStatistics) {
    // Update the counter for this lock.  Don't bother using an atomic
    // operation since we don't require absolute accuracy.
    lock->create_lock_counter(map()->jvms());
    increment_counter(lock->counter()->addr());
  }
#endif

  return flock;
}
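
// The matching shared_unlock() below emits the mirror-image shape: a
// MemBarReleaseLock followed by an UnlockNode, and it pops the monitor that
// was pushed onto the debug info here.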

//------------------------------shared_unlock----------------------------------
// Emit unlocking code.
void GraphKit::shared_unlock(Node* box, Node* obj) {
  // bci is either a monitorenter bc or InvocationEntryBci
  // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
  assert(SynchronizationEntryBCI == InvocationEntryBci, "");

  if( !GenerateSynchronizationCode )
    return;
  if (stopped()) {               // Dead monitor?
    map()->pop_monitor();        // Kill monitor from debug info
    return;
  }

  // Memory barrier to avoid floating things down past the locked region
  insert_mem_bar(Op_MemBarReleaseLock);

  const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type();
  UnlockNode *unlock = new UnlockNode(C, tf);
#ifdef ASSERT
  unlock->set_dbg_jvms(sync_jvms());
#endif
  uint raw_idx = Compile::AliasIdxRaw;
  unlock->init_req( TypeFunc::Control, control() );
  unlock->init_req( TypeFunc::Memory , memory(raw_idx) );
  unlock->init_req( TypeFunc::I_O    , top() )     ;   // does no i/o
  unlock->init_req( TypeFunc::FramePtr, frameptr() );
  unlock->init_req( TypeFunc::ReturnAdr, top() );

  unlock->init_req(TypeFunc::Parms + 0, obj);
  unlock->init_req(TypeFunc::Parms + 1, box);
  unlock = _gvn.transform(unlock)->as_Unlock();

  Node* mem = reset_memory();

  // unlock has no side-effects, sets few values
  set_predefined_output_for_runtime_call(unlock, mem, TypeRawPtr::BOTTOM);

  // Kill monitor from debug info
  map()->pop_monitor( );
}

//-------------------------------get_layout_helper-----------------------------
// If the given klass is a constant or known to be an array,
// fetch the constant layout helper value into constant_value
// and return (Node*)NULL.  Otherwise, load the non-constant
// layout helper value, and return the node which represents it.
// This two-faced routine is useful because allocation sites
// almost always feature constant types.
Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) {
  const TypeKlassPtr* inst_klass = _gvn.type(klass_node)->isa_klassptr();
  if (!StressReflectiveCode && inst_klass != NULL) {
    ciKlass* klass = inst_klass->klass();
    bool    xklass = inst_klass->klass_is_exact();
    if (xklass || klass->is_array_klass()) {
      jint lhelper = klass->layout_helper();
      if (lhelper != Klass::_lh_neutral_value) {
        constant_value = lhelper;
        return (Node*) NULL;
      }
    }
  }
  constant_value = Klass::_lh_neutral_value;  // put in a known value
  Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset()));
  return make_load(NULL, lhp, TypeInt::INT, T_INT, MemNode::unordered);
}

// We just put in an allocate/initialize with a big raw-memory effect.
// Hook selected additional alias categories on the initialization.
static void hook_memory_on_init(GraphKit& kit, int alias_idx,
                                MergeMemNode* init_in_merge,
                                Node* init_out_raw) {
  DEBUG_ONLY(Node* init_in_raw = init_in_merge->base_memory());
  assert(init_in_merge->memory_at(alias_idx) == init_in_raw, "");

  Node* prevmem = kit.memory(alias_idx);
  init_in_merge->set_memory_at(alias_idx, prevmem);
  kit.set_memory(init_out_raw, alias_idx);
}

//---------------------------set_output_for_allocation-------------------------
Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
                                          const TypeOopPtr* oop_type,
                                          bool deoptimize_on_exception) {
  int rawidx = Compile::AliasIdxRaw;
  alloc->set_req( TypeFunc::FramePtr, frameptr() );
  add_safepoint_edges(alloc);
  Node* allocx = _gvn.transform(alloc);
  set_control( _gvn.transform(new ProjNode(allocx, TypeFunc::Control) ) );
  // create memory projection for i_o
  set_memory ( _gvn.transform( new ProjNode(allocx, TypeFunc::Memory, true) ), rawidx );
  make_slow_call_ex(allocx, env()->Throwable_klass(), true, deoptimize_on_exception);

  // create a memory projection as for the normal control path
  Node* malloc = _gvn.transform(new ProjNode(allocx, TypeFunc::Memory));
  set_memory(malloc, rawidx);

  // a normal slow-call doesn't change i_o, but an allocation does
  // we create a separate i_o projection for the normal control path
  set_i_o(_gvn.transform( new ProjNode(allocx, TypeFunc::I_O, false) ) );
  Node* rawoop = _gvn.transform( new ProjNode(allocx, TypeFunc::Parms) );

  // put in an initialization barrier
  InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, rawidx,
                                                 rawoop)->as_Initialize();
  assert(alloc->initialization() == init,  "2-way macro link must work");
  assert(init ->allocation()     == alloc, "2-way macro link must work");
  {
    // Extract memory strands which may participate in the new object's
    // initialization, and source them from the new InitializeNode.
    // This will allow us to observe initializations when they occur,
    // and link them properly (as a group) to the InitializeNode.
    assert(init->in(InitializeNode::Memory) == malloc, "");
    MergeMemNode* minit_in = MergeMemNode::make(malloc);
    init->set_req(InitializeNode::Memory, minit_in);
    record_for_igvn(minit_in); // fold it up later, if possible
    Node* minit_out = memory(rawidx);
    assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
    // Add an edge in the MergeMem for the header fields so an access
    // to one of those has correct memory state
    set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::mark_offset_in_bytes())));
    set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::klass_offset_in_bytes())));
    if (oop_type->isa_aryptr()) {
      const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot);
      int            elemidx  = C->get_alias_index(telemref);
      hook_memory_on_init(*this, elemidx, minit_in, minit_out);
    } else if (oop_type->isa_instptr()) {
      ciInstanceKlass* ik = oop_type->klass()->as_instance_klass();
      for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) {
        ciField* field = ik->nonstatic_field_at(i);
        if (field->offset() >= TrackedInitializationLimit * HeapWordSize)
          continue;  // do not bother to track really large numbers of fields
        // Find (or create) the alias category for this field:
        int fieldidx = C->alias_type(field)->index();
        hook_memory_on_init(*this, fieldidx, minit_in, minit_out);
      }
    }
  }

  // Cast raw oop to the real thing...
  Node* javaoop = new CheckCastPPNode(control(), rawoop, oop_type);
  javaoop = _gvn.transform(javaoop);
  C->set_recent_alloc(control(), javaoop);
  assert(just_allocated_object(control()) == javaoop, "just allocated");

#ifdef ASSERT
  { // Verify that the AllocateNode::Ideal_allocation recognizers work:
    assert(AllocateNode::Ideal_allocation(rawoop, &_gvn) == alloc,
           "Ideal_allocation works");
    assert(AllocateNode::Ideal_allocation(javaoop, &_gvn) == alloc,
           "Ideal_allocation works");
    if (alloc->is_AllocateArray()) {
      assert(AllocateArrayNode::Ideal_array_allocation(rawoop, &_gvn) == alloc->as_AllocateArray(),
             "Ideal_allocation works");
      assert(AllocateArrayNode::Ideal_array_allocation(javaoop, &_gvn) == alloc->as_AllocateArray(),
             "Ideal_allocation works");
    } else {
      assert(alloc->in(AllocateNode::ALength)->is_top(), "no length, please");
    }
  }
#endif //ASSERT

  return javaoop;
}

//---------------------------new_instance--------------------------------------
// This routine takes a klass_node which may be constant (for a static type)
// or may be non-constant (for reflective code).  It will work equally well
// for either, and the graph will fold nicely if the optimizer later reduces
// the type to a constant.
// The optional arguments are for specialized use by intrinsics:
//  - If 'extra_slow_test' is not null, it is an extra condition for the slow path.
//  - If 'return_size_val' is not null, report the total object size to the caller.
//  - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
Node* GraphKit::new_instance(Node* klass_node,
                             Node* extra_slow_test,
                             Node* *return_size_val,
                             bool deoptimize_on_exception) {
  // Compute size in doublewords
  // The size is always an integral number of doublewords, represented
  // as a positive bytewise size stored in the klass's layout_helper.
  // The layout_helper also encodes (in a low bit) the need for a slow path.
  jint  layout_con = Klass::_lh_neutral_value;
  Node* layout_val = get_layout_helper(klass_node, layout_con);
  int   layout_is_con = (layout_val == NULL);

  if (extra_slow_test == NULL)  extra_slow_test = intcon(0);
  // Generate the initial go-slow test.  It's either ALWAYS (return a
  // Node for 1) or NEVER (return a NULL) or perhaps (in the reflective
  // case) a computed value derived from the layout_helper.
  Node* initial_slow_test = NULL;
  if (layout_is_con) {
    assert(!StressReflectiveCode, "stress mode does not use these paths");
    bool must_go_slow = Klass::layout_helper_needs_slow_path(layout_con);
    initial_slow_test = must_go_slow ? intcon(1) : extra_slow_test;
  } else {   // reflective case
    // This reflective path is used by Unsafe.allocateInstance.
    // (It may be stress-tested by specifying StressReflectiveCode.)
    // Basically, we want to get into the VM if there's an illegal argument.
    Node* bit = intcon(Klass::_lh_instance_slow_path_bit);
    initial_slow_test = _gvn.transform( new AndINode(layout_val, bit) );
    if (extra_slow_test != intcon(0)) {
      initial_slow_test = _gvn.transform( new OrINode(initial_slow_test, extra_slow_test) );
    }
    // (Macro-expander will further convert this to a Bool, if necessary.)
  }

  // Find the size in bytes.  This is easy; it's the layout_helper.
  // The size value must be valid even if the slow path is taken.
  Node* size = NULL;
  if (layout_is_con) {
    size = MakeConX(Klass::layout_helper_size_in_bytes(layout_con));
  } else {   // reflective case
    // This reflective path is used by clone and Unsafe.allocateInstance.
    size = ConvI2X(layout_val);

    // Clear the low bits to extract layout_helper_size_in_bytes:
    assert((int)Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
    Node* mask = MakeConX(~ (intptr_t)right_n_bits(LogBytesPerLong));
    size = _gvn.transform( new AndXNode(size, mask) );
  }
  if (return_size_val != NULL) {
    (*return_size_val) = size;
  }

  // This is a precise notnull oop of the klass.
  // (Actually, it need not be precise if this is a reflective allocation.)
  // It's what we cast the result to.
  const TypeKlassPtr* tklass  = _gvn.type(klass_node)->isa_klassptr();
  if (!tklass)  tklass = TypeKlassPtr::OBJECT;
  const TypeOopPtr* oop_type = tklass->as_instance_type();

  // Now generate allocation code

  // The entire memory state is needed for slow path of the allocation
  // since GC and deoptimization can happen.
  Node *mem = reset_memory();
  set_all_memory(mem); // Create new memory state

  AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
                                         control(), mem, i_o(),
                                         size, klass_node,
                                         initial_slow_test);

  return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception);
}
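
// In the constant case above the layout_helper already tells us whether the
// klass needs the slow path; in the reflective case the AndI tests the
// _lh_instance_slow_path_bit at runtime (set, for example, for classes whose
// instances require finalizer registration), forcing such allocations into
// the VM.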

//-------------------------------new_array-------------------------------------
// helper for both newarray and anewarray
// The 'length' parameter is (obviously) the length of the array.
// See comments on new_instance for the meaning of the other arguments.
Node* GraphKit::new_array(Node* klass_node,     // array klass (maybe variable)
                          Node* length,         // number of array elements
                          int   nargs,          // number of arguments to push back for uncommon trap
                          Node* *return_size_val,
                          bool deoptimize_on_exception) {
  jint  layout_con = Klass::_lh_neutral_value;
  Node* layout_val = get_layout_helper(klass_node, layout_con);
  int   layout_is_con = (layout_val == NULL);

  if (!layout_is_con && !StressReflectiveCode &&
      !too_many_traps(Deoptimization::Reason_class_check)) {
    // This is a reflective array creation site.
    // Optimistically assume that it is a subtype of Object[],
    // so that we can fold up all the address arithmetic.
    layout_con = Klass::array_layout_helper(T_OBJECT);
    Node* cmp_lh = _gvn.transform( new CmpINode(layout_val, intcon(layout_con)) );
    Node* bol_lh = _gvn.transform( new BoolNode(cmp_lh, BoolTest::eq) );
    { BuildCutout unless(this, bol_lh, PROB_MAX);
      inc_sp(nargs);
      uncommon_trap(Deoptimization::Reason_class_check,
                    Deoptimization::Action_maybe_recompile);
    }
    layout_val = NULL;
    layout_is_con = true;
  }

  // Generate the initial go-slow test.  Make sure we do not overflow
  // if length is huge (near 2Gig) or negative!  We do not need
  // exact double-words here, just a close approximation of needed
  // double-words.  We can't add any offset or rounding bits, lest we
  // take a size -1 of bytes and make it positive.  Use an unsigned
  // compare, so negative sizes look hugely positive.
  int fast_size_limit = FastAllocateSizeLimit;
  if (layout_is_con) {
    assert(!StressReflectiveCode, "stress mode does not use these paths");
    // Increase the size limit if we have exact knowledge of array type.
    int log2_esize = Klass::layout_helper_log2_element_size(layout_con);
    fast_size_limit <<= (LogBytesPerLong - log2_esize);
  }

  Node* initial_slow_cmp  = _gvn.transform( new CmpUNode( length, intcon( fast_size_limit ) ) );
  Node* initial_slow_test = _gvn.transform( new BoolNode( initial_slow_cmp, BoolTest::gt ) );

  // --- Size Computation ---
  // array_size = round_to_heap(array_header + (length << elem_shift));
  // where round_to_heap(x) == align_to(x, MinObjAlignmentInBytes)
  // and align_to(x, y) == ((x + y-1) & ~(y-1))
  // The rounding mask is strength-reduced, if possible.
  int round_mask = MinObjAlignmentInBytes - 1;
  Node* header_size = NULL;
  int   header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
  // (T_BYTE has the weakest alignment and size restrictions...)
  if (layout_is_con) {
    int       hsize  = Klass::layout_helper_header_size(layout_con);
    int       eshift = Klass::layout_helper_log2_element_size(layout_con);
    BasicType etype  = Klass::layout_helper_element_type(layout_con);
    if ((round_mask & ~right_n_bits(eshift)) == 0)
      round_mask = 0;  // strength-reduce it if it goes away completely
    assert((hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
    assert(header_size_min <= hsize, "generic minimum is smallest");
    header_size_min = hsize;
    header_size = intcon(hsize + round_mask);
  } else {
    Node* hss   = intcon(Klass::_lh_header_size_shift);
    Node* hsm   = intcon(Klass::_lh_header_size_mask);
    Node* hsize = _gvn.transform( new URShiftINode(layout_val, hss) );
    hsize       = _gvn.transform( new AndINode(hsize, hsm) );
    Node* mask  = intcon(round_mask);
    header_size = _gvn.transform( new AddINode(hsize, mask) );
  }

  Node* elem_shift = NULL;
  if (layout_is_con) {
    int eshift = Klass::layout_helper_log2_element_size(layout_con);
    if (eshift != 0)
      elem_shift = intcon(eshift);
  } else {
    // There is no need to mask or shift this value.
    // The semantics of LShiftINode include an implicit mask to 0x1F.
    assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
    elem_shift = layout_val;
  }

  // Transition to native address size for all offset calculations:
  Node* lengthx = ConvI2X(length);
  Node* headerx = ConvI2X(header_size);
#ifdef _LP64
  { const TypeInt* tilen = _gvn.find_int_type(length);
    if (tilen != NULL && tilen->_lo < 0) {
      // Add a manual constraint to a positive range.  Cf. array_element_address.
      jint size_max = fast_size_limit;
      if (size_max > tilen->_hi)  size_max = tilen->_hi;
      const TypeInt* tlcon = TypeInt::make(0, size_max, Type::WidenMin);

      // Only do a narrow I2L conversion if the range check passed.
      IfNode* iff = new IfNode(control(), initial_slow_test, PROB_MIN, COUNT_UNKNOWN);
      _gvn.transform(iff);
      RegionNode* region = new RegionNode(3);
      _gvn.set_type(region, Type::CONTROL);
      lengthx = new PhiNode(region, TypeLong::LONG);
      _gvn.set_type(lengthx, TypeLong::LONG);

      // Range check passed. Use ConvI2L node with narrow type.
      Node* passed = IfFalse(iff);
      region->init_req(1, passed);
      // Make I2L conversion control dependent to prevent it from
      // floating above the range check during loop optimizations.
      lengthx->init_req(1, C->constrained_convI2L(&_gvn, length, tlcon, passed));

      // Range check failed. Use ConvI2L with wide type because length may be invalid.
      region->init_req(2, IfTrue(iff));
      lengthx->init_req(2, ConvI2X(length));

      set_control(region);
      record_for_igvn(region);
      record_for_igvn(lengthx);
    }
  }
#endif

  // Combine header size (plus rounding) and body size.  Then round down.
  // This computation cannot overflow, because it is used only in two
  // places, one where the length is sharply limited, and the other
  // after a successful allocation.
  Node* abody = lengthx;
  if (elem_shift != NULL)
    abody     = _gvn.transform( new LShiftXNode(lengthx, elem_shift) );
  Node* size  = _gvn.transform( new AddXNode(headerx, abody) );
  if (round_mask != 0) {
    Node* mask = MakeConX(~round_mask);
    size       = _gvn.transform( new AndXNode(size, mask) );
  }
  // else if round_mask == 0, the size computation is self-rounding
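
  // Worked example (assuming a 16-byte array header and 8-byte heap alignment):
  // for a byte[] of length 13, header_size = 16 + 7 = 23 and abody = 13, so
  // size = (23 + 13) & ~7 = 32, i.e. round_to_heap(16 + 13) bytes.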

  if (return_size_val != NULL) {
    // This is the size
    (*return_size_val) = size;
  }

  // Now generate allocation code

  // The entire memory state is needed for slow path of the allocation
  // since GC and deoptimization can happen.
  Node *mem = reset_memory();
  set_all_memory(mem); // Create new memory state

  if (initial_slow_test->is_Bool()) {
    // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
    initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
  }

  // Create the AllocateArrayNode and its result projections
  AllocateArrayNode* alloc
    = new AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
                            control(), mem, i_o(),
                            size, klass_node,
                            initial_slow_test,
                            length);

  // Cast to correct type.  Note that the klass_node may be constant or not,
  // and in the latter case the actual array type will be inexact also.
  // (This happens via a non-constant argument to inline_native_newArray.)
  // In any case, the value of klass_node provides the desired array type.
  const TypeInt* length_type = _gvn.find_int_type(length);
  const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type();
  if (ary_type->isa_aryptr() && length_type != NULL) {
    // Try to get a better type than POS for the size
    ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
  }

  Node* javaoop = set_output_for_allocation(alloc, ary_type, deoptimize_on_exception);

  // Cast length on remaining path to be as narrow as possible
  if (map()->find_edge(length) >= 0) {
    Node* ccast = alloc->make_ideal_length(ary_type, &_gvn);
    if (ccast != length) {
      _gvn.set_type_bottom(ccast);
      record_for_igvn(ccast);
      replace_in_map(length, ccast);
    }
  }

  return javaoop;
}

// The following "Ideal_foo" functions are placed here because they recognize
// the graph shapes created by the functions immediately above.

//---------------------------Ideal_allocation----------------------------------
// Given an oop pointer or raw pointer, see if it feeds from an AllocateNode.
AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase) {
  if (ptr == NULL) {     // reduce dumb test in callers
    return NULL;
  }

  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  ptr = bs->step_over_gc_barrier(ptr);

  if (ptr->is_CheckCastPP()) { // strip only one raw-to-oop cast
    ptr = ptr->in(1);
    if (ptr == NULL) return NULL;
  }
  // Return NULL for allocations with several casts:
  //   j.l.reflect.Array.newInstance(jobject, jint)
  //   Object.clone()
  // to keep more precise type from last cast.
  if (ptr->is_Proj()) {
    Node* allo = ptr->in(0);
    if (allo != NULL && allo->is_Allocate()) {
      return allo->as_Allocate();
    }
  }
  // Report failure to match.
  return NULL;
}

// Fancy version which also strips off an offset (and reports it to caller).
AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase,
                                             intptr_t& offset) {
  Node* base = AddPNode::Ideal_base_and_offset(ptr, phase, offset);
  if (base == NULL)  return NULL;
  return Ideal_allocation(base, phase);
}

// Trace Initialize <- Proj[Parm] <- Allocate
AllocateNode* InitializeNode::allocation() {
  Node* rawoop = in(InitializeNode::RawAddress);
  if (rawoop->is_Proj()) {
    Node* alloc = rawoop->in(0);
    if (alloc->is_Allocate()) {
      return alloc->as_Allocate();
    }
  }
  return NULL;
}

// Trace Allocate -> Proj[Parm] -> Initialize
InitializeNode* AllocateNode::initialization() {
  ProjNode* rawoop = proj_out_or_null(AllocateNode::RawAddress);
  if (rawoop == NULL)  return NULL;
  for (DUIterator_Fast imax, i = rawoop->fast_outs(imax); i < imax; i++) {
    Node* init = rawoop->fast_out(i);
    if (init->is_Initialize()) {
      assert(init->as_Initialize()->allocation() == this, "2-way link");
      return init->as_Initialize();
    }
  }
  return NULL;
}

//----------------------------- loop predicates ---------------------------

//------------------------------add_predicate_impl----------------------------
void GraphKit::add_empty_predicate_impl(Deoptimization::DeoptReason reason, int nargs) {
  // Too many traps seen?
  if (too_many_traps(reason)) {
#ifdef ASSERT
    if (TraceLoopPredicate) {
      int tc = C->trap_count(reason);
      tty->print("too many traps=%s tcount=%d in ",
                 Deoptimization::trap_reason_name(reason), tc);
      method()->print(); // which method has too many predicate traps
      tty->cr();
    }
#endif
    // We cannot afford to take more traps here,
    // do not generate predicate.
    return;
  }

  Node *cont    = _gvn.intcon(1);
  Node* opq     = _gvn.transform(new Opaque1Node(C, cont));
  Node *bol     = _gvn.transform(new Conv2BNode(opq));
  IfNode* iff   = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN);
  Node* iffalse = _gvn.transform(new IfFalseNode(iff));
  C->add_predicate_opaq(opq);
  {
    PreserveJVMState pjvms(this);
    set_control(iffalse);
    inc_sp(nargs);
    uncommon_trap(reason, Deoptimization::Action_maybe_recompile);
  }
  Node* iftrue = _gvn.transform(new IfTrueNode(iff));
  set_control(iftrue);
}

//------------------------------add_predicate---------------------------------
void GraphKit::add_empty_predicates(int nargs) {
  // These loop predicates remain empty. All concrete loop predicates are inserted above the corresponding
  // empty loop predicate later by 'PhaseIdealLoop::create_new_if_for_predicate'. All concrete loop predicates of
  // a specific kind (normal, profile or limit check) share the same uncommon trap as the empty loop predicate.
  if (UseLoopPredicate) {
    add_empty_predicate_impl(Deoptimization::Reason_predicate, nargs);
  }
  if (UseProfiledLoopPredicate) {
    add_empty_predicate_impl(Deoptimization::Reason_profile_predicate, nargs);
  }
  // loop's limit check predicate should be near the loop.
  add_empty_predicate_impl(Deoptimization::Reason_loop_limit_check, nargs);
}
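
// Each empty predicate built above has the shape If(Conv2B(Opaque1(1))) with
// its false projection feeding an uncommon trap. Loop optimizations clone
// concrete predicates in above this point; if none turn out to be needed, the
// Opaque1 node is removed later, the condition constant-folds to true, and
// the trap disappears.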

void GraphKit::sync_kit(IdealKit& ideal) {
  set_all_memory(ideal.merged_memory());
  set_i_o(ideal.i_o());
  set_control(ideal.ctrl());
}

void GraphKit::final_sync(IdealKit& ideal) {
  // Final sync IdealKit and graphKit.
  sync_kit(ideal);
}

Node* GraphKit::load_String_length(Node* str, bool set_ctrl) {
  Node* len = load_array_length(load_String_value(str, set_ctrl));
  Node* coder = load_String_coder(str, set_ctrl);
  // Divide length by 2 if coder is UTF16
  return _gvn.transform(new RShiftINode(len, coder));
}
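
// (CODER_LATIN1 is 0 and CODER_UTF16 is 1, so the right shift above divides
// the backing byte-array length by 1 or 2 as appropriate.)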

Node* GraphKit::load_String_value(Node* str, bool set_ctrl) {
  int value_offset = java_lang_String::value_offset();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, NULL, 0);
  const TypePtr* value_field_type = string_type->add_offset(value_offset);
  const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
                                                  TypeAry::make(TypeInt::BYTE, TypeInt::POS),
                                                  ciTypeArrayKlass::make(T_BYTE), true, 0);
  Node* p = basic_plus_adr(str, str, value_offset);
  Node* load = access_load_at(str, p, value_field_type, value_type, T_OBJECT,
                              IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
  return load;
}

Node* GraphKit::load_String_coder(Node* str, bool set_ctrl) {
  if (!CompactStrings) {
    return intcon(java_lang_String::CODER_UTF16);
  }
  int coder_offset = java_lang_String::coder_offset();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, NULL, 0);
  const TypePtr* coder_field_type = string_type->add_offset(coder_offset);

  Node* p = basic_plus_adr(str, str, coder_offset);
  Node* load = access_load_at(str, p, coder_field_type, TypeInt::BYTE, T_BYTE,
                              IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
  return load;
}

void GraphKit::store_String_value(Node* str, Node* value) {
  int value_offset = java_lang_String::value_offset();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, NULL, 0);
  const TypePtr* value_field_type = string_type->add_offset(value_offset);

  access_store_at(str, basic_plus_adr(str, value_offset), value_field_type,
                  value, TypeAryPtr::BYTES, T_OBJECT, IN_HEAP | MO_UNORDERED);
}

void GraphKit::store_String_coder(Node* str, Node* value) {
  int coder_offset = java_lang_String::coder_offset();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, NULL, 0);
  const TypePtr* coder_field_type = string_type->add_offset(coder_offset);

  access_store_at(str, basic_plus_adr(str, coder_offset), coder_field_type,
                  value, TypeInt::BYTE, T_BYTE, IN_HEAP | MO_UNORDERED);
}

// Capture src and dst memory state with a MergeMemNode
Node* GraphKit::capture_memory(const TypePtr* src_type, const TypePtr* dst_type) {
  if (src_type == dst_type) {
    // Types are equal, we don't need a MergeMemNode
    return memory(src_type);
  }
  MergeMemNode* merge = MergeMemNode::make(map()->memory());
  record_for_igvn(merge); // fold it up later, if possible
  int src_idx = C->get_alias_index(src_type);
  int dst_idx = C->get_alias_index(dst_type);
  merge->set_memory_at(src_idx, memory(src_idx));
  merge->set_memory_at(dst_idx, memory(dst_idx));
  return merge;
}

Node* GraphKit::compress_string(Node* src, const TypeAryPtr* src_type, Node* dst, Node* count) {
  assert(Matcher::match_rule_supported(Op_StrCompressedCopy), "Intrinsic not supported");
  assert(src_type == TypeAryPtr::BYTES || src_type == TypeAryPtr::CHARS, "invalid source type");
  // If input and output memory types differ, capture both states to preserve
  // the dependency between preceding and subsequent loads/stores.
  // For example, the following program:
  // StoreB
  // compress_string
  // LoadB
  // has this memory graph (use->def):
  // LoadB -> compress_string -> CharMem
  // ...              -> StoreB -> ByteMem
  // The intrinsic hides the dependency between LoadB and StoreB, causing
  // the load to read from memory not containing the result of the StoreB.
  // The correct memory graph should look like this:
  // LoadB -> compress_string -> MergeMem(CharMem, StoreB(ByteMem))
  Node* mem = capture_memory(src_type, TypeAryPtr::BYTES);
  StrCompressedCopyNode* str = new StrCompressedCopyNode(control(), mem, src, dst, count);
  Node* res_mem = _gvn.transform(new SCMemProjNode(_gvn.transform(str)));
  set_memory(res_mem, TypeAryPtr::BYTES);
  return str;
}

void GraphKit::inflate_string(Node* src, Node* dst, const TypeAryPtr* dst_type, Node* count) {
  assert(Matcher::match_rule_supported(Op_StrInflatedCopy), "Intrinsic not supported");
  assert(dst_type == TypeAryPtr::BYTES || dst_type == TypeAryPtr::CHARS, "invalid dest type");
  // Capture src and dst memory (see comment in 'compress_string').
  Node* mem = capture_memory(TypeAryPtr::BYTES, dst_type);
  StrInflatedCopyNode* str = new StrInflatedCopyNode(control(), mem, src, dst, count);
  set_memory(_gvn.transform(str), dst_type);
}

void GraphKit::inflate_string_slow(Node* src, Node* dst, Node* start, Node* count) {
  /**
   * int i_char = start;
   * for (int i_byte = 0; i_byte < count; i_byte++) {
   *   dst[i_char++] = (char)(src[i_byte] & 0xff);
   * }
   */
  add_empty_predicates();
  C->set_has_loops(true);

  RegionNode* head = new RegionNode(3);
  head->init_req(1, control());
  gvn().set_type(head, Type::CONTROL);
  record_for_igvn(head);

  Node* i_byte = new PhiNode(head, TypeInt::INT);
  i_byte->init_req(1, intcon(0));
  gvn().set_type(i_byte, TypeInt::INT);
  record_for_igvn(i_byte);

  Node* i_char = new PhiNode(head, TypeInt::INT);
  i_char->init_req(1, start);
  gvn().set_type(i_char, TypeInt::INT);
  record_for_igvn(i_char);

  Node* mem = PhiNode::make(head, memory(TypeAryPtr::BYTES), Type::MEMORY, TypeAryPtr::BYTES);
  gvn().set_type(mem, Type::MEMORY);
  record_for_igvn(mem);
  set_control(head);
  set_memory(mem, TypeAryPtr::BYTES);
  Node* ch = load_array_element(control(), src, i_byte, TypeAryPtr::BYTES);
  Node* st = store_to_memory(control(), array_element_address(dst, i_char, T_BYTE),
                             AndI(ch, intcon(0xff)), T_CHAR, TypeAryPtr::BYTES, MemNode::unordered,
                             false, false, true /* mismatched */);

  IfNode* iff = create_and_map_if(head, Bool(CmpI(i_byte, count), BoolTest::lt), PROB_FAIR, COUNT_UNKNOWN);
  head->init_req(2, IfTrue(iff));
  mem->init_req(2, st);
  i_byte->init_req(2, AddI(i_byte, intcon(1)));
  i_char->init_req(2, AddI(i_char, intcon(2)));

  set_control(IfFalse(iff));
  set_memory(st, TypeAryPtr::BYTES);
}

Node* GraphKit::make_constant_from_field(ciField* field, Node* obj) {
  if (!field->is_constant()) {
    return NULL; // Field not marked as constant.
  }
  ciInstance* holder = NULL;
  if (!field->is_static()) {
    ciObject* const_oop = obj->bottom_type()->is_oopptr()->const_oop();
    if (const_oop != NULL && const_oop->is_instance()) {
      holder = const_oop->as_instance();
    }
  }
  const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(),
                                                        /*is_unsigned_load=*/false);
  if (con_type != NULL) {
    return makecon(con_type);
  }
  return NULL;
}