Path: blob/master/src/hotspot/share/c1/c1_Runtime1.cpp
40931 views
/*1* Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.2* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.3*4* This code is free software; you can redistribute it and/or modify it5* under the terms of the GNU General Public License version 2 only, as6* published by the Free Software Foundation.7*8* This code is distributed in the hope that it will be useful, but WITHOUT9* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or10* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License11* version 2 for more details (a copy is included in the LICENSE file that12* accompanied this code).13*14* You should have received a copy of the GNU General Public License version15* 2 along with this work; if not, write to the Free Software Foundation,16* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.17*18* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA19* or visit www.oracle.com if you need additional information or have any20* questions.21*22*/2324#include "precompiled.hpp"25#include "asm/codeBuffer.hpp"26#include "c1/c1_CodeStubs.hpp"27#include "c1/c1_Defs.hpp"28#include "c1/c1_FrameMap.hpp"29#include "c1/c1_LIRAssembler.hpp"30#include "c1/c1_MacroAssembler.hpp"31#include "c1/c1_Runtime1.hpp"32#include "classfile/javaClasses.inline.hpp"33#include "classfile/vmClasses.hpp"34#include "classfile/vmSymbols.hpp"35#include "code/codeBlob.hpp"36#include "code/compiledIC.hpp"37#include "code/pcDesc.hpp"38#include "code/scopeDesc.hpp"39#include "code/vtableStubs.hpp"40#include "compiler/compilationPolicy.hpp"41#include "compiler/disassembler.hpp"42#include "compiler/oopMap.hpp"43#include "gc/shared/barrierSet.hpp"44#include "gc/shared/c1/barrierSetC1.hpp"45#include "gc/shared/collectedHeap.hpp"46#include "interpreter/bytecode.hpp"47#include "interpreter/interpreter.hpp"48#include "jfr/support/jfrIntrinsics.hpp"49#include "logging/log.hpp"50#include "memory/allocation.inline.hpp"51#include "memory/oopFactory.hpp"52#include "memory/resourceArea.hpp"53#include "memory/universe.hpp"54#include "oops/access.inline.hpp"55#include "oops/klass.inline.hpp"56#include "oops/objArrayOop.inline.hpp"57#include "oops/objArrayKlass.hpp"58#include "oops/oop.inline.hpp"59#include "prims/jvmtiExport.hpp"60#include "runtime/atomic.hpp"61#include "runtime/biasedLocking.hpp"62#include "runtime/fieldDescriptor.inline.hpp"63#include "runtime/frame.inline.hpp"64#include "runtime/handles.inline.hpp"65#include "runtime/interfaceSupport.inline.hpp"66#include "runtime/javaCalls.hpp"67#include "runtime/sharedRuntime.hpp"68#include "runtime/stackWatermarkSet.hpp"69#include "runtime/stubRoutines.hpp"70#include "runtime/threadCritical.hpp"71#include "runtime/vframe.inline.hpp"72#include "runtime/vframeArray.hpp"73#include "runtime/vm_version.hpp"74#include "utilities/copy.hpp"75#include "utilities/events.hpp"767778// Implementation of StubAssembler7980StubAssembler::StubAssembler(CodeBuffer* code, const char * name, int stub_id) : C1_MacroAssembler(code) {81_name = name;82_must_gc_arguments = false;83_frame_size = no_frame_size;84_num_rt_args = 0;85_stub_id = stub_id;86}878889void StubAssembler::set_info(const char* name, bool must_gc_arguments) {90_name = name;91_must_gc_arguments = must_gc_arguments;92}939495void StubAssembler::set_frame_size(int size) {96if (_frame_size == no_frame_size) {97_frame_size = size;98}99assert(_frame_size == size, "can't change the frame size");100}101102103void StubAssembler::set_num_rt_args(int args) {104if (_num_rt_args == 0) {105_num_rt_args = args;106}107assert(_num_rt_args == args, "can't change the number of args");108}109110// Implementation of Runtime1111112CodeBlob* Runtime1::_blobs[Runtime1::number_of_ids];113const char *Runtime1::_blob_names[] = {114RUNTIME1_STUBS(STUB_NAME, LAST_STUB_NAME)115};116117#ifndef PRODUCT118// statistics119int Runtime1::_generic_arraycopystub_cnt = 0;120int Runtime1::_arraycopy_slowcase_cnt = 0;121int Runtime1::_arraycopy_checkcast_cnt = 0;122int Runtime1::_arraycopy_checkcast_attempt_cnt = 0;123int Runtime1::_new_type_array_slowcase_cnt = 0;124int Runtime1::_new_object_array_slowcase_cnt = 0;125int Runtime1::_new_instance_slowcase_cnt = 0;126int Runtime1::_new_multi_array_slowcase_cnt = 0;127int Runtime1::_monitorenter_slowcase_cnt = 0;128int Runtime1::_monitorexit_slowcase_cnt = 0;129int Runtime1::_patch_code_slowcase_cnt = 0;130int Runtime1::_throw_range_check_exception_count = 0;131int Runtime1::_throw_index_exception_count = 0;132int Runtime1::_throw_div0_exception_count = 0;133int Runtime1::_throw_null_pointer_exception_count = 0;134int Runtime1::_throw_class_cast_exception_count = 0;135int Runtime1::_throw_incompatible_class_change_error_count = 0;136int Runtime1::_throw_count = 0;137138static int _byte_arraycopy_stub_cnt = 0;139static int _short_arraycopy_stub_cnt = 0;140static int _int_arraycopy_stub_cnt = 0;141static int _long_arraycopy_stub_cnt = 0;142static int _oop_arraycopy_stub_cnt = 0;143144address Runtime1::arraycopy_count_address(BasicType type) {145switch (type) {146case T_BOOLEAN:147case T_BYTE: return (address)&_byte_arraycopy_stub_cnt;148case T_CHAR:149case T_SHORT: return (address)&_short_arraycopy_stub_cnt;150case T_FLOAT:151case T_INT: return (address)&_int_arraycopy_stub_cnt;152case T_DOUBLE:153case T_LONG: return (address)&_long_arraycopy_stub_cnt;154case T_ARRAY:155case T_OBJECT: return (address)&_oop_arraycopy_stub_cnt;156default:157ShouldNotReachHere();158return NULL;159}160}161162163#endif164165// Simple helper to see if the caller of a runtime stub which166// entered the VM has been deoptimized167168static bool caller_is_deopted(JavaThread* current) {169RegisterMap reg_map(current, false);170frame runtime_frame = current->last_frame();171frame caller_frame = runtime_frame.sender(®_map);172assert(caller_frame.is_compiled_frame(), "must be compiled");173return caller_frame.is_deoptimized_frame();174}175176// Stress deoptimization177static void deopt_caller(JavaThread* current) {178if (!caller_is_deopted(current)) {179RegisterMap reg_map(current, false);180frame runtime_frame = current->last_frame();181frame caller_frame = runtime_frame.sender(®_map);182Deoptimization::deoptimize_frame(current, caller_frame.id());183assert(caller_is_deopted(current), "Must be deoptimized");184}185}186187class StubIDStubAssemblerCodeGenClosure: public StubAssemblerCodeGenClosure {188private:189Runtime1::StubID _id;190public:191StubIDStubAssemblerCodeGenClosure(Runtime1::StubID id) : _id(id) {}192virtual OopMapSet* generate_code(StubAssembler* sasm) {193return Runtime1::generate_code_for(_id, sasm);194}195};196197CodeBlob* Runtime1::generate_blob(BufferBlob* buffer_blob, int stub_id, const char* name, bool expect_oop_map, StubAssemblerCodeGenClosure* cl) {198ResourceMark rm;199// create code buffer for code storage200CodeBuffer code(buffer_blob);201202OopMapSet* oop_maps;203int frame_size;204bool must_gc_arguments;205206Compilation::setup_code_buffer(&code, 0);207208// create assembler for code generation209StubAssembler* sasm = new StubAssembler(&code, name, stub_id);210// generate code for runtime stub211oop_maps = cl->generate_code(sasm);212assert(oop_maps == NULL || sasm->frame_size() != no_frame_size,213"if stub has an oop map it must have a valid frame size");214assert(!expect_oop_map || oop_maps != NULL, "must have an oopmap");215216// align so printing shows nop's instead of random code at the end (SimpleStubs are aligned)217sasm->align(BytesPerWord);218// make sure all code is in code buffer219sasm->flush();220221frame_size = sasm->frame_size();222must_gc_arguments = sasm->must_gc_arguments();223// create blob - distinguish a few special cases224CodeBlob* blob = RuntimeStub::new_runtime_stub(name,225&code,226CodeOffsets::frame_never_safe,227frame_size,228oop_maps,229must_gc_arguments);230assert(blob != NULL, "blob must exist");231return blob;232}233234void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) {235assert(0 <= id && id < number_of_ids, "illegal stub id");236bool expect_oop_map = true;237#ifdef ASSERT238// Make sure that stubs that need oopmaps have them239switch (id) {240// These stubs don't need to have an oopmap241case dtrace_object_alloc_id:242case slow_subtype_check_id:243case fpu2long_stub_id:244case unwind_exception_id:245case counter_overflow_id:246#if defined(PPC32)247case handle_exception_nofpu_id:248#endif249expect_oop_map = false;250break;251default:252break;253}254#endif255StubIDStubAssemblerCodeGenClosure cl(id);256CodeBlob* blob = generate_blob(buffer_blob, id, name_for(id), expect_oop_map, &cl);257// install blob258_blobs[id] = blob;259}260261void Runtime1::initialize(BufferBlob* blob) {262// platform-dependent initialization263initialize_pd();264// generate stubs265for (int id = 0; id < number_of_ids; id++) generate_blob_for(blob, (StubID)id);266// printing267#ifndef PRODUCT268if (PrintSimpleStubs) {269ResourceMark rm;270for (int id = 0; id < number_of_ids; id++) {271_blobs[id]->print();272if (_blobs[id]->oop_maps() != NULL) {273_blobs[id]->oop_maps()->print();274}275}276}277#endif278BarrierSetC1* bs = BarrierSet::barrier_set()->barrier_set_c1();279bs->generate_c1_runtime_stubs(blob);280}281282CodeBlob* Runtime1::blob_for(StubID id) {283assert(0 <= id && id < number_of_ids, "illegal stub id");284return _blobs[id];285}286287288const char* Runtime1::name_for(StubID id) {289assert(0 <= id && id < number_of_ids, "illegal stub id");290return _blob_names[id];291}292293const char* Runtime1::name_for_address(address entry) {294for (int id = 0; id < number_of_ids; id++) {295if (entry == entry_for((StubID)id)) return name_for((StubID)id);296}297298#define FUNCTION_CASE(a, f) \299if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f)) return #f300301FUNCTION_CASE(entry, os::javaTimeMillis);302FUNCTION_CASE(entry, os::javaTimeNanos);303FUNCTION_CASE(entry, SharedRuntime::OSR_migration_end);304FUNCTION_CASE(entry, SharedRuntime::d2f);305FUNCTION_CASE(entry, SharedRuntime::d2i);306FUNCTION_CASE(entry, SharedRuntime::d2l);307FUNCTION_CASE(entry, SharedRuntime::dcos);308FUNCTION_CASE(entry, SharedRuntime::dexp);309FUNCTION_CASE(entry, SharedRuntime::dlog);310FUNCTION_CASE(entry, SharedRuntime::dlog10);311FUNCTION_CASE(entry, SharedRuntime::dpow);312FUNCTION_CASE(entry, SharedRuntime::drem);313FUNCTION_CASE(entry, SharedRuntime::dsin);314FUNCTION_CASE(entry, SharedRuntime::dtan);315FUNCTION_CASE(entry, SharedRuntime::f2i);316FUNCTION_CASE(entry, SharedRuntime::f2l);317FUNCTION_CASE(entry, SharedRuntime::frem);318FUNCTION_CASE(entry, SharedRuntime::l2d);319FUNCTION_CASE(entry, SharedRuntime::l2f);320FUNCTION_CASE(entry, SharedRuntime::ldiv);321FUNCTION_CASE(entry, SharedRuntime::lmul);322FUNCTION_CASE(entry, SharedRuntime::lrem);323FUNCTION_CASE(entry, SharedRuntime::lrem);324FUNCTION_CASE(entry, SharedRuntime::dtrace_method_entry);325FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit);326FUNCTION_CASE(entry, is_instance_of);327FUNCTION_CASE(entry, trace_block_entry);328#ifdef JFR_HAVE_INTRINSICS329FUNCTION_CASE(entry, JFR_TIME_FUNCTION);330#endif331FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32());332FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32C());333FUNCTION_CASE(entry, StubRoutines::vectorizedMismatch());334FUNCTION_CASE(entry, StubRoutines::dexp());335FUNCTION_CASE(entry, StubRoutines::dlog());336FUNCTION_CASE(entry, StubRoutines::dlog10());337FUNCTION_CASE(entry, StubRoutines::dpow());338FUNCTION_CASE(entry, StubRoutines::dsin());339FUNCTION_CASE(entry, StubRoutines::dcos());340FUNCTION_CASE(entry, StubRoutines::dtan());341342#undef FUNCTION_CASE343344// Soft float adds more runtime names.345return pd_name_for_address(entry);346}347348349JRT_ENTRY(void, Runtime1::new_instance(JavaThread* current, Klass* klass))350NOT_PRODUCT(_new_instance_slowcase_cnt++;)351352assert(klass->is_klass(), "not a class");353Handle holder(current, klass->klass_holder()); // keep the klass alive354InstanceKlass* h = InstanceKlass::cast(klass);355h->check_valid_for_instantiation(true, CHECK);356// make sure klass is initialized357h->initialize(CHECK);358// allocate instance and return via TLS359oop obj = h->allocate_instance(CHECK);360current->set_vm_result(obj);361JRT_END362363364JRT_ENTRY(void, Runtime1::new_type_array(JavaThread* current, Klass* klass, jint length))365NOT_PRODUCT(_new_type_array_slowcase_cnt++;)366// Note: no handle for klass needed since they are not used367// anymore after new_typeArray() and no GC can happen before.368// (This may have to change if this code changes!)369assert(klass->is_klass(), "not a class");370BasicType elt_type = TypeArrayKlass::cast(klass)->element_type();371oop obj = oopFactory::new_typeArray(elt_type, length, CHECK);372current->set_vm_result(obj);373// This is pretty rare but this runtime patch is stressful to deoptimization374// if we deoptimize here so force a deopt to stress the path.375if (DeoptimizeALot) {376deopt_caller(current);377}378379JRT_END380381382JRT_ENTRY(void, Runtime1::new_object_array(JavaThread* current, Klass* array_klass, jint length))383NOT_PRODUCT(_new_object_array_slowcase_cnt++;)384385// Note: no handle for klass needed since they are not used386// anymore after new_objArray() and no GC can happen before.387// (This may have to change if this code changes!)388assert(array_klass->is_klass(), "not a class");389Handle holder(current, array_klass->klass_holder()); // keep the klass alive390Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass();391objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK);392current->set_vm_result(obj);393// This is pretty rare but this runtime patch is stressful to deoptimization394// if we deoptimize here so force a deopt to stress the path.395if (DeoptimizeALot) {396deopt_caller(current);397}398JRT_END399400401JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* current, Klass* klass, int rank, jint* dims))402NOT_PRODUCT(_new_multi_array_slowcase_cnt++;)403404assert(klass->is_klass(), "not a class");405assert(rank >= 1, "rank must be nonzero");406Handle holder(current, klass->klass_holder()); // keep the klass alive407oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);408current->set_vm_result(obj);409JRT_END410411412JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* current, StubID id))413tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", id);414JRT_END415416417JRT_ENTRY(void, Runtime1::throw_array_store_exception(JavaThread* current, oopDesc* obj))418ResourceMark rm(current);419const char* klass_name = obj->klass()->external_name();420SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArrayStoreException(), klass_name);421JRT_END422423424// counter_overflow() is called from within C1-compiled methods. The enclosing method is the method425// associated with the top activation record. The inlinee (that is possibly included in the enclosing426// method) method is passed as an argument. In order to do that it is embedded in the code as427// a constant.428static nmethod* counter_overflow_helper(JavaThread* current, int branch_bci, Method* m) {429nmethod* osr_nm = NULL;430methodHandle method(current, m);431432RegisterMap map(current, false);433frame fr = current->last_frame().sender(&map);434nmethod* nm = (nmethod*) fr.cb();435assert(nm!= NULL && nm->is_nmethod(), "Sanity check");436methodHandle enclosing_method(current, nm->method());437438CompLevel level = (CompLevel)nm->comp_level();439int bci = InvocationEntryBci;440if (branch_bci != InvocationEntryBci) {441// Compute destination bci442address pc = method()->code_base() + branch_bci;443Bytecodes::Code branch = Bytecodes::code_at(method(), pc);444int offset = 0;445switch (branch) {446case Bytecodes::_if_icmplt: case Bytecodes::_iflt:447case Bytecodes::_if_icmpgt: case Bytecodes::_ifgt:448case Bytecodes::_if_icmple: case Bytecodes::_ifle:449case Bytecodes::_if_icmpge: case Bytecodes::_ifge:450case Bytecodes::_if_icmpeq: case Bytecodes::_if_acmpeq: case Bytecodes::_ifeq:451case Bytecodes::_if_icmpne: case Bytecodes::_if_acmpne: case Bytecodes::_ifne:452case Bytecodes::_ifnull: case Bytecodes::_ifnonnull: case Bytecodes::_goto:453offset = (int16_t)Bytes::get_Java_u2(pc + 1);454break;455case Bytecodes::_goto_w:456offset = Bytes::get_Java_u4(pc + 1);457break;458default: ;459}460bci = branch_bci + offset;461}462osr_nm = CompilationPolicy::event(enclosing_method, method, branch_bci, bci, level, nm, current);463return osr_nm;464}465466JRT_BLOCK_ENTRY(address, Runtime1::counter_overflow(JavaThread* current, int bci, Method* method))467nmethod* osr_nm;468JRT_BLOCK469osr_nm = counter_overflow_helper(current, bci, method);470if (osr_nm != NULL) {471RegisterMap map(current, false);472frame fr = current->last_frame().sender(&map);473Deoptimization::deoptimize_frame(current, fr.id());474}475JRT_BLOCK_END476return NULL;477JRT_END478479extern void vm_exit(int code);480481// Enter this method from compiled code handler below. This is where we transition482// to VM mode. This is done as a helper routine so that the method called directly483// from compiled code does not have to transition to VM. This allows the entry484// method to see if the nmethod that we have just looked up a handler for has485// been deoptimized while we were in the vm. This simplifies the assembly code486// cpu directories.487//488// We are entering here from exception stub (via the entry method below)489// If there is a compiled exception handler in this method, we will continue there;490// otherwise we will unwind the stack and continue at the caller of top frame method491// Note: we enter in Java using a special JRT wrapper. This wrapper allows us to492// control the area where we can allow a safepoint. After we exit the safepoint area we can493// check to see if the handler we are going to return is now in a nmethod that has494// been deoptimized. If that is the case we return the deopt blob495// unpack_with_exception entry instead. This makes life for the exception blob easier496// because making that same check and diverting is painful from assembly language.497JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* current, oopDesc* ex, address pc, nmethod*& nm))498// Reset method handle flag.499current->set_is_method_handle_return(false);500501Handle exception(current, ex);502503// This function is called when we are about to throw an exception. Therefore,504// we have to poll the stack watermark barrier to make sure that not yet safe505// stack frames are made safe before returning into them.506if (current->last_frame().cb() == Runtime1::blob_for(Runtime1::handle_exception_from_callee_id)) {507// The Runtime1::handle_exception_from_callee_id handler is invoked after the508// frame has been unwound. It instead builds its own stub frame, to call the509// runtime. But the throwing frame has already been unwound here.510StackWatermarkSet::after_unwind(current);511}512513nm = CodeCache::find_nmethod(pc);514assert(nm != NULL, "this is not an nmethod");515// Adjust the pc as needed/516if (nm->is_deopt_pc(pc)) {517RegisterMap map(current, false);518frame exception_frame = current->last_frame().sender(&map);519// if the frame isn't deopted then pc must not correspond to the caller of last_frame520assert(exception_frame.is_deoptimized_frame(), "must be deopted");521pc = exception_frame.pc();522}523assert(exception.not_null(), "NULL exceptions should be handled by throw_exception");524// Check that exception is a subclass of Throwable525assert(exception->is_a(vmClasses::Throwable_klass()),526"Exception not subclass of Throwable");527528// debugging support529// tracing530if (log_is_enabled(Info, exceptions)) {531ResourceMark rm;532stringStream tempst;533assert(nm->method() != NULL, "Unexpected NULL method()");534tempst.print("C1 compiled method <%s>\n"535" at PC" INTPTR_FORMAT " for thread " INTPTR_FORMAT,536nm->method()->print_value_string(), p2i(pc), p2i(current));537Exceptions::log_exception(exception, tempst.as_string());538}539// for AbortVMOnException flag540Exceptions::debug_check_abort(exception);541542// Check the stack guard pages and reenable them if necessary and there is543// enough space on the stack to do so. Use fast exceptions only if the guard544// pages are enabled.545bool guard_pages_enabled = current->stack_overflow_state()->reguard_stack_if_needed();546547if (JvmtiExport::can_post_on_exceptions()) {548// To ensure correct notification of exception catches and throws549// we have to deoptimize here. If we attempted to notify the550// catches and throws during this exception lookup it's possible551// we could deoptimize on the way out of the VM and end back in552// the interpreter at the throw site. This would result in double553// notifications since the interpreter would also notify about554// these same catches and throws as it unwound the frame.555556RegisterMap reg_map(current);557frame stub_frame = current->last_frame();558frame caller_frame = stub_frame.sender(®_map);559560// We don't really want to deoptimize the nmethod itself since we561// can actually continue in the exception handler ourselves but I562// don't see an easy way to have the desired effect.563Deoptimization::deoptimize_frame(current, caller_frame.id());564assert(caller_is_deopted(current), "Must be deoptimized");565566return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();567}568569// ExceptionCache is used only for exceptions at call sites and not for implicit exceptions570if (guard_pages_enabled) {571address fast_continuation = nm->handler_for_exception_and_pc(exception, pc);572if (fast_continuation != NULL) {573// Set flag if return address is a method handle call site.574current->set_is_method_handle_return(nm->is_method_handle_return(pc));575return fast_continuation;576}577}578579// If the stack guard pages are enabled, check whether there is a handler in580// the current method. Otherwise (guard pages disabled), force an unwind and581// skip the exception cache update (i.e., just leave continuation==NULL).582address continuation = NULL;583if (guard_pages_enabled) {584585// New exception handling mechanism can support inlined methods586// with exception handlers since the mappings are from PC to PC587588// Clear out the exception oop and pc since looking up an589// exception handler can cause class loading, which might throw an590// exception and those fields are expected to be clear during591// normal bytecode execution.592current->clear_exception_oop_and_pc();593594bool recursive_exception = false;595continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false, recursive_exception);596// If an exception was thrown during exception dispatch, the exception oop may have changed597current->set_exception_oop(exception());598current->set_exception_pc(pc);599600// the exception cache is used only by non-implicit exceptions601// Update the exception cache only when there didn't happen602// another exception during the computation of the compiled603// exception handler. Checking for exception oop equality is not604// sufficient because some exceptions are pre-allocated and reused.605if (continuation != NULL && !recursive_exception) {606nm->add_handler_for_exception_and_pc(exception, pc, continuation);607}608}609610current->set_vm_result(exception());611// Set flag if return address is a method handle call site.612current->set_is_method_handle_return(nm->is_method_handle_return(pc));613614if (log_is_enabled(Info, exceptions)) {615ResourceMark rm;616log_info(exceptions)("Thread " PTR_FORMAT " continuing at PC " PTR_FORMAT617" for exception thrown at PC " PTR_FORMAT,618p2i(current), p2i(continuation), p2i(pc));619}620621return continuation;622JRT_END623624// Enter this method from compiled code only if there is a Java exception handler625// in the method handling the exception.626// We are entering here from exception stub. We don't do a normal VM transition here.627// We do it in a helper. This is so we can check to see if the nmethod we have just628// searched for an exception handler has been deoptimized in the meantime.629address Runtime1::exception_handler_for_pc(JavaThread* current) {630oop exception = current->exception_oop();631address pc = current->exception_pc();632// Still in Java mode633DEBUG_ONLY(NoHandleMark nhm);634nmethod* nm = NULL;635address continuation = NULL;636{637// Enter VM mode by calling the helper638ResetNoHandleMark rnhm;639continuation = exception_handler_for_pc_helper(current, exception, pc, nm);640}641// Back in JAVA, use no oops DON'T safepoint642643// Now check to see if the nmethod we were called from is now deoptimized.644// If so we must return to the deopt blob and deoptimize the nmethod645if (nm != NULL && caller_is_deopted(current)) {646continuation = SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();647}648649assert(continuation != NULL, "no handler found");650return continuation;651}652653654JRT_ENTRY(void, Runtime1::throw_range_check_exception(JavaThread* current, int index, arrayOopDesc* a))655NOT_PRODUCT(_throw_range_check_exception_count++;)656const int len = 35;657assert(len < strlen("Index %d out of bounds for length %d"), "Must allocate more space for message.");658char message[2 * jintAsStringSize + len];659sprintf(message, "Index %d out of bounds for length %d", index, a->length());660SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message);661JRT_END662663664JRT_ENTRY(void, Runtime1::throw_index_exception(JavaThread* current, int index))665NOT_PRODUCT(_throw_index_exception_count++;)666char message[16];667sprintf(message, "%d", index);668SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IndexOutOfBoundsException(), message);669JRT_END670671672JRT_ENTRY(void, Runtime1::throw_div0_exception(JavaThread* current))673NOT_PRODUCT(_throw_div0_exception_count++;)674SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArithmeticException(), "/ by zero");675JRT_END676677678JRT_ENTRY(void, Runtime1::throw_null_pointer_exception(JavaThread* current))679NOT_PRODUCT(_throw_null_pointer_exception_count++;)680SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException());681JRT_END682683684JRT_ENTRY(void, Runtime1::throw_class_cast_exception(JavaThread* current, oopDesc* object))685NOT_PRODUCT(_throw_class_cast_exception_count++;)686ResourceMark rm(current);687char* message = SharedRuntime::generate_class_cast_message(current, object->klass());688SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ClassCastException(), message);689JRT_END690691692JRT_ENTRY(void, Runtime1::throw_incompatible_class_change_error(JavaThread* current))693NOT_PRODUCT(_throw_incompatible_class_change_error_count++;)694ResourceMark rm(current);695SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IncompatibleClassChangeError());696JRT_END697698699JRT_BLOCK_ENTRY(void, Runtime1::monitorenter(JavaThread* current, oopDesc* obj, BasicObjectLock* lock))700NOT_PRODUCT(_monitorenter_slowcase_cnt++;)701if (!UseFastLocking) {702lock->set_obj(obj);703}704assert(obj == lock->obj(), "must match");705SharedRuntime::monitor_enter_helper(obj, lock->lock(), current);706JRT_END707708709JRT_LEAF(void, Runtime1::monitorexit(JavaThread* current, BasicObjectLock* lock))710NOT_PRODUCT(_monitorexit_slowcase_cnt++;)711assert(current->last_Java_sp(), "last_Java_sp must be set");712oop obj = lock->obj();713assert(oopDesc::is_oop(obj), "must be NULL or an object");714SharedRuntime::monitor_exit_helper(obj, lock->lock(), current);715JRT_END716717// Cf. OptoRuntime::deoptimize_caller_frame718JRT_ENTRY(void, Runtime1::deoptimize(JavaThread* current, jint trap_request))719// Called from within the owner thread, so no need for safepoint720RegisterMap reg_map(current, false);721frame stub_frame = current->last_frame();722assert(stub_frame.is_runtime_frame(), "Sanity check");723frame caller_frame = stub_frame.sender(®_map);724nmethod* nm = caller_frame.cb()->as_nmethod_or_null();725assert(nm != NULL, "Sanity check");726methodHandle method(current, nm->method());727assert(nm == CodeCache::find_nmethod(caller_frame.pc()), "Should be the same");728Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);729Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);730731if (action == Deoptimization::Action_make_not_entrant) {732if (nm->make_not_entrant()) {733if (reason == Deoptimization::Reason_tenured) {734MethodData* trap_mdo = Deoptimization::get_method_data(current, method, true /*create_if_missing*/);735if (trap_mdo != NULL) {736trap_mdo->inc_tenure_traps();737}738}739}740}741742// Deoptimize the caller frame.743Deoptimization::deoptimize_frame(current, caller_frame.id());744// Return to the now deoptimized frame.745JRT_END746747748#ifndef DEOPTIMIZE_WHEN_PATCHING749750static Klass* resolve_field_return_klass(const methodHandle& caller, int bci, TRAPS) {751Bytecode_field field_access(caller, bci);752// This can be static or non-static field access753Bytecodes::Code code = field_access.code();754755// We must load class, initialize class and resolve the field756fieldDescriptor result; // initialize class if needed757constantPoolHandle constants(THREAD, caller->constants());758LinkResolver::resolve_field_access(result, constants, field_access.index(), caller, Bytecodes::java_code(code), CHECK_NULL);759return result.field_holder();760}761762763//764// This routine patches sites where a class wasn't loaded or765// initialized at the time the code was generated. It handles766// references to classes, fields and forcing of initialization. Most767// of the cases are straightforward and involving simply forcing768// resolution of a class, rewriting the instruction stream with the769// needed constant and replacing the call in this function with the770// patched code. The case for static field is more complicated since771// the thread which is in the process of initializing a class can772// access it's static fields but other threads can't so the code773// either has to deoptimize when this case is detected or execute a774// check that the current thread is the initializing thread. The775// current776//777// Patches basically look like this:778//779//780// patch_site: jmp patch stub ;; will be patched781// continue: ...782// ...783// ...784// ...785//786// They have a stub which looks like this:787//788// ;; patch body789// movl <const>, reg (for class constants)790// <or> movl [reg1 + <const>], reg (for field offsets)791// <or> movl reg, [reg1 + <const>] (for field offsets)792// <being_init offset> <bytes to copy> <bytes to skip>793// patch_stub: call Runtime1::patch_code (through a runtime stub)794// jmp patch_site795//796//797// A normal patch is done by rewriting the patch body, usually a move,798// and then copying it into place over top of the jmp instruction799// being careful to flush caches and doing it in an MP-safe way. The800// constants following the patch body are used to find various pieces801// of the patch relative to the call site for Runtime1::patch_code.802// The case for getstatic and putstatic is more complicated because803// getstatic and putstatic have special semantics when executing while804// the class is being initialized. getstatic/putstatic on a class805// which is being_initialized may be executed by the initializing806// thread but other threads have to block when they execute it. This807// is accomplished in compiled code by executing a test of the current808// thread against the initializing thread of the class. It's emitted809// as boilerplate in their stub which allows the patched code to be810// executed before it's copied back into the main body of the nmethod.811//812// being_init: get_thread(<tmp reg>813// cmpl [reg1 + <init_thread_offset>], <tmp reg>814// jne patch_stub815// movl [reg1 + <const>], reg (for field offsets) <or>816// movl reg, [reg1 + <const>] (for field offsets)817// jmp continue818// <being_init offset> <bytes to copy> <bytes to skip>819// patch_stub: jmp Runtim1::patch_code (through a runtime stub)820// jmp patch_site821//822// If the class is being initialized the patch body is rewritten and823// the patch site is rewritten to jump to being_init, instead of824// patch_stub. Whenever this code is executed it checks the current825// thread against the intializing thread so other threads will enter826// the runtime and end up blocked waiting the class to finish827// initializing inside the calls to resolve_field below. The828// initializing class will continue on it's way. Once the class is829// fully_initialized, the intializing_thread of the class becomes830// NULL, so the next thread to execute this code will fail the test,831// call into patch_code and complete the patching process by copying832// the patch body back into the main part of the nmethod and resume833// executing.834835// NB:836//837// Patchable instruction sequences inherently exhibit race conditions,838// where thread A is patching an instruction at the same time thread B839// is executing it. The algorithms we use ensure that any observation840// that B can make on any intermediate states during A's patching will841// always end up with a correct outcome. This is easiest if there are842// few or no intermediate states. (Some inline caches have two843// related instructions that must be patched in tandem. For those,844// intermediate states seem to be unavoidable, but we will get the845// right answer from all possible observation orders.)846//847// When patching the entry instruction at the head of a method, or a848// linkable call instruction inside of a method, we try very hard to849// use a patch sequence which executes as a single memory transaction.850// This means, in practice, that when thread A patches an instruction,851// it should patch a 32-bit or 64-bit word that somehow overlaps the852// instruction or is contained in it. We believe that memory hardware853// will never break up such a word write, if it is naturally aligned854// for the word being written. We also know that some CPUs work very855// hard to create atomic updates even of naturally unaligned words,856// but we don't want to bet the farm on this always working.857//858// Therefore, if there is any chance of a race condition, we try to859// patch only naturally aligned words, as single, full-word writes.860861JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_id ))862NOT_PRODUCT(_patch_code_slowcase_cnt++;)863864ResourceMark rm(current);865RegisterMap reg_map(current, false);866frame runtime_frame = current->last_frame();867frame caller_frame = runtime_frame.sender(®_map);868869// last java frame on stack870vframeStream vfst(current, true);871assert(!vfst.at_end(), "Java frame must exist");872873methodHandle caller_method(current, vfst.method());874// Note that caller_method->code() may not be same as caller_code because of OSR's875// Note also that in the presence of inlining it is not guaranteed876// that caller_method() == caller_code->method()877878int bci = vfst.bci();879Bytecodes::Code code = caller_method()->java_code_at(bci);880881// this is used by assertions in the access_field_patching_id882BasicType patch_field_type = T_ILLEGAL;883bool deoptimize_for_volatile = false;884bool deoptimize_for_atomic = false;885int patch_field_offset = -1;886Klass* init_klass = NULL; // klass needed by load_klass_patching code887Klass* load_klass = NULL; // klass needed by load_klass_patching code888Handle mirror(current, NULL); // oop needed by load_mirror_patching code889Handle appendix(current, NULL); // oop needed by appendix_patching code890bool load_klass_or_mirror_patch_id =891(stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);892893if (stub_id == Runtime1::access_field_patching_id) {894895Bytecode_field field_access(caller_method, bci);896fieldDescriptor result; // initialize class if needed897Bytecodes::Code code = field_access.code();898constantPoolHandle constants(current, caller_method->constants());899LinkResolver::resolve_field_access(result, constants, field_access.index(), caller_method, Bytecodes::java_code(code), CHECK);900patch_field_offset = result.offset();901902// If we're patching a field which is volatile then at compile it903// must not have been know to be volatile, so the generated code904// isn't correct for a volatile reference. The nmethod has to be905// deoptimized so that the code can be regenerated correctly.906// This check is only needed for access_field_patching since this907// is the path for patching field offsets. load_klass is only908// used for patching references to oops which don't need special909// handling in the volatile case.910911deoptimize_for_volatile = result.access_flags().is_volatile();912913// If we are patching a field which should be atomic, then914// the generated code is not correct either, force deoptimizing.915// We need to only cover T_LONG and T_DOUBLE fields, as we can916// break access atomicity only for them.917918// Strictly speaking, the deoptimization on 64-bit platforms919// is unnecessary, and T_LONG stores on 32-bit platforms need920// to be handled by special patching code when AlwaysAtomicAccesses921// becomes product feature. At this point, we are still going922// for the deoptimization for consistency against volatile923// accesses.924925patch_field_type = result.field_type();926deoptimize_for_atomic = (AlwaysAtomicAccesses && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG));927928} else if (load_klass_or_mirror_patch_id) {929Klass* k = NULL;930switch (code) {931case Bytecodes::_putstatic:932case Bytecodes::_getstatic:933{ Klass* klass = resolve_field_return_klass(caller_method, bci, CHECK);934init_klass = klass;935mirror = Handle(current, klass->java_mirror());936}937break;938case Bytecodes::_new:939{ Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci));940k = caller_method->constants()->klass_at(bnew.index(), CHECK);941}942break;943case Bytecodes::_multianewarray:944{ Bytecode_multianewarray mna(caller_method(), caller_method->bcp_from(bci));945k = caller_method->constants()->klass_at(mna.index(), CHECK);946}947break;948case Bytecodes::_instanceof:949{ Bytecode_instanceof io(caller_method(), caller_method->bcp_from(bci));950k = caller_method->constants()->klass_at(io.index(), CHECK);951}952break;953case Bytecodes::_checkcast:954{ Bytecode_checkcast cc(caller_method(), caller_method->bcp_from(bci));955k = caller_method->constants()->klass_at(cc.index(), CHECK);956}957break;958case Bytecodes::_anewarray:959{ Bytecode_anewarray anew(caller_method(), caller_method->bcp_from(bci));960Klass* ek = caller_method->constants()->klass_at(anew.index(), CHECK);961k = ek->array_klass(CHECK);962}963break;964case Bytecodes::_ldc:965case Bytecodes::_ldc_w:966{967Bytecode_loadconstant cc(caller_method, bci);968oop m = cc.resolve_constant(CHECK);969mirror = Handle(current, m);970}971break;972default: fatal("unexpected bytecode for load_klass_or_mirror_patch_id");973}974load_klass = k;975} else if (stub_id == load_appendix_patching_id) {976Bytecode_invoke bytecode(caller_method, bci);977Bytecodes::Code bc = bytecode.invoke_code();978979CallInfo info;980constantPoolHandle pool(current, caller_method->constants());981int index = bytecode.index();982LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);983switch (bc) {984case Bytecodes::_invokehandle: {985int cache_index = ConstantPool::decode_cpcache_index(index, true);986assert(cache_index >= 0 && cache_index < pool->cache()->length(), "unexpected cache index");987ConstantPoolCacheEntry* cpce = pool->cache()->entry_at(cache_index);988cpce->set_method_handle(pool, info);989appendix = Handle(current, cpce->appendix_if_resolved(pool)); // just in case somebody already resolved the entry990break;991}992case Bytecodes::_invokedynamic: {993ConstantPoolCacheEntry* cpce = pool->invokedynamic_cp_cache_entry_at(index);994cpce->set_dynamic_call(pool, info);995appendix = Handle(current, cpce->appendix_if_resolved(pool)); // just in case somebody already resolved the entry996break;997}998default: fatal("unexpected bytecode for load_appendix_patching_id");999}1000} else {1001ShouldNotReachHere();1002}10031004if (deoptimize_for_volatile || deoptimize_for_atomic) {1005// At compile time we assumed the field wasn't volatile/atomic but after1006// loading it turns out it was volatile/atomic so we have to throw the1007// compiled code out and let it be regenerated.1008if (TracePatching) {1009if (deoptimize_for_volatile) {1010tty->print_cr("Deoptimizing for patching volatile field reference");1011}1012if (deoptimize_for_atomic) {1013tty->print_cr("Deoptimizing for patching atomic field reference");1014}1015}10161017// It's possible the nmethod was invalidated in the last1018// safepoint, but if it's still alive then make it not_entrant.1019nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());1020if (nm != NULL) {1021nm->make_not_entrant();1022}10231024Deoptimization::deoptimize_frame(current, caller_frame.id());10251026// Return to the now deoptimized frame.1027}10281029// Now copy code back10301031{1032MutexLocker ml_patch (current, Patching_lock, Mutex::_no_safepoint_check_flag);1033//1034// Deoptimization may have happened while we waited for the lock.1035// In that case we don't bother to do any patching we just return1036// and let the deopt happen1037if (!caller_is_deopted(current)) {1038NativeGeneralJump* jump = nativeGeneralJump_at(caller_frame.pc());1039address instr_pc = jump->jump_destination();1040NativeInstruction* ni = nativeInstruction_at(instr_pc);1041if (ni->is_jump() ) {1042// the jump has not been patched yet1043// The jump destination is slow case and therefore not part of the stubs1044// (stubs are only for StaticCalls)10451046// format of buffer1047// ....1048// instr byte 0 <-- copy_buff1049// instr byte 11050// ..1051// instr byte n-11052// n1053// .... <-- call destination10541055address stub_location = caller_frame.pc() + PatchingStub::patch_info_offset();1056unsigned char* byte_count = (unsigned char*) (stub_location - 1);1057unsigned char* byte_skip = (unsigned char*) (stub_location - 2);1058unsigned char* being_initialized_entry_offset = (unsigned char*) (stub_location - 3);1059address copy_buff = stub_location - *byte_skip - *byte_count;1060address being_initialized_entry = stub_location - *being_initialized_entry_offset;1061if (TracePatching) {1062ttyLocker ttyl;1063tty->print_cr(" Patching %s at bci %d at address " INTPTR_FORMAT " (%s)", Bytecodes::name(code), bci,1064p2i(instr_pc), (stub_id == Runtime1::access_field_patching_id) ? "field" : "klass");1065nmethod* caller_code = CodeCache::find_nmethod(caller_frame.pc());1066assert(caller_code != NULL, "nmethod not found");10671068// NOTE we use pc() not original_pc() because we already know they are1069// identical otherwise we'd have never entered this block of code10701071const ImmutableOopMap* map = caller_code->oop_map_for_return_address(caller_frame.pc());1072assert(map != NULL, "null check");1073map->print();1074tty->cr();10751076Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);1077}1078// depending on the code below, do_patch says whether to copy the patch body back into the nmethod1079bool do_patch = true;1080if (stub_id == Runtime1::access_field_patching_id) {1081// The offset may not be correct if the class was not loaded at code generation time.1082// Set it now.1083NativeMovRegMem* n_move = nativeMovRegMem_at(copy_buff);1084assert(n_move->offset() == 0 || (n_move->offset() == 4 && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG)), "illegal offset for type");1085assert(patch_field_offset >= 0, "illegal offset");1086n_move->add_offset_in_bytes(patch_field_offset);1087} else if (load_klass_or_mirror_patch_id) {1088// If a getstatic or putstatic is referencing a klass which1089// isn't fully initialized, the patch body isn't copied into1090// place until initialization is complete. In this case the1091// patch site is setup so that any threads besides the1092// initializing thread are forced to come into the VM and1093// block.1094do_patch = (code != Bytecodes::_getstatic && code != Bytecodes::_putstatic) ||1095InstanceKlass::cast(init_klass)->is_initialized();1096NativeGeneralJump* jump = nativeGeneralJump_at(instr_pc);1097if (jump->jump_destination() == being_initialized_entry) {1098assert(do_patch == true, "initialization must be complete at this point");1099} else {1100// patch the instruction <move reg, klass>1101NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);11021103assert(n_copy->data() == 0 ||1104n_copy->data() == (intptr_t)Universe::non_oop_word(),1105"illegal init value");1106if (stub_id == Runtime1::load_klass_patching_id) {1107assert(load_klass != NULL, "klass not set");1108n_copy->set_data((intx) (load_klass));1109} else {1110assert(mirror() != NULL, "klass not set");1111// Don't need a G1 pre-barrier here since we assert above that data isn't an oop.1112n_copy->set_data(cast_from_oop<intx>(mirror()));1113}11141115if (TracePatching) {1116Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);1117}1118}1119} else if (stub_id == Runtime1::load_appendix_patching_id) {1120NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);1121assert(n_copy->data() == 0 ||1122n_copy->data() == (intptr_t)Universe::non_oop_word(),1123"illegal init value");1124n_copy->set_data(cast_from_oop<intx>(appendix()));11251126if (TracePatching) {1127Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);1128}1129} else {1130ShouldNotReachHere();1131}11321133#if defined(PPC32)1134if (load_klass_or_mirror_patch_id ||1135stub_id == Runtime1::load_appendix_patching_id) {1136// Update the location in the nmethod with the proper1137// metadata. When the code was generated, a NULL was stuffed1138// in the metadata table and that table needs to be update to1139// have the right value. On intel the value is kept1140// directly in the instruction instead of in the metadata1141// table, so set_data above effectively updated the value.1142nmethod* nm = CodeCache::find_nmethod(instr_pc);1143assert(nm != NULL, "invalid nmethod_pc");1144RelocIterator mds(nm, copy_buff, copy_buff + 1);1145bool found = false;1146while (mds.next() && !found) {1147if (mds.type() == relocInfo::oop_type) {1148assert(stub_id == Runtime1::load_mirror_patching_id ||1149stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");1150oop_Relocation* r = mds.oop_reloc();1151oop* oop_adr = r->oop_addr();1152*oop_adr = stub_id == Runtime1::load_mirror_patching_id ? mirror() : appendix();1153r->fix_oop_relocation();1154found = true;1155} else if (mds.type() == relocInfo::metadata_type) {1156assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");1157metadata_Relocation* r = mds.metadata_reloc();1158Metadata** metadata_adr = r->metadata_addr();1159*metadata_adr = load_klass;1160r->fix_metadata_relocation();1161found = true;1162}1163}1164assert(found, "the metadata must exist!");1165}1166#endif1167if (do_patch) {1168// replace instructions1169// first replace the tail, then the call1170#ifdef ARM1171if((load_klass_or_mirror_patch_id ||1172stub_id == Runtime1::load_appendix_patching_id) &&1173nativeMovConstReg_at(copy_buff)->is_pc_relative()) {1174nmethod* nm = CodeCache::find_nmethod(instr_pc);1175address addr = NULL;1176assert(nm != NULL, "invalid nmethod_pc");1177RelocIterator mds(nm, copy_buff, copy_buff + 1);1178while (mds.next()) {1179if (mds.type() == relocInfo::oop_type) {1180assert(stub_id == Runtime1::load_mirror_patching_id ||1181stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");1182oop_Relocation* r = mds.oop_reloc();1183addr = (address)r->oop_addr();1184break;1185} else if (mds.type() == relocInfo::metadata_type) {1186assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");1187metadata_Relocation* r = mds.metadata_reloc();1188addr = (address)r->metadata_addr();1189break;1190}1191}1192assert(addr != NULL, "metadata relocation must exist");1193copy_buff -= *byte_count;1194NativeMovConstReg* n_copy2 = nativeMovConstReg_at(copy_buff);1195n_copy2->set_pc_relative_offset(addr, instr_pc);1196}1197#endif11981199for (int i = NativeGeneralJump::instruction_size; i < *byte_count; i++) {1200address ptr = copy_buff + i;1201int a_byte = (*ptr) & 0xFF;1202address dst = instr_pc + i;1203*(unsigned char*)dst = (unsigned char) a_byte;1204}1205ICache::invalidate_range(instr_pc, *byte_count);1206NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff);12071208if (load_klass_or_mirror_patch_id ||1209stub_id == Runtime1::load_appendix_patching_id) {1210relocInfo::relocType rtype =1211(stub_id == Runtime1::load_klass_patching_id) ?1212relocInfo::metadata_type :1213relocInfo::oop_type;1214// update relocInfo to metadata1215nmethod* nm = CodeCache::find_nmethod(instr_pc);1216assert(nm != NULL, "invalid nmethod_pc");12171218// The old patch site is now a move instruction so update1219// the reloc info so that it will get updated during1220// future GCs.1221RelocIterator iter(nm, (address)instr_pc, (address)(instr_pc + 1));1222relocInfo::change_reloc_info_for_address(&iter, (address) instr_pc,1223relocInfo::none, rtype);1224#ifdef PPC321225{ address instr_pc2 = instr_pc + NativeMovConstReg::lo_offset;1226RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1);1227relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2,1228relocInfo::none, rtype);1229}1230#endif1231}12321233} else {1234ICache::invalidate_range(copy_buff, *byte_count);1235NativeGeneralJump::insert_unconditional(instr_pc, being_initialized_entry);1236}1237}1238}1239}12401241// If we are patching in a non-perm oop, make sure the nmethod1242// is on the right list.1243{1244MutexLocker ml_code (current, CodeCache_lock, Mutex::_no_safepoint_check_flag);1245nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());1246guarantee(nm != NULL, "only nmethods can contain non-perm oops");12471248// Since we've patched some oops in the nmethod,1249// (re)register it with the heap.1250Universe::heap()->register_nmethod(nm);1251}1252JRT_END12531254#else // DEOPTIMIZE_WHEN_PATCHING12551256void Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_id) {1257NOT_PRODUCT(_patch_code_slowcase_cnt++);12581259// Enable WXWrite: the function is called by c1 stub as a runtime function1260// (see another implementation above).1261MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, current));12621263if (TracePatching) {1264tty->print_cr("Deoptimizing because patch is needed");1265}12661267RegisterMap reg_map(current, false);12681269frame runtime_frame = current->last_frame();1270frame caller_frame = runtime_frame.sender(®_map);1271assert(caller_frame.is_compiled_frame(), "Wrong frame type");12721273// Make sure the nmethod is invalidated, i.e. made not entrant.1274nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());1275if (nm != NULL) {1276nm->make_not_entrant();1277}12781279Deoptimization::deoptimize_frame(current, caller_frame.id());1280// Return to the now deoptimized frame.1281postcond(caller_is_deopted(current));1282}12831284#endif // DEOPTIMIZE_WHEN_PATCHING12851286// Entry point for compiled code. We want to patch a nmethod.1287// We don't do a normal VM transition here because we want to1288// know after the patching is complete and any safepoint(s) are taken1289// if the calling nmethod was deoptimized. We do this by calling a1290// helper method which does the normal VM transition and when it1291// completes we can check for deoptimization. This simplifies the1292// assembly code in the cpu directories.1293//1294int Runtime1::move_klass_patching(JavaThread* current) {1295//1296// NOTE: we are still in Java1297//1298debug_only(NoHandleMark nhm;)1299{1300// Enter VM mode1301ResetNoHandleMark rnhm;1302patch_code(current, load_klass_patching_id);1303}1304// Back in JAVA, use no oops DON'T safepoint13051306// Return true if calling code is deoptimized13071308return caller_is_deopted(current);1309}13101311int Runtime1::move_mirror_patching(JavaThread* current) {1312//1313// NOTE: we are still in Java1314//1315debug_only(NoHandleMark nhm;)1316{1317// Enter VM mode1318ResetNoHandleMark rnhm;1319patch_code(current, load_mirror_patching_id);1320}1321// Back in JAVA, use no oops DON'T safepoint13221323// Return true if calling code is deoptimized13241325return caller_is_deopted(current);1326}13271328int Runtime1::move_appendix_patching(JavaThread* current) {1329//1330// NOTE: we are still in Java1331//1332debug_only(NoHandleMark nhm;)1333{1334// Enter VM mode1335ResetNoHandleMark rnhm;1336patch_code(current, load_appendix_patching_id);1337}1338// Back in JAVA, use no oops DON'T safepoint13391340// Return true if calling code is deoptimized13411342return caller_is_deopted(current);1343}13441345// Entry point for compiled code. We want to patch a nmethod.1346// We don't do a normal VM transition here because we want to1347// know after the patching is complete and any safepoint(s) are taken1348// if the calling nmethod was deoptimized. We do this by calling a1349// helper method which does the normal VM transition and when it1350// completes we can check for deoptimization. This simplifies the1351// assembly code in the cpu directories.1352//1353int Runtime1::access_field_patching(JavaThread* current) {1354//1355// NOTE: we are still in Java1356//1357// Handles created in this function will be deleted by the1358// HandleMarkCleaner in the transition to the VM.1359NoHandleMark nhm;1360{1361// Enter VM mode1362ResetNoHandleMark rnhm;1363patch_code(current, access_field_patching_id);1364}1365// Back in JAVA, use no oops DON'T safepoint13661367// Return true if calling code is deoptimized13681369return caller_is_deopted(current);1370}137113721373JRT_LEAF(void, Runtime1::trace_block_entry(jint block_id))1374// for now we just print out the block id1375tty->print("%d ", block_id);1376JRT_END137713781379JRT_LEAF(int, Runtime1::is_instance_of(oopDesc* mirror, oopDesc* obj))1380// had to return int instead of bool, otherwise there may be a mismatch1381// between the C calling convention and the Java one.1382// e.g., on x86, GCC may clear only %al when returning a bool false, but1383// JVM takes the whole %eax as the return value, which may misinterpret1384// the return value as a boolean true.13851386assert(mirror != NULL, "should null-check on mirror before calling");1387Klass* k = java_lang_Class::as_Klass(mirror);1388return (k != NULL && obj != NULL && obj->is_a(k)) ? 1 : 0;1389JRT_END13901391JRT_ENTRY(void, Runtime1::predicate_failed_trap(JavaThread* current))1392ResourceMark rm;13931394RegisterMap reg_map(current, false);1395frame runtime_frame = current->last_frame();1396frame caller_frame = runtime_frame.sender(®_map);13971398nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());1399assert (nm != NULL, "no more nmethod?");1400nm->make_not_entrant();14011402methodHandle m(current, nm->method());1403MethodData* mdo = m->method_data();14041405if (mdo == NULL && !HAS_PENDING_EXCEPTION) {1406// Build an MDO. Ignore errors like OutOfMemory;1407// that simply means we won't have an MDO to update.1408Method::build_interpreter_method_data(m, THREAD);1409if (HAS_PENDING_EXCEPTION) {1410// Only metaspace OOM is expected. No Java code executed.1411assert((PENDING_EXCEPTION->is_a(vmClasses::OutOfMemoryError_klass())), "we expect only an OOM error here");1412CLEAR_PENDING_EXCEPTION;1413}1414mdo = m->method_data();1415}14161417if (mdo != NULL) {1418mdo->inc_trap_count(Deoptimization::Reason_none);1419}14201421if (TracePredicateFailedTraps) {1422stringStream ss1, ss2;1423vframeStream vfst(current);1424Method* inlinee = vfst.method();1425inlinee->print_short_name(&ss1);1426m->print_short_name(&ss2);1427tty->print_cr("Predicate failed trap in method %s at bci %d inlined in %s at pc " INTPTR_FORMAT, ss1.as_string(), vfst.bci(), ss2.as_string(), p2i(caller_frame.pc()));1428}142914301431Deoptimization::deoptimize_frame(current, caller_frame.id());14321433JRT_END14341435#ifndef PRODUCT1436void Runtime1::print_statistics() {1437tty->print_cr("C1 Runtime statistics:");1438tty->print_cr(" _resolve_invoke_virtual_cnt: %d", SharedRuntime::_resolve_virtual_ctr);1439tty->print_cr(" _resolve_invoke_opt_virtual_cnt: %d", SharedRuntime::_resolve_opt_virtual_ctr);1440tty->print_cr(" _resolve_invoke_static_cnt: %d", SharedRuntime::_resolve_static_ctr);1441tty->print_cr(" _handle_wrong_method_cnt: %d", SharedRuntime::_wrong_method_ctr);1442tty->print_cr(" _ic_miss_cnt: %d", SharedRuntime::_ic_miss_ctr);1443tty->print_cr(" _generic_arraycopystub_cnt: %d", _generic_arraycopystub_cnt);1444tty->print_cr(" _byte_arraycopy_cnt: %d", _byte_arraycopy_stub_cnt);1445tty->print_cr(" _short_arraycopy_cnt: %d", _short_arraycopy_stub_cnt);1446tty->print_cr(" _int_arraycopy_cnt: %d", _int_arraycopy_stub_cnt);1447tty->print_cr(" _long_arraycopy_cnt: %d", _long_arraycopy_stub_cnt);1448tty->print_cr(" _oop_arraycopy_cnt: %d", _oop_arraycopy_stub_cnt);1449tty->print_cr(" _arraycopy_slowcase_cnt: %d", _arraycopy_slowcase_cnt);1450tty->print_cr(" _arraycopy_checkcast_cnt: %d", _arraycopy_checkcast_cnt);1451tty->print_cr(" _arraycopy_checkcast_attempt_cnt:%d", _arraycopy_checkcast_attempt_cnt);14521453tty->print_cr(" _new_type_array_slowcase_cnt: %d", _new_type_array_slowcase_cnt);1454tty->print_cr(" _new_object_array_slowcase_cnt: %d", _new_object_array_slowcase_cnt);1455tty->print_cr(" _new_instance_slowcase_cnt: %d", _new_instance_slowcase_cnt);1456tty->print_cr(" _new_multi_array_slowcase_cnt: %d", _new_multi_array_slowcase_cnt);1457tty->print_cr(" _monitorenter_slowcase_cnt: %d", _monitorenter_slowcase_cnt);1458tty->print_cr(" _monitorexit_slowcase_cnt: %d", _monitorexit_slowcase_cnt);1459tty->print_cr(" _patch_code_slowcase_cnt: %d", _patch_code_slowcase_cnt);14601461tty->print_cr(" _throw_range_check_exception_count: %d:", _throw_range_check_exception_count);1462tty->print_cr(" _throw_index_exception_count: %d:", _throw_index_exception_count);1463tty->print_cr(" _throw_div0_exception_count: %d:", _throw_div0_exception_count);1464tty->print_cr(" _throw_null_pointer_exception_count: %d:", _throw_null_pointer_exception_count);1465tty->print_cr(" _throw_class_cast_exception_count: %d:", _throw_class_cast_exception_count);1466tty->print_cr(" _throw_incompatible_class_change_error_count: %d:", _throw_incompatible_class_change_error_count);1467tty->print_cr(" _throw_count: %d:", _throw_count);14681469SharedRuntime::print_ic_miss_histogram();1470tty->cr();1471}1472#endif // PRODUCT147314741475