Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/c1/c1_Runtime1.cpp
32285 views
/*1* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.2* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.3*4* This code is free software; you can redistribute it and/or modify it5* under the terms of the GNU General Public License version 2 only, as6* published by the Free Software Foundation.7*8* This code is distributed in the hope that it will be useful, but WITHOUT9* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or10* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License11* version 2 for more details (a copy is included in the LICENSE file that12* accompanied this code).13*14* You should have received a copy of the GNU General Public License version15* 2 along with this work; if not, write to the Free Software Foundation,16* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.17*18* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA19* or visit www.oracle.com if you need additional information or have any20* questions.21*22*/2324#include "precompiled.hpp"25#include "asm/codeBuffer.hpp"26#include "c1/c1_CodeStubs.hpp"27#include "c1/c1_Defs.hpp"28#include "c1/c1_FrameMap.hpp"29#include "c1/c1_LIRAssembler.hpp"30#include "c1/c1_MacroAssembler.hpp"31#include "c1/c1_Runtime1.hpp"32#include "classfile/systemDictionary.hpp"33#include "classfile/vmSymbols.hpp"34#include "code/codeBlob.hpp"35#include "code/compiledIC.hpp"36#include "code/pcDesc.hpp"37#include "code/scopeDesc.hpp"38#include "code/vtableStubs.hpp"39#include "compiler/disassembler.hpp"40#include "gc_interface/collectedHeap.hpp"41#include "interpreter/bytecode.hpp"42#include "interpreter/interpreter.hpp"43#include "jfr/support/jfrIntrinsics.hpp"44#include "memory/allocation.inline.hpp"45#include "memory/barrierSet.hpp"46#include "memory/oopFactory.hpp"47#include "memory/resourceArea.hpp"48#include "oops/objArrayKlass.hpp"49#include "oops/oop.inline.hpp"50#include "runtime/biasedLocking.hpp"51#include "runtime/compilationPolicy.hpp"52#include "runtime/interfaceSupport.hpp"53#include "runtime/javaCalls.hpp"54#include "runtime/sharedRuntime.hpp"55#include "runtime/threadCritical.hpp"56#include "runtime/vframe.hpp"57#include "runtime/vframeArray.hpp"58#include "utilities/copy.hpp"59#include "utilities/events.hpp"60#include "utilities/macros.hpp"61#if INCLUDE_ALL_GCS62#include "gc_implementation/shenandoah/shenandoahBarrierSet.inline.hpp"63#endif6465// Implementation of StubAssembler6667StubAssembler::StubAssembler(CodeBuffer* code, const char * name, int stub_id) : C1_MacroAssembler(code) {68_name = name;69_must_gc_arguments = false;70_frame_size = no_frame_size;71_num_rt_args = 0;72_stub_id = stub_id;73}747576void StubAssembler::set_info(const char* name, bool must_gc_arguments) {77_name = name;78_must_gc_arguments = must_gc_arguments;79}808182void StubAssembler::set_frame_size(int size) {83if (_frame_size == no_frame_size) {84_frame_size = size;85}86assert(_frame_size == size, "can't change the frame size");87}888990void StubAssembler::set_num_rt_args(int args) {91if (_num_rt_args == 0) {92_num_rt_args = args;93}94assert(_num_rt_args == args, "can't change the number of args");95}9697// Implementation of Runtime19899CodeBlob* Runtime1::_blobs[Runtime1::number_of_ids];100const char *Runtime1::_blob_names[] = {101RUNTIME1_STUBS(STUB_NAME, LAST_STUB_NAME)102};103104#ifndef PRODUCT105// statistics106int Runtime1::_generic_arraycopy_cnt = 0;107int Runtime1::_primitive_arraycopy_cnt = 0;108int Runtime1::_oop_arraycopy_cnt = 0;109int Runtime1::_generic_arraycopystub_cnt = 0;110int Runtime1::_arraycopy_slowcase_cnt = 0;111int Runtime1::_arraycopy_checkcast_cnt = 0;112int Runtime1::_arraycopy_checkcast_attempt_cnt = 0;113int Runtime1::_new_type_array_slowcase_cnt = 0;114int Runtime1::_new_object_array_slowcase_cnt = 0;115int Runtime1::_new_instance_slowcase_cnt = 0;116int Runtime1::_new_multi_array_slowcase_cnt = 0;117int Runtime1::_monitorenter_slowcase_cnt = 0;118int Runtime1::_monitorexit_slowcase_cnt = 0;119int Runtime1::_patch_code_slowcase_cnt = 0;120int Runtime1::_throw_range_check_exception_count = 0;121int Runtime1::_throw_index_exception_count = 0;122int Runtime1::_throw_div0_exception_count = 0;123int Runtime1::_throw_null_pointer_exception_count = 0;124int Runtime1::_throw_class_cast_exception_count = 0;125int Runtime1::_throw_incompatible_class_change_error_count = 0;126int Runtime1::_throw_array_store_exception_count = 0;127int Runtime1::_throw_count = 0;128129static int _byte_arraycopy_cnt = 0;130static int _short_arraycopy_cnt = 0;131static int _int_arraycopy_cnt = 0;132static int _long_arraycopy_cnt = 0;133static int _oop_arraycopy_cnt = 0;134135address Runtime1::arraycopy_count_address(BasicType type) {136switch (type) {137case T_BOOLEAN:138case T_BYTE: return (address)&_byte_arraycopy_cnt;139case T_CHAR:140case T_SHORT: return (address)&_short_arraycopy_cnt;141case T_FLOAT:142case T_INT: return (address)&_int_arraycopy_cnt;143case T_DOUBLE:144case T_LONG: return (address)&_long_arraycopy_cnt;145case T_ARRAY:146case T_OBJECT: return (address)&_oop_arraycopy_cnt;147default:148ShouldNotReachHere();149return NULL;150}151}152153154#endif155156// Simple helper to see if the caller of a runtime stub which157// entered the VM has been deoptimized158159static bool caller_is_deopted() {160JavaThread* thread = JavaThread::current();161RegisterMap reg_map(thread, false);162frame runtime_frame = thread->last_frame();163frame caller_frame = runtime_frame.sender(®_map);164assert(caller_frame.is_compiled_frame(), "must be compiled");165return caller_frame.is_deoptimized_frame();166}167168// Stress deoptimization169static void deopt_caller() {170if ( !caller_is_deopted()) {171JavaThread* thread = JavaThread::current();172RegisterMap reg_map(thread, false);173frame runtime_frame = thread->last_frame();174frame caller_frame = runtime_frame.sender(®_map);175Deoptimization::deoptimize_frame(thread, caller_frame.id());176assert(caller_is_deopted(), "Must be deoptimized");177}178}179180181void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) {182assert(0 <= id && id < number_of_ids, "illegal stub id");183ResourceMark rm;184// create code buffer for code storage185CodeBuffer code(buffer_blob);186187Compilation::setup_code_buffer(&code, 0);188189// create assembler for code generation190StubAssembler* sasm = new StubAssembler(&code, name_for(id), id);191// generate code for runtime stub192OopMapSet* oop_maps;193oop_maps = generate_code_for(id, sasm);194assert(oop_maps == NULL || sasm->frame_size() != no_frame_size,195"if stub has an oop map it must have a valid frame size");196197#ifdef ASSERT198// Make sure that stubs that need oopmaps have them199switch (id) {200// These stubs don't need to have an oopmap201case dtrace_object_alloc_id:202case g1_pre_barrier_slow_id:203case g1_post_barrier_slow_id:204case shenandoah_lrb_slow_id:205case slow_subtype_check_id:206case fpu2long_stub_id:207case unwind_exception_id:208case counter_overflow_id:209#if defined(SPARC) || defined(PPC)210case handle_exception_nofpu_id: // Unused on sparc211#endif212break;213214// All other stubs should have oopmaps215default:216assert(oop_maps != NULL, "must have an oopmap");217}218#endif219220// align so printing shows nop's instead of random code at the end (SimpleStubs are aligned)221sasm->align(BytesPerWord);222// make sure all code is in code buffer223sasm->flush();224// create blob - distinguish a few special cases225CodeBlob* blob = RuntimeStub::new_runtime_stub(name_for(id),226&code,227CodeOffsets::frame_never_safe,228sasm->frame_size(),229oop_maps,230sasm->must_gc_arguments());231// install blob232assert(blob != NULL, "blob must exist");233_blobs[id] = blob;234}235236237void Runtime1::initialize(BufferBlob* blob) {238// platform-dependent initialization239initialize_pd();240// generate stubs241for (int id = 0; id < number_of_ids; id++) generate_blob_for(blob, (StubID)id);242// printing243#ifndef PRODUCT244if (PrintSimpleStubs) {245ResourceMark rm;246for (int id = 0; id < number_of_ids; id++) {247_blobs[id]->print();248if (_blobs[id]->oop_maps() != NULL) {249_blobs[id]->oop_maps()->print();250}251}252}253#endif254}255256257CodeBlob* Runtime1::blob_for(StubID id) {258assert(0 <= id && id < number_of_ids, "illegal stub id");259return _blobs[id];260}261262263const char* Runtime1::name_for(StubID id) {264assert(0 <= id && id < number_of_ids, "illegal stub id");265return _blob_names[id];266}267268const char* Runtime1::name_for_address(address entry) {269for (int id = 0; id < number_of_ids; id++) {270if (entry == entry_for((StubID)id)) return name_for((StubID)id);271}272273#define FUNCTION_CASE(a, f) \274if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f)) return #f275276FUNCTION_CASE(entry, os::javaTimeMillis);277FUNCTION_CASE(entry, os::javaTimeNanos);278FUNCTION_CASE(entry, SharedRuntime::OSR_migration_end);279FUNCTION_CASE(entry, SharedRuntime::d2f);280FUNCTION_CASE(entry, SharedRuntime::d2i);281FUNCTION_CASE(entry, SharedRuntime::d2l);282FUNCTION_CASE(entry, SharedRuntime::dcos);283FUNCTION_CASE(entry, SharedRuntime::dexp);284FUNCTION_CASE(entry, SharedRuntime::dlog);285FUNCTION_CASE(entry, SharedRuntime::dlog10);286FUNCTION_CASE(entry, SharedRuntime::dpow);287FUNCTION_CASE(entry, SharedRuntime::drem);288FUNCTION_CASE(entry, SharedRuntime::dsin);289FUNCTION_CASE(entry, SharedRuntime::dtan);290FUNCTION_CASE(entry, SharedRuntime::f2i);291FUNCTION_CASE(entry, SharedRuntime::f2l);292FUNCTION_CASE(entry, SharedRuntime::frem);293FUNCTION_CASE(entry, SharedRuntime::l2d);294FUNCTION_CASE(entry, SharedRuntime::l2f);295FUNCTION_CASE(entry, SharedRuntime::ldiv);296FUNCTION_CASE(entry, SharedRuntime::lmul);297FUNCTION_CASE(entry, SharedRuntime::lrem);298FUNCTION_CASE(entry, SharedRuntime::lrem);299FUNCTION_CASE(entry, SharedRuntime::dtrace_method_entry);300FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit);301FUNCTION_CASE(entry, is_instance_of);302FUNCTION_CASE(entry, trace_block_entry);303#ifdef JFR_HAVE_INTRINSICS304FUNCTION_CASE(entry, JFR_TIME_FUNCTION);305#endif306FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32());307308#undef FUNCTION_CASE309310// Soft float adds more runtime names.311return pd_name_for_address(entry);312}313314315JRT_ENTRY(void, Runtime1::new_instance(JavaThread* thread, Klass* klass))316NOT_PRODUCT(_new_instance_slowcase_cnt++;)317318assert(klass->is_klass(), "not a class");319Handle holder(THREAD, klass->klass_holder()); // keep the klass alive320instanceKlassHandle h(thread, klass);321h->check_valid_for_instantiation(true, CHECK);322// make sure klass is initialized323h->initialize(CHECK);324// allocate instance and return via TLS325oop obj = h->allocate_instance(CHECK);326thread->set_vm_result(obj);327JRT_END328329330JRT_ENTRY(void, Runtime1::new_type_array(JavaThread* thread, Klass* klass, jint length))331NOT_PRODUCT(_new_type_array_slowcase_cnt++;)332// Note: no handle for klass needed since they are not used333// anymore after new_typeArray() and no GC can happen before.334// (This may have to change if this code changes!)335assert(klass->is_klass(), "not a class");336BasicType elt_type = TypeArrayKlass::cast(klass)->element_type();337oop obj = oopFactory::new_typeArray(elt_type, length, CHECK);338thread->set_vm_result(obj);339// This is pretty rare but this runtime patch is stressful to deoptimization340// if we deoptimize here so force a deopt to stress the path.341if (DeoptimizeALot) {342deopt_caller();343}344345JRT_END346347348JRT_ENTRY(void, Runtime1::new_object_array(JavaThread* thread, Klass* array_klass, jint length))349NOT_PRODUCT(_new_object_array_slowcase_cnt++;)350351// Note: no handle for klass needed since they are not used352// anymore after new_objArray() and no GC can happen before.353// (This may have to change if this code changes!)354assert(array_klass->is_klass(), "not a class");355Handle holder(THREAD, array_klass->klass_holder()); // keep the klass alive356Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass();357objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK);358thread->set_vm_result(obj);359// This is pretty rare but this runtime patch is stressful to deoptimization360// if we deoptimize here so force a deopt to stress the path.361if (DeoptimizeALot) {362deopt_caller();363}364JRT_END365366367JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* thread, Klass* klass, int rank, jint* dims))368NOT_PRODUCT(_new_multi_array_slowcase_cnt++;)369370assert(klass->is_klass(), "not a class");371assert(rank >= 1, "rank must be nonzero");372Handle holder(THREAD, klass->klass_holder()); // keep the klass alive373oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);374thread->set_vm_result(obj);375JRT_END376377378JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* thread, StubID id))379tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", id);380JRT_END381382383JRT_ENTRY(void, Runtime1::throw_array_store_exception(JavaThread* thread, oopDesc* obj))384ResourceMark rm(thread);385const char* klass_name = obj->klass()->external_name();386SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArrayStoreException(), klass_name);387JRT_END388389390// counter_overflow() is called from within C1-compiled methods. The enclosing method is the method391// associated with the top activation record. The inlinee (that is possibly included in the enclosing392// method) method oop is passed as an argument. In order to do that it is embedded in the code as393// a constant.394static nmethod* counter_overflow_helper(JavaThread* THREAD, int branch_bci, Method* m) {395nmethod* osr_nm = NULL;396methodHandle method(THREAD, m);397398RegisterMap map(THREAD, false);399frame fr = THREAD->last_frame().sender(&map);400nmethod* nm = (nmethod*) fr.cb();401assert(nm!= NULL && nm->is_nmethod(), "Sanity check");402methodHandle enclosing_method(THREAD, nm->method());403404CompLevel level = (CompLevel)nm->comp_level();405int bci = InvocationEntryBci;406if (branch_bci != InvocationEntryBci) {407// Compute desination bci408address pc = method()->code_base() + branch_bci;409Bytecodes::Code branch = Bytecodes::code_at(method(), pc);410int offset = 0;411switch (branch) {412case Bytecodes::_if_icmplt: case Bytecodes::_iflt:413case Bytecodes::_if_icmpgt: case Bytecodes::_ifgt:414case Bytecodes::_if_icmple: case Bytecodes::_ifle:415case Bytecodes::_if_icmpge: case Bytecodes::_ifge:416case Bytecodes::_if_icmpeq: case Bytecodes::_if_acmpeq: case Bytecodes::_ifeq:417case Bytecodes::_if_icmpne: case Bytecodes::_if_acmpne: case Bytecodes::_ifne:418case Bytecodes::_ifnull: case Bytecodes::_ifnonnull: case Bytecodes::_goto:419offset = (int16_t)Bytes::get_Java_u2(pc + 1);420break;421case Bytecodes::_goto_w:422offset = Bytes::get_Java_u4(pc + 1);423break;424default: ;425}426bci = branch_bci + offset;427}428assert(!HAS_PENDING_EXCEPTION, "Should not have any exceptions pending");429osr_nm = CompilationPolicy::policy()->event(enclosing_method, method, branch_bci, bci, level, nm, THREAD);430assert(!HAS_PENDING_EXCEPTION, "Event handler should not throw any exceptions");431return osr_nm;432}433434JRT_BLOCK_ENTRY(address, Runtime1::counter_overflow(JavaThread* thread, int bci, Method* method))435nmethod* osr_nm;436JRT_BLOCK437osr_nm = counter_overflow_helper(thread, bci, method);438if (osr_nm != NULL) {439RegisterMap map(thread, false);440frame fr = thread->last_frame().sender(&map);441Deoptimization::deoptimize_frame(thread, fr.id());442}443JRT_BLOCK_END444return NULL;445JRT_END446447extern void vm_exit(int code);448449// Enter this method from compiled code handler below. This is where we transition450// to VM mode. This is done as a helper routine so that the method called directly451// from compiled code does not have to transition to VM. This allows the entry452// method to see if the nmethod that we have just looked up a handler for has453// been deoptimized while we were in the vm. This simplifies the assembly code454// cpu directories.455//456// We are entering here from exception stub (via the entry method below)457// If there is a compiled exception handler in this method, we will continue there;458// otherwise we will unwind the stack and continue at the caller of top frame method459// Note: we enter in Java using a special JRT wrapper. This wrapper allows us to460// control the area where we can allow a safepoint. After we exit the safepoint area we can461// check to see if the handler we are going to return is now in a nmethod that has462// been deoptimized. If that is the case we return the deopt blob463// unpack_with_exception entry instead. This makes life for the exception blob easier464// because making that same check and diverting is painful from assembly language.465JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* thread, oopDesc* ex, address pc, nmethod*& nm))466// Reset method handle flag.467thread->set_is_method_handle_return(false);468469Handle exception(thread, ex);470nm = CodeCache::find_nmethod(pc);471assert(nm != NULL, "this is not an nmethod");472// Adjust the pc as needed/473if (nm->is_deopt_pc(pc)) {474RegisterMap map(thread, false);475frame exception_frame = thread->last_frame().sender(&map);476// if the frame isn't deopted then pc must not correspond to the caller of last_frame477assert(exception_frame.is_deoptimized_frame(), "must be deopted");478pc = exception_frame.pc();479}480#ifdef ASSERT481assert(exception.not_null(), "NULL exceptions should be handled by throw_exception");482assert(exception->is_oop(), "just checking");483// Check that exception is a subclass of Throwable, otherwise we have a VerifyError484if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {485if (ExitVMOnVerifyError) vm_exit(-1);486ShouldNotReachHere();487}488#endif489490// Check the stack guard pages and reenable them if necessary and there is491// enough space on the stack to do so. Use fast exceptions only if the guard492// pages are enabled.493bool guard_pages_enabled = thread->stack_yellow_zone_enabled();494if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();495496if (JvmtiExport::can_post_on_exceptions()) {497// To ensure correct notification of exception catches and throws498// we have to deoptimize here. If we attempted to notify the499// catches and throws during this exception lookup it's possible500// we could deoptimize on the way out of the VM and end back in501// the interpreter at the throw site. This would result in double502// notifications since the interpreter would also notify about503// these same catches and throws as it unwound the frame.504505RegisterMap reg_map(thread);506frame stub_frame = thread->last_frame();507frame caller_frame = stub_frame.sender(®_map);508509// We don't really want to deoptimize the nmethod itself since we510// can actually continue in the exception handler ourselves but I511// don't see an easy way to have the desired effect.512Deoptimization::deoptimize_frame(thread, caller_frame.id());513assert(caller_is_deopted(), "Must be deoptimized");514515return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();516}517518// ExceptionCache is used only for exceptions at call sites and not for implicit exceptions519if (guard_pages_enabled) {520address fast_continuation = nm->handler_for_exception_and_pc(exception, pc);521if (fast_continuation != NULL) {522// Set flag if return address is a method handle call site.523thread->set_is_method_handle_return(nm->is_method_handle_return(pc));524return fast_continuation;525}526}527528// If the stack guard pages are enabled, check whether there is a handler in529// the current method. Otherwise (guard pages disabled), force an unwind and530// skip the exception cache update (i.e., just leave continuation==NULL).531address continuation = NULL;532if (guard_pages_enabled) {533534// New exception handling mechanism can support inlined methods535// with exception handlers since the mappings are from PC to PC536537// debugging support538// tracing539if (TraceExceptions) {540ttyLocker ttyl;541ResourceMark rm;542tty->print_cr("Exception <%s> (" INTPTR_FORMAT ") thrown in compiled method <%s> at PC " INTPTR_FORMAT " for thread " INTPTR_FORMAT "",543exception->print_value_string(), p2i((address)exception()), nm->method()->print_value_string(), p2i(pc), p2i(thread));544}545// for AbortVMOnException flag546NOT_PRODUCT(Exceptions::debug_check_abort(exception));547548// Clear out the exception oop and pc since looking up an549// exception handler can cause class loading, which might throw an550// exception and those fields are expected to be clear during551// normal bytecode execution.552thread->clear_exception_oop_and_pc();553554bool recursive_exception = false;555continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false, recursive_exception);556// If an exception was thrown during exception dispatch, the exception oop may have changed557thread->set_exception_oop(exception());558thread->set_exception_pc(pc);559560// the exception cache is used only by non-implicit exceptions561// Update the exception cache only when there didn't happen562// another exception during the computation of the compiled563// exception handler. Checking for exception oop equality is not564// sufficient because some exceptions are pre-allocated and reused.565if (continuation != NULL && !recursive_exception) {566nm->add_handler_for_exception_and_pc(exception, pc, continuation);567}568}569570thread->set_vm_result(exception());571// Set flag if return address is a method handle call site.572thread->set_is_method_handle_return(nm->is_method_handle_return(pc));573574if (TraceExceptions) {575ttyLocker ttyl;576ResourceMark rm;577tty->print_cr("Thread " PTR_FORMAT " continuing at PC " PTR_FORMAT " for exception thrown at PC " PTR_FORMAT,578p2i(thread), p2i(continuation), p2i(pc));579}580581return continuation;582JRT_END583584// Enter this method from compiled code only if there is a Java exception handler585// in the method handling the exception.586// We are entering here from exception stub. We don't do a normal VM transition here.587// We do it in a helper. This is so we can check to see if the nmethod we have just588// searched for an exception handler has been deoptimized in the meantime.589address Runtime1::exception_handler_for_pc(JavaThread* thread) {590oop exception = thread->exception_oop();591address pc = thread->exception_pc();592// Still in Java mode593DEBUG_ONLY(ResetNoHandleMark rnhm);594nmethod* nm = NULL;595address continuation = NULL;596{597// Enter VM mode by calling the helper598ResetNoHandleMark rnhm;599continuation = exception_handler_for_pc_helper(thread, exception, pc, nm);600}601// Back in JAVA, use no oops DON'T safepoint602603// Now check to see if the nmethod we were called from is now deoptimized.604// If so we must return to the deopt blob and deoptimize the nmethod605if (nm != NULL && caller_is_deopted()) {606continuation = SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();607}608609assert(continuation != NULL, "no handler found");610return continuation;611}612613614JRT_ENTRY(void, Runtime1::throw_range_check_exception(JavaThread* thread, int index))615NOT_PRODUCT(_throw_range_check_exception_count++;)616char message[jintAsStringSize];617sprintf(message, "%d", index);618SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message);619JRT_END620621622JRT_ENTRY(void, Runtime1::throw_index_exception(JavaThread* thread, int index))623NOT_PRODUCT(_throw_index_exception_count++;)624char message[16];625sprintf(message, "%d", index);626SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IndexOutOfBoundsException(), message);627JRT_END628629630JRT_ENTRY(void, Runtime1::throw_div0_exception(JavaThread* thread))631NOT_PRODUCT(_throw_div0_exception_count++;)632SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArithmeticException(), "/ by zero");633JRT_END634635636JRT_ENTRY(void, Runtime1::throw_null_pointer_exception(JavaThread* thread))637NOT_PRODUCT(_throw_null_pointer_exception_count++;)638SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException());639JRT_END640641642JRT_ENTRY(void, Runtime1::throw_class_cast_exception(JavaThread* thread, oopDesc* object))643NOT_PRODUCT(_throw_class_cast_exception_count++;)644ResourceMark rm(thread);645char* message = SharedRuntime::generate_class_cast_message(646thread, object->klass()->external_name());647SharedRuntime::throw_and_post_jvmti_exception(648thread, vmSymbols::java_lang_ClassCastException(), message);649JRT_END650651652JRT_ENTRY(void, Runtime1::throw_incompatible_class_change_error(JavaThread* thread))653NOT_PRODUCT(_throw_incompatible_class_change_error_count++;)654ResourceMark rm(thread);655SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IncompatibleClassChangeError());656JRT_END657658659JRT_ENTRY_NO_ASYNC(void, Runtime1::monitorenter(JavaThread* thread, oopDesc* obj, BasicObjectLock* lock))660NOT_PRODUCT(_monitorenter_slowcase_cnt++;)661if (PrintBiasedLockingStatistics) {662Atomic::inc(BiasedLocking::slow_path_entry_count_addr());663}664Handle h_obj(thread, obj);665assert(h_obj()->is_oop(), "must be NULL or an object");666if (UseBiasedLocking) {667// Retry fast entry if bias is revoked to avoid unnecessary inflation668ObjectSynchronizer::fast_enter(h_obj, lock->lock(), true, CHECK);669} else {670if (UseFastLocking) {671// When using fast locking, the compiled code has already tried the fast case672assert(obj == lock->obj(), "must match");673ObjectSynchronizer::slow_enter(h_obj, lock->lock(), THREAD);674} else {675lock->set_obj(obj);676ObjectSynchronizer::fast_enter(h_obj, lock->lock(), false, THREAD);677}678}679JRT_END680681682JRT_LEAF(void, Runtime1::monitorexit(JavaThread* thread, BasicObjectLock* lock))683NOT_PRODUCT(_monitorexit_slowcase_cnt++;)684assert(thread == JavaThread::current(), "threads must correspond");685assert(thread->last_Java_sp(), "last_Java_sp must be set");686// monitorexit is non-blocking (leaf routine) => no exceptions can be thrown687EXCEPTION_MARK;688689oop obj = lock->obj();690assert(obj->is_oop(), "must be NULL or an object");691if (UseFastLocking) {692// When using fast locking, the compiled code has already tried the fast case693ObjectSynchronizer::slow_exit(obj, lock->lock(), THREAD);694} else {695ObjectSynchronizer::fast_exit(obj, lock->lock(), THREAD);696}697JRT_END698699// Cf. OptoRuntime::deoptimize_caller_frame700JRT_ENTRY(void, Runtime1::deoptimize(JavaThread* thread))701// Called from within the owner thread, so no need for safepoint702RegisterMap reg_map(thread, false);703frame stub_frame = thread->last_frame();704assert(stub_frame.is_runtime_frame(), "sanity check");705frame caller_frame = stub_frame.sender(®_map);706707// We are coming from a compiled method; check this is true.708assert(CodeCache::find_nmethod(caller_frame.pc()) != NULL, "sanity");709710// Deoptimize the caller frame.711Deoptimization::deoptimize_frame(thread, caller_frame.id());712713// Return to the now deoptimized frame.714JRT_END715716717static Klass* resolve_field_return_klass(methodHandle caller, int bci, TRAPS) {718Bytecode_field field_access(caller, bci);719// This can be static or non-static field access720Bytecodes::Code code = field_access.code();721722// We must load class, initialize class and resolvethe field723fieldDescriptor result; // initialize class if needed724constantPoolHandle constants(THREAD, caller->constants());725LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK_NULL);726return result.field_holder();727}728729730//731// This routine patches sites where a class wasn't loaded or732// initialized at the time the code was generated. It handles733// references to classes, fields and forcing of initialization. Most734// of the cases are straightforward and involving simply forcing735// resolution of a class, rewriting the instruction stream with the736// needed constant and replacing the call in this function with the737// patched code. The case for static field is more complicated since738// the thread which is in the process of initializing a class can739// access it's static fields but other threads can't so the code740// either has to deoptimize when this case is detected or execute a741// check that the current thread is the initializing thread. The742// current743//744// Patches basically look like this:745//746//747// patch_site: jmp patch stub ;; will be patched748// continue: ...749// ...750// ...751// ...752//753// They have a stub which looks like this:754//755// ;; patch body756// movl <const>, reg (for class constants)757// <or> movl [reg1 + <const>], reg (for field offsets)758// <or> movl reg, [reg1 + <const>] (for field offsets)759// <being_init offset> <bytes to copy> <bytes to skip>760// patch_stub: call Runtime1::patch_code (through a runtime stub)761// jmp patch_site762//763//764// A normal patch is done by rewriting the patch body, usually a move,765// and then copying it into place over top of the jmp instruction766// being careful to flush caches and doing it in an MP-safe way. The767// constants following the patch body are used to find various pieces768// of the patch relative to the call site for Runtime1::patch_code.769// The case for getstatic and putstatic is more complicated because770// getstatic and putstatic have special semantics when executing while771// the class is being initialized. getstatic/putstatic on a class772// which is being_initialized may be executed by the initializing773// thread but other threads have to block when they execute it. This774// is accomplished in compiled code by executing a test of the current775// thread against the initializing thread of the class. It's emitted776// as boilerplate in their stub which allows the patched code to be777// executed before it's copied back into the main body of the nmethod.778//779// being_init: get_thread(<tmp reg>780// cmpl [reg1 + <init_thread_offset>], <tmp reg>781// jne patch_stub782// movl [reg1 + <const>], reg (for field offsets) <or>783// movl reg, [reg1 + <const>] (for field offsets)784// jmp continue785// <being_init offset> <bytes to copy> <bytes to skip>786// patch_stub: jmp Runtim1::patch_code (through a runtime stub)787// jmp patch_site788//789// If the class is being initialized the patch body is rewritten and790// the patch site is rewritten to jump to being_init, instead of791// patch_stub. Whenever this code is executed it checks the current792// thread against the intializing thread so other threads will enter793// the runtime and end up blocked waiting the class to finish794// initializing inside the calls to resolve_field below. The795// initializing class will continue on it's way. Once the class is796// fully_initialized, the intializing_thread of the class becomes797// NULL, so the next thread to execute this code will fail the test,798// call into patch_code and complete the patching process by copying799// the patch body back into the main part of the nmethod and resume800// executing.801//802//803804JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_id ))805NOT_PRODUCT(_patch_code_slowcase_cnt++;)806807#ifdef AARCH64808// AArch64 does not patch C1-generated code.809ShouldNotReachHere();810#endif811812ResourceMark rm(thread);813RegisterMap reg_map(thread, false);814frame runtime_frame = thread->last_frame();815frame caller_frame = runtime_frame.sender(®_map);816817// last java frame on stack818vframeStream vfst(thread, true);819assert(!vfst.at_end(), "Java frame must exist");820821methodHandle caller_method(THREAD, vfst.method());822// Note that caller_method->code() may not be same as caller_code because of OSR's823// Note also that in the presence of inlining it is not guaranteed824// that caller_method() == caller_code->method()825826int bci = vfst.bci();827Bytecodes::Code code = caller_method()->java_code_at(bci);828829#ifndef PRODUCT830// this is used by assertions in the access_field_patching_id831BasicType patch_field_type = T_ILLEGAL;832#endif // PRODUCT833bool deoptimize_for_volatile = false;834int patch_field_offset = -1;835KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code836KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code837Handle mirror(THREAD, NULL); // oop needed by load_mirror_patching code838Handle appendix(THREAD, NULL); // oop needed by appendix_patching code839bool load_klass_or_mirror_patch_id =840(stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);841842if (stub_id == Runtime1::access_field_patching_id) {843844Bytecode_field field_access(caller_method, bci);845fieldDescriptor result; // initialize class if needed846Bytecodes::Code code = field_access.code();847constantPoolHandle constants(THREAD, caller_method->constants());848LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK);849patch_field_offset = result.offset();850851// If we're patching a field which is volatile then at compile it852// must not have been know to be volatile, so the generated code853// isn't correct for a volatile reference. The nmethod has to be854// deoptimized so that the code can be regenerated correctly.855// This check is only needed for access_field_patching since this856// is the path for patching field offsets. load_klass is only857// used for patching references to oops which don't need special858// handling in the volatile case.859deoptimize_for_volatile = result.access_flags().is_volatile();860861#ifndef PRODUCT862patch_field_type = result.field_type();863#endif864} else if (load_klass_or_mirror_patch_id) {865Klass* k = NULL;866switch (code) {867case Bytecodes::_putstatic:868case Bytecodes::_getstatic:869{ Klass* klass = resolve_field_return_klass(caller_method, bci, CHECK);870init_klass = KlassHandle(THREAD, klass);871mirror = Handle(THREAD, klass->java_mirror());872}873break;874case Bytecodes::_new:875{ Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci));876k = caller_method->constants()->klass_at(bnew.index(), CHECK);877}878break;879case Bytecodes::_multianewarray:880{ Bytecode_multianewarray mna(caller_method(), caller_method->bcp_from(bci));881k = caller_method->constants()->klass_at(mna.index(), CHECK);882}883break;884case Bytecodes::_instanceof:885{ Bytecode_instanceof io(caller_method(), caller_method->bcp_from(bci));886k = caller_method->constants()->klass_at(io.index(), CHECK);887}888break;889case Bytecodes::_checkcast:890{ Bytecode_checkcast cc(caller_method(), caller_method->bcp_from(bci));891k = caller_method->constants()->klass_at(cc.index(), CHECK);892}893break;894case Bytecodes::_anewarray:895{ Bytecode_anewarray anew(caller_method(), caller_method->bcp_from(bci));896Klass* ek = caller_method->constants()->klass_at(anew.index(), CHECK);897k = ek->array_klass(CHECK);898}899break;900case Bytecodes::_ldc:901case Bytecodes::_ldc_w:902{903Bytecode_loadconstant cc(caller_method, bci);904oop m = cc.resolve_constant(CHECK);905mirror = Handle(THREAD, m);906}907break;908default: fatal("unexpected bytecode for load_klass_or_mirror_patch_id");909}910// convert to handle911load_klass = KlassHandle(THREAD, k);912} else if (stub_id == load_appendix_patching_id) {913Bytecode_invoke bytecode(caller_method, bci);914Bytecodes::Code bc = bytecode.invoke_code();915916CallInfo info;917constantPoolHandle pool(thread, caller_method->constants());918int index = bytecode.index();919LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);920appendix = info.resolved_appendix();921switch (bc) {922case Bytecodes::_invokehandle: {923int cache_index = ConstantPool::decode_cpcache_index(index, true);924assert(cache_index >= 0 && cache_index < pool->cache()->length(), "unexpected cache index");925pool->cache()->entry_at(cache_index)->set_method_handle(pool, info);926break;927}928case Bytecodes::_invokedynamic: {929pool->invokedynamic_cp_cache_entry_at(index)->set_dynamic_call(pool, info);930break;931}932default: fatal("unexpected bytecode for load_appendix_patching_id");933}934} else {935ShouldNotReachHere();936}937938if (deoptimize_for_volatile) {939// At compile time we assumed the field wasn't volatile but after940// loading it turns out it was volatile so we have to throw the941// compiled code out and let it be regenerated.942if (TracePatching) {943tty->print_cr("Deoptimizing for patching volatile field reference");944}945// It's possible the nmethod was invalidated in the last946// safepoint, but if it's still alive then make it not_entrant.947nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());948if (nm != NULL) {949nm->make_not_entrant();950}951952Deoptimization::deoptimize_frame(thread, caller_frame.id());953954// Return to the now deoptimized frame.955}956957// Now copy code back958{959MutexLockerEx ml_patch (Patching_lock, Mutex::_no_safepoint_check_flag);960//961// Deoptimization may have happened while we waited for the lock.962// In that case we don't bother to do any patching we just return963// and let the deopt happen964if (!caller_is_deopted()) {965NativeGeneralJump* jump = nativeGeneralJump_at(caller_frame.pc());966address instr_pc = jump->jump_destination();967NativeInstruction* ni = nativeInstruction_at(instr_pc);968if (ni->is_jump() ) {969// the jump has not been patched yet970// The jump destination is slow case and therefore not part of the stubs971// (stubs are only for StaticCalls)972973// format of buffer974// ....975// instr byte 0 <-- copy_buff976// instr byte 1977// ..978// instr byte n-1979// n980// .... <-- call destination981982address stub_location = caller_frame.pc() + PatchingStub::patch_info_offset();983unsigned char* byte_count = (unsigned char*) (stub_location - 1);984unsigned char* byte_skip = (unsigned char*) (stub_location - 2);985unsigned char* being_initialized_entry_offset = (unsigned char*) (stub_location - 3);986address copy_buff = stub_location - *byte_skip - *byte_count;987address being_initialized_entry = stub_location - *being_initialized_entry_offset;988if (TracePatching) {989ttyLocker ttyl;990tty->print_cr(" Patching %s at bci %d at address " INTPTR_FORMAT " (%s)", Bytecodes::name(code), bci,991p2i(instr_pc), (stub_id == Runtime1::access_field_patching_id) ? "field" : "klass");992nmethod* caller_code = CodeCache::find_nmethod(caller_frame.pc());993assert(caller_code != NULL, "nmethod not found");994995// NOTE we use pc() not original_pc() because we already know they are996// identical otherwise we'd have never entered this block of code997998OopMap* map = caller_code->oop_map_for_return_address(caller_frame.pc());999assert(map != NULL, "null check");1000map->print();1001tty->cr();10021003Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);1004}1005// depending on the code below, do_patch says whether to copy the patch body back into the nmethod1006bool do_patch = true;1007if (stub_id == Runtime1::access_field_patching_id) {1008// The offset may not be correct if the class was not loaded at code generation time.1009// Set it now.1010NativeMovRegMem* n_move = nativeMovRegMem_at(copy_buff);1011assert(n_move->offset() == 0 || (n_move->offset() == 4 && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG)), "illegal offset for type");1012assert(patch_field_offset >= 0, "illegal offset");1013n_move->add_offset_in_bytes(patch_field_offset);1014} else if (load_klass_or_mirror_patch_id) {1015// If a getstatic or putstatic is referencing a klass which1016// isn't fully initialized, the patch body isn't copied into1017// place until initialization is complete. In this case the1018// patch site is setup so that any threads besides the1019// initializing thread are forced to come into the VM and1020// block.1021do_patch = (code != Bytecodes::_getstatic && code != Bytecodes::_putstatic) ||1022InstanceKlass::cast(init_klass())->is_initialized();1023NativeGeneralJump* jump = nativeGeneralJump_at(instr_pc);1024if (jump->jump_destination() == being_initialized_entry) {1025assert(do_patch == true, "initialization must be complete at this point");1026} else {1027// patch the instruction <move reg, klass>1028NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);10291030assert(n_copy->data() == 0 ||1031n_copy->data() == (intptr_t)Universe::non_oop_word(),1032"illegal init value");1033if (stub_id == Runtime1::load_klass_patching_id) {1034assert(load_klass() != NULL, "klass not set");1035n_copy->set_data((intx) (load_klass()));1036} else {1037assert(mirror() != NULL, "klass not set");1038// Don't need a G1 pre-barrier here since we assert above that data isn't an oop.1039n_copy->set_data(cast_from_oop<intx>(mirror()));1040}10411042if (TracePatching) {1043Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);1044}1045}1046} else if (stub_id == Runtime1::load_appendix_patching_id) {1047NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);1048assert(n_copy->data() == 0 ||1049n_copy->data() == (intptr_t)Universe::non_oop_word(),1050"illegal init value");1051n_copy->set_data(cast_from_oop<intx>(appendix()));10521053if (TracePatching) {1054Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);1055}1056} else {1057ShouldNotReachHere();1058}10591060#if defined(SPARC) || defined(PPC)1061if (load_klass_or_mirror_patch_id ||1062stub_id == Runtime1::load_appendix_patching_id) {1063// Update the location in the nmethod with the proper1064// metadata. When the code was generated, a NULL was stuffed1065// in the metadata table and that table needs to be update to1066// have the right value. On intel the value is kept1067// directly in the instruction instead of in the metadata1068// table, so set_data above effectively updated the value.1069nmethod* nm = CodeCache::find_nmethod(instr_pc);1070assert(nm != NULL, "invalid nmethod_pc");1071RelocIterator mds(nm, copy_buff, copy_buff + 1);1072bool found = false;1073while (mds.next() && !found) {1074if (mds.type() == relocInfo::oop_type) {1075assert(stub_id == Runtime1::load_mirror_patching_id ||1076stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");1077oop_Relocation* r = mds.oop_reloc();1078oop* oop_adr = r->oop_addr();1079*oop_adr = stub_id == Runtime1::load_mirror_patching_id ? mirror() : appendix();1080r->fix_oop_relocation();1081found = true;1082} else if (mds.type() == relocInfo::metadata_type) {1083assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");1084metadata_Relocation* r = mds.metadata_reloc();1085Metadata** metadata_adr = r->metadata_addr();1086*metadata_adr = load_klass();1087r->fix_metadata_relocation();1088found = true;1089}1090}1091assert(found, "the metadata must exist!");1092}1093#endif1094if (do_patch) {1095// replace instructions1096// first replace the tail, then the call1097#if defined(ARM) && !defined(AARCH32)1098if((load_klass_or_mirror_patch_id ||1099stub_id == Runtime1::load_appendix_patching_id) &&1100nativeMovConstReg_at(copy_buff)->is_pc_relative()) {1101nmethod* nm = CodeCache::find_nmethod(instr_pc);1102address addr = NULL;1103assert(nm != NULL, "invalid nmethod_pc");1104RelocIterator mds(nm, copy_buff, copy_buff + 1);1105while (mds.next()) {1106if (mds.type() == relocInfo::oop_type) {1107assert(stub_id == Runtime1::load_mirror_patching_id ||1108stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");1109oop_Relocation* r = mds.oop_reloc();1110addr = (address)r->oop_addr();1111break;1112} else if (mds.type() == relocInfo::metadata_type) {1113assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");1114metadata_Relocation* r = mds.metadata_reloc();1115addr = (address)r->metadata_addr();1116break;1117}1118}1119assert(addr != NULL, "metadata relocation must exist");1120copy_buff -= *byte_count;1121NativeMovConstReg* n_copy2 = nativeMovConstReg_at(copy_buff);1122n_copy2->set_pc_relative_offset(addr, instr_pc);1123}1124#endif11251126for (int i = NativeCall::instruction_size; i < *byte_count; i++) {1127address ptr = copy_buff + i;1128int a_byte = (*ptr) & 0xFF;1129address dst = instr_pc + i;1130*(unsigned char*)dst = (unsigned char) a_byte;1131}1132ICache::invalidate_range(instr_pc, *byte_count);1133NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff);11341135if (load_klass_or_mirror_patch_id ||1136stub_id == Runtime1::load_appendix_patching_id) {1137relocInfo::relocType rtype =1138(stub_id == Runtime1::load_klass_patching_id) ?1139relocInfo::metadata_type :1140relocInfo::oop_type;1141// update relocInfo to metadata1142nmethod* nm = CodeCache::find_nmethod(instr_pc);1143assert(nm != NULL, "invalid nmethod_pc");11441145// The old patch site is now a move instruction so update1146// the reloc info so that it will get updated during1147// future GCs.1148RelocIterator iter(nm, (address)instr_pc, (address)(instr_pc + 1));1149relocInfo::change_reloc_info_for_address(&iter, (address) instr_pc,1150relocInfo::none, rtype);1151#ifdef SPARC1152// Sparc takes two relocations for an metadata so update the second one.1153address instr_pc2 = instr_pc + NativeMovConstReg::add_offset;1154RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1);1155relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2,1156relocInfo::none, rtype);1157#endif1158#ifdef PPC1159{ address instr_pc2 = instr_pc + NativeMovConstReg::lo_offset;1160RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1);1161relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2,1162relocInfo::none, rtype);1163}1164#endif1165}1166#ifdef AARCH321167// AArch32 have (disabled) relocation for offset, should enable it back1168if (stub_id == Runtime1::access_field_patching_id) {1169nmethod* nm = CodeCache::find_nmethod(instr_pc);1170RelocIterator iter(nm, (address)instr_pc, (address)(instr_pc + 1));1171relocInfo::change_reloc_info_for_address(&iter, (address) instr_pc,1172relocInfo::none, relocInfo::section_word_type);1173}1174#endif11751176} else {1177ICache::invalidate_range(copy_buff, *byte_count);1178NativeGeneralJump::insert_unconditional(instr_pc, being_initialized_entry);1179}1180}1181}1182}11831184// If we are patching in a non-perm oop, make sure the nmethod1185// is on the right list.1186if (ScavengeRootsInCode && ((mirror.not_null() && mirror()->is_scavengable()) ||1187(appendix.not_null() && appendix->is_scavengable()))) {1188MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);1189nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());1190guarantee(nm != NULL, "only nmethods can contain non-perm oops");1191if (!nm->on_scavenge_root_list()) {1192CodeCache::add_scavenge_root_nmethod(nm);1193}11941195// Since we've patched some oops in the nmethod,1196// (re)register it with the heap.1197Universe::heap()->register_nmethod(nm);1198}1199JRT_END12001201//1202// Entry point for compiled code. We want to patch a nmethod.1203// We don't do a normal VM transition here because we want to1204// know after the patching is complete and any safepoint(s) are taken1205// if the calling nmethod was deoptimized. We do this by calling a1206// helper method which does the normal VM transition and when it1207// completes we can check for deoptimization. This simplifies the1208// assembly code in the cpu directories.1209//1210#ifndef TARGET_ARCH_aarch641211int Runtime1::move_klass_patching(JavaThread* thread) {1212//1213// NOTE: we are still in Java1214//1215Thread* THREAD = thread;1216debug_only(NoHandleMark nhm;)1217{1218// Enter VM mode12191220ResetNoHandleMark rnhm;1221patch_code(thread, load_klass_patching_id);1222}1223// Back in JAVA, use no oops DON'T safepoint12241225// Return true if calling code is deoptimized12261227return caller_is_deopted();1228}12291230int Runtime1::move_mirror_patching(JavaThread* thread) {1231//1232// NOTE: we are still in Java1233//1234Thread* THREAD = thread;1235debug_only(NoHandleMark nhm;)1236{1237// Enter VM mode12381239ResetNoHandleMark rnhm;1240patch_code(thread, load_mirror_patching_id);1241}1242// Back in JAVA, use no oops DON'T safepoint12431244// Return true if calling code is deoptimized12451246return caller_is_deopted();1247}12481249int Runtime1::move_appendix_patching(JavaThread* thread) {1250//1251// NOTE: we are still in Java1252//1253Thread* THREAD = thread;1254debug_only(NoHandleMark nhm;)1255{1256// Enter VM mode12571258ResetNoHandleMark rnhm;1259patch_code(thread, load_appendix_patching_id);1260}1261// Back in JAVA, use no oops DON'T safepoint12621263// Return true if calling code is deoptimized12641265return caller_is_deopted();1266}1267//1268// Entry point for compiled code. We want to patch a nmethod.1269// We don't do a normal VM transition here because we want to1270// know after the patching is complete and any safepoint(s) are taken1271// if the calling nmethod was deoptimized. We do this by calling a1272// helper method which does the normal VM transition and when it1273// completes we can check for deoptimization. This simplifies the1274// assembly code in the cpu directories.1275//12761277int Runtime1::access_field_patching(JavaThread* thread) {1278//1279// NOTE: we are still in Java1280//1281Thread* THREAD = thread;1282debug_only(NoHandleMark nhm;)1283{1284// Enter VM mode12851286ResetNoHandleMark rnhm;1287patch_code(thread, access_field_patching_id);1288}1289// Back in JAVA, use no oops DON'T safepoint12901291// Return true if calling code is deoptimized12921293return caller_is_deopted();1294JRT_END1295#endif12961297JRT_LEAF(void, Runtime1::trace_block_entry(jint block_id))1298// for now we just print out the block id1299tty->print("%d ", block_id);1300JRT_END130113021303// Array copy return codes.1304enum {1305ac_failed = -1, // arraycopy failed1306ac_ok = 0 // arraycopy succeeded1307};130813091310// Below length is the # elements copied.1311template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,1312oopDesc* dst, T* dst_addr,1313int length) {13141315// For performance reasons, we assume we are using a card marking write1316// barrier. The assert will fail if this is not the case.1317// Note that we use the non-virtual inlineable variant of write_ref_array.1318BarrierSet* bs = Universe::heap()->barrier_set();1319assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");1320assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");13211322#if INCLUDE_ALL_GCS1323if (UseShenandoahGC) {1324ShenandoahBarrierSet::barrier_set()->arraycopy_barrier(src_addr, dst_addr, length);1325}1326#endif13271328if (src == dst) {1329// same object, no check1330bs->write_ref_array_pre(dst_addr, length);1331Copy::conjoint_oops_atomic(src_addr, dst_addr, length);1332bs->write_ref_array((HeapWord*)dst_addr, length);1333return ac_ok;1334} else {1335Klass* bound = ObjArrayKlass::cast(dst->klass())->element_klass();1336Klass* stype = ObjArrayKlass::cast(src->klass())->element_klass();1337if (stype == bound || stype->is_subtype_of(bound)) {1338// Elements are guaranteed to be subtypes, so no check necessary1339bs->write_ref_array_pre(dst_addr, length);1340Copy::conjoint_oops_atomic(src_addr, dst_addr, length);1341bs->write_ref_array((HeapWord*)dst_addr, length);1342return ac_ok;1343}1344}1345return ac_failed;1346}13471348// fast and direct copy of arrays; returning -1, means that an exception may be thrown1349// and we did not copy anything1350JRT_LEAF(int, Runtime1::arraycopy(oopDesc* src, int src_pos, oopDesc* dst, int dst_pos, int length))1351#ifndef PRODUCT1352_generic_arraycopy_cnt++; // Slow-path oop array copy1353#endif13541355if (src == NULL || dst == NULL || src_pos < 0 || dst_pos < 0 || length < 0) return ac_failed;1356if (!dst->is_array() || !src->is_array()) return ac_failed;1357if ((unsigned int) arrayOop(src)->length() < (unsigned int)src_pos + (unsigned int)length) return ac_failed;1358if ((unsigned int) arrayOop(dst)->length() < (unsigned int)dst_pos + (unsigned int)length) return ac_failed;13591360if (length == 0) return ac_ok;1361if (src->is_typeArray()) {1362Klass* klass_oop = src->klass();1363if (klass_oop != dst->klass()) return ac_failed;1364TypeArrayKlass* klass = TypeArrayKlass::cast(klass_oop);1365const int l2es = klass->log2_element_size();1366const int ihs = klass->array_header_in_bytes() / wordSize;1367char* src_addr = (char*) ((oopDesc**)src + ihs) + (src_pos << l2es);1368char* dst_addr = (char*) ((oopDesc**)dst + ihs) + (dst_pos << l2es);1369// Potential problem: memmove is not guaranteed to be word atomic1370// Revisit in Merlin1371memmove(dst_addr, src_addr, length << l2es);1372return ac_ok;1373} else if (src->is_objArray() && dst->is_objArray()) {1374if (UseCompressedOops) {1375narrowOop *src_addr = objArrayOop(src)->obj_at_addr<narrowOop>(src_pos);1376narrowOop *dst_addr = objArrayOop(dst)->obj_at_addr<narrowOop>(dst_pos);1377return obj_arraycopy_work(src, src_addr, dst, dst_addr, length);1378} else {1379oop *src_addr = objArrayOop(src)->obj_at_addr<oop>(src_pos);1380oop *dst_addr = objArrayOop(dst)->obj_at_addr<oop>(dst_pos);1381return obj_arraycopy_work(src, src_addr, dst, dst_addr, length);1382}1383}1384return ac_failed;1385JRT_END138613871388JRT_LEAF(void, Runtime1::primitive_arraycopy(HeapWord* src, HeapWord* dst, int length))1389#ifndef PRODUCT1390_primitive_arraycopy_cnt++;1391#endif13921393if (length == 0) return;1394// Not guaranteed to be word atomic, but that doesn't matter1395// for anything but an oop array, which is covered by oop_arraycopy.1396Copy::conjoint_jbytes(src, dst, length);1397JRT_END13981399JRT_LEAF(void, Runtime1::oop_arraycopy(HeapWord* src, HeapWord* dst, int num))1400#ifndef PRODUCT1401_oop_arraycopy_cnt++;1402#endif14031404if (num == 0) return;1405BarrierSet* bs = Universe::heap()->barrier_set();1406assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");1407assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");1408if (UseCompressedOops) {1409bs->write_ref_array_pre((narrowOop*)dst, num);1410Copy::conjoint_oops_atomic((narrowOop*) src, (narrowOop*) dst, num);1411} else {1412bs->write_ref_array_pre((oop*)dst, num);1413Copy::conjoint_oops_atomic((oop*) src, (oop*) dst, num);1414}1415bs->write_ref_array(dst, num);1416JRT_END141714181419JRT_LEAF(int, Runtime1::is_instance_of(oopDesc* mirror, oopDesc* obj))1420// had to return int instead of bool, otherwise there may be a mismatch1421// between the C calling convention and the Java one.1422// e.g., on x86, GCC may clear only %al when returning a bool false, but1423// JVM takes the whole %eax as the return value, which may misinterpret1424// the return value as a boolean true.14251426assert(mirror != NULL, "should null-check on mirror before calling");1427Klass* k = java_lang_Class::as_Klass(mirror);1428return (k != NULL && obj != NULL && obj->is_a(k)) ? 1 : 0;1429JRT_END14301431JRT_ENTRY(void, Runtime1::predicate_failed_trap(JavaThread* thread))1432ResourceMark rm;14331434assert(!TieredCompilation, "incompatible with tiered compilation");14351436RegisterMap reg_map(thread, false);1437frame runtime_frame = thread->last_frame();1438frame caller_frame = runtime_frame.sender(®_map);14391440nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());1441assert (nm != NULL, "no more nmethod?");1442nm->make_not_entrant();14431444methodHandle m(nm->method());1445MethodData* mdo = m->method_data();14461447if (mdo == NULL && !HAS_PENDING_EXCEPTION) {1448// Build an MDO. Ignore errors like OutOfMemory;1449// that simply means we won't have an MDO to update.1450Method::build_interpreter_method_data(m, THREAD);1451if (HAS_PENDING_EXCEPTION) {1452assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");1453CLEAR_PENDING_EXCEPTION;1454}1455mdo = m->method_data();1456}14571458if (mdo != NULL) {1459mdo->inc_trap_count(Deoptimization::Reason_none);1460}14611462if (TracePredicateFailedTraps) {1463stringStream ss1, ss2;1464vframeStream vfst(thread);1465methodHandle inlinee = methodHandle(vfst.method());1466inlinee->print_short_name(&ss1);1467m->print_short_name(&ss2);1468tty->print_cr("Predicate failed trap in method %s at bci %d inlined in %s at pc " INTPTR_FORMAT, ss1.as_string(), vfst.bci(), ss2.as_string(), p2i(caller_frame.pc()));1469}147014711472Deoptimization::deoptimize_frame(thread, caller_frame.id());14731474JRT_END14751476#ifndef PRODUCT1477void Runtime1::print_statistics() {1478tty->print_cr("C1 Runtime statistics:");1479tty->print_cr(" _resolve_invoke_virtual_cnt: %d", SharedRuntime::_resolve_virtual_ctr);1480tty->print_cr(" _resolve_invoke_opt_virtual_cnt: %d", SharedRuntime::_resolve_opt_virtual_ctr);1481tty->print_cr(" _resolve_invoke_static_cnt: %d", SharedRuntime::_resolve_static_ctr);1482tty->print_cr(" _handle_wrong_method_cnt: %d", SharedRuntime::_wrong_method_ctr);1483tty->print_cr(" _ic_miss_cnt: %d", SharedRuntime::_ic_miss_ctr);1484tty->print_cr(" _generic_arraycopy_cnt: %d", _generic_arraycopy_cnt);1485tty->print_cr(" _generic_arraycopystub_cnt: %d", _generic_arraycopystub_cnt);1486tty->print_cr(" _byte_arraycopy_cnt: %d", _byte_arraycopy_cnt);1487tty->print_cr(" _short_arraycopy_cnt: %d", _short_arraycopy_cnt);1488tty->print_cr(" _int_arraycopy_cnt: %d", _int_arraycopy_cnt);1489tty->print_cr(" _long_arraycopy_cnt: %d", _long_arraycopy_cnt);1490tty->print_cr(" _primitive_arraycopy_cnt: %d", _primitive_arraycopy_cnt);1491tty->print_cr(" _oop_arraycopy_cnt (C): %d", Runtime1::_oop_arraycopy_cnt);1492tty->print_cr(" _oop_arraycopy_cnt (stub): %d", _oop_arraycopy_cnt);1493tty->print_cr(" _arraycopy_slowcase_cnt: %d", _arraycopy_slowcase_cnt);1494tty->print_cr(" _arraycopy_checkcast_cnt: %d", _arraycopy_checkcast_cnt);1495tty->print_cr(" _arraycopy_checkcast_attempt_cnt:%d", _arraycopy_checkcast_attempt_cnt);14961497tty->print_cr(" _new_type_array_slowcase_cnt: %d", _new_type_array_slowcase_cnt);1498tty->print_cr(" _new_object_array_slowcase_cnt: %d", _new_object_array_slowcase_cnt);1499tty->print_cr(" _new_instance_slowcase_cnt: %d", _new_instance_slowcase_cnt);1500tty->print_cr(" _new_multi_array_slowcase_cnt: %d", _new_multi_array_slowcase_cnt);1501tty->print_cr(" _monitorenter_slowcase_cnt: %d", _monitorenter_slowcase_cnt);1502tty->print_cr(" _monitorexit_slowcase_cnt: %d", _monitorexit_slowcase_cnt);1503tty->print_cr(" _patch_code_slowcase_cnt: %d", _patch_code_slowcase_cnt);15041505tty->print_cr(" _throw_range_check_exception_count: %d:", _throw_range_check_exception_count);1506tty->print_cr(" _throw_index_exception_count: %d:", _throw_index_exception_count);1507tty->print_cr(" _throw_div0_exception_count: %d:", _throw_div0_exception_count);1508tty->print_cr(" _throw_null_pointer_exception_count: %d:", _throw_null_pointer_exception_count);1509tty->print_cr(" _throw_class_cast_exception_count: %d:", _throw_class_cast_exception_count);1510tty->print_cr(" _throw_incompatible_class_change_error_count: %d:", _throw_incompatible_class_change_error_count);1511tty->print_cr(" _throw_array_store_exception_count: %d:", _throw_array_store_exception_count);1512tty->print_cr(" _throw_count: %d:", _throw_count);15131514SharedRuntime::print_ic_miss_histogram();1515tty->cr();1516}1517#endif // PRODUCT151815191520